Example #1
def add_activation_layer(model, activation, activation_param):
    """Adds the specified activation layer to the given model.

    Parameters
    ----------
    model: tf.keras.Model
        tf.keras model to which to add an activation layer

    activation: str
        Activation to add to the model

    activation_param: float
        Parameter related to the activation (when needed)
    """
    if activation == "relu":
        model.add(ReLU())
    elif activation == "leaky_relu":
        model.add(LeakyReLU(alpha=activation_param))
    elif activation == "prelu":
        model.add(PReLU())
    elif activation == "elu":
        model.add(ELU(alpha=activation_param))
    elif activation == "selu":
        model.add(Activation("selu"))
    elif activation == "thresholded_relu":
        model.add(ThresholdedReLU(theta=activation_param))
    elif activation == "softmax":
        model.add(Softmax())
    elif activation == "softplus":
        model.add(Activation("softplus"))
    #    elif activation == "rrelu":
    #        model.add(tfa.activations.rrelu())
    else:
        print(f"Selected activation function ({activation}) is not available!")
Example #2
  def _build_graph(self):
    smile_images = Input(shape=self.input_shape)
    stem = chemnet_layers.Stem(self.base_filters)(smile_images)

    inceptionA_out = self.build_inception_module(inputs=stem, type="A")
    reductionA_out = chemnet_layers.ReductionA(
        self.base_filters)(inceptionA_out)

    inceptionB_out = self.build_inception_module(
        inputs=reductionA_out, type="B")
    reductionB_out = chemnet_layers.ReductionB(
        self.base_filters)(inceptionB_out)

    inceptionC_out = self.build_inception_module(
        inputs=reductionB_out, type="C")
    avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * 2)(avg_pooling_out)
      logits = Reshape((self.n_tasks, 2))(logits)
      output = Softmax()(logits)
      outputs = [output, logits]
      output_types = ['prediction', 'loss']
      loss = SoftmaxCrossEntropy()

    else:
      output = Dense(self.n_tasks * 1)(avg_pooling_out)
      output = Reshape((self.n_tasks, 1))(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()

    model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
    return model, loss, output_types
Example #3
    def __init__(
        self,
        kernel_sizes: List[int] = None,
        filters: int = DEFAULT_FILTERS,
        dropout_rate: float = DEFAULT_DROPOUT_RATE,
        dense_layers: int = DEFAULT_DENSE_LAYERS,
        activation: str = DEFAULT_ACTIVATION,
        classes: int = DEFAULT_CLASSES,
    ) -> None:
        if kernel_sizes is None:
            kernel_sizes = copy(ConvolutionalNGramsModel.DEFAULT_KERNEL_SIZES)

        super(ConvolutionalNGramsModel, self).__init__()
        self._convolutions: List[Conv1D] = [
            Conv1D(filters=filters,
                   kernel_size=kernel_size,
                   activation=activation) for kernel_size in kernel_sizes
        ]
        self._pools: List[GlobalMaxPool1D] = [
            GlobalMaxPool1D() for _ in range(len(self._convolutions))
        ]
        self._stack: Concatenate = Concatenate(axis=1)
        self._dropout: Dropout = Dropout(rate=dropout_rate)
        self._dense: List[Dense] = ([
            Dense(units=(filters * len(kernel_sizes)) // (2**layer))
            for layer in range(1, dense_layers)
        ] if dense_layers > 1 else [])
        self._classification: Dense = Dense(units=classes,
                                            activation=activation)
        self._softmax: Softmax = Softmax(axis=1)
Example #4
 def add_last_layers(no_layers=2):
     model.add(Flatten())
     if no_layers == 3:
         if regularizer:
             model.add(
                 Dense(1024,
                       kernel_initializer='he_uniform',
                       kernel_regularizer=l2(0.001)))
         else:
             model.add(Dense(1024, kernel_initializer='he_uniform'))
         model.add(Activation(activation))
     if no_layers >= 2:
         if regularizer:
             model.add(
                 Dense(128,
                       kernel_initializer='he_uniform',
                       kernel_regularizer=l2(0.001)))
         else:
             model.add(Dense(128, kernel_initializer='he_uniform'))
         model.add(Activation(activation))
     if regularizer:
         model.add(
             Dense(10,
                   kernel_initializer='he_uniform',
                   kernel_regularizer=l2(0.001)))
     else:
         model.add(Dense(10, kernel_initializer='he_uniform'))
     model.add(Softmax())
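add_last_layers references model, regularizer, and activation from an enclosing scope, so it is presumably nested inside a model-building function. A minimal sketch of that assumed context (names and defaults here are illustrative):

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Activation, Conv2D, Dense, Flatten, Softmax
from tensorflow.keras.regularizers import l2

# Sketch only: an assumed enclosing scope for the nested helper above.
def build_model(no_layers=2, regularizer=True, activation='relu'):
    model = Sequential([Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3))])

    def add_last_layers(no_layers=2):
        ...  # body as shown above

    add_last_layers(no_layers)
    return model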
Example #5
    def model_parameterized(num_classes=8, hparams=None, name_suffix=''):
        if hparams is None:
            hparams = {}

        base_width = hparams['base_width'] if 'base_width' in hparams else 16

        x = Input(shape=(None, None, 3))  # input_dim, input_dim  # <- use for model.summary() to see layer sizes
        input_layer = x

        if 'non_cropping_conv' in hparams and hparams['non_cropping_conv']:
            x = Conv2D(base_width, 3, kernel_initializer=he_norm, activation='relu', padding='same')(x)
            x = MaxPool2D((2, 2), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)

        for k, d, w in zip(kernels, dilations, widths):
            x = Conv2D(w * base_width, k, **conv_args, dilation_rate=d)(x)

            x = MaxPool2D((2, 2), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)

        x = Conv2D(num_classes, 1, kernel_initializer=he_norm, activation=None)(x)
        x = Softmax()(x)

        name = '{}x_d{}_{}'.format(input_dim, param_string, name_suffix)

        model = tf.keras.Model(inputs=input_layer, outputs=x,
                               name=name)
        return model, input_dim
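model_parameterized reads kernels, dilations, widths, conv_args, he_norm, input_dim, and param_string from an enclosing scope. One plausible set of definitions, offered purely as illustrative assumptions:

from tensorflow.keras.initializers import HeNormal

# Sketch only: plausible values for the free variables used above.
he_norm = HeNormal()
kernels = [3, 3, 3]        # per-block kernel sizes
dilations = [1, 2, 4]      # per-block dilation rates
widths = [1, 2, 4]         # per-block multipliers of base_width
conv_args = dict(kernel_initializer=he_norm, activation='relu')
input_dim = 64             # nominal crop size, used only in the model name
param_string = 'k3_d124'   # hyperparameter tag, used only in the model name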
Example #6
def SF_Module(x_list, n_channel, reduction, limitation):
    ## Split
    fused = None
    for x_s in x_list:
        if fused is None:
            fused = x_s
        else:
            fused = Add()([fused, x_s])

    ## Fuse
    fused = GlobalAveragePooling2D()(fused)
    fused = BatchNormalization()(fused)
    fused = Dense(max(n_channel // reduction, limitation),
                  activation='selu')(fused)

    ## Select
    masks = []
    for i in range(len(x_list)):
        masks.append(Dense(n_channel)(fused))
    mask_stack = Lambda(K.stack, arguments={'axis': -1})(masks)
    mask_stack = Softmax(axis=-2)(mask_stack)  # (n_channel, n_kernel)

    selected = None
    for i, x_s in enumerate(x_list):
        mask = Lambda(lambda z, i=i: z[:, :, i])(mask_stack)
        mask = Reshape((1, 1, n_channel))(mask)
        x_s = Multiply()([x_s, mask])
        if selected is None:
            selected = x_s
        else:
            selected = Add()([selected, x_s])

    return selected
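A minimal sketch of applying SF_Module to two parallel branches of matching shape; the input size and branch layers are assumptions:

from tensorflow.keras.layers import Conv2D, Input
from tensorflow.keras.models import Model

# Sketch only: selective fusion of two conv branches with different kernel sizes.
inp = Input(shape=(32, 32, 64))
branch_a = Conv2D(64, 3, padding='same')(inp)
branch_b = Conv2D(64, 5, padding='same')(inp)
out = SF_Module([branch_a, branch_b], n_channel=64, reduction=4, limitation=8)
model = Model(inp, out)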
Example #7
    def build_rnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (24, 24, 3)

        r_inp = Input(input_shape)

        r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer)

        r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer)

        r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = Flatten()(r_layer)
        r_layer = Dense(128)(r_layer)
        r_layer = PReLU()(r_layer)

        r_layer_out1 = Dense(2)(r_layer)
        r_layer_out1 = Softmax(axis=1)(r_layer_out1)

        r_layer_out2 = Dense(4)(r_layer)

        r_net = Model(r_inp, [r_layer_out2, r_layer_out1])

        return r_net
Example #8
    def __init__(self, num_filters=64, num_classes=5):
        super(CDFDecoder, self).__init__()

        self.up_sample4 = Conv2DTranspose(num_filters,
                                          kernel_size=2,
                                          strides=(2, 2),
                                          padding='same')
        self.cub4 = CompUnpoolBlock(num_filters)
        self.cdb_decoder4 = CompDenseBlock(num_filters)

        self.up_sample3 = Conv2DTranspose(num_filters,
                                          kernel_size=2,
                                          strides=(2, 2),
                                          padding='same')
        self.cub3 = CompUnpoolBlock(num_filters)
        self.cdb_decoder3 = CompDenseBlock(num_filters)

        self.up_sample2 = Conv2DTranspose(num_filters,
                                          kernel_size=2,
                                          strides=(2, 2),
                                          padding='same')
        self.cub2 = CompUnpoolBlock(num_filters)
        self.cdb_decoder2 = CompDenseBlock(num_filters)

        self.up_sample1 = Conv2DTranspose(num_filters,
                                          kernel_size=2,
                                          strides=(2, 2),
                                          padding='same')
        self.cub1 = CompUnpoolBlock(num_filters)
        self.cdb_decoder1 = CompDenseBlock(num_filters)

        self.final_conv = Conv2D(num_classes, kernel_size=1)
        self.softmax = Softmax()
Example #9
def EMB_ECODER_BACILLUS_02(input_shape, n_class):
    # Input layer
    x = Input(shape=input_shape)

    emb = Embedding(4, 9, input_length=input_shape[0])(x)

    # Block 01
    block1 = Conv1D(
        filters=128,
        kernel_size=5,
        padding='same',
        strides=1)(emb)
    block1 = keras_contrib.InstanceNormalization()(block1)
    block1 = PReLU(shared_axes=[1])(block1)
    block1 = Dropout(rate=0.2)(block1)
    block1 = MaxPooling1D(pool_size=2)(block1)

    # Block 02
    block2 = Conv1D(
        filters=256,
        kernel_size=11,
        padding='same',
        strides=1)(emb)
    block2 = keras_contrib.InstanceNormalization()(block2)
    block2 = PReLU(shared_axes=[1])(block2)
    block2 = Dropout(rate=0.2)(block2)
    block2 = MaxPooling1D(pool_size=2)(block2)

    # # Block 03
    # block3 = Conv1D(
    #     filters=256,
    #     kernel_size=21,
    #     padding='same',
    #     strides=1)(emb)
    # block3 = keras_contrib.InstanceNormalization()(block2)
    # block3 = PReLU(shared_axes=[1])(block3)
    # block3 = Dropout(rate=0.2)(block3)
    # block3 = MaxPooling1D(pool_size=2)(block3)

    # split for attention
    attention_data = Lambda(lambda x: x)(block2)
    attention_softmax = Lambda(lambda x: x)(block2)

    # attention mechanism
    attention_softmax = Softmax()(attention_softmax)
    multiply_layer = Multiply()([attention_softmax, attention_data])

    # Fully connected layers
    dense_layer = Dense(units=256, activation='sigmoid')(multiply_layer)
    dense_layer = keras_contrib.InstanceNormalization()(dense_layer)


    # Classification layer
    flatten_layer = Flatten()(dense_layer)
    output_layer = Dense(units=n_class, activation='sigmoid')(flatten_layer)

    # Create model object
    model = models.Model(inputs=[x], outputs=[output_layer])

    return model
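The Embedding(4, 9, ...) layer implies four input symbols (plausibly nucleotides). A sketch of instantiating and compiling the model; the sequence length and loss choice are illustrative assumptions:

# Sketch only: length-1000 integer sequences over a 4-symbol alphabet.
model = EMB_ECODER_BACILLUS_02(input_shape=(1000,), n_class=5)
model.compile(optimizer='adam', loss='binary_crossentropy',  # matches the sigmoid output
              metrics=['accuracy'])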
Example #10
    def build(self, hidden_layers=[16], activations=['relu'], dropout=0.5, learning_rate=0.01, l2_norm=5e-4, p1=1.4, p2=0.7, epsilon=0.01):

        with self.device:

            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, sparse=True, name='adj_matrix')
            index = Input(batch_shape=[None],  dtype=tf.int32, name='index')

            self.GCN_layers = [GraphConvolution(hidden_layers[0], activation=activations[0], 
                                                kernel_regularizer=regularizers.l2(l2_norm)),
                               GraphConvolution(self.n_classes)]
            self.dropout_layer = Dropout(rate=dropout)
            logit = self.propagation(x, adj)
            logit = tf.ensure_shape(logit, (self.n_nodes, self.n_classes))
            output = tf.gather(logit, index)
            output = Softmax()(output)
            model = Model(inputs=[x, adj, index], outputs=output)
            model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])

            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
            self.adv_optimizer = Adam(lr=learning_rate/10)
            self.built = True
Example #11
    def make_model(self, input_shape, num_authors):
        param_mapping.map_params(self.params["model_params"])

        input = keras.Input(batch_shape=(None, input_shape),
                            name='secondary_model_input')

        mlp1 = Dense(512,
                     activation=None,
                     name="MLP1",
                     kernel_regularizer='l1')
        mlp2 = Dense(512,
                     activation=None,
                     name="MLP2",
                     kernel_regularizer='l1')

        prediction_layer = Dense(num_authors,
                                 kernel_regularizer='l2',
                                 name="prediction")
        softmax = Softmax(name="prediction_probs")

        base = self.base_model(input) * 1
        mlp1_out = Dropout(rate=.5)(mlp1(base))
        mlp2_out = Dropout(rate=.5)(mlp2(mlp1_out))
        prediction = prediction_layer(mlp2_out)
        prediction_probs = softmax(prediction)

        model = keras.Model(inputs=input, outputs=prediction_probs)
        model.summary()
        return model
Example #12
def Deeplabv3(width, height, channel=3, n_labels=2):
    img_input = Input(shape=(width, height, channel))
    # Backbone network
    x, atrous_rates, skip1 = Xception().xception(img_input, OS=16)

    # ASPP: the rate values depend on the output stride. SepConv_BN applies a 3x3 dilated
    # convolution, then a 1x1 compression convolution; its dilation rate is the rate value.
    b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, )(x)
    b0 = BatchNormalization(epsilon=1e-5)(b0)
    b0 = Activation('relu')(b0)

    # rate = 6 (12)
    b1 = SepConv_BN(x, 256,rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
    # rate = 12 (24)
    b2 = SepConv_BN(x, 256,rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
    # rate = 18 (36)
    b3 = SepConv_BN(x, 256,rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)

    b4 = GlobalAveragePooling2D()(x)  # global average pooling
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)  # add one dimension
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)  # add another dimension -> 1*1*channels

    b4 = Conv2D(256, (1, 1), padding='same', use_bias=False)(b4)  # 1x1 conv to compress channels -> 1*1*256
    b4 = BatchNormalization(epsilon=1e-5)(b4)
    b4 = Activation('relu')(b4)

    size_before = tf.keras.backend.int_shape(x)
    b4 = Lambda(lambda x: tf.image.resize(x, size_before[1:3]))(b4)  # resize back to the feature-map size (e.g. 64*64*256)

    x = Concatenate()([b4,b0, b1, b2, b3])

    x = Conv2D(256, (1, 1), padding='same',
               use_bias=False)(x)
    x = BatchNormalization(epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)


    x = Lambda(lambda xx: tf.image.resize(xx, skip1.shape[1:3]))(x)


    dec_skip1 = Conv2D(48, (1, 1), padding='same',use_bias=False)(skip1)
    dec_skip1 = BatchNormalization(epsilon=1e-5)(dec_skip1)
    dec_skip1 = Activation('relu')(dec_skip1)
    x = Concatenate()([x, dec_skip1])

    x = SepConv_BN(x, 256,depth_activation=True, epsilon=1e-5)
    x = SepConv_BN(x, 256,depth_activation=True, epsilon=1e-5)

    x = Conv2D(n_labels, (1, 1), padding='same')(x)

    size_before3 = tf.keras.backend.int_shape(img_input)
    x = Lambda(lambda xx:tf.image.resize(xx,size_before3[1:3]))(x)

    # x = Reshape((-1,n_labels))(x)
    x = Softmax()(x)

    inputs = img_input
    model = Model(inputs, x)

    return model
Example #13
    def __init__(self, params):
        super(Encoder, self).__init__()
        self.epsilon = params["epsilon"]

        self.batch_size = params["batch_size"]

        self.num_clusters = params["num_clusters"]

        self.fc11_hidden_dim = params["enc_fc11_hidden_dim"]
        self.fc12_hidden_dim = params["enc_fc12_hidden_dim"]

        self.fc21_hidden_dim = params["enc_fc21_hidden_dim"]
        self.fc22_hidden_dim = params["enc_fc22_hidden_dim"]
        self.gaussian_hidden_dim = params["enc_gaussian_hidden_dim"]

        self.activation = params["activation"]

        # Fully Connected Layer: Input to feature
        self.fc11 = Dense(units=self.fc11_hidden_dim,
                          activation=self.activation)
        self.fc12 = Dense(units=self.fc12_hidden_dim,
                          activation=self.activation)
        self.out = Dense(units=self.num_clusters)
        self.softmax = Softmax()

        # Fully Connected Layer: Learn Gaussian Distribution
        self.fc21 = Dense(units=self.fc21_hidden_dim,
                          activation=self.activation)
        self.fc22 = Dense(units=self.fc22_hidden_dim,
                          activation=self.activation)
        self.mean = Dense(units=self.gaussian_hidden_dim)
        self.variance = Dense(units=self.gaussian_hidden_dim,
                              activation=tf.nn.softplus)
Example #14
def darknet19(inputs):
    """Generate Darknet-19 model for Imagenet classification."""
    body = darknet19_body()(inputs)
    x = DarknetConv2D(1000, (1, 1))(body)
    x = GlobalAveragePooling2D()(x)
    logits = Softmax()(x)
    return Model(inputs, logits)
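A short instantiation sketch; the 224x224x3 input size is an assumption based on common ImageNet practice:

from tensorflow.keras.layers import Input

# Sketch only: building the classifier on an assumed ImageNet-sized input.
inputs = Input(shape=(224, 224, 3))
model = darknet19(inputs)
model.summary()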
Example #15
def cmlstmModel(num_rules=18, lstm_units=50, num_months=6, dense_param=[2]):
    """
    Model with cmeans labels

    Parameters
    ----------
    num_rules : int
        number of rules, i.e. the number of non-diligence probabilities per ANM per time frame
    lstm_units : int
        number of LSTM units
    num_months : int
        number of months of non-diligence vector history taken as input
    dense_param : list of int
        dense layer sizes (only the first entry is used)

    Returns
    -------
    model : compiled model
    """

    first_input = Input(shape=(num_months, num_rules))
    second_lstm = LSTM(lstm_units)(first_input)
    third_dense = Dense(dense_param[0], activation="relu")(second_lstm)
    fourth_softmax = Softmax()(third_dense)

    model = Model(inputs=first_input, outputs=fourth_softmax)

    model.compile(optimizer='adam', loss='mse', metrics=['mse', 'mae'])

    return model
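A short sketch of fitting the compiled model on random data of the expected (num_months, num_rules) shape; the data and sizes are illustrative only:

import numpy as np

# Sketch only: fitting on random data shaped (samples, num_months, num_rules).
model = cmlstmModel()
X = np.random.rand(100, 6, 18)   # 6 months of 18 rule probabilities each
y = np.random.rand(100, 2)       # matches Dense(dense_param[0]) with dense_param=[2]
model.fit(X, y, epochs=2, batch_size=16)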
Example #16
    def build_pnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (None, None, 3)

        p_inp = Input(input_shape)

        p_layer = Conv2D(10,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_inp)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)
        p_layer = MaxPooling2D(pool_size=(2, 2),
                               strides=(2, 2),
                               padding="same")(p_layer)

        p_layer = Conv2D(16,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer = Conv2D(32,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer)
        p_layer_out1 = Softmax(axis=3)(p_layer_out1)

        p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer)

        p_net = Model(p_inp, [p_layer_out2, p_layer_out1])

        return p_net
Example #17
def softmax_unet(
    input_tensor: tf.Tensor, instruments: Iterable[str], params: Optional[Dict] = None
) -> Dict:
    """
    Apply softmax to multitrack unet in order to have masks summing to one.

    Parameters:
        input_tensor (tensorflow.Tensor):
            Tensor to apply the multitrack U-net to.
        instruments (Iterable[str]):
            Iterable that provides a collection of instruments.
        params (Optional[Dict]):
            (Optional) dict of U-net parameters.

    Returns:
        Dict:
            Created output tensor dict.
    """
    logit_mask_list = []
    for instrument in instruments:
        out_name = f"{instrument}_spectrogram"
        logit_mask_list.append(
            apply_unet(
                input_tensor,
                output_name=out_name,
                params=params,
                output_mask_logit=True,
            )
        )
    masks = Softmax(axis=4)(tf.stack(logit_mask_list, axis=4))
    output_dict = {}
    for i, instrument in enumerate(instruments):
        out_name = f"{instrument}_spectrogram"
        output_dict[out_name] = Multiply(name=out_name)([masks[..., i], input_tensor])
    return output_dict
Example #18
    def build(self, hidden_layers=[16], activations=['relu'], dropout=0.5, 
              learning_rate=0.01, l2_norm=5e-4, p1=1., p2=1., 
              n_power_iterations=1, epsilon=0.03, xi=1e-6):
        
        with self.device:
            
            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, sparse=True, name='adj_matrix')
            index = Input(batch_shape=[None],  dtype=tf.int32, name='index')

            self.GCN_layers = [GraphConvolution(hidden_layers[0], 
                                                activation=activations[0], 
                                                kernel_regularizer=regularizers.l2(l2_norm)),
                               GraphConvolution(self.n_classes)]
            self.dropout_layer = Dropout(dropout)
            
            logit = self.propagation(x, adj)
            output = tf.gather(logit, index)
            output = Softmax()(output)
            model = Model(inputs=[x, adj, index], outputs=output)
    
            self.model = model
            self.train_metric = SparseCategoricalAccuracy()
            self.test_metric = SparseCategoricalAccuracy()
            self.optimizer = Adam(lr=learning_rate)
            self.built = True
            
        self.p1 = p1 # Alpha
        self.p2 = p2 # Beta
        self.xi = xi # Small constant for finite difference
        self.epsilon = epsilon # Norm length for (virtual) adversarial training
        self.n_power_iterations = n_power_iterations #  Number of power iterations
Example #19
def DenseNet(pretrained=True, tnb_extractor=True):

    DenseNet121().summary()
    VGG16().summary()

    if (pretrained):
      feature_extractor = DenseNet121(weights='imagenet', include_top=False, input_shape=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH, config.CHANNELS))
      feature_extractor.trainable = tnb_extractor
    else:
      feature_extractor = DenseNet121(include_top=False, input_shape=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH, config.CHANNELS))
      feature_extractor.trainable = True

    feature_extractor.summary()

    x = Conv2D(1024, (7, 7), activation='relu', padding='same')(feature_extractor.output)
    x = Dropout(0.5)(x)
    # x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
    # x = Dropout(0.5)(x)
    x = Conv2D(5, (1, 1), activation='linear')(x)
    x = Conv2DTranspose(5, kernel_size=(64, 64), strides=(32, 32), padding='same')(x)
    # x = Reshape((IMAGE_WIDTH*IMAGE_HEIGHT, -1))(x)
    outputs = Softmax(axis=-1)(x)
    # outputs = Lambda(prob_to_labels)(x)
    # outputs = Reshape((224, 224))(outputs)

    model = Model(inputs=feature_extractor.inputs, outputs=outputs, name="DenseNet")

    model.summary()

    return model
Example #20
def FCN_VGG16_16s(pretrained=True, tnb_extractor=True):
    wd = 0.1
    kr = regularizers.l2
    in1 = Input(shape=(config.IMAGE_WIDTH, config.IMAGE_HEIGHT, config.CHANNELS))
    # ki = 'he_normal'
    ki = 'glorot_uniform'


    if (pretrained):
      feature_extractor = VGG16(weights='imagenet', include_top=False, input_tensor=in1)
      feature_extractor.trainable = tnb_extractor
    else:
      feature_extractor = VGG16(include_top=False, input_tensor=in1)
      feature_extractor.trainable = True

    pool_5 = feature_extractor.get_layer('block5_pool')

    x = Conv2D(4096, (7, 7), activation='relu', padding='same')(pool_5.output)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
    x = Dropout(0.5)(x)
    score_32s = Conv2D(5, (1, 1))(x)
    # At this point we have the normal output of the FCN 32s

    # Skip connection 1
    # Upscaling the last pooling output so that it can be further summed with the pool4 layer
    upscore2 = Conv2DTranspose(config.NUM_CLASSES, 4,
                               strides=(2, 2),
                               padding='same',
                               kernel_regularizer=kr(wd),
                               kernel_initializer=ki,
                               use_bias=False,
                               name='upscore2')(score_32s)

    # Getting the pool4 layer
    pool_4 = feature_extractor.get_layer('block4_pool')
    # Applying a 1x1 convolution to generate as many feature maps as there are classes (output of the upsampled conv7 layer)
    score_pool4 = Conv2D(config.NUM_CLASSES, 1,
                         kernel_regularizer=kr(wd),
                         use_bias=True)(pool_4.output)

    # Adding the upsampled conv7 layer to the pool4 layer
    fuse_pool4 = Add()([upscore2, score_pool4])

    upscore8 = Conv2DTranspose(config.NUM_CLASSES, 32,
                               strides=(16, 16),
                               padding='same',
                               kernel_regularizer=kr(wd),
                               kernel_initializer=ki,
                               use_bias=False,
                               name='upscore8')(fuse_pool4)

    #reshape = Reshape((config.IMAGE_HEIGHT * config.IMAGE_WIDTH, -1))(upscore8)
    output = Softmax(axis=-1)(upscore8)

    model = Model(in1, output, name="FCN_VGG16_16s")

    model.summary()

    return model
Example #21
  def __init__(self, units):
    super(BahdanauAttention, self).__init__()
    self.W1 = Dense(units)
    self.W2 = Dense(units)
    self.V = Dense(1)

    self.attention = Softmax(axis=1, name="attention")
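The constructor only declares the sublayers. A conventional call method for this layout, following the standard Bahdanau formulation (a sketch, not part of the original; assumes tensorflow is imported as tf):

  # Sketch only: a typical forward pass for the layers declared above.
  def call(self, query, values):
    # query: (batch, hidden); values: (batch, seq_len, hidden)
    query_with_time_axis = tf.expand_dims(query, 1)        # (batch, 1, hidden)
    score = self.V(tf.nn.tanh(
        self.W1(query_with_time_axis) + self.W2(values)))  # (batch, seq_len, 1)
    attention_weights = self.attention(score)              # softmax over the seq axis
    context_vector = tf.reduce_sum(attention_weights * values, axis=1)
    return context_vector, attention_weights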
Example #22
    def build(self, hidden_layers=[32], activations=['relu'], dropout=0.5, 
              learning_rate=0.01, l2_norm=5e-4, use_bias=False):
        
        with self.device:
            
            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, name='adj_matrix')
            index = Input(batch_shape=[None],  dtype=tf.int32, name='index')

            h = x
            for hid, activation in zip(hidden_layers, activations):
                h = DenseGraphConv(hid, use_bias=use_bias,
                                     activation=activation, 
                                     kernel_regularizer=regularizers.l2(l2_norm))([h, adj])
                
                h = Dropout(rate=dropout)(h)

            h = DenseGraphConv(self.n_classes, use_bias=use_bias)([h, adj])
            h = tf.ensure_shape(h, [self.n_nodes, self.n_classes])
            h = tf.gather(h, index)
            output = Softmax()(h)

            model = Model(inputs=[x, adj, index], outputs=output)
            model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])

            self.model = model
            self.built = True
Example #23
def conv_failed_attempt(num_classes):
    """
    December
    does not train at all

    """
    c = 32
    model = tf.keras.Sequential(
        [
            Input(shape=(None, None, 3)),
            Rescaling(1.0 / 255),
            Conv2D(c, 3, activation='relu'),
            Conv2D(c, 3, activation='relu'),
            MaxPool2D(),
            Conv2D(2 * c, 3, activation='relu'),
            Conv2D(2 * c, 3, activation='relu'),
            Conv2D(2 * c, 3, activation='relu'),
            MaxPool2D(),
            Conv2D(4 * c, 3, activation='relu'),
            Conv2D(4 * c, 3, activation='relu'),
            MaxPool2D(),
            Conv2D(8 * c, 3, activation='relu'),
            MaxPool2D(),

            # [1 x 1] here

            # Conv2D(128, 1, activation='relu'),
            Conv2D(num_classes, 1,
                   activation='relu'),  # [W x H x many] -> [W x H x C]
            Flatten(),
            Softmax()
        ],
        name='sequential_9l_{}c'.format(c))
    return model, 32
Example #24
def fcn_residual_32x_18l(num_classes, name_suffix=''):
    """
    February 15

    Adding residual branches

    :param num_classes:
    :param name_suffix:
    :return:
    """

    coef = 3
    width = 64

    input_layer = Input(shape=(None, None, 3))
    x = BatchNormalization()(input_layer)
    x = Conv2D(
        width,
        3,
        **conv_args,
    )(x)  # Makes width wide enough for addition inside skip module

    for i in range(7):
        y = Cropping2D(cropping=((2, 2), (2, 2)), )(x)

        x = BatchNormalization()(x)
        x = Conv2D(
            width,
            3,
            **conv_args,
        )(x)

        x = BatchNormalization()(x)
        x = Conv2D(
            width,
            3,
            **conv_args,
        )(x)

        # if i % 2 == 0:
        #     x = AvgPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)

        # y = Conv2D(width, 1, **conv_args)(y)  # 1x1
        x = add([x, y])

    x = BatchNormalization()(x)
    x = Conv2D(16 * 1 << coef, 2, **conv_args)(x)  # fit-once

    x = BatchNormalization()(x)
    x = Conv2D(16 * 1 << coef, 1, **conv_args)(x)

    x = BatchNormalization()(x)
    x = Conv2D(num_classes, 1, kernel_initializer=he_norm)(x)  # no activation

    x = Softmax()(x)

    model = tf.keras.Model(inputs=input_layer,
                           outputs=x,
                           name='residual_32x_64w_18l_' + name_suffix)
    return model, 32
Example #25
    def build(self,
              hidden_layers=[32],
              n_filters=[8, 8],
              activations=[None],
              dropout=0.8,
              learning_rate=0.1,
              l2_norm=5e-4,
              use_bias=False,
              k=8):

        with self.device:

            x = Input(batch_shape=[None, self.n_features],
                      dtype=tf.float32,
                      name='features')
            adj = Input(batch_shape=[None, None],
                        dtype=tf.float32,
                        sparse=False,
                        name='adj_matrix')
            mask = Input(batch_shape=[None], dtype=tf.bool, name='mask')

            h = x
            for hid, activation in zip(hidden_layers, activations):
                h = Dropout(rate=dropout)(h)
                h = DenseGraphConv(
                    hid,
                    use_bias=use_bias,
                    activation=activation,
                    kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

            for n_filter in n_filters:
                top_k_h = Top_k_features(k=k)([h, adj])
                cur_h = LGConvolution(
                    n_filter,
                    k,
                    use_bias=use_bias,
                    dropout=dropout,
                    activation=None,
                    kernel_regularizer=regularizers.l2(l2_norm))(top_k_h)
                cur_h = BatchNormalization()(cur_h)
                h = Concatenate()([h, cur_h])

            h = Dropout(rate=dropout)(h)
            h = DenseGraphConv(
                self.n_classes,
                use_bias=use_bias,
                kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

            h = tf.boolean_mask(h, mask)
            output = Softmax()(h)

            model = Model(inputs=[x, adj, mask], outputs=output)
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=Nadam(lr=learning_rate),
                          metrics=['accuracy'])

            self.k = k
            self.model = model
            self.built = True
Example #26
    def model(num_class=173, train=False, batch_size=64):
        # Load the pretrained model
        bert = build_transformer_model(
            config_path=config_path,
            checkpoint_path=checkpoint_path,
            with_pool=False,
            return_keras_model=False,
        )

        filter_sizes = [3, 4, 5]
        bert_output = bert.model.get_layer(
            "Transformer-11-FeedForward-Norm").output
        reshape = Reshape((batch_size, bert.hidden_size, 1))(bert_output)

        conv_0 = Conv2D(128,
                        kernel_size=(filter_sizes[0], bert.hidden_size),
                        padding='valid',
                        kernel_initializer=tf.keras.initializers.RandomNormal(
                            mean=0.0, stddev=0.05, seed=None))(reshape)
        conv_1 = Conv2D(128,
                        kernel_size=(filter_sizes[1], bert.hidden_size),
                        padding='valid',
                        kernel_initializer=tf.keras.initializers.RandomNormal(
                            mean=0.0, stddev=0.05, seed=None))(reshape)
        conv_2 = Conv2D(128,
                        kernel_size=(filter_sizes[2], bert.hidden_size),
                        padding='valid',
                        kernel_initializer=tf.keras.initializers.RandomNormal(
                            mean=0.0, stddev=0.05, seed=None))(reshape)

        maxpool_0 = MaxPool2D(pool_size=(batch_size - filter_sizes[0] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_0)
        maxpool_1 = MaxPool2D(pool_size=(batch_size - filter_sizes[1] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_1)
        maxpool_2 = MaxPool2D(pool_size=(batch_size - filter_sizes[2] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_2)

        concatenated_tensor = Concatenate(axis=1)(
            [maxpool_0, maxpool_1, maxpool_2])

        flatten = Flatten()(concatenated_tensor)

        if train:
            dropout_output = Dropout(rate=0.1)(flatten)
        else:
            dropout_output = flatten

        dense_output = Dense(units=num_class)(dropout_output)

        bn = BatchNormalization()(dense_output)

        logits = Softmax()(bn)

        model = Model(bert.model.input, logits)

        return model
Example #27
def create_model():
    model = Sequential()

    model.add(Conv2D(2, 3, activation = None,use_bias = False, \
                input_shape = (28, 28, 1), padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(Conv2D(4, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(
        MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding="valid",
                     data_format="channels_last"))

    model.add(Conv2D(8, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(Conv2D(16, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(
        MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding="valid",
                     data_format="channels_last"))

    model.add(Flatten())

    model.add(Dense(10, activation = None, use_bias = True, \
                   kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
                   ))

    model.add(Softmax(axis=1))

    optimizer = Adam(lr=0.0001)
    model.compile(loss="categorical_crossentropy", \
                    optimizer=optimizer, metrics=["accuracy"])
    return model
Example #28
def _get_resnet(input_shape, ResidualLayer, num_class, name, stages):
    input_ = Input(shape=input_shape)
    #conv1
    x = Conv2D(filters=64, kernel_size=(7, 7), strides=2,
               padding='same')(input_)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    print('conv1 output size', x.shape)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='same')(x)

    #conv2_x
    for i in range(stages[0]):
        if i == 0 and ResidualLayer is not BasicBlock:
            x = ResidualLayer(filters=64,
                              name='stage_1_{}'.format(i),
                              is_shortcut=True)(x)
        else:
            x = ResidualLayer(filters=64, name='stage_1_{}'.format(i))(x)
    print('conv2_x output size', x.shape)

    #conv3_x
    for i in range(stages[1]):
        if i == 0:
            x = ResidualLayer(filters=128,
                              strides=2,
                              is_shortcut=True,
                              name='stage_2_{}'.format(i))(x)
        else:
            x = ResidualLayer(filters=128, name='stage_2_{}'.format(i))(x)
    print('conv3_x output size', x.shape)

    #conv4_x
    for i in range(stages[2]):
        if i == 0:
            x = ResidualLayer(filters=256,
                              strides=2,
                              is_shortcut=True,
                              name='stage_3_{}'.format(i))(x)
        else:
            x = ResidualLayer(filters=256, name='stage_3_{}'.format(i))(x)
    print('conv4_x output size', x.shape)

    #conv5_x
    for i in range(stages[3]):
        if i == 0:
            x = ResidualLayer(filters=512,
                              strides=2,
                              is_shortcut=True,
                              name='stage_4_{}'.format(i))(x)
        else:
            x = ResidualLayer(filters=512, name='stage_4_{}'.format(i))(x)
    print('conv5_x output size', x.shape)

    x = GlobalAveragePooling2D()(x)

    x = Flatten()(x)
    x = Dense(units=num_class)(x)
    x = Softmax()(x)
    return Model(inputs=input_, outputs=x, name=name)
Example #29
    def build(self, input_shape):
        # ########################################################################
        # order segment generation network

        # 1. Convs
        self.conv_order_seg1 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg1",
                                             padding="same")  # 1/2
        self.conv_order_seg2 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg2",
                                             padding="same")  # 1/4
        self.conv_order_seg3 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg3",
                                             padding="same")  # 1/8

        # 2. GRU
        self.transpose1 = Permute((2, 1, 3))  # [B,H,W,C] => [B,W,H,C]
        # self.reshape1 = Reshape((-1,self.conf.INPUT_IMAGE_WIDTH,self.conf.INPUT_IMAGE_HEIGHT*self.filter_num)) # [B,W,H,C] => [B,W,H*C]
        self.gru_order_seg = GRU(units=self.filter_num * (input_shape[1] // 8),
                                 return_sequences=True,
                                 name="gru_order_seg")
        # self.reshape2 = Reshape((-1,self.conf.INPUT_IMAGE_WIDTH,self.conf.INPUT_IMAGE_HEIGHT,self.filter_num)) # [B,W,H*C] => [B,W,H,C]
        self.transpose2 = Permute((2, 1, 3))  # [B,W,H,C] => [B,H,W,C]

        # 3. DeConvs
        self.dconv_order_seg3 = Conv2DTranspose(filters=self.filter_num,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg3",
                                                padding="same")  # 1
        self.dconv_order_seg2 = Conv2DTranspose(filters=self.filter_num,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg2",
                                                padding="same")  # 1/2
        self.dconv_order_seg1 = Conv2DTranspose(filters=self.sequence_length,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg1",
                                                padding="same")  # 1/4
        self.softmax = Softmax(name="softmax")

        # ########################################################################
        # localization map generation network
        self.conv_loc_map1 = Convolution2D(filters=self.filter_num,
                                           kernel_size=(3, 3),
                                           padding="same",
                                           name="conv_loc_map1")
        self.conv_loc_map2 = Convolution2D(filters=1,
                                           kernel_size=(1, 1),
                                           padding="same",
                                           name="conv_loc_map2")
        self.sigmoid = Activation("sigmoid", name="sigmoid")
Example #30
 def __init__(self, units1, units2, units3):
     super(MyModel, self).__init__()
     self.layer1 = MyLayer(units1)
     self.dropout1 = MyDropout(0.5)
     self.layer2 = MyLayer(units2)
     self.dropout2 = MyDropout(0.5)
     self.layer3 = MyLayer(units3)
     self.softmax = Softmax()
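As in the attention example, only the layers are declared; a plausible forward pass chaining them (a sketch; the MyLayer and MyDropout call signatures are assumptions):

 # Sketch only: a plausible forward pass for the layers declared above.
 def call(self, inputs, training=False):
     x = self.layer1(inputs)
     x = self.dropout1(x, training=training)
     x = self.layer2(x)
     x = self.dropout2(x, training=training)
     x = self.layer3(x)
     return self.softmax(x)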