def cnn_model(self, timestamp, batchSize):

        model = tf.keras.Sequential()
        # Rearrange into a shape the RBF (fuzzy) layer can read; comment these lines out to drop the RBF layer, and change the input_shape of the TimeDistributed Conv2D below to (timestamp, 3, 3, 7)
        model.add(Permute((1, 4, 2, 3), input_shape=(3, 3, 3, 7)))
        model.add(Reshape((21, 3, 3)))
        model.add(f.FuzzyLayer(63, name='fuzzylayer'))
        model.add(Reshape((189, 9)))
        model.add(Reshape((3, 63, 9)))
        model.add(Permute((1, 3, 2)))
        model.add(Reshape((3, 3, 3, 63)))
        # At this point the input shape is (3, 3, 3, 63)
        model.add(layers.TimeDistributed(layers.Conv2D(126, (2, 2), padding='same', strides=2), input_shape=(timestamp, 3, 3, 63),
                                         batch_size=batchSize))
        model.add(layers.TimeDistributed(layers.Conv2D(252, (2, 2))))
        model.add(layers.TimeDistributed(layers.BatchNormalization()))
        model.add(layers.TimeDistributed(layers.Activation("relu")))
        model.add(layers.TimeDistributed(layers.GlobalAveragePooling2D()))
        model.add(layers.TimeDistributed(layers.Flatten()))
        # The error starts here (error message below)
        # ValueError: If a RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors:
        # - If using a Sequential model, specify the batch size by passing a `batch_input_shape` argument to your first layer.
        # - If using the functional API, specify the batch size by passing a `batch_shape` argument to your Input layer.
        model.add(layers.LSTM(100, return_sequences=True))  # error occurs here
        model.add(layers.LSTM(25))
        model.add(layers.Dense(1))

        model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))

        return model
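A minimal sketch of the usual fix for the stateful-RNN error noted above, assuming the fuzzy layer is omitted and the input is already shaped (timestamp, 3, 3, 7) as the comment suggests; the layer sizes mirror the model above, and the stateful flags are illustrative assumptions.

import tensorflow as tf
from tensorflow.keras import layers

def cnn_model_fixed(timestamp, batch_size):
    model = tf.keras.Sequential()
    # A stateful RNN needs the batch size declared on the model's FIRST layer,
    # e.g. via batch_input_shape on the first TimeDistributed block.
    model.add(layers.TimeDistributed(
        layers.Conv2D(126, (2, 2), padding='same', strides=2),
        batch_input_shape=(batch_size, timestamp, 3, 3, 7)))
    model.add(layers.TimeDistributed(layers.Conv2D(252, (2, 2))))
    model.add(layers.TimeDistributed(layers.BatchNormalization()))
    model.add(layers.TimeDistributed(layers.Activation('relu')))
    model.add(layers.TimeDistributed(layers.GlobalAveragePooling2D()))
    model.add(layers.LSTM(100, return_sequences=True, stateful=True))
    model.add(layers.LSTM(25, stateful=True))
    model.add(layers.Dense(1))
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
    return model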
Example 2
def spatial_attention(input_feature):
    kernel_size = 7

    if K.image_data_format() == "channels_first":
        channel = input_feature._keras_shape[1]
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = input_feature._keras_shape[-1]
        cbam_feature = input_feature

    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool._keras_shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool._keras_shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat._keras_shape[-1] == 2
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature._keras_shape[-1] == 1

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
Example 3
    def __init__(self, d_model, num_heads, scope="multi_head_attention"):
        assert d_model % num_heads == 0

        self.wq = Dense(d_model, name="%s/dense_q" % scope)
        self.wk = Dense(d_model, name="%s/dense_k" % scope)
        self.wv = Dense(d_model, name="%s/dense_v" % scope)

        self.reshapeq = Reshape((-1, num_heads, d_model // num_heads),
                                name="%s/reshape_q" % scope)
        self.reshapek = Reshape((-1, num_heads, d_model // num_heads),
                                name="%s/reshape_k" % scope)
        self.reshapev = Reshape((-1, num_heads, d_model // num_heads),
                                name="%s/reshape_v" % scope)

        self.transposeq = Permute((2, 1, 3), name="%s/transpose_q" % scope)
        self.transposek = Permute((2, 1, 3), name="%s/transpose_k" % scope)
        self.transposev = Permute((2, 1, 3), name="%s/transpose_v" % scope)

        self.reshape_output = Reshape((-1, d_model),
                                      name="%s/reshape_output" % scope)

        self.transpose_output = Permute((2, 1, 3),
                                        name="%s/transpose_output" % scope)

        self.dense = Dense(d_model, name="%s/dense" % scope)

        self.attention = Attention(name="%s/attention" % scope)
Example 4
def loader(input_shape, num_outputs, output_activation="softmax", weight_decay=0.001):
    inputs = Input(shape=input_shape, name="input")
    images = Reshape((*input_shape, 1), name="expand_channel_dim")(inputs)
    images = Permute((2, 1, 3), name="freq_bins_first")(images)

    # CNN
    filter_def = (16, 32, 64, 128, 256)
    kernel_def = (7, 5, 3, 3, 3)
    x = images
    for i, (f, k) in enumerate(zip(filter_def, kernel_def), start=1):
        x = Conv2D(f, k,
                activation="relu",
                kernel_regularizer=l2(weight_decay),
                padding="same",
                name="conv_{}".format(i))(x)
        x = BatchNormalization(name="conv_{}_bn".format(i))(x)
        x = MaxPool2D(2, name="conv_{}_pool".format(i))(x)

    # BLSTM
    timesteps_first = Permute((2, 1, 3), name="timesteps_first")(x)
    cols, rows, channels = timesteps_first.shape[1:]
    flatten_channels = Reshape((cols, rows * channels), name="flatten_channels")(timesteps_first)
    blstm = Bidirectional(LSTM(256), merge_mode="concat", name="blstm")(flatten_channels)

    # Output
    outputs = Dense(num_outputs, activation=None, name="output")(blstm)
    if output_activation:
        outputs = Activation(getattr(tf.nn, output_activation), name=str(output_activation))(outputs)
    return Model(inputs=inputs, outputs=outputs, name="CRNN")
Example 5
    def build_model(self):
        """
        Function to define and build the neural network architecture for EWSNet

    
        """
        
        ip = Input(shape=(1, None))
        x = Permute((2, 1))(ip)
        x = LSTM(128)(x)
        x = Dropout(0.2)(x)
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = GlobalAveragePooling1D()(y)
        x = concatenate([x, y])
        x = Dense(256, activation='relu',kernel_regularizer=regularizers.l2(0.01))(x)
        out = Dense(3, activation='softmax',kernel_regularizer=regularizers.l2(0.001))(x)
        model = Model(ip, out)
        return model
Example 6
    def inverse(self, x):
        output = Permute((1, 3, 4, 2))(x)
        (batch_size, d_height, d_width, d_depth) = output.get_shape().as_list()

        s_depth = int(d_depth / self.block_size_sq)
        s_width = int(d_width * self.block_size)
        s_height = int(d_height * self.block_size)

        t_1 = tf.reshape(
            output, (batch_size, d_height, d_width, self.block_size_sq, s_depth)
        )

        spl = tf.split(
            t_1, compute_block_size_shapes(t_1.shape[3], self.block_size), axis=3
        )
        stack = [
            tf.reshape(t_t, (batch_size, d_height, s_width, s_depth)) for t_t in spl
        ]

        # TODO transpose permutation?
        output = tf.reshape(
            Permute((1, 3, 2, 4, 5))(tf.transpose(tf.stack(stack, axis=0))),
            (batch_size, s_height, s_width, s_depth),
        )

        return Permute((1, 4, 2, 3))(output)
    def call(self, inputs):  # (B, S, H)
        # Expand weights to include batch size through implicit broadcasting
        W1, W2 = self.W1[None, :, :], self.W2[None, :, :]
        W1, W2 = tf.tile(W1, [self.batch_size, 1, 1]), tf.tile(
            W2, [self.batch_size, 1, 1])
        #W1, W2 = tf.compat.v1.repeat(W1, repeats = [self.batch_size], axis=0), tf.compat.v1.repeat(W2, repeats = [self.batch_size], axis=0)
        hidden_states_transposed = Permute(dims=(2, 1))(inputs)  # (B, H, S)
        attention_score = tf.matmul(W1,
                                    hidden_states_transposed)  # (B, size, S)
        attention_score = Activation('tanh')(attention_score)  # (B, size, S)
        attention_weights = tf.matmul(W2, attention_score)  # (B, num_hops, S)
        attention_weights = Activation('softmax')(
            attention_weights)  # (B, num_hops, S)
        embedding_matrix = tf.matmul(attention_weights,
                                     inputs)  # (B, num_hops, H)
        embedding_matrix_flattened = Flatten()(
            embedding_matrix)  # (B, num_hops*H)

        if self.use_penalization:
            attention_weights_transposed = Permute(dims=(2, 1))(
                attention_weights)  # (B, S, num_hops)
            product = tf.matmul(
                attention_weights,
                attention_weights_transposed)  # (B, num_hops, num_hops)
            identity = tf.eye(
                self.num_hops,
                batch_shape=(inputs.shape[0], ))  # (B, num_hops, num_hops)
            frobenius_norm = tf.sqrt(
                tf.reduce_sum(tf.square(product - identity)))  # distance
            self.add_loss(self.penalty_coefficient * frobenius_norm)  # loss

        if self.model_api == 'functional':
            return embedding_matrix_flattened, attention_weights
        elif self.model_api == 'sequential':
            return embedding_matrix_flattened
Example 8
    def build(self, input_shape) -> None:
        self.embedding: Embedding = Embedding(
            input_dim=self.embedding_matrix.shape[0],
            output_dim=self.embedding_matrix.shape[1],
            weights=[self.embedding_matrix],
            input_length=self.sentence_len,
            trainable=False)
        self.permute_1: Permute = Permute((2, 1), name="permute_1")
        self.dence_1: Dense = Dense(self.sentence_len,
                                    activation="softmax",
                                    name="dense_1")
        self.attention: Permute = Permute((2, 1), name="attention")
        self.multiply: Multiply = Multiply(name="multiply")

        self.bidirectionnal_1: Bidirectional = Bidirectional(
            LSTM(units=10, dropout=0.2, return_sequences=True),
            name="bidirectional_1")

        self.bidirectionnal_2: Bidirectional = Bidirectional(
            LSTM(units=10, dropout=0.2), name="bidirectional_2")

        self.dence_2: Dense = Dense(100, activation="relu", name="dense_2")
        self.dence_3: Dense = Dense(self.class_num,
                                    activation="softmax",
                                    name="dense_3")
        super(SimpleAttention, self).build(input_shape)
Example 9
def get_test_model_sequential():
    """Returns a typical (VGG-like) sequential test model."""
    model = Sequential()
    model.add(Conv2D(8, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(Conv2D(8, (3, 3), activation='relu'))
    model.add(Permute((3, 1, 2)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Permute((2, 3, 1)))
    model.add(Dropout(0.25))

    model.add(Conv2D(16, (3, 3), activation='elu'))
    model.add(Conv2D(16, (3, 3)))
    model.add(ELU())

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # fit to dummy data
    training_data_size = 2
    data_in = [np.random.random(size=(training_data_size, 32, 32, 3))]
    data_out = [np.random.random(size=(training_data_size, 10))]
    model.fit(data_in, data_out, epochs=10)
    return model
Example 10
def generate_ndlstmfcn(MAX_SEQUENCE_LENGTH, NB_CLASS, NUM_CELLS=8):

    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    x = Permute((2, 1))(ip)
    x = LSTM(NUM_CELLS)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model
Example 11
    def cnn_model(self, timestamp, batchSize):

        model = tf.keras.Sequential()
        # Rearrange into a shape the RBF (fuzzy) layer can read; comment these lines out to drop the RBF layer, and change the input_shape of the TimeDistributed Conv2D below to (timestamp, 3, 3, 7)
        model.add(Permute((1, 4, 2, 3), input_shape=(3, 3, 3, 7)))
        model.add(Reshape((21, 3, 3)))
        model.add(f.FuzzyLayer(63))
        model.add(Reshape((189, 9)))
        model.add(Reshape((3, 63, 9)))
        model.add(Permute((1, 3, 2)))
        model.add(Reshape((3, 3, 3, 63)))
        # At this point the input shape is (3, 3, 3, 63)
        model.add(
            layers.TimeDistributed(layers.Conv2D(126, (2, 2),
                                                 padding='same',
                                                 strides=2),
                                   input_shape=(timestamp, 3, 3, 63),
                                   batch_size=batchSize))
        model.add(layers.TimeDistributed(layers.Conv2D(252, (2, 2))))
        model.add(layers.TimeDistributed(layers.BatchNormalization()))
        model.add(layers.TimeDistributed(layers.Activation("relu")))
        model.add(layers.TimeDistributed(layers.GlobalAveragePooling2D()))
        model.add(layers.TimeDistributed(layers.Flatten()))

        model.add(layers.LSTM(100, return_sequences=True))  # error occurs here
        model.add(layers.LSTM(25))
        model.add(layers.Dense(1))

        model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))

        return model
Example 12
    def build(self, input_shape):
        # ########################################################################
        # order segment generation network

        # 1. Convs
        self.conv_order_seg1 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg1",
                                             padding="same")  # 1/2
        self.conv_order_seg2 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg2",
                                             padding="same")  # 1/4
        self.conv_order_seg3 = Convolution2D(filters=self.filter_num,
                                             kernel_size=(3, 3),
                                             strides=2,
                                             name="conv_order_seg3",
                                             padding="same")  # 1/8

        # 2. GRU
        self.transpose1 = Permute((2, 1, 3))  # [B,H,W,C] => [B,W,H,C]
        # self.reshape1 = Reshape((-1,self.conf.INPUT_IMAGE_WIDTH,self.conf.INPUT_IMAGE_HEIGHT*self.filter_num)) # [B,W,H,C] => [B,W,H*C]
        self.gru_order_seg = GRU(units=self.filter_num * (input_shape[1] // 8),
                                 return_sequences=True,
                                 name="gru_order_seg")
        # self.reshape2 = Reshape((-1,self.conf.INPUT_IMAGE_WIDTH,self.conf.INPUT_IMAGE_HEIGHT,self.filter_num)) # [B,W,H*C] => [B,W,H,C]
        self.transpose2 = Permute((2, 1, 3))  # [B,W,H,C] => [B,H,W,C]

        # 3. DeConvs
        self.dconv_order_seg3 = Conv2DTranspose(filters=self.filter_num,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg3",
                                                padding="same")  # 1
        self.dconv_order_seg2 = Conv2DTranspose(filters=self.filter_num,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg2",
                                                padding="same")  # 1/2
        self.dconv_order_seg1 = Conv2DTranspose(filters=self.sequence_length,
                                                kernel_size=(3, 3),
                                                strides=2,
                                                name="dconv_order_seg1",
                                                padding="same")  # 1/4
        self.softmax = Softmax(name="softmax")

        # ########################################################################
        # localization map generation network
        self.conv_loc_map1 = Convolution2D(filters=self.filter_num,
                                           kernel_size=(3, 3),
                                           padding="same",
                                           name="conv_loc_map1")
        self.conv_loc_map2 = Convolution2D(filters=1,
                                           kernel_size=(1, 1),
                                           padding="same",
                                           name="conv_loc_map2")
        self.sigmoid = Activation("sigmoid", name="sigmoid")
Example 13
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              batch_normalization: bool = True,
              dropout_rate: float = 0.0,
              l2_regularization: float = 0.0):
     super().__init__()
     leaky_relu = LeakyReLU(alpha=0.01)
     dimension_decrease_factor = 4
     if batch_normalization:
         self.batch_normalization = BatchNormalization(scale=False)
         self.batch_normalization1 = BatchNormalization(scale=False)
         self.batch_normalization2 = BatchNormalization(scale=False)
     else:
         self.batch_normalization = None
     if l2_regularization > 0:
         l2_regularizer = L2(l2_regularization)
     else:
         l2_regularizer = None
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=leaky_relu,
         padding='same',
         kernel_regularizer=l2_regularizer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     if pooling_size > 1:
         self.pooling_layer = MaxPooling1D(pool_size=pooling_size,
                                           padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                  f'Residual blocks with fewer output channels than input channels are not '
                  f'implemented. Output channels was {output_channels} and input was '
                  f'{input_channels}.')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = SpatialDropout1D(rate=dropout_rate)
     else:
         self.dropout_layer = None
Example 14
def attention_3d_block(inputs, n):
    print(inputs)
    a = Permute((2, 1))(inputs)
    # print(a)
    a = Dense(n, activation='softmax')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    # print(a_probs)
    output = Multiply(name='attention_mul')([inputs, a_probs])
    return output
Example 15
def squeeze_excite_block(inp, filters, ratio, name):

    inp = Permute((3, 1, 2), name=name + "_permute_1")(inp)
    x = GlobalAveragePooling2D('channels_first', name=name + "_avg_pool")(inp)
    x = dense(x, filters // ratio, name + "_dense_1")
    x = dense(x, filters, name + "_dense_2", act='sigmoid')
    x = Reshape((filters, 1, 1), name=name + "_reshape")(x)
    x = Multiply(name=name + "_multiply")([inp, x])
    x = Permute((2, 3, 1), name=name + "_permute_2")(x)
    return x
Example 16
def attention_simple(inputs, timesteps):
    input_dim = int(inputs.shape[-1])
    a = Permute((2, 1), name='transpose')(inputs)
    a = Dense(timesteps, activation='softmax', name='attention_probs')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = Multiply(name='focused_attention')(
        [inputs, a_probs])
    output_flat = Lambda(lambda x: K.sum(x, axis=1),
                         name='temporal_average')(output_attention_mul)
    return output_flat, a_probs
def attention_rnn(inputs):
    # inputs.shape = (batch_size, time_steps, input_dim)
    input_dim = int(inputs.shape[2])
    timestep = int(inputs.shape[1])
    a = Permute((2, 1))(inputs)
    a = Dense(timestep, activation='softmax')(a)
    a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
    a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = multiply([inputs, a_probs], name='attention_mul')
    return output_attention_mul
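A minimal usage sketch for attention_rnn above: apply it to the sequence output of an LSTM and flatten the re-weighted sequence before a classifier head; the layer sizes here are illustrative assumptions.

import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, Dense, Flatten
from tensorflow.keras.models import Model

seq_in = Input(shape=(20, 8))                    # (time_steps, input_dim)
h = LSTM(32, return_sequences=True)(seq_in)      # (batch, 20, 32)
attended = attention_rnn(h)                      # attention-weighted sequence, (batch, 20, 32)
z = Flatten()(attended)
out = Dense(1, activation='sigmoid')(z)
model = Model(seq_in, out)
model.compile(optimizer='adam', loss='binary_crossentropy')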
Example 18
def reorg(input_tensor, stride):
    _, h, w, c = input_tensor.get_shape().as_list() 

    channel_first = Permute((3, 1, 2))(input_tensor)
    
    reshape_tensor = Reshape((c // (stride ** 2), h, stride, w, stride))(channel_first)
    permute_tensor = Permute((3, 5, 1, 2, 4))(reshape_tensor)
    target_tensor = Reshape((-1, h // stride, w // stride))(permute_tensor)
    
    channel_last = Permute((2, 3, 1))(target_tensor)
    return Reshape((h // stride, w // stride, -1))(channel_last)
Example 19
def attention_3d_block(
    slt_api_num,
    feature_dim,
    name='',
):
    """
    :param query: (None,D)
    :param key: (None,slt_api_num,D)
    :param value: (None,slt_api_num,D) 一般等于key
    :return:
    """

    # slt_api_num = int(key.shape[1])
    # feature_dim = int(key.shape[2])

    query = Input(shape=(feature_dim, ), name=name + 'query_input')
    key = Input(shape=(
        slt_api_num,
        feature_dim,
    ), name=name + 'key_input')
    value = Input(shape=(
        slt_api_num,
        feature_dim,
    ),
                  name=name + 'value_input')

    Repeat_query = RepeatVector(slt_api_num)(query)  # (None,slt_api_num,D)
    outer_prod = Multiply()([Repeat_query, key])
    sub = Subtract()([Repeat_query, key])
    att_score = Concatenate(name=name + 'att_info_concate')(
        [Repeat_query, key, outer_prod, sub])  # (None,slt_api_num,4*D)

    a = Permute((2, 1))(att_score)  # shape=(?, 4*D, slt_api_num)
    a = Dense(slt_api_num, activation='softmax')(
        a)  # shape=(?, 4*D, slt_api_num)   # softmax taken over each feature
    a = Lambda(lambda x: K.mean(x, axis=1), name=name + 'dim_reduction')(
        a)  # shape=(?, slt_api_num) # averaged to get a single weight per service
    a = RepeatVector(feature_dim)(a)  # shape=(?,D,slt_api_num)
    a_probs = Permute(
        (2, 1), name=name + 'attention_vec')(a)  # shape=(?,slt_api_num,D)
    output_attention_mul = Multiply(name=name + 'attention_mul')(
        [value, a_probs])  # shape=(?,slt_api_num, D)
    att_result = Lambda(lambda x: tf.reduce_sum(x, axis=1))(
        output_attention_mul)  # (None,D)

    model = Model(inputs=[query, key, value],
                  outputs=[att_result],
                  name=name + 'attBlock')
    return model
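A minimal usage sketch for the attention block above: build it for a given number of candidate services and feature dimension, then call it on query/key/value arrays; the sizes and the 'demo_' name prefix are illustrative assumptions.

import numpy as np

slt_api_num, feature_dim = 5, 16
att_block = attention_3d_block(slt_api_num, feature_dim, name='demo_')

query = np.random.random((2, feature_dim)).astype('float32')             # (batch, D)
key = np.random.random((2, slt_api_num, feature_dim)).astype('float32')  # (batch, slt_api_num, D)
value = key                                                               # value usually equals key
att_result = att_block.predict([query, key, value])                      # (batch, D)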
Example 20
    def forward(self, inp):
        output = Permute((2, 3, 1))(inp)
        batch_size, s_height, s_width, s_depth = output.get_shape().as_list()
        d_depth = s_depth * self.block_size_sq
        d_height = int(s_height / self.block_size)

        t_1 = tf.split(
            output, compute_block_size_shapes(output.shape[2], self.block_size), axis=2
        )
        stack = [tf.reshape(t_t, (batch_size, d_height, d_depth)) for t_t in t_1]
        output = tf.stack(stack, axis=1)
        output = Permute((2, 1, 3))(output)
        output = Permute((3, 1, 2))(output)

        return output
Example 21
def construct_model():
    input1 = Input(shape=(seq_length, 5790, 6))
    input2 = Input(shape=(seq_length, 6))

    re_input1 = Reshape((5790 * seq_length, 6))(input1)

    rere_input1 = Permute((2, 1), input_shape=(5790 * seq_length, 6))(re_input1)

    conv1 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='X1_input')(
        rere_input1)
    conv2 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='Conv7')(
        conv1)

    LSTM1 = LSTM(4, return_sequences=True)(input2)
    LSTM2 = LSTM(4, return_sequences=False)(LSTM1)

    reshape_conv2 = Reshape((30, 5790, seq_length))(conv2)

    pool = MaxPooling2D(pool_size=(1, 2), strides=(1, 2), padding='valid', data_format="channels_last")(reshape_conv2)

    reshape1 = Reshape((1, 30, 2895, seq_length))(pool)
    reshape2 = Permute((4, 2, 3, 1), input_shape=(1, 30, 2895, seq_length))(reshape1)

    convLSTM1 = ConvLSTM2D(filters=10, kernel_size=(3, 3), strides=(3, 3),
                           padding='same', return_sequences=True)(reshape2)
    convLSTM2 = ConvLSTM2D(filters=20, kernel_size=(3, 2), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM1)
    convLSTM3 = ConvLSTM2D(filters=40, kernel_size=(3, 1), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM2)
    convLSTM4 = ConvLSTM2D(filters=40, kernel_size=(2, 2), strides=(2, 2),
                           padding='same', return_sequences=False)(convLSTM3)

    flat1 = Flatten()(convLSTM4)
    flat2 = Flatten()(LSTM2)

    dense1 = Dense(120)(flat1)
    activation1 = Activation('relu')(dense1)
    merge2 = concatenate([activation1, flat2])
    dense2 = Dense(30)(merge2)
    activation2 = Activation('relu')(dense2)
    output = Dense(1, kernel_regularizer=regularizers.l2(0.000001))(activation2)

    model = Model(inputs=[input1, input2], outputs=[output])
    sgd = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    model = multi_gpu_model(model, gpus=2)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    print(model.summary())
    return model
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              dropout_rate: float = 0.0):
     super().__init__()
     dimension_decrease_factor = 4
     kernel_initializer = LecunNormal()
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=selu,
         padding='same',
         kernel_initializer=kernel_initializer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     if pooling_size > 1:
         self.pooling_layer = AveragePooling1D(pool_size=pooling_size,
                                               padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                  f'Residual blocks with fewer output channels than input channels are not '
                  f'implemented. Output channels was {output_channels} and input was '
                  f'{input_channels}.')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = AlphaDropout(rate=dropout_rate,
                                           noise_shape=(50, 1,
                                                        output_channels))
     else:
         self.dropout_layer = None
Example 23
def create_cnn_model(fingerprint_shape, is_training=True):
    model = Sequential()
    model.add(Permute((2, 1), input_shape=fingerprint_shape))
    model.add(Reshape((fingerprint_shape[1], fingerprint_shape[0], 1)))
    
    model.add(Conv2D(filters=64, kernel_size=3, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D())
    #if (is_training):
    #    model.add(Dropout(0.5))
    model.add(Conv2D(filters=64, kernel_size=3, use_bias=False)) 
    model.add(BatchNormalization())
    model.add(Activation("relu"))

    model.add(MaxPooling2D())
    #if (is_training):
    #    model.add(Dropout(0.5))
    model.add(Conv2D(filters=64, kernel_size=3, use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation("relu"))

    model.add(MaxPooling2D())
    
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(BatchNormalization())
    model.add(Activation("sigmoid"))
    
    return model
Example 24
def generate_lstmfcn(MAX_SEQUENCE_LENGTH,
                     NB_CLASS,
                     lstm_dim=128,
                     attention=True,
                     dropout=0.2):

    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
    if attention:
        x = AttentionLSTM(lstm_dim, implementation=2)(ip)
    else:
        x = LSTM(lstm_dim)(ip)
    x = Dropout(dropout)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding="same", kernel_initializer="he_uniform")(ip)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Conv1D(256, 5, padding="same", kernel_initializer="he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Conv1D(128, 3, padding="same", kernel_initializer="he_uniform")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation="softmax")(x)

    return Model(inputs=ip, outputs=out)
Example 25
def se_block(input_feature, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
	As described in https://arxiv.org/abs/1709.01507.
	"""

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    se_feature = GlobalAveragePooling2D()(input_feature)
    se_feature = Reshape((1, 1, channel))(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel // ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    if K.image_data_format() == 'channels_first':
        se_feature = Permute((3, 1, 2))(se_feature)

    se_feature = multiply([input_feature, se_feature])
    return se_feature
Example 26
def get_model_crepe_without_time_component(block_size):
    layers = [1, 2, 3, 4, 5, 6]
    filters = [n * 32 for n in [32, 4, 4, 4, 8, 16]]
    widths = [512, 64, 64, 64, 64, 64]
    strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]

    x = Input(shape=(block_size, ), name='input', dtype='float32')
    y = Reshape(target_shape=(block_size, 1, 1), name='input-reshape')(x)

    for layer, filters, width, strides in zip(layers, filters, widths,
                                              strides):
        y = Conv2D(filters, (width, 1),
                   strides=strides,
                   padding='same',
                   activation='relu',
                   name="conv%d" % layer)(y)
        y = BatchNormalization(name="conv%d-BN" % layer)(y)
        y = MaxPool2D(pool_size=(2, 1),
                      strides=None,
                      padding='valid',
                      name="conv%d-maxpool" % layer)(y)
        y = Dropout(0.25, name="conv%d-dropout" % layer)(y)

    y = Permute((2, 1, 3), name="transpose")(y)
    y = Flatten(name="flatten")(y)
    y = Dense(360, activation='sigmoid', name="classifier")(y)

    model = Model(inputs=x, outputs=y)
    model.compile('adam', 'binary_crossentropy', metrics=['mse', 'mae'])

    return model
def generate_model():
    ip = Input(shape=(MAX_TIMESTEPS, MAX_NB_VARIABLES))

    x = Masking()(ip)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # add load model code here to fine-tune

    return model
Example 28
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block
    Args:
        input: input tensor
        filters: number of output filters
    Returns: a keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
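A minimal usage sketch for squeeze_excite_block above, inserting it after a convolution in channels-last mode; the input size, filter count, and ratio are illustrative assumptions.

import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model

inp = Input(shape=(32, 32, 3))
x = Conv2D(64, 3, padding='same', activation='relu')(inp)   # (32, 32, 64)
x = squeeze_excite_block(x, ratio=16)                        # channel-wise recalibration
x = GlobalAveragePooling2D()(x)
out = Dense(10, activation='softmax')(x)
model = Model(inp, out)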
Example 29
def build_model(embedding_layer,embedding_layer_entity,max_len):
    sequence_input = Input(shape=(max_len,))
    entity_input = Input(shape=(2,),)
    embedded_sequences = embedding_layer(sequence_input)
    embedded_entity = embedding_layer_entity(entity_input)
    #print(entity_input.shape)
    x = Conv1D(128, 3, activation='relu',padding='same')(embedded_sequences)
    x1 = Conv1D(128, 2, activation='relu')(embedded_entity)
    ###aspect based attention block
    con = Concatenate(axis = 1)([x,x1])
    x2 = Dense(1,activation= 'tanh')(con)
    x2 = Flatten()(x2)
    x2 = Activation('softmax')(x2)
    x2 = RepeatVector(64)(x2)
    x2 = dot([x,x2],axes = 1)
    x2 = Permute([2, 1])(x2)
    ###attention end
    x = MaxPooling1D(3)(x2)
    x = Conv1D(128, 3, activation='relu')(x)
    x = MaxPooling1D(3)(x)
    x = Conv1D(128, 3, activation='relu')(x)
    x = MaxPooling1D(3)(x)  # global max pooling
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    #x = concatenate([x,d])
    preds = Dense(1, activation='sigmoid')(x)

    model = Model([sequence_input,entity_input], preds)
    model.compile(optimizer='adam', loss='binary_crossentropy', 
                  metrics=['acc',f1_m,precision_m, recall_m])
    return model
Example 30
 def __init__(self, n_classes):
     # Creating Attention Layer
     super(CustomAttention, self).__init__()
     self.dense = Dense(n_classes, activation='linear', use_bias=False)
     self.permute = Permute((2,1))
     self.activation = Activation('softmax')
     self.attention = Lambda(lambda x: K.batch_dot(x[0], x[1]))