Example #1
 def input_layer(self):
     lacc_x = Input(shape=(self.window_size, 1),
                    dtype='float32',
                    name='laccx_input')
     lacc_y = Input(shape=(self.window_size, 1),
                    dtype='float32',
                    name='laccy_input')
     lacc_z = Input(shape=(self.window_size, 1),
                    dtype='float32',
                    name='laccz_input')
     gyr_x = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='gyrx_input')
     gyr_y = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='gyry_input')
     gyr_z = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='gyrz_input')
     mag_x = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='magx_input')
     mag_y = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='magy_input')
     mag_z = Input(shape=(self.window_size, 1),
                   dtype='float32',
                   name='magz_input')
     pressure = Input(shape=(self.window_size, 1),
                      dtype='float32',
                      name='pres_input')
     return gyr_x, gyr_y, gyr_z, lacc_x, lacc_y, lacc_z, mag_x, mag_y, mag_z, pressure
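This method only builds the ten Input tensors; downstream they still have to be merged. A minimal sketch of one way to consume them inside the same class (the Concatenate/Conv1D head, layer sizes, and the 8-class output are assumptions, not part of the original):

 # Hypothetical head; layer sizes and the 8-class output are assumed.
 from tensorflow.keras.layers import Concatenate, Conv1D, GlobalAveragePooling1D, Dense
 from tensorflow.keras.models import Model

 def build_model(self):
     inputs = self.input_layer()                  # the ten Input tensors above
     merged = Concatenate(axis=-1)(list(inputs))  # -> (window_size, 10)
     x = Conv1D(filters=32, kernel_size=5, activation='relu')(merged)
     x = GlobalAveragePooling1D()(x)
     outputs = Dense(8, activation='softmax')(x)
     return Model(inputs=list(inputs), outputs=outputs)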
Example #2
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.input_shape))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(10, activation="relu"))

        model.summary()

        img = Input(shape=(self.input_shape,))  # the original referenced an undefined self.img_shape; this matches input_dim above

        features = model(img)
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes + 1, activation="softmax")(features)

        return Model(img, [valid, label])
Example #3
    def __init__(self, args):
        self.input_shape = 28
        self.num_classes = 2
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(
            loss=['binary_crossentropy', 'categorical_crossentropy'],
            loss_weights=[0.5, 0.5],
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates imgs
        noise = Input(shape=(self.latent_dim, ))  # must match the generator's input size; the original hard-coded 64
        img = self.generator(noise)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        valid, _ = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains generator to fool discriminator
        self.combined = Model(noise, valid)
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer)
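Note that Keras snapshots the trainable flag at compile time: because self.discriminator was compiled before trainable = False was set, the standalone discriminator still learns, while the copy inside self.combined stays frozen. A hedged sketch of the usual alternating update (the batch arrays imgs, valid, fake, labels, fake_labels are hypothetical placeholders, not defined in the original):

        # Hypothetical training step; none of these batch arrays exist in the original.
        import numpy as np
        noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # discriminator sees real and generated samples (two targets: validity + class)
        d_loss_real = self.discriminator.train_on_batch(imgs, [valid, labels])
        d_loss_fake = self.discriminator.train_on_batch(gen_imgs, [fake, fake_labels])

        # generator is trained through the frozen discriminator
        g_loss = self.combined.train_on_batch(noise, valid)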
Example #4
 def feature_extractor_network(self):
     # input
     in_image = Input(shape = self.in_shape)  # assumed attribute; the original referenced an undefined local in_shape
     # C1 Layer
     nett = Conv2D(32,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # M2 Layer
     nett = MaxPooling2D(pool_size = (3,3))(nett)
     # C3 Layer
     nett = Conv2D(64,(3,3))(nett)
     nett = BatchNormalization()(nett)  # BatchNormalization takes no pool_size argument
     nett = LeakyReLU(alpha = 0.2)(nett)
     # L4 Layer
     nett = LocallyConnected2D(128,(3,3))(nett)
     # L5 Layer
     nett = LocallyConnected2D(256,(3,3))(nett)
     # F6 Layer
     nett = Dense(512,activation='relu')(nett)
     nett = Dropout(0.2)(nett)
     # F7 Layer 
     out_features = Dense(512, activation='tanh')(nett)  # Dense requires units; 512 is assumed here
     # output
     model = Model(inputs = in_image, outputs = out_features)
     return model
Example #5
def main1():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation_moe(
    )
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    #print('Training label shape = {}'.format(len(train_label)))

    # Set up the input layer
    input_layer = Input(shape=(num_features, ))

    # Set up MMoE layer
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(input_layer)

    output_layers = []

    output_info = ['y0', 'y1']

    # Build tower layer from MMoE layer
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=1,
                             name=output_info[index],
                             activation='linear',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(lr=learning_rates[0])
    model.compile(loss={
        'y0': 'mean_squared_error',
        'y1': 'mean_squared_error'
    },
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100)
    return model
Example #6
def create_autoencoder(input_dim, encoding_dim):
    """
    Args:
        input_dim: dimension of one-hot encoded categorical features
        encoding_dim: dimension of encoded data(hidden layer representation)
    Return: 
        model
    """
    one_hot_in = Input(shape=(input_dim, ), name='input', sparse=True)
    X = Dense(HIDDEN_UNITS, activation='selu')(one_hot_in)
    encoding = Dense(encoding_dim, activation='selu', name='enco')(X)
    X = Dense(HIDDEN_UNITS, activation='selu')(encoding)
    output = Dense(input_dim, activation='sigmoid')(X)

    model = Model(inputs=one_hot_in, outputs=output)
    return model
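Once trained, the bottleneck layer named 'enco' can be reused as a standalone encoder; a minimal sketch, with dimensions and the input batch assumed:

# Hypothetical usage; the dims and one_hot_batch are assumptions.
autoencoder = create_autoencoder(input_dim=10000, encoding_dim=128)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# ... autoencoder.fit(one_hot_batch, one_hot_batch, epochs=10) ...
encoder = Model(inputs=autoencoder.input,
                outputs=autoencoder.get_layer('enco').output)
codes = encoder.predict(one_hot_batch)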
Example #7
    def build_generator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.latent_dim))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
Example #8
 def generator_network(self):
     # input
     in_latents = Input(shape = (self.latent_dim,))
     # Conv2DTranspose expects a 4-D tensor, so the flat latent vector is
     # reshaped first; this Reshape is an assumed fix (the original applied
     # the deconvolution directly to the 2-D input, which would fail).
     nett = Reshape((1, 1, self.latent_dim))(in_latents)
     #DC1
     nett = Conv2DTranspose(512,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC2
     nett = Conv2DTranspose(128,(3,3))(nett)	
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC3
     nett = Conv2DTranspose(64,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC4
     nett = Conv2DTranspose(32,(5,5))(nett)
     nett = BatchNormalization()(nett)
     # the original ended with Dense(alpha = 0.2), which is not valid Keras;
     # a LeakyReLU matching the other blocks is assumed
     out_image = LeakyReLU(alpha = 0.2)(nett)
     #output
     model = Model(inputs = in_latents, outputs = out_image)
     return model
Example #9
 def discriminator_network(self):
     # input
     in_image = Input(shape=self.img_shape)
     # C1 layer
     nett = Conv2D(64,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # C2 layer
     nett = Conv2D(128,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # C3 layer
     nett = Conv2D(256,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # F4 layer
     nett = Flatten()(nett)
     validity = Dense(1, activation='sigmoid')(nett)  # the original passed alpha=0.2, which Dense does not accept; sigmoid assumed
     #output
     model =  Model(inputs = in_image, outputs = validity)
     return model
# ## Model

# In[23]:
import tensorflow as tf

# (10) Bidirectional LSTM modeling
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import SimpleRNN, Input, Dense, LSTM
from tensorflow.keras.layers import Bidirectional, TimeDistributed

# Training
from tensorflow.keras.callbacks import EarlyStopping
# Define the early-stopping callback

xInput = Input(batch_shape=(None, right_idx3, 256))
xBiLstm = Bidirectional(LSTM(240, return_sequences=True),
                        merge_mode='concat')(xInput)
xOutput = TimeDistributed(Dense(1, activation='sigmoid'))(xBiLstm)
# The cost is computed at each step, and the error is propagated to the next step.

model1 = Model(xInput, xOutput)
model1.compile(loss='binary_crossentropy',
               optimizer='rmsprop',
               metrics=['accuracy'])
model1.summary()


early_stopping = EarlyStopping(monitor='val_loss', patience=3)  # define the early-stopping callback
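A hedged example of wiring the callback into training (xData and yData are hypothetical stand-ins for the (batch, right_idx3, 256) inputs and per-step binary labels, which this snippet does not define):

# Hypothetical fit call; xData/yData are not defined in this snippet.
history = model1.fit(xData, yData,
                     batch_size=32,
                     epochs=50,
                     validation_split=0.2,
                     callbacks=[early_stopping])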
# In[24]:
def createModel(patchSize, numClasses, usingClassification=False):

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    input_tensor = Input(shape=(patchSize[0], patchSize[1], patchSize[2], 1))

    # first stage
    x = Conv3D(filters=16,
               kernel_size=(5, 5, 5),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(input_tensor)
    x = BatchNormalization(axis=bn_axis)(x)
    x_after_stage_1 = LeakyReLU(alpha=0.01)(x)

    #x_after_stage_1 = Add()([input_tensor, x])

    # first down convolution
    x_down_conv_1 = projection_block_3D(x_after_stage_1,
                                        filters=(32, 32),
                                        kernel_size=(2, 2, 2),
                                        stage=1,
                                        block=1,
                                        se_enabled=True,
                                        se_ratio=4)

    # second stage
    x = identity_block_3D(x_down_conv_1,
                          filters=(32, 32),
                          kernel_size=(3, 3, 3),
                          stage=2,
                          block=1,
                          se_enabled=True,
                          se_ratio=4)
    x_after_stage_2 = identity_block_3D(x,
                                        filters=(32, 32),
                                        kernel_size=(3, 3, 3),
                                        stage=2,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=4)

    # second down convolution
    x_down_conv_2 = projection_block_3D(x_after_stage_2,
                                        filters=(64, 64),
                                        kernel_size=(2, 2, 2),
                                        stage=2,
                                        block=3,
                                        se_enabled=True,
                                        se_ratio=8)

    # third stage
    x = identity_block_3D(x_down_conv_2,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=3,
                          block=1,
                          se_enabled=True,
                          se_ratio=8)
    x_after_stage_3 = identity_block_3D(x,
                                        filters=(64, 64),
                                        kernel_size=(3, 3, 3),
                                        stage=3,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=8)
    #x = identity_block_3D(x, filters=(64, 64), kernel_size=(3, 3, 3), stage=3, block=3, se_enabled=False, se_ratio=16)

    # third down convolution
    x_down_conv_3 = projection_block_3D(x_after_stage_3,
                                        filters=(128, 128),
                                        kernel_size=(2, 2, 2),
                                        stage=3,
                                        block=4,
                                        se_enabled=True,
                                        se_ratio=16)

    # fourth stage
    x = identity_block_3D(x_down_conv_3,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=4,
                          block=1,
                          se_enabled=True,
                          se_ratio=16)
    x_after_stage_4 = identity_block_3D(x,
                                        filters=(128, 128),
                                        kernel_size=(3, 3, 3),
                                        stage=4,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=16)
    #x = identity_block_3D(x, filters=(128, 128), kernel_size=(3, 3, 3), stage=4, block=3, se_enabled=False, se_ratio=16)

    ### end of encoder path

    if usingClassification:
        # use x_after_stage_4 as quantification output
        # global average pooling
        x_class = GlobalAveragePooling3D(
            data_format=K.image_data_format())(x_after_stage_4)

        # fully-connected layer
        classification_output = Dense(units=numClasses,
                                      activation='softmax',
                                      kernel_initializer='he_normal',
                                      name='classification_output')(x_class)

    ### decoder path

    # first 3D upsampling
    x = UpSampling3D(size=(2, 2, 2),
                     data_format=K.image_data_format())(x_after_stage_4)
    x = Conv3D(filters=64,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_3], axis=bn_axis)

    # first decoder stage
    x = identity_block_3D(x,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=6,
                          block=1,
                          se_enabled=True,
                          se_ratio=16)
    x = identity_block_3D(x,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=6,
                          block=2,
                          se_enabled=True,
                          se_ratio=16)

    # second 3D upsampling
    x = UpSampling3D(size=(2, 2, 2), data_format=K.image_data_format())(x)
    x = Conv3D(filters=32,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_2], axis=bn_axis)

    # second decoder stage
    x = identity_block_3D(x,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=7,
                          block=1,
                          se_enabled=True,
                          se_ratio=8)
    x = identity_block_3D(x,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=7,
                          block=2,
                          se_enabled=True,
                          se_ratio=8)

    # third 3D upsampling
    x = UpSampling3D(size=(2, 2, 2), data_format=K.image_data_format())(x)
    x = Conv3D(filters=16,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_1], axis=bn_axis)

    # third decoder stage
    x = identity_block_3D(x,
                          filters=(32, 32),
                          kernel_size=(3, 3, 3),
                          stage=9,
                          block=1,
                          se_enabled=True,
                          se_ratio=4)
    #x = identity_block_3D(x, filters=(32, 32), kernel_size=(3, 3, 3), stage=9, block=2, se_enabled=True, se_ratio=4)

    ### End of decoder

    ### final segmentation layers
    # a 1x1x1 Conv3D produces 2 feature maps for the probabilistic segmentation of foreground and background
    x = Conv3D(filters=2,
               kernel_size=(1, 1, 1),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name='conv_veryEnd')(x)
    #x = BatchNormalization(axis=bn_axis)(x) # why LeakyReLU before softmax?
    #x = LeakyReLU(alpha=0.01)(x)

    segmentation_output = Softmax(axis=bn_axis, name='segmentation_output')(x)
    #segmentation_output = keras.layers.activations.sigmoid(x)

    # create model
    if usingClassification:
        cnn = Model(inputs=[input_tensor],
                    outputs=[segmentation_output, classification_output],
                    name='3D-VResFCN-Classification')
        sModelName = cnn.name
    else:
        cnn = Model(inputs=[input_tensor],
                    outputs=[segmentation_output],
                    name='3D-VResFCN')
        sModelName = cnn.name

    return cnn, sModelName
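Because the classification variant returns two named outputs, it is compiled with one loss per output; a minimal sketch with assumed patch size, class count, optimizer, and loss weights:

# Hypothetical compile; patchSize, optimizer and weights are assumptions.
cnn, sModelName = createModel(patchSize=[32, 32, 32], numClasses=2,
                              usingClassification=True)
cnn.compile(optimizer='adam',
            loss={'segmentation_output': 'categorical_crossentropy',
                  'classification_output': 'categorical_crossentropy'},
            loss_weights={'segmentation_output': 1.0,
                          'classification_output': 0.5})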
Example #12
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=256,
                 n_factors_cls=512,
                 n_hidden=256,
                 word_vocabulary={},
                 role_vocabulary={},
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy']):
        super(NNRF, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                   n_hidden, word_vocabulary, role_vocabulary,
                                   unk_word_id, unk_role_id, missing_word_id,
                                   using_dropout, dropout_rate, optimizer,
                                   loss, metrics)

        # minus 1 here because one of the roles is the target role
        self.input_length = n_role_vocab - 1

        # each input is a fixed window over the frame set; each word corresponds to one role
        input_words = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_words')  # Switched dtype to tf specific (team1-change)
        input_roles = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_roles')  # Switched dtype to tf specific (team1-change)
        target_role = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_role')  # Switched dtype to tf specific (team1-change)

        # role based embedding layer
        embedding_layer = role_based_word_embedding(
            input_words, input_roles, n_word_vocab, n_role_vocab,
            glorot_uniform(), missing_word_id, self.input_length,
            n_factors_emb, True, using_dropout, dropout_rate)

        # sum on input_length direction;
        # obtaining context embedding layer, shape is (batch_size, n_factors_emb)
        event_embedding = Lambda(
            lambda x: K.sum(x, axis=1),
            name='event_embedding',
            output_shape=(n_factors_emb, ))(embedding_layer)

        # fully connected layer, output shape is (batch_size, input_length, n_hidden)
        hidden = Dense(n_hidden,
                       activation='linear',
                       input_shape=(n_factors_emb, ),
                       name='projected_event_embedding')(event_embedding)

        # non-linear layer, using 1 to initialize
        non_linearity = PReLU(alpha_initializer='ones',
                              name='context_embedding')(hidden)

        # hidden layer
        hidden_layer2 = target_word_hidden(non_linearity,
                                           target_role,
                                           n_word_vocab,
                                           n_role_vocab,
                                           glorot_uniform(),
                                           n_factors_cls,
                                           n_hidden,
                                           using_dropout=using_dropout,
                                           dropout_rate=dropout_rate)

        # softmax output layer
        output_layer = Dense(n_word_vocab,
                             activation='softmax',
                             input_shape=(n_factors_cls, ),
                             name='softmax_word_output')(hidden_layer2)

        self.model = Model(inputs=[input_words, input_roles, target_role],
                           outputs=[output_layer])

        self.model.compile(optimizer, loss, metrics)
Example #13
                                                      padding='post')
decoder_input_data = np.array(padded_answers)
print((decoder_input_data.shape, maxlen_answers))

# decoder_output_data
for i in range(len(tokenized_answers)):
    tokenized_answers[i] = tokenized_answers[i][1:]
padded_answers = preprocessing.sequence.pad_sequences(tokenized_answers,
                                                      maxlen=maxlen_answers,
                                                      padding='post')
onehot_answers = utils.to_categorical(padded_answers, vocab_size)
decoder_output_data = np.array(onehot_answers)
print(decoder_output_data.shape)

# Define the encoder-decoder model
encoder_inputs = Input(shape=(None, ))
encoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(encoder_inputs)
# Reference: Embedding layer <https://keras.io/zh/layers/embeddings/#embedding>
encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(
    200, return_state=True)(encoder_embedding)
# Reference: https://keras.io/zh/layers/recurrent/#lstm
encoder_states = [state_h, state_c]

decoder_inputs = Input(shape=(None, ))
decoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(200, return_state=True, return_sequences=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                     initial_state=encoder_states)
decoder_dense = Dense(vocab_size, activation=tf.keras.activations.softmax)
output = decoder_dense(decoder_outputs)
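The excerpt stops before the graph is closed into a model; the usual next step for this encoder-decoder wiring (a sketch, not shown in the original) is:

# Sketch: assemble and compile the seq2seq model from the tensors above.
model = tf.keras.models.Model([encoder_inputs, decoder_inputs], output)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.summary()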
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=300,
                 n_hidden=300,
                 word_vocabulary=None,
                 role_vocabulary=None,
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'],
                 loss_weights=[1., 1.]):
        super(MTRFv4, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                     n_hidden, word_vocabulary,
                                     role_vocabulary, unk_word_id, unk_role_id,
                                     missing_word_id, using_dropout,
                                     dropout_rate, optimizer, loss, metrics)

        # minus 1 here because one of the roles is the target role
        input_length = n_role_vocab - 1

        n_factors_cls = n_hidden

        # each input is a fixed window over the frame set; each word corresponds to one role
        input_words = Input(
            shape=(input_length, ), dtype=tf.uint32,
            name='input_words')  # Switched dtype to tf specific (team1-change)
        input_roles = Input(
            shape=(input_length, ), dtype=tf.uint32,
            name='input_roles')  # Switched dtype to tf specific (team1-change)
        target_word = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_word')  # Switched dtype to tf specific (team1-change)
        target_role = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_role')  # Switched dtype to tf specific (team1-change)

        # role based embedding layer
        embedding_layer = factored_embedding(input_words, input_roles,
                                             n_word_vocab, n_role_vocab,
                                             glorot_uniform(), missing_word_id,
                                             input_length, n_factors_emb,
                                             n_hidden, True, using_dropout,
                                             dropout_rate)

        # non-linear layer, using 1 to initialize
        non_linearity = PReLU(alpha_initializer='ones')(embedding_layer)

        # mean on input_length direction;
        # obtaining context embedding layer, shape is (batch_size, n_hidden)
        context_embedding = Lambda(lambda x: K.mean(x, axis=1),
                                   name='context_embedding',
                                   output_shape=(n_hidden, ))(non_linearity)

        # target word hidden layer
        tw_hidden = target_word_hidden(context_embedding,
                                       target_role,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # target role hidden layer
        tr_hidden = target_role_hidden(context_embedding,
                                       target_word,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # softmax output layer
        target_word_output = Dense(n_word_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_word_output')(tw_hidden)

        # softmax output layer
        target_role_output = Dense(n_role_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_role_output')(tr_hidden)

        self.model = Model(
            inputs=[input_words, input_roles, target_word, target_role],
            outputs=[target_word_output, target_role_output])

        self.model.compile(optimizer, loss, metrics, loss_weights)
Example #15
#################################### helper functions #########################################
def conv_bn_relu(inputs):
    out = Conv2D(24,
                 3,
                 1,
                 "same",
                 kernel_initializer='he_normal',
                 bias_initializer='zeros')(inputs)
    out = BatchNormalization()(out)
    out = ReLU()(out)
    return out
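The encoder below inlines this same Conv-BN-ReLU pattern; each block could equally be written as a call to the helper, e.g. a1 = conv_bn_relu(inputs).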


##################################### model structure #########################################
#---------------------------------------- encoder --------------------------------------------#
inputs = Input(shape=(512, 512, 2))

a1 = Conv2D(24,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
            bias_initializer='zeros')(inputs)
a1 = BatchNormalization()(a1)
a1 = ReLU()(a1)

a2 = Conv2D(24,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
Example #16
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras import Model

input = Input((3072, 3072, 3))

x = Conv2D(filters=32, kernel_size=(11, 11), strides=(3, 3),
           padding="same")(input)
x = Conv2D(filters=32, kernel_size=(11, 11), strides=(2, 2), padding="same")(x)
x = Conv2D(filters=32, kernel_size=(11, 11), strides=(2, 2), padding="same")(x)
# x = Conv2D(filters=128,kernel_size=(11,11),strides=(2,2),padding="same")(x)

# x = Conv2D(filters=64,kernel_size=(11,11),padding="same")(x)
# x = Conv2D(filters=64,kernel_size=(11,11),padding="same")(x)
# x = Conv2D(filters=64,kernel_size=(11,11),strides=(2,2),padding="same")(x)
#
# x = Conv2D(filters=128,kernel_size=(11,11),padding="same")(x)
# x = Conv2D(filters=128,kernel_size=(11,11),padding="same")(x)
# x = Conv2D(filters=128,kernel_size=(11,11),strides=(2,2),padding="same")(x)

model = Model(inputs=input, outputs=x)
model.summary()
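With padding="same", each stride divides the 3072-pixel sides: 3072/3 = 1024, then 1024/2 = 512, then 512/2 = 256, so the summary should end in a (256, 256, 32) feature map.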