Code example #1
def buildModel():
    '''Build a Seq2Seq LSTM model'''

    #encoder
    model = Sequential()
    model.add(
        LSTM(num_hidden, input_dim=pitch_dimension, return_sequences=True))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(LSTM(num_hidden))
    model.add(RepeatVector(y_length))

    #decoder
    model.add(LSTM(num_hidden, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(num_hidden, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(num_hidden, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(TimeDistributed(Dense(pitch_dimension, activation='softmax')))

    return model
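The builder leaves `num_hidden`, `pitch_dimension`, and `y_length` to the enclosing module. A minimal smoke-test sketch, assuming illustrative values for those globals; the imports, compile settings, and dummy shapes below are assumptions, not part of the original file:

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, BatchNormalization, RepeatVector, TimeDistributed

num_hidden, pitch_dimension, y_length = 256, 128, 16  # assumed hyperparameters

model = buildModel()
model.compile(loss='categorical_crossentropy', optimizer='adam')
x = np.random.rand(4, 32, pitch_dimension)        # (samples, input steps, pitch one-hots)
y = np.random.rand(4, y_length, pitch_dimension)  # (samples, output steps, pitch one-hots)
model.fit(x, y, epochs=1, verbose=0)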
Code example #2
File: trainPose3d.py, Project: rtmtree/CSPS
    def build_model_rath(self, n_unit_lstm=200, n_unit_atten=400, seqLen=30):

        input_len = 52
        model = tf.keras.Sequential()

        #csi feature extractor

        # model.add(Conv2D(320, 2, activation='relu',input_shape=(seqLen,input_len)))
        # model.add(Conv2D(150, 2, activation='relu',input_shape=(seqLen,input_len)))
        # model.add(Conv2D(300, 2, activation='relu',input_shape=(seqLen,input_len)))
        # model.add(Conv2D(300, 2, activation='relu',input_shape=(seqLen,input_len)))

        # encoder layer
        model.add(
            Bidirectional(
                LSTM(100, activation='relu', input_shape=(seqLen, input_len))))

        # repeat vector
        # model.add(RepeatVector(3*3*19*19))
        model.add(RepeatVector(seqLen))
        # model.add(RepeatVector(19*19))

        # decoder layer
        model.add(
            Bidirectional(LSTM(100, activation='relu', return_sequences=True)))

        model.add(TimeDistributed(Dense(1083)))

        return model
Code example #3
def build_model(embedding_layer,embedding_layer_entity,max_len):
    sequence_input = Input(shape=(max_len,))
    entity_input = Input(shape=(2,),)
    embedded_sequences = embedding_layer(sequence_input)
    embedded_entity = embedding_layer_entity(entity_input)
    #print(entity_input.shape)
    x = Conv1D(128, 3, activation='relu',padding='same')(embedded_sequences)
    x1 = Conv1D(128, 2, activation='relu')(embedded_entity)
    ### aspect-based attention block
    con = Concatenate(axis=1)([x, x1])        # (None, max_len+1, 128)
    x2 = Dense(1, activation='tanh')(con)     # unnormalized score per timestep
    x2 = Flatten()(x2)
    x2 = Activation('softmax')(x2)            # attention weights over the timesteps
    x2 = RepeatVector(128)(x2)                # repeat to match the channel dim of con
    x2 = Permute([2, 1])(x2)                  # (None, max_len+1, 128)
    x2 = Multiply()([con, x2])                # weight every timestep of con
    ### attention end
    x = MaxPooling1D(3)(x2)
    x = Conv1D(128, 3, activation='relu')(x)
    x = MaxPooling1D(3)(x)
    x = Conv1D(128, 3, activation='relu')(x)
    x = MaxPooling1D(3)(x)  # global max pooling
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    #x = concatenate([x,d])
    preds = Dense(1, activation='sigmoid')(x)

    model = Model([sequence_input,entity_input], preds)
    model.compile(optimizer='adam', loss='binary_crossentropy', 
                  metrics=['acc',f1_m,precision_m, recall_m])
    return model
Code example #4
def LSTM_Autoencoder():

    model = Sequential()
    #Each input sample is a 2D array that is fed to the LSTM layer.
    #The output of the layer is an encoded feature vector of the input data.

    #Input shape is a 2D array: timesteps x n_features.
    #The first layer has 128 units.
    model.add(LSTM(128, input_shape=(timesteps, n_features)))

    #Dropout regularization. 20% of neurons
    model.add(Dropout(0.2))

    #When the second hidden layer is an LSTM,
    #the encoded feature vector output must be replicated timesteps times.
    model.add(RepeatVector(timesteps))

    #Decoder layer
    #We set return_sequences to True so each unit emits an output per timestep.
    model.add(LSTM(128, return_sequences=True))

    #Dropout regularization
    model.add(Dropout(0.2))

    #To use the TimeDistributed layer, return_sequences on the previous LSTM layer must be True.
    #This is the output layer; it produces an n_features-long vector per timestep.
    model.add(TimeDistributed(Dense(n_features)))

    #Compile using mean absolute error as the loss function
    #and the Adam optimizer with its default learning rate.
    model.compile(loss='mae', optimizer='adam')

    return model
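A usage sketch for the autoencoder above, with assumed values for the module-level `timesteps` and `n_features`; the per-sample reconstruction error at the end is the usual anomaly score for this architecture:

import numpy as np

timesteps, n_features = 30, 4   # assumed values for the globals the function reads

model = LSTM_Autoencoder()
X = np.random.rand(64, timesteps, n_features)
model.fit(X, X, epochs=2, batch_size=16, verbose=0)          # learn to reconstruct the input
errors = np.mean(np.abs(model.predict(X) - X), axis=(1, 2))  # per-sample MAE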
Code example #5
    def __init__(self, config):
        super(CNNLSTMATTN, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units

        self.conv1d1 = Conv1D(filters = self.filters, 
                            kernel_size = self.kernel_size, 
                            activation = self.activation)
        self.conv1d2 = Conv1D(filters = self.filters, 
                            kernel_size = self.kernel_size, 
                            activation = self.activation)
        self.mp1d = MaxPooling1D(pool_size = 2)
        self.flatten = Flatten()
        # self.lstm_in = LSTM(units = self.units, activation = self.activation)
        self.rv = RepeatVector(self.n_outputs)
        # output, forward_h, backward_h, forward_c, backward_c
        self.lstm_out = Bidirectional(LSTM(units = self.lstm_units, return_sequences = True, return_state = True))
        # self.td1 = TimeDistributed(Dense(10, activation = self.activation ))
        self.attention = Attention()
        self.concat = Concatenate()
        self.td2 = Dense(self.n_outputs) # self.n_outputs
Code example #6
def lstm_autoencoder(encoding_size,
                     seq_length,
                     feature_num,
                     return_sequences=True):
    # https://machinelearningmastery.com/lstm-autoencoders/
    # model = Sequential()
    # model.add(LSTM(encoding_size, activation='relu', input_shape=(seq_length, feature_num)))
    # model.add(RepeatVector(seq_length))
    # model.add(LSTM(encoding_size, activation='relu', return_sequences=return_sequences))
    # model.add(TimeDistributed(Dense(feature_num)))

    # https://towardsdatascience.com/step-by-step-understanding-lstm-autoencoder-layers-ffab055b6352
    outer_layer_size = encoding_size * 2
    inner_layer_size = encoding_size
    model = Sequential()
    model.add(
        LSTM(outer_layer_size,
             activation='relu',
             input_shape=(seq_length, feature_num),
             return_sequences=True))
    model.add(LSTM(inner_layer_size, activation='relu',
                   return_sequences=False))
    model.add(RepeatVector(seq_length))
    model.add(LSTM(inner_layer_size, activation='relu', return_sequences=True))
    model.add(
        LSTM(outer_layer_size,
             activation='relu',
             return_sequences=return_sequences))
    model.add(TimeDistributed(Dense(feature_num)))
    return model
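Because every size is an argument here, the symmetric encoder/decoder stack is easy to sanity-check. A sketch (the compile settings are assumptions, since the function returns an uncompiled model):

import numpy as np

model = lstm_autoencoder(encoding_size=32, seq_length=20, feature_num=8)
model.compile(optimizer='adam', loss='mse')
X = np.random.rand(16, 20, 8)
model.fit(X, X, epochs=1, verbose=0)
print(model.output_shape)   # (None, 20, 8): the reconstructed sequence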
Code example #7
def build_model(train, n_input):
    # prepare data
    train_x, train_y = to_supervised(train, n_input)
    # define parameters
    verbose, epochs, batch_size = 0, 20, 16
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[
        2], train_y.shape[1]
    # reshape output into [samples, timesteps, features]
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    # define model
    model = keras.Sequential()
    model.add(
        LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(200, activation='relu', return_sequences=True))
    model.add(TimeDistributed(Dense(100, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    model.compile(loss='mse', optimizer='adam')
    # fit network
    model.fit(train_x,
              train_y,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose)
    return model
Code example #8
    def __init__(self, sequence_length, total_words, *args, **kwargs):
        input_layer = Input(shape=[sequence_length], dtype='int32')

        # get the embedding layer
        embedded = Embedding(input_dim=total_words,
                             output_dim=5,
                             input_length=sequence_length,
                             trainable=True)(input_layer)

        activations = LSTM(total_words, return_sequences=True)(embedded)

        # compute importance for each step
        attention = TimeDistributed(Dense(1, activation='tanh'))(activations)
        attention = Flatten()(attention)
        attention = Activation('softmax')(attention)
        attention = RepeatVector(total_words)(attention)
        attention = Permute([2, 1])(attention)

        # apply the attention
        sent_representation = Multiply()([activations, attention])
        sent_representation = Lambda(lambda xin: K.sum(xin, axis=1))(
            sent_representation)  # K is the Keras backend

        probabilities = Dense(total_words,
                              activation='softmax')(sent_representation)

        super().__init__(inputs=input_layer,
                         outputs=probabilities,
                         *args,
                         **kwargs)
        self.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=["accuracy"])

        print(self.summary())
Code example #9
def create_lstm_autoencoder(input_dim, timesteps, latent_dim):
    """
    Creates an LSTM Autoencoder (VAE). Returns Autoencoder, Encoder, Generator. 
    (All code by fchollet - see reference.)

    # Arguments
        input_dim: int.
        timesteps: int, input timestep dimension.
        latent_dim: int, latent z-layer shape. 

    # References
        - [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
    """

    inputs = Input(shape=(
        timesteps,
        input_dim,
    ))
    encoded = LSTM(latent_dim)(inputs)

    decoded = RepeatVector(timesteps)(encoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)

    #    sequence_autoencoder = Model(inputs, decoded)
    #    encoder = Model(inputs, encoded)

    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='adam', loss='mse')
    return autoencoder
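The commented-out lines hint at a standalone encoder; one hedged way to recover it after training is to slice the same graph (the layer indexing below assumes the exact four-layer topology built above):

import numpy as np
from keras.models import Model

autoencoder = create_lstm_autoencoder(input_dim=3, timesteps=10, latent_dim=16)
X = np.random.rand(32, 10, 3)
autoencoder.fit(X, X, epochs=1, verbose=0)

# layers: [InputLayer, LSTM(latent_dim), RepeatVector, LSTM] -> take the encoder half
encoder = Model(autoencoder.input, autoencoder.layers[1].output)
z = encoder.predict(X)   # (32, 16) latent codes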
Code example #10
def lstm_encoder_decoder(
    input_sequence_len: int,
    input_vocab_size: int,
    output_sequence_len: int,
    output_vocab_size: int,
    embedding_dim: int = 64,
    lstm_hidden_dim: int = 64,
    learning_rate: float = 1e-3,
):
    _input = Input(shape=(input_sequence_len,), name=MODEL_INPUT_NAME)
    embedding = Embedding(
        output_dim=embedding_dim, input_dim=input_vocab_size, mask_zero=False
    )(_input)
    encoding = Bidirectional(LSTM(lstm_hidden_dim, return_sequences=False))(embedding)
    repeat_encoding = RepeatVector(output_sequence_len)(encoding)

    decoding = LSTM(lstm_hidden_dim, return_sequences=True)(repeat_encoding)
    _output = TimeDistributed(Dense(output_vocab_size, activation="softmax"))(decoding)

    model = Model(inputs=[_input], outputs={MODEL_OUTPUT_NAME: _output})
    optimizer = Adam(learning_rate=learning_rate)
    model.compile(
        optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy", sequence_accuracy],
    )
    return model
Code example #11
File: models.py, Project: Beeseey/Lenz
def AlternativeRNNModel2(vocab_size, max_len):

    embedding_size = 256

    input_1 = Input(shape=(256, ))

    #image_model_1 = Dense(embedding_size, activation='relu')(input_1)
    image_model = RepeatVector(max_len)(input_1)

    caption_input = Input(shape=(max_len, ))
    # mask_zero: inputs are zero-padded to a common length; the mask lets downstream layers skip the padded steps.
    caption_model_1 = Embedding(vocab_size, embedding_size,
                                mask_zero=True)(caption_input)
    # Since we are going to predict the next word using the previous words
    # (length of previous words changes with every iteration over the caption), we have to set return_sequences = True.
    caption_model_2 = LSTM(256, return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merging the models and creating a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)

    final_model_2 = LSTM(256, return_sequences=True)(final_model_1)
    attention_output = attention_3d_block(final_model_2)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(attention_output)

    model = Model(inputs=[input_1, caption_input], outputs=final_model)

    #print(model.summary())
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
Code example #12
    def updatedCaptionModel(self, vocabSize, maxCaption, modelType, RNNmodel):
        shape = 1000
        # squeezing features from the CNN model
        imageInput = Input(shape=(shape, ))
        imageModel_1 = Dense(rnnConfig['embedding_size'],
                             activation='relu')(imageInput)
        imageModel = RepeatVector(maxCaption)(imageModel_1)

        # Sequence Model
        captionInput = Input(shape=(maxCaption, ))
        captionModel_1 = Embedding(vocabSize,
                                   rnnConfig['embedding_size'],
                                   mask_zero=True)(captionInput)
        if RNNmodel == 'LSTM':
            captionModel_2 = LSTM(rnnConfig['LSTM_GRU_units'],
                                  return_sequences=True)(captionModel_1)
        elif RNNmodel == 'GRU':
            captionModel_2 = GRU(rnnConfig['LSTM_GRU_units'],
                                 return_sequences=True)(captionModel_1)
        captionModel = TimeDistributed(Dense(
            rnnConfig['embedding_size']))(captionModel_2)

        # Merging the models and creating a softmax classifier
        finalModel_1 = concatenate([imageModel, captionModel])
        finalModel_2 = Bidirectional(
            GRU(rnnConfig['LSTM_GRU_units'],
                return_sequences=False))(finalModel_1)
        finalModel = Dense(vocabSize, activation='softmax')(finalModel_2)

        # tying it together
        model = Model(inputs=[imageInput, captionInput], outputs=finalModel)
        model.compile(loss=CategoricalCrossentropy(),
                      optimizer='adam',
                      metrics=["accuracy"])
        return model
Code example #13
 def repeat_vector(args):
     layer_to_repeat = args[0]
     sequence_layer = args[1]
     # K.shape is evaluated at run time, so this helper must be wrapped in a
     # Lambda layer; it repeats layer_to_repeat once per step of sequence_layer.
     return RepeatVector(K.shape(sequence_layer)[1])(layer_to_repeat)
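RepeatVector normally takes a static integer, so this helper only makes sense wrapped in a Lambda layer, where `K.shape(sequence_layer)[1]` is resolved at run time. A hedged wiring sketch under that assumption (tensor names and sizes are illustrative):

from keras import backend as K
from keras.layers import Input, Lambda, RepeatVector
from keras.models import Model

vec = Input(shape=(16,))        # the vector to repeat
seq = Input(shape=(None, 8))    # a sequence of unknown length
repeated = Lambda(repeat_vector, output_shape=(None, 16))([vec, seq])
model = Model([vec, seq], repeated)   # output: (batch, timesteps of seq, 16)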
Code example #14
def define_model(inp_vocab_size, tar_vocab_size, inp_timesteps, tar_timesteps,
                 n_units):
    """
    Creates a Tensorflow Sequential Model

    Parameters
    ----------
    inp_vocab_size : int, length of input tokenizer object word index +1
    tar_vocab_size : int, length of output tokenizer object word index +1
    inp_timesteps : int, input sequence max length
    tar_timesteps : int, target sequence max length
    n_units : int

    Returns
    -------
    Tensorflow Sequential Model
    """

    model = Sequential()
    model.add(
        Embedding(inp_vocab_size,
                  n_units,
                  input_length=inp_timesteps,
                  mask_zero=True))
    model.add(LSTM(n_units, recurrent_regularizer='l2', dropout=0.15))
    model.add(RepeatVector(tar_timesteps))
    model.add(
        LSTM(n_units,
             return_sequences=True,
             recurrent_regularizer='l2',
             dropout=0.15))
    model.add(TimeDistributed(Dense(tar_vocab_size, activation='softmax')))
    return model
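A compile-and-inspect sketch for the translator above; the sparse loss matches the integer-encoded targets this architecture is usually trained on, but the sizes and the compile call are assumptions:

model = define_model(inp_vocab_size=5000, tar_vocab_size=6000,
                     inp_timesteps=12, tar_timesteps=14, n_units=256)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()   # final output shape: (None, tar_timesteps, tar_vocab_size)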
Code example #15
    def float_model(self):
        """ Instantiate a Keras model for fitting a float from x.
                
        The model takes the following inputs:
        x : internal state
        a : int
            the action considered at x
        
        Parameters
        -----------
            
        Returns
        -------
        model that outputs a float
    
        """

        if self._high_int_dim:
            dim = self._input_dimensions[0]  #FIXME
            inputs = [
                Input(shape=((dim[-2] // self._pooling_encoder),
                             (dim[-1] // self._pooling_encoder),
                             self.n_channels_internal_dim)),
                Input(shape=(self._n_actions, ))
            ]  #data_format='channels_last'

            layers_action = inputs[1]
            layers_action = RepeatVector(
                (dim[-2] // self._pooling_encoder) *
                (dim[-1] // self._pooling_encoder))(layers_action)
            layers_action = Reshape(((dim[-2] // self._pooling_encoder),
                                     (dim[-1] // self._pooling_encoder),
                                     self._n_actions))(layers_action)

            x = Concatenate(axis=-1)([layers_action, inputs[0]])
            x = Conv2D(16, (2, 2), padding='same', activation='tanh')(x)
            x = Conv2D(32, (3, 3), padding='same', activation='tanh')(x)
            x = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')(x)
            x = Conv2D(16, (2, 2), padding='same', activation='tanh')(x)
            x = Conv2D(4, (1, 1), padding='same', activation='tanh')(x)

            # we stack a deep fully-connected network on top
            x = Flatten()(x)
            x = Dense(200, activation='tanh')(x)
        else:
            inputs = [
                Input(shape=(self.internal_dim, )),
                Input(shape=(self._n_actions, ))
            ]  #x
            x = Concatenate()(inputs)  #,axis=-1)
            x = Dense(10, activation='tanh')(x)

        x = Dense(50, activation='tanh')(x)
        x = Dense(20, activation='tanh')(x)

        out = Dense(1)(x)

        model = Model(inputs=inputs, outputs=out)

        return model
Code example #16
File: auto.py, Project: solislemuslab/mycovirus-ml
    def compile_LSTM(self, input_shape):
        # Total params: 366,112
        i = Input(shape=input_shape)
        x = i
        x = Bidirectional(LSTM(128, activation='relu'))(x)
        print(x.shape)
        latent_shape = x.shape[1:]
        self.encoder = models.Model(inputs=i, outputs=x)
        self.encoder.compile(optimizer='adam', loss='mean_squared_error')
        
        i = Input(shape=latent_shape)
        x = i
        x = RepeatVector(32)(x)
        print(x.shape)
        x = LSTM(128, activation='relu', return_sequences=True)(x)
        print(x.shape)
        x = TimeDistributed(Dense(32, activation=None))(x)
        print(x.shape)
        self.decoder = models.Model(inputs=i, outputs=x)
        self.decoder.compile(optimizer='adam', loss='mean_squared_error')

        x = Input(shape=input_shape)
        latent = self.encoder(x)
        x_h = self.decoder(latent)
        self.ae = models.Model(inputs=x, outputs=x_h)
        self.ae.compile(optimizer='adam', loss='mean_squared_error')
        self.ae.summary()
Code example #17
def joint_convLstm(num_filters, kernel_length, input_timesteps, num_links,
                   output_timesteps, quantiles, prob, loss, opt):
    model = Sequential()
    model.add(
        BatchNormalization(name='batch_norm_0',
                           input_shape=(input_timesteps, num_links, 1, 1)))
    model.add(
        ConvLSTM2D(name='conv_lstm_1',
                   filters=num_filters,
                   kernel_size=(kernel_length, 1),
                   padding='same',
                   return_sequences=False))

    model.add(Dropout(prob, name='dropout_1'))
    model.add(BatchNormalization(name='batch_norm_1'))

    model.add(Flatten())
    model.add(RepeatVector(output_timesteps))
    model.add(Reshape((output_timesteps, num_links, 1, num_filters)))

    model.add(
        ConvLSTM2D(name='conv_lstm_2',
                   filters=num_filters,
                   kernel_size=(kernel_length, 1),
                   padding='same',
                   return_sequences=True))
    model.add(Dropout(prob, name='dropout_2'))

    model.add(TimeDistributed(Dense(units=len(quantiles) + 1, name='dense_1')))
    model.compile(loss=loss, optimizer=opt)
    return model
Code example #18
def создать_слой(данные, input_shape=None, last_layer=False):
  # Builds a Keras layer from a Russian spec string of the form 'Name-param'.
  args = {'activation': 'relu'}
  if input_shape is not None:
    args['input_shape'] = input_shape
  if last_layer:
    args['activation'] = 'softmax'
  if '-' in данные:
    буква, параметр = данные.split('-')  # split into layer name and parameter
  else:
    буква = данные
  if буква == 'Полносвязный':      # 'Dense'
    return Dense(int(параметр), **args)
  if буква == 'Повтор':            # 'Repeat'
    return RepeatVector(int(параметр))
  if буква == 'Эмбеддинг':         # 'Embedding'
    return Embedding(1100, int(параметр), input_length=20)
  elif буква == 'Сверточный2D':    # 'Conv2D'
    return Conv2D(int(параметр), (3, 3), padding='same', **args)
  elif буква == 'Сверточный1D':    # 'Conv1D'
    return Conv1D(int(параметр), 5, padding='same', **args)
  elif буква == 'Выравнивающий':   # 'Flatten'
    if 'input_shape' in args:
      return Flatten(input_shape=args['input_shape'])
    else:
      return Flatten()
  elif буква == 'Нормализация':    # 'BatchNormalization'
    return BatchNormalization()
  elif буква == 'МаксПуллинг':     # 'MaxPooling2D'
    return MaxPooling2D()
  elif буква == 'МаксПуллинг1D':   # 'MaxPooling1D'
    return MaxPooling1D()
  elif буква == 'Дропаут':         # 'Dropout'
    return Dropout(float(параметр))
  else:
    return 0
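The factory keys on Russian spec strings of the form 'Name-param'. A usage sketch assembling a small image classifier from such specs (the spec list, input shape, and compile settings are illustrative):

from keras.models import Sequential

model = Sequential([
    создать_слой('Сверточный2D-32', input_shape=(28, 28, 1)),  # Conv2D(32, relu)
    создать_слой('МаксПуллинг'),                               # MaxPooling2D()
    создать_слой('Выравнивающий'),                             # Flatten()
    создать_слой('Полносвязный-10', last_layer=True),          # Dense(10, softmax)
])
model.compile(optimizer='adam', loss='categorical_crossentropy')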
Code example #19
def ModelFitter(train, n_in, n_out, epo, bs, sr, layers):

    tf.keras.backend.clear_session()

    training_features, training_labels = NextSample(train, n_in, n_out)
    y_shape = training_labels.shape
    x_shape = training_features.shape

    Ndate, Nfeat, Nout = x_shape[1], x_shape[2], y_shape[1]

    training_labels = training_labels.reshape((y_shape[0], y_shape[1], 1))

    model = keras.Sequential([
        LSTM(layers[0], activation='relu', input_shape=(Ndate, Nfeat)),
        RepeatVector(Nout),
        LSTM(layers[0], activation='relu', return_sequences=True),
        TimeDistributed(Dense(layers[1], activation='relu')),
        TimeDistributed(Dense(1))
    ])

    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.fit(training_features,
              training_labels,
              epochs=epo,
              batch_size=bs,
              validation_split=sr)

    return model
Code example #20
    def model(self):
        """
        Model definition for the inject architecture: the encoded image is passed as an input to the LSTM
        :return: Defined Model
        """
        img_input = Input(shape=(2048, ))

        img_enc = Dense(300, activation="relu")(img_input)

        images = RepeatVector(self.max_sentence_length)(img_enc)

        # Text input
        text_input = Input(shape=(self.max_sentence_length, ))
        embedding = Embedding(
            self.vocab_size,
            self.embedding_dimension,
            input_length=self.max_sentence_length)(text_input)

        x = Concatenate()([images, embedding])
        y = Bidirectional(LSTM(256, return_sequences=False))(x)

        pred = Dense(self.vocab_size, activation='softmax')(y)

        model = Model(inputs=[img_input, text_input], outputs=pred)

        return model
Code example #21
def build_univariate_encdec_cnn_lstm_model(train, n_input):
    # Prepare data.
    train_x, train_y = to_supervised(train, n_input)

    # Define parameters.
    verbose, epochs, batch_size = 0, 20, 16
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[
        2], train_y.shape[1]

    # Reshape output into [samples, timesteps, features].
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))

    # Define model.
    model = Sequential()
    model.add(
        Conv1D(filters=64,
               kernel_size=3,
               activation='relu',
               input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(200, activation='relu', return_sequences=True))
    model.add(TimeDistributed(Dense(100, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    model.compile(loss='mse', optimizer='adam')

    # Fit network.
    model.fit(train_x,
              train_y,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose)
    return model
Code example #22
    def model_final(input_shape, output_sequence_length, s_size, t_size):
        """  
        Build and train a model that incorporates embedding, encoder-decoder, and bidirectional RNN on x and y
        :param input_shape: Tuple of input shape
        :param output_sequence_length: Length of output sequence
        :param english_vocab_size: Number of unique English words in the dataset
        :param french_vocab_size: Number of unique French words in the dataset
        :return: Keras model built, but not trained
        """
        # TODO: Implement
        # Hyperparameters
        learning_rate = 0.005

        # Build the layers
        model = Sequential()
        # Embedding
        model.add(
            L.Embedding(s_size,
                        100,
                        input_length=input_shape[1],
                        input_shape=input_shape[1:],
                        weights=[embedding_matrix],  # pretrained matrix from the enclosing scope
                        trainable=False))
        # Encoder
        model.add(Bidirectional(GRU(100)))
        model.add(RepeatVector(output_sequence_length))
        # Decoder
        model.add(Bidirectional(GRU(100, return_sequences=True)))
        model.add(TimeDistributed(Dense(512, activation='relu')))
        model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(t_size, activation='softmax')))
        model.compile(loss=sparse_categorical_crossentropy,
                      optimizer=Adam(learning_rate),
                      metrics=['accuracy'])
        return model
Code example #23
    def __init__(self, config):
        super(BiLSTMATTNre, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        
        self.encoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = True, return_state= True, recurrent_initializer='glorot_uniform'))
        self.rv = RepeatVector(self.n_outputs)
        self.decoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = False, return_state=False, recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate(axis=-1)
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn0 = TimeDistributed(Dense(1))
        self.flatten = Flatten()

        self.fcn1 = Dense(50)#, activation='relu')

        self.aux_lstm = LSTM(self.lstm_units, dropout=0.5, return_sequences=False)
        self.aux_fcn1 = Dense(20)
        
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten = Flatten()

        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')
Code example #24
def new_attention_3d_block(
    slt_api_num,
    feature_dim,
    name='',
):
    """
    :param query: (None,D)
    :param key: (None,slt_api_num,D)
    :param value: (None,slt_api_num,D), usually the same as key
    :return:
    """

    query = Input(shape=(feature_dim, ), name=name + 'query_input')
    key = Input(shape=(
        slt_api_num,
        feature_dim,
    ), name=name + 'key_input')
    value = Input(shape=(
        slt_api_num,
        feature_dim,
    ),
                  name=name + 'value_input')

    Repeat_query = RepeatVector(slt_api_num)(query)  # (None,slt_api_num,D)
    att_score = Concatenate(name=name + 'att_info_concate')(
        [Repeat_query, key])  # (None,slt_api_num,2*D); works better without adding the outer product and difference terms?
    # outer_prod = Multiply()([Repeat_query,key])
    # sub = Subtract()([Repeat_query,key])
    # att_score = Concatenate(name=name+'att_info_concate')([Repeat_query,key,outer_prod,sub]) # (None,slt_api_num,4*D)

    att_score = Dense(36)(att_score)  # (None,slt_api_num,36)
    att_score = PReLU()(att_score)
    if 'new_3layer' in new_Para.param.CI_handle_slt_apis_mode:
        att_score = Dense(16)(att_score)  # (None,slt_api_num,16)
        att_score = PReLU()(att_score)

    # att_score = Dense(1, activation='linear')(att_score)  # (None,slt_api_num,1)
    att_score = Dense(1)(att_score)  # (None,slt_api_num,1) # final nonlinearity below
    att_score = PReLU()(att_score)

    att_score = Reshape((slt_api_num, ), )(att_score)  # (None,slt_api_num)
    a_probs = Dense(slt_api_num, activation='softmax')(
        att_score
    )  # (None,slt_api_num); is this extra Dense needed? it helps in practice, and a softmax layer usually carries Dense weights anyway
    # a_probs = Activation('softmax')(att_score)
    a_probs = Reshape((slt_api_num, 1), )(a_probs)  # (None,slt_api_num,1)

    # # A direct fully-connected + softmax layer here is problematic!
    # a_probs = Dense(slt_api_num, activation='softmax')(att_score) # (None,slt_api_num,16)
    # a_probs = Permute((2, 1))(a_probs)

    output_attention_mul = Multiply(name=name + 'attention_mul')(
        [a_probs, value])  # shape=(?,slt_api_num, D)
    att_result = Lambda(lambda x: tf.reduce_sum(x, axis=1))(
        output_attention_mul)  # (None,D)

    model = Model(inputs=[query, key, value],
                  outputs=[att_result],
                  name=name + 'attBlock')
    return model
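A wiring sketch for the attention block (sizes are illustrative; `new_Para.param.CI_handle_slt_apis_mode` is read from the project's global config, so the extra 16-unit layer may or may not be present):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

slt_api_num, feature_dim = 3, 32
att_block = new_attention_3d_block(slt_api_num, feature_dim, name='demo_')

q = Input(shape=(feature_dim, ))
k = Input(shape=(slt_api_num, feature_dim))
v = Input(shape=(slt_api_num, feature_dim))
pooled = att_block([q, k, v])   # (None, feature_dim): attention-weighted sum of value
model = Model([q, k, v], pooled)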
Code example #25
    def fit(self, X, y, X_val, y_val):
        """        
        This method will apply the fit operation.
        
        The model will be trained on the training set and the early stopping 
        method will be applied on the validation set.
        
        Parameters
        ----------
        X            :  array-lke
                        The predictor features in the training set. 
                    
        y            :  array-lke
                        The target feature in the training set. 
                    
        X_val        :  array-lke
                        The predictor features in the validation set. 
                    
        y_val        :  array-lke
                        The target feature in the training set. 
                
       """
        # Convert the input data to tensors
        X_train = X.reshape(
            (len(X), self.num_time_steps, self.num_temporal_feats))
        y_train = y.values.reshape((len(y), 1))
        X_val = X_val.reshape(
            (len(X_val), self.num_time_steps, self.num_temporal_feats))
        y_val = y_val.values.reshape((len(y_val), 1))
        train_x = X_train.reshape((X_train.shape[0], 1, 1, self.num_time_steps,
                                   self.num_temporal_feats))
        train_y = y_train.reshape((y_train.shape[0], y_train.shape[1], 1))
        val_x = X_val.reshape((X_val.shape[0], 1, 1, self.num_time_steps,
                               self.num_temporal_feats))
        val_y = y_val.reshape((y_val.shape[0], y_val.shape[1], 1))

        # ConvLstm architecture
        model = tf.keras.Sequential()
        model.add(
            ConvLSTM2D(filters=64,
                       kernel_size=(1, 3),
                       input_shape=(1, 1, self.num_time_steps,
                                    self.num_temporal_feats)))
        model.add(Flatten())
        model.add(RepeatVector(train_y.shape[1]))  # one decoder step per target timestep
        model.add(LSTM(32, return_sequences=True))
        model.add(LSTM(16, dropout=0.1, recurrent_dropout=0.1))
        model.add(Dense(1))
        model.compile(optimizer='adam',
                      loss=tf.keras.losses.Huber(),
                      metrics=[tf.keras.metrics.RootMeanSquaredError()])
        # fit network
        self.history = model.fit(train_x,
                                 train_y,
                                 epochs=self.epoch,
                                 batch_size=self.batch,
                                 validation_data=(val_x, val_y),
                                 callbacks=[self.es])

        return self.history
Code example #26
def Attention_CNN_Bi_LSTM_AE(n_steps, n_features, activation):
    en_input = Input(shape=[n_steps, n_features])
    e = Conv1D(32, kernel_size=1, padding="SAME",
               activation=activation)(en_input)
    e = MaxPool1D(pool_size=2)(e)
    e = Conv1D(64, kernel_size=3, padding="SAME", activation=activation)(e)
    e = MaxPool1D(pool_size=2)(e)
    e = Conv1D(128, kernel_size=5, padding="SAME", activation=activation)(e)
    e = MaxPool1D(pool_size=2)(e)
    e = Bidirectional(
        LSTM(64, recurrent_dropout=0.1, dropout=0.1, return_sequences=True))(e)
    e = Attention(use_scale=True)([e, e])          # self-attention over timesteps
    e = keras.layers.GlobalAveragePooling1D()(e)   # collapse to a single vector
    en_output = Dense(get_output_dim(n_steps * n_features),
                      kernel_initializer='lecun_normal',
                      activation='selu')(e)
    encoder = keras.models.Model(inputs=[en_input], outputs=[en_output])

    decoder = keras.models.Sequential([
        RepeatVector(n_steps,
                     input_shape=[get_output_dim(n_steps * n_features)]),
        LSTM(256, return_sequences=True),
        keras.layers.Reshape([n_steps, 256, 1]),
        Conv2DTranspose(filters=16, kernel_size=3, activation=activation),
        Conv2DTranspose(filters=1, kernel_size=3, activation=activation),
        keras.layers.Flatten(),
        Dense(n_steps * n_features),
        keras.layers.Reshape([n_steps, n_features])
    ])

    return encoder, decoder
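The function hands back the encoder and decoder separately; a sketch of chaining them into a trainable autoencoder, which is the usual way such pairs are composed (the wiring and compile settings are assumptions; `get_output_dim` is the project's own helper):

import numpy as np
from tensorflow import keras

n_steps, n_features = 32, 4
encoder, decoder = Attention_CNN_Bi_LSTM_AE(n_steps, n_features, activation='relu')

inp = keras.Input(shape=(n_steps, n_features))
autoencoder = keras.Model(inp, decoder(encoder(inp)))
autoencoder.compile(optimizer='adam', loss='mse')

X = np.random.rand(16, n_steps, n_features).astype('float32')
autoencoder.fit(X, X, epochs=1, verbose=0)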
Code example #27
    def create_model(self, ret_model=False):
        #base_model = VGG16(weights='imagenet', include_top=False, input_shape = (224, 224, 3))
        #base_model.trainable=False
        image_model = Sequential()
        #image_model.add(base_model)
        #image_model.add(Flatten())
        image_model.add(Dense(EMBEDDING_DIM, input_dim=4096,
                              activation='relu'))

        image_model.add(RepeatVector(self.max_cap_len))

        lang_model = Sequential()
        lang_model.add(
            Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
        lang_model.add(LSTM(256, return_sequences=True))
        lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

        # Concatenate cannot merge two Sequential models inside another
        # Sequential, so the branches are joined with the functional API.
        merged = Concatenate()([image_model.output, lang_model.output])
        x = LSTM(1000, return_sequences=False)(merged)
        x = Dense(self.vocab_size)(x)
        out = Activation('softmax')(x)
        model = Model(inputs=[image_model.input, lang_model.input],
                      outputs=out)

        print("Model created!")

        if ret_model:
            return model

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model
Code example #28
def model_layers(input, embed_input):
    """
    model from: https://github.com/emilwallner/Coloring-greyscale-images/blob/master/Full-version/full_version.ipynb
    """

    # Going Down
    x = Conv2D(64, (3, 3), activation="relu", padding="same", strides=2)(input)
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(128, (3, 3), activation="relu", padding="same", strides=2)(x)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(256, (3, 3), activation="relu", padding="same", strides=2)(x)
    x = Conv2D(512, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(512, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)

    # Embedding
    f = RepeatVector(32 * 32)(embed_input)
    f = Reshape((32, 32, c.EMBED_SIZE))(f)
    x = concatenate([x, f], axis=3)
    x = Conv2D(256, (1, 1), activation="relu", padding="same")(x)

    # Going Up
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(16, (3, 3), activation="relu", padding="same")(x)
    x = Conv2D(2, (3, 3), activation="tanh", padding="same")(x)
    x = UpSampling2D((2, 2))(x)

    return x
Code example #29
def load():
    image_model = Sequential([
        Dense(300, input_shape=(2048, ), activation='relu'),
        RepeatVector(40)
    ])
    caption_model = Sequential([
        Embedding(8256, 300, input_length=40),
        LSTM(256, return_sequences=True),
        TimeDistributed(Dense(300))
    ])
    # image_in = Input(shape=(2048,))
    # caption_in = Input(shape=(8256))
    merged = concatenate([image_model.output, caption_model.output], axis=1)
    latent = Bidirectional(LSTM(256, return_sequences=False))(merged)
    out = Dense(8256, activation='softmax')(latent)
    final_model = Model([image_model.input, caption_model.input], out)

    final_model.compile(loss='categorical_crossentropy',
                        optimizer=RMSprop(),
                        metrics=['accuracy'])

    # print(final_model.summary())

    final_model.load_weights("./weights/time_inceptionV3_1.5987_loss.h5")
    return final_model
Code example #30
def LSTM_TC_model_left(L, train_tokens, target_vector_array, num_classes=3):
    MAX_SEQUENCE_LENGTH = len(max(train_tokens, key=len))
    input_tokens = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
    input_target_vector = Input(shape=(target_vector_array.shape[1], ),
                                dtype='float32')

    embb = Embedding(L.shape[0],
                     L.shape[1],
                     input_length=train_tokens.shape[1],
                     embeddings_initializer=Constant(L),
                     trainable=False)(input_tokens)
    repeat_target = RepeatVector(MAX_SEQUENCE_LENGTH)(
        input_target_vector)  # feed the target word vector to every LSTM step
    conc = Concatenate(axis=-1)([embb, repeat_target])
    lstm = LSTM(100,
                dropout=0.3,
                recurrent_dropout=0.1,
                recurrent_regularizer='l2')(conc)
    #lstm=Dropout(0.3)(lstm)

    #dense=Dense(64)(lstm)
    #dense=BatchNormalization()(dense)

    #dense=Dropout(0.3)(dense)

    output = Dense(num_classes, activation='softmax')(lstm)
    model = Model([input_tokens, input_target_vector], outputs=output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
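A usage sketch with dummy embeddings and tokens (every size below is an assumption; `L` stands in for a pretrained embedding matrix):

import numpy as np
from tensorflow.keras.utils import to_categorical

vocab, emb_dim, n_docs, seq_len = 1000, 50, 8, 25
L = np.random.rand(vocab, emb_dim)                       # stand-in embedding matrix
train_tokens = np.random.randint(1, vocab, (n_docs, seq_len))
target_vectors = np.random.rand(n_docs, emb_dim).astype('float32')

model = LSTM_TC_model_left(L, train_tokens, target_vectors)
labels = to_categorical(np.random.randint(0, 3, n_docs), 3)
model.fit([train_tokens, target_vectors], labels, epochs=1, verbose=0)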