Code Example #1
File: lstur.py  Project: namaljayathunga/Recommenders
    def _build_userencoder(self, titleencoder, type="ini"):
        """The main function to create user encoder of LSTUR.

        Args:
            titleencoder(obj): the news encoder of LSTUR. 

        Return:
            obj: the user encoder of LSTUR.
        """
        hparams = self.hparams
        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )
        user_indexes = keras.Input(shape=(1,), dtype="int32")

        user_embedding_layer = layers.Embedding(
            len(self.train_iterator.uid2index),
            hparams.gru_unit,
            trainable=True,
            embeddings_initializer="zeros",
        )

        long_u_emb = layers.Reshape((hparams.gru_unit,))(
            user_embedding_layer(user_indexes)
        )
        click_title_presents = layers.TimeDistributed(titleencoder)(his_input_title)

        if type == "ini":
            user_present = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(
                layers.Masking(mask_value=0.0)(click_title_presents),
                initial_state=[long_u_emb],
            )
        elif type == "con":
            short_uemb = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(layers.Masking(mask_value=0.0)(click_title_presents))

            user_present = layers.Concatenate()([short_uemb, long_u_emb])
            user_present = layers.Dense(
                hparams.gru_unit,
                bias_initializer=keras.initializers.Zeros(),
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
            )(user_present)

        model = keras.Model(
            [his_input_title, user_indexes], user_present, name="user_encoder"
        )
        return model
Code Example #2
    def __init__(self,
                 num_students,
                 num_skills,
                 max_sequence_length,
                 embed_dim=200,
                 hidden_units=100,
                 dropout_rate=0.2):
        x = tf.keras.Input(shape=(max_sequence_length, num_skills * 2),
                           name='x')
        q = tf.keras.Input(shape=(max_sequence_length, num_skills), name='q')

        emb = layers.Dense(
            embed_dim,
            trainable=False,
            kernel_initializer=tf.keras.initializers.RandomNormal(seed=777),
            input_shape=(None, max_sequence_length, num_skills * 2))
        mask = layers.Masking(mask_value=0,
                              input_shape=(max_sequence_length, embed_dim))
        lstm = layers.LSTM(hidden_units, return_sequences=True)
        out_dropout = layers.TimeDistributed(layers.Dropout(dropout_rate))
        out_sigmoid = layers.TimeDistributed(
            layers.Dense(num_skills, activation='sigmoid'))
        dot = layers.Multiply()
        # HACK: the shape of q does not fit the TimeDistributed operation (this may be correct as-is)
        # dot =  layers.TimeDistributed(layers.Multiply())

        reduce_sum = layers.Dense(
            1,
            trainable=False,
            kernel_initializer=tf.keras.initializers.constant(value=1),
            input_shape=(None, max_sequence_length, num_skills))
        # A Reshape layer does not work here in graph mode:  # reshape_l = layers.Reshape((-1, 6), dynamic=False)
        final_mask = layers.TimeDistributed(layers.Masking(
            mask_value=0, input_shape=(None, max_sequence_length, 1)),
                                            name='outputs')

        # define graph
        n = emb(x)
        masked_n = mask(n)
        h = lstm(masked_n)
        o = out_dropout(h)
        y_pred = out_sigmoid(o)
        y_pred = dot([y_pred, q])
        # HACK: calling tf.reduce_sum directly (without a layer) might be faster
        # y_pred = reduce_sum(y_pred, axis=2)
        y_pred = reduce_sum(y_pred)
        outputs = final_mask(y_pred)
        #  KEEP: another approach for final mask
        # patch initial mask by boolean_mask(tensor, mask)
        #tf.boolean_mask(y_pred, masked_n._keras_mask)
        #y_pred._keras_mask=masked_n._keras_mask

        super().__init__(inputs=[x, q], outputs=outputs, name="DKTModel")
Code Example #3
def fit_model(x_train, y_train):

    max_vocab = 25000
    model = Sequential()
    model.add(layers.Embedding(
        max_vocab, output_dim=128))  # We have 25000 words in the vocabulary,
    model.add(layers.Masking()
              )  # and each word is represented by a vector of size 128
    model.add(layers.LSTM(128, activation='tanh'))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    es = EarlyStopping(patience=5, restore_best_weights=True)

    model.fit(x_train,
              y_train,
              epochs=5,
              batch_size=16,
              validation_split=0.3,
              callbacks=[es])

    return model
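A minimal usage sketch for fit_model above; the data and tokenization below are invented and not part of the original snippet. Sequences are integer-encoded and post-padded with 0. Note that because Masking() here sits after the Embedding, a timestep is only skipped if the embedding of token 0 happens to be the all-zeros vector; setting mask_zero=True on the Embedding is the more common way to mask padding.

import numpy as np
from tensorflow import keras

rng = np.random.default_rng(0)
# 100 toy "documents" of varying length, word ids in [1, 25000)
sequences = [rng.integers(1, 25000, size=rng.integers(5, 50)).tolist() for _ in range(100)]
x_train = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=50, padding='post')
y_train = rng.integers(0, 2, size=100)

model = fit_model(x_train, y_train)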
Code Example #4
def encoder(X, l2=0.001, dropout=1e-6, lr=0.006, seed=42):

    tf.random.set_seed(seed)
    regularizer = keras.regularizers.l2(l2)
    CustomGRU = partial(keras.layers.GRU,
                        kernel_regularizer=regularizer,
                        dropout=dropout,
                        recurrent_dropout=dropout)
    '''
    For masking, refer: 
        https://www.tensorflow.org/guide/keras/masking_and_padding
        https://gist.github.com/ragulpr/601486471549cfa26fe4af36a1fade21
    '''
    model = keras.models.Sequential([
        layers.Masking(mask_value=0.0, input_shape=[None, X.shape[-1]]),
        CustomGRU(16, return_sequences=True),
        CustomGRU(16, return_sequences=True),
        CustomGRU(16, return_sequences=True),
        layers.TimeDistributed(layers.Dense(3, activation='linear')),
        layers.TimeDistributed(layers.Dense(15, activation='softmax'))
    ])

    optimizer = keras.optimizers.Adam(lr=lr)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['sparse_categorical_accuracy'])

    return model
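A short, hedged usage sketch for encoder() above; the arrays below are invented. The convention carried over from the Masking layer is that all-zero timesteps are treated as padding and skipped by the GRUs and the loss.

import numpy as np

X = np.zeros((32, 40, 3), dtype='float32')      # batch x time x feat, zero-padded
X[:, :25, :] = np.random.rand(32, 25, 3)        # only the first 25 timesteps carry data
y = np.random.randint(0, 15, size=(32, 40))     # per-timestep class ids for the softmax(15) head

model = encoder(X)
model.fit(X, y, epochs=1, batch_size=8)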
Code Example #5
def deeptriangle(timesteps,
                 features,
                 names_output=["paid_output", "case_reserves_output"]):

    tfk.clear_session()

    ay_seq_input = layers.Input(shape=(timesteps, features),
                                name='ay_seq_input')
    company_code_input = layers.Input(shape=(1,), name="company_input")
    company_code_embedding = layers.Embedding(200, 49)(company_code_input)
    company_code_embedding = layers.Flatten()(company_code_embedding)
    company_code_embedding = layers.RepeatVector(timesteps)(
        company_code_embedding)

    encoded = layers.Masking(mask_value=-99)(ay_seq_input)
    encoded = layers.GRU(128, dropout=0.2, recurrent_dropout=0.2)(encoded)

    concat_layer = lambda x: layers.Concatenate()([x, company_code_embedding])
    decoded = layers.RepeatVector(timesteps)(encoded)
    decoded = layers.GRU(128,
                         return_sequences=True,
                         dropout=0.2,
                         recurrent_dropout=0.2)(decoded)
    decoded = layers.Lambda(concat_layer)(decoded)

    feature_list = []
    for name in names_output:
        feature_list.append(create_feature_output(name, decoded))

    model = keras.Model(inputs=[ay_seq_input, company_code_input],
                        outputs=feature_list,
                        name="DeepTriangle")

    return model
Code Example #6
    def call(self, inputs, pos_inputs, training):
        word_embed = tf.nn.embedding_lookup(self.embeddings, inputs)
        pos_embed = tf.nn.embedding_lookup(self.embeddings, pos_inputs)

        ### TODO(Students) START
        # ...
        #print ("word_embed shape: ", word_embed.shape)
        masking_layer = layers.Masking()
        unmasked_embedding = tf.cast(tf.tile(tf.expand_dims(inputs, axis=-1), [1, 1, 10]), tf.float32)        
        masked_embedding = masking_layer(unmasked_embedding)
        
        embed = tf.concat([word_embed, pos_embed], -1)
        

        if(training==True):
            embed = layers.Dropout(0.3)(embed)

        op = self.biDirection(embed, mask = masked_embedding._keras_mask)

        if(training==True):
            op = layers.Dropout(0.3)(op)
        #print (op.shape)
        
        attention = self.attn(op)
        
        if(training==True):
            attention = layers.Dropout(0.5)(attention)        

        logits = self.decoder(tf.reshape(attention, [-1 , 2*self.hidden_size]))
        #print (output.shape)

        ### TODO(Students) END

        return {'logits': logits}
Code Example #7
File: ms_tcn_model.py  Project: Miha87/phd_mg
    def __init__(self, num_layers, filters, num_classes, dropout_rate,
                 **kwargs):
        super().__init__(**kwargs)
        ## Define the module hyperparameters
        self.num_layers = num_layers
        self.filters = filters
        self.num_classes = num_classes
        self.dropout_rate = dropout_rate

        ## Define the layers in the module
        # Masking layers
        self.masking = layers.Masking(mask_value=0.)
        self.conv_mask = MaskConv1D()

        # Layer that adjusts the input dimensionality
        self.conv_1x1 = layers.Conv1D(filters, kernel_size=1)
        # The original implementation uses shared weights, i.e. the same module is reused
        # several times; this reduces the number of parameters but costs some model accuracy
        self.dilated_residual_blocks = [
            DilatedResidualModule(filters,
                                  dilation_rate=2**i,
                                  dropout_rate=dropout_rate)
            for i in range(num_layers)
        ]

        # Output prediction layer of the module
        self.conv_out = layers.Conv1D(num_classes,
                                      kernel_size=1,
                                      activation="softmax")
Code Example #8
File: models.py  Project: LCE-UMD/GRU
def GRUDecoder(X, Y, k_layers=1, l2=0, dropout=0, lr=0.001, seed=42):
    """
    Parameters
    ---------
    X: tensor (batch x time x feat)
    k_layers: int, number of hidden layers
    k_hidden: int, number of units
    k_class: int, number of classes
    
    Returns
    -------
    model: complied model
    """

    tf.random.set_seed(seed)
    regularizer = keras.regularizers.l2(l2)
    CustomGRU = partial(keras.layers.GRU,
                        kernel_regularizer=regularizer,
                        dropout=dropout,
                        recurrent_dropout=dropout)
    input_layers = [
        layers.Masking(mask_value=0.0, input_shape=[None, X.shape[-1]])
    ]

    hidden_layers = []

    for ii in range(k_layers):
        hidden_layers.append(CustomGRU(Y.shape[-1], return_sequences=True))

    optimizer = keras.optimizers.Adam(lr=lr)

    model = keras.models.Sequential(input_layers + hidden_layers)

    model.compile(loss='mse', optimizer=optimizer)
    return model
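A hedged usage sketch for GRUDecoder above; X and Y are invented. Each GRU layer takes its width from Y.shape[-1], and all-zero timesteps in X are masked out.

import numpy as np

X = np.zeros((8, 20, 5), dtype='float32')
X[:, :15, :] = np.random.rand(8, 15, 5)          # 15 valid timesteps, 5 zero-padded ones
Y = np.random.rand(8, 20, 2).astype('float32')   # per-timestep regression targets

model = GRUDecoder(X, Y, k_layers=2)
model.fit(X, Y, epochs=1, batch_size=4)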
Code Example #9
    def __init__(self, n_feat=22, n_lstm=1, lstm_sizes="[5]", fc_sizes="[80]", lstm_dropout=0.2, dropout=0.1, activation='sigmoid'):
        super(LSTM_one_to_one, self).__init__()

        lstm_sizes = ast.literal_eval(lstm_sizes)
        fc_sizes = ast.literal_eval(fc_sizes)

        shape = (None, n_feat)
        Input = keras.Input(shape)

        slices = layers.Lambda(
            lambda x, i: x[:, :, i: i + 1], name='slicer_lambda')
        y = layers.Masking(mask_value=0, name="masking")(Input)

        n_hidden = lstm_sizes[0]

        lstms = [layers.CuDNNLSTM(
            n_hidden, return_sequences=False, name="lstm1_feature_%d" % _) for _ in range(n_feat)]

        ys = []
        for i, lstm in enumerate(lstms):
            slices.arguments = {'i': i}
            ys.append(lstm(slices(y)))
        y = layers.concatenate(ys, axis=-1, name="merge")

        for i, fc in enumerate(fc_sizes):
            y = layers.Dense(fc, activation=activation, name="fc_%d" % i)(y)
            y = layers.Dropout(dropout, name="dropout_%i" % i)(y)
        y = layers.Dense(1, activation=activation)(y)

        self.model = keras.Model(Input, y)
Code Example #10
def build_model():
    inp = keras.Input(shape=[
        102,
    ], dtype=tf.int32)
    emb = layers.Embedding(
        41,
        64,
        mask_zero=True,
        embeddings_regularizer=keras.regularizers.l2(1e-5),
        embeddings_constraint=keras.constraints.max_norm(3))(inp)
    mask = tf.equal(inp, 0)
    emb = layers.Masking(mask_value=0.0)(emb)
    emb = layers.Dropout(dropout_rate)(emb)
    x = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(emb)
    x = layers.Dropout(dropout_rate)(x)
    x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)

    x = RemoveMask()(x)
    x = AttentionWithContext(x, mask)
    x = layers.Dense(
        256,
        activation='relu',
    )(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    y = layers.Dense(1)(x)
    model = keras.Model(inputs=inp, outputs=y)
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        0.005, decay_steps=3000, decay_rate=0.96, staircase=True)

    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='mse', optimizer=optimizer, metrics=[r2_keras])
    return model
Code Example #11
def configure_model(model_info, lstm_type='', optimizer = tf.compat.v1.train.AdamOptimizer(0.001)):

    '''
    :param model_info: object exposing feat_size, layers (hidden layer sizes) and n_classes
    :param lstm_type: 'b' selects a bidirectional first LSTM layer
    :param optimizer: optimizer instance used to compile the model
    :return: compiled model
    '''

    model = tf.keras.Sequential()
    model.add(layers.Masking(mask_value=1., input_shape=(None, model_info.feat_size)))

    for l, layer in enumerate(model_info.layers):
        if l == 0:
            if lstm_type == 'b':
                logging.info('Using bidirectional LSTM')
                model.add(layers.Bidirectional(layers.LSTM(layer, input_shape=(None, model_info.feat_size), dropout=0.1, return_sequences=True, recurrent_dropout=0.1)))
            else:
                model.add(layers.LSTM(layer, input_shape=(None, model_info.feat_size), dropout=0.1, recurrent_dropout=0.1, return_sequences=True))
        else:
            model.add(layers.TimeDistributed(layers.Dense(layer,activation='relu')))
            model.add(layers.Dropout(0.1))

    # Output layer: per-timestep class probabilities (added once, after the hidden layers)
    model.add(layers.TimeDistributed(layers.Dense(model_info.n_classes, activation='softmax')))

    model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])

    return model
Code Example #12
def LogReg(k_dim=3,k_class=15,seed=42):
    '''
    Logistic regression classifier 
    
    Parameters
    ----------
    k_dim: int, number of input features
    k_class: int, number of classes
    
    Returns
    -------
    model: compiled model
    '''
        
    tf.random.set_seed(seed)
    masking_layer = [
        layers.Masking(mask_value=0.0, input_shape=[None,k_dim])
    ]
    output_layer = [
        layers.Dense(k_class,activation='softmax')
    ]
    model = keras.models.Sequential(
        masking_layer + output_layer
    )
    
    optimizer = keras.optimizers.Adam()
    model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                      optimizer=optimizer,metrics=['sparse_categorical_accuracy'])
    return model
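A hedged usage sketch for LogReg(); the data are invented. The Dense softmax is applied per timestep over the masked 3-D input, so the model returns a class distribution for every timestep while zero-padded steps are ignored.

import numpy as np

X = np.zeros((8, 10, 3), dtype='float32')
X[:, :6, :] = np.random.rand(8, 6, 3)            # 6 valid timesteps per sequence
y = np.random.randint(0, 15, size=(8, 10))       # integer class id per timestep

model = LogReg(k_dim=3, k_class=15)
model.fit(X, y, epochs=1)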
Code Example #13
def FFRegressor (X,k_hidden,k_layers,seed=42):
    
    """
    FF regressor for individual difference
    
    Parameters
    ---------
    X: tensor (batch x time x feat)
    k_layers: int, number of hidden layers
    k_hidden: int, number of units
    
    Returns
    -------
    model: compiled model
    """
    
    tf.random.set_seed(seed)
    input_layers = [layers.Masking(mask_value=0.0, input_shape = [X.shape[-2], X.shape[-1]])]
    
    hidden_layers = []
    for ii in range(k_layers):
        hidden_layers.append(layers.Dense(k_hidden,activation='relu'))
    
    output_layer = [layers.Dense(1,activation='linear')]

    model = keras.models.Sequential(input_layers+hidden_layers+output_layer)
    
    optimizer = keras.optimizers.Adam()
    model.compile(loss='mse', optimizer=optimizer)
    return model
Code Example #14
def FFClassifier(X,k_hidden,k_layers,k_class,seed=42):
    '''
    Feed-forward network classifier
    
    Parameters
    ----------
    X: tensor (batch x time x feat)
    k_layers: int, number of hidden layers
    k_hidden: int, number of units
    k_class: int, number of classes
    
    Returns
    -------
    model: compiled model
    '''
        
    tf.random.set_seed(seed)
    input_layers = [layers.Masking(mask_value=0.0, input_shape = [X.shape[-2], X.shape[-1]])]
    
    hidden_layers = []
    for ii in range(k_layers):
        hidden_layers.append(layers.Dense(k_hidden,activation='relu'))
    
    output_layer = [layers.Dense(k_class,activation='softmax')]

    model = keras.models.Sequential(input_layers+hidden_layers+output_layer)
    
    optimizer = keras.optimizers.Adam()
    model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                      optimizer=optimizer,metrics=['sparse_categorical_accuracy'])
    return model
Code Example #15
def TCNClassifier (X, k_hidden, k_wind, k_class,seed=42):
    '''
    TCN classifier
    
    Parameters
    ----------
    X: tensor (batch x time x feat)
    k_hidden: int, number of filters
    k_wind: int, kernel size
    k_class: int, number of classes
    
    Returns
    -------
    model: compiled model
    '''
    
    tf.random.set_seed(seed)
    input_layers = [layers.Masking(mask_value=0.0, 
                                   input_shape = [None, X.shape[-1]])]
    hidden_layers = [layers.Conv1D(filters=k_hidden,kernel_size=k_wind,
                                   strides=1,padding='same',activation="relu")]
    
    output_layer = [layers.TimeDistributed(layers.Dense(k_class,activation='softmax'))]

    model = keras.models.Sequential(input_layers+hidden_layers+output_layer)
    
    optimizer = keras.optimizers.Adam()
    model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizer,metrics=['sparse_categorical_accuracy'])
    return model
Code Example #16
File: encoder.py  Project: neoTCR/cu-tsp
 def _encoder(x, mask=None):
     mask = layers.Lambda(lambda inp: tf.cast(
         tf.expand_dims(inp, axis=-1), tf.float32))(mask)
     x = layers.multiply([x, mask])
     x = layers.Masking(mask_value=0., input_shape=(
         None,
         input_dim,
     ))(x)
     for i in range(num_layers):
         y = x
         if batch_norm:
             x = layers.BatchNormalization(axis=-1)(x)
         if 'CuDNN' not in RNN:
             layer = eval(f'layers.{RNN}')(hid_dim,
                                           dropout=dropout,
                                           return_sequences=True)
         else:
             layer = eval(f'layers.{RNN}')(hid_dim,
                                           return_sequences=True)
         if bidirectional:
             dim = input_dim if i == 0 else hid_dim * 2
             layer = layers.Bidirectional(layer,
                                          input_shape=(
                                              None,
                                              dim,
                                          ))
         x = layer(x) if 'CuDNN' not in RNN else layer(x, mask=None)
         if residual:
             x = layers.add([x, y])
     return x
Code Example #17
def create(maskValue=None) -> Model:
    Input1 = Input(shape=(34,), name='static')
    Dense1 = layers.Dense(35, activation='relu')(Input1)
    Dense2 = layers.Dense(35, activation='relu')(Dense1)
    Dropout1 = layers.Dropout(0.5)(Dense2)

    Input2 = Input(shape=(None, 8), name='timeSeries')
    Masking1 = layers.Masking(mask_value=maskValue)(Input2)
    RNN1 = layers.LSTM(9, return_sequences=True)(Masking1)
    RNN2 = layers.LSTM(9)(RNN1)

    concatenated = layers.concatenate([Dropout1, RNN2], axis=-1)
    Dense4 = layers.Dense(45, activation='relu')(concatenated)
    Dense5 = layers.Dense(45, activation='relu')(Dense4)
    Dropout2 = layers.Dropout(0.5)(Dense5)
    output = layers.Dense(1, activation='sigmoid')(Dropout2)

    model = Model([Input1, Input2], output)

    METRICS = [
        keras.metrics.TruePositives(name='tp'),
        keras.metrics.FalsePositives(name='fp'),
        keras.metrics.TrueNegatives(name='tn'),
        keras.metrics.FalseNegatives(name='fn'),
        keras.metrics.BinaryAccuracy(name='accuracy'),
        keras.metrics.Precision(name='precision'),
        keras.metrics.Recall(name='recall'),
        keras.metrics.AUC(name='auc'),
    ]

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=METRICS)

    return model
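A hedged usage sketch for create() above; the arrays are invented. The two named inputs are passed as a dict, and the time-series branch is padded with the chosen maskValue so the LSTMs skip those steps.

import numpy as np

model = create(maskValue=0.0)
static = np.random.rand(16, 34)                       # 'static' input
series = np.zeros((16, 20, 8), dtype='float32')       # 'timeSeries' input, zero-padded
series[:, :12, :] = np.random.rand(16, 12, 8)
labels = np.random.randint(0, 2, size=(16, 1))
model.fit({'static': static, 'timeSeries': series}, labels, epochs=1)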
Code Example #18
    def __init__(self,
                 n_feat=13,
                 n_cells=1,
                 gru_size=5,
                 fc_sizes="80",
                 Wemb_size=30,
                 dropout=0.5,
                 mask_value=None,
                 activation='sigmoid'):
        super(GRU, self).__init__()

        fc_sizes = ast.literal_eval(fc_sizes)
        self.dropout = dropout

        if mask_value is not None:
            self.mask_value = mask_value
            self.mask = L.Masking(mask_value=np.float32(mask_value),
                                  name="masking")
        else:
            self.mask = None

        self.Wemb = L.Dense(units=Wemb_size,
                            activation=None,
                            use_bias=False,
                            name="Embedding")

        self.output = L.Dense(units=1, activation=activation, name="y")

        self.rnn = self.gru_graph(gru_size, n_cells)
        self.fc = self.fc_graph(fc_sizes)

        x = keras.Input(shape=(None, n_feat))
        y = self.forward(x)

        self.model = keras.Model(inputs=x, outputs=y)
Code Example #19
def create_recurrent_model(
    input_size: Tuple[int, ...]
) -> Tuple[Union[tf.Tensor, List[tf.Tensor]], tf.Tensor]:
    """
    Creates a recurrent neural network with 4 stacked LSTMs, each with a hidden state of size 64.

    The output layer and activation are omitted, as they are added by the wrapper function.
    """
    input_cont = keras.Input((2, ))
    input_rec = keras.Input(input_size)

    masked = layers.Masking()(input_rec)
    lstm1 = layers.LSTM(64, return_sequences=True)(masked)
    lstm2 = layers.LSTM(64, return_sequences=True)(lstm1)
    lstm3 = layers.LSTM(64, return_sequences=True)(lstm2)
    lstm4 = layers.LSTM(64)(lstm3)

    conc = layers.Concatenate()([input_cont, lstm4])

    dense1 = layers.Dense(1024, activation='relu')(conc)
    drop5 = layers.Dropout(0.2)(dense1)
    dense2 = layers.Dense(1024, activation='relu')(drop5)
    drop6 = layers.Dropout(0.2)(dense2)
    out = layers.Dense(512, activation='relu')(drop6)

    # Omit final layer as it is added by the wrapper function
    # out = layers.Dense(1, activation='sigmoid')(dense4)

    return [input_cont, input_rec], out
Code Example #20
def TCNRegressor (X, k_hidden, k_wind, seed=42):
    '''
    TCN regressor
    
    Parameters
    ----------
    X: tensor (batch x time x feat)
    k_hidden: int, number of filters
    k_wind: int, kernel size
    
    Returns
    -------
    model: compiled model
    '''
    
    tf.random.set_seed(seed)
    input_layers = [layers.Masking(mask_value=0.0, 
                                   input_shape = [None, X.shape[-1]])]
    hidden_layers = [layers.Conv1D(filters=k_hidden,kernel_size=k_wind,
                                   strides=1,padding='same',activation='relu')]
    
    output_layer = [layers.TimeDistributed(layers.Dense(1,activation='linear'))]

    model = keras.models.Sequential(input_layers+hidden_layers+output_layer)
    
    optimizer = keras.optimizers.Adam()
    
    model.compile(loss='mse',optimizer=optimizer)
    return model
Code Example #21
    def _build_model(self, x, y):
        """Construct the predictive model using feature and label statistics.
    
    Args:
      - x: temporal feature
      - y: labels
      
    Returns:
      - model: predictor model
    """
        # Parameters
        dim = len(x[0, 0, :])
        max_seq_len = len(x[0, :, 0])

        model = tf.keras.Sequential()
        model.add(
            layers.Masking(mask_value=-1., input_shape=(max_seq_len, dim)))

        # Stack multiple layers
        for _ in range(self.n_layer - 1):
            model = rnn_sequential(model,
                                   self.model_type,
                                   self.h_dim,
                                   return_seq=True)

        dim_y = len(y.shape)
        if dim_y == 2: return_seq_bool = False
        elif dim_y == 3: return_seq_bool = True
        else:
            raise ValueError('Dimension of y {} is not 2 or 3.'.format(
                str(dim_y)))

        model = rnn_sequential(model,
                               self.model_type,
                               self.h_dim,
                               return_seq_bool,
                               name='intermediate_state')
        self.adam = tf.keras.optimizers.Adam(learning_rate=self.learning_rate,
                                             beta_1=0.9,
                                             beta_2=0.999,
                                             amsgrad=False)

        if self.task == 'classification':
            if dim_y == 3:
                model.add(
                    layers.TimeDistributed(
                        layers.Dense(y.shape[-1], activation='sigmoid')))
            elif dim_y == 2:
                model.add(layers.Dense(y.shape[-1], activation='sigmoid'))
            model.compile(loss=binary_cross_entropy_loss, optimizer=self.adam)
        elif self.task == 'regression':
            if dim_y == 3:
                model.add(
                    layers.TimeDistributed(
                        layers.Dense(y.shape[-1], activation='linear')))
            elif dim_y == 2:
                model.add(layers.Dense(y.shape[-1], activation='linear'))
            model.compile(loss=mse_loss, optimizer=self.adam, metrics=['mse'])

        return model
Code Example #22
def create_model():
    model = keras.models.Sequential()
    # Add a Masking layer
    model.add(layers.Masking(mask_value=0.0, input_shape=(2, 2)))
    # Add a plain SimpleRNN layer
    rnn_layer = layers.SimpleRNN(50, return_sequences=False)
    model.add(rnn_layer)
    model.add(Dense(300, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(30, activation='relu'))
    # Multiple labels
    # model.add(Dense(10, activation='sigmoid'))
    # Single label
    model.add(Dense(10, activation='sigmoid'))

    adam = keras.optimizers.Adam(lr=0.05,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0,
                                 amsgrad=False)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_accuracy'])
    return model
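A tiny check for create_model() above, not part of the original: with mask_value=0.0 and input_shape=(2, 2), a timestep whose two features are both zero is skipped by the SimpleRNN.

import numpy as np

model = create_model()
x = np.array([[[0.5, 1.0],
               [0.0, 0.0]]], dtype='float32')    # second timestep is all zeros -> masked
print(model.predict(x).shape)                    # (1, 10)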
Code Example #23
File: vanilla_lstm.py  Project: qnl/qnl_nonmarkov_ml
    def build_model(self):
        self.model = tf.keras.Sequential()

        # Mask it
        self.model.add(
            layers.Masking(mask_value=self.mask_value,
                           input_shape=(self.sequence_length,
                                        self.num_features)))

        # Add an LSTM layer
        self.model.add(
            layers.LSTM(
                self.lstm_neurons,
                batch_input_shape=(self.sequence_length, self.num_features),
                dropout=0.0,  #self.init_dropout, # Dropout of the hidden state
                stateful=False,
                kernel_regularizer=tf.keras.regularizers.l2(
                    self.l2_regularization),  # regularize input weights
                recurrent_regularizer=tf.keras.regularizers.l2(
                    self.l2_regularization),  # regularize recurrent weights
                bias_regularizer=tf.keras.regularizers.l2(
                    self.l2_regularization),  # regularize bias weights
                return_sequences=True))

        # Add a dropout layer
        # self.model.add(layers.TimeDistributed(layers.Dropout(self.init_dropout)))

        # Cast to the output
        self.model.add(layers.TimeDistributed(layers.Dense(6)))

        self.model.summary()
Code Example #24
File: lstur.py  Project: mindis/recommender
    def _build_newsencoder(self, embedding_layer):
        """The main function to create news encoder of LSTUR.

        Args:
            embedding_layer(obj): a word embedding layer.
        
        Return:
            obj: the news encoder of LSTUR.
        """
        hparams = self.hparams
        sequences_input_title = keras.Input(shape=(hparams.doc_size, ),
                                            dtype="int32")
        embedded_sequences_title = embedding_layer(sequences_input_title)

        y = layers.Dropout(hparams.dropout)(embedded_sequences_title)
        y = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(
                seed=self.seed),
        )(y)
        y = layers.Dropout(hparams.dropout)(y)
        y = layers.Masking()(
            OverwriteMasking()([y, ComputeMasking()(sequences_input_title)]))
        pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y)

        model = keras.Model(sequences_input_title,
                            pred_title,
                            name="news_encoder")
        return model
Code Example #25
    def make_gru_network(self):

        
        x0 = tf.keras.Input(shape=[None, self.num_channels])

        x = layers.Masking(mask_value=-1.0)(x0)

        x = tf.keras.layers.GaussianNoise(0.1)(x)

        x = layers.BatchNormalization()(x)
        
        x_e, x_h_fwd, x_h_bwd = layers.Bidirectional(layers.GRU(units=512, activation='tanh', use_bias=False, kernel_initializer="glorot_normal", return_sequences=True, return_state=True), name="bi_gru")(x)
        x_e = layers.Dropout(self.drop_prob)(x_e)
        x_h_fwd = layers.Dropout(self.drop_prob)(x_h_fwd)
        x_h_bwd = layers.Dropout(self.drop_prob)(x_h_bwd)

        x_a_fwd, w_a_fwd = BahdanauAttention(1024)(x_h_fwd, x_e)
        x_a_bwd, w_a_bwd = BahdanauAttention(1024)(x_h_bwd, x_e)

        x = tf.concat([x_h_fwd, x_a_fwd, x_h_bwd, x_a_bwd], axis=-1)

        x = layers.Dense(1, activation='sigmoid', use_bias=False, name='prediction')(x)
        x = tf.math.add(tf.math.multiply(x, 90.0), 190.0)

        return tf.keras.Model(inputs=x0, outputs=x)
Code Example #26
File: lstm.py  Project: ZhiliangWu/etips
def build_lstm_classifier(timesteps=32,
                          feature_size=784,
                          output_shape=3,
                          repr_size=64,
                          activation='tanh',
                          inp_drop=0.0,
                          re_drop=0.0,
                          l2_coef=1e-3,
                          lr=3e-4):

    seq_inputs = layers.Input(shape=(timesteps, feature_size),
                              name='Sequential_Input')
    x = layers.Masking(mask_value=0, name='Masking')(seq_inputs)
    x = layers.LSTM(repr_size,
                    activation=activation,
                    use_bias=True,
                    dropout=inp_drop,
                    recurrent_dropout=re_drop,
                    return_sequences=False,
                    name='Sequential_Representation')(x)
    class_pred = layers.Dense(output_shape,
                              activation='softmax',
                              use_bias=True,
                              kernel_regularizer=l2(l2_coef),
                              name='Class_Prediction')(x)

    m = Model(inputs=[seq_inputs], outputs=class_pred)
    m.compile(optimizer=Adam(lr=lr),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

    print('model is built and compiled')

    return m
Code Example #27
 def get_line_model():
     line_input = layers.Input(shape=(LINE_LEN, INPUT_DIM))
     masking = layers.Masking(0)(line_input)
     bi_seq = layers.Bidirectional(layers.GRU(128),
                                   merge_mode='sum')(masking)
     bi_seq = layers.BatchNormalization()(bi_seq)
     bi_seq = layers.Activation('relu')(bi_seq)
     return line_input, bi_seq
Code Example #28
def catNetwork(trackShape, trackCategories):
    '''

    Track category classifier taking input with the same shape as the tag network, using a recurrent layer.
    Outputs are returned per event as shape (nBatch, nTracks, nCategories).

    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    input_1 (InputLayer)         (None, 100, 18)           0
    _________________________________________________________________
    mask (Masking)               (None, 100, 18)           0
    _________________________________________________________________
    td_dense1 (TimeDistributed)  (None, 100, 32)           608
    _________________________________________________________________
    track_gru (GRU)              (None, 100, 32)           6240
    _________________________________________________________________
    noseq_gru (GRU)              (None, 100, 32)           6240
    _________________________________________________________________
    time_distributed_1 (TimeDist (None, 100, 32)           1056
    _________________________________________________________________
    time_distributed_2 (TimeDist (None, 100, 32)           1056
    _________________________________________________________________
    outputCat (Dense)            (None, 100, 4)            132
    =================================================================
    Total params: 15,332
    Trainable params: 15,332
    Non-trainable params: 0
    _________________________________________________________________

    '''

    trackInput = Klayers.Input(trackShape)
    tracks = Klayers.Masking(mask_value=-999, name='mask')(trackInput)

    tracks = Klayers.TimeDistributed(Klayers.Dense(32, activation='relu'),
                                     name='td_dense1')(tracks)

    tracks = Klayers.GRU(32,
                         activation='relu',
                         return_sequences=True,
                         name='track_gru')(tracks)
    tracks = Klayers.GRU(32,
                         activation='relu',
                         return_sequences=True,
                         recurrent_dropout=0.5,
                         name='noseq_gru')(tracks)

    tracks = Klayers.TimeDistributed(
        Klayers.Dense(32, activation='relu', name='out_dense_1'))(tracks)
    tracks = Klayers.TimeDistributed(
        Klayers.Dense(32, activation='relu', name='out_dense_4'))(tracks)

    outputCat = Klayers.Dense(trackCategories,
                              activation='softmax',
                              name='outputCat')(tracks)

    return Model(inputs=trackInput, outputs=outputCat)
Code Example #29
 def __init__(self):
     super(LSTM, self).__init__()
     # Define a Masking Layer with -1 as mask.
     self.masking = layers.Masking(mask_value=masking_val)
     # Define a LSTM layer to be applied over the Masking layer.
     # Dynamic computation will automatically be performed to ignore -1 values.
     self.lstm = layers.LSTM(units=num_units)
     # Output fully connected layer (2 classes: linear or random seq).
     self.out = layers.Dense(num_classes)
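The snippet above only declares the layers; a call() along the following lines (a sketch, assuming masking_val, num_units and num_classes are module-level constants and tf is imported) wires them together so the LSTM skips timesteps equal to the mask value.

 def call(self, x, is_training=False):
     # Apply the mask first; the LSTM then ignores the masked timesteps.
     x = self.masking(x)
     x = self.lstm(x)
     x = self.out(x)
     if not is_training:
         # Return probabilities at inference time, raw logits while training.
         x = tf.nn.softmax(x)
     return x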
Code Example #30
def GRUEncoder(X, gru_model_path, k_layers=1, k_hidden=32, k_dim = 3,
               k_class = 15,
               l2=0.001, dropout=1e-6, lr=0.006, seed=42):
    
    '''
    GRU Encoder: classification after supervised dim reduction
    
    Parameters
    ----------
    X: tensor (batch x time x feat)
    k_layers: int, number of hidden layers
    k_hidden: int, number of units
    k_dim: int, reduce to k_dim
    k_class: int, number of classes
    
    Returns
    -------
    model: compiled model
    '''
    
    tf.random.set_seed(seed)
    regularizer = keras.regularizers.l2(l2)
    
    ''' 
    Transfer Learning
    -----------------
    Using pretrained gru model for finetuning DR_layer 
    '''
    gru_model = keras.models.load_model(gru_model_path)
    gru_model.trainable = False
    
    '''
    For masking, refer: 
        https://www.tensorflow.org/guide/keras/masking_and_padding
        https://gist.github.com/ragulpr/601486471549cfa26fe4af36a1fade21
    '''
    input_layers = [layers.Masking(mask_value=0.0, 
                                   input_shape = [None, X.shape[-1]])]
    
    hidden_layers = [gru_model.layers[1]]
        
    DR_layer = [layers.TimeDistributed(layers.Dense(k_dim,activation='linear'))]
    output_layer = [layers.TimeDistributed(layers.Dense(k_class,activation='softmax'))]
    
    optimizer = keras.optimizers.Adam(lr=lr)
    
    model = keras.models.Sequential(input_layers +
                                    hidden_layers +
                                    DR_layer +
                                    output_layer)
    
    model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizer,metrics=['sparse_categorical_accuracy'])
    
    return model