Example #1
def CRNN1_1D(input_shape, n_classes):

    X_input = Input(input_shape)

    X = Lambda(lambda q: expand_dims(q, -1), name='expand_dims')(X_input)

    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(8)(X)

    X = Conv1D(32, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(8)(X)

    X = Conv1D(32, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(6)(X)

    X = CuDNNGRU(32, return_sequences=True)(X)
    X = Dropout(0.1)(X)
    X = CuDNNGRU(32, return_sequences=True)(X)
    X = Dropout(0.1)(X)
    X = Flatten()(X)

    X = Dense(64)(X)
    X = Dropout(0.5)(X)
    X = Activation(relu)(X)

    X = Dense(n_classes, activation=softmax)(X)

    model = Model(inputs=X_input, outputs=X)

    return model
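
A minimal usage sketch for CRNN1_1D above. The input length of 16000 samples and the 10 classes are hypothetical values, and a GPU is required at run time because CuDNNGRU has no CPU implementation:

# Hypothetical values: a 1-D signal of 16000 samples, 10 target classes.
model = CRNN1_1D(input_shape=(16000,), n_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()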
Example #2
File: lstm.py Project: YBHwang/DeepNLP
def model_lstm_atten(embedding_matrix):

    inp = Input(shape=(maxlen, ))
    x = Embedding(max_features,
                  embed_size,
                  weights=[embedding_matrix],
                  trainable=False)(inp)
    x = SpatialDropout1D(0.1)(x)
    x = Bidirectional(CuDNNLSTM(40, return_sequences=True))(x)
    y = Bidirectional(CuDNNGRU(40, return_sequences=True))(x)

    atten_1 = Attention(maxlen)(x)  # skip connect
    atten_2 = Attention(maxlen)(y)
    avg_pool = GlobalAveragePooling1D()(y)
    max_pool = GlobalMaxPooling1D()(y)

    conc = concatenate([atten_1, atten_2, avg_pool, max_pool])
    conc = Dense(16, activation="relu")(conc)
    conc = Dropout(0.1)(conc)
    outp = Dense(1, activation="sigmoid")(conc)

    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[f1])

    return model
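
The function above relies on several names defined elsewhere in the project: maxlen, max_features, embed_size, embedding_matrix, the custom Attention layer, and the f1 metric. A hedged sketch of the kind of setup it assumes, with purely hypothetical values:

import numpy as np

maxlen = 70            # hypothetical maximum sequence length
max_features = 50000   # hypothetical vocabulary size
embed_size = 300       # hypothetical word-vector dimensionality
# Placeholder; the real matrix would hold pre-trained GloVe/fastText vectors.
embedding_matrix = np.zeros((max_features, embed_size), dtype='float32')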
Example #3
File: ml.py Project: mikepatel/Beamforming
def build_model():
    gpu_check = tf.test.is_gpu_available()

    m = Sequential()

    #potentially put in dense or embedding layer

    if gpu_check:
        m.add(
            CuDNNGRU(
                units=512,
                # train_data has shape (batch_size, num_time_steps, num_features);
                # input_shape omits the batch dimension.
                input_shape=(train_data.shape[1], train_data.shape[2]),
                return_sequences=True))

    else:  # no GPU
        m.add(
            GRU(
                units=512,
                # train_data has shape (batch_size, num_time_steps, num_features);
                # input_shape omits the batch dimension.
                input_shape=(train_data.shape[1], train_data.shape[2]),
                return_sequences=True))

    m.add(Flatten())

    m.add(Dense(units=4))

    # configure how model will be trained
    # define loss function and optimizer choices
    m.compile(loss=MSE, optimizer=Adam(), metrics=["accuracy"])

    m.summary()  # prints out architecture of network
    return m
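
A hedged training sketch for build_model; the arrays and shapes below are hypothetical stand-ins, since the real train_data is loaded elsewhere in the Beamforming project:

import numpy as np

# Hypothetical data: 32 sequences, 100 time steps, 8 features per step.
train_data = np.random.rand(32, 100, 8).astype('float32')
train_targets = np.random.rand(32, 4).astype('float32')  # model ends in Dense(units=4)

m = build_model()  # reads the global train_data to derive its input shape
m.fit(train_data, train_targets, batch_size=8, epochs=2)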
Example #4
    def ModelCreator(self, input_nodes):
        input_ = Input(shape=(input_nodes, ))

        embd_seq = Embedding(
            len(self.dictionary) + 1,
            output_dim=300,  # word-vector dimensionality
            weights=[self.embedding_matrix],
            input_length=config.sequence_max_len,  # truncation length for a text or sentence
            trainable=False,
            mask_zero=False)(input_)

        gru = Bidirectional(CuDNNGRU(256))(embd_seq)
        # gru = Lambda(lambda x: expand_dims(x, axis=1))(gru)

        outputs_list = []
        for i in range(20):
            # cap = Capsule(10,16,5,True)(gru)
            # cap = Flatten()(cap)
            out = Dense(64)(gru)
            out = Dropout(0.5)(out)
            out = Dense(4, activation='softmax')(out)
            outputs_list.append(out)

        model = Model(inputs=input_, outputs=outputs_list, name='base')
        print(model.summary())
        return model
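
A hedged compile sketch for the 20-head model returned above, assumed to live in another method of the same class; the optimizer and per-head losses are illustrative assumptions, not taken from the project:

        # Hypothetical: one 4-way categorical cross-entropy loss per output head.
        model = self.ModelCreator(input_nodes=config.sequence_max_len)
        model.compile(optimizer='adam',
                      loss=['categorical_crossentropy'] * 20,
                      metrics=['accuracy'])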
Example #5
def initialise_model():
    global model, bottleneck_mdl
    from tensorflow.keras.layers import CuDNNGRU, Dropout, Dense, Embedding, CuDNNLSTM, Input, add

    #===========================================================#
    #=========================[MODEL]===========================#
    #===========================================================#
    bottleneck_input = Input(shape=(2048, ))
    fe1 = Dropout(0.5)(bottleneck_input)
    fe2 = Dense(256, activation=tf.nn.selu)(fe1)

    # Partial caption input
    cap_inputs = Input(shape=(72, ))
    # se1 was pretrained with the GloVe model (weights restored below via load_weights)
    se1 = Embedding(5411, 200)(cap_inputs)
    se2 = Dropout(0.5)(se1)
    se3 = CuDNNGRU(256, return_sequences=True)(se2)
    se4 = CuDNNLSTM(256)(se3)

    decoder1 = add([fe2, se4])
    decoder2 = Dense(256, activation=tf.nn.selu)(decoder1)
    outputs = Dense(5411, activation=tf.nn.softmax)(decoder2)

    model = Model(inputs=[bottleneck_input, cap_inputs], outputs=outputs)

    model.load_weights('model_xeon.h5')
    #===========================================================#
    #=========================[MODEL]===========================#
    #===========================================================#

    incep_mdl = InceptionV3(weights="imagenet")
    bottleneck_mdl = Model(incep_mdl.input, incep_mdl.layers[-2].output)
Example #6
File: models.py Project: zztin/medaka
def build_model(feature_len,
                num_classes,
                gru_size=128,
                classify_activation='softmax',
                time_steps=None,
                allow_cudnn=True):
    """Build a bidirectional GRU model with CuDNNGRU support.

    The CuDNNGRU implementation is claimed to give a 7x speed-up on GPU.
    The function will build a model capable of running on GPU with
    CuDNNGRU provided a) a GPU is present, and b) the option has been
    allowed by the `allow_cudnn` argument; otherwise a compatible
    (but not CuDNNGRU-accelerated) model is built.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.

    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, CuDNNGRU, Bidirectional

    # Determine whether to use CuDNNGRU or not
    cudnn = False
    if tf.test.is_gpu_available(cuda_only=True) and allow_cudnn:
        cudnn = True
    logger.info("Building model with cudnn optimization: {}".format(cudnn))

    model = Sequential()
    input_shape = (time_steps, feature_len)
    for i in [1, 2]:
        name = 'gru{}'.format(i)
        # Options here are to be mutually compatible: train with CuDNNGRU
        # but allow inference with GRU (on cpu).
        # https://gist.github.com/bzamecnik/bd3786a074f8cb891bc2a397343070f1
        if cudnn:
            gru = CuDNNGRU(gru_size, return_sequences=True, name=name)
        else:
            gru = GRU(gru_size,
                      reset_after=True,
                      recurrent_activation='sigmoid',
                      return_sequences=True,
                      name=name)
        model.add(Bidirectional(gru, input_shape=input_shape))

    # see keras #10417 for why we specify input shape
    model.add(
        Dense(num_classes,
              activation=classify_activation,
              name='classify',
              input_shape=(time_steps, 2 * gru_size)))

    return model
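
A hedged sketch of the train-with-CuDNNGRU, infer-with-GRU workflow that the docstring and the linked gist describe; the feature/class counts and the weights file name are hypothetical:

# On a GPU machine: build the CuDNNGRU variant and save its weights.
gpu_model = build_model(feature_len=10, num_classes=5, allow_cudnn=True)
gpu_model.save_weights('gru_weights.h5')  # hypothetical file name

# On a CPU machine: the plain-GRU variant can load the same weights, because
# reset_after=True and recurrent_activation='sigmoid' match the CuDNN kernel.
cpu_model = build_model(feature_len=10, num_classes=5, allow_cudnn=False)
cpu_model.load_weights('gru_weights.h5')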
Example #7
def build_model(chunk_size,
                feature_len,
                num_classes,
                gru_size=128,
                classify_activation='softmax'):
    """Builds a bidirectional GRU model. Uses CuDNNGRU for additional
    speed-up on GPU (claimed 7x).

    :param chunk_size: int, number of pileup columns in a sample.
    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.

    :returns: `keras.models.Sequential` object.

    """
    import tensorflow as tf
    from tensorflow.keras import backend as K
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, CuDNNGRU, Bidirectional

    # if we can see a gpu, use CuDNNGRU for speed
    cudnn = False
    if tf.test.is_gpu_available(cuda_only=True):
        cudnn = True

    logger.info("With cudnn: {}".format(cudnn))

    model = Sequential()
    input_shape = (chunk_size, feature_len)
    for i in [1, 2]:
        name = 'gru{}'.format(i)
        # Options here are to be mutually compatible: train with CuDNNGRU
        # but allow inference with GRU (on cpu).
        # https://gist.github.com/bzamecnik/bd3786a074f8cb891bc2a397343070f1
        if cudnn:
            gru = CuDNNGRU(gru_size, return_sequences=True, name=name)
        else:
            gru = GRU(gru_size,
                      reset_after=True,
                      recurrent_activation='sigmoid',
                      return_sequences=True,
                      name=name)
        model.add(Bidirectional(gru, input_shape=input_shape))

    # see keras #10417 for why we specify input shape
    model.add(
        Dense(num_classes,
              activation=classify_activation,
              name='classify',
              input_shape=(chunk_size, 2 * gru_size)))

    return model
Example #8
def gru(units):

    if tf.test.is_gpu_available():
        return CuDNNGRU(units,
                        return_sequences=False,
                        return_state=False,
                        recurrent_initializer='glorot_uniform')
    else:
        return GRU(units,
                   return_sequences=False,
                   return_state=False,
                   recurrent_activation='sigmoid',
                   recurrent_initializer='glorot_uniform')
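
A hedged usage sketch for the gru factory above; the layer sizes are hypothetical, and because both branches use return_sequences=False the layer returns only the final hidden state:

# Hypothetical binary classifier: the GRU's final state feeds one sigmoid unit.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(10000, 64, input_length=50),
    gru(128),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])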
Example #9
def CRNN_v1(input_shape, n_classes):

    X_input = Input(input_shape)

    X = Lambda(lambda q: expand_dims(q, -1), name='expand_dims')(X_input)

    X = Permute((2, 1, 3))(X)

    X = Conv2D(32,
               kernel_size=[5, 5],
               strides=[2, 2],
               activation=relu,
               name='conv_1')(X)

    X = Conv2D(1,
               kernel_size=[1, 1],
               strides=[1, 1],
               activation=relu,
               name='conv_1x1')(X)

    X = Lambda(lambda q: squeeze(q, -1), name='squeeze_last_dim')(X)

    X = CuDNNGRU(32, return_sequences=True)(X)

    X = CuDNNGRU(32, return_sequences=True)(X)

    X = Flatten()(X)

    X = Dense(64)(X)
    X = Dropout(0.5)(X)
    X = Activation(relu)(X)

    X = Dense(n_classes, activation=softmax)(X)

    model = Model(inputs=X_input, outputs=X)

    return model
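
A minimal usage sketch for CRNN_v1; because of the expand_dims followed by Permute((2, 1, 3)), the function expects a 2-D, spectrogram-like input, and the shape and class count below are hypothetical:

# Hypothetical spectrogram input: 40 frequency bins x 98 time frames, 12 classes.
model = CRNN_v1(input_shape=(40, 98), n_classes=12)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])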
Example #10
def get_cudnngru(shape, dropout):
    model = Sequential()
    with tf.variable_scope("CuDNNGRU1", reuse=tf.AUTO_REUSE) as scope:
        model.add(CuDNNGRU(64, input_shape=(shape), return_sequences=True))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))

    with tf.variable_scope("CuDNNGRU2", reuse=tf.AUTO_REUSE) as scope:
        model.add(CuDNNGRU(64, return_sequences=True))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))

    with tf.variable_scope("CuDNNGRU3", reuse=tf.AUTO_REUSE) as scope:
        model.add(CuDNNGRU(64, return_sequences=True))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))

    with tf.variable_scope("CuDNNGRU4", reuse=tf.AUTO_REUSE) as scope:
        model.add(CuDNNGRU(64, return_sequences=False))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))

    with tf.variable_scope("DENSE1", reuse=tf.AUTO_REUSE) as scope:
        model.add(Dense(128, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))

    with tf.variable_scope("DENSE2", reuse=tf.AUTO_REUSE) as scope:
        model.add(Dense(1, activation='sigmoid'))
        opt = tf.keras.optimizers.Adam(lr=1e-2, decay=1e-3)
        model.compile(loss='binary_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        model.summary()

    return model
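
A hedged usage sketch for get_cudnngru; the input shape and dropout rate are hypothetical values:

# Hypothetical input: sequences of 100 time steps with 8 features each.
model = get_cudnngru(shape=(100, 8), dropout=0.3)  # returns an already-compiled binary classifier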
Example #11
    def get_model(self):
        x = Input(shape=(187, 1))
        layer = Convolution1D(16, kernel_size=3, activation=activations.relu, padding="valid")(x)
        layer = Convolution1D(16, kernel_size=3, activation=activations.relu, padding="valid")(layer)
        layer = MaxPool1D(pool_size=2)(layer)
        layer = Dropout(rate=self.dropout)(layer)
        layer = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(layer)
        layer = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(layer)
        layer = MaxPool1D(pool_size=2)(layer)
        layer = Dropout(rate=self.dropout)(layer)
        layer = BatchNormalization()(layer)
        gru = Bidirectional(CuDNNGRU(self.hidden_size, name='rnn'), merge_mode='concat')(layer)
        layer = Dense(self.dense_size, activation=activations.relu, name='dense')(gru)
        y = Dense(self.output_classes, name='out_layer', activation=self.last_activation)(layer)

        model = models.Model(inputs=x, outputs=y, name=self.name)
        return model
Example #12
def my_gru(name, units):
        # If you have a GPU, we recommend using CuDNNGRU (about a 3x speed-up over GRU);
        # the code below selects it automatically.
        if tf.test.is_gpu_available():
            print('### This is CuDNNGRU ###')
            return CuDNNGRU(name=name,
                            units=units,
                            return_sequences=True,
                            return_state=True,
                            recurrent_initializer='glorot_uniform')
        else:
            return GRU(name=name,
                       units=units,
                       return_sequences=True,
                       return_state=True,
                       recurrent_initializer='glorot_uniform',
                       recurrent_activation='sigmoid')
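
A hedged usage sketch showing how my_gru would typically be wired into an encoder; the names and sizes are hypothetical, and with return_state=True the layer returns both the full output sequence and the final state:

# Hypothetical encoder step: embedded_inputs is assumed to be (batch, time, dims).
encoder_gru = my_gru(name='encoder_gru', units=256)
sequence_outputs, final_state = encoder_gru(embedded_inputs)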
Example #13
    def __init__(self, vocab_size, embedding_dim, num_RNN_units, batch_size):
        super(Model, self).__init__()
        self.num_RNN_units = num_RNN_units
        self.batch_size = batch_size

        # layer 1
        self.embedding = Embedding(vocab_size, embedding_dim)

        # layer 2
        if tf.test.is_gpu_available():
            self.gru = CuDNNGRU(self.num_RNN_units,
                                return_sequences=True,
                                return_state=True,
                                recurrent_initializer=glorot_uniform())
        else:
            self.gru = GRU(self.num_RNN_units,
                           return_sequences=True,  # match the CuDNNGRU branch above
                           return_state=True,
                           recurrent_activation=sigmoid,
                           recurrent_initializer=glorot_uniform())

        # layer 3
        self.full_connect = Dense(vocab_size)
Example #14
import numpy as np
import pickle
from scipy.stats import zscore
import datetime
import pytz

np.random.seed(seed=11)

with open('series_34697_1000.pkl', 'rb') as f:
    segments = pickle.load(f)

segments = zscore(segments).astype(np.float32)  # standardize

deep_model = Sequential(name="LSTM-autoencoder")
deep_model.add(CuDNNGRU(80, input_shape=(1000, 1), return_sequences=False))
#deep_model.add(CuDNNGRU(100, return_sequences=False))
deep_model.add(Dense(20, activation=None))
deep_model.add(RepeatVector(1000))
#deep_model.add(CuDNNGRU(100, return_sequences=True))
deep_model.add(CuDNNGRU(80, return_sequences=True))
deep_model.add(TimeDistributed(Dense(1)))
deep_model.compile(optimizer=Adam(lr=5e-3, clipnorm=1.0), loss='mse')

#deep_model.load_weights("model_weights/lstm_autoencoder_2020-01-09_18-20-21.h5")

training_time_stamp = datetime.datetime.now(
    tz=pytz.timezone('Europe/London')).strftime("%Y-%m-%d_%H-%M-%S")

CB = EarlyStopping(monitor='val_loss',
                   min_delta=1e-4,
Example #15
    def build_model():
        in_id = Input(shape=(MAX_SEQ_LENGTH, ), name="input_ids")
        in_mask = Input(shape=(MAX_SEQ_LENGTH, ), name="input_masks")
        in_segment = Input(shape=(MAX_SEQ_LENGTH, ), name="segment_ids")
        bert_inputs = [in_id, in_mask, in_segment]

        bert_cls_output, bert_exp_output = BertLayer(n_fine_tune_layers=10,
                                                     name='bert')(bert_inputs)

        outputs = []
        model_cls, model_exp = None, None

        if 'seq' not in dataset and not exp_only:
            # Classifier output
            dense = Dense(DIM_DENSE_CLS, activation='tanh',
                          name='cls_dense')(bert_cls_output)
            cls_output = Dense(1, activation='sigmoid',
                               name='cls_output')(dense)
            outputs.append(cls_output)
            model_cls = Model(inputs=bert_inputs, outputs=cls_output)
            optimizer = Adam(LEARNING_RATE)
            model_cls.compile(loss=loss['cls_output'],
                              optimizer=optimizer,
                              metrics=[metrics['cls_output']])
        else:
            model_cls = None

        if 'cls' not in dataset and exp_structure != 'none' and not cls_only:
            # Explainer output
            if exp_structure == 'gru':
                gru = CuDNNGRU(NUM_GRU_UNITS_BERT_SEQ,
                               kernel_initializer='random_uniform',
                               return_sequences=True,
                               name='exp_gru_gru')(bert_exp_output)
                exp = Dense(1, activation='sigmoid', name='exp_gru_dense')(gru)
                output_mask = Reshape((MAX_SEQ_LENGTH, 1),
                                      name='exp_gru_reshape')(in_mask)
                exp_outputs = Multiply(name='exp_output')([output_mask, exp])
            elif exp_structure == 'rnr':
                M1 = Bidirectional(
                    layer=CuDNNLSTM(NUM_INTERVAL_LSTM_WIDTH,
                                    return_sequences=True,
                                    name='exp_rnr_lstm1'),
                    merge_mode='concat',
                    name='exp_rnr_bidirectional1')(bert_exp_output)
                p_starts = Dense(1,
                                 activation='sigmoid',
                                 name='exp_rnr_starts')(Concatenate(axis=-1)(
                                     [bert_exp_output, M1]))
                start_mask = Reshape((MAX_SEQ_LENGTH, 1))(in_mask)
                p_starts = Multiply()([p_starts, start_mask])

                m1_tilde = Dot(axes=-2)([p_starts, M1])
                M1_tilde = Lambda(
                    lambda x: tf.tile(x, (1, MAX_SEQ_LENGTH, 1)))(m1_tilde)
                x = Multiply()([M1, M1_tilde])
                M2 = Bidirectional(layer=CuDNNLSTM(NUM_INTERVAL_LSTM_WIDTH,
                                                   return_sequences=True,
                                                   name='exp_rnr_lstm2'),
                                   merge_mode='concat',
                                   name='exp_rnr_bidirecitonal2')(
                                       Concatenate(axis=-1)(
                                           [bert_exp_output, M1, M1_tilde, x]))
                p_end_given_start = Dense(MAX_SEQ_LENGTH,
                                          activation='linear',
                                          name='exp_rnr_end')(Concatenate(
                                              axis=-1)([bert_exp_output, M2]))
                end_mask = Lambda(
                    lambda x: tf.tile(x, (1, MAX_SEQ_LENGTH, 1)))(Reshape(
                        (1, MAX_SEQ_LENGTH))(in_mask))
                p_end_given_start = Multiply()([p_end_given_start, end_mask])
                p_end_given_start = Lambda(
                    lambda x: tf.linalg.band_part(x, 0, -1))(p_end_given_start)
                p_end_given_start = Softmax(axis=-1)(p_end_given_start)

                exp_outputs = Concatenate(
                    axis=-1, name='exp_output')([p_starts, p_end_given_start])
            outputs.append(exp_outputs)
            model_exp = Model(inputs=bert_inputs, outputs=exp_outputs)
            optimizer = Adam(LEARNING_RATE)
            model_exp.compile(loss=loss['exp_output'],
                              optimizer=optimizer,
                              metrics=[metrics['exp_output']])
        else:
            model_exp = None

        model = Model(inputs=bert_inputs, outputs=outputs)
        optimizer = Adam(LEARNING_RATE)
        model.compile(loss=loss,
                      loss_weights=loss_weights,
                      optimizer=optimizer,
                      metrics=metrics)

        return model, model_cls, model_exp
Example #16
            np.random.shuffle(self.indexes)


# Training and Validation generators in a 95/5 split
training_generator = DataGenerator(
    segments[:int(np.floor(len(segments) * 0.95))],
    errors[:int(np.floor(len(errors) * 0.95))],
    batch_size=384)
validation_generator = DataGenerator(
    segments[int(np.floor(len(segments) * 0.95)):],
    errors[int(np.floor(len(errors) * 0.95)):],
    batch_size=384)

y_in = Input(shape=(1024, 1))
y_err = Input(shape=(1024, 1))
h_enc = CuDNNGRU(512, return_sequences=True)(y_in)
h_enc = CuDNNGRU(256, return_sequences=True)(h_enc)
h_enc = CuDNNGRU(128, return_sequences=False)(h_enc)
# h_enc = BatchNormalization()(h_enc)
h_enc = Dense(16, activation=None, name='bottleneck')(h_enc)
# h_enc = BatchNormalization()(h_enc)
h_dec = RepeatVector(1024)(h_enc)
h_dec = CuDNNGRU(64, return_sequences=True)(h_dec)
# h_dec = CuDNNGRU(256, return_sequences=True)(h_dec)
h_dec = TimeDistributed(Dense(1))(h_dec)
model = Model(inputs=[y_in, y_err], outputs=h_dec)
model.compile(optimizer=Adam(clipvalue=0.5), loss=chi2(y_err))

# model.load_weights("model_weights/lstm_autoencoder_2020-01-09_18-20-21.h5")

training_time_stamp = datetime.datetime.now(
Example #17
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Embedding, SpatialDropout1D, Dropout, add, concatenate
from tensorflow.keras.layers import CuDNNGRU, Bidirectional, GlobalMaxPooling1D, GlobalAveragePooling1D, Conv1D


sequence_input = Input(shape=(max_seq_size, ), dtype='int32')
embedding_layer = Embedding(total_vocab,
                            embedding_size,
                            weights=[embedding_matrix],
                            input_length=max_seq_size,
                            trainable=False)

x_layer = embedding_layer(sequence_input)
x_layer = SpatialDropout1D(0.2)(x_layer)
x_layer = Bidirectional(CuDNNGRU(64, return_sequences=True))(x_layer)
x_layer = Conv1D(64,
                 kernel_size=2,
                 padding="valid",
                 kernel_initializer="he_uniform")(x_layer)

avg_pool1 = GlobalAveragePooling1D()(x_layer)
max_pool1 = GlobalMaxPooling1D()(x_layer)

x_layer = concatenate([avg_pool1, max_pool1])

preds = Dense(1, activation=sigmoid)(x_layer)

model = Model(sequence_input, preds)
model.summary()
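
The script above stops at model.summary(); a hedged sketch of how it might be compiled and trained, where the optimizer, loss, and training arrays are assumptions rather than part of the original:

# Hypothetical continuation: binary classification on padded integer sequences.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=256, epochs=2, validation_split=0.1)  # x_train/y_train assumed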
Example #18
maxlen = 20
batch_size = 64

dict = eval(open("dictionary.txt").read())
words_dict = list(dict.keys())

filenames = ['train1.tfrecords']
dataset = tf.data.TFRecordDataset(filenames).apply(
    tf.contrib.data.map_and_batch(
        _parse_function, 256, num_parallel_calls=12)).shuffle(4096).prefetch(1)

model = Sequential()
model.add(Embedding(len(words_dict), 50, input_length=maxlen))
# model.add(LSTM(256, return_sequences=True))
# model.add(Dropout(0.1))
model.add(CuDNNGRU(1000, return_sequences=False))
model.add(Dropout(rate=0.20))
model.add(Dense(100, activation='relu'))
model.add(Dense(len(words_dict), activation='softmax'))

optimizer = tf.train.AdamOptimizer(0.001)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
iter = dataset.make_one_shot_iterator()

for i in range(10000):
    print(i)
    t = time.time()
    print(model.train_on_batch(iter))
    print(time.time() - t)
Example #19

# Training and Validation generators in a 95/5 split
training_generator = DataGenerator(
    segments[:int(np.floor(len(segments) * 0.95))],
    errors[:int(np.floor(len(errors) * 0.95))],
    batch_size=384)
validation_generator = DataGenerator(
    segments[int(np.floor(len(segments) * 0.95)):],
    errors[int(np.floor(len(errors) * 0.95)):],
    batch_size=384)

y_in = Input(shape=(1024, 1))
y_err = Input(shape=(1024, 1))

h_enc = CuDNNGRU(64, return_sequences=False)(y_in)
h_enc = Dense(32, activation=None, name='bottleneck')(h_enc)
h_dec = RepeatVector(1024)(h_enc)
h_dec = CuDNNGRU(64, return_sequences=True)(h_dec)
h_dec = TimeDistributed(Dense(1))(h_dec)
model = Model(inputs=[y_in, y_err], outputs=h_dec)
model.compile(optimizer='adam', loss=chi2(y_err))

#model.load_weights("model_weights/lstm_autoencoder_2020-01-09_18-20-21.h5")

training_time_stamp = datetime.datetime.now(
    tz=pytz.timezone('Europe/London')).strftime("%Y-%m-%d_%H-%M-%S")

CB = EarlyStopping(monitor='val_loss',
                   min_delta=1e-5,
                   patience=100,
Example #20
            model.add(
                Conv1D(filters=filters,
                       kernel_size=3,
                       padding='same',
                       activation='relu'))
            model.add(MaxPool1D(pool_size=2))

            for l in range(conv - 1):
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=3,
                           padding='same',
                           activation='relu'))
                model.add(MaxPool1D(pool_size=2))

            model.add(CuDNNGRU(128, return_sequences=True))
            model.add(Dropout(0.2))

            model.add(CuDNNGRU(64, return_sequences=False))
            model.add(Dropout(0.2))

            model.add(Flatten())

            for n in range(dense):
                model.add(Dense(filters, activation="relu"))

            model.add(Dense(1, activation="sigmoid"))

            model.compile(loss="binary_crossentropy",
                          optimizer="adam",
                          metrics=["accuracy"])
Example #21
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    print("Loaded model " + model_name + " from disk")
    # load latest weights into new model
    list_of_files = glob.glob('./data/model/weights-' + model_name + "*.hdf5")
    if len(list_of_files) > 0:
        latest_file = max(list_of_files, key=os.path.getmtime)
        model.load_weights(latest_file)
        epoch_init = int(latest_file.split('epoch')[1].split("-")[0])
        print("loading weights from file", latest_file, "starting from epoch",
              epoch_init)

elif model_name == "basic_CuDNNGRU":
    model = Sequential()
    model.add(CuDNNGRU(10, input_shape=(4096, 1)))
    model.add(Dense(1, activation='linear'))

elif model_name == "basic_CuDNNGRU_reg":
    model = Sequential()
    model.add(
        CuDNNGRU(
            10,
            input_shape=(4096, 1),
            kernel_regularizer=regularizers.l2(0.01),
        ))
    model.add(Dense(1, activation='linear'))

elif model_name == "basic_big_CuDNNGRU_reg":
    model = Sequential()
    model.add(
Example #22
	def make_bi_gru(self, units):
		if tf.test.is_gpu_available() and not self.params['no_gpu']:
			return Bidirectional(CuDNNGRU(units, return_sequences=True), merge_mode='concat')
		else:
			return Bidirectional(GRU(units, return_sequences=True), merge_mode='concat')