Example #1
    def fit(self, eventlog_name):
        import tensorflow as tf
        from tensorflow.contrib.keras.python.keras.engine import Input, Model
        from tensorflow.contrib.keras.python.keras.layers import Dense, GaussianNoise, Dropout

        # load data
        features = self.dataset.load(eventlog_name)

        # parameters
        input_size = features.shape[1]
        hidden_size = input_size * 4  # overcomplete hidden layer, four times the input width

        # input layer
        input_layer = Input(shape=(input_size,), name='input')

        # hidden layers: Gaussian noise on the input, then dropout between dense layers
        hid = Dense(hidden_size, activation=tf.nn.relu)(GaussianNoise(0.1)(input_layer))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))

        # output layer
        output_layer = Dense(input_size, activation='linear')(Dropout(0.5)(hid))

        # build model
        self.model = Model(inputs=input_layer, outputs=output_layer)

        # compile model
        self.model.compile(
            optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
            loss=tf.losses.mean_squared_error
        )

        # train model
        self.model.fit(
            features,
            features,
            batch_size=100,
            epochs=100,
            validation_split=0.2,
        )
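
Since `fit` trains the network to reproduce its own input (`features` is passed as both data and target), this is a denoising autoencoder. A minimal sketch of a typical follow-up, assuming reconstruction error is the quantity of interest; `model` and `features` stand in for `self.model` and the loaded features, and the scoring rule is an illustration rather than part of the original:

import numpy as np

# Hypothetical usage: score each sample by its reconstruction error;
# samples the autoencoder reconstructs poorly receive high scores.
reconstruction = model.predict(features)
scores = np.mean((features - reconstruction) ** 2, axis=1)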
Example #2

# Imports assumed by this fragment (not shown in the original)
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras.losses import binary_crossentropy

# Encoder
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)
z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_sigma])

# Decoder
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

vae = Model(x, x_decoded_mean)

# encoder, from inputs to latent space
encoder = Model(x, z_mean)

# generator, from latent space to reconstructed inputs
decoder_input = Input(shape=(latent_dim, ))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)


def vae_loss(x, x_decoded_mean):
    # reconstruction term plus KL divergence between q(z|x) and the unit Gaussian
    xent_loss = binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(
        1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
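
The `Lambda` layer above calls a `sampling` function that the fragment never defines, and `vae_loss` is never attached to the model. A minimal sketch of the standard reparameterization trick `sampling` presumably implements, followed by a hypothetical compile/fit call; in real code `sampling` must be defined before the `Lambda` layer, and `x_train` is an assumed array:

def sampling(args):
    # Reparameterization trick: z = mean + exp(log_sigma) * epsilon with
    # epsilon ~ N(0, 1), keeping the stochastic node differentiable.
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0.0, stddev=1.0)
    return z_mean + K.exp(z_log_sigma) * epsilon

# Hypothetical training call; x_train is assumed to be prepared elsewhere.
vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.fit(x_train, x_train, batch_size=batch_size, epochs=50, validation_split=0.1)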
Example #3
# Imports assumed by this fragment (not shown in the original)
import os
import numpy as np
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense
from keras.models import Model

embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(256, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(2)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

embedding_matrix = None
embeddings_index = None

for epoch in range(NUM_EPOCHS):
    i = 0
    for j in range(NUM_ROWS_SAVE_TO_TRAIN - NUM_ROWS_SAVE_TO_VAL, lendata,
                   NUM_ROWS_SAVE_TO_TRAIN - NUM_ROWS_SAVE_TO_VAL):
        # load the next pre-saved shard of training data
        x_train = np.load(
            os.path.join(SAVE_DIR, 'data_' + str(i) + '_' + str(j) + '.npy'))
        y_train = np.load(
            os.path.join(SAVE_DIR, 'labels_' + str(i) + '_' + str(j) + '.npy'))
        # the original fragment is cut off here; a plausible continuation
        # trains on the shard and advances the shard window
        model.fit(x_train, y_train, epochs=1)
        i = j
Example #4
# Imports assumed by this fragment (not shown in the original)
import matplotlib.pyplot as plt
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard

x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (4, 4, 8) i.e. 128-dimensional

x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

conv_autoencoder = Model(input_img, decoded)
conv_autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')


batch_size = 128
steps_per_epoch = x_train.shape[0] // batch_size  # integer division; np.int is deprecated
conv_autoencoder.fit(x_train, x_train, epochs=50, batch_size=batch_size,
                     shuffle=True, validation_data=(x_test, x_test),
                     callbacks=[TensorBoard(log_dir='./tf_autoencoder_logs')])


decoded_imgs = conv_autoencoder.predict(x_test)

n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # the original is cut off here; the usual continuation (assuming
    # 28x28 MNIST-style inputs) shows each original above its reconstruction
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
Example #5
# Imports assumed by this fragment (not shown in the original)
from keras.layers import Input, Conv1D, MaxPooling1D, Dropout, Flatten, Dense, concatenate
from keras.models import Model

# ... embedding_layer definition truncated in the original, ending with:
#                             trainable=False)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 15, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)

x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)

model_text = Model(sequence_input, x)
model_gen = Model(sequence_input_gen, x_gen)
print(model_text.output.shape)

print(model_gen.output.shape)
merged = concatenate([model_text.output,model_gen.output])
preds = Dense(len(labels_index), activation='softmax')(merged)

model = Model(inputs=[sequence_input,sequence_input_gen],outputs=preds)


model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
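
The merged model takes two inputs, so training must pass a list of arrays in the same order as `inputs`. A minimal sketch, assuming `x_text`, `x_gen`, and one-hot `y` arrays with matching first dimensions; these names are placeholders, not from the original:

# Hypothetical training call for the two-branch model: x_text feeds
# sequence_input and x_gen feeds sequence_input_gen, matching the Model inputs.
model.fit([x_text, x_gen], y,
          batch_size=128,
          epochs=10,
          validation_split=0.1)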

Example #6
    def fit(self, eventlog_name):

        import numpy as np
        import tensorflow as tf
        from tensorflow.contrib.keras.python.keras.engine import Input, Model
        from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, GRU, Embedding, merge

        features, targets = self.dataset.load(eventlog_name, train=True)
        inputs = []
        layers = []

        with tf.device('/cpu:0'):
            # split attributes
            features = [features[:, :, i] for i in range(features.shape[2])]

            for i, t in enumerate(features):
                voc_size = np.array(self.dataset.attribute_dims[i]) + 1  # indices start at 1, hence +1
                emb_size = np.floor(voc_size / 2.0).astype(int)

                # use a separate name for the Input layer so it does not shadow the loop index
                inp = Input(shape=(None, *t.shape[2:]))
                x = Embedding(input_dim=voc_size, output_dim=emb_size, input_length=t.shape[1], mask_zero=True)(inp)
                inputs.append(inp)
                layers.append(x)

            # merge layers
            x = merge.concatenate(layers)

        x = GRU(64, implementation=2)(x)

        # shared hidden layer
        x = Dense(512, activation=tf.nn.relu)(x)
        x = Dense(512, activation=tf.nn.relu)(Dropout(0.5)(x))

        # hidden layers per attribute
        outputs = []
        for i, l in enumerate(targets):
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(x))
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(o))
            o = Dense(l.shape[1], activation=tf.nn.softmax)(Dropout(0.5)(o))
            outputs.append(o)

        self.model = Model(inputs=inputs, outputs=outputs)

        # compile model

        # Old setting: TensorFlow optimizer
        # self.model.compile(
        #     optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
        #     loss='categorical_crossentropy'
        # )

        # New setting: Keras optimizer
        self.model.compile(
            optimizer='Adadelta',
            loss='categorical_crossentropy'
        )

        # train model
        self.model.fit(
            features,
            targets,
            batch_size=100,
            epochs=100,
            validation_split=0.2,
        )
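
Because the model has one softmax output per attribute, `predict` returns a list of arrays in the same order as `outputs`. A small sketch of reading per-attribute predictions; `features` is the same list of per-attribute inputs used for training:

# Hypothetical usage: predict() yields one probability array per attribute.
predictions = self.model.predict(features)
for attr_index, probs in enumerate(predictions):
    # probs has shape (num_samples, attribute_dimension)
    print('attribute', attr_index, '->', probs.argmax(axis=-1)[:5])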
Example #7
                       padding='same',
                       strides=2,
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(1e-4))(x)
        x = keras.layers.add([x, y])
        x = Activation('relu')(x)

    num_filters = 2 * num_filters

x = AveragePooling2D()(x)
y = Flatten()(x)
outputs = Dense(num_classes,
                activation='softmax',
                kernel_initializer='he_normal')(y)

model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(),
              metrics=['accuracy'])
model.summary()

save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_resnet_model.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)

checkpoint = ModelCheckpoint(filepath=filepath, verbose=1, save_best_only=True)

# Learning-rate decay: reduce the rate when the validation metric plateaus.
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)  # arguments after `factor` restored per the standard Keras ResNet example
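
The snippet defines `checkpoint` and `lr_reducer` but is cut off before they are used; callbacks only take effect when passed to `fit`. A minimal sketch of the training call, assuming CIFAR-10 style `x_train`/`y_train` and `x_test`/`y_test` arrays are prepared elsewhere:

# Hypothetical training call wiring in the callbacks defined above.
model.fit(x_train, y_train,
          batch_size=32,
          epochs=200,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=[checkpoint, lr_reducer])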