def fit(self, eventlog_name):
    """Train a denoising autoencoder on the event log *eventlog_name*.

    Loads the feature matrix via ``self.dataset.load``, builds a
    fully-connected autoencoder (Gaussian noise on the input, dropout
    between hidden layers), trains it to reconstruct its own input, and
    stores the trained network in ``self.model``.
    """
    # TensorFlow is imported lazily so importing this module does not
    # require TF to be installed.
    import tensorflow as tf
    from tensorflow.contrib.keras.python.keras.engine import Input, Model
    from tensorflow.contrib.keras.python.keras.layers import Dense, GaussianNoise, Dropout

    # load data; assumed to be a 2-d array of shape (examples, features)
    features = self.dataset.load(eventlog_name)

    # parameters
    input_size = features.shape[1]
    # Dense(units=...) requires an integer; np.round returns a float.
    hidden_size = int(input_size * 4)

    # input layer
    input_layer = Input(shape=(input_size,), name='input')

    # hidden layers: Gaussian noise on the input (denoising), then four
    # dropout-regularised ReLU layers of identical width
    hid = Dense(hidden_size, activation=tf.nn.relu)(GaussianNoise(0.1)(input_layer))
    for _ in range(4):
        hid = Dense(hidden_size, activation=tf.nn.relu)(Dropout(0.5)(hid))

    # linear output layer reconstructing the input
    output_layer = Dense(input_size, activation='linear')(Dropout(0.5)(hid))

    # build model
    self.model = Model(inputs=input_layer, outputs=output_layer)

    # compile model
    self.model.compile(
        optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
        loss=tf.losses.mean_squared_error,
    )

    # train model: targets == inputs (autoencoder reconstruction)
    self.model.fit(
        features,
        features,
        batch_size=100,
        epochs=100,
        validation_split=0.2,
    )
# Variational-autoencoder hyperparameters.
latent_dim = 2           # dimensionality of the latent space z
intermediate_dim = 256   # width of the hidden layer
epochs = 50
epsilon_std = 1.0        # stddev of the reparameterization noise


def sampling(args):
    """Reparameterization trick: z = z_mean + exp(z_log_sigma) * eps.

    *args* is the pair ``(z_mean, z_log_sigma)`` produced by the encoder;
    eps is drawn from N(0, epsilon_std).
    """
    z_mean, z_log_sigma = args
    # NOTE(review): `batch_size` and `K` (the Keras backend) must be
    # defined elsewhere in this file — confirm before reuse.
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_sigma) * epsilon


# Encoder: input -> hidden -> (z_mean, z_log_sigma) -> sampled z
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)
z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_sigma])

# Decoder: the layers are kept in named variables so they can be reused
# (e.g. for a stand-alone generator model) later in the file.
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

# end-to-end VAE: input to reconstruction
vae = Model(x, x_decoded_mean)
# encoder, from inputs to latent space
encoder = Model(x, z_mean)
# Persist the last slice of labels to disk; the filename encodes the
# index range it covers.
np.save(
    os.path.join(SAVE_DIR,
                 'labels_' + str(lendata - 1) + '_' + str(lendata) + '.npy'),
    labels[lendata - 1:lendata])
# Drop large intermediates so they can be garbage-collected before the
# model is built.
tokenizer = None
data = None
labels = None

# Frozen embedding layer initialised from a precomputed matrix
# (presumably pretrained word vectors — confirm against the code that
# builds `embedding_matrix`). +1 because index 0 is reserved.
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

# 1-D conv net over embedded token sequences.
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(256, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(2)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
# One softmax unit per label class.
preds = Dense(len(labels_index), activation='softmax')(x)
    # channels-last layout (the `if` condition is above this chunk —
    # presumably an image_data_format check; confirm upstream).
    shape_ord = (28, 28, 1)
else:
    # channels-first layout
    shape_ord = (1, 28, 28)

# MNIST images only; labels are discarded (unsupervised autoencoder).
(x_train, _), (x_test, _) = mnist.load_data()
# Scale pixel values to [0, 1] and add the channel axis.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, ((x_train.shape[0],) + shape_ord))
x_test = np.reshape(x_test, ((x_test.shape[0],) + shape_ord))

# Convolutional encoder: 28x28x1 -> (4, 4, 8) bottleneck.
input_img = Input(shape=(28, 28, 1))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (4, 4, 8) i.e. 128-dimensional

# Decoder: upsample back toward the input resolution. The last Conv2D
# deliberately has no padding='same' — with 'valid' padding it trims the
# upsampled map back toward 28x28 (confirm against the final layers
# below this chunk).
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
def fit(self, eventlog_name):
    """Train a multi-attribute sequence model on *eventlog_name*.

    Each categorical attribute of the event log gets its own embedded
    input; the embeddings are concatenated, passed through a GRU and two
    shared dense layers, and one softmax head per target attribute is
    attached. The compiled, trained network is stored in ``self.model``.
    """
    import tensorflow as tf
    from tensorflow.contrib.keras.python.keras.engine import Input, Model
    from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, GRU, Embedding, merge, Masking

    features, targets = self.dataset.load(eventlog_name, train=True)

    inputs = []
    layers = []

    # Embedding lookups are pinned to the CPU.
    with tf.device('/cpu:0'):
        # split the 3-d feature tensor into one (examples, time) matrix
        # per attribute
        features = [features[:, :, i] for i in range(features.shape[2])]

        for i, t in enumerate(features):
            voc_size = np.array(self.dataset.attribute_dims[i]) + 1  # we start at 1, hence +1
            emb_size = np.floor(voc_size / 2.0).astype(int)
            # a dedicated name for the input tensor — the original code
            # rebound the loop index `i` here, which is fragile
            inp = Input(shape=(None, *t.shape[2:]))
            x = Embedding(input_dim=voc_size, output_dim=emb_size,
                          input_length=t.shape[1], mask_zero=True)(inp)
            inputs.append(inp)
            layers.append(x)

    # merge the per-attribute embeddings and encode the sequence
    x = merge.concatenate(layers)
    x = GRU(64, implementation=2)(x)

    # shared hidden layers
    x = Dense(512, activation=tf.nn.relu)(x)
    x = Dense(512, activation=tf.nn.relu)(Dropout(0.5)(x))

    # one dropout-regularised softmax head per target attribute
    outputs = []
    for l in targets:
        o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(x))
        o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(o))
        o = Dense(l.shape[1], activation=tf.nn.softmax)(Dropout(0.5)(o))
        outputs.append(o)

    self.model = Model(inputs=inputs, outputs=outputs)

    # compile model — Keras' Adadelta (this replaced an earlier
    # tf.train.AdamOptimizer(learning_rate=0.0001) setup)
    self.model.compile(
        optimizer='Adadelta',
        loss='categorical_crossentropy',
    )

    # train model
    self.model.fit(
        features,
        targets,
        batch_size=100,
        epochs=100,
        validation_split=0.2,
    )
    # channels-first layout (the `if` condition is above this chunk —
    # presumably an image_data_format check; confirm upstream).
    input_shape = (channels, img_rows, img_cols)
else:
    # channels-last: read the dimensions off the loaded data
    img_rows = x_train.shape[1]
    img_cols = x_train.shape[2]
    channels = x_train.shape[3]
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
    input_shape = (img_rows, img_cols, channels)

# Scale pixels to [0, 1] and one-hot encode the labels.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Stem: 7x7 stride-2 conv with batch norm + ReLU, L2 weight decay.
inputs = Input(shape=input_shape)
x = Conv2D(num_filters,
           kernel_size=7,
           padding='same',
           strides=2,
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-4))(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
if use_max_pool:
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    num_blocks = 3
# convolutional base (stack of blocks).
# NOTE(review): the loop body is below this chunk.
for i in range(num_blocks):