epochs) + '_v' + str(VERSION_NUMBER)
keras_model_name = description + '.h5'
if SAVE_ALT_MODEL_CPU:
    keras_model_name = 'alt_' + keras_model_name
file_name = description

seq_length = 2000
if num_channels < 2:
    x_shape = [seq_length, 1]
    input_shape = seq_length
else:
    x_shape = [seq_length, 2]
    input_shape = (seq_length, num_channels)
y_shape = [seq_length, num_classes]

# Start Timer:
start_time_ms = tfs.current_time_ms()

# Import Data:
x_tt, y_tt = tfs.load_data_v2('data/extended_5_class/mit_bih_tlabeled_w8s_fixed_all',
                              [seq_length, 2], y_shape, 'relevant_data', 'Y')
if num_channels < 2:
    x_tt = np.reshape(x_tt[:, :, 0], [-1, seq_length, 1])
xx_flex, y_flex = tfs.load_data_v2('data/flexEcg_8s_normal', [seq_length, 1], [1],
                                   'relevant_data', 'Y')
x_train, x_test, y_train, y_test = train_test_split(x_tt, y_tt, train_size=0.75, random_state=1)  # 0.66
ptb_data_lead_ii = tfs.load_mat('_adversarial/data/lead_ii_all/all_y.mat', key='Y',
                                shape=[seq_length, 1])


def get_model_alt():
    k_model = Sequential()
    k_model.add(Reshape((seq_length, num_channels), input_shape=(input_shape, 1)))
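

# Illustrative sketch only: `tfs` is this project's helper module and its real
# implementation is not shown in this excerpt. A minimal loader with the same
# call shape as tfs.load_data_v2 above might look like the following, assuming
# each .mat file stores the signals under 'relevant_data' and the labels under
# 'Y' (the keys passed in the calls above). `load_data_v2_sketch` is a
# hypothetical name, not the project's actual function.
def load_data_v2_sketch(path, x_shape, y_shape, x_key='relevant_data', y_key='Y'):
    from scipy.io import loadmat
    mat = loadmat(path if path.endswith('.mat') else path + '.mat')
    # Reshape flat arrays into [num_windows, *shape] batches:
    x = np.reshape(mat[x_key], [-1] + list(x_shape))
    y = np.reshape(mat[y_key], [-1] + list(y_shape))
    return x, y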
outputs, prediction, accuracy = tfs.get_outputs(y, y_conv, output_node_name)

# 9. Initialize tensorflow for training & evaluation:
saver, init_op, config = tfs.tf_initialize()

# Enter Training Routine:
with tf.Session(config=config) as sess:
    sess.run(init_op)

    # Print Model Information Before Starting.
    model_dims = tfs.get_model_dimensions(h, h_flat, h_fc, y_conv, NUM_LAYERS)
    filter_dims = tfs.get_filter_dimensions(W_x, W_y, Str_X, Str_Y, Alphas, NUM_LAYERS)
    print(model_dims, '\n', filter_dims)

    # Save model as pbtxt:
    tf.train.write_graph(sess.graph_def, EXPORT_DIRECTORY, Model_description + '.pbtxt', True)

    start_time_ms = tfs.current_time_ms()

    # Train Model:
    val_accuracy_rate = tfs.train(x, y, keep_prob, accuracy, train_step, x_train, y_train,
                                  x_test, y_test, keep_prob_feed, train_steps)
    elapsed_time_ms = (tfs.current_time_ms() - start_time_ms)

    # Test Accuracy: (Test/Train Split)
    tt_acc = tfs.test(sess, x, y, accuracy, x_test, y_test, keep_prob, test_type='Train-Split')

    # Validation Accuracy:
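
    # Illustrative sketch only (not part of the original routine): after
    # training, the trained variables can be folded into the graph and written
    # as a binary .pb alongside the .pbtxt exported above. `output_node_name`,
    # EXPORT_DIRECTORY and Model_description come from this script; the
    # '_frozen.pb' filename is a hypothetical example.
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, [output_node_name])
    tf.train.write_graph(frozen_graph_def, EXPORT_DIRECTORY,
                         Model_description + '_frozen.pb', as_text=False)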
            d = InstanceNormalization()(d)
        return d

    input_samples = Input(shape=(input_length, 1))
    d1 = discriminator_layer(input_samples, 64, 5, 2, normalization=False)
    d2 = discriminator_layer(d1, 128, 5, 2)
    d3 = discriminator_layer(d2, 256, 5, 2)
    d4 = discriminator_layer(d3, 512, 5, 2)
    validity = Conv1D(1, kernel_size=5, strides=1, padding='same')(d4)
    return Model(input_samples, validity)


# Train:
batch_size = 128
start_time_ms = tfs.current_time_ms()

# Initialize:
lambda_cycle = 10.0              # Cycle-consistency loss
lambda_id = 0.1 * lambda_cycle   # Identity loss

optimizer = Adam(learn_rate, beta_1=0.50)

# Build and compile the discriminators
d_A = build_discriminator()
d_B = build_discriminator()
print('Discriminator: ')
print(d_A.summary())
d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
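
# Illustrative sketch only: with the LSGAN ('mse') discriminators compiled
# above, a standard Keras CycleGAN setup would next wire two generators into a
# combined model whose adversarial, cycle-consistency and identity losses are
# weighted by lambda_cycle / lambda_id. `build_generator()` is assumed to exist
# and return a Model mapping (input_length, 1) -> (input_length, 1); it is not
# shown in this excerpt, so this is not the project's exact training graph.
g_AB = build_generator()
g_BA = build_generator()

signal_A = Input(shape=(input_length, 1))
signal_B = Input(shape=(input_length, 1))

fake_B = g_AB(signal_A)        # translate A -> B
fake_A = g_BA(signal_B)        # translate B -> A
reconstr_A = g_BA(fake_B)      # cycle A -> B -> A
reconstr_B = g_AB(fake_A)      # cycle B -> A -> B
identity_A = g_BA(signal_A)    # identity mapping of A
identity_B = g_AB(signal_B)    # identity mapping of B

# Freeze the discriminators while the generators are trained through the combined model.
d_A.trainable = False
d_B.trainable = False
valid_A = d_A(fake_A)
valid_B = d_B(fake_B)

combined = Model(inputs=[signal_A, signal_B],
                 outputs=[valid_A, valid_B, reconstr_A, reconstr_B, identity_A, identity_B])
combined.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
                 loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id],
                 optimizer=optimizer)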