Example #1
def train_model(total_epochs, sample_interval=100):
    # Assumes module-level globals/helpers: gen, disc, combined_model, x_train,
    # y_train, batch_size, half_batch, latent_dim, num_classes, previous_epochs,
    # extract_samples(), tfs.
    # Class weights: cw1 for the real/fake output, cw2 for the (num_classes + 1)-way
    # class output, where index num_classes is reserved for generated samples.
    cw1 = {0: 1, 1: 1}
    cw2 = {i: num_classes / half_batch for i in range(num_classes)}
    cw2[num_classes] = 1 / half_batch

    # Adversarial ground truths:
    valid = np.ones((batch_size, 1))   # validity targets for real samples
    fake = np.zeros((batch_size, 1))   # validity targets for generated samples

    for epoch in range(total_epochs + 1):
        # 1. Train Discriminator:
        index = np.random.randint(0, x_train.shape[0], batch_size)
        input_sample_ = x_train[index]
        noise_ = np.random.normal(0, 1, (batch_size, latent_dim))
        gen_samples = gen.predict(noise_)

        labels = to_categorical(y_train[index], num_classes=num_classes + 1)
        fake_labels = to_categorical(np.full((batch_size, 1), num_classes), num_classes=num_classes + 1)

        # One step on the real batch, one on the generated batch, averaging losses:
        d_loss_real = disc.train_on_batch(input_sample_, [valid, labels], class_weight=[cw1, cw2])
        d_loss_fake = disc.train_on_batch(gen_samples, [fake, fake_labels], class_weight=[cw1, cw2])
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # 2. Train Generator (it wants the discriminator to mark its samples as valid):
        g_loss = combined_model.train_on_batch(noise_, valid, class_weight=[cw1, cw2])

        print("%d [D loss: %f, acc: %.2f%%, op_acc: %.2f%%] [G loss: %f]" %
              (epoch, d_loss[0], 100 * d_loss[3], 100 * d_loss[4], g_loss))

        if (epoch % sample_interval) == 0:
            total_training_steps = previous_epochs + epoch
            new_samples = extract_samples()
            savemat(tfs.prep_dir('gen_out/') + 'gen_samples_e' + str(total_training_steps) + '.mat',
                    mdict={'outputs': new_samples})
            print('Saved samples: gen_out/gen_samples_e{}.mat'.format(total_training_steps))
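The class-weight scheme above follows the usual semi-supervised GAN labeling: real samples keep their true class index, and the extra index num_classes marks generated samples. A minimal sketch of that label layout (values illustrative, assuming Keras' to_categorical):

import numpy as np
from keras.utils import to_categorical

num_classes = 5                       # e.g. five beat classes
y_real = np.array([0, 3, 4])          # true labels for real samples
y_fake = np.full((3,), num_classes)   # generated samples get index 5

# One-hot over num_classes + 1 columns; the last column marks "fake":
print(to_categorical(y_real, num_classes=num_classes + 1))
print(to_categorical(y_fake, num_classes=num_classes + 1))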
Example #2
# The opening lines of this snippet are truncated; the first branch below is a
# hedged reconstruction based on the matching if/elif chains in the later examples.
if DATASET == 'mit':
    num_classes = 5  # assumed, matching the 5-class MIT-BIH setup further down
    num_channels = 2
    data_directory = 'data/extended_5_class/mit_bih_tlabeled_w8s_fixed_all'
elif DATASET == 'ptb':
    num_classes = 2
    data_directory = 'data/ptb_ecg_1ch_temporal_labels/lead_v2_all'
elif DATASET == 'ptb6':
    num_classes = 6
    data_directory = 'data/ptb_6class_temporal/lead_v2_all'
elif DATASET == 'incart':
    num_classes = 5
    data_directory = 'data/incartdb_v1_all'
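The if/elif chain above works, but the same dataset-to-config mapping can live in one dict so an unknown DATASET fails loudly. A sketch with the values from this chain (the 'mit' entry assumes the reconstructed branch; its extra num_channels = 2 is noted inline):

DATASET_CONFIG = {
    'mit':    (5, 'data/extended_5_class/mit_bih_tlabeled_w8s_fixed_all'),  # also sets num_channels = 2
    'ptb':    (2, 'data/ptb_ecg_1ch_temporal_labels/lead_v2_all'),
    'ptb6':   (6, 'data/ptb_6class_temporal/lead_v2_all'),
    'incart': (5, 'data/incartdb_v1_all'),
}
num_classes, data_directory = DATASET_CONFIG[DATASET]  # KeyError on an unknown DATASET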
learn_rate = 0.0002

description = DATASET + '_annotate'
keras_model_name = description + '.h5'
model_dir = tfs.prep_dir('model_exports/')
keras_file_location = model_dir + keras_model_name

output_folder = 'classify_data_out/' + description + '/'
seq_length = 2000
input_length = seq_length
x_shape = [seq_length, num_channels]
y_shape = [seq_length, num_classes]

# Start Timer:
start_time_ms = tfs.current_time_ms()

# Load Data:
x_tt, y_tt = tfs.load_data_v2(data_directory, x_shape, y_shape, 'X', 'Y')

Example #3
# Additional Dataset:
DATASET = 'mit'

batch_size = 128
epochs = 50

num_channels = 1
if DATASET == 'mit' or DATASET == 'incart':
    num_classes = 5
elif DATASET == 'ptb':
    num_classes = 2

learn_rate = 0.0002

description = DATASET + '_ecg_annotate_lr' + str(learn_rate) + '_r0'
keras_model_name = description + '.h5'
model_dir = tfs.prep_dir('model_exports/')
keras_file_location = model_dir + keras_model_name

output_folder = 'classify_data_out/' + description + '/'
seq_length = 2000
input_length = seq_length
x_shape = [seq_length, 1]
y_shape = [seq_length, num_classes]

# Start Timer:
start_time_ms = tfs.current_time_ms()
x_tt = []
y_tt = []

# Load Data:
if DATASET == 'mit':  # MIT-BIH data set
    pass  # (snippet truncated here in the original listing)
Example #4
epochs = 10

# These variables are set based on the dataset we are looking at.
num_channels = 1
num_classes = 0
data_directory = ''

if DATASET in ('combined', 'combined_v2'):
    num_classes = 2
    data_directory = 'data/incart_ptb_all'

learn_rate = 0.0002

description = DATASET + '_annotate_lstm'
keras_model_name = description + '.h5'
model_dir = tfs.prep_dir('model_exports/')
keras_file_location = model_dir + keras_model_name

output_folder = 'classify_data_out/' + description + '/'
seq_length = 2000
input_length = seq_length
x_shape = [seq_length, num_channels]
y_shape = [seq_length, num_classes]

# Start Timer:
start_time_ms = tfs.current_time_ms()

# Load Data:
x_tt, y_tt = tfs.load_data_v2(data_directory, x_shape, y_shape, 'X', 'Y')

# Load Test Data:
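The snippet is cut off after this comment; a hedged sketch of what the test load presumably mirrors, reusing the same loader with a hypothetical test directory:

test_directory = data_directory + '_test'  # hypothetical path; not shown in the snippet
x_test, y_test = tfs.load_data_v2(test_directory, x_shape, y_shape, 'X', 'Y')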
Example #5
def ind2vec(x, dimensions=None):
    # Assumed signature and opening; the first lines of this function are
    # truncated in the original listing. Converts (samples, seq_len, 1) integer
    # labels into (samples, seq_len, number_classes) one-hot vectors.
    if dimensions is not None:
        samples, seq_len = x.shape[0], x.shape[1]
        number_classes = dimensions
        new_vec_array = np.zeros([samples, seq_len, number_classes],
                                 dtype=np.int32)
        for i in range(0, samples):
            for s in range(0, seq_len):
                new_vec_array[i, s, int(x[i, s, 0])] = 1
        return new_vec_array
    else:
        print('Not yet supported')
    return 0


num_classes = 1
seq_length = 2000
input_length = seq_length
dir_x = 'data_labeled_3c/'
file_location = prep_dir(dir_x + '_all/') + 'all_data.mat'  # Output Directory
key_x = 'X'
key_y = 'Y'

# Load Data From Folder dir_x
x_data, y_data = load_data_v2(dir_x, [seq_length, 1], [seq_length, 1],
                              'relevant_data', 'Y')

# Expand integer labels to one-hot vectors: (n, seq_length, 1) → (n, seq_length, 3)
y_data = ind2vec(y_data, dimensions=3)
print("Loaded Data Shape: X:", x_data.shape, " Y: ", y_data.shape)

# Save Matlab Output File:
savemat(file_location, mdict={key_x: x_data, key_y: y_data})
print("Saved Data: KeyX:", key_x, " KeyY: ", key_y, ' at: ', file_location)
Example #6
def build_discriminator():  # assumed wrapper; the outer def is truncated in the listing
    def discriminator_layer(layer_input, filters, kernel_size, strides, normalization=True):
        # Assumed signature for this truncated helper: a strided Conv1D block.
        d = Conv1D(filters, kernel_size=kernel_size, strides=strides, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    input_samples = Input(shape=(input_length, 1))
    d1 = discriminator_layer(input_samples, 64, 8, 2, normalization=False)
    d2 = discriminator_layer(d1, 128, 8, 2)
    d3 = discriminator_layer(d2, 256, 8, 2)
    d4 = discriminator_layer(d3, 512, 8, 2)
    validity = Conv1D(1, kernel_size=8, strides=1, padding='same')(d4)
    return Model(input_samples, validity)


# Restore Model if Present:
keras_training_file = tfs.prep_dir(model_dir) + description + 'training.mat'
keras_training_epochs_key = 'training_epochs'
keras_training_batch_size_key = 'training_batch_size'
keras_combined_model_location = tfs.prep_dir(
    model_dir) + description + 'combined_model.h5'
keras_d_A_location = tfs.prep_dir(model_dir) + description + 'd_A.h5'
keras_d_B_location = tfs.prep_dir(model_dir) + description + 'd_B.h5'
keras_g_BA_location = tfs.prep_dir(model_dir) + description + 'g_BA.h5'
keras_g_AB_location = tfs.prep_dir(model_dir) + description + 'g_AB.h5'

keras_g_AB_opt_location = tfs.prep_dir(model_dir) + '/opt_ptb_cycle_gan'
# Load the saved models if every file exists:
model_files = [keras_d_A_location, keras_d_B_location, keras_g_AB_location,
               keras_g_BA_location, keras_combined_model_location]
if all(os.path.isfile(f) for f in model_files):
    pass  # (snippet truncated here in the original listing)
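The body of the restore branch is truncated in the listing; a minimal sketch of what the load typically looks like for these files, assuming Keras' load_model and the keras-contrib InstanceNormalization layer used in the discriminator:

from keras.models import load_model
from keras_contrib.layers import InstanceNormalization  # assumed source of the layer

custom = {'InstanceNormalization': InstanceNormalization}
d_A = load_model(keras_d_A_location, custom_objects=custom)
d_B = load_model(keras_d_B_location, custom_objects=custom)
g_AB = load_model(keras_g_AB_location, custom_objects=custom)
g_BA = load_model(keras_g_BA_location, custom_objects=custom)
combined_model = load_model(keras_combined_model_location, custom_objects=custom)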
Example #7
elif DATASET == 'ptb6':  # assumed branch head; the snippet is truncated above this line
    num_classes = 6      # assumed, matching the same chain in Example #2
    data_directory = 'data/ptb_6class_temporal/lead_v2_all'
elif DATASET == 'incart':
    num_classes = 5
    data_directory = 'data/incartdb_v1_all'
elif DATASET == 'combined':
    num_classes = 2
    data_directory = 'data/incart_ptb_all'
elif DATASET == 'adv':
    num_classes = 2
    data_directory = 'data/ptb_adv'

learn_rate = 0.0002

description = DATASET + '_classify'
keras_model_name = description
model_dir = tfs.prep_dir('model_exports/')
keras_file_prefix = model_dir + keras_model_name

disc_file = keras_file_prefix + '_disc.h5'
gen_file = keras_file_prefix + '_gen.h5'
combined_file = keras_file_prefix + '_combined.h5'
train_stat_file_mat = keras_file_prefix + '_record.mat'
key_epochs = 'epochs'
all_files_as_list = [disc_file, gen_file, combined_file, train_stat_file_mat]

output_folder = 'classify_data_out/' + description + '/'
seq_length = 2000
seq_init_length = 250
input_length = seq_length
x_shape = [seq_length, num_channels]
y_shape = [1]
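The snippet ends with the file paths; a hedged sketch of how the '_record.mat' bookkeeping above is typically used to resume training (function names are hypothetical):

import os
from scipy.io import loadmat, savemat

def save_training_record(epochs_completed):
    # Persist the completed-epoch count next to the model files.
    savemat(train_stat_file_mat, mdict={key_epochs: epochs_completed})

def load_training_record():
    # Returns 0 when no previous record exists.
    if os.path.isfile(train_stat_file_mat):
        return int(loadmat(train_stat_file_mat)[key_epochs].squeeze())
    return 0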
Example #8
import numpy as np  # needed by load_data below; missing from the truncated import block
from scipy.io import savemat
# y generally needs to be one-hot, but that is handled in preprocessing (to_categorical).
from sklearn.model_selection import train_test_split

import tf_shared_k as tfs

data_directory = 'time_domain_hpf_new/w'
# Options
win_len = 512
# Hyperparams
train_ratio = 0.75
DATASET = 'ssvep_' + str(win_len)

description = DATASET + '_annotate'
keras_model_name = description + '.h5'
model_dir = tfs.prep_dir('model_exports/')
keras_file_location = model_dir + keras_model_name
# Start Timer:
start_time_ms = tfs.current_time_ms()

# Setup:
TRAIN = False
TEST = True
SAVE_PREDICTIONS = False
SAVE_HIDDEN = True
# EXPORT_OPT_BINARY = False


def load_data(data_directory, image_shape, key_x, key_y):
    x_train_data = np.empty([0, *image_shape], np.float32)
    y_train_data = np.empty([0], np.float32)
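load_data is cut off above; a hedged sketch of the accumulation pattern such a loader usually completes with (the file iteration and .mat keys are assumptions):

import glob
import numpy as np
from scipy.io import loadmat

def load_data_sketch(data_directory, image_shape, key_x, key_y):
    x_train_data = np.empty([0, *image_shape], np.float32)
    y_train_data = np.empty([0], np.float32)
    for f in sorted(glob.glob(data_directory + '*.mat')):  # assumed file layout
        mat = loadmat(f)
        x_train_data = np.concatenate((x_train_data, mat[key_x]), axis=0)
        y_train_data = np.concatenate((y_train_data, mat[key_y].flatten()), axis=0)
    return x_train_data, y_train_data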