# --- wire remaining command-line FLAGS into the model config ---
config.nhidden2 = FLAGS.nhidden2
config.attention_size1 = FLAGS.attention_size1
config.evaluate_every = FLAGS.evaluate_every

# A modality is considered active only when BOTH its train and test paths were given.
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))

# EEG data loader and generator
# training data is loaded just for calculating the normalization parameters
if (eeg_active):
    print("eeg active")
    # Initialize the data generator separately for the training and test sets
    eeg_train_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_train_data),
        data_shape=[config.frame_seq_len, config.ndim],
        seq_len=config.epoch_seq_len,
        shuffle=False)
    eeg_test_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_test_data),
        data_shape=[config.frame_seq_len, config.ndim],
        seq_len=config.epoch_seq_len,
        shuffle=False)
    # data normalization here
    # Flatten to (data_size * frame_seq_len, ndim) so mean/std are computed
    # per feature dimension over all frames of the training set.
    X = eeg_train_gen.X
    X = np.reshape(X, (eeg_train_gen.data_size * eeg_train_gen.data_shape[0], eeg_train_gen.data_shape[1]))
    meanX = X.mean(axis=0)
    stdX = X.std(axis=0)
    # NOTE(review): this normalizes the local flattened copy; presumably meanX/stdX
    # are re-applied to the generators' data further on — that code is beyond this chunk.
    X = (X - meanX) / stdX
# Create the output and checkpoint directories if they do not exist yet.
if not os.path.isdir(os.path.abspath(out_path)):
    os.makedirs(os.path.abspath(out_path))
if not os.path.isdir(os.path.abspath(checkpoint_path)):
    os.makedirs(os.path.abspath(checkpoint_path))

config = Config()
config.dropout = FLAGS.dropout

# Test-only script: a modality is active when its TEST path alone was supplied
# (unlike the training scripts, which also require a train path).
eeg_active = (FLAGS.eeg_test_data != "")
eog_active = (FLAGS.eog_test_data != "")
emg_active = (FLAGS.emg_test_data != "")

if (eeg_active):
    print("eeg active")
    eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.ntime], shuffle=False, test_mode=True)
    eeg_test_gen.X = np.expand_dims(eeg_test_gen.X, axis=-1)  # expand feature dimension
if (eog_active):
    print("eog active")
    eog_test_gen = DataGenerator(os.path.abspath(FLAGS.eog_test_data), data_shape=[config.ntime], shuffle=False, test_mode=True)
    eog_test_gen.X = np.expand_dims(eog_test_gen.X, axis=-1)  # expand feature dimension
if (emg_active):
    # NOTE(review): EMG generator construction presumably follows; it is beyond this chunk.
    print("emg active")
# NOTE(review): `&` (bitwise) happens to work because both operands are bools,
# but `and` would be the conventional choice, consistent with the sibling scripts.
eog_active = ((FLAGS.eog_train_data != "") & (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") & (FLAGS.emg_test_data != ""))

num_channel = 0  # counts how many input modalities are active

if (eeg_active):
    print("eeg active")
    num_channel += 1
    # Initialize the data generator separately for the training, validation, and test sets
    eeg_train_gen = EqualDataGenerator(
        os.path.abspath(FLAGS.eeg_train_data),
        data_shape=[config.n_time, config.n_dim],
        shuffle=False)
    eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.n_time, config.n_dim], shuffle=False)
    eeg_eval_gen = DataGenerator(os.path.abspath(FLAGS.eeg_eval_data), data_shape=[config.n_time, config.n_dim], shuffle=False)
    # load pretrained filterbanks and do filtering first
    eeg_filter = loadmat(FLAGS.eeg_pretrainedfb_path)
    Wfb = eeg_filter['Wfb']  # filterbank weight matrix stored in the .mat file
    eeg_train_gen.filter_with_filterbank(Wfb)
    eeg_test_gen.filter_with_filterbank(Wfb)
    eeg_eval_gen.filter_with_filterbank(Wfb)
    del Wfb, eeg_filter  # release the filterbank weights once applied
    # normalization here
    X = eeg_train_gen.X
    # NOTE(review): statement truncated at the chunk boundary — the reshape
    # (and presumably mean/std normalization) continues beyond this view.
    X = np.reshape(X, (eeg_train_gen.data_size * eeg_train_gen.data_shape[0],
config = Config()
# --- hyper-parameters from command-line FLAGS ---
config.dropout_cnn = FLAGS.dropout_cnn
config.dropout_rnn = FLAGS.dropout_rnn
config.epoch_seq_len = FLAGS.seq_len
config.epoch_step = FLAGS.seq_len  # step equals sequence length (non-overlapping sequences, presumably)
config.n_hidden = FLAGS.nhidden

# A modality is active only when both its train and test paths were given.
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))

if (eeg_active):
    print("eeg active")
    # Initialize the data generator separately for the training, validation, and test sets
    eeg_train_gen = DataGenerator(os.path.abspath(FLAGS.eeg_train_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle=False)
    eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle=False)
    eeg_eval_gen = DataGenerator(os.path.abspath(FLAGS.eeg_eval_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle=False)
if (eog_active):
    print("eog active")
    # Initialize the data generator separately for the training, validation, and test sets
    # NOTE(review): call truncated at the chunk boundary — remaining arguments
    # continue beyond this view.
    eog_train_gen = DataGenerator(os.path.abspath(FLAGS.eog_train_data), data_shape=[config.ntime],
# --- hyper-parameters from command-line FLAGS ---
config.dropout = FLAGS.dropout
config.epoch_seq_len = FLAGS.seq_len
config.epoch_step = FLAGS.seq_len  # step equals sequence length
config.nhidden = FLAGS.nhidden
config.evaluate_every = FLAGS.evaluate_every

# A modality is active only when both its train and test paths were given.
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))

# EEG data loader and generator
if (eeg_active):
    print("eeg active")
    # Initialize the data generator separately for the training and test sets.
    # Original author's note: training data is loaded here even though no
    # normalization is performed ("i was lazy to remove") — it is unused.
    eeg_train_gen = DataGenerator(os.path.abspath(FLAGS.eeg_train_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle = False)
    eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle = False)
    # no need for data normalization
    eeg_train_gen.X = np.expand_dims(eeg_train_gen.X, axis=-1)  # expand feature dimension
    eeg_test_gen.X = np.expand_dims(eeg_test_gen.X, axis=-1)  # expand feature dimension

# EOG data loader and generator
if (eog_active):
    print("eog active")
    # Initialize the data generator separately for the training, validation, and test sets
    eog_train_gen = DataGenerator(os.path.abspath(FLAGS.eog_train_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle = False)
    eog_test_gen = DataGenerator(os.path.abspath(FLAGS.eog_test_data), data_shape=[config.ntime], seq_len=config.epoch_seq_len, shuffle = False)
    eog_train_gen.X = np.expand_dims(eog_train_gen.X, axis=-1)  # expand feature dimension
    eog_test_gen.X = np.expand_dims(eog_test_gen.X, axis=-1)  # expand feature dimension
# --- hyper-parameters from command-line FLAGS ---
config.dropout_keep_prob = FLAGS.dropout_keep_prob
config.num_filters = FLAGS.num_filter

# NOTE(review): `&` (bitwise) happens to work because both operands are bools,
# but `and` would be the conventional choice, consistent with the sibling scripts.
eeg_active = ((FLAGS.eeg_train_data != "") & (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") & (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") & (FLAGS.emg_test_data != ""))

if (eeg_active):
    print("eeg active")
    # Initialize the data generator separately for the training, validation, and test sets
    eeg_train_gen = EqualDataGenerator(
        os.path.abspath(FLAGS.eeg_train_data),
        data_shape=[config.time_length, config.freq_length],
        shuffle=False)
    eeg_test_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_test_data),
        data_shape=[config.time_length, config.freq_length],
        shuffle=False)
    eeg_eval_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_eval_data),
        data_shape=[config.time_length, config.freq_length],
        shuffle=False)
    # load pretrained filterbank and do filtering first
    eeg_filter = loadmat(FLAGS.eeg_pretrainedfb_path)
    Wfb = eeg_filter['Wfb']  # filterbank weight matrix stored in the .mat file
    eeg_train_gen.filter_with_filterbank(Wfb)
    eeg_test_gen.filter_with_filterbank(Wfb)
    eeg_eval_gen.filter_with_filterbank(Wfb)
    del Wfb, eeg_filter  # release the filterbank weights once applied
if (eog_active):
    # NOTE(review): EOG generator construction presumably follows; it is beyond this chunk.
    print("eog active")
# --- hyper-parameters from command-line FLAGS ---
config.seq_nhidden2 = FLAGS.seq_nhidden2
config.seq_attention_size1 = FLAGS.seq_attention_size1
config.dropout_cnn = FLAGS.dropout_cnn
config.deep_nhidden = FLAGS.deep_nhidden

# A modality is active only when both its train and test paths were given.
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))

if (eeg_active):
    print("eeg active")
    # Initialize the data generator separately for the training, validation, and test sets.
    # Each sample carries two representations: data_shape_1 (raw, deep_* config)
    # and data_shape_2 (time-frequency, seq_* config) — TODO confirm against DataGenerator.
    eeg_train_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_train_data),
        data_shape_1=[config.deep_ntime],
        data_shape_2=[config.seq_frame_seq_len, config.seq_ndim],
        seq_len=config.epoch_seq_len,
        shuffle=False)
    eeg_test_gen = DataGenerator(
        os.path.abspath(FLAGS.eeg_test_data),
        data_shape_1=[config.deep_ntime],
        data_shape_2=[config.seq_frame_seq_len, config.seq_ndim],
        seq_len=config.epoch_seq_len,
        shuffle=False)
    # data normalization for time-frequency here
    # Flatten to (data_size * seq_frame_seq_len, seq_ndim) so the mean is
    # computed per feature dimension over all frames of the training set.
    X2 = eeg_train_gen.X2
    X2 = np.reshape(X2, (eeg_train_gen.data_size * eeg_train_gen.data_shape_2[0], eeg_train_gen.data_shape_2[1]))
    # NOTE(review): normalization continues beyond this chunk (std and re-application, presumably).
    meanX = X2.mean(axis=0)
# NOTE(review): this first print presumably sits inside a loop over FLAGS
# attributes that begins before this chunk (the usual "print all flags" banner).
print("{}={}".format(attr.upper(), value))
print("")

# path where some output are stored
out_path = os.path.abspath(os.path.join(os.path.curdir, FLAGS.out_dir))
# path where checkpoint models are stored
checkpoint_path = os.path.abspath(os.path.join(out_path, FLAGS.checkpoint_dir))
# Create both directories if they do not exist yet.
if not os.path.isdir(os.path.abspath(out_path)):
    os.makedirs(os.path.abspath(out_path))
if not os.path.isdir(os.path.abspath(checkpoint_path)):
    os.makedirs(os.path.abspath(checkpoint_path))

config = Config()
config.dropout_keep_prob = FLAGS.dropout_keep_prob

test_generator = DataGenerator(os.path.abspath(FLAGS.test_data), shuffle=False)
# NOTE(review): int16 caps the batch count at 32767 — could silently overflow
# for large datasets; int32/int64 would be safer.
test_batches_per_epoch = np.floor(test_generator.data_size / config.batch_size).astype(np.int16)

# Training
# ==================================================
# NOTE(review): despite the "Training" banner, only a test generator is built
# in this visible chunk — confirm against the rest of the file.
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Build the model graph inside the session's default graph.
        dnn = DNN_FilterBank(config=config)