def calculate_loss_tensor(filename, N_Windows, W, signals_models, signals_info, test_included=False):
    loss_tensor = np.zeros((len(signals_models), len(signals_info), N_Windows))
    N_Signals = len(signals_info)
    X_matrix = np.zeros((N_Signals, N_Windows, W))
    Y_matrix = np.zeros((N_Signals, N_Windows, W))

    # Pre-sample one batch of windows per signal, either from the raw signals
    # or from the held-out test data saved alongside each model
    if not test_included:
        signals = get_signals_tests(signals_info, signals_models[0].Sd)
        for i in range(N_Signals):
            [X_matrix[i, :, :], Y_matrix[i, :, :]] = get_random_batch(signals[0][i], signals[1][i], W, N_Windows)
    else:
        for i, model_info in enumerate(signals_models):
            [x_test, y_test] = load_test_data(
                "GRU_" + model_info.dataset_name +
                "[" + str(model_info.Sd) + "." + str(model_info.Hd) + ".-1.-1.-1]",
                model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(x_test, y_test, N_Windows)

    print("Loading model...")
    model_info = signals_models[0]
    model = GRU.LibPhys_GRU(model_info.Sd, hidden_dim=model_info.Hd, signal_name=model_info.dataset_name,
                            n_windows=N_Windows)

    # Evaluate every model against every signal's windows
    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name, filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            x_test = X_matrix[s, :, :]
            y_test = Y_matrix[s, :, :]
            signal_info = signals_info[s]
            print("Calculating loss for " + signal_info.name, end=';\n ')
            loss_tensor[m, s, :] = np.asarray(model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz", loss_tensor=loss_tensor, signals_models=signals_models,
             signals_tests=signals_info)
    return loss_tensor
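# Usage sketch, not from the original script: assumes db.signal_models and
# db.signal_tests (from DeepLibphys.utils.functions.database, imported as db
# elsewhere in this repository) and an illustrative output filename.
if __name__ == "__main__":
    loss = calculate_loss_tensor("example_loss", N_Windows=250, W=256,
                                 signals_models=db.signal_models, signals_info=db.signal_tests)
    # loss[m, s, w] is the loss of model m on window w of signal s;
    # the same tensor is also saved to "example_loss.npz".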
def process_error_by_prediction(filename):
    signals_models = db.signal_models
    signals_tests = db.signal_tests

    def load_model(model_info, N_Windows):
        model = GRU.LibPhys_GRU(model_info.Sd, hidden_dim=model_info.Hd, signal_name=model_info.dataset_name,
                                n_windows=N_Windows)
        model.load(signal_name=model_info.dataset_name, filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        return model

    def get_models(signal_models, N_Windows=None, index=None):
        models = []
        if index is None:
            for model_info in signal_models:
                models.append(load_model(model_info, N_Windows))
        else:
            models.append(load_model(signal_models[index], N_Windows))
        return models

    signals = get_signals_tests(signals_tests, signals_models[0].Sd)
    W = 256
    N_Windows = 20000

    for m in range(len(signals_models)):
        models = get_models(signals_models, N_Windows, index=m)
        predicted_signals = list(range(len(signals_tests)))
        model_errors = list(range(len(signals_tests)))
        predicted_signals_ = list(range(len(signals_tests)))
        model_errors_ = list(range(len(signals_tests)))
        print("\nProcessing Model " + signals_models[m].name + ":")

        for s in range(len(signals)):
            print("\tProcessing Signal " + signals_tests[s].name + ";")
            signal = signals[s]
            # The placeholders above are plain ints until the first batch is processed
            if isinstance(model_errors[s], int):
                model_errors[s] = []
                model_errors_[s] = []
                predicted_signals_[s] = []
                predicted_signals[s] = []

            [segmented, y, N_Windows, last_index] = segment_signal(signal, W, 0, N_Windows)
            [x, e] = models[0].predict_class(segmented, y)
            predicted_signals[s].append(x[0, :])
            predicted_signals_[s].append(x[-1, :])
            model_errors[s].append(e[0, :])
            model_errors_[s].append(e[-1, :])
            limit = last_index + (N_Windows + W)
            print("processing...", end=" ")

            while limit < signals_tests[s].size:
                print(str(limit) + " of " + str(signals_tests[s].size), end="_")
                [segmented, y, N_Windows, last_index] = segment_signal(signal, W, 0, N_Windows,
                                                                       start_index=last_index)
                [x, e] = models[0].predict_class(segmented, y)
                predicted_signals[s][-1] = np.append(predicted_signals[s][-1], x[0, :])
                predicted_signals_[s][-1] = np.append(predicted_signals_[s][-1], x[-1, :])
                model_errors[s][-1] = np.append(model_errors[s][-1], e[0, :])
                model_errors_[s][-1] = np.append(model_errors_[s][-1], e[-1, :])
                # print(np.shape(predicted_signals[s][-1]))
                limit = last_index + (N_Windows + W)

        np.savez(filename + str(m) + ".npz",
                 predicted_signals=predicted_signals,
                 model_errors=model_errors,
                 predicted_signals_=predicted_signals_,
                 model_errors_=model_errors_,
                 signals_models=signals_models,
                 signals_tests=signals_tests)
        print(filename + str(m) + ".npz has been saved")
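# Usage sketch (illustrative filename): one .npz is written per model, suffixed
# with the model index, e.g. "prediction_errors0.npz" for the first model.
if __name__ == "__main__":
    process_error_by_prediction("prediction_errors")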
# signal_name = "ecg_7" # signal_model = "ecg_7" signal_directory = 'BIOMETRIC_ECGs_[128.256]' # learning_rate_val = 0.01 batch_size = 128 mini_batch_size = 16 window_size = 256 # number_of_epochs = 1000000 #Noisy signals for noisy_index in [2]:#range(3,5): signals_tests = db.ecg_noisy_signals[noisy_index] signals_models = db.signal_models # # # Load signals from database signals = get_signals_tests(signals_tests, signals_models[0].Sd, type="ecg noise", noisy_index=noisy_index) # train each signal from fantasia for i in range(9, 19): name = 'bio_noise_'+str(noisy_index)+'_ecg_' + str(i) signal = Signal2Model(name, signal_directory, batch_size=batch_size) model = GRU.LibPhys_GRU(signal_dim=signal_dim, hidden_dim=hidden_dim, signal_name=name, n_windows=mini_batch_size) model.save(signal_directory, model.get_file_tag(-1, -1)) model.train_signals(signals[0][i], signals[1][i], signal, decay=0.95, track_loss=False) # Normal + noisy ECGs signal_dim = 64 hidden_dim = 256 signal_directory = 'BIOMETRIC_ECGs_[20.256]' n_for_each = 16
import DeepLibphys.models.LibphysMBGRU as MBGRU
import seaborn
from DeepLibphys.utils.functions.signal2model import *
from DeepLibphys.utils.functions.common import get_signals_tests
from DeepLibphys.utils.functions.database import *

# signal_tests is expected to come from the star import of the database module
signal2model = Signal2Model("XPTO", "XPTO")
signals = get_signals_tests(signal_tests, index=1)
model = MBGRU.LibphysMBGRU(signal2model)
model.train(signals[0], signal2model, loss_interval=10)
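# A possible follow-up, kept commented because it is an assumption: persisting
# the trained model, assuming LibphysMBGRU exposes the same save/get_file_tag
# interface used by the LibPhys_GRU scripts below:
#
#   model.save("XPTO", model.get_file_tag(-1, -1))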
["eeg", 'EEG_Attention', 3, 429348, "EEG ATT 3"], ["eeg", 'EEG_Attention', 4, 429348, "EEG ATT 4"], ["eeg", 'EEG_Attention', 5, 429348, "EEG ATT 5"], ] N_Signals = len(signals_tests) N_Models = len(signals_models) s = -1 history = [""] N_windows = 0 # X_train, Y_train = get_fantasia_dataset(signals_models[0]['Sd'], [7], signals_tests[0][1], # peak_into_data=False) models = get_models(signals_models) signals = get_signals_tests(signals_tests, signals_models) loss_tensor = np.zeros((N_Models, N_Signals, Z)) m = -1 font = {'family': 'lato', 'weight': 'bold', 'size': 40} matplotlib.rc('font', **font) i = 0 fz = 250 x = [0] errors = np.zeros((len(signals_models), N_Signals, Z + 1)) pred_signals = np.zeros((len(signals_models), N_Signals, Z + 1)) A = "Synthetised signal" B = "Error"
    for i in batch_indexes:
        loss_batch = loss_tensor[:, :, i:i + batch_size]
        temp_loss_tensor[:, :, x] = np.min(loss_batch, axis=2)
        x += 1
    return temp_loss_tensor


# CONFUSION_TENSOR_[W,Z]
N_Windows = None
W = 256
print("Processing HRV - with #windows of " + str(N_Windows))

signals_models = db.rr_128_models
signals_tests = db.day_rr
all_signals = get_signals_tests(signals_tests, 64)

i = 0
j = 0
signals = []
signals_info = []
MAX_J = 0
for group_signals in all_signals:
    i += 1
    MAX_J = j
    j = 1
    for person in group_signals[:3]:
        j += 1
        signals.append(person)
        if i < 4:
            signals_info.append(
def calculate_loss_tensor(filename, Total_Windows, W, signals_models, signals=None, noisy_index=None):
    n_windows = Total_Windows
    if Total_Windows / 256 > 1:
        ratio = round(Total_Windows / 256)
    else:
        ratio = 1
    n_windows = 250

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Signals = len(signals_models)
    Total_Windows = int(N_Windows * n_windows)

    loss_tensor = np.zeros((N_Signals, N_Signals, Total_Windows))
    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    indexes = signals_models  # [np.random.permutation(len(signals_models))]
    for i, model_info in enumerate(indexes):
        if signals is None:
            # [x_test, y_test] = load_test_data("GRU_" + model_info.dataset_name
            #                                   + "[" + str(model_info.Sd) + "." + str(model_info.Hd) + ".-1.-1.-1]",
            #                                   model_info.directory)
            [x_test, y_test] = load_test_data(model_info.dataset_name, model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(x_test, y_test, Total_Windows)
        else:
            signals = get_signals_tests(db.ecg_noisy_signals[noisy_index - 1], index=i, noisy_index=noisy_index,
                                        peak_into_data=False)
            signal_test = segment_signal(signals[0][i], 256, 0.33)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(signal_test[0], signal_test[1], Total_Windows)

    print("Loading model...")
    model_info = signals_models[0]
    model = GRU.LibPhys_GRU(model_info.Sd, hidden_dim=model_info.Hd, signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name, filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for " + signals_models[s].name, end=';\n ')
            for w in windows:
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz", loss_tensor=loss_tensor, signals_models_=indexes, signals_models=signals_models)
    return loss_tensor, indexes
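# Usage sketch, not from the original: reload the saved tensor and derive a
# confusion matrix by letting the model with minimum loss claim each window.
# The filename "confusion_example" and the window count are illustrative.
if __name__ == "__main__":
    loss_tensor, indexes = calculate_loss_tensor("confusion_example", 2000, 256, db.signal_models)
    data = np.load("confusion_example.npz", allow_pickle=True)  # model descriptors are object arrays
    best_model = np.argmin(data["loss_tensor"], axis=0)         # winning model per (signal, window)
    N_Models = len(db.signal_models)
    confusion = np.array([[np.sum(best_model[s, :] == m) for s in range(N_Models)]
                          for m in range(N_Models)])
    print(confusion)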
def get_models(signal_models, index=None, N_Windows=None):
    models = []
    if index is None:
        for model_info in signal_models:
            models.append(load_model(model_info, N_Windows))
    else:
        models.append(load_model(signal_models[index], N_Windows))
    return models


N_Windows = 2048
signal_ecg = get_signals_tests(signals_tests, signals_models[0].Sd, index=7)[0][0]
signal_resp = get_signals_tests(signals_tests, signals_models[0].Sd, index=27)[0][0]

# predicted_signals = list(range(len(signals_tests)))
# model_errors = list(range(len(signals_tests)))

fi = np.random.randint(0, 50000)
W = 256
[ecg_segments_, y_ecg, _a, end_index] = segment_signal(signal_ecg, W, 0, N_Windows, start_index=fi)
[resp_segments, y_resp, _a, end_index] = segment_signal(signal_resp, W, 0, N_Windows,
number_of_epochs = 10000
signal_dim = 64
hidden_dim = 256
batch_size = 128
mini_batch_size = 16
window_size = 256
signal_directory = 'BIO_ACC_[{0}.{1}]'.format(window_size, batch_size)

signals_tests = db.signal_tests
signals_models = db.signal_models

for i in range(178, 300):
    try:
        SIGNAL_BASE_NAME = "biometric_acc_x_"
        X_train, Y_train, X_test, Y_test = get_signals_tests(signals_tests, signals_models[0].Sd,
                                                             type="biometric", index=i)
        signal_name = SIGNAL_BASE_NAME + str(i)
        signal_info = Signal2Model(signal_name, signal_directory, signal_dim=signal_dim, hidden_dim=hidden_dim,
                                   learning_rate_val=0.05, batch_size=batch_size, window_size=window_size,
                                   number_of_epochs=number_of_epochs, mini_batch_size=mini_batch_size)
        model = GRU.LibPhys_GRU(signal_dim=signal_dim, hidden_dim=hidden_dim, signal_name=signal_name,
                                n_windows=mini_batch_size)
from DeepLibphys.utils.functions.common import get_signals_tests, segment_signal
from DeepLibphys.utils.functions.signal2model import Signal2Model

signal_dim = 64
batch_size = 128
mini_batch_size = 16
window_size = 128
signal_directory = 'DAY_HRV_[128.256]'
n_for_each = 32

signals_tests = db.day_hf
# signals_models = db.signal_models

# Load signals from the RR database
all_signals = get_signals_tests(signals_tests, signal_dim)

hidden_dim_array = [16, 32, 64, 128, 256]
for i, hidden_dim in enumerate(hidden_dim_array, start=1):
    signal_directory = 'DAY_HRV_HF_[' + str(hidden_dim) + '.' + str(window_size) + ']'
    for group_signals in all_signals:
        model_name = 'day_hrv_hf_{0}'.format(i)
        signal2model = Signal2Model(model_name, signal_directory, signal_dim=signal_dim, window_size=window_size,
                                    hidden_dim=hidden_dim, mini_batch_size=mini_batch_size, learning_rate_val=0.05,
    NOISE_AMPLITUDE = np.mean(np.abs(noise)) * 2
    SNR.append(10 * np.log10(SMOOTH_AMPLITUDE / NOISE_AMPLITUDE))
    # print(SNR[-1] * 10, "-" + str(SMOOTH_AMPLITUDE / NOISE_AMPLITUDE))
    # print(np.mean(SNR * 10))
    plt.plot(noise[:1000], 'r', smoothed_signal[:1000], 'k')  # , signals_noise[-1][:1000], 'b'
    plt.show()

print(np.mean(np.array(SNR)))

signals_without_noise = np.array(signals_without_noise)
signals_with_noise = [get_signals_tests(db.ecg_noisy_signals[noisy_index - 1], signal_dim,
                                        type="ecg noise", noisy_index=noisy_index)
                      for noisy_index in range(1, 5)]
signals = list(range(19))
for i in range(19):
    signals[i] = [signals_without_noise[0][i]] + [signals_with_noise[j][0][i] for j in range(4)]

# Train each signal from Fantasia
for i, ecg in zip(range(z, len(ecgs)), ecgs[z:]):
    name = 'noisy_ecg_' + str(i + 1)
    signals = [ecg] + [pna[i] for pna in processed_noise_array]
    signal2model = Signal2Model(name, signal_directory, mini_batch_size=mini_batch_size)
    model = GRU.LibphysMBGRU(signal2model)
    model.train_block(signals, signal2model, n_for_each=n_for_each)
window_size = 256
# number_of_epochs = 1000000

signals_tests = db.signal_tests
signals_models = db.signal_models

signal2model = Signal2Model(signal_name, signal_directory, hidden_dim=hidden_dim, mini_batch_size=mini_batch_size,
                            window_size=window_size, batch_size=batch_size, save_interval=10000,
                            learning_rate_val=0.01)
model = RGRU.LibPhys_RGRU("RGRU_", hidden_dim=hidden_dim, mini_batch_dim=mini_batch_size)

signals = get_signals_tests(signals_tests, signals_models[0].Sd, index=7, regression=True)
plt.plot(signals[0][0])
plt.show()

model.train_signal(signals[0][0], signals[1][0], signal2model, save_distance=100, track_loss=True)
# model.train_signal(x, y, signal2model, save_distance=1000, track_loss=True)
from DeepLibphys.utils.functions import database as db
import seaborn

N = 1024
example_index = [7]
signal_dim = 64
hidden_dim = 256
signal_name = "RGRU_"
signal_directory = 'ECGs_FANTASIA_[256.256]'

# Load model
index = 8
signals_tests = db.signal_tests
signals_models = db.ecg_models
signals = get_signals_tests(signals_tests, signals_models[0].Sd, index=index, regression=False)
model = GRU.LibPhys_GRU(signal_dim=64, signal_name=signals_models[index].dataset_name,
                        hidden_dim=signals_models[index].Hd)
model.load(filetag=model.get_file_tag(-5, -5), dir_name=signals_models[index].directory)

predicted_signal = model.generate_predicted_signal(N, [40], 256)
plot_gru_simple(model, signals[0], predicted_signal)

# signals_tests = db.signal_tests
# signals_models = db.signal_models
# signals = get_signals_tests(signals_tests, signals_models, index=7, regression=True)
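# Optional quick look at the synthesized signal (a sketch, not part of the
# original script; assumes matplotlib, which other scripts here already use):
import matplotlib.pyplot as plt

plt.plot(predicted_signal)
plt.title("Signal generated by " + signals_models[index].dataset_name)
plt.show()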