Example #1
def calculate_loss_tensor(filename,
                          Total_Windows,
                          W,
                          signals_models,
                          signals=None,
                          noisy_index=None):
    n_windows = 250

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Models = len(signals_models)
    Total_Windows = int(N_Windows * n_windows)
    N_Signals = len(signals)

    loss_tensor = np.zeros((N_Models, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    first_test_index = int(len(signals[0]) * 0.33)
    for i, signal in enumerate(signals):
        signal_test = segment_signal(signal[first_test_index:], W, 0.33)
        X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
            signal_test[0], signal_test[1], Total_Windows)

    print("Loading model...")
    model_info = signals_models[0]

    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=n_windows)
    model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2Model)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing Model " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for ECG " + str(s + 1), end=';\n ')
            for w in windows:
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(
                    model.calculate_mse_vector(x_test, y_test))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
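# A hedged usage sketch for calculate_loss_tensor. The ModelInfo container and
# the field values below are hypothetical; they only mirror the attributes the
# function reads (dataset_name, directory, Sd, Hd, name):
#
#   from collections import namedtuple
#   ModelInfo = namedtuple("ModelInfo", "dataset_name directory Sd Hd name")
#   models = [ModelInfo("ecg_1", "ECGs_FANTASIA_[256.256]", 64, 256, "ECG 1")]
#   loss = calculate_loss_tensor("loss_ecg", Total_Windows=1000, W=256,
#                                signals_models=models, signals=ecg_signals)
#   # loss.shape == (len(models), len(ecg_signals), 1000)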
Example #2
def start(s):
    signal_dim = 32
    hidden_dim = 32
    mini_batch_size = 8
    batch_size = 128
    window_size = 256
    save_interval = 250
    mean_tol = 0.4
    std_tol = 0.4
    # signal_directory = 'SIGNAL_ECG_RR_1024_256'
    signal_directory = 'SIGNAL_ECG_RR'

    raw_filenames, Ns, core_names = get_processing_variables()

    # process_and_save_signals(raw_filenames, core_names, Ns, indexes2process=np.array([3]))#np.arange(45, len(raw_filenames)))
    # exit()

    processed_filenames = np.array([
        '../data/processed/MIT/{0}[256].npz'.format(core_name)
        for core_name in core_names
    ])

    ind = [3]
    for i, filename in enumerate(processed_filenames[ind]):
        npzfile = np.load(filename)
        signal, core_name = npzfile["signal"], npzfile["core_name"]
        running_ok = False
        signal2model = Signal2Model(core_name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    number_of_epochs=10000,
                                    hidden_dim=hidden_dim,
                                    learning_rate_val=0.01,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size + 1,
                                    save_interval=save_interval,
                                    lower_error=1e-10,
                                    count_to_break_max=5,
                                    n_signals=2)

        [x_train,
         y_train], [xRR,
                    yRR] = prepare_for_RR_data([signal[:int(len(signal) / 3)]],
                                               signal2model,
                                               overlap=0.11)

        indexes = range(len(x_train) - (len(x_train) % mini_batch_size))

        signal2model.batch_size = len(indexes)
        signal2model.window_size = window_size

        print("Compiling Model {0}".format(signal2model.model_name))

        model = GRU.LibphysMultisignalGRU(signal2model)
        # model.load(signal2model.signal_directory, model.get_file_tag(-5, -5))
        returned = model.train(x_train[indexes], y_train[indexes],
                               xRR[indexes], yRR[indexes])
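# The `indexes` range above trims the training set to the largest multiple of
# mini_batch_size, which the mini-batch GRU requires. A standalone sketch of
# the same arithmetic (plain numpy, no DeepLibphys dependency):
#
#   x = np.arange(19)                     # pretend we have 19 training windows
#   mbs = 8                               # mini_batch_size
#   idx = range(len(x) - (len(x) % mbs))  # keeps the first 16 windows
#   assert len(x[idx]) == 16              # largest multiple of 8 <= 19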
Example #3
def train_fantasia(hidden_dim, mini_batch_size, batch_size, window_size,
                   signal_directory, indexes, signals, save_interval,
                   signal_dim):
    models = db.resp_64_models
    for i, signal, model_info in zip(indexes, signals, models):
        name = 'resp_' + str(i)
        signal2model = Signal2Model(name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval)
        print("Compiling Model {0}".format(name))
        model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2model)
        print("Initiating training... ")
        model.load(dir_name=model_info.directory)
        model.train(signal, signal2model)
def try_calculate_loss_tensor(filename,
                              Total_Windows,
                              W,
                              signals_models,
                              signals=None,
                              noisy_index=None):
    print("Loading model...")
    n_windows = 250
    modelx_info = signals_models[0]

    signal2Model = Signal2Model(modelx_info.dataset_name,
                                modelx_info.directory,
                                signal_dim=modelx_info.Sd,
                                hidden_dim=modelx_info.Hd,
                                mini_batch_size=n_windows)
    model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2Model)

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Models = len(signals_models)
    Total_Windows = int(N_Windows * n_windows)
    N_Signals = len(signals)

    loss_tensor = np.zeros((N_Models, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    first_test_index = int(len(signals[0]) * 0.33)
    for i, signal in enumerate(signals):
        signal_test = segment_signal(signal[first_test_index:], W, 0.33)
        X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
            signal_test[0], signal_test[1], Total_Windows)

    # NOTE: Pool workers operate on pickled copies of their arguments, so the
    # writes interate_loss_calculus makes into loss_tensor do not propagate
    # back to this parent array unless shared memory (or the starmap return
    # value) is used to collect the results.
    pool = Pool(8)
    pool.starmap(
        interate_loss_calculus,
        zip(signals_models, range(len(signals_models)), repeat(model),
            repeat(X_matrix), repeat(Y_matrix), repeat(n_windows),
            repeat(windows), repeat(N_Signals), repeat(loss_tensor)))

    if not os.path.isdir(os.path.dirname(filename + ".npz")):
        os.mkdir(os.path.dirname(filename + ".npz"))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
def create_history(subject):

    model_info = db.ecg_64_models[subject]
    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd)
    model = GRU.LibphysSGDGRU(signal2Model)

    # plt.ion()
    # font = {'family': 'lato',
    #         'weight': 'bold',
    #         'size': 40}
    #
    # matplotlib.rc('font', **font)
    x = [0]
    i = 0
    window_size = 512
    fz = 250
    N = 3000
    titles = []
    signals = []
    for epoch in [-1, 20, 30, 50, 80, 410]:
        signal = [np.random.randint(0, 63)]

        if epoch < 0:
            file_tag = model.get_file_tag(epoch, epoch)
        else:
            file_tag = model.get_file_tag(0, epoch)

        model.load(file_tag=file_tag, dir_name=model_info.directory)
        print("Processing epoch " + str(epoch))
        for i in range(N):
            # print(i)
            sample, _ = model.generate_online_predicted_signal(
                signal, window_size, uncertaintly=0.01)
            signal.append(sample)
            x.append((i + 1) / fz)

        titles.append("Epoch {0}".format(epoch))
        signals.append(np.array(signal))

    np.savez("../data/history_of_{0}.npz".format(subject),
             titles=titles,
             signals=signals,
             t=x)
    return [titles, signals, x]
def synthetize(models,
               uncertaintly=0.01,
               filename="synthesized_signals_1024.npz"):
    mini_bs = 256
    plt.ion()
    signals = []
    signal2Model = Signal2Model(models[0].dataset_name,
                                models[0].directory,
                                signal_dim=models[0].Sd,
                                hidden_dim=models[0].Hd,
                                mini_batch_size=mini_bs)
    model = GRU.LibphysMBGRU(signal2Model)
    for model_info in models:
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing " + model_info.name)

        # plt.ion()
        # font = {'family': 'lato',
        #         'weight': 'bold',
        #         'size': 40}
        #
        # matplotlib.rc('font', **font)
        x = [0]
        i = 0

        signal = np.random.randint(0, 63, size=(mini_bs, 1), dtype=np.int32)
        window_size = 512
        fz = 250
        N = 512
        # while True:
        for i in range(N):
            # print(i)
            y = model.generate_online_predicted_vector(
                signal, window_size, uncertaintly=uncertaintly)
            # signal is a (mini_bs, t) int matrix; concatenate the predicted
            # samples (assumed one per mini-batch row) as a new column --
            # ndarray has no .append method
            signal = np.append(signal, np.reshape(y, (mini_bs, 1)), axis=1)
        plt.clf()
        plt.plot(signal.T)
        plt.pause(0.05)
        signals.append(np.array(signal))

        np.savez("img/" + model_info.dataset_name + ".npz",
                 synth_signal=signal)
    np.savez(filename, synth_signals=signals)
Example #7
def train_FMH(hidden_dim, mini_batch_size, batch_size, window_size,
              signal_directory, indexes, signals, save_interval, signal_dim):
    for i, signal in zip(indexes, signals[indexes]):
        name = 'emg_' + str(i + 1)
        signal2model = Signal2Model(name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    tolerance=1e-12)

        returned = False
        while not returned:
            print("Compiling Model {0}".format(name))
            model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2model)
            print("Initiating training... ")
            returned = model.train(signal, signal2model, overlap=0.05)
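# Note: model.train(...) is treated here as returning a success flag; when it
# comes back False the model is re-instantiated and training restarts from a
# fresh compilation, so the while loop retries until a run converges.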
Example #8
def calculate_loss_tensor(filename,
                          signals_models=[],
                          test_signals=None,
                          labels=None):
    X_Windows = test_signals[:, :-1]
    Y_Windows = test_signals[:, 1:]
    N_Signals = np.shape(X_Windows)[0]

    # one window per test signal; evaluate them all as a single mini-batch
    n_windows = N_Signals
    print("Loading model...")
    model_info = signals_models[0]
    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=n_windows)
    model = GRU.LibphysMBGRU(signal2Model)

    loss_tensor = np.zeros((len(signals_models), N_Signals))
    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing " + model_info.name)

        # for s in range(N_Signals):
        # if labels is not None:
        #     print("Calculating loss for " + labels[s], end=';\n ')
        # else:
        #     print("Calculating loss for " + signals_models[s].name, end=';\n ')

        loss_tensor[m, :] = np.asarray(
            model.calculate_mse_vector(X_Windows, Y_Windows))
        print(np.mean(loss_tensor[m, :]))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
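# The saved loss matrix is typically read for biometric identification: the
# model (subject) with the lowest loss on a test window is the predicted
# identity. A hedged sketch, assuming one test window per subject as above:
#
#   loss = calculate_loss_tensor("loss_mat", signals_models=models,
#                                test_signals=windows_matrix)
#   predicted = np.argmin(loss, axis=0)            # best model per signal
#   accuracy = np.mean(predicted == np.arange(loss.shape[1]))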
Example #9
    # print("Saving signals...")
    #
    # np.savez(noise_filename,
    #          processed_noise_array=processed_noise_array, SNRs = SNRs)

    print("Loading signals...")
    # noise_filename = "../data/ecg_noisy_signals.npz"
    processed_noise_array = load_noisy_fantasia_signals(SNRx=[6, 9])
    # processed_noise_array, SNRs = npzfile["processed_noise_array"], npzfile["SNRs"]

    # snr1, snr2 = 7, 5
    # SNRs = [8]#, SNRs[5]]
    signal2model = Signal2Model("",
                                signal_directory,
                                signal_dim=signal_dim,
                                hidden_dim=hidden_dim,
                                batch_size=batch_size,
                                mini_batch_size=mini_batch_size,
                                window_size=window_size)
    model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2model)

    SNRs = ["RAW"] + [str(i) for i in range(9, 6, -1)]
    for SNR, signals_with_noise in zip(SNRs, processed_noise_array):
        for i, signal in zip(range(len(signals_with_noise)),
                             signals_with_noise):
            running_ok = False
            if SNR != "RAW":
                u = i + 1
                # if np.logical_and(SNR == 9, u > 4) and np.logical_and(SNR == 9, u < 21):
                name = 'ecg_' + str(u) + '_SNR_' + str(SNR)
                print(name)
def train_fantasia(hidden_dim, mini_batch_size, batch_size, window_size,
                   signal_directory, indexes, signals, save_interval,
                   signal_dim):
    for i, signal in zip(indexes, signals[indexes]):
        name = 'ecg_' + str(i + 1)

        signal2model = Signal2Model(name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    lower_error=3e-5,
                                    lower_learning_rate=1e-4,
                                    count_to_break_max=30)
        print("Compiling Model {0}".format(name))

        last_index = int(len(signal) * 0.33)
        x_train, y_train = prepare_test_data([signal[22500:last_index]],
                                             signal2model,
                                             mean_tol=0.9,
                                             std_tol=0.5)

        # fig, ax = plt.subplots()
        # plt.subplots_adjust(bottom=0.2)
        # l, = plt.plot(x_train[0], lw=2)
        #
        # class BooleanSwitcher(object):
        #     indexes = []
        #     ind = 0
        #
        #     def yes(self, event):
        #         if self.ind < len(x_train):
        #             self.indexes.append(self.ind)
        #             self.ind += 1
        #         if self.ind < len(x_train):
        #             l.set_ydata(x_train[self.ind])
        #             plt.draw()
        #         else:
        #             self.crop()
        #             plt.close()
        #
        #     def no(self, event):
        #         self.ind += 1
        #         if self.ind < len(x_train):
        #             l.set_ydata(x_train[self.ind])
        #             plt.draw()
        #         else:
        #             self.crop()
        #             plt.close()
        #
        #     def crop(self):
        #         c = len(self.indexes) % 16
        #         self.indexes = self.indexes[:(len(self.indexes) - c)]
        # callback = BooleanSwitcher()
        # axprev = plt.axes([0.7, 0.05, 0.1, 0.075])
        # axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
        # by = Button(axnext, 'Yes')
        # by.on_clicked(callback.yes)
        # bn = Button(axprev, 'No')
        # bn.on_clicked(callback.no)
        # plt.show()

        model = GRU.LibphysMBGRU(signal2model)
        # try:
        #
        #     # if i < 20:
        #     #     old_directory = "CLEAN_ECG_BIOMETRY[128.1024]"
        #     #     old_name = 'clean_ecg' + str(i+1)
        #     # else:
        #     old_directory = "BIOMETRY[256.1024]"
        #     old_name = name
        #
        #     old_tag= 'GRU_{0}[{1}.{2}.{3}.{4}.{5}]'. \
        #         format(old_name, signal_dim, hidden_dim, -1, -5, -5)
        #     model.load(old_tag, old_directory)
        # except:
        #     pass

        print("Initiating training... ")
        model.model_name = 'ecg_' + str(i + 1)

        model.start_time = time.time()
        # returned = model.train_model(x_train[callback.indexes], y_train[callback.indexes], signal2model)
        model.load(model.get_file_tag(), signal_directory)
        returned = model.train_model(x_train, y_train, signal2model)
        if returned:
            model.save(signal2model.signal_directory,
                       model.get_file_tag(-5, -5))
Example #11
mini_batch_size = 16
window_size = 256
# number_of_epochs = 1000000

#Noisy signals
for noisy_index in [2]:  # range(3, 5)
    signals_tests = db.ecg_noisy_signals[noisy_index]
    signals_models = db.signal_models
    #
    # #   Load signals from database
    signals = get_signals_tests(signals_tests, signals_models[0].Sd, type="ecg noise", noisy_index=noisy_index)

    # train each signal from fantasia
    for i in range(9, 19):
        name = 'bio_noise_' + str(noisy_index) + '_ecg_' + str(i)
        signal2model = Signal2Model(name, signal_directory, batch_size=batch_size)
        model = GRU.LibPhys_GRU(signal_dim=signal_dim, hidden_dim=hidden_dim,
                                signal_name=name, n_windows=mini_batch_size)
        model.save(signal_directory, model.get_file_tag(-1, -1))
        model.train_signals(signals[0][i], signals[1][i], signal2model,
                            decay=0.95, track_loss=False)


# Normal + noisy ECGs
signal_dim = 64
hidden_dim = 256
signal_directory = 'BIOMETRIC_ECGs_[20.256]'
n_for_each = 16
mini_batch_size = n_for_each
signals_models = db.signal_models

signals_with_noise = [get_signals_tests(db.ecg_noisy_signals[noisy_index-1], signals_models[0].Sd, type="ecg noise",
                                        noisy_index=noisy_index) for noisy_index in range(1,5)]
def calculate_loss_tensor(filename,
                          Total_Windows,
                          W,
                          signals_models,
                          signals=None,
                          overlap=0.33):

    if Total_Windows > 256 * 4:
        n_windows = 256
    elif Total_Windows > 256:
        n_windows = int(Total_Windows / 4)
    else:
        n_windows = Total_Windows

    windows = np.arange(0, Total_Windows - n_windows + 1, n_windows)
    N_Windows = len(windows)
    N_Models = len(signals_models)
    Total_Windows = int(N_Windows * n_windows)
    N_Signals = len(signals)

    loss_tensor = np.zeros((N_Models, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    print("mini_batch: {0}, Total: {1}".format(n_windows, Total_Windows))
    for i, signal in enumerate(signals):
        signal_test = segment_signal(signal[int(len(signal) * 0.33):], W,
                                     overlap)
        X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
            signal_test[0], signal_test[1], Total_Windows)

    print("Loading model...")
    model_info = signals_models[0]

    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=n_windows)
    model = DeepLibphys.models.LibphysMBGRU.LibphysMBGRU(signal2Model)

    for m, model_info in enumerate(signals_models):
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing Model " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for Signal " + str(s + 1), end=';\n ')
            for w in windows:
                # index = w * n_windows
                x_test = X_matrix[s, w:w + n_windows, :]
                y_test = Y_matrix[s, w:w + n_windows, :]
                loss_tensor[m, s, w:w + n_windows] = np.asarray(
                    model.calculate_mse_vector(x_test, y_test))
                # loss_tensor[m, s, w:w + n_windows] = np.asarray(model.calculate_loss_vector(x_test, y_test))

        # if not os.path.isdir(os.path.dirname(filename + ".npz")):
        #     os.mkdir(os.path.dirname(filename + ".npz"))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
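# The window bookkeeping above evaluates each model/signal pair in chunks of
# n_windows. For example, with Total_Windows = 1000 the function picks
# n_windows = int(1000 / 4) = 250, and
#
#   windows = np.arange(0, 1000 - 250 + 1, 250)  # -> array([0, 250, 500, 750])
#
# so the loss is computed in four mini-batches of 250 windows each.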
# example_index = [7]
hidden_dim = 1048
signal_name = "ecg_7"
signal_directory = 'ECGs_FANTASIA_[256.256]'
batch_size = 32
mini_batch_size = 8
window_size = 256
# number_of_epochs = 1000000

signals_tests = db.signal_tests
signals_models = db.signal_models

signal2model = Signal2Model(signal_name,
                            signal_directory,
                            hidden_dim=hidden_dim,
                            mini_batch_size=mini_batch_size,
                            window_size=window_size,
                            batch_size=batch_size,
                            save_interval=10000,
                            learning_rate_val=0.01)

model = RGRU.LibPhys_RGRU("RGRU_",
                          hidden_dim=hidden_dim,
                          mini_batch_dim=mini_batch_size)
signals = get_signals_tests(signals_tests,
                            signals_models[0].Sd,
                            index=7,
                            regression=True)
plt.plot(signals[0][0])
plt.show()
model.train_signal(signals[0][0],
                   signals[1][0],
                   signal2model)
Example #14
# for i in range(2, 19):
#     name = 'biometry_with_noise_' + str(i)
#     signal2model = Signal2Model(name, signal_directory, mini_batch_size=mini_batch_size)
#     model = GRU.LibphysMBGRU(signal2model)
#     if i==2:
#         model.load(dir_name=signal_directory, file_tag=model.get_file_tag(0,1000))
#
#     model.train_block(signals[i], signal2model, n_for_each=n_for_each, loss_interval=1)

signal_dim = 64
hidden_dim = 256
mini_batch_size = 15
batch_size = 150
n_for_each = 150 // 5
signal_directory = 'NOISE_ECGs_[{0}.{1}]'.format(batch_size, window_size)
noise_filename = "../data/ecg_noisy_signals.npz"
npzfile = np.load(noise_filename)
processed_noise_array, SNRs = npzfile["processed_noise_array"], npzfile["SNRs"]

ecgs = np.load("signals_without_noise.npz")['signals_without_noise']
z = 0

for i, ecg in enumerate(ecgs[z:], start=z):
    name = 'noisy_ecg_' + str(i + 1)
    signals = [ecg] + [pna[i] for pna in processed_noise_array]
    signal2model = Signal2Model(name,
                                signal_directory,
                                mini_batch_size=mini_batch_size)
    model = GRU.LibphysMBGRU(signal2model)
    model.train_block(signals, signal2model, n_for_each=n_for_each)
    # train(ecg, signal2model)
示例#15
0
def filter_loss_tensor(signals,
                       loss_tensor,
                       all_models_info,
                       W,
                       min_windows=512,
                       overlap=0.33,
                       max_tol=0.8,
                       std_tol=0.5,
                       batch_percentage=0,
                       already_cut=False):
    model_indexes = []
    list_of_windows_indexes = []
    number_of_windows = []
    do_it = True
    try:
        signals[0][0][0]
    except:
        do_it = False

    for i, model_info in enumerate(all_models_info):
        signal2model = Signal2Model(model_info.dataset_name,
                                    "",
                                    signal_dim=model_info.Sd,
                                    hidden_dim=model_info.Hd,
                                    batch_size=np.shape(loss_tensor)[2],
                                    window_size=W)
        if do_it:
            s = signals[i]
        else:
            s = [signals[i]]
        indexes, _, _, _, _ = get_clean_indexes(s,
                                                signal2model,
                                                overlap=overlap,
                                                max_tol=max_tol,
                                                std_tol=std_tol,
                                                already_cut=already_cut)
        indexes = np.array(indexes)
        if batch_percentage > 0:
            all_indexes, _, _, _, _ = get_clean_indexes(signals[i],
                                                        signal2model,
                                                        overlap=overlap,
                                                        max_tol=0.7)
            all_indexes = all_indexes[int(len(all_indexes) *
                                          batch_percentage):]

            new_indexes = []
            for ind in indexes[indexes >= all_indexes[0]]:
                if np.any(all_indexes == ind):
                    new_indexes.append(ind)

            indexes = new_indexes

        if len(indexes) >= min_windows + 20:
            model_indexes.append(i)
            list_of_windows_indexes.append(indexes)
            number_of_windows.append(len(indexes))

    li = 0
    new_models_indexes = []
    indexes_2_remove = []
    Total = min(number_of_windows)
    loss_list = np.zeros((len(model_indexes), len(model_indexes), Total - 1))
    for i, inds in zip(model_indexes, list_of_windows_indexes):
        first_index = 0
        if len(inds) > Total + 20:
            first_index = np.random.randint(0, len(inds) - Total - 20 + 1)
        end_index = first_index + Total - 1

        if inds[end_index] > np.shape(loss_tensor)[2]:
            first_index = 0
            end_index = first_index + Total - 1

        if inds[end_index] < np.shape(loss_tensor)[2]:
            L = loss_tensor[i][model_indexes][:, inds[first_index:end_index]]
            if np.shape(L)[1] > 0:
                loss_list[li] = L
                new_models_indexes.append(i)
        if np.all(loss_list[li] == 0):
            indexes_2_remove.append(li)

        li += 1

    if indexes_2_remove:
        mask = mask_without_indexes(loss_list, indexes_2_remove)
        loss_list = loss_list[mask][:, mask]

    model_indexes = new_models_indexes
    print("Rejected {0} people, total of windows = {1}".format(
        len(all_models_info) - len(model_indexes), Total))

    if model_indexes:
        return loss_list, np.array(all_models_info)[np.array(model_indexes)]
    else:
        print("All models were rejected")
        return [], []
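# A hedged usage sketch for filter_loss_tensor (signals, loss_tensor and
# all_models_info as produced by the loss-tensor functions above; W is the
# window width):
#
#   loss_list, kept_models = filter_loss_tensor(signals, loss_tensor,
#                                               all_models_info, W=256,
#                                               min_windows=512)
#   if len(kept_models) > 0:
#       print(np.shape(loss_list))   # (n_kept, n_kept, Total - 1)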
Example #16
batch_size = 128
window_size = 512
save_interval = 10000
signal_directory = "ECG_CLUSTER[256.512]"

print("Loading signals...")
mit_sinus = np.load('../data/processed/biometry_mit_sinus[256].npz')['signals']
mit_long_term = np.load(
    '../data/processed/biometry_mit_long_term[256].npz')['signals']
cybhi_file = np.load('../data/processed/biometry_cybhi[256].npz')
cybhi_1, cybhi_2 = cybhi_file['train_signals'], cybhi_file['test_signals']
fantasia = np.load("../data/processed/FANTASIA_ECG[256].npz")['x_train']

signal2model = Signal2Model('ecg_26_SNR_12',
                            signal_directory,
                            signal_dim=signal_dim,
                            hidden_dim=hidden_dim,
                            batch_size=batch_size,
                            mini_batch_size=mini_batch_size,
                            window_size=window_size,
                            save_interval=save_interval)

model = GRU.LibphysMBGRU(signal2model)
model.load(model.get_file_tag(), signal_directory)
model.model_name = "ecg_abstraction"

model.train_block(mit_sinus.tolist() + mit_long_term.tolist() +
                  cybhi_1.tolist() + cybhi_2.tolist(),
                  signal2model,
                  n_for_each=mini_batch_size)
#             model.start_time = time.time()
#             returned = model.train_model(x_train, y_train, signal2model)
#             # if i == 16:
#             #     model.load(dir_name=signal2model.signal_directory, file_tag=model.get_file_tag(0, 1000))
#             if returned:
#                 model.save(signal2model.signal_directory, model.get_file_tag(-5, -5))
#             running_ok = returned

x_trains, y_trains, signals_2_models = [], [], []
for i, signal, core_name in zip(indexes, signals[indexes],
                                core_names[indexes]):
    signal2model = Signal2Model(core_name,
                                signal_directory,
                                signal_dim=signal_dim,
                                hidden_dim=hidden_dim,
                                batch_size=batch_size + 5,
                                mini_batch_size=mini_batch_size,
                                window_size=window_size,
                                save_interval=save_interval,
                                lower_error=1e-6,
                                count_to_break_max=15)
    # last_index = int(len(signal) * 0.33)
    # x_train, y_train = prepare_test_data([signal[:last_index]], signal2model, mean_tol=0.9, std_tol=0.2)
    # x_train, y_train = windows_selection(x_train, y_train)
    # x_trains.append(x_train)
    # y_trains.append(y_train)
    signals_2_models.append(signal2model)

# np.savez("x_trains__.npz", x_trains=x_trains, y_trains=y_trains)
file = np.load("x_trains__.npz")
x_trains, y_trains = file["x_trains"], file["y_trains"]
signal_directory = 'BIO_ACC_[{0}.{1}]'.format(window_size, batch_size)

signals_tests = db.signal_tests
signals_models = db.signal_models

for i in range(178, 300):
    try:
        SIGNAL_BASE_NAME = "biometric_acc_x_"
        X_train, Y_train, X_test, Y_test = get_signals_tests(
            signals_tests, signals_models[0].Sd, type="biometric", index=i)
        signal_name = SIGNAL_BASE_NAME + str(i)
        signal_info = Signal2Model(signal_name,
                                   signal_directory,
                                   signal_dim=signal_dim,
                                   hidden_dim=hidden_dim,
                                   learning_rate_val=0.05,
                                   batch_size=batch_size,
                                   window_size=window_size,
                                   number_of_epochs=number_of_epochs,
                                   mini_batch_size=mini_batch_size)

        model = GRU.LibPhys_GRU(signal_dim=signal_dim,
                                hidden_dim=hidden_dim,
                                signal_name=signal_name,
                                n_windows=mini_batch_size)
        model.save(signal_directory, model.get_file_tag(-1, -1))

        model.train_signals(X_train[1],
                            Y_train[1],
                            signal_info,
                            decay=0.95,
                            track_loss=False)
    except Exception:
        pass
# signals_models = db.signal_models

#   Load signals from rr database
all_signals = get_signals_tests(signals_tests, signal_dim)
# i = 1
hidden_dim_array = [16, 32, 64, 128, 256]
for i, hidden_dim in enumerate(hidden_dim_array, start=1):
    signal_directory = 'DAY_HRV_HF_[' + str(hidden_dim) + '.' + str(
        window_size) + ']'
    for group_signals in all_signals:
        model_name = 'day_hrv_hf_{0}'.format(i)
        signal2model = Signal2Model(model_name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    window_size=window_size,
                                    hidden_dim=hidden_dim,
                                    mini_batch_size=mini_batch_size,
                                    learning_rate_val=0.05,
                                    save_interval=500)
        model = GRU.LibphysMBGRU(signal2model)

        # model.load(dir_name=signal_directory, file_tag=model.get_file_tag(-5,-5))
        model.train_block(group_signals,
                          signal2model,
                          n_for_each=n_for_each,
                          random_training=True)
#
# signal_dim = 64
# hidden_dim = 256
# batch_size = 128
# mini_batch_size = 16
Example #20
signal_dim = model_info.Sd
hidden_dim = model_info.Hd
mini_batch_size = 32
batch_size = 256
window_size = 1024
save_interval = 250
signal_directory = "ECG_CLUSTER[128.1024]"

signal_paths = []
for file in os.listdir("../data/processed/ALL/"):
    if file.endswith(".npz") and file != "history.npz":
        signal_paths.append(file)

signal2model = Signal2Model("generic_ecg", signal_directory, signal_dim=signal_dim, hidden_dim=hidden_dim,
                            batch_size=batch_size,
                            mini_batch_size=mini_batch_size, window_size=window_size,
                            save_interval=save_interval, number_of_epochs=2000, lower_error=1e-9,
                            count_to_break_max=15, learning_rate_val=0.01)

model = GRU.LibphysMBGRU(signal2model)
limit = 3000
# model.start_time = time.time()
print(1)
i0 = 999
# history_of_indexes = {}
model.load(dir_name=signal_directory, file_tag=model.get_file_tag(i0, 0))
# history_of_indexes = np.load("../data/processed/ALL/history.npz")["history_of_indexes"]
#, history_of_indexes=history_of_indexes)
for i in range(i0, limit):
    returned = False
    while not returned:
def start(s):
    signal_dim = 256
    hidden_dim = 256
    mini_batch_size = 8
    batch_size = 256
    window_size = 1024
    save_interval = 250
    signal_directory = 'ECG_BIOMETRY[MIT]'

    raw_filenames, Ns, core_names = get_processing_variables()

    # ind = np.array([0, 2,       10,         15, 29, 33,                 48, 50,     54,     57, 58, 59, 64, 68])
    # ind = np.array([51, 55, 59, 64, 0] + [10, 11, 29, 33])
    # process_and_save_signals_2(raw_filenames, core_names, Ns, indexes2process=[ind[s]])#np.arange(45, len(raw_filenames)))
    ind = np.array([29, 59, 64])
    # exit()

    processed_filenames = np.array([
        '../data/processed/MIT/{0}[256].npz'.format(core_name)
        for core_name in core_names
    ])
    x_trains, y_trains, signals_2_models = [], [], []

    # indexes = np.array([1, 7, 11, 12, 20, 30, 32, 33, 42] + list(range(Ns[0] + 2, sum(Ns) + 1))) - 1
    # s = indexes.tolist().index(29)
    # ind = np.arange(0, len(processed_filenames))
    # s = 2
    step = 1
    e = s * step + step
    ind = ind[s * step:e]
    # indexes = [48, 49]
    # indexes = np.array([0, 6, 11, 17, 26, 36, 37, 38])#, 41, 51, 55])
    # ind = np.array([indexes[s]])
    print(str(np.arange(s * step, e)) + " - " + str(ind))

    for i, filename in enumerate(processed_filenames[ind]):
        npzfile = np.load(filename)
        signal, core_name = npzfile["signal"], npzfile["core_name"]
        running_ok = False
        signal2model = Signal2Model(core_name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    lower_error=1e-10,
                                    count_to_break_max=15)
        last_index = int(len(signal) * 0.33)
        std_tol = 0.1
        mean_tol = 0.02
        n_runs = 0
        plt.plot(signal[:last_index])
        plt.figure()
        plt.plot(signal[last_index:])
        plt.figure()
        x_train, y_train = prepare_several_special_data([signal[:last_index]],
                                                        signal2model,
                                                        overlap=0.11,
                                                        mean_tol=mean_tol,
                                                        std_tol=std_tol)
        while not running_ok:
            print("Initiating training... ")
            # x_train, y_train = prepare_test_data([signal[:last_index]], signal2model, mean_tol=mean_tol, std_tol=std_tol)
            # x_train, y_train = prepare_special_data([signal[:last_index]], signal2model, mean_tol=mean_tol, std_tol=std_tol)

            print("done")
            print("Compiling Model {0}".format(signal2model.model_name))

            # if n_runs < 1:
            #     model = try_to_load(core_name, signal2model)
            # else:
            model = GRU.LibphysMBGRU(signal2model)

            if model:
                indexes = range(
                    len(x_train) - (len(x_train) % mini_batch_size))

                returned = model.train_model(x_train[indexes],
                                             y_train[indexes], signal2model)
                # if i == 16:
                if returned:
                    model.save(signal2model.signal_directory,
                               model.get_file_tag(-5, -5))
                else:
                    std_tol += 0.05

                running_ok = returned
                n_runs += 1
            else:
                running_ok = True
import DeepLibphys.utils.functions.libphys_GRU as GRU
from DeepLibphys.utils.functions.common import get_fantasia_dataset
from DeepLibphys.utils.functions.signal2model import Signal2Model
import theano

fantasia_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
signals = [
    Signal2Model("ecg_" + str(i),
                 "FANTASIA[128.256]_",
                 save_interval=1000,
                 number_of_batches=1,
                 batch_size=256) for i in fantasia_list
]

X_train, Y_train = get_fantasia_dataset(signals[0].signal_dim, fantasia_list,
                                        'Fantasia/ECG/mat/',
                                        peak_into_data=False)

signal2model = Signal2Model("ecg_old_fantasia",
                            "FANTASIA[32x10.256]",
                            save_interval=100,
                            hidden_dim=256,
                            batch_size=len(X_train) * 32)

model = GRU.LibPhys_GRU(signal2model.signal_dim,
                        hidden_dim=signal2model.hidden_dim,
                        signal_name=signal2model.signal_name)
model.save(signal2model.signal_directory, model.get_file_tag(-1, -1))
model.train_signals(X_train, Y_train, signal2model)

# for i in [0, 1, 2, 3, 4, 5, 7]:
#     model = GRU.LibPhys_GRU(signals[i].signal_dim, hidden_dim=signals[i].hidden_dim, signal_name=signals[i].signal_name)
#     model.save(signals[i].signal_directory, model.get_file_tag(-1, -1))
#     model.train_signal(X_train[i], Y_train[i], signals[i], track_loss=False, save_distance=100)
Example #23
original_signals = np.array([
    quantize_signal(original_signals[i], 256)
    for i in range(original_signals.shape[0])
])

filtered_signals = np.array([
    quantize_signal(filtered_signals[i], 256)
    for i in range(filtered_signals.shape[0])
])

print("Quantization Finished")

save_directory = "bento"
for i in range(original_signals.shape[0]):
    sig_model = Signal2Model("GRU_Filter_all_" + str(i),
                             save_directory,
                             signal_dim=256,
                             mini_batch_size=16,
                             window_size=512)
    train_block([original_signals[i]], [filtered_signals[i]],
                n_for_each=128,
                signal2model=sig_model)
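# Note: feeding the raw signal as input and the filtered signal as target makes
# train_block fit a denoising mapping (noisy window in, clean window out), one
# GRU model per record in this loop.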

# test_signals = np.array([loadmat(get_fantasia_full_paths()[i])['val'][0][30000:60000] for i in range(len(get_fantasia_full_paths()))])
# model = LibphysSGDGRU.LibphysSGDGRU(sig_model)
# model.load(model.get_file_tag(-5, -5), sig_model.signal_directory)
# predicted = generate_predicted_signal(sig_model, starting_signal=test_signals[0],window_seen_by_GRU_size=1024)
# Predict? Do we have to do save and load?

# plt.subplot(311)
# plt.plot(original_signals[0])
# plt.subplot(312)
# all signals -> np.array([loadmat(get_fantasia_full_paths()[i])['val'][0] for i in range(len(get_fantasia_full_paths()))])
print(original_signals.shape)

start = time.time()
filtered_signals = remove_noise(original_signals)
end = time.time()
print("Time Required to remove noise:", end - start)

original_signals = quantize_signal(original_signals, 256)

filtered_signals = quantize_signal(filtered_signals, 256)

save_directory = "bento"
sig_model = Signal2Model("GRU_Filter_128",
                         save_directory,
                         signal_dim=256,
                         mini_batch_size=32,
                         window_size=512)
# maybe change mbs to 16, tune overlap

#print("Training...")
#train_block([original_signals], [filtered_signals], overlap=0.2, n_for_each=128, signal2model=sig_model)

test_signals = loadmat(get_fantasia_full_paths()[0])['val'][0][30000:60000]
test_signals = quantize_signal(test_signals, 256)

model = LibphysSGDGRU.LibphysSGDGRU(sig_model)
model.load(model.get_file_tag(-5, -5), sig_model.signal_directory)
predicted = generate_predicted_signal(model,
                                      3000,
                                      starting_signal=test_signals,
                                      window_seen_by_GRU_size=1024)
import DeepLibphys.utils.functions.libphys_GRU as GRU
from DeepLibphys.utils.functions.common import get_fantasia_dataset
from DeepLibphys.utils.functions.signal2model import Signal2Model

fantasia_list = [2, 3, 4, 5, 6, 7, 8, 9, 10]
signals = [
    Signal2Model("resp_" + str(i),
                 "RESP_FANTASIA[128.256]",
                 save_interval=1000,
                 number_of_batches=1,
                 batch_size=256) for i in fantasia_list
]

X_train, Y_train = get_fantasia_dataset(signals[0].signal_dim,
                                        fantasia_list,
                                        'Fantasia/RESP/mat/',
                                        peak_into_data=False)

signal2model = Signal2Model("resp_[1.2.3.4.5.6.7.8.9.10]_old_fantasia",
                            "RESP_FANTASIA[1000.256]",
                            save_interval=1000,
                            hidden_dim=256,
                            batch_size=500)

model = GRU.LibPhys_GRU(signal2model.signal_dim,
                        hidden_dim=signal2model.hidden_dim,
                        signal_name=signal2model.signal_name)
model.save(signal2model.signal_directory, model.get_file_tag(-1, -1))
model.train_signals(X_train, Y_train, signal2model)

for i in [0, 1, 2, 3, 4, 5, 7]:
    model = GRU.LibPhys_GRU(signals[i].signal_dim,
                            hidden_dim=signals[i].hidden_dim,
                            signal_name=signals[i].signal_name)
    model.save(signals[i].signal_directory, model.get_file_tag(-1, -1))
    model.train_signal(X_train[i], Y_train[i], signals[i],
                       track_loss=False, save_distance=100)
Example #26
import numpy as np

# Assumed companion imports for this fragment (module paths inferred from the
# other examples in this collection, not confirmed API):
# import DeepLibphys.models.LibphysMBGRU as GRU
# import DeepLibphys.utils.functions.database as db
# from DeepLibphys.utils.functions.signal2model import Signal2Model


def exists(where, what):
    try:
        where.index(what)
        return True
    except ValueError:
        return False
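# For the strings and lists this script passes in, exists(where, what) is just
# the built-in membership test, so an equivalent one-liner would be:
#
#   def exists(where, what):
#       return what in where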


signals_models = (db.ecg_1024_256_RAW + db.cybhi_512_M1 + db.cybhi_512_M2 +
                  db.mit_1024)
model_info = signals_models[0]

signal2Model = Signal2Model(model_info.dataset_name,
                            model_info.directory,
                            signal_dim=model_info.Sd,
                            hidden_dim=model_info.Hd)

model = GRU.LibphysMBGRU(signal2Model)
times_in_hours = [[], [], [], [], []]
for model_info in signals_models:
    try:
        model.model_name = model_info.dataset_name
        dirx = model_info.directory
        model.load(dir_name=dirx)
        hours = model.train_time / (3.6 * 10**6)  # milliseconds -> hours
        times_in_hours[0].append(
            "cybhi" if exists(model.model_name, "cybhi") else
            "mit" if exists(model.model_name, "mit") else "fantasia")
        times_in_hours[1].append(256 if exists(dirx, "256") else
                                 1024 if exists(dirx, "1024") else 12)
Example #27
def calculate_loss_tensor(Total_Windows,
                          W,
                          signals_models,
                          signals,
                          mean_tol=10000,
                          overlap=0.33,
                          batch_percentage=0,
                          mini_batch=256,
                          std_tol=10000,
                          X_matrix=None,
                          Y_matrix=None,
                          min_windows=100):

    if X_matrix is None and Y_matrix is None:
        prepare_data = True
        X_matrix = []
        Y_matrix = []
    else:
        prepare_data = False

    sizes = []
    removex = []
    for i, (signal, model_info) in enumerate(zip(signals, signals_models)):
        signal2model = Signal2Model(model_info.dataset_name,
                                    "",
                                    signal_dim=model_info.Sd,
                                    hidden_dim=model_info.Hd,
                                    batch_size=Total_Windows,
                                    window_size=W)
        # a flat array of samples means a single signal; wrap it in a list
        if isinstance(signal[0], (np.integer, np.floating)):
            signal = [signal]

        if prepare_data:
            X_list, Y_list = prepare_test_data(
                signal,
                signal2model,
                overlap=overlap,
                batch_percentage=batch_percentage,
                mean_tol=mean_tol,
                std_tol=std_tol,
                randomize=False)

            if np.shape(X_list)[0] >= min_windows:
                X_matrix.append(X_list)
                Y_matrix.append(Y_list)
                sizes.append(np.shape(X_list)[0])
            else:
                removex.append(i)
        else:
            print(np.shape(X_matrix[i]))
            sizes.append(np.shape(X_matrix[i])[0])

    # drop models whose signals had too few clean windows (highest index first)
    removex.sort(reverse=True)
    for rem in removex:
        signals_models.pop(rem)

    print(np.shape(X_matrix))

    max_windows = np.min(np.array(sizes))
    for t, test_signal in enumerate(X_matrix):
        X_matrix[t] = test_signal[:max_windows]
        Y_matrix[t] = Y_matrix[t][:max_windows]
    print(np.shape(X_matrix))

    X_matrix, Y_matrix = np.array(X_matrix), np.array(Y_matrix)
    max_windows = max_windows - (max_windows % mini_batch)
    print("Number of Windows: {0} of {1}".format(max_windows,
                                                 np.max(np.array(sizes))))

    windows = np.arange(0, max_windows, mini_batch)
    print(windows)
    N_Models = len(signals_models)
    N_Signals = len(X_matrix)

    loss_tensor = np.zeros((N_Models, N_Signals, max_windows))

    print("Loading model...")
    model_info = signals_models[0]

    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=mini_batch)
    model = GRU.LibphysMBGRU(signal2Model)
    times = []
    for m, model_info in enumerate(signals_models):
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing Model " + model_info.name + " - time: " +
              str(model.train_time))

        for s in range(N_Signals):
            print("Calculating loss for ECG " + str(s + 1), end=';\n ')
            for w in windows:
                x_test = X_matrix[s, w:w + mini_batch, :]
                y_test = Y_matrix[s, w:w + mini_batch, :]
                tic = time.time()
                loss_tensor[m, s, w:w + mini_batch] = np.asarray(
                    model.calculate_mse_vector(x_test, y_test))
                times.append(time.time() - tic)

    times = np.array(times)
    print(np.size(loss_tensor, 2))
    print(
        "Statistics: \n Mean time: {0}; \n Std time: {1}; Max time: {2}; Min Time: {3}"
        .format(np.mean(times), np.std(times), np.max(times), np.min(times)))
    return loss_tensor
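# A hedged usage sketch; the ModelInfo entries mirror the attributes this
# function reads (dataset_name, directory, Sd, Hd, name) and the call values
# are illustrative only:
#
#   loss = calculate_loss_tensor(Total_Windows=512, W=256,
#                                signals_models=models, signals=signals,
#                                mini_batch=256)
#   predicted = np.argmin(np.mean(loss, axis=2), axis=0)  # best model per signal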