def try_to_load(
    core_name,
    signal2model,
):
    """Try to load a previously trained MIT biometry model.

    Search order: a fully trained model, the most recent epoch checkpoint
    (3500 down to 250, step 250), a backup copy; otherwise a freshly
    initialised model is returned.

    :param core_name: identifier used to build the model's file tag
    :param signal2model: Signal2Model configuration for the GRU
    :return: a loaded (or freshly created) GRU.LibphysMBGRU model
    """
    dir_name = "ECG_BIOMETRY[MIT]"
    search_dir = GRU_DATA_DIRECTORY + dir_name + '/'
    files = os.listdir(search_dir)

    if get_file_tag(core_name) in files:
        model = GRU.LibphysMBGRU(signal2model)
        model.load(dir_name=dir_name)
        print("Found!")
        return model

    # BUG FIX: the original re-checked get_file_tag(core_name) inside this
    # loop -- the very test that had just failed -- so no epoch checkpoint
    # could ever be loaded.  Check the epoch-specific tag instead.
    model = GRU.LibphysMBGRU(signal2model)
    for epoch in range(3500, 0, -250):
        file_tag = model.get_file_tag(0, epoch)
        if file_tag in files:
            model.load(dir_name=dir_name, file_tag=file_tag)
            print("Loaded! epoch {0}".format(epoch))
            return model

    if os.path.exists(search_dir + "backup/" + get_file_tag(core_name)):
        model.load(dir_name="ECG_BIOMETRY[MIT]/backup")
        return model
    # Nothing on disk: the model above was never loaded into, so it is
    # still a freshly initialised (untrained) model.
    return model
def try_to_load(signal2model):
    """Try to load a trained model from ECG_BIOMETRY[128.1024].

    :param signal2model: Signal2Model configuration for the GRU
    :return: False if a finished model already exists, a model loaded from
             the most recent epoch checkpoint (5000 down to 250, step 250),
             or a freshly initialised model otherwise
    """
    dir_name = "ECG_BIOMETRY[128.1024]"
    search_dir = GRU_DATA_DIRECTORY + dir_name + '/'
    files = os.listdir(search_dir)

    if get_file_tag(signal2model.model_name) in files:
        print("Found!")
        return False

    # BUG FIX: the original re-tested the final tag here (which had already
    # failed above), making this whole loop dead code; test the per-epoch
    # checkpoint tag instead.
    model = GRU.LibphysMBGRU(signal2model)
    for epoch in range(5000, 0, -250):
        file_tag = model.get_file_tag(0, epoch)
        if file_tag in files:
            model.load(dir_name=dir_name, file_tag=file_tag)
            print("Loaded! epoch {0}".format(epoch))
            return model

    # No checkpoint found: return the freshly initialised model.
    return model
def calculate_loss_tensors(N_Windows, W, signals_models):
    """Build the (version, model, signal, window) cross-loss tensor.

    :param N_Windows: number of windows drawn per signal
    :param W: window length in samples
    :param signals_models: list (one entry per version) of lists of
                           model-info objects
    :return: loss tensor of shape (N_Versions, N_Signals, N_Signals, N_Windows)
    """
    N_Versions = len(signals_models)
    N_Signals = len(signals_models[0])
    loss_tensor = np.zeros((N_Versions, N_Signals, N_Signals, N_Windows))
    X_matrix = np.zeros((N_Versions, N_Signals, N_Windows, W))
    Y_matrix = np.zeros((N_Versions, N_Signals, N_Windows, W))

    for i, model_info in enumerate(signals_models[0]):
        # BUG FIX: the original incremented `i` OUTSIDE this loop (so every
        # signal overwrote slot 0) and fed only the LAST version's x_test
        # into randomize_batch while discarding the collected lists.
        x_tests = []
        y_tests = []
        for version in range(N_Versions):
            [x_test, y_test] = load_test_data(
                "GRU_" + model_info.dataset_name + "[" + str(model_info.Sd) +
                "." + str(model_info.Hd) + ".-1.-1.-1]", model_info.directory)
            x_tests.append(x_test)
            y_tests.append(y_test)
        for version in range(N_Versions):
            # NOTE(review): windows are randomized independently per
            # version; confirm joint randomization is not required.
            X_matrix[version, i, :, :], Y_matrix[version, i, :, :] = \
                randomize_batch(np.asarray(x_tests[version]),
                                np.asarray(y_tests[version]), N_Windows)

    print("Loading base model...")
    model_info = signals_models[0][0]
    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=N_Windows)

    for m in range(N_Signals):
        for version in range(N_Versions):
            # Reload the weights of model m (for this version) into the
            # shared model skeleton.
            model_info = signals_models[version][m]
            model.signal_name = model_info.dataset_name
            model.load(signal_name=model_info.name,
                       filetag=model.get_file_tag(model_info.DS, model_info.t),
                       dir_name=model_info.directory)
            print("Processing " + model_info.name)

            for s in range(N_Signals):
                x_test = X_matrix[version, s, :, :]
                y_test = Y_matrix[version, s, :, :]
                print("Calculating loss for " +
                      signals_models[version][s].name,
                      end=';\n ')
                loss_tensor[version, m, s, :] = np.asarray(
                    model.calculate_loss_vector(x_test, y_test))

    # NOTE(review): `filename` and `signals_info` are not defined in this
    # function or its parameters -- presumably module-level globals in the
    # original script; confirm before reuse.
    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models,
             signals_tests=signals_info)

    return loss_tensor
def calculate_fine_loss_tensor(filename, Total_Windows, W, signals_models,
                               n_windows):
    """Compute per-chunk losses of every model against every test signal.

    The total window budget is split into chunks of `n_windows`; each chunk
    produces one loss value, giving a (model, signal, chunk) tensor that is
    also saved to `filename`_fine.npz.

    :param filename: output path prefix for the saved .npz archive
    :param Total_Windows: requested total number of windows per signal
    :param W: window length in samples
    :param signals_models: list of model-info objects
    :param n_windows: number of windows evaluated per loss value
    :return: loss tensor of shape (N_Signals, N_Signals, N_Windows)
    """
    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Signals = len(signals_models)
    # Round the budget down to a whole number of chunks.
    Total_Windows = int(N_Windows * n_windows)

    loss_tensor = np.zeros((N_Signals, N_Signals, N_Windows))
    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    # Load and shuffle the test windows of every signal.
    for idx, model_info in enumerate(signals_models):
        [x_test, y_test] = load_test_data(
            "GRU_{0}[{1}.{2}.-1.-1.-1]".format(model_info.dataset_name,
                                               model_info.Sd, model_info.Hd),
            model_info.directory)
        X_matrix[idx], Y_matrix[idx] = randomize_batch(
            x_test, y_test, Total_Windows)

    print("Loading model...")

    # Skeleton built from the last model-info; weights are swapped in below.
    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m, model_info in enumerate(signals_models):
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name,
                   filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for " + signals_models[s].name, end=';\n ')

            for w in windows:
                start = w * n_windows
                x_chunk = X_matrix[s, start:start + n_windows, :]
                y_chunk = Y_matrix[s, start:start + n_windows, :]
                loss_tensor[m, s, w] = np.asarray(
                    model.calculate_loss(x_chunk, y_chunk))

    np.savez(filename + "_fine.npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
def synthetize(models,
               uncertaintly=0.01,
               filename="synthesized_signals_1024.npz"):
    """Generate and plot synthetic signals from each trained model.

    :param models: iterable of model-info objects (dataset_name, directory,
                   Sd, Hd, name) whose trained weights are loaded in turn
    :param uncertaintly: sampling uncertainty forwarded to the generator
                         (sic -- original spelling kept, it is a keyword)
    :param filename: .npz archive path for all synthesized signals

    NOTE(review): as written this function cannot run to completion --
    `signal` is a NumPy array so `signal.append(y)` raises AttributeError,
    and `prob`/`probabilities` are never defined before being saved.  The
    original intent could not be reconstructed safely, so the code is left
    untouched and only annotated.
    """
    mini_bs = 256
    plt.ion()
    signals = []
    # Model skeleton built from the first entry; weights are reloaded for
    # every model_info in the loop below.
    signal2Model = Signal2Model(models[0].dataset_name,
                                models[0].directory,
                                signal_dim=models[0].Sd,
                                hidden_dim=models[0].Hd,
                                mini_batch_size=mini_bs)
    model = GRU.LibphysMBGRU(signal2Model)
    for model_info in models:
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing " + model_info.name)

        # plt.ion()
        # font = {'family': 'lato',
        #         'weight': 'bold',
        #         'size': 40}
        #
        # matplotlib.rc('font', **font)
        x = [0]  # NOTE(review): unused
        i = 0  # NOTE(review): immediately shadowed by the for-loop below

        # Random integer seed window, one value per mini-batch row.
        signal = np.random.randint(0, 63, size=(mini_bs, 1), dtype=np.int32)
        window_size = 512
        fz = 250  # NOTE(review): unused -- presumably sampling frequency
        N = 512
        # while True:
        for i in range(N):
            # print(i)
            y = model.generate_online_predicted_vector(
                signal, window_size, uncertaintly=uncertaintly)
            # NOTE(review): np.ndarray has no .append -- this raises
            # AttributeError on the first iteration; a Python list or
            # np.append was probably intended.  Confirm before fixing.
            signal.append(y)
        plt.clf()
        plt.plot(signal)
        plt.pause(0.05)
        signals.append(np.array(signal))

        # NOTE(review): `prob` is undefined in this scope -- NameError if
        # this line is ever reached.
        np.savez("img/" + model_info.dataset_name + ".npz",
                 synth_signal=signal,
                 probability=prob)
    # NOTE(review): `probabilities` is also undefined in this scope.
    np.savez(filename, synth_signals=signals, probabilities=probabilities)
# Esempio n. 6
# 0
def calculate_loss_tensor(filename,
                          signals_models=None,
                          test_signals=None,
                          labels=None):
    """Compute an MSE loss matrix: one row per model, one column per signal.

    :param filename: output path prefix; results are saved to `filename`.npz
    :param signals_models: list of model-info objects (one loaded per row)
    :param test_signals: 2-D array of signals; sample t is the input and
                         sample t+1 the prediction target
    :param labels: unused here; kept for interface compatibility
    :return: loss tensor of shape (len(signals_models), N_Signals)
    """
    # BUG FIX: `signals_models=[]` was a shared mutable default argument.
    if signals_models is None:
        signals_models = []
    X_Windows = test_signals[:, :-1]
    Y_Windows = test_signals[:, 1:]
    N_Signals = np.shape(X_Windows)[0]

    # NOTE(review): this equals N_Signals -- all signals are evaluated as a
    # single mini-batch; confirm this is the intended batch size.
    n_windows = np.shape(X_Windows)[0]
    print("Loading model...")
    model_info = signals_models[0]
    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=n_windows)
    model = GRU.LibphysMBGRU(signal2Model)

    loss_tensor = np.zeros((len(signals_models), N_Signals))
    for m in range(len(signals_models)):
        # Swap each model's weights into the shared skeleton.
        model_info = signals_models[m]
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing " + model_info.name)

        loss_tensor[m, :] = np.asarray(
            model.calculate_mse_vector(X_Windows, Y_Windows))
        print(np.mean(loss_tensor[m, :]))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
# Esempio n. 7
# 0
    try:
        where.index(what)
        return True
    except ValueError:
        return False


signals_models = db.ecg_1024_256_RAW + db.cybhi_512_M1 + db.cybhi_512_M2 + db.mit_1024
model_info = signals_models[0]

signal2Model = Signal2Model(model_info.dataset_name,
                            model_info.directory,
                            signal_dim=model_info.Sd,
                            hidden_dim=model_info.Hd)

model = GRU.LibphysMBGRU(signal2Model)
times_in_hours = [[], [], [], [], []]
for model_info in signals_models:
    try:
        model.model_name = model_info.dataset_name
        dirx = model_info.directory
        model.load(dir_name=dirx)
        hours = model.train_time / (3.6 * 10**6)
        times_in_hours[0].append(
            "cybhi" if exists(model.model_name, "cybhi") else
            "mit" if exists(model.model_name, "mit") else "fantasia")
        times_in_hours[1].append(256 if exists(dirx, "256") else
                                 1024 if exists(dirx, "1024") else 12)
        times_in_hours[2].append(model_info.W)
        times_in_hours[3].append(hours)
        times_in_hours[4].append(model.model_name)
def train_block(signals_train,
                signals_test,
                signal2model,
                signal_indexes=None,
                n_for_each=16,
                overlap=0.33,
                random_training=True,
                start_index=0,
                track_loss=False,
                loss_interval=1,
                train_ratio=1):
    """
    Build one training set from several records and train a single model.

    :param signals_train: - list - input signals (one entry per record), used as X
    :param signals_test: - list - target signals aligned with signals_train, used
                            as Y (shifted one sample ahead inside this function)
    :param signal2model: - Signal2Model object - model configuration; for more info
                            check Biosignals.utils.functions.signal2model
    :param signal_indexes: - list - indexes of the "signals" variables to train on.
                                    If None is given, all signals will be used.
    :param n_for_each: - int - number of windows from each signal to be inserted
                               in the model training (rounded down to a multiple
                               of the mini-batch size)
    :param overlap: - float - value in the interval [0,1] that corresponds to the
                              overlapping ratio of windows
    :param random_training: - boolean - if True, random windows are inserted in
                                        the training
    :param start_index: - int - value from which the windows will be selected
    :param track_loss: - boolean - value to plot loss as the model is trained
    :param loss_interval: - int - interval between loss evaluations, forwarded to
                                  train_model
    :param train_ratio: - float - fraction of each signal's windows eligible for
                                  (random) training selection
    :return: True if training finished and the model was saved, False otherwise
    """

    if signal_indexes is None:
        signal_indexes = range(len(signals_train))
    #self.save(signal2model.signal_directory, self.get_file_tag(-1, -1))

    x_train = []
    y_train = []
    for i in signal_indexes:

        # Creation of the Time Windows from the dataset
        if n_for_each == 1:
            # Single-window case: take the first window_size samples directly.
            if len(x_train) == 0:
                x_train = signals_train[i][:signal2model.window_size]
                y_train = signals_test[i][
                    1:signal2model.window_size +
                    1]  # for next signal without noise, [1:window_size + 1]
            else:
                x_train = np.vstack(
                    (x_train, signals_train[i][:signal2model.window_size]))
                y_train = np.vstack(
                    (y_train, signals_test[i][1:signal2model.window_size + 1]))
        else:
            X_windows, y_end_values, n_windows, last_index = segment_signal(
                signals_train[i][:-1],
                signal2model.window_size,
                overlap=overlap,
                start_index=start_index)
            Y_windows, y_end_values, n_windows, last_index = segment_signal(
                signals_test[i][1:],
                signal2model.window_size,
                overlap=overlap,
                start_index=start_index)

            # NOTE(review): n_for_each is mutated here and the new value
            # carries over to the NEXT signal in the loop -- confirm that
            # cross-iteration carry-over is intended.
            n_for_each = n_for_each if n_for_each < np.shape(
                X_windows)[0] else np.shape(X_windows)[0]
            # Round down to a whole number of mini-batches.
            n_for_each = n_for_each if n_for_each % signal2model.mini_batch_size == 0 \
                else signal2model.mini_batch_size * int(n_for_each / signal2model.mini_batch_size)

            last_training_index = int(n_windows * train_ratio)
            # List of the windows to be inserted in the dataset
            if random_training:
                window_indexes = np.random.permutation(
                    last_training_index)  # randomly select windows
            else:
                # NOTE(review): uses n_windows (all windows) rather than
                # last_training_index -- train_ratio is ignored on this
                # branch; confirm whether that asymmetry is intended.
                window_indexes = list(range(
                    (n_windows)))  # first windows are selected

            # Insertion of the windows of this signal in the general dataset
            if len(x_train) == 0:
                # First is for train data
                x_train = X_windows[window_indexes[0:n_for_each], :]
                y_train = Y_windows[window_indexes[0:n_for_each], :]
                print("x_train shape:", x_train.shape)

                # # The rest is for test data
                # x_test = X_windows[last_training_index:, :]
                # y_test = Y_windows[last_training_index:, :]
            else:
                print("len != 0")
                x_train = np.append(x_train,
                                    X_windows[window_indexes[0:n_for_each], :],
                                    axis=0)
                y_train = np.append(y_train,
                                    Y_windows[window_indexes[0:n_for_each], :],
                                    axis=0)
                # x_test = np.append(x_train, X_windows[window_indexes[n_for_each:], :], axis=0)
                # y_test = np.append(x_train, Y_windows[window_indexes[n_for_each:], :], axis=0)

                # Save test data
                # self.save_test_data(signal2model.signal_directory, [x_test, y_test])

    # Start time recording

    # Start training model
    model = LibphysMBGRU.LibphysMBGRU(
        signal2model
    )  #signal2model, ModelType.CROSS_MBSGD, params)) -> for LibphysGRU
    t1 = time.time()
    model.start_time = time.time()
    returned = model.train_model(x_train, y_train, signal2model, track_loss,
                                 loss_interval)

    print("Dataset trained in: ~%d seconds" % int(time.time() - t1))

    # Model last training is then saved
    if returned:
        model.save(signal2model.signal_directory, model.get_file_tag(-5, -5))
        return True
    else:
        return False
# Esempio n. 9
# 0
import DeepLibphys.models.LibphysMBGRU as MBGRU
import seaborn
from DeepLibphys.utils.functions.signal2model import *
from DeepLibphys.utils.functions.common import get_signals_tests
from DeepLibphys.utils.functions.database import *

# Smoke-test script: train a throw-away model on the first test signal.
# NOTE(review): `signal_tests` is not defined in this file's visible scope --
# presumably exported by one of the wildcard imports above
# (DeepLibphys.utils.functions.database); confirm.
signal2model = Signal2Model("XPTO", "XPTO")
signals = get_signals_tests(signal_tests, index=1)

model = MBGRU.LibphysMBGRU(signal2model)
model.train(signals[0], signal2model, loss_interval=10)


def start(moment, x):
    """Train a CYBHi ECG biometry GRU for one subject.

    :param moment: 1 to train on the first-session ("train") windows,
                   anything else for the second-session ("test") windows;
                   each set is split 50/50 into train/validation parts
    :param x: position inside the hard-coded `indexes` array selecting
              which single subject to train
    """
    signal_dim = 256
    hidden_dim = 256
    mini_batch_size = 8
    batch_size = 256
    window_size = 1024
    save_interval = 250
    overlap = 0.055

    signal_directory = 'ECG_BIOMETRY[{0}.{1}]'.format(batch_size, window_size)
    # signal_directory = 'ECG_BIOMETRY[{0}.{1}.8]'.format(batch_size, window_size)
    # signal_directory = 'ECG_BIOMETRY[CYBHi]'
    noise_removed_path = "Data/CYBHi/signals_long_v2.npz"
    fileDir = "Data/CYBHi/3rd"

    # moment = 1
    _signals = np.load(noise_removed_path)["signals"]
    names = np.array([signal.name for signal in _signals])
    # Split the selected session's windows 50/50 into train (signals) and
    # held-out (signals_y) halves.
    if moment == 1:
        signals = np.array(
            extract_train_part([signal.train_windows for signal in _signals],
                               0.5))
        signals_y = np.array(
            extract_test_part([signal.train_windows for signal in _signals],
                              0.5))
    else:
        signals = np.array(
            extract_train_part([signal.test_windows for signal in _signals],
                               0.5))
        signals_y = np.array(
            extract_test_part([signal.test_windows for signal in _signals],
                              0.5))

    # [print(DATASET_DIRECTORY + 'GRU_ecg_cybhi_M{0}_{1}[256.256.-1.-5.-5].npz'.format(moment, name)) for name in names]
    # [print(name + ":" + str(os.path.isfile(DATASET_DIRECTORY + signal_directory + '/GRU_ecg_cybhi_M{0}_{1}[256.256.-1.-5.-5].npz'.format(moment, name)))) for name in names]
    #
    #
    # exit()
    step = 1
    indexes = np.arange(0, 63, step)
    # Curated subject list for session 1 (hand-picked usable records).
    if moment == 1:
        indexes = np.array(
            [0, 1, 2, 3, 8, 9, 10, 12, 16, 17, 19, 20, 22, 26, 27, 28] + [
                30, 32, 33, 34, 40, 41, 42, 43, 44, 45, 46, 48, 53, 54, 58, 60,
                61, 62
            ])
    # if x == 6:
    #     z = np.arange(60, 63)
    # x = 0
    # else:
    # z = np.arange(indexes[x], indexes[x+1])
    # z = np.arange(names.tolist().index("CF") , int(len(signals)/2))#, len(signals))
    # z = np.arange(60, 63)
    # z = np.arange(0, 4)
    z = [indexes[x]]
    print(str(x) + ": " + str(list(z)))
    for s, signal, name in zip(
            np.arange(len(signals))[z], signals[z], names[z]):
        name = 'ecg_cybhi_M{0}_{1}'.format(moment, name)
        print(name)
        signal2model = Signal2Model(name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    tolerance=1e-9,
                                    count_to_break_max=15)

        # x_train, y_train = prepare_test_data([signal], signal2model, overlap=overlap, mean_tol=0.8, std_tol=0.1)
        std_tol = 0.1
        mean_tol = 0.05
        plt.plot(signal)
        plt.figure()
        plt.plot(signals_y[s])
        plt.figure()
        x_train, y_train = prepare_several_special_data([signal],
                                                        signal2model,
                                                        overlap=overlap,
                                                        mean_tol=mean_tol,
                                                        std_tol=std_tol)
        # x_train, y_train = manual_extraction(x_train, y_train)
        print("Final number of windows: {0}".format(len(x_train)))
        print("Compiling Model {0} for {1}".format(s, name))

        # Dump every training window to one PDF for visual inspection.
        path = fileDir + "/Signals"
        full_path = path + "/{0}.pdf".format(signal2model.model_name)
        # if savePdf:
        print("Saving {0}.pdf".format(full_path))
        if not os.path.exists(path):
            os.makedirs(path)

        fig = plt.figure()
        pdf = PdfPages(full_path)
        # NOTE(review): this loop shadows the function parameter `x`; the
        # parameter is not used after this point, but rename if it ever is.
        for x in x_train:
            plt.plot(x)
            pdf.savefig(fig)
            plt.clf()
        pdf.close()
        # model = GRU.LibphysMBGRU(signal2model)

        # Retry training with a fresh model until train_model reports success.
        n_runs = 0
        running_ok = False
        while not running_ok:
            n_runs += 1

            # if n_runs < 3:
            #     model = try_to_load(signal2model)
            #     if model is False:
            #         break
            # else:
            # model = GRU.LibphysMBGRU(signal2model)
            # signal2model2 = Signal2Model("generic_ecg", signal_directory, signal_dim=signal_dim, hidden_dim=hidden_dim,
            #                             batch_size=batch_size,
            #                             mini_batch_size=mini_batch_size, window_size=window_size,
            #                             save_interval=save_interval, number_of_epochs=2000, lower_error=1e-9,
            #                             count_to_break_max=15, learning_rate_val=0.01)

            # model = GRU.LibphysMBGRU(signal2model2)
            model = GRU.LibphysMBGRU(signal2model)
            # Special case: subject "ARA" resumes from its epoch-250 checkpoint.
            if name == "ARA":
                model.load(file_tag=model.get_file_tag(0, 250),
                           dir_name=signal2model.signal_directory)
            # history_of_indexes = {}
            # model.load(dir_name="ECG_CLUSTER[128.1024]", file_tag=model.get_file_tag(999, 0))
            # model.model_name = signal2model.model_name
            print("Initiating training... ")
            running_ok = model.train_model(x_train, y_train, signal2model)

            if not running_ok:
                model = GRU.LibphysMBGRU(signal2model)
            else:
                model.save(dir_name=signal_directory)
def train_fantasia(hidden_dim, mini_batch_size, batch_size, window_size,
                   signal_directory, indexes, signals, save_interval,
                   signal_dim):
    """Train (resume) one GRU per Fantasia ECG record and save final weights.

    :param hidden_dim: hidden layer dimension of each GRU
    :param mini_batch_size: mini-batch size used in training
    :param batch_size: batch size used in training
    :param window_size: training window length in samples
    :param signal_directory: directory tag under which models are saved/loaded
    :param indexes: indexes of the records (in `signals`) to train
    :param signals: array/list of raw ECG signals
    :param save_interval: epochs between checkpoint saves
    :param signal_dim: input/output quantization dimension
    """
    for i, signal in zip(indexes, signals[indexes]):
        name = 'ecg_' + str(i + 1)

        signal2model = Signal2Model(name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    lower_error=3e-5,
                                    lower_learning_rate=1e-4,
                                    count_to_break_max=30)
        print("Compiling Model {0}".format(name))

        # Only the first third of the record is used; the first 22500
        # samples are skipped (presumably a warm-up/noisy segment -- confirm).
        last_index = int(len(signal) * 0.33)
        x_train, y_train = prepare_test_data([signal[22500:last_index]],
                                             signal2model,
                                             mean_tol=0.9,
                                             std_tol=0.5)

        # fig, ax = plt.subplots()
        # plt.subplots_adjust(bottom=0.2)
        # l, = plt.plot(x_train[0], lw=2)
        #
        # class BooleanSwitcher(object):
        #     indexes = []
        #     ind = 0
        #
        #     def yes(self, event):
        #         if self.ind < len(x_train):
        #             self.indexes.append(self.ind)
        #             self.ind += 1
        #         if self.ind < len(x_train):
        #             l.set_ydata(x_train[self.ind])
        #             plt.draw()
        #         else:
        #             self.crop()
        #             plt.close()
        #
        #     def no(self, event):
        #         self.ind += 1
        #         if self.ind < len(x_train):
        #             l.set_ydata(x_train[self.ind])
        #             plt.draw()
        #         else:
        #             self.crop()
        #             plt.close()
        #
        #     def crop(self):
        #         c = len(self.indexes) % 16
        #         self.indexes = self.indexes[:(len(self.indexes) - c)]
        # callback = BooleanSwitcher()
        # axprev = plt.axes([0.7, 0.05, 0.1, 0.075])
        # axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
        # by = Button(axnext, 'Yes')
        # by.on_clicked(callback.yes)
        # bn = Button(axprev, 'No')
        # bn.on_clicked(callback.no)
        # plt.show()

        model = GRU.LibphysMBGRU(signal2model)
        # try:
        #
        #     # if i < 20:
        #     #     old_directory = "CLEAN_ECG_BIOMETRY[128.1024]"
        #     #     old_name = 'clean_ecg' + str(i+1)
        #     # else:
        #     old_directory = "BIOMETRY[256.1024]"
        #     old_name = name
        #
        #     old_tag= 'GRU_{0}[{1}.{2}.{3}.{4}.{5}]'. \
        #         format(old_name, signal_dim, hidden_dim, -1, -5, -5)
        #     model.load(old_tag, old_directory)
        # except:
        #     pass

        print("Initiating training... ")
        model.model_name = 'ecg_' + str(i + 1)

        model.start_time = time.time()
        # returned = model.train_model(x_train[callback.indexes], y_train[callback.indexes], signal2model)
        # Resume from the latest checkpoint before continuing training.
        # NOTE(review): this raises if no checkpoint exists yet -- confirm
        # every record has been trained at least once before.
        model.load(model.get_file_tag(), signal_directory)
        returned = model.train_model(x_train, y_train, signal2model)
        if returned:
            model.save(signal2model.signal_directory,
                       model.get_file_tag(-5, -5))
# Esempio n. 12
# 0
def start(s):
    """Train a GRU biometry model for one MIT record selected by index *s*.

    :param s: index into the hard-coded record list `ind` (with step 1 this
              selects exactly one record)
    """
    signal_dim = 256
    hidden_dim = 256
    mini_batch_size = 8
    batch_size = 256
    window_size = 1024
    save_interval = 250
    # BUG FIX: the original called .format(256, window_size) on a literal
    # with no placeholders -- a no-op that suggested a missing template.
    signal_directory = 'ECG_BIOMETRY[MIT]'

    raw_filenames, Ns, core_names = get_processing_variables()

    # ind = np.array([0, 2,       10,         15, 29, 33,                 48, 50,     54,     57, 58, 59, 64, 68])
    # ind = np.array([51, 55, 59, 64, 0] + [10, 11, 29, 33])
    # process_and_save_signals_2(raw_filenames, core_names, Ns, indexes2process=[ind[s]])#np.arange(45, len(raw_filenames)))
    ind = np.array([29, 59, 64])
    # exit()

    processed_filenames = np.array([
        '../data/processed/MIT/{0}[256].npz'.format(core_name)
        for core_name in core_names
    ])

    # indexes = np.array([1, 7, 11, 12, 20, 30, 32, 33, 42] + list(range(Ns[0] + 2, sum(Ns) + 1))) - 1
    # s = indexes.tolist().index(29)
    # ind = np.arange(0, len(processed_filenames))
    # s = 2
    step = 1
    e = s * step + step
    ind = ind[s * step:e]
    # indexes = [48, 49]
    # indexes = np.array([0, 6, 11, 17, 26, 36, 37, 38])#, 41, 51, 55])
    # ind = np.array([indexes[s]])
    print(str(np.arange(s * step, e)) + " - " + str(ind))

    for i, filename in enumerate(processed_filenames[ind]):
        # Load the archive once (the original re-opened the file twice).
        data = np.load(filename)
        signal, core_name = data["signal"], data["core_name"]
        running_ok = False
        signal2model = Signal2Model(core_name,
                                    signal_directory,
                                    signal_dim=signal_dim,
                                    hidden_dim=hidden_dim,
                                    batch_size=batch_size,
                                    mini_batch_size=mini_batch_size,
                                    window_size=window_size,
                                    save_interval=save_interval,
                                    lower_error=1e-10,
                                    count_to_break_max=15)
        # Train on the first third of the record; plot both parts for
        # visual inspection.
        last_index = int(len(signal) * 0.33)
        std_tol = 0.1
        mean_tol = 0.02
        n_runs = 0
        plt.plot(signal[:last_index])
        plt.figure()
        plt.plot(signal[last_index:])
        plt.figure()
        x_train, y_train = prepare_several_special_data([signal[:last_index]],
                                                        signal2model,
                                                        overlap=0.11,
                                                        mean_tol=mean_tol,
                                                        std_tol=std_tol)
        # Retry with a fresh model until train_model reports success,
        # relaxing the std tolerance after each failed run.
        while not running_ok:
            print("Initiating training... ")
            # x_train, y_train = prepare_test_data([signal[:last_index]], signal2model, mean_tol=mean_tol, std_tol=std_tol)
            # x_train, y_train = prepare_special_data([signal[:last_index]], signal2model, mean_tol=mean_tol, std_tol=std_tol)

            print("done")
            print("Compiling Model {0}".format(signal2model.model_name))

            # if n_runs < 1:
            #     model = try_to_load(core_name, signal2model)
            # else:
            model = GRU.LibphysMBGRU(signal2model)

            if model:
                # Trim the training set to a whole number of mini-batches.
                indexes = range(
                    len(x_train) - (len(x_train) % mini_batch_size))

                returned = model.train_model(x_train[indexes],
                                             y_train[indexes], signal2model)
                # if i == 16:
                if returned:
                    model.save(signal2model.signal_directory,
                               model.get_file_tag(-5, -5))
                else:
                    std_tol += 0.05

                running_ok = returned
                n_runs += 1
            else:
                running_ok = True
# Esempio n. 13
# 0
def calculate_loss_tensor(Total_Windows,
                          W,
                          signals_models,
                          signals,
                          mean_tol=10000,
                          overlap=0.33,
                          batch_percentage=0,
                          mini_batch=256,
                          std_tol=10000,
                          X_matrix=None,
                          Y_matrix=None,
                          min_windows=100):
    """Compute the per-window MSE of every model against every signal.

    :param Total_Windows: batch size used when preparing each signal's windows
    :param W: window length in samples
    :param signals_models: list of model-info objects.
                           WARNING: mutated in place -- entries whose signal
                           yields fewer than `min_windows` windows are popped.
    :param signals: list of raw signals (or single 1-D signals, auto-wrapped)
    :param mean_tol: mean tolerance forwarded to prepare_test_data
    :param overlap: window overlapping ratio in [0, 1]
    :param batch_percentage: forwarded to prepare_test_data
    :param mini_batch: number of windows evaluated per model call
    :param std_tol: std tolerance forwarded to prepare_test_data
    :param X_matrix: pre-built input windows; if (together with Y_matrix)
                     None, windows are prepared from `signals`
    :param Y_matrix: pre-built target windows
    :param min_windows: minimum usable windows for a signal to be kept
    :return: loss tensor of shape (N_Models, N_Signals, max_windows)
    """

    if X_matrix is None and Y_matrix is None:
        prepare_data = True
        X_matrix = []
        Y_matrix = []
    else:
        prepare_data = False

    sizes = []
    removex = []
    for signal, model_info, i in zip(signals, signals_models,
                                     range(len(signals))):
        signal2model = Signal2Model(model_info.dataset_name,
                                    "",
                                    signal_dim=model_info.Sd,
                                    hidden_dim=model_info.Hd,
                                    batch_size=Total_Windows,
                                    window_size=W)
        # A bare 1-D signal is wrapped so prepare_test_data always receives
        # a list of signals.
        if type(signal[0]) is np.int64 or type(signal[0]) is np.float64:
            signal = [signal]

        if prepare_data:
            X_list, Y_list = prepare_test_data(
                signal,
                signal2model,
                overlap=overlap,
                batch_percentage=batch_percentage,
                mean_tol=mean_tol,
                std_tol=std_tol,
                randomize=False)

            # Keep only signals that produced enough usable windows.
            if np.shape(X_list)[0] >= min_windows:
                X_matrix.append(X_list)
                Y_matrix.append(Y_list)
                sizes.append(np.shape(X_list)[0])
            else:
                removex.append(i)
        else:
            print(np.shape(X_matrix[i]))
            sizes.append(np.shape(X_matrix[i])[0])

    # Drop the models whose signals were rejected (reverse order keeps the
    # remaining indexes valid while popping).
    removex.sort(reverse=True)
    [signals_models.pop(rem) for rem in removex]

    print(np.shape(X_matrix))

    # Truncate every signal to the smallest common window count so the
    # matrices stack into a rectangular array.
    max_windows = np.min(np.array(sizes))
    for t, test_signal in enumerate(X_matrix):
        X_matrix[t] = test_signal[:max_windows]
        Y_matrix[t] = Y_matrix[t][:max_windows]
    print(np.shape(X_matrix))

    X_matrix, Y_matrix = np.array(X_matrix), np.array(Y_matrix)
    # Round down to a whole number of mini-batches.
    max_windows = max_windows - (max_windows % mini_batch)
    print("Number of Windows: {0} of {1}".format(max_windows,
                                                 np.max(np.array(sizes))))

    windows = np.arange(0, max_windows, mini_batch)
    print(windows)
    N_Models = len(signals_models)
    N_Signals = len(X_matrix)

    loss_tensor = np.zeros((N_Models, N_Signals, max_windows))

    print("Loading model...")
    model_info = signals_models[0]

    # Shared model skeleton; each model's weights are swapped in below.
    signal2Model = Signal2Model(model_info.dataset_name,
                                model_info.directory,
                                signal_dim=model_info.Sd,
                                hidden_dim=model_info.Hd,
                                mini_batch_size=mini_batch)
    model = GRU.LibphysMBGRU(signal2Model)
    times = []
    for m, model_info in zip(range(len(signals_models)), signals_models):
        model.model_name = model_info.dataset_name
        model.load(dir_name=model_info.directory)
        print("Processing Model " + model_info.name + " - time: " +
              str(model.train_time))

        for s in range(N_Signals):
            print("Calculating loss for ECG " + str(s + 1), end=';\n ')
            for w in windows:
                x_test = X_matrix[s, w:w + mini_batch, :]
                y_test = Y_matrix[s, w:w + mini_batch, :]
                tic = time.time()
                loss_tensor[m, s, w:w + mini_batch] = np.asarray(
                    model.calculate_mse_vector(x_test, y_test))
                times.append(time.time() - tic)

    times = np.array(times)
    print(np.size(loss_tensor, 2))
    print(
        "Statistics: \n Mean time: {0}; \n Std time: {1}; Max time: {2}; Min Time: {3}"
        .format(np.mean(times), np.std(times), np.max(times), np.min(times)))
    return loss_tensor