def calculate_loss_tensor(filename,
                          N_Windows,
                          W,
                          signals_models,
                          signals_info,
                          test_included=False):
    """Compute a (model x signal x window) loss tensor and save it to disk.

    Every model in ``signals_models`` is evaluated on every test signal in
    ``signals_info`` over ``N_Windows`` windows of length ``W``.

    Parameters
    ----------
    filename : str
        Output path prefix; results are written to ``filename + ".npz"``.
    N_Windows : int
        Number of windows evaluated per signal.
    W : int
        Window length in samples.
    signals_models : sequence
        Model descriptors (must expose Sd, Hd, DS, t, name, dataset_name,
        directory).
    signals_info : sequence
        Test-signal descriptors (must expose ``name``).
    test_included : bool, optional
        When True, reuse the test split persisted at training time for each
        model; otherwise draw random batches from the raw signals.

    Returns
    -------
    numpy.ndarray
        Tensor of shape (len(signals_models), len(signals_info), N_Windows).
    """
    loss_tensor = np.zeros((len(signals_models), len(signals_info), N_Windows))
    N_Signals = len(signals_info)

    X_matrix = np.zeros((N_Signals, N_Windows, W))
    Y_matrix = np.zeros((N_Signals, N_Windows, W))

    if not test_included:
        # Draw random windows directly from the raw test signals.
        signals = get_signals_tests(signals_info, signals_models[0].Sd)
        for i in range(N_Signals):
            [X_matrix[i, :, :],
             Y_matrix[i, :, :]] = get_random_batch(signals[0][i],
                                                   signals[1][i], W, N_Windows)
    else:
        # Load the test split that was saved when each model was trained.
        for i, model_info in enumerate(signals_models):
            [x_test, y_test] = load_test_data(
                "GRU_" + model_info.dataset_name + "[" + str(model_info.Sd) +
                "." + str(model_info.Hd) + ".-1.-1.-1]", model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                x_test, y_test, N_Windows)

    print("Loading model...")
    # A single GRU instance is reused; per-model weights are loaded below.
    model_info = signals_models[0]
    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=N_Windows)
    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name,
                   filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            x_test = X_matrix[s, :, :]
            y_test = Y_matrix[s, :, :]
            signal_info = signals_info[s]
            print("Calculating loss for " + signal_info.name, end=';\n ')
            loss_tensor[m, s, :] = np.asarray(
                model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models,
             signals_tests=signals_info)

    return loss_tensor
def calculate_loss_tensors(N_Windows, W, signals_models,
                           filename="loss_tensors"):
    """Compute a (version x model x signal x window) loss tensor.

    ``signals_models`` is a list of model-descriptor lists, one inner list
    per version; every version's models are evaluated on every signal.

    Parameters
    ----------
    N_Windows : int
        Number of windows evaluated per signal.
    W : int
        Window length in samples.
    signals_models : sequence of sequences
        ``signals_models[version][m]`` is a model descriptor (must expose
        Sd, Hd, DS, t, name, dataset_name, directory).
    filename : str, optional
        Output path prefix; results are written to ``filename + ".npz"``.
        (Added: the original referenced an undefined ``filename``.)

    Returns
    -------
    numpy.ndarray
        Tensor of shape (N_Versions, N_Signals, N_Signals, N_Windows).
    """
    N_Versions = len(signals_models)
    N_Signals = len(signals_models[0])
    loss_tensor = np.zeros((N_Versions, N_Signals, N_Signals, N_Windows))
    X_matrix = np.zeros((N_Versions, N_Signals, N_Windows, W))
    Y_matrix = np.zeros((N_Versions, N_Signals, N_Windows, W))

    for i, model_info in enumerate(signals_models[0]):
        x_tests = []
        y_tests = []
        for version in range(N_Versions):
            [x_test, y_test] = load_test_data(
                "GRU_" + model_info.dataset_name + "[" + str(model_info.Sd) +
                "." + str(model_info.Hd) + ".-1.-1.-1]", model_info.directory)
            x_tests.append(x_test)
            y_tests.append(y_test)
        # BUG FIX: the original passed the *last* version's x_test/y_test and
        # never advanced i inside the loop (the increment was dedented), so
        # only slot 0 was ever filled. Use the accumulated per-version lists.
        X_matrix[:, i, :, :], Y_matrix[:, i, :, :] = randomize_batch(
            np.asarray(x_tests), np.asarray(y_tests), N_Windows)

    print("Loading base model...")
    # A single GRU instance is reused; per-model weights are loaded below.
    model_info = signals_models[0][0]
    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=N_Windows)

    for m in range(N_Signals):
        for version in range(N_Versions):
            model_info = signals_models[version][m]
            model.signal_name = model_info.dataset_name
            model.load(signal_name=model_info.name,
                       filetag=model.get_file_tag(model_info.DS, model_info.t),
                       dir_name=model_info.directory)
            print("Processing " + model_info.name)

            for s in range(N_Signals):
                x_test = X_matrix[version, s, :, :]
                y_test = Y_matrix[version, s, :, :]
                print("Calculating loss for " +
                      signals_models[version][s].name,
                      end=';\n ')
                loss_tensor[version, m, s, :] = np.asarray(
                    model.calculate_loss_vector(x_test, y_test))

    # BUG FIX: the original saved undefined names (filename, signals_info);
    # persist the tensor and the model descriptors that produced it.
    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
# ---- Example #3 ----
def calculate_loss_tensor(filename, Total_Windows, W, signals_models):
    """Compute a (model x signal x window) loss tensor in mini-batches.

    ``Total_Windows`` is capped per batch at roughly 256 windows; the loss
    is evaluated batch-by-batch so the GRU only ever sees ``n_windows``
    windows at a time.

    Parameters
    ----------
    filename : str
        Output path prefix; results are written to ``filename + ".npz"``.
    Total_Windows : int
        Total number of windows to evaluate per signal.
    W : int
        Window length in samples.
    signals_models : sequence
        Model descriptors (must expose Sd, Hd, DS, t, name, dataset_name,
        directory).

    Returns
    -------
    numpy.ndarray
        Tensor of shape (N_Signals, N_Signals, Total_Windows).
    """
    # Split the requested windows into batches of at most ~256.
    n_windows = Total_Windows
    if Total_Windows / 256 > 1:
        ratio = round(Total_Windows / 256)
        n_windows = int(Total_Windows / ratio)

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Signals = len(signals_models)
    # Round Total_Windows down to a whole number of batches.
    Total_Windows = int(N_Windows * n_windows)

    loss_tensor = np.zeros((N_Signals, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    for i, model_info in enumerate(signals_models):
        [x_test, y_test] = load_test_data(
            "GRU_" + model_info.dataset_name + "[" + str(model_info.Sd) + "." + str(model_info.Hd) + ".-1.-1.-1]"
            , model_info.directory)
        X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(x_test, y_test, Total_Windows)

    print("Loading model...")
    # A single GRU instance is reused; per-model weights are loaded below.
    model_info = signals_models[0]
    model = GRU.LibPhys_GRU(model_info.Sd, hidden_dim=model_info.Hd, signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name, filetag=model.get_file_tag(model_info.DS,
                                                                           model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for " + signals_models[s].name, end=';\n ')
            for w in windows:
                # Evaluate one batch of n_windows windows at a time.
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
def train_block(signals, signal2model, signal_indexes, n_for_each):
    """Train one GRU model on windows pooled from several signals.

    For each index in ``signal_indexes``, the corresponding signal is
    segmented into overlapping windows; ``n_for_each`` randomly chosen
    windows go into the training set, the remainder into a test set that is
    saved alongside the model.

    Parameters
    ----------
    signals : pair of sequences
        ``signals[0]`` holds input signals, ``signals[1]`` their targets.
    signal2model : object
        Training configuration (must expose signal_dim, hidden_dim,
        signal_name, mini_batch_size, window_size, signal_directory,
        number_of_epochs, save_interval).
    signal_indexes : iterable of int
        Indices of the signals to train on.
    n_for_each : int
        Number of windows drawn from each signal for training.
    """
    model = GRU.LibPhys_GRU(signal_dim=signal2model.signal_dim,
                            hidden_dim=signal2model.hidden_dim,
                            signal_name=signal2model.signal_name,
                            n_windows=signal2model.mini_batch_size)
    # Checkpoint the untrained model first (tag -1,-1).
    model.save(signal2model.signal_directory, model.get_file_tag(-1, -1))

    x_train = []
    y_train = []
    for i in signal_indexes:
        X_windows, y_end_values, n_windows, last_index = segment_signal(
            signals[0][i], signal2model.window_size, overlap=0.33)
        Y_windows, y_end_values, n_windows, last_index = segment_signal(
            signals[1][i], signal2model.window_size, overlap=0.33)

        window_indexes = np.random.permutation(
            n_windows)  # randomly select windows
        if len(x_train) == 0:
            x_train = X_windows[window_indexes[0:n_for_each], :]
            y_train = Y_windows[window_indexes[0:n_for_each], :]
        else:
            x_train = np.append(x_train,
                                X_windows[window_indexes[0:n_for_each], :],
                                axis=0)
            y_train = np.append(y_train,
                                Y_windows[window_indexes[0:n_for_each], :],
                                axis=0)

        # BUG FIX: window_indexes has exactly n_windows entries, so the
        # original slice window_indexes[n_windows:] was always empty and an
        # empty test set was saved. Hold out the windows not used for training.
        x_test = X_windows[window_indexes[n_for_each:], :]
        y_test = Y_windows[window_indexes[n_for_each:], :]

    # NOTE(review): only the last signal's held-out windows are persisted
    # (preserved from the original loop structure) — confirm this is intended.
    model.save_test_data(model.get_file_tag(-5, -5),
                         signal2model.signal_directory, [x_test, y_test])
    # Release the large window arrays before training.
    x_test = []
    y_test = []
    X_windows = []
    Y_windows = []
    t1 = time.time()
    model.train_with_msgd(x_train,
                          y_train,
                          signal2model.number_of_epochs,
                          0.9,
                          track_loss=False,
                          save_directory=signal2model.signal_directory,
                          save_distance=signal2model.save_interval)
    print("Dataset trained in: ~%d seconds" % int(time.time() - t1))
    model.save(signal2model.signal_directory, model.get_file_tag(-5, -5))
# ---- Example #5 ----
# Script chunk: train GRU models on noisy ECG signals, then load data for a
# combined normal + noisy biometric experiment.
# NOTE(review): signal_directory, batch_size, signal_dim, hidden_dim and
# mini_batch_size are used before the definitions visible here — presumably
# defined earlier in the full file; confirm before running this chunk alone.
window_size = 256
# number_of_epochs = 1000000

#Noisy signals
for noisy_index in [2]:#range(3,5):
    signals_tests = db.ecg_noisy_signals[noisy_index]
    signals_models = db.signal_models
    #
    # #   Load signals from database
    signals = get_signals_tests(signals_tests, signals_models[0].Sd, type="ecg noise", noisy_index=noisy_index)

    # train each signal from fantasia
    for i in range(9, 19):
        name = 'bio_noise_'+str(noisy_index)+'_ecg_' + str(i)
        signal = Signal2Model(name, signal_directory, batch_size=batch_size)
        # Checkpoint the untrained model (tag -1,-1), then train on signal i.
        model = GRU.LibPhys_GRU(signal_dim=signal_dim, hidden_dim=hidden_dim, signal_name=name, n_windows=mini_batch_size)
        model.save(signal_directory, model.get_file_tag(-1, -1))
        model.train_signals(signals[0][i], signals[1][i], signal, decay=0.95, track_loss=False)


# Normal + noisy ECGs
signal_dim = 64
hidden_dim = 256
signal_directory = 'BIOMETRIC_ECGs_[20.256]'
n_for_each = 16
mini_batch_size = n_for_each
signals_models = db.signal_models

# One entry per noise level 1..4, each loaded via get_signals_tests.
signals_with_noise = [get_signals_tests(db.ecg_noisy_signals[noisy_index-1], signals_models[0].Sd, type="ecg noise",
                                        noisy_index=noisy_index) for noisy_index in range(1,5)]
signals_without_noise = get_signals_tests(db.signal_tests, signals_models[0].Sd, type="ecg")
# ---- Example #6 ----
def calculate_loss_tensor(filename,
                          Total_Windows=None,
                          W=256,
                          signals_models=None,
                          test_signals=None,
                          signals_info=None):
    """Compute a (model x signal x window) loss tensor and save it to disk.

    Two data sources are supported: the test splits persisted with each
    model (``signals_info is None``), or raw ``test_signals`` that are
    segmented on the fly.

    Parameters
    ----------
    filename : str
        Output path prefix; results are written to ``filename + ".npz"``.
    Total_Windows : int, optional
        Total windows to evaluate; inferred from the data when None.
    W : int, optional
        Window length in samples.
    signals_models : sequence, optional
        Model descriptors (must expose Sd, Hd, DS, t, name, dataset_name,
        directory). Defaults to an empty list.
    test_signals : sequence, optional
        Raw signals to segment instead of loading persisted test data.
    signals_info : sequence, optional
        Descriptors for ``test_signals`` (must expose ``size`` and ``name``).

    Returns
    -------
    numpy.ndarray
        Tensor of shape (N_Models, N_Signals, Total_Windows).
    """
    # Avoid a mutable default argument (the original used signals_models=[]).
    if signals_models is None:
        signals_models = []

    if signals_info is None:
        if Total_Windows is None:
            # Use the smallest test set among all models as the window budget.
            Total_Windows = 1000000000
            for model_info in signals_models:
                [x_test, y_test] = load_test_data(model_info.dataset_name,
                                                  model_info.directory)
                if np.shape(x_test)[0] < Total_Windows:
                    Total_Windows = np.shape(x_test)[0]

        print("Number of Windows: {0}".format(Total_Windows))

        n_windows = Total_Windows
        if Total_Windows / 256 > 1:
            ratio = round(Total_Windows / 256)
            n_windows = 16  #int(Total_Windows/ratio)
        n_windows = 16
        windows = np.arange(int(Total_Windows / n_windows))
        N_Windows = len(windows)
        Total_Windows = int(N_Windows * n_windows)
        N_Signals = len(signals_models)
        N_Models = N_Signals

    else:
        # Window budget derived from the shortest raw signal.
        N = [signal_info.size for signal_info in signals_info]
        N = min(N)
        step = int(W * 0.33)
        if Total_Windows is None:
            windows = np.arange(0, N - step - 1, step)
            Total_Windows = len(windows) - 2

        n_windows = Total_Windows
        if Total_Windows / 256 > 1:
            ratio = round(Total_Windows / 256)
            n_windows = int(Total_Windows / ratio)

        windows = np.arange(int(Total_Windows / n_windows))
        N_Windows = len(windows)
        Total_Windows = int(N_Windows * n_windows)
        # BUG FIX: the original read len(signals), an undefined name; this
        # branch guarantees signals_info is available.
        N_Signals = len(signals_info)
        N_Models = len(signals_models)
        # NOTE(review): this collapses all batches into one (index loop below
        # runs once per w but only w == 0 stays in range) — preserved from the
        # original; confirm it is intended.
        n_windows = Total_Windows

    loss_tensor = np.zeros((N_Models, N_Signals, Total_Windows))
    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))
    i = 0
    if test_signals is None:
        # Use the test split persisted with each model.
        for model_info in signals_models:
            [x_test, y_test] = load_test_data(model_info.dataset_name,
                                              model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                x_test, y_test, Total_Windows)
            i += 1

    else:
        # Segment the raw signals into overlapping windows.
        for signal, signal_info in zip(test_signals, signals_info):
            signal_windows = segment_matrix(signal, W, 0.33)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                signal_windows[0], signal_windows[1], Total_Windows)
            i += 1

    print("Loading model...")
    # A single GRU instance is reused; per-model weights are loaded below.
    model_info = signals_models[0]

    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name,
                   filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            if signals_info is not None:
                print("Calculating loss for " + signals_info[s].name,
                      end=';\n ')
            else:
                print("Calculating loss for " + signals_models[s].name,
                      end=';\n ')
            for w in windows:
                # Evaluate one batch of n_windows windows at a time.
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(
                    model.calculate_loss_vector(x_test, y_test))
                print(np.mean(loss_tensor[m, s, index:index + n_windows]))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models=signals_models)

    return loss_tensor
def calculate_loss_tensor(filename,
                          Total_Windows,
                          W,
                          signals_models,
                          signals=None,
                          noisy_index=None):
    """Compute a (model x signal x window) loss tensor, optionally on noisy ECGs.

    Parameters
    ----------
    filename : str
        Output path prefix; results are written to ``filename + ".npz"``.
    Total_Windows : int
        Total number of windows to evaluate per signal (rounded down to a
        whole number of 250-window batches).
    W : int
        Window length in samples.
    signals_models : sequence
        Model descriptors (must expose Sd, Hd, DS, t, name, dataset_name,
        directory).
    signals : optional
        When not None, evaluation data is segmented from noisy ECG signals
        fetched via ``get_signals_tests`` instead of persisted test splits.
    noisy_index : int, optional
        Noise level used to select the noisy-ECG dataset (1-based).

    Returns
    -------
    tuple
        ``(loss_tensor, indexes)`` where indexes is the model order used.
    """
    # Batch size is fixed at 250 windows (the original also computed an
    # unused 256-based ratio, removed here as dead code).
    n_windows = 250

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Signals = len(signals_models)
    # Round Total_Windows down to a whole number of batches.
    Total_Windows = int(N_Windows * n_windows)

    loss_tensor = np.zeros((N_Signals, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    # Evaluation order of the models; kept equal to signals_models (a random
    # permutation was considered and left disabled in the original).
    indexes = signals_models
    for i, model_info in enumerate(indexes):
        if signals is None:
            # Use the test split persisted with each model.
            [x_test, y_test] = load_test_data(model_info.dataset_name,
                                              model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                x_test, y_test, Total_Windows)
        else:
            # Fetch and segment the noisy ECG signal for this index.
            signals = get_signals_tests(db.ecg_noisy_signals[noisy_index - 1],
                                        index=i,
                                        noisy_index=noisy_index,
                                        peak_into_data=False)
            signal_test = segment_signal(signals[0][i], 256, 0.33)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                signal_test[0], signal_test[1], Total_Windows)

    print("Loading model...")
    # A single GRU instance is reused; per-model weights are loaded below.
    model_info = signals_models[0]

    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name,
                   filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for " + signals_models[s].name, end=';\n ')
            for w in windows:
                # Evaluate one batch of n_windows windows at a time.
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(
                    model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models_=indexes,
             signals_models=signals_models)

    return loss_tensor, indexes