Example #1
    def train(self, X, Y, save_directory, batch_size=32, number_of_batches=None, window_size=256,
              learning_rate_val=0.05, nepoch_val=5000, decay=0.9, track_loss=False, first_index=0, save_distance=100):

        x_windows, x_end_values, N_batches, last_index = segment_signal(X, window_size, overlap=0.25, start_index=0)
        y_windows, y_end_values, N_batches, last_index = segment_signal(Y, window_size, overlap=0.25, start_index=0)

        window_indexes = np.random.permutation(N_batches)  # randomly select windows

        # the first batch_size windows are used for training (~70%)
        x_train = x_windows[window_indexes[0:batch_size], :]
        y_train = y_windows[window_indexes[0:batch_size], :]

        # the remaining windows are for testing (~30%) -> to be implemented with cross-validation
        # testing_X_batch = X[window_indexes[mini_batch_dim:int(mini_batch_dim + mini_batch_dim * (0.3 / 0.7))], :]
        # testing_Y_batch = Y[window_indexes[mini_batch_dim:int(mini_batch_dim + mini_batch_dim * (0.3 / 0.7))], :]


        if track_loss:
            plt.ion()

        print("Start Processing")

        t1 = time.time()
        self.train_with_sgd(x_train, y_train, nepoch_val, decay, track_loss, save_directory, 0,
                            save_distance=save_distance)

        print("Dataset trained in: ~%d seconds" % int(time.time() - t1))
        self.save(save_directory, self.get_file_tag(-5, -5))
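Every example in this listing leans on segment_signal, whose implementation is not shown. As a reference, a minimal NumPy sketch consistent with the keyword-style calls above (returning the window matrix, the value following each window, the window count, and the last consumed index) could look like the function below; it is an assumption, not the library's actual code.

import numpy as np

def segment_signal_sketch(signal, window_size, overlap=0.33, start_index=0):
    # Hypothetical stand-in for segment_signal: slice `signal` into
    # fixed-size windows separated by a hop of window_size * (1 - overlap).
    step = max(1, int(window_size * (1 - overlap)))
    windows, end_values = [], []
    i = start_index
    while i + window_size < len(signal):
        windows.append(signal[i:i + window_size])
        end_values.append(signal[i + window_size])  # sample right after the window
        i += step
    return np.array(windows), np.array(end_values), len(windows), i
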
Example #2
    def train_signals(self, X, Y, signal2model, decay=0.9, track_loss=False, X_test=None, Y_test=None, use_random=True):
        hidden_dim = self.E.shape[0]
        signal_dim = self.E.shape[1]
        self.current_learning_rate = signal2model.learning_rate_val

        X_windows, x_end_values, N_batches, last_index = segment_signal(X, signal2model.window_size, overlap=0.33,
                                                                        start_index=0)
        Y_windows, y_end_values, N_batches, last_index = segment_signal(Y, signal2model.window_size, overlap=0.33,
                                                                        start_index=0)
        if use_random:
            window_indexes = np.random.permutation(N_batches)  # randomly select windows
        else:
            window_indexes = list(range(N_batches))

        # the first batch_size windows are used for training
        x_train = X_windows[window_indexes[0:signal2model.batch_size], :]
        y_train = Y_windows[window_indexes[0:signal2model.batch_size], :]

        if (X_test is None) and (Y_test is None):
            # the remaining windows are used for testing
            x_test = X_windows[window_indexes[signal2model.batch_size:], :]
            y_test = Y_windows[window_indexes[signal2model.batch_size:], :]
        else:
            # segment the externally supplied test signals
            x_test, _, _, _ = segment_signal(X_test, signal2model.window_size, overlap=0.25, start_index=0)
            y_test, _, _, _ = segment_signal(Y_test, signal2model.window_size, overlap=0.25, start_index=0)

        self.save_test_data(self.get_file_tag(-1, -1), signal2model.signal_directory, [x_test, y_test])
        # free memory before training
        x_test, y_test, X_windows, Y_windows = [], [], [], []

        if track_loss:
            plt.show()
            plt.ion()

        print("Starting Processing")
        try:
            t1 = time.time()
            self.train_with_msgd(x_train, y_train, signal2model.number_of_epochs, decay, track_loss,
                                 signal2model.signal_directory, 0, save_distance=signal2model.save_interval)
            print("Dataset trained in: ~%d seconds" % int(time.time() - t1))

        except Exception:
            print("Error has occurred in dataset:", sys.exc_info()[0])

        self.save(signal2model.signal_directory, self.get_file_tag(-5, -5))
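A minimal way to exercise train_signals is sketched below; the call is hypothetical and assumes `model` is an instance of this class and `signal2model` is a Signal2Model configured elsewhere (window_size, batch_size, learning_rate_val, number_of_epochs, save_interval, signal_directory).

# X and Y are 1-D integer-coded signals; Y is typically X shifted by one sample
model.train_signals(X, Y, signal2model, decay=0.9, track_loss=False)
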
Example #3
def train_block(signals, signal2model, signal_indexes, n_for_each):
    model = GRU.LibPhys_GRU(signal_dim=signal2model.signal_dim,
                            hidden_dim=signal2model.hidden_dim,
                            signal_name=signal2model.signal_name,
                            n_windows=signal2model.mini_batch_size)
    model.save(signal2model.signal_directory, model.get_file_tag(-1, -1))

    x_train = []
    y_train = []
    for i in signal_indexes:
        X_windows, y_end_values, n_windows, last_index = segment_signal(
            signals[0][i], signal2model.window_size, overlap=0.33)
        Y_windows, y_end_values, n_windows, last_index = segment_signal(
            signals[1][i], signal2model.window_size, overlap=0.33)

        window_indexes = np.random.permutation(
            n_windows)  # randomly select windows
        if len(x_train) == 0:
            x_train = X_windows[window_indexes[0:n_for_each], :]
            y_train = Y_windows[window_indexes[0:n_for_each], :]
        else:
            x_train = np.append(x_train,
                                X_windows[window_indexes[0:n_for_each], :],
                                axis=0)
            y_train = np.append(y_train,
                                Y_windows[window_indexes[0:n_for_each], :],
                                axis=0)

        # the windows not used for training are kept for testing
        x_test = X_windows[window_indexes[n_for_each:], :]
        y_test = Y_windows[window_indexes[n_for_each:], :]

    model.save_test_data(model.get_file_tag(-5, -5),
                         signal2model.signal_directory, [x_test, y_test])
    x_test = []
    y_test = []
    X_windows = []
    Y_windows = []
    t1 = time.time()
    model.train_with_msgd(x_train,
                          y_train,
                          signal2model.number_of_epochs,
                          0.9,
                          track_loss=False,
                          save_directory=signal2model.signal_directory,
                          save_distance=signal2model.save_interval)
    print("Dataset trained in: ~%d seconds" % int(time.time() - t1))
    model.save(signal2model.signal_directory, model.get_file_tag(-5, -5))
Example #4
    def generate_predicted_signal_2(self, full_signal, window_size, N=None):
        # unpack the model parameters (the recurrent weights keep the name W,
        # so the window length is passed as window_size to avoid shadowing it)
        E, V, U, W, b, c = self.E, self.V, self.U, self.W, self.b, self.c

        if N is None:
            N = len(full_signal)

        x = theano.shared(segment_signal(full_signal, window_size, 0)[0])  # keep only the window matrix

        def forward_prop_step(x_t, s_t1_prev, s_t2_prev, s_t3_prev):
            # Embedding layer
            x_e = E[:, x_t]

            # GRU Layer 1
            z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) +
                                       b[0])
            r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) +
                                       b[1])
            c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
            s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev

            # GRU Layer 2
            z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) +
                                       b[3])
            r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) +
                                       b[4])
            c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
            s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev

            # GRU Layer 3
            z_t3 = T.nnet.hard_sigmoid(U[6].dot(s_t2) + W[6].dot(s_t3_prev) +
                                       b[6])
            r_t3 = T.nnet.hard_sigmoid(U[7].dot(s_t2) + W[7].dot(s_t3_prev) +
                                       b[7])
            c_t3 = T.tanh(U[8].dot(s_t2) + W[8].dot(s_t3_prev * r_t3) + b[8])
            s_t3 = (T.ones_like(z_t3) - z_t3) * c_t3 + z_t3 * s_t3_prev

            # Final output calculation
            # Theano's softmax returns a matrix with one row, we only need the row
            o_t = T.nnet.softmax(V.dot(s_t3) + c)[0]

            return [o_t, s_t1, s_t2, s_t3]

        [o, s_1, s_2, s_3], updates = theano.scan(
            forward_prop_step,
            sequences=x,
            truncate_gradient=self.bptt_truncate,
            outputs_info=[
                None,
                dict(initial=T.zeros(self.hidden_dim)),
                dict(initial=T.zeros(self.hidden_dim)),
                dict(initial=T.zeros(self.hidden_dim))
            ])

        return T.argmax(o, axis=1)
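Each GRU layer above applies the standard update-gate / reset-gate / candidate gating. A plain NumPy sketch of a single step (names hypothetical; vectors of length hidden_dim with matching weight matrices assumed) makes the math explicit:

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def gru_step(x_e, s_prev, U_z, U_r, U_c, W_z, W_r, W_c, b_z, b_r, b_c):
    # One GRU update, mirroring the z/r/c gating of each layer above
    z = sigmoid(U_z @ x_e + W_z @ s_prev + b_z)        # update gate
    r = sigmoid(U_r @ x_e + W_r @ s_prev + b_r)        # reset gate
    c = np.tanh(U_c @ x_e + W_c @ (s_prev * r) + b_c)  # candidate state
    return (1.0 - z) * c + z * s_prev                  # new hidden state
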
Example #5
    def train(self, X, Y, save_directory, batch_size, number_of_batches, window_size,
              learning_rate_val, nepoch_val, decay, track_loss, first_index, save_distance):
        x_windows, x_end_values, N_batches, last_index = segment_signal(X, window_size, overlap=0.33, start_index=0)
        y_windows, y_end_values, N_batches, last_index = segment_signal(Y, window_size, overlap=0.33, start_index=0)

        window_indexes = np.random.permutation(N_batches)  # randomly select windows

        # the first batch_size windows are used for training
        x_train = x_windows[window_indexes[0:batch_size], :]
        y_train = y_windows[window_indexes[0:batch_size], :]

        if track_loss:
            plt.ion()

        print("Start Processing")

        t1 = time.time()
        self.train_with_msgd(x_train, y_train, nepoch_val, decay, track_loss, save_directory, 0,
                             save_distance=save_distance)
        print("Dataset trained in: ~%d seconds" % int(time.time() - t1))
        self.save(save_directory, self.get_file_tag(-5, -5))
Example #6
def get_signals(n_samples, window_size=1024, train_ratio=0.67, overlap=0.5):
    # Gets training and testing segments
    files = sorted(
        glob.iglob(os.path.join(DATASET_DIRECTORY + '/*', '*_[1-2]m.mat'),
                   recursive=True))
    signals_train = []
    signals_test = []
    labels_train = []
    labels_test = []
    n_windows = int(n_samples / (window_size * overlap))
    train_windows = int(n_windows * train_ratio)
    test_windows = int(n_windows * (1 - train_ratio))
    for filename in files:
        # glob already yields the full path, so load the file directly
        original_signal = np.array(loadmat(filename)['val'][0][:n_samples])

        # Signal scaling: denoise, then normalize
        original_signal = remove_noise(original_signal)
        original_signal = normalize(original_signal)

        original_signal = segment_signal(original_signal,
                                         window_size,
                                         overlap=overlap)[0]
        # print("Orig:", original_signal.shape)
        # print(train_windows, test_windows)
        # exit()
        train_signal = original_signal[:train_windows]
        test_signal = original_signal[train_windows:]
        signals_train.append(train_signal)
        signals_test.append(test_signal)
        # the label is the name of the file's parent directory
        labels_train.append(
            [filename.split('/')[-2] for i in range(train_windows)])
        labels_test.append(
            [filename.split('/')[-2] for i in range(test_windows)])

    return np.array(signals_train), np.array(signals_test), np.array(
        labels_train), np.array(labels_test)
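To trace the window bookkeeping in get_signals, here is the arithmetic with its default arguments and a hypothetical n_samples of 160000; note that the two int() truncations can drop a window:

n_samples, window_size, overlap, train_ratio = 160000, 1024, 0.5, 0.67
n_windows = int(n_samples / (window_size * overlap))   # int(160000 / 512) = 312
train_windows = int(n_windows * train_ratio)           # int(209.04) = 209
test_windows = int(n_windows * (1 - train_ratio))      # int(102.96) = 102, so one window is dropped
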
Example #7
# Read one signal
sig = np.array(
    loadmat(os.path.join(DATASET_DIRECTORY, 'f1y01m.mat'))['val'][0][:70000])

# Min-max scale (note: this also inverts the signal)
sig = (np.max(sig) - sig) / (np.max(sig) - np.min(sig))

print(sig)
x = segment_signal(sig, 1024)[0].astype(np.float32)

print(x.shape)

model = Autoencoder()
model.fit(x, n_epochs=200, save=False)
pred = model.reconstruct(x)[0]

#lr = model.get_adapt_lr()
#np.save('/home/bento/lr.npy', lr)
lr = np.load('/home/bento/lr.npy')
#lr = (np.max(lr) - lr)/(np.max(lr) - np.min(lr))
#costs = model.get_cost_vector()
#np.save('/home/bento/costs.npy', costs)
costs = np.log(np.load('/home/bento/costs.npy'))
Example #8
def train_block(signals_train,
                signals_test,
                signal2model,
                signal_indexes=None,
                n_for_each=16,
                overlap=0.33,
                random_training=True,
                start_index=0,
                track_loss=False,
                loss_interval=1,
                train_ratio=1):
    """
    This method embraces several datasets (or one) according to a number of records for each

    :param signals: - list - a list containing two int vectors:
                                signal - input vector X, used for the input;

    :param signal2model: - Signal2Model object - object containing the information about the model, for more info
                            check Biosignals.utils.functions.signal2model

    :param signal_indexes: - list - a list containing the indexes of the "signals" variable to be trained.
                                    If None is given, all signals will be used.

    :param n_for_each: - int - number of windows from each signal to be inserted in the model training

    :param overlap: - float - value in the interval [0,1] that corresponds to the overlapping ratio of windows

    :param random_training: - boolean - value that if True random windows will be inserted in the training

    :param start_index: - int - value from which the windows will be selected

    :param track_loss: - boolean - value to plot loss as the model is trained

    :return: trained model
    """

    if signal_indexes is None:
        signal_indexes = range(len(signals_train))
    #self.save(signal2model.signal_directory, self.get_file_tag(-1, -1))

    x_train = []
    y_train = []
    for i in signal_indexes:

        # Creation of the Time Windows from the dataset
        if n_for_each == 1:
            if len(x_train) == 0:
                x_train = signals_train[i][:signal2model.window_size]
                y_train = signals_test[i][
                    1:signal2model.window_size +
                    1]  # for next signal without noise, [1:window_size + 1]
            else:
                x_train = np.vstack(
                    (x_train, signals_train[i][:signal2model.window_size]))
                y_train = np.vstack(
                    (y_train, signals_test[i][1:signal2model.window_size + 1]))
        else:
            X_windows, y_end_values, n_windows, last_index = segment_signal(
                signals_train[i][:-1],
                signal2model.window_size,
                overlap=overlap,
                start_index=start_index)
            Y_windows, y_end_values, n_windows, last_index = segment_signal(
                signals_test[i][1:],
                signal2model.window_size,
                overlap=overlap,
                start_index=start_index)

            # cap n_for_each at the number of available windows, then round it
            # down to a multiple of the mini-batch size
            n_for_each = min(n_for_each, np.shape(X_windows)[0])
            n_for_each = signal2model.mini_batch_size * int(n_for_each / signal2model.mini_batch_size)

            last_training_index = int(n_windows * train_ratio)
            # List of the windows to be inserted in the dataset
            if random_training:
                window_indexes = np.random.permutation(
                    last_training_index)  # randomly select windows
            else:
                window_indexes = list(range(
                    (n_windows)))  # first windows are selected

            # Insertion of the windows of this signal in the general dataset
            if len(x_train) == 0:
                # First is for train data
                x_train = X_windows[window_indexes[0:n_for_each], :]
                y_train = Y_windows[window_indexes[0:n_for_each], :]
                print("x_train shape:", x_train.shape)

                # # The rest is for test data
                # x_test = X_windows[last_training_index:, :]
                # y_test = Y_windows[last_training_index:, :]
            else:
                x_train = np.append(x_train,
                                    X_windows[window_indexes[0:n_for_each], :],
                                    axis=0)
                y_train = np.append(y_train,
                                    Y_windows[window_indexes[0:n_for_each], :],
                                    axis=0)
                # x_test = np.append(x_train, X_windows[window_indexes[n_for_each:], :], axis=0)
                # y_test = np.append(x_train, Y_windows[window_indexes[n_for_each:], :], axis=0)

                # Save test data
                # self.save_test_data(signal2model.signal_directory, [x_test, y_test])

    # Start time recording

    # Start training model
    model = LibphysMBGRU.LibphysMBGRU(signal2model)
    # (a LibphysGRU would instead take: signal2model, ModelType.CROSS_MBSGD, params)
    t1 = time.time()
    model.start_time = time.time()
    returned = model.train_model(x_train, y_train, signal2model, track_loss,
                                 loss_interval)

    print("Dataset trained in: ~%d seconds" % int(time.time() - t1))

    # Model last training is then saved
    if returned:
        model.save(signal2model.signal_directory, model.get_file_tag(-5, -5))
        return True
    else:
        return False
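A hypothetical invocation of this train_block (assuming signals_train/signals_test are lists of integer-coded signal arrays and signal2model is a configured Signal2Model):

if train_block(signals_train, signals_test, signal2model,
               n_for_each=32, overlap=0.33, train_ratio=0.9):
    print("training finished and the model was saved")
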
Example #9
def process_error_by_prediction(filename):
    signals_models = db.signal_models
    signals_tests = db.signal_tests

    def load_model(model_info, N_Windows):
        model = GRU.LibPhys_GRU(model_info.Sd, hidden_dim=model_info.Hd, signal_name=model_info.dataset_name,
                                n_windows=N_Windows)
        model.load(signal_name=model_info.dataset_name, filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        return model

    def get_models(signal_models, N_Windows=None, index=None):
        models = []

        if index is None:
            for model_info in signal_models:
                models.append(load_model(model_info, N_Windows))
        else:
            model_info = signal_models[index]
            models.append(load_model(model_info, N_Windows))

        return models

    signals = get_signals_tests(signals_tests, signals_models[0].Sd)

    W = 256
    N_Windows = 20000
    for m in range(len(signals_models)):
        models = get_models(signals_models, N_Windows, index=m)
        predicted_signals = list(range(len(signals_tests)))
        model_errors = list(range(len(signals_tests)))
        predicted_signals_ = list(range(len(signals_tests)))
        model_errors_ = list(range(len(signals_tests)))
        print("\nProcessing Model " + signals_models[m].name + ":")
        for s in range(len(signals)):
            print("\tProcessing Signal " + signals_tests[s].name + ";")
            signal = signals[s]
            if isinstance(model_errors[s], int):
                # first visit: replace the placeholder with empty lists
                model_errors[s] = []
                model_errors_[s] = []
                predicted_signals_[s] = []
                predicted_signals[s] = []

            [segmented, y, N_Windows, last_index] = segment_signal(signal, W, 0, N_Windows)
            [x, e] = models[0].predict_class(segmented, y)
            predicted_signals[s].append(x[0, :])
            predicted_signals_[s].append(x[-1, :])
            model_errors[s].append(e[0, :])
            model_errors_[s].append(e[-1, :])
            limit = last_index + (N_Windows + W)
            print("processing...", end =" ")
            while limit < signals_tests[s].size:
                print(str(limit) + " of " + str(signals_tests[s].size), end="_")
                [segmented, y, N_Windows, last_index] = segment_signal(signal, W, 0, N_Windows, start_index=last_index)
                [x, e] = models[0].predict_class(segmented, y)
                predicted_signals[s][-1] = np.append(predicted_signals[s][-1], x[0, :])
                predicted_signals_[s][-1] = np.append(predicted_signals_[s][-1], x[-1, :])
                model_errors[s][-1] = np.append(model_errors[s][-1], e[0, :])
                model_errors_[s][-1] = np.append(model_errors_[s][-1], e[-1, :])
                # print(np.shape(predicted_signals[s][-1]))
                limit = last_index + (N_Windows + W)

        np.savez(filename + str(m) + ".npz",
                 predicted_signals=predicted_signals,
                 model_errors=model_errors,
                 predicted_signals_=predicted_signals_,
                 model_errors_=model_errors_,
                 signals_models=signals_models,
                 signals_tests=signals_tests)
        print(filename + str(m) + ".npz has been saved")
Example #10
def calculate_loss_tensor(filename,
                          Total_Windows,
                          W,
                          signals_models,
                          signals=None,
                          noisy_index=None):

    # windows are processed in fixed-size batches
    n_windows = 250

    windows = np.arange(int(Total_Windows / n_windows))
    N_Windows = len(windows)
    N_Signals = len(signals_models)
    Total_Windows = int(N_Windows * n_windows)

    loss_tensor = np.zeros((N_Signals, N_Signals, Total_Windows))

    X_matrix = np.zeros((N_Signals, Total_Windows, W))
    Y_matrix = np.zeros((N_Signals, Total_Windows, W))

    i = 0
    indexes = signals_models  #[np.random.permutation(len(signals_models))]
    for model_info in indexes:
        if signals is None:
            # [x_test, y_test] = load_test_data("GRU_" + model_info.dataset_name, + "["+str(model_info.Sd)+"."+str(model_info.Hd)+".-1.-1.-1]"
            #                               , model_info.directory)
            [x_test, y_test] = load_test_data(model_info.dataset_name,
                                              model_info.directory)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                x_test, y_test, Total_Windows)
        else:
            signals = get_signals_tests(db.ecg_noisy_signals[noisy_index - 1],
                                        index=i,
                                        noisy_index=noisy_index,
                                        peak_into_data=False)
            signal_test = segment_signal(signals[0][i], 256, 0.33)
            X_matrix[i, :, :], Y_matrix[i, :, :] = randomize_batch(
                signal_test[0], signal_test[1], Total_Windows)

        i += 1

    print("Loading model...")
    model_info = signals_models[0]

    model = GRU.LibPhys_GRU(model_info.Sd,
                            hidden_dim=model_info.Hd,
                            signal_name=model_info.dataset_name,
                            n_windows=n_windows)

    for m in range(len(signals_models)):
        model_info = signals_models[m]
        model.signal_name = model_info.dataset_name
        model.load(signal_name=model_info.name,
                   filetag=model.get_file_tag(model_info.DS, model_info.t),
                   dir_name=model_info.directory)
        print("Processing " + model_info.name)

        for s in range(N_Signals):
            print("Calculating loss for " + signals_models[s].name, end=';\n ')
            for w in windows:
                index = w * n_windows
                x_test = X_matrix[s, index:index + n_windows, :]
                y_test = Y_matrix[s, index:index + n_windows, :]
                loss_tensor[m, s, index:index + n_windows] = np.asarray(
                    model.calculate_loss_vector(x_test, y_test))

    np.savez(filename + ".npz",
             loss_tensor=loss_tensor,
             signals_models_=indexes,
             signals_models=signals_models)

    return loss_tensor, indexes
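The returned tensor is indexed as loss_tensor[model, signal, window]. One typical downstream use (an assumption, not shown in the source) is to attribute each test signal to the model with the lowest mean loss:

import numpy as np

# Hypothetical call: 1000 windows of width 256 over the configured models
loss_tensor, indexes = calculate_loss_tensor("losses", 1000, 256, signals_models)
best_model_per_signal = np.argmin(loss_tensor.mean(axis=2), axis=0)
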
Example #11
N_Windows = 2048
signal_ecg = get_signals_tests(signals_tests, signals_models[0].Sd,
                               index=7)[0][0]
signal_resp = get_signals_tests(signals_tests, signals_models[0].Sd,
                                index=27)[0][0]
# predicted_signals = list(range(len(signals_tests)))
# model_errors = list(range(len(signals_tests)))
fi = np.random.randint(0, 50000)
W = 256
[ecg_segments_, y_ecg, _a, end_index] = segment_signal(signal_ecg,
                                                       W,
                                                       0,
                                                       N_Windows,
                                                       start_index=fi)
[resp_segments, y_resp, _a, end_index] = segment_signal(signal_resp,
                                                        W,
                                                        0,
                                                        N_Windows,
                                                        start_index=fi)
model = get_models(signals_models, index=0, N_Windows=N_Windows)[0]

# [pred_signal_ecg,error] = model.predict_class(ecg_segments, y)
[pred_signal_ecg,
 e_ecg] = model.predict_class(np.asarray(ecg_segments_, dtype=int),
                              np.asarray(y_ecg, dtype=int))
[pred_signal_resp, e_resp] = model.predict_class(resp_segments, y_resp)
print(e_ecg)
Example #12
        if history[-1] == "eeg":
            i = signal_info[2]
        else:
            X_train, Y_train = get_signals(signals_models[0]['Sd'],
                                           signal_info[1],
                                           peak_into_data=False)

    else:
        X_train, Y_train = get_signals(signals_models[0]['Sd'],
                                       signal_info[1],
                                       peak_into_data=False)

    history.append(signal_info[0])
    N = len(X_train[i])

    [X_segment_matrix, N_windows] = segment_signal(X_train[i], W)
    [Y_segment_matrix, N_windows] = segment_signal(Y_train[i], W)

    X_segment_matrix = np.reshape(
        X_segment_matrix,
        (1, np.shape(X_segment_matrix)[0], np.shape(X_segment_matrix)[1]))
    Y_segment_matrix = np.reshape(
        Y_segment_matrix,
        (1, np.shape(Y_segment_matrix)[0], np.shape(Y_segment_matrix)[1]))

    if len(X_loss_window_tensor) == 0:
        X_loss_window_tensor = X_segment_matrix
        Y_loss_window_tensor = Y_segment_matrix
    else:
        X_loss_window_tensor = np.append(X_loss_window_tensor,
                                         X_segment_matrix,
                                         axis=0)
        Y_loss_window_tensor = np.append(Y_loss_window_tensor,
                                         Y_segment_matrix,
                                         axis=0)
Example #13
# Read one signal
sig = np.array(
    loadmat(os.path.join(DATASET_DIRECTORY, 'f1y02m.mat'))['val'][0][:])

# clip low values to ignore artifacts
sig = np.maximum(sig, 14670)

# Min-max scale to [-1, 1]
x = (sig - np.min(sig)) / (np.max(sig) - np.min(sig)) * 2 - 1
x = segment_signal(x, 1024)[0]

# Optional: detect peaks and mark them on a plot
# peaks = peaks(sig, tol=0.65)
# plt.plot(sig)
# plt.title("Detected Peaks")
Example #14
# colors = ['r', 'g', 'b', 'c', 'p']
# plt.plot(x, X_train[0][0:Z+1], label="RAW")
# for m_index in range(len(signals_models)):
#     plt.plot(x, pred_signals[m_index,:], colors[m_index], label=signals_models[m_index]["s_name"])
#
# plt.legend()
# plt.draw()
# plt.figure(B)
#
# for m_index in range(len(signals_models)):
#     plt.plot(x, errors[m_index, :], colors[m_index], label=signals_models[m_index]["s_name"])
#
# plt.legend()
# plt.show()

segments_1 = segment_signal(errors[0, :], 256, 0)
segments_2 = segment_signal(errors[1, :], 256, 0)
segments_3 = segment_signal(errors[2, :], 256, 0)
segments_4 = segment_signal(errors[3, :], 256, 0)
stds_1 = np.std(segments_1[0], axis=1)
stds_2 = np.std(segments_2[0], axis=1)
stds_3 = np.std(segments_3[0], axis=1)
stds_4 = np.std(segments_4[0], axis=1)

stds = np.vstack((stds_1, stds_2, stds_3, stds_4))
loss = np.min(stds)

plot_confusion_matrix


def print_confusion(Mod, Sig, loss_tensor, signals_models, signals_tests):
Example #15
    def train_block(self, signals, signal2model, signal_indexes=None, n_for_each=12, overlap=0.33, random_training=True,
                    start_index=0, track_loss=None, loss_interval=1):
        """
        This method embraces several datasets (or one) according to a number of records for each

        :param signals: - list - a list containing two int vectors:
                                    signal - input vector X, used for the input;

        :param signal2model: - Signal2Model object - object containing the information about the model, for more info
                                check Biosignals.utils.functions.signal2model

        :param signal_indexes: - list - a list containing the indexes of the "signals" variable to be trained.
                                        If None is given, all signals will be used.

        :param n_for_each: - int - number of windows from each signal to be inserted in the model training

        :param overlap: - float - value in the interval [0,1] that corresponds to the overlapping ratio of windows

        :param random_training: - boolean - value that if True random windows will be inserted in the training

        :param start_index: - int - value from which the windows will be selected

        :param track_loss: - boolean - value to plot loss as the model is trained

        :return: trained model
        """

        if signal_indexes is None:
            signal_indexes = range(len(signals))

        self.save(signal2model.signal_directory, self.get_file_tag(-1, -1))
        n_for_each = int(n_for_each)
        x_train = []
        y_train = []
        for i in signal_indexes:

            # Creation of the Time Windows from the dataset
            X_windows, y_end_values, n_windows, last_index = segment_signal(signals[i][:-1], signal2model.window_size,
                                                                            overlap=overlap, start_index=start_index)
            Y_windows, y_end_values, n_windows, last_index = segment_signal(signals[i][1:], signal2model.window_size,
                                                                            overlap=overlap, start_index=start_index)
            last_training_index = int(n_windows * 0.33)
            # List of the windows to be inserted in the dataset
            if random_training:
                window_indexes = np.random.permutation(last_training_index)  # randomly select windows
            else:
                window_indexes = list(range(n_windows))  # first windows are selected

            # Insertion of the windows of this signal in the general dataset
            if len(x_train) == 0:
                # First is for train data
                x_train = X_windows[window_indexes[0:n_for_each], :]
                y_train = Y_windows[window_indexes[0:n_for_each], :]

                # The rest is for test data
                x_test = X_windows[last_training_index:, :]
                y_test = Y_windows[last_training_index:, :]
            else:
                x_train = np.append(x_train, X_windows[window_indexes[0:n_for_each], :], axis=0)
                y_train = np.append(y_train, Y_windows[window_indexes[0:n_for_each], :], axis=0)
                x_test = np.append(x_test, X_windows[window_indexes[n_for_each:], :], axis=0)
                y_test = np.append(y_test, Y_windows[window_indexes[n_for_each:], :], axis=0)

        # Save test data
        self.save_test_data(signal2model.signal_directory, [x_test, y_test])

        # Start time recording
        self.start_time = time.time()
        t1 = time.time()

        # Start training model
        self.train_model(x_train, y_train, signal2model, track_loss, loss_interval)

        print("Dataset trained in: ~%d seconds" % int(time.time() - t1))

        # Model last training is then saved
        self.save(signal2model.signal_directory, self.get_file_tag(-5, -5))