Example #1
def visualize_signals_testing(gen, sig_type_source):
    '''
    visualize the estimated and target signals on a minibatch of testing data
    :param gen: test generator
    :param sig_type_source: list of str of source signals: e.g. ['aX', 'aY', 'aZ'] for all three axes of the accelerometer
    :return: -
    '''
    torch.cuda.empty_cache()
    finished, X_batch, Y_batch, subject_id_list = next(gen)

    if finished:
        print('Generator Finished')
    else:
        X_batch = torch.from_numpy(X_batch)
        X_batch = network_models.cuda(X_batch)
        X_batch = X_batch.type(torch.cuda.FloatTensor)

        Y_batch_predicted = sig_model.forward(
            X_batch).squeeze().detach().cpu().numpy()
        X_batch = X_batch.detach().cpu().numpy()

        no_sigs = X_batch.shape[1]
        for v in range(0, Y_batch.shape[0], 4):
            fig = plt.figure(figsize=(12, 8))
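            # top panel: target (red) and estimate (blue), each zero-mean and scaled to unit energy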
            plt.subplot(no_sigs + 1, 1, 1)
            plt.plot((Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :])) / (np.sqrt(
                np.sum(
                    np.power(Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :]),
                             2)))), '-r')
            plt.plot(
                (Y_batch_predicted[v, :] - np.mean(Y_batch_predicted[v, :])) /
                (np.sqrt(
                    np.sum(
                        np.power(
                            Y_batch_predicted[v, :] -
                            np.mean(Y_batch_predicted[v, :]), 2)))), '-b')

            plt.title('Subject: ' + str(subject_id_list[v]) +
                      ' Index in batch: ' + str(v))

            for k in range(2, no_sigs + 2):
                plt.subplot(no_sigs + 1, 1, k)
                plt.plot(X_batch[v, k - 2, :], '-g')
                plt.plot(
                    (Y_batch_predicted[v, :] - np.min(Y_batch_predicted[v, :]))
                    / (np.max(Y_batch_predicted[v, :]) -
                       np.min(Y_batch_predicted[v, :])), '-b')

                #plt.plot(Y_batch_predicted[v,:] , '-b')
                plt.title(sig_type_source[k - 2])

            plt.tight_layout()
            plt.show()
            if save_true:
                fig.savefig(directory + '/Code Output/' + file_name_pre +
                            '_waveforms_testing_hf_' + str(v) + '.png')

        del X_batch
        del Y_batch_predicted
        del Y_batch
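
The repeated zero-mean, unit-energy normalization used in the plots above can be factored into a small helper. A minimal sketch, numerically equivalent to the inline expressions; the helper name _unit_energy is not part of the original code:

import numpy as np

def _unit_energy(x):
    # subtract the mean and scale to unit energy, matching the inline normalization above
    x = np.asarray(x, dtype=float)
    x = x - np.mean(x)
    return x / np.sqrt(np.sum(np.power(x, 2)))

# e.g. plt.plot(_unit_energy(Y_batch[v, 0, :]), '-r') reproduces the red target trace
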
def visualize_signals(gen, sig_type_source):
    '''
    plots target signal segments, source signal segments and the estimated target signal segments together
    :param gen: data generator
    :param sig_type_source: list of source signals e.g. ['aX', 'aY', 'aZ'] for all three axes of the accelerometer as source signals
    :return: -
    '''
    torch.cuda.empty_cache()
    X_batch, Y_batch, list_subjects = next(gen)

    X_batch = torch.from_numpy(X_batch)
    X_batch = network_models.cuda(X_batch)
    X_batch = X_batch.type(torch.cuda.FloatTensor)

    Y_batch_predicted = sig_model.forward(
        X_batch).squeeze().detach().cpu().numpy()
    X_batch = X_batch.detach().cpu().numpy()

    # Y_batch_predicted=-Y_batch_predicted

    no_sigs = X_batch.shape[1]
    for v in range(Y_batch.shape[0]):
        fig = plt.figure(figsize=(12, 8))
        plt.subplot(no_sigs + 1, 1, 1)
        #plt.plot(Y_batch[v,0,:]  , '-r')
        plt.plot((Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :])) / (np.sqrt(
            np.sum(np.power(Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :]),
                            2)))), '-r')
        #plt.plot((Y_batch_predicted[v,:]-np.min(Y_batch_predicted[v,:]) )/(np.max(Y_batch_predicted[v,:])-np.min(Y_batch_predicted[v,:])) , '-b')
        plt.plot((Y_batch_predicted[v, :] - np.mean(Y_batch_predicted[v, :])) /
                 (np.sqrt(
                     np.sum(
                         np.power(
                             Y_batch_predicted[v, :] -
                             np.mean(Y_batch_predicted[v, :]), 2)))), '-b')

        #plt.plot(Y_batch_predicted[v, :] , '-b')
        plt.title('Subject Number: ' + str(list_subjects[v]))

        for k in range(2, no_sigs + 2):
            plt.subplot(no_sigs + 1, 1, k)
            plt.plot(X_batch[v, k - 2, :], '-g')
            plt.plot(
                (Y_batch_predicted[v, :] - np.min(Y_batch_predicted[v, :])) /
                (np.max(Y_batch_predicted[v, :]) -
                 np.min(Y_batch_predicted[v, :])), '-b')

            #plt.plot(Y_batch_predicted[v,:] , '-b')
            plt.title(sig_type_source[k - 2])

        plt.tight_layout()
        plt.show()
        if save_true:
            fig.savefig(directory + '/Code Output/' + file_name_pre +
                        '_waveforms_' + str(v) + '.png')

    del X_batch
    del Y_batch_predicted
    del Y_batch
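
A minimal usage sketch, assuming a module-level sig_model and data generators as in the functions above; train_gen and test_gen are placeholder names not present in the original listing:

sig_type_source = ['aX', 'aY', 'aZ']           # all three accelerometer axes as source signals
visualize_signals(train_gen, sig_type_source)  # overlays target, sources and the estimate
visualize_signals_testing(test_gen, sig_type_source)
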
def visualize_signals(gen, sig_type_source):
    '''
    plots source signal segments together with the min-max normalized estimated target signal on a time axis;
    segments whose frequency entry is 3 or greater are cropped to a shorter window around the middle of the segment
    :param gen: data generator yielding (X_batch, tt, freq_vector)
    :param sig_type_source: list of source signals e.g. ['aX', 'aY', 'aZ'] for all three axes of the accelerometer as source signals
    :return: -
    '''
    torch.cuda.empty_cache()
    X_batch, tt, freq_vector = next(gen)

    X_batch = torch.from_numpy(X_batch)
    X_batch = network_models.cuda(X_batch)
    X_batch = X_batch.type(torch.cuda.FloatTensor)

    Y_batch_predicted = sig_model.forward(
        X_batch).squeeze().detach().cpu().numpy()
    X_batch = X_batch.detach().cpu().numpy()

    offset = len(tt) // 2

    no_sigs = X_batch.shape[1]
    for v in range(X_batch.shape[0]):
        plt.figure()
        for k in range(1, no_sigs + 1):
            inp = X_batch[v, k - 1, :]
            out = (Y_batch_predicted[v, :] - np.min(Y_batch_predicted[v, :])) / (
                np.max(Y_batch_predicted[v, :]) - np.min(Y_batch_predicted[v, :]))

            # crop to a shorter window around the middle of the segment for frequencies >= 3
            if freq_vector[v] < 3:
                time_ax = tt
            else:
                window = int(len(tt) // freq_vector[v])
                time_ax = tt[offset:offset + window]
                inp = inp[offset:offset + window]
                out = out[offset:offset + window]

            plt.subplot(no_sigs, 1, k)
            plt.plot(time_ax, inp, '-g')
            plt.plot(time_ax, out, '-b')
            plt.title(sig_type_source[k - 1] + ' , ' + str(freq_vector[v]))

        plt.tight_layout()
        plt.show()


    del X_batch
    del Y_batch_predicted
def test_model(gen, sig_model):
    '''
    tests a model by running it over the test data and calculating error metrics
    :param gen: test generator
    :param sig_model: model
    :return: list_pearson_r_loss: list of Pearson correlations between target and estimated segments
    :return: list_subject_ids: subject ID for each segment
    :return: list_i_errors: list of R-I interval errors between each target and estimated signal segment
    :return: list_j_errors: list of R-J interval errors between each target and estimated signal segment
    :return: list_k_errors: list of R-K interval errors between each target and estimated signal segment
    :return: list_noise_var_target: noise variance of the ensemble-averaged target for each segment
    :return: list_noise_var_estimate: noise variance of the ensemble-averaged estimate for each segment
    :return: list_target_i_points: I-point location of the target for each segment
    :return: list_target_j_points: J-point location of the target for each segment
    :return: list_target_k_points: K-point location of the target for each segment
    :return: list_sdr: signal-to-distortion ratio between the estimated and target segments
    '''
    criterion = network_models.PearsonRLoss()
    with torch.no_grad():

        finished = False
        list_pearson_r_loss = []
        list_i_errors = []
        list_j_errors = []
        list_k_errors = []
        list_subject_ids = []
        list_noise_var_target = []
        list_noise_var_estimate = []
        list_target_i_points = []
        list_target_j_points = []
        list_target_k_points = []
        list_sdr = []

        while not finished:

            print('Running Testing')
            torch.cuda.empty_cache()
            finished, X_batch, Y_batch, subject_id_list = next(gen)

            if finished:
                print('Generator Finished')
            else:

                # the last input channel is the ECG, used only for R-peak detection
                ecg = X_batch[:, -1, :]

                # calculate the Pearson correlation on the remaining source channels
                X_batch = torch.from_numpy(X_batch[:, :-1, :]).contiguous()
                X_batch = network_models.cuda(X_batch)
                X_batch = X_batch.type(torch.cuda.FloatTensor)

                Y_batch_predicted = sig_model.forward(X_batch).squeeze()

                Y_batch = torch.from_numpy(Y_batch)
                Y_batch = network_models.cuda(Y_batch)
                Y_batch = Y_batch.type(torch.cuda.FloatTensor)
                Y_batch = Y_batch.squeeze()

                if len(Y_batch.size()) == 1:
                    Y_batch = Y_batch.view(1, -1)

                if len(Y_batch_predicted.size()) == 1:
                    Y_batch_predicted = Y_batch_predicted.view(1, -1)

                loss = criterion.get_induvidual_losses(Y_batch_predicted, Y_batch)

                list_pearson_r_loss += loss.cpu().numpy().reshape(-1).tolist()
                list_subject_ids += subject_id_list

                # I, J, K points
                Y_batch_predicted = Y_batch_predicted.detach().cpu().numpy()
                Y_batch = Y_batch.detach().cpu().numpy()

                for v in range(Y_batch.shape[0]):
                    r_peaks = signal_processing_modules.get_R_peaks(ecg[v, :])

                    # zero-mean, unit-energy versions of the target and the estimate
                    target_norm = (Y_batch[v, :] - np.mean(Y_batch[v, :])) / np.sqrt(
                        np.sum(np.power(Y_batch[v, :] - np.mean(Y_batch[v, :]), 2)))
                    estimate_norm = (Y_batch_predicted[v, :] - np.mean(Y_batch_predicted[v, :])) / np.sqrt(
                        np.sum(np.power(Y_batch_predicted[v, :] - np.mean(Y_batch_predicted[v, :]), 2)))

                    ensemble_avg_target, ensemble_beats_target = signal_processing_modules.get_ensemble_avg(
                        r_peaks, target_norm, n_samples=500, upsample_factor=1)
                    i_point_target, j_point_target, k_point_target = signal_processing_modules.get_IJK_peaks(
                        ensemble_avg_target, upsample_factor=1)

                    ensemble_avg_estimate, ensemble_beats_estimate = signal_processing_modules.get_ensemble_avg(
                        r_peaks, estimate_norm, n_samples=500, upsample_factor=1)
                    i_point_estimate, j_point_estimate, k_point_estimate = signal_processing_modules.get_IJK_peaks(
                        ensemble_avg_estimate, upsample_factor=1)

                    # 500 is the sampling rate of the signal segments; *1000 converts to milliseconds
                    i_error = (1000 * np.abs(i_point_target - i_point_estimate) / 500
                               if i_point_target != -1 and i_point_estimate != -1 else -1)
                    j_error = (1000 * np.abs(j_point_target - j_point_estimate) / 500
                               if j_point_target != -1 and j_point_estimate != -1 else -1)
                    k_error = (1000 * np.abs(k_point_target - k_point_estimate) / 500
                               if k_point_target != -1 and k_point_estimate != -1 else -1)

                    list_i_errors.append(i_error)
                    list_j_errors.append(j_error)
                    list_k_errors.append(k_error)
                    list_noise_var_target.append(
                        signal_processing_modules.get_noise_variance(ensemble_avg_target, ensemble_beats_target))
                    list_noise_var_estimate.append(
                        signal_processing_modules.get_noise_variance(ensemble_avg_estimate, ensemble_beats_estimate))
                    list_target_i_points.append(i_point_target)
                    list_target_j_points.append(j_point_target)
                    list_target_k_points.append(k_point_target)
                    list_sdr.append(signal_processing_modules.get_sdr(estimate_norm, target_norm))



    del X_batch
    del Y_batch_predicted
    del Y_batch

    return (list_pearson_r_loss, list_subject_ids, list_i_errors, list_j_errors,
            list_k_errors, list_noise_var_target, list_noise_var_estimate,
            list_target_i_points, list_target_j_points, list_target_k_points,
            list_sdr)
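
A short sketch of how the lists returned by test_model might be summarized; entries equal to -1 mark segments where an I, J or K point could not be detected and are excluded before averaging. test_gen is a placeholder generator name:

results = test_model(test_gen, sig_model)
pearson_r, subject_ids, i_err, j_err, k_err = results[:5]

print('Mean Pearson r: {:.3f}'.format(np.mean(pearson_r)))
for name, errs in zip(['I', 'J', 'K'], [i_err, j_err, k_err]):
    valid = [e for e in errs if e != -1]   # -1 marks undetected peaks
    print('MAE {}-point (ms): {:.2f}'.format(name, np.mean(valid)))
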
Example #5
def visualize_signals_video(gen, sig_type_source, directory,
                            model_path_for_video, model_type, no_layers,
                            input_size, kernel_size, filter_number,
                            video_name):
    list_all_files = os.listdir(directory + '/Models for Video/')
    list_models = [
        file for file in list_all_files
        if file.startswith(model_path_for_video[-25::])
    ]
    no_epochs = len(list_models)

    torch.cuda.empty_cache()
    X_batch, Y_batch, list_subjects = next(gen)

    X_batch = torch.from_numpy(X_batch)
    X_batch = network_models.cuda(X_batch)
    X_batch = X_batch.type(torch.cuda.FloatTensor)

    v = np.random.randint(0, Y_batch.shape[0])
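    # the same randomly chosen batch element is visualized at every epoch of the animation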
    X_batch_detached = X_batch.detach().cpu().numpy()
    no_sigs = X_batch_detached.shape[1]
    fig = plt.figure(figsize=(12, 8))

    def animate(i):
        #for epoch in range(1,no_epochs+1):
        epoch = i + 1
        epoch_str = str(epoch).zfill(4)  # zero-pad the epoch number to four digits
        model_path = model_path_for_video + '_' + epoch_str + '.pt'
        print('Loading...' + model_path)
        sig_model = network_models.load_saved_model(
            model_path,
            model_type,
            input_size,
            kernel_size,
            filter_number=filter_number,
            signal_number=len(sig_type_source),
            no_layers=no_layers)

        Y_batch_predicted = sig_model.forward(
            X_batch).squeeze().detach().cpu().numpy()

        #for v in range(Y_batch.shape[0]):
        plt.clf()
        plt.subplot(no_sigs + 1, 1, 1)
        #plt.plot(Y_batch[v,0,:]  , '-r')
        plt.plot((Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :])) / (np.sqrt(
            np.sum(np.power(Y_batch[v, 0, :] - np.mean(Y_batch[v, 0, :]),
                            2)))), '-r')
        #plt.plot((Y_batch_predicted[v,:]-np.min(Y_batch_predicted[v,:]) )/(np.max(Y_batch_predicted[v,:])-np.min(Y_batch_predicted[v,:])) , '-b')
        plt.plot((Y_batch_predicted[v, :] - np.mean(Y_batch_predicted[v, :])) /
                 (np.sqrt(
                     np.sum(
                         np.power(
                             Y_batch_predicted[v, :] -
                             np.mean(Y_batch_predicted[v, :]), 2)))), '-b')
        #plt.plot(Y_batch_predicted[v, :] , '-b')
        # plt.title('Subject Number: ' + str(list_subjects[v]) +' , Epoch: ' + str(epoch)
        #           + ', Train Loss=' + str(train_history[epoch-1]['train_loss']) + ', Val Loss=' + str(valid_history[epoch-1]['valid_loss']) )

        plt.title(
            'Subject Number: {}, Epoch: {}, Train Loss: {:.4f}, Val Loss: {:.4f}'
            .format(list_subjects[v], epoch,
                    train_history[epoch - 1]['train_loss'],
                    valid_history[epoch - 1]['valid_loss']))

        for k in range(2, no_sigs + 2):
            plt.subplot(no_sigs + 1, 1, k)
            plt.plot(X_batch_detached[v, k - 2, :], '-g')
            plt.plot(
                (Y_batch_predicted[v, :] - np.min(Y_batch_predicted[v, :])) /
                (np.max(Y_batch_predicted[v, :]) -
                 np.min(Y_batch_predicted[v, :])), '-b')
            #plt.plot(Y_batch_predicted[v,:] , '-b')
            plt.title(sig_type_source[k - 2])

        # plt.tight_layout()
        # plt.show()
        # if save_true:
        #     fig.savefig(directory + '/' + file_name_pre + '_waveforms_' + str(v)+ '.png')
        del Y_batch_predicted

    ani = animation.FuncAnimation(fig, animate, frames=no_epochs, repeat=False)
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1800)
    ani.save(directory + '/Code Output/' + video_name + '.mp4',
             writer=writer,
             dpi=dpi)
    del Y_batch
    del X_batch
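
A hypothetical invocation; the generator name and every hyper-parameter value below are placeholders, since the listing does not show how the function is called:

visualize_signals_video(valid_gen, ['aX', 'aY', 'aZ'], directory,
                        model_path_for_video, model_type='unet',
                        no_layers=5, input_size=2000, kernel_size=15,
                        filter_number=64, video_name='training_progress')
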
Example #6
def visualize_ijk(gen, upsample_factor, no_cycles):
    '''
    plots target signal segments and estimated target signal segments, ensemble averaged, with the I, J, K points marked
    :param gen: data generator
    :param upsample_factor: upsampling factor applied before ensemble averaging and I/J/K peak detection
    :param no_cycles: number of passes over the fetched batch
    :return: -
    '''
    torch.cuda.empty_cache()
    X_batch, Y_batch, list_subjects = next(gen)
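    # the last input channel is the ECG; it is used only for R-peak detection and is removed before the forward pass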
    ecg = X_batch[:, -1, :]

    X_batch = torch.from_numpy(X_batch[:, :-1, :]).contiguous()
    X_batch = network_models.cuda(X_batch)
    X_batch = X_batch.type(torch.cuda.FloatTensor)

    Y_batch_predicted = sig_model.forward(
        X_batch).squeeze().detach().cpu().numpy()
    X_batch = X_batch.detach().cpu().numpy()

    list_i_errors = []
    list_j_errors = []
    list_k_errors = []
    for _ in range(no_cycles):
        for v in range(Y_batch.shape[0]):

            r_peaks = signal_processing_modules.get_R_peaks(ecg[v, :])
            ensemble_avg_target, ensemble_beats_target = signal_processing_modules.get_ensemble_avg(
                r_peaks,
                Y_batch[v, 0, :],
                n_samples=500,
                upsample_factor=upsample_factor)
            i_point_target, j_point_target, k_point_target = signal_processing_modules.get_IJK_peaks(
                ensemble_avg_target, upsample_factor=upsample_factor)

            ensemble_avg_estimate, ensemble_beats_estimate = signal_processing_modules.get_ensemble_avg(
                r_peaks,
                Y_batch_predicted[v, :],
                n_samples=500,
                upsample_factor=upsample_factor)
            i_point_estimate, j_point_estimate, k_point_estimate = signal_processing_modules.get_IJK_peaks(
                ensemble_avg_estimate, upsample_factor=upsample_factor)

            i_error = 1000 * np.abs(i_point_target - i_point_estimate) / (
                upsample_factor * 500
            ) if i_point_target != -1 and i_point_estimate != -1 else -1  # 500 is the sampling rate of the signal segments; *1000 converts to milliseconds
            j_error = 1000 * np.abs(j_point_target - j_point_estimate) / (
                upsample_factor *
                500) if j_point_target != -1 and j_point_estimate != -1 else -1
            k_error = 1000 * np.abs(k_point_target - k_point_estimate) / (
                upsample_factor *
                500) if k_point_target != -1 and k_point_estimate != -1 else -1

            fig = plt.figure(figsize=(12, 8))
            plt.subplot(2, 1, 1)

            plt.plot(ensemble_beats_target.T, 'r', alpha=0.3)
            plt.plot(ensemble_avg_target, '-r', linewidth=4)
            plt.plot(i_point_target, ensemble_avg_target[i_point_target], 'ok')
            plt.text(i_point_target,
                     ensemble_avg_target[i_point_target] + 0.05,
                     'I',
                     color='red')
            plt.plot(j_point_target, ensemble_avg_target[j_point_target], 'ok')
            plt.text(j_point_target,
                     ensemble_avg_target[j_point_target] + 0.05,
                     'J',
                     color='red')
            plt.plot(k_point_target, ensemble_avg_target[k_point_target], 'ok')
            plt.text(k_point_target,
                     ensemble_avg_target[k_point_target] + 0.05,
                     'K',
                     color='red')

            plt.title('Subject Number: ' + str(list_subjects[v]))

            plt.subplot(2, 1, 2)

            plt.plot(ensemble_beats_estimate.T, 'b', alpha=0.3)
            plt.plot(ensemble_avg_estimate, '-b', linewidth=4)
            plt.plot(i_point_estimate, ensemble_avg_estimate[i_point_estimate],
                     'ok')
            plt.text(i_point_estimate,
                     ensemble_avg_estimate[i_point_estimate] + 0.05,
                     'I',
                     color='blue')
            plt.plot(j_point_estimate, ensemble_avg_estimate[j_point_estimate],
                     'ok')
            plt.text(j_point_estimate,
                     ensemble_avg_estimate[j_point_estimate] + 0.05,
                     'J',
                     color='blue')
            plt.plot(k_point_estimate, ensemble_avg_estimate[k_point_estimate],
                     'ok')
            plt.text(k_point_estimate,
                     ensemble_avg_estimate[k_point_estimate] + 0.05,
                     'K',
                     color='blue')

            plt.title('I-error=' + str(i_error) + ' | J-error=' +
                      str(j_error) + ' | K-error=' + str(k_error) + ' in (ms)')

            plt.tight_layout()
            plt.show()
            if save_true:
                fig.savefig(directory + '/Code Output/' + file_name_pre +
                            '_waveforms_ensemble_averaged' + str(v) + '.png')

            if i_error != -1:
                list_i_errors.append(i_error)
            if j_error != -1:
                list_j_errors.append(j_error)
            if k_error != -1:
                list_k_errors.append(k_error)

    print('MAE I-Point (ms): ' + str(np.mean(np.array(list_i_errors))))
    print('MAE J-Point (ms): ' + str(np.mean(np.array(list_j_errors))))
    print('MAE K-Point (ms): ' + str(np.mean(np.array(list_k_errors))))

    del X_batch
    del Y_batch_predicted
    del Y_batch