Example #1
def frequency_plot(nni, method='welch_psd', title=''):
    if method == 'lomb_psd':
        fd.lomb_psd(nni, title=title)
    elif method == 'ar_psd':
        fd.ar_psd(nni, title=title)
    else:
        fd.welch_psd(nni, title=title, plot=True)
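A hedged usage sketch for the dispatcher above: it assumes fd is pyhrv.frequency_domain (as the calls suggest), that nni is a one-dimensional series of NN intervals in milliseconds, and that the title/plot keywords used in the wrapper are accepted by the installed pyhrv version. The data below is synthetic and for illustration only.

import numpy as np
import pyhrv.frequency_domain as fd

# Synthetic NN intervals in ms (illustration only); real NNI would come from
# detected R-peaks, e.g. via pyhrv.tools.nn_intervals().
nni = np.random.normal(1000, 50, 300)

frequency_plot(nni, method='lomb_psd', title='Lomb-Scargle PSD')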
Example #2
    def extractFrequencyDomain(self, x):
        try:
            nni = self.extractRR(x)
            # the bands were decided by referring to "Revealing Real-Time Emotional Responses"
            psd = fd.welch_psd(nni=nni, show=False,
                               fbands={'ulf': (0.00, 0.01), 'vlf': (0.01, 0.05), 'lf': (0.05, 0.15), 'hf': (0.15, 0.5)},
                               nfft=2 ** 12, legend=False, mode="dev")[0]
            return np.array([psd["fft_norm"][0], psd["fft_norm"][1], psd["fft_ratio"]])

        except Exception:
            return np.array([])
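For reference (hedged against the installed pyhrv version): in dev mode, welch_psd returns a tuple whose first element is the parameter ReturnTuple; fft_norm holds the normalized LF and HF powers and fft_ratio the LF/HF ratio, so the extracted vector is [LF norm, HF norm, LF/HF]. A minimal unpacking sketch, assuming extractor is an instance of the class above and x a raw signal segment:

feats = extractor.extractFrequencyDomain(x)
if feats.size == 3:
    lf_norm, hf_norm, lf_hf_ratio = feats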
Example #3
    def _welch_psd(self, nni, peaks):
        params, freq, power = fd.welch_psd(nni=nni,
                                           fbands=self.bands,
                                           rpeaks=peaks,
                                           show=False,
                                           mode='dev')

        _, freq_i = fd._compute_parameters('fft', freq, power, self.bands)

        return {
            'params': dict(params.as_dict()),
            'freq': freq,
            'power': power / 10**6,
            'freq_i': freq_i
        }
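A minimal sketch of the dev-mode return used by this helper (hedged; assumes pyhrv is installed and synthetic NN intervals in ms stand in for real data): mode='dev' skips plotting and yields the parameter ReturnTuple together with the raw frequency and power arrays, which is what lets _welch_psd() rebuild the figure externally.

import numpy as np
import pyhrv.frequency_domain as fd

nni = np.random.normal(850, 40, 500)   # synthetic NN intervals [ms], illustration only
params, freq, power = fd.welch_psd(nni=nni, show=False, mode='dev')
print(params['fft_ratio'])             # LF/HF ratio
# freq and power are the raw PSD arrays that _welch_psd() forwards for custom plotting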
Example #4
def compute_features(nni):
    features = {}
    features['mean_hr'] = tools.heart_rate(nni).mean()
    features['sdnn'] = td.sdnn(nni)[0]
    features['rmssd'] = td.rmssd(nni)[0]
    features['sdsd'] = td.sdsd(nni)[0]
    features['nn20'] = td.nn20(nni)[0]
    features['pnn20'] = td.nn20(nni)[1]
    features['nn50'] = td.nn50(nni)[0]
    features['pnn50'] = td.nn50(nni)[1]
    # compute the Welch PSD once and reuse its parameters
    psd = fd.welch_psd(nni, show=False)
    features['hf_lf_ratio'] = psd['fft_ratio']
    features['very_lf'] = psd['fft_peak'][0]
    features['lf'] = psd['fft_peak'][1]
    features['hf'] = psd['fft_peak'][2]
    features['log_very_lf'] = psd['fft_log'][0]
    features['log_lf'] = psd['fft_log'][1]
    features['log_hf'] = psd['fft_log'][2]
    features['sampen'] = nl.sample_entropy(nni)[0]

    return features
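A hedged usage sketch for compute_features; it assumes the imports implied by the example (pyhrv.tools as tools, pyhrv.time_domain as td, pyhrv.frequency_domain as fd, pyhrv.nonlinear as nl, numpy as np) and feeds synthetic NN intervals in milliseconds. Note that pyhrv documents fft_ratio as the LF/HF ratio, so the hf_lf_ratio key above is arguably misnamed.

import numpy as np

nni = np.random.normal(850, 40, 500)   # synthetic NN intervals [ms], illustration only
features = compute_features(nni)
print(features['sdnn'], features['rmssd'], features['hf_lf_ratio'])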
Example #5
    def _prepare_figures(self):
        """Re-creates plot figures based on the results for the PDF report"""
        plots = {}

        # Tachogram
        try:
            fig = tools.tachogram(nni=self.nni,
                                  show=False,
                                  figsize=self.figsizes,
                                  interval='complete')['tachogram_plot']
            plots['tachogram'] = fig
            self._set_section('tachogram')
        except Exception as e:
            self._set_section('tachogram', False)
            warnings.warn(
                "\nAn error occurred while trying to create the Tachogram figure for "
                "the PDF report: \n%s'" % str(e),
                stacklevel=2)

        # ECG signal plot
        try:
            if self.signal is not None:
                plots['ecg_plot'] = \
                tools.plot_ecg(signal=self.signal, show=False, interval='complete', figsize=self.figsizes)['ecg_plot']
                self._set_section('ecg_plot')
        except Exception as e:
            self._set_section('ecg_plot', False)
            warnings.warn(
                "\nAn error occurred while trying to create the ECG plot figure for "
                "the PDF report: \n%s'" % str(e),
                stacklevel=2)

        # Histogram
        try:
            plots['histogram'] = td.triangular_index(
                nni=self.nni, show=False, legend=False,
                figsize=self.figsizes)['tri_histogram']
            self._set_section('histogram')
        except Exception as e:
            self._set_section('histogram', False)
            warnings.warn(
                "\nAn error occurred while trying to create the NNI histogram figure for "
                "the PDF report: \n%s'" % str(e),
                stacklevel=2)

        # HR Heat Plot
        if isinstance(self._general_info['age'], int) and isinstance(
                self._general_info['gender'], str):
            try:
                plots['hr_heatplot'] = \
                pyhrv.utils.heart_rate_heatplot(nni=self.nni, show=False, age=self._general_info['age'],
                        gender=self._general_info['gender'], figsize=self.figsizes)['hr_heatplot']
                self._set_section('hrheatplot')
            except Exception as e:
                self._set_section('hrheatplot', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the HR heatplotfor "
                    "the PDF report: \n%s'" % str(e),
                    stacklevel=2)

        # Welch's Plot
        if 'fft_plot' in self.results.keys():
            try:
                plots['fft_plot'] = \
                fd.welch_psd(nni=self.nni, fbands=self.results['fft_bands'], window=self.results['fft_window'],
                    show=False, show_param=False, figsize=self.figsizes)['fft_plot']
                self._set_section('fft_plot')
            except Exception as e:
                self._set_section('fft_plot', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the FFT/Welch figure for "
                    "the PDF report: \n%s'" % str(e),
                    stacklevel=2)

        # AR Plot
        if 'ar_plot' in self.results.keys():
            try:
                plots['ar_plot'] = \
                fd.ar_psd(nni=self.nni, fbands=self.results['ar_bands'], show=False, show_param=False,
                    figsize=self.figsizes)['ar_plot']
                self._set_section('ar_plot')
            except Exception as e:
                self._set_section('ar_plot', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the AR PSD figure for "
                    "the PDF report: \n%s'" % str(e),
                    stacklevel=2)

        # Lomb-Scargle Plot
        if 'lomb_plot' in self.results.keys():
            try:
                plots['lomb_plot'] = \
                fd.lomb_psd(nni=self.nni, fbands=self.results['lomb_bands'], ma_size=self.results['lomb_ma'],
                   show=False, show_param=False, figsize=self.figsizes)['lomb_plot']
                self._set_section('lomb_plot')
            except Exception as e:
                self._set_section('lomb_plot', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the AR PSD figure for "
                    "the PDF report: \n%s'" % str(e),
                    stacklevel=2)

        # Poincare
        if 'poincare_plot' in self.results.keys():
            try:
                plots['poincare_plot'] = nl.poincare(
                    nni=self.nni, show=False, legend=False)['poincare_plot']
                self._set_section('poincare_plot')
            except Exception as e:
                self._set_section('poincare_plot', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the Poincare plot figure for "
                    "the PDF report: \n%s'" % str(e),
                    stacklevel=2)

        # DFA
        if 'dfa_plot' in self.results.keys():
            try:
                fig = nl.dfa(nn=self.nni, show=False, legend=False)['dfa_plot']
                plots['dfa_plot'] = fig
                self._set_section('dfa')
            except Exception as e:
                self._set_section('dfa', False)
                warnings.warn(
                    "\nAn error occurred while trying to create the DFA plot figure for "
                    "the PDF report: %s'" % str(e),
                    stacklevel=2)

        # Save all plot figures
        for f in plots.keys():
            plots[f].savefig("%s%s.png" %
                             (self._figure_path, f.replace('_', '')),
                             dpi=300,
                             bbox_inches='tight')
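The save loop at the end relies on every entry in plots being a matplotlib figure returned by a pyhrv call with show=False. A standalone sketch of that pattern (hedged; assumes pyhrv and matplotlib are installed and synthetic NN intervals in ms stand in for real data):

import numpy as np
import pyhrv.tools as tools

nni = np.random.normal(850, 40, 500)   # synthetic NNI [ms], illustration only
fig = tools.tachogram(nni=nni, show=False, interval='complete')['tachogram_plot']
fig.savefig('tachogram.png', dpi=300, bbox_inches='tight')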
Example #6
def process(X):
    features = []

    plot = False
    print(y[0:40])  # y: class labels for the samples in X, assumed defined in the enclosing scope
    tpls0 = []
    tpls1 = []
    tpls2 = []
    tpls3 = []

    shp = None
    for i in range(len(X)):
        _sample = X[i]
        print(f"sample {i}: Class {y[i]}")
        sample = _sample[~np.isnan(_sample)]

        res = ecg(signal=sample, sampling_rate=300, show=False)

        # FT
        # N = len(sample) / 2
        # T = 1.0 / 300.0

        # xf = np.linspace(0.0, 1.0 / (2 * T), int(N // 2))
        # yf = fft(res["filtered"])
        # plt.plot(xf, 2.0 / N * np.abs(yf[0 : int(N // 2)]))
        # plt.show()

        median = calc_median(res["templates"])
        # if (np.argmin(median) < 60) and not 0.7*np.max(median) > abs(np.min(median)):
        # Heuristic on the median beat template: if the R-peak does not dominate near
        # the expected position or the template looks inverted, redo the run on -sample.
        if (
            not np.max(median[55:65]) == np.max(median)
            or (np.max(median) < -0.8 * np.min(median))
            or (
                not 0.75 * np.max(median) > -np.min(median)
                and (
                    np.argmin(median) < 60
                    and (
                        np.min(median[:60]) < 1.5 * min(median[60:])
                        or np.min(median[60:]) < 1.5 * min(median[:60])
                    )
                )
            )
        ):
            # and ((np.min(median) < 1.2 * np.min(
            #     median[[i for i in range(len(median)) if i != np.argmin(median)]])) or np.max(median[45:48]) > -np.min(median[65:75])):
            # if np.min(median[45:55]) < np.min(median[0:45]) and np.min(median[65:80]) < np.min(median[80:]) and np.max(median[55:65]) == np.max(median):
            # if np.max(median) < abs(np.min(median)) and np.min(median[50:55]) < np.min(median[60:65]):
            # if abs(np.mean(median)) > abs(np.median(median)):
            res = ecg(-sample, sampling_rate=300, show=False)

            median = calc_median(res["templates"])
            # neg = True

            # res["templates"][j] = (res["templates"][j]-mean)/std

        median = median / median.std()  # scale the median beat template by its standard deviation
        if i < 40 and plot:
            # plt.plot(res["templates"][j])
            plt.title(y[i])
            plt.plot(median)
            plt.show()

        # if not neg:
        if y[i] == 0:
            tpls0.append(median)
        if y[i] == 1:
            tpls1.append(median)
        if y[i] == 2:
            tpls2.append(median)
        if y[i] == 3:
            tpls3.append(median)

        # beat characterization
        heart_rate = res["heart_rate"]

        filtered = res["filtered"]
        rpeaks = res["rpeaks"]
        # peaks in seconds, required by pyhrv
        rpeaks_s = res["ts"][rpeaks]
        qpeaks = np.array([find_minimum(filtered, r) for r in rpeaks])
        speaks = np.array(
            [find_minimum(filtered, r, direction="right") for r in rpeaks]
        )

        r_amplitude = filtered[rpeaks]
        q_amplitude = filtered[qpeaks]
        s_amplitude = filtered[speaks]

        qrs_duration = speaks - qpeaks

        # hrv_res = hrv(
        #    rpeaks=rpeaks_s,
        #    plot_tachogram=False,
        #    kwargs_ar={"order": 8},
        #    show=False,
        # )
        nni = time_domain.nni_parameters(rpeaks=rpeaks_s)
        nni_diff = time_domain.nni_differences_parameters(rpeaks=rpeaks_s)
        sdnn = time_domain.sdnn(rpeaks=rpeaks_s)
        sdsd = time_domain.sdsd(rpeaks=rpeaks_s)
        tri_index = time_domain.triangular_index(rpeaks=rpeaks_s, plot=False)

        welch_psd = frequency_domain.welch_psd(rpeaks=rpeaks_s, mode="dev")[0]
        # print(templates.shape, median.shape)
        features.append(
            build_features(q_amplitude, r_amplitude, s_amplitude, qrs_duration)
            + [
                nni["nni_mean"],
                nni["nni_min"],
                nni["nni_max"],
                nni_diff["nni_diff_mean"],
                nni_diff["nni_diff_min"],
                nni_diff["nni_diff_max"],
                sdnn["sdnn"],
                sdsd["sdsd"],
                tri_index["tri_index"],
                welch_psd["fft_ratio"],
            ]
            + list(welch_psd["fft_peak"] + welch_psd["fft_abs"] + welch_psd["fft_norm"])
        )
        # print(templates.shape, median.shape)

    features = np.array(features)
    print(f"computed features {features.shape}")
    return features
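A condensed, hedged sketch of the per-sample pyhrv calls used above. It assumes biosppy and pyhrv are installed, that ecg_signal is a 1-D ECG array sampled at 300 Hz (a stand-in for one row of X), and that pyhrv expects R-peak times in seconds, as the res["ts"][rpeaks] conversion in process() suggests.

import numpy as np
from biosppy.signals.ecg import ecg
import pyhrv.time_domain as time_domain
import pyhrv.frequency_domain as frequency_domain

res = ecg(signal=ecg_signal, sampling_rate=300, show=False)   # ecg_signal: assumed input
rpeaks_s = res['ts'][res['rpeaks']]                           # R-peak times in seconds

sdnn = time_domain.sdnn(rpeaks=rpeaks_s)['sdnn']
welch = frequency_domain.welch_psd(rpeaks=rpeaks_s, mode='dev')[0]
lf_hf = welch['fft_ratio']                                    # LF/HF ratio from the Welch PSD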
Example #7
def calculate_bvp_f(bvp_data, sample_rate, bvp_time, bvp_chunks):
    features_chunks = []

    for chunk in range(len(bvp_chunks)):
        if bvp_chunks[chunk] is None:
            features_chunks.extend([None])
            continue

        bvpData = list(map(lambda x: x['data'], bvp_chunks[chunk]))
        chunk_time = (bvp_chunks[chunk][-1]['timeStamp']
                      - bvp_chunks[chunk][0]['timeStamp'])
        chunk_s_r = len(bvpData) / chunk_time
        # skip chunks whose effective sampling rate is more than 30 Hz below the nominal rate
        if chunk_s_r + 30 < sample_rate:
            features_chunks.extend([None])
            continue

        bandpass = signalsTools.filter_signal(ftype='FIR',
                                              sampling_rate=chunk_s_r,
                                              band='bandpass',
                                              frequency=[0.5, 4],
                                              signal=bvpData,
                                              order=4)
        # all_working_data, all_measures = hp.process(bandpass[0], sample_rate=chunk_s_r,calc_freq=True)
        all_working_data, all_measures = hp.process(np.asarray(bvpData),
                                                    sample_rate=chunk_s_r)
        hp.plotter(all_working_data, all_measures)
        result = biosppy.signals.bvp.bvp(signal=np.asarray(bvpData),
                                         sampling_rate=chunk_s_r,
                                         show=True)
        result = fd.welch_psd(nni=np.asarray(bvpData))
        # RRI_DF = getRRI(np.asarray(bvpData), column2, sample_rate)
        # HRV_DF = getHRV(RRI_DF, np.mean(HR))
        # print(result['fft_total'])
        fft_fig = result['fft_plot']  # the Welch PSD figure is a ReturnTuple entry, not a method

        f, Pxx_den = signal.welch(np.asarray(bvpData))
        plt.semilogy(f, Pxx_den)
        plt.ylim([0.5e-3, 1])
        plt.xlabel('frequency [Hz]')
        plt.ylabel('PSD [V**2/Hz]')
        plt.show()
        plt.plot(all_working_data['RR_list'])
        plt.show()
        # features = {
        #     'HR_avg': all_measures['bpm'],
        #     'NN_avg': all_measures['ibi'],
        #     'SDNN': all_measures['sdnn'],
        #     'SDSD': all_measures['sdsd'],
        #     'RMSSD': all_measures['rmssd'],
        #     'pNN20': all_measures['pnn20'],
        #     'pNN50': all_measures['pnn50'],
        #     'hrMad': all_measures['hr_mad'],
        #     'BreR': all_measures['breathingrate'],
        #     'lf': all_measures['lf'],
        #     'hf': all_measures['hf'],
        #     'lf/hf': all_measures['lf/hf']
        # }

        time_domain_features = get_time_domain_features(
            all_working_data['RR_list'])
        freq_domain_features = get_frequency_domain_features(
            all_working_data['RR_list'])
        sampen_domain_features = get_sampen(all_working_data['RR_list'])
        features = {
            'co_he':
            freq_domain_features['total_power'] /
            (freq_domain_features['hf'] + freq_domain_features['lf'])
        }
        features.update(time_domain_features)
        features.update(freq_domain_features)
        features.update(sampen_domain_features)
        # features.update({'ApEN':get_apen(all_working_data['RR_list'], 2, (0.2 * features['SDNN']))})
        features.update({
            'ApEN':
            get_apen(all_working_data['RR_list'], 2, (0.2 * features['sdnn']))
        })

        # samp_enn = sampen2(all_working_data['RR_list'])
        # features['sampEn'] = samp_enn['sampen']

        # SD1 measures the width of the Poincare cloud, SD2 its length
        # (cf. https://github.com/pickus91/HRV/blob/master/poincare.py)
        SD1 = (1 / np.sqrt(2)) * features['sdsd']
        SD2 = np.sqrt((2 * features['sdnn'] ** 2) - (0.5 * features['sdsd'] ** 2))
        features['SD1'] = SD1
        features['SD2'] = SD2
        features_chunks.extend([features])

    return features_chunks
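The SD1/SD2 values at the end are the standard Poincare descriptors derived from SDSD and SDNN. As a hedged cross-check (assuming pyhrv is installed and NN intervals in ms are available), pyhrv's nonlinear module computes them directly and should give comparable values:

import numpy as np
import pyhrv.nonlinear as nl

nn_intervals = np.random.normal(850, 40, 500)   # synthetic NNI [ms], illustration only
pc = nl.poincare(nni=nn_intervals, show=False)
print(pc['sd1'], pc['sd2'])                     # width and length of the Poincare cloud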