Example #1
def create_X(x, last_index=None, n_steps=150, step_length=1000, aug=0):
    if last_index is None:
        last_index=len(x)
       
    assert last_index - n_steps * step_length >= 0

    # Reshaping
    per=x[(last_index - n_steps * step_length):last_index]

    #for data augmentation
    if aug==1:
        flag=randint(0, 2)
        if flag==0:
            s=np.random.normal(0, 1, per.shape[0])
            s = np.round(s, 0)
            per=per+s
        if flag==1:
            per=running_mean(per)
        if flag==2:
            per=fourier(per)
            #print(per)

    temp = (per.reshape(n_steps, -1) - 5 ) / 3
    
    # per-row tsfresh features over the n_steps rows of temp
    #ac1=np.zeros(n_steps)
    ac2=np.zeros(n_steps)
    ac3=np.zeros(n_steps)
    #c3_1=np.zeros(n_steps)
    c3_2=np.zeros(n_steps)
    c3_3=np.zeros(n_steps)
    mac=np.zeros(n_steps)
    mc=np.zeros(n_steps)
    for i in range(n_steps):
        #ac1[i]=ts.autocorrelation(temp[i,:],1)
        ac2[i]=ts.autocorrelation(temp[i,:],2)
        ac3[i]=ts.autocorrelation(temp[i,:],3)
        #c3_1[i]=ts.c3(temp[i,:],1)/500
        c3_2[i]=ts.c3(temp[i,:],2)/500
        c3_3[i]=ts.c3(temp[i,:],3)/500
        mac[i]=ts.mean_abs_change(temp[i,:])
        mc[i]=ts.mean_change(temp[i,:])
        
    return np.c_[extract_features(temp),
                 extract_features(temp[:, 827:]),
                 extract_features(temp[:, 970:]),
                 #ac1,
                 ac2,
                 ac3,
                 #c3_1,
                 c3_2,
                 c3_3,
                 mac,
                 mc,
                 temp[:, -1:]]
Example #2
    def get_sta_features(self, data):
        """
        Calculate the value of 9 kinds of selected statistical features
        :param data:
        :return:
        """
        def _cal_trend(data):
            time_list = np.arange(len(data))
            # create linear regression object
            regr = linear_model.LinearRegression()
            regr.fit(time_list.reshape(-1, 1), np.array(data).reshape(-1, 1))

            return regr.coef_[0][0]

        E = ts.abs_energy(data)
        S = ts.binned_entropy(data, max_bins=5)
        ro = ts.autocorrelation(data, lag=4)
        skewness = ts.skewness(data)
        kurtosis = ts.kurtosis(data)
        trend = _cal_trend(data)
        mean = ts.mean(data)
        min_val = ts.minimum(data)
        max_val = ts.maximum(data)

        return [E, S, ro, skewness, kurtosis, trend, mean, min_val, max_val]
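# A quick standalone check (toy data, not part of the example above) that the
# `trend` feature is simply the least-squares slope of the series against its
# integer time index; the np.polyfit comparison is an equivalent formulation.
import numpy as np
from sklearn import linear_model

data = np.array([3.0, 3.5, 4.1, 4.0, 5.2, 5.5])
time_list = np.arange(len(data))

regr = linear_model.LinearRegression()
regr.fit(time_list.reshape(-1, 1), data.reshape(-1, 1))

assert np.isclose(regr.coef_[0][0], np.polyfit(time_list, data, 1)[0])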
Example #3
def auto_corr(mag):
    """Similarity between observations as a function of a time lag between them.

    :rtype: float
    """
    auto_corr = ts.autocorrelation(mag, 1)
    return auto_corr
Example #4
    def autocorrelation(self, x, lag):
        """
        As in tsfresh `autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
        feature_calculators.py#L1457>`_

        Calculates the autocorrelation of the specified lag, according to the `formula <https://en.wikipedia.org/wiki/\
        Autocorrelation#Estimation>`_:

        .. math::

            \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)

        where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its
        mean. `l` denotes the lag.

        :param x: the time series to calculate the feature of
        :type x: pandas.Series
        :param lag: the lag
        :type lag: int
        :return: the value of this feature
        :rtype: float
        """
        # Default to lag 0 when no lag is given, then delegate the calculation to
        # tsfresh's feature_calculators.autocorrelation.
        if lag is None:
            lag = 0
        _autoc = feature_calculators.autocorrelation(x, lag)
        logging.debug("autocorrelation by tsfresh calculated")
        return _autoc
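# A minimal sketch (toy series and lag chosen arbitrarily) checking the docstring
# formula against the tsfresh implementation used by the wrapper above.
import numpy as np
from tsfresh.feature_extraction import feature_calculators

x = np.array([1.0, 2.0, 4.0, 3.0, 5.0, 4.0, 6.0, 5.0])
lag = 2
mu, var, n = x.mean(), x.var(), len(x)

# (1 / ((n - l) * sigma^2)) * sum_{t=1}^{n-l} (X_t - mu) * (X_{t+l} - mu)
manual = np.sum((x[:n - lag] - mu) * (x[lag:] - mu)) / ((n - lag) * var)
assert np.isclose(manual, feature_calculators.autocorrelation(x, lag))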
def time_series_autocorrelation(x):
    """
    Calculates the autocorrelation of the specified lag, according to the formula [1]

    .. math::

        \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)

    where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its
    mean. `l` denotes the lag.

    .. rubric:: References

    [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature, computed with a lag derived from the series length
    :rtype: float
    """
    lag = int((len(x) - 3) / 5)
    if np.sqrt(np.var(x)) < 1e-10:
        return 0
    return ts_feature_calculators.autocorrelation(x, lag)
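# Usage note (illustrative only, random toy data): the helper above derives its
# lag from the series length instead of taking it as an argument, e.g. a
# 53-sample series gets lag = int((53 - 3) / 5) = 10.
import numpy as np
from tsfresh.feature_extraction import feature_calculators as ts_feature_calculators

x = np.random.normal(size=53)
print(time_series_autocorrelation(x))                 # uses lag 10 internally
print(ts_feature_calculators.autocorrelation(x, 10))  # same value, explicit lag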
Example #6
def get_features_from_one_signal(X, sample_rate=50):
    assert X.ndim == 1, "Expected single signal in feature extraction"
    mean = np.mean(X)
    stdev = np.std(X)
    abs_energy = fc.abs_energy(X)
    sum_of_changes = fc.absolute_sum_of_changes(X)
    autoc = fc.autocorrelation(X, sample_rate)
    count_above_mean = fc.count_above_mean(X)
    count_below_mean = fc.count_below_mean(X)
    kurtosis = fc.kurtosis(X)
    longest_above = fc.longest_strike_above_mean(X)
    zero_crossing = fc.number_crossing_m(X, mean)
    num_peaks = fc.number_peaks(X, int(sample_rate / 10))
    sample_entropy = fc.sample_entropy(X)
    spectral_density = fc.spkt_welch_density(X, [{
        "coeff": 1
    }, {
        "coeff": 2
    }, {
        "coeff": 3
    }, {
        "coeff": 4
    }, {
        "coeff": 5
    }, {
        "coeff": 6
    }])
    c, v = zip(*spectral_density)
    v = np.asarray(v)

    return [
        mean, stdev, abs_energy, sum_of_changes, autoc, count_above_mean,
        count_below_mean, kurtosis, longest_above, zero_crossing, num_peaks,
        sample_entropy, v[0], v[1], v[2], v[3], v[4], v[5]
    ]
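# A possible driver (made-up data and window count) for the extractor above:
# apply it per window and stack the rows into a feature matrix.
import numpy as np

signals = np.random.normal(size=(4, 500))  # 4 windows of a 1-D signal
X_features = np.array([get_features_from_one_signal(s) for s in signals])
# one row per window, 18 columns (12 scalar stats + 6 Welch density coefficients)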
Example #7
def time_series_autocorrelation(x):
    """
    Calculates the autocorrelation of the specified lag, according to the formula [1]

    .. math::

        \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)

    where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its
    mean. `l` denotes the lag.

    .. rubric:: References

    [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature, computed with a lag derived from the series length
    :rtype: float
    """
    lag = int((len(x) - 3) / 5)
    if np.sqrt(np.var(x)) < 1e-10:
        return 0
    return ts_feature_calculators.autocorrelation(x, lag)
Example #8
def time_series_autocorrelation(x):
    """
    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature, computed with a lag derived from the series length
    :rtype: float
    """
    lag = int((len(x) - 3) / 5)
    if np.sqrt(np.var(x)) < 1e-10:
        return 0
    return ts_feature_calculators.autocorrelation(x, lag)
Example #9
    def features(self, x, y, seg_id):
        feature_dict = dict()
        feature_dict['target'] = y
        feature_dict['seg_id'] = seg_id
        x = pd.Series(denoise_signal(x, wavelet='db1', level=1))
        #x = x - np.mean(x)

        zc = np.fft.fft(x)
        zc = zc[:37500]

        # FFT transform values
        realFFT = np.real(zc)
        imagFFT = np.imag(zc)

        freq_bands = list(range(0, 37500, 7500))
        magFFT = np.sqrt(realFFT**2 + imagFFT**2)
        phzFFT = np.arctan(imagFFT / realFFT)
        phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
        phzFFT[phzFFT == np.inf] = np.pi / 2.0
        phzFFT = np.nan_to_num(phzFFT)

        for freq in freq_bands:
            if freq == 0:
                continue
            feature_dict['FFT_Mag_01q%d' % freq] = np.quantile(
                magFFT[freq:freq + 7500], 0.01)
            feature_dict['FFT_Mag_10q%d' % freq] = np.quantile(
                magFFT[freq:freq + 7500], 0.1)
            feature_dict['FFT_Mag_90q%d' % freq] = np.quantile(
                magFFT[freq:freq + 7500], 0.9)
            feature_dict['FFT_Mag_99q%d' % freq] = np.quantile(
                magFFT[freq:freq + 7500], 0.99)
            feature_dict['FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq:freq +
                                                                   7500])
            feature_dict['FFT_Mag_std%d' % freq] = np.std(magFFT[freq:freq +
                                                                 7500])
            feature_dict['FFT_Mag_max%d' % freq] = np.max(magFFT[freq:freq +
                                                                 7500])

        for p in [10]:
            feature_dict[f'num_peaks_{p}'] = feature_calculators.number_peaks(
                x, p)

        feature_dict['cid_ce'] = feature_calculators.cid_ce(x, normalize=True)

        for w in [5]:
            feature_dict[
                f'autocorrelation_{w}'] = feature_calculators.autocorrelation(
                    x, w)
        return feature_dict
def make_features(df_x):
    """Данные разбиваются на блоки и создают признаки для них."""
    feat = dict()

    # Spectral density (frequency bands picked manually); something similar is used for voice analysis with neural networks
    welch = signal.welch(df_x)[1]
    for num in [2, 3, 28, 30]:
        feat[f"welch_{num}"] = welch[num]

    # Features on rolling medians; conceptually similar to pooling, but median pooling rather than max or average
    mean_abs = (df_x - df_x.mean()).abs()
    feat["mean_abs_med"] = mean_abs.median()

    roll_std = df_x.rolling(375).std().dropna()
    feat["std_roll_med_375"] = roll_std.median()

    half = len(roll_std) // 2
    feat["std_roll_half1"] = roll_std.iloc[:half].median()
    feat["std_roll_half2"] = roll_std.iloc[-half:].median()

    # Features on deep rolling quantiles, a kind of quantile pooling
    feat["q05_roll_std_25"] = df_x.rolling(25).std().dropna().quantile(0.05)
    feat["q05_roll_std_375"] = df_x.rolling(375).std().dropna().quantile(0.05)
    feat["q05_roll_std_1500"] = df_x.rolling(1500).std().dropna().quantile(
        0.05)
    feat["q05_roll_std_1000"] = df_x.rolling(1000).std().dropna().quantile(
        0.05)
    feat["q01_roll_mean_1500"] = df_x.rolling(1500).mean().dropna().quantile(
        0.01)
    feat["q99_roll_mean_1500"] = df_x.rolling(1500).mean().dropna().quantile(
        0.99)

    feat["ave10"] = stats.trim_mean(df_x, 0.1)

    # Pre Main
    feat["num_peaks_10"] = feature_calculators.number_peaks(df_x, 10)
    feat["percentile_roll_std_5"] = np.percentile(
        df_x.rolling(10000).std().dropna().values, 5)
    feat["afc_50"] = feature_calculators.autocorrelation(df_x, 50)

    welch = signal.welch(df_x.clip(-11, 20))[1]
    for num in list(range(33)):
        feat[f"welch_clipped_{num}"] = welch[num]

    return feat
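# Hypothetical driver for make_features above: slice a long signal into
# fixed-size blocks (the 15 000-sample block size is an arbitrary choice) and
# collect one feature row per block; assumes the example's own imports
# (scipy.signal, scipy.stats, tsfresh feature_calculators) are available.
import numpy as np
import pandas as pd

raw = pd.Series(np.random.normal(size=60_000))
rows = [make_features(raw.iloc[i:i + 15_000]) for i in range(0, 60_000, 15_000)]
features = pd.DataFrame(rows)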
def feature_vector_fun(data, isFun=False, test=False):
    trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_FUN)
    rX = trimmed_data['rightWrist_x']

    normRawColumn = universal_normalization(rX, trimmed_data, x_norm=True)
    normRawColumn = general_normalization(normRawColumn)

    # Area under curve
    auc = np.array([])
    auc = np.append(auc, abs(integrate.simps(normRawColumn, dx=5)))

    # Absolute Sum of Consecutive Differences
    scd = fc.absolute_sum_of_changes(normRawColumn)

    # Entropy
    entropy = fc.approximate_entropy(normRawColumn, 2, 3)

    # AutoCorrelation
    ac = fc.autocorrelation(normRawColumn, lag=5)

    # Count Above Mean
    cam = fc.count_above_mean(normRawColumn)

    # Count Below Mean
    cbm = fc.count_below_mean(normRawColumn)

    featureVector = np.array([])
    featureVector = np.append(featureVector, auc)
    featureVector = np.append(featureVector, scd)
    featureVector = np.append(featureVector, entropy)
    featureVector = np.append(featureVector, ac)
    featureVector = np.append(featureVector, cam)
    featureVector = np.append(featureVector, cbm)
    if TRIM_DATA_SIZE_FUN - 1 > featureVector.shape[0]:
        featureVector = np.pad(
            featureVector,
            (0, TRIM_DATA_SIZE_FUN - featureVector.shape[0] - 1), 'constant')
    featureVector = featureVector[:TRIM_DATA_SIZE_FUN - 1]
    if not test:
        if isFun:
            featureVector = np.append(featureVector, 1)
        else:
            featureVector = np.append(featureVector, 0)
    return featureVector
    def features(self, x, y, seg_id):
        feature_dict = dict()
        feature_dict['target'] = y
        feature_dict['seg_id'] = seg_id

        # lists with parameters to iterate over them
        percentiles = [
            1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99
        ]
        hann_windows = [50, 150, 1500, 15000]
        spans = [300, 3000, 30000, 50000]
        windows = [10, 50, 100, 500, 1000, 10000]
        borders = list(range(-4000, 4001, 1000))
        peaks = [10, 20, 50, 100]
        coefs = [1, 5, 10, 50, 100]
        autocorr_lags = [5, 10, 50, 100, 500, 1000, 5000, 10000]

        # basic stats
        feature_dict['mean'] = x.mean()
        feature_dict['std'] = x.std()
        feature_dict['max'] = x.max()
        feature_dict['min'] = x.min()

        # basic stats on absolute values
        feature_dict['mean_change_abs'] = np.mean(np.diff(x))
        feature_dict['abs_max'] = np.abs(x).max()
        feature_dict['abs_mean'] = np.abs(x).mean()
        feature_dict['abs_std'] = np.abs(x).std()

        # geometric and harmonic means
        feature_dict['hmean'] = stats.hmean(np.abs(x[np.nonzero(x)[0]]))
        feature_dict['gmean'] = stats.gmean(np.abs(x[np.nonzero(x)[0]]))

        # k-statistic and moments
        for i in range(1, 5):
            feature_dict[f'kstat_{i}'] = stats.kstat(x, i)
            feature_dict[f'moment_{i}'] = stats.moment(x, i)

        for i in [1, 2]:
            feature_dict[f'kstatvar_{i}'] = stats.kstatvar(x, i)

        # aggregations on various slices of data
        for agg_type, slice_length, direction in product(
            ['std', 'min', 'max', 'mean'], [1000, 10000, 50000],
            ['first', 'last']):
            if direction == 'first':
                feature_dict[
                    f'{agg_type}_{direction}_{slice_length}'] = x[:
                                                                  slice_length].agg(
                                                                      agg_type)
            elif direction == 'last':
                feature_dict[f'{agg_type}_{direction}_{slice_length}'] = x[
                    -slice_length:].agg(agg_type)

        feature_dict['max_to_min'] = x.max() / np.abs(x.min())
        feature_dict['max_to_min_diff'] = x.max() - np.abs(x.min())
        feature_dict['count_big'] = len(x[np.abs(x) > 500])
        feature_dict['sum'] = x.sum()

        feature_dict['mean_change_rate'] = self.calc_change_rate(x)

        # calc_change_rate on slices of data
        for slice_length, direction in product([1000, 10000, 50000],
                                               ['first', 'last']):
            if direction == 'first':
                feature_dict[
                    f'mean_change_rate_{direction}_{slice_length}'] = self.calc_change_rate(
                        x[:slice_length])
            elif direction == 'last':
                feature_dict[
                    f'mean_change_rate_{direction}_{slice_length}'] = self.calc_change_rate(
                        x[-slice_length:])

        # percentiles on original and absolute values
        for p in percentiles:
            feature_dict[f'percentile_{p}'] = np.percentile(x, p)
            feature_dict[f'abs_percentile_{p}'] = np.percentile(np.abs(x), p)

        feature_dict['trend'] = self.add_trend_feature(x)
        feature_dict['abs_trend'] = self.add_trend_feature(x, abs_values=True)

        feature_dict['mad'] = x.mad()
        feature_dict['kurt'] = x.kurtosis()
        feature_dict['skew'] = x.skew()
        feature_dict['med'] = x.median()

        feature_dict['Hilbert_mean'] = np.abs(signal.hilbert(x)).mean()

        for hw in hann_windows:
            feature_dict[f'Hann_window_mean_{hw}'] = (
                signal.convolve(x, signal.hann(hw), mode='same') /
                sum(signal.hann(hw))).mean()

        feature_dict['classic_sta_lta1_mean'] = self.classic_sta_lta(
            x, 500, 10000).mean()
        feature_dict['classic_sta_lta2_mean'] = self.classic_sta_lta(
            x, 5000, 100000).mean()
        feature_dict['classic_sta_lta3_mean'] = self.classic_sta_lta(
            x, 3333, 6666).mean()
        feature_dict['classic_sta_lta4_mean'] = self.classic_sta_lta(
            x, 10000, 25000).mean()
        feature_dict['classic_sta_lta5_mean'] = self.classic_sta_lta(
            x, 50, 1000).mean()
        feature_dict['classic_sta_lta6_mean'] = self.classic_sta_lta(
            x, 100, 5000).mean()
        feature_dict['classic_sta_lta7_mean'] = self.classic_sta_lta(
            x, 333, 666).mean()
        feature_dict['classic_sta_lta8_mean'] = self.classic_sta_lta(
            x, 4000, 10000).mean()

        # exponential rolling statistics
        ewma = pd.Series.ewm
        for s in spans:
            feature_dict[f'exp_Moving_average_{s}_mean'] = (ewma(
                x, span=s).mean(skipna=True)).mean(skipna=True)
            feature_dict[f'exp_Moving_average_{s}_std'] = (ewma(
                x, span=s).mean(skipna=True)).std(skipna=True)
            feature_dict[f'exp_Moving_std_{s}_mean'] = (ewma(
                x, span=s).std(skipna=True)).mean(skipna=True)
            feature_dict[f'exp_Moving_std_{s}_std'] = (ewma(
                x, span=s).std(skipna=True)).std(skipna=True)

        feature_dict['iqr'] = np.subtract(*np.percentile(x, [75, 25]))
        feature_dict['iqr1'] = np.subtract(*np.percentile(x, [95, 5]))
        feature_dict['ave10'] = stats.trim_mean(x, 0.1)

        for slice_length, threshold in product([50000, 100000, 150000],
                                               [5, 10, 20, 50, 100]):
            feature_dict[f'count_big_{slice_length}_threshold_{threshold}'] = (
                np.abs(x[-slice_length:]) > threshold).sum()
            feature_dict[
                f'count_big_{slice_length}_less_threshold_{threshold}'] = (
                    np.abs(x[-slice_length:]) < threshold).sum()

        feature_dict['range_minf_m4000'] = feature_calculators.range_count(
            x, -np.inf, -4000)
        feature_dict['range_p4000_pinf'] = feature_calculators.range_count(
            x, 4000, np.inf)

        for i, j in zip(borders, borders[1:]):
            feature_dict[f'range_{i}_{j}'] = feature_calculators.range_count(
                x, i, j)

        for autocorr_lag in autocorr_lags:
            feature_dict[
                f'autocorrelation_{autocorr_lag}'] = feature_calculators.autocorrelation(
                    x, autocorr_lag)
            feature_dict[f'c3_{autocorr_lag}'] = feature_calculators.c3(
                x, autocorr_lag)

        for p in percentiles:
            feature_dict[
                f'binned_entropy_{p}'] = feature_calculators.binned_entropy(
                    x, p)

        feature_dict['num_crossing_0'] = feature_calculators.number_crossing_m(
            x, 0)

        for peak in peaks:
            feature_dict[
                f'num_peaks_{peak}'] = feature_calculators.number_peaks(
                    x, peak)

        for c in coefs:
            feature_dict[f'spkt_welch_density_{c}'] = \
            list(feature_calculators.spkt_welch_density(x, [{'coeff': c}]))[0][1]
            feature_dict[
                f'time_rev_asym_stat_{c}'] = feature_calculators.time_reversal_asymmetry_statistic(
                    x, c)

        for w in windows:
            x_roll_std = x.rolling(w).std().dropna().values
            x_roll_mean = x.rolling(w).mean().dropna().values

            feature_dict[f'ave_roll_std_{w}'] = x_roll_std.mean()
            feature_dict[f'std_roll_std_{w}'] = x_roll_std.std()
            feature_dict[f'max_roll_std_{w}'] = x_roll_std.max()
            feature_dict[f'min_roll_std_{w}'] = x_roll_std.min()

            for p in percentiles:
                feature_dict[
                    f'percentile_roll_std_{p}_window_{w}'] = np.percentile(
                        x_roll_std, p)

            feature_dict[f'av_change_abs_roll_std_{w}'] = np.mean(
                np.diff(x_roll_std))
            feature_dict[f'av_change_rate_roll_std_{w}'] = np.mean(
                np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
            feature_dict[f'abs_max_roll_std_{w}'] = np.abs(x_roll_std).max()

            feature_dict[f'ave_roll_mean_{w}'] = x_roll_mean.mean()
            feature_dict[f'std_roll_mean_{w}'] = x_roll_mean.std()
            feature_dict[f'max_roll_mean_{w}'] = x_roll_mean.max()
            feature_dict[f'min_roll_mean_{w}'] = x_roll_mean.min()

            for p in percentiles:
                feature_dict[
                    f'percentile_roll_mean_{p}_window_{w}'] = np.percentile(
                        x_roll_mean, p)

            feature_dict[f'av_change_abs_roll_mean_{w}'] = np.mean(
                np.diff(x_roll_mean))
            feature_dict[f'av_change_rate_roll_mean_{w}'] = np.mean(
                np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
            feature_dict[f'abs_max_roll_mean_{w}'] = np.abs(x_roll_mean).max()

        # Mel-frequency cepstral coefficients (MFCCs)
        x = x.values.astype('float32')
        mfcc = librosa.feature.mfcc(y=x)
        for i in range(len(mfcc)):
            feature_dict[f'mfcc_{i}_avg'] = np.mean(np.abs(mfcc[i]))

        # spectral features
        feature_dict['spectral_centroid'] = np.mean(
            np.abs(librosa.feature.spectral_centroid(y=x)[0]))
        feature_dict['zero_crossing_rate'] = np.mean(
            np.abs(librosa.feature.zero_crossing_rate(y=x)[0]))
        feature_dict['spectral_flatness'] = np.mean(
            np.abs(librosa.feature.spectral_flatness(y=x)[0]))
        feature_dict['spectral_contrast'] = np.mean(
            np.abs(
                librosa.feature.spectral_contrast(
                    S=np.abs(librosa.stft(x)))[0]))
        feature_dict['spectral_bandwidth'] = np.mean(
            np.abs(librosa.feature.spectral_bandwidth(y=x)[0]))

        return feature_dict
Example #13
def ACLag11(fragment):
    return fc.autocorrelation(fragment,11)
Example #14
def create_features(seg):
    data_row = {}

    xcz = des_filter(seg, high=CUTOFF)

    zc = np.fft.fft(xcz)
    zc = zc[:MAX_FREQ]

    # FFT transform values
    realFFT = np.real(zc)
    imagFFT = np.imag(zc)

    freq_bands = list(range(0, MAX_FREQ, FREQ_STEP))
    magFFT = np.abs(zc)
    phzFFT = np.angle(zc)
    phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
    phzFFT[phzFFT == np.inf] = np.pi / 2.0
    phzFFT = np.nan_to_num(phzFFT)

    for freq in freq_bands:
        data_row['FFT_Mag_01q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.01)
        data_row['FFT_Mag_10q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.1)
        data_row['FFT_Mag_90q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.9)
        data_row['FFT_Mag_99q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.99)
        data_row['FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Mag_std%d' % freq] = np.std(magFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Mag_max%d' % freq] = np.max(magFFT[freq: freq + FREQ_STEP])

        data_row['FFT_Phz_mean%d' % freq] = np.mean(phzFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Phz_std%d' % freq] = np.std(phzFFT[freq: freq + FREQ_STEP])

    data_row['FFT_Rmean'] = realFFT.mean()
    data_row['FFT_Rstd'] = realFFT.std()
    data_row['FFT_Rmax'] = realFFT.max()
    data_row['FFT_Rmin'] = realFFT.min()
    data_row['FFT_Imean'] = imagFFT.mean()
    data_row['FFT_Istd'] = imagFFT.std()
    data_row['FFT_Imax'] = imagFFT.max()
    data_row['FFT_Imin'] = imagFFT.min()

    data_row['FFT_Rmean_first_6000'] = realFFT[:6000].mean()
    data_row['FFT_Rstd__first_6000'] = realFFT[:6000].std()
    data_row['FFT_Rmax_first_6000'] = realFFT[:6000].max()
    data_row['FFT_Rmin_first_6000'] = realFFT[:6000].min()
    data_row['FFT_Rmean_first_18000'] = realFFT[:18000].mean()
    data_row['FFT_Rstd_first_18000'] = realFFT[:18000].std()
    data_row['FFT_Rmax_first_18000'] = realFFT[:18000].max()
    data_row['FFT_Rmin_first_18000'] = realFFT[:18000].min()

    del xcz
    del zc
    gc.collect()

    sigs = [seg]
    for freq in range(0,MAX_FREQ+FREQ_STEP,FREQ_STEP):
        if freq==0:
            xc_ = des_filter(seg, high=FREQ_STEP)
        elif freq==MAX_FREQ:
            xc_ = des_filter(seg, low=freq)
        else:
            xc_ = des_filter(seg, low=freq, high=freq+FREQ_STEP)
        sigs.append(pd.Series(xc_))

    for i, sig in enumerate(sigs):
        data_row['mean_%d' % i] = sig.mean()
        data_row['std_%d' % i] = sig.std()
        data_row['max_%d' % i] = sig.max()
        data_row['min_%d' % i] = sig.min()

        data_row['mean_change_abs_%d' % i] = np.mean(np.diff(sig))
        data_row['mean_change_rate_%d' % i] = np.mean(np.nonzero((np.diff(sig) / sig[:-1]))[0])
        data_row['abs_max_%d' % i] = np.abs(sig).max()
        data_row['abs_min_%d' % i] = np.abs(sig).min()

        data_row['std_first_50000_%d' % i] = sig[:50000].std()
        data_row['std_last_50000_%d' % i] = sig[-50000:].std()
        data_row['std_first_10000_%d' % i] = sig[:10000].std()
        data_row['std_last_10000_%d' % i] = sig[-10000:].std()

        data_row['avg_first_50000_%d' % i] = sig[:50000].mean()
        data_row['avg_last_50000_%d' % i] = sig[-50000:].mean()
        data_row['avg_first_10000_%d' % i] = sig[:10000].mean()
        data_row['avg_last_10000_%d' % i] = sig[-10000:].mean()

        data_row['min_first_50000_%d' % i] = sig[:50000].min()
        data_row['min_last_50000_%d' % i] = sig[-50000:].min()
        data_row['min_first_10000_%d' % i] = sig[:10000].min()
        data_row['min_last_10000_%d' % i] = sig[-10000:].min()

        data_row['max_first_50000_%d' % i] = sig[:50000].max()
        data_row['max_last_50000_%d' % i] = sig[-50000:].max()
        data_row['max_first_10000_%d' % i] = sig[:10000].max()
        data_row['max_last_10000_%d' % i] = sig[-10000:].max()

        data_row['max_to_min_%d' % i] = sig.max() / np.abs(sig.min())
        data_row['max_to_min_diff_%d' % i] = sig.max() - np.abs(sig.min())
        data_row['count_big_%d' % i] = len(sig[np.abs(sig) > 500])
        data_row['sum_%d' % i] = sig.sum()

        data_row['mean_change_rate_first_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:50000]) / sig[:50000][:-1]))[0])
        data_row['mean_change_rate_last_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-50000:]) / sig[-50000:][:-1]))[0])
        data_row['mean_change_rate_first_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:10000]) / sig[:10000][:-1]))[0])
        data_row['mean_change_rate_last_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-10000:]) / sig[-10000:][:-1]))[0])

        data_row['q95_%d' % i] = np.quantile(sig, 0.95)
        data_row['q99_%d' % i] = np.quantile(sig, 0.99)
        data_row['q05_%d' % i] = np.quantile(sig, 0.05)
        data_row['q01_%d' % i] = np.quantile(sig, 0.01)

        data_row['abs_q95_%d' % i] = np.quantile(np.abs(sig), 0.95)
        data_row['abs_q99_%d' % i] = np.quantile(np.abs(sig), 0.99)
        data_row['abs_q05_%d' % i] = np.quantile(np.abs(sig), 0.05)
        data_row['abs_q01_%d' % i] = np.quantile(np.abs(sig), 0.01)

        data_row['trend_%d' % i] = add_trend_feature(sig)
        data_row['abs_trend_%d' % i] = add_trend_feature(sig, abs_values=True)
        data_row['abs_mean_%d' % i] = np.abs(sig).mean()
        data_row['abs_std_%d' % i] = np.abs(sig).std()

        data_row['mad_%d' % i] = sig.mad()
        data_row['kurt_%d' % i] = sig.kurtosis()
        data_row['skew_%d' % i] = sig.skew()
        data_row['med_%d' % i] = sig.median()

        data_row['Hilbert_mean_%d' % i] = np.abs(hilbert(sig)).mean()
        data_row['Hann_window_mean_%d' % i] = (convolve(sig, hann(150), mode='same') / sum(hann(150))).mean()

        data_row['classic_sta_lta1_mean_%d' % i] = classic_sta_lta(sig, 500, 10000).mean()
        data_row['classic_sta_lta2_mean_%d' % i] = classic_sta_lta(sig, 5000, 100000).mean()
        data_row['classic_sta_lta3_mean_%d' % i] = classic_sta_lta(sig, 3333, 6666).mean()
        data_row['classic_sta_lta4_mean_%d' % i] = classic_sta_lta(sig, 10000, 25000).mean()

        data_row['Moving_average_400_mean_%d' % i] = sig.rolling(window=400).mean().mean(skipna=True)
        data_row['Moving_average_700_mean_%d' % i] = sig.rolling(window=700).mean().mean(skipna=True)
        data_row['Moving_average_1500_mean_%d' % i] = sig.rolling(window=1500).mean().mean(skipna=True)
        data_row['Moving_average_3000_mean_%d' % i] = sig.rolling(window=3000).mean().mean(skipna=True)
        data_row['Moving_average_6000_mean_%d' % i] = sig.rolling(window=6000).mean().mean(skipna=True)

        ewma = pd.Series.ewm
        data_row['exp_Moving_average_300_mean_%d' % i] = ewma(sig, span=300).mean().mean(skipna=True)
        data_row['exp_Moving_average_3000_mean_%d' % i] = ewma(sig, span=3000).mean().mean(skipna=True)
        data_row['exp_Moving_average_30000_mean_%d' % i] = ewma(sig, span=30000).mean().mean(skipna=True)

        no_of_std = 2
        data_row['MA_700MA_std_mean_%d' % i] = sig.rolling(window=700).std().mean(skipna=True)
        data_row['MA_700MA_BB_high_mean_%d' % i] = (
        data_row['Moving_average_700_mean_%d' % i] + no_of_std * data_row['MA_700MA_std_mean_%d' % i]).mean()
        data_row['MA_700MA_BB_low_mean_%d' % i] = (
        data_row['Moving_average_700_mean_%d' % i] - no_of_std * data_row['MA_700MA_std_mean_%d' % i]).mean()
        data_row['MA_400MA_std_mean_%d' % i] = sig.rolling(window=400).std().mean(skipna=True)
        data_row['MA_400MA_BB_high_mean_%d' % i] = (
        data_row['Moving_average_400_mean_%d' % i] + no_of_std * data_row['MA_400MA_std_mean_%d' % i]).mean()
        data_row['MA_400MA_BB_low_mean_%d' % i] = (
        data_row['Moving_average_400_mean_%d' % i] - no_of_std * data_row['MA_400MA_std_mean_%d' % i]).mean()

        data_row['iqr0_%d' % i] = np.subtract(*np.percentile(sig, [75, 25]))
        data_row['q999_%d' % i] = np.quantile(sig, 0.999)
        data_row['q001_%d' % i] = np.quantile(sig, 0.001)
        data_row['ave10_%d' % i] = stats.trim_mean(sig, 0.1)
        data_row['peak10_num_%d' % i] = feature_calculators.number_peaks(sig, 10)
        data_row['num_cross_0_%d' % i] = feature_calculators.number_crossing_m(sig, 0)
        data_row['autocorrelation_%d' % i] = feature_calculators.autocorrelation(sig, 5)
        # data_row['spkt_welch_density_%d' % i] = list(feature_calculators.spkt_welch_density(x, [{'coeff': 50}]))[0][1]
        data_row['ratio_value_number_%d' % i] = feature_calculators.ratio_value_number_to_time_series_length(sig)

    for windows in [50, 200, 1000]:
        x_roll_std = seg.rolling(windows).std().dropna().values
        x_roll_mean = seg.rolling(windows).mean().dropna().values

        data_row['ave_roll_std_' + str(windows)] = x_roll_std.mean()
        data_row['std_roll_std_' + str(windows)] = x_roll_std.std()
        data_row['max_roll_std_' + str(windows)] = x_roll_std.max()
        data_row['min_roll_std_' + str(windows)] = x_roll_std.min()
        data_row['q01_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.01)
        data_row['q05_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.05)
        data_row['q95_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.95)
        data_row['q99_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.99)
        data_row['av_change_abs_roll_std_' + str(windows)] = np.mean(np.diff(x_roll_std))
        data_row['av_change_rate_roll_std_' + str(windows)] = np.mean(np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
        data_row['abs_max_roll_std_' + str(windows)] = np.abs(x_roll_std).max()

        data_row['ave_roll_mean_' + str(windows)] = x_roll_mean.mean()
        data_row['std_roll_mean_' + str(windows)] = x_roll_mean.std()
        data_row['max_roll_mean_' + str(windows)] = x_roll_mean.max()
        data_row['min_roll_mean_' + str(windows)] = x_roll_mean.min()
        data_row['q01_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.01)
        data_row['q05_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.05)
        data_row['q95_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.95)
        data_row['q99_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.99)
        data_row['av_change_abs_roll_mean_' + str(windows)] = np.mean(np.diff(x_roll_mean))
        data_row['av_change_rate_roll_mean_' + str(windows)] = np.mean(np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
        data_row['abs_max_roll_mean_' + str(windows)] = np.abs(x_roll_mean).max()

        data_row['num_peak10_rolling_' + str(windows)] = feature_calculators.number_peaks(x_roll_mean, 10)
        data_row['num_cross0_rolling_' + str(windows)] = feature_calculators.number_crossing_m(x_roll_mean, 0)
        data_row['autocorrelation_rolling_' + str(windows)] = feature_calculators.autocorrelation(x_roll_mean, 5)
        # data_row['spkt_welch_density_rolling_' + str(windows)] = list(feature_calculators.spkt_welch_density(x_roll_mean, [{'coeff': 50}]))[0][1]
        data_row['ratio_value_number_rolling_' + str(windows)] = feature_calculators.ratio_value_number_to_time_series_length(x_roll_mean)
        data_row['classic_sta_lta_rolling_' + str(windows)] = classic_sta_lta(x_roll_mean, 500, 10000).mean()

    return data_row
Example #15
    def power_features(self, x, zc, y, seg_id):
        feature_dict = dict()
        feature_dict['target'] = y
        feature_dict['seg_id'] = seg_id

        realFFT = np.real(zc)
        imagFFT = np.imag(zc)

        absFFT = np.sqrt(realFFT**2+imagFFT**2)
        absFFT_cut = absFFT[:round(len(absFFT)/2)]
        powerFFT = []
        nFFTwindow = 50
        sub_row = round(self.chunk_size/nFFTwindow/2)
        for ii in range(nFFTwindow):
            powerFFT.append(np.sum(absFFT_cut[ii*sub_row:(ii+1)*sub_row]))
        powerFFT_norm = powerFFT/sum(powerFFT)
        nFFTwindow_sub = 10
        for jj in range(1,nFFTwindow_sub-1):
            for ii in range(nFFTwindow-jj):
                feature_dict[f'power_ratio_{ii}_{ii+jj}'] = powerFFT[ii]/powerFFT[ii+jj]
        
        windows = [100, 500, 1000, 2000, 3000, 5000]
        autocorr_lags = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50]
        for w in windows:
            powerseries = []
            powerratioseries = []
            realseries = []
            realratioseries = []
            imagsseries = []
            imagratiosseries = []
            ii = 0
            perc = 0.2
            while ii+w <= len(x):
                xtemp = x[ii:ii+w]
                zctemp = np.fft.fft(xtemp)
                realFFTtemp = np.real(zctemp)
                imagFFTtemp = np.imag(zctemp)
                absFFTtemp = np.sqrt(realFFTtemp**2+imagFFTtemp**2)
                absFFTtemp_cut = absFFTtemp[:round(len(absFFTtemp)/2)]
                realFFTtemp_cut = realFFTtemp[:round(len(realFFTtemp)/2)]
                imagFFTtemp_cut = imagFFTtemp[:round(len(imagFFTtemp)/2)]
                powerseries.append(np.sum(absFFTtemp_cut))
                powerratioseries.append(np.sum(absFFTtemp_cut[0:round(len(absFFTtemp_cut)*perc)])/np.sum(absFFTtemp_cut[round(len(absFFTtemp_cut)*perc):]))
                realseries.append(np.sum(realFFTtemp_cut))
                realratioseries.append(np.sum(realFFTtemp_cut[0:round(len(realFFTtemp_cut)*perc)])/np.sum(realFFTtemp_cut[round(len(realFFTtemp_cut)*perc):]))
                imagsseries.append(np.sum(imagFFTtemp_cut))
                imagratiosseries.append(np.sum(imagFFTtemp_cut[0:round(len(imagFFTtemp_cut)*perc)])/np.sum(imagFFTtemp_cut[round(len(imagFFTtemp_cut)*perc):]))
                ii+=w
            for autocorr_lag in autocorr_lags:
                feature_dict[f'power_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(powerseries, autocorr_lag)
                feature_dict[f'power_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(powerseries, autocorr_lag)
                feature_dict[f'powerratio_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(powerratioseries, autocorr_lag)
                feature_dict[f'powerratio_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(powerratioseries, autocorr_lag)
                
                feature_dict[f'real_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(realseries, autocorr_lag)
                feature_dict[f'real_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(realseries, autocorr_lag)
                feature_dict[f'realratio_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(realratioseries, autocorr_lag)
                feature_dict[f'realratio_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(realratioseries, autocorr_lag)
                
                feature_dict[f'imag_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(imagsseries, autocorr_lag)
                feature_dict[f'imag_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(imagsseries, autocorr_lag)
                feature_dict[f'imagratio_autocorr_w{w}_lag{autocorr_lag}'] = feature_calculators.autocorrelation(imagratiosseries, autocorr_lag)
                feature_dict[f'imagratio_c3_w{w}_lag{autocorr_lag}'] = feature_calculators.c3(imagratiosseries, autocorr_lag)
                
        return feature_dict
Example #16
def ACLag7(fragment):
    return fc.autocorrelation(fragment,7)
Example #17
 def function(x):
     return autocorrelation(x, lag=self.lag)
def autocorrelation(seg, k):
    res = [
        feature_calculators.autocorrelation(seg[i], k)
        for i in range(config.seg_size[0])
    ]
    return np.asarray(res)
Example #19
#Reset index after merging different files into one
feat_dataset.reset_index(drop=True, inplace=True)
##Populate feature characteristics
##ENTROPY
feat_dataset['CGM_Entropy'] = np.nan
for i in range(len(dataset)):
    feat_dataset['CGM_Entropy'][i] = ts.sample_entropy(
        np.array(dataset.iloc[i, :]))
##RMS
feat_dataset['CGM_RMS'] = np.nan
for i in range(len(dataset)):
    feat_dataset['CGM_RMS'][i] = np.sqrt(np.mean(dataset.iloc[i, :]**2))
#Correlation
feat_dataset['CGM_Correlation'] = np.nan
for i in range(len(dataset)):
    feat_dataset['CGM_Correlation'][i] = ts.autocorrelation(
        np.array(dataset.iloc[i, :]), 1)
##Number_of_Peaks
feat_dataset['CGM_Peaks'] = np.nan
for i in range(len(dataset)):
    feat_dataset['CGM_Peaks'][i] = ts.number_peaks(
        np.array(dataset.iloc[i, :]), 2)
#CGM Velocity
feat_dataset['CGM_Velocity'] = np.nan
for i in range(len(dataset)):
    c_list = dataset.loc[i, :].tolist()
    sum_ = []
    for j in range(1, len(c_list)):
        sum_.append(abs(c_list[j] - c_list[j - 1]))
    feat_dataset['CGM_Velocity'][i] = np.round(np.mean(sum_), 2)
#MinMax
feat_dataset['CGM_MinMax'] = np.nan
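# Side note (sketch, not part of the example above; reuses the example's `ts`,
# `dataset` and `feat_dataset` names): the CGM_Velocity loop computes the mean
# absolute consecutive difference by hand, which is exactly what tsfresh exposes
# as mean_abs_change, so the two formulations agree row by row.
for i in range(len(dataset)):
    assert np.isclose(
        feat_dataset['CGM_Velocity'][i],
        np.round(ts.mean_abs_change(np.array(dataset.iloc[i, :])), 2))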
Example #20
    def features(self, x, prefix):
        feature_dict = dict()

        # create features here
        # numpy
        feature_dict[prefix + '_' + 'mean'] = np.mean(x)
        feature_dict[prefix + '_' + 'max'] = np.max(x)
        feature_dict[prefix + '_' + 'min'] = np.min(x)
        feature_dict[prefix + '_' + 'std'] = np.std(x)
        feature_dict[prefix + '_' + 'var'] = np.var(x)
        feature_dict[prefix + '_' + 'ptp'] = np.ptp(x)
        feature_dict[prefix + '_' + 'percentile_10'] = np.percentile(x, 10)
        feature_dict[prefix + '_' + 'percentile_20'] = np.percentile(x, 20)
        feature_dict[prefix + '_' + 'percentile_30'] = np.percentile(x, 30)
        feature_dict[prefix + '_' + 'percentile_40'] = np.percentile(x, 40)
        feature_dict[prefix + '_' + 'percentile_50'] = np.percentile(x, 50)
        feature_dict[prefix + '_' + 'percentile_60'] = np.percentile(x, 60)
        feature_dict[prefix + '_' + 'percentile_70'] = np.percentile(x, 70)
        feature_dict[prefix + '_' + 'percentile_80'] = np.percentile(x, 80)
        feature_dict[prefix + '_' + 'percentile_90'] = np.percentile(x, 90)

        # scipy
        feature_dict[prefix + '_' + 'skew'] = sp.stats.skew(x)
        feature_dict[prefix + '_' + 'kurtosis'] = sp.stats.kurtosis(x)
        feature_dict[prefix + '_' + 'kstat_1'] = sp.stats.kstat(x, 1)
        feature_dict[prefix + '_' + 'kstat_2'] = sp.stats.kstat(x, 2)
        feature_dict[prefix + '_' + 'kstat_3'] = sp.stats.kstat(x, 3)
        feature_dict[prefix + '_' + 'kstat_4'] = sp.stats.kstat(x, 4)
        feature_dict[prefix + '_' + 'moment_1'] = sp.stats.moment(x, 1)
        feature_dict[prefix + '_' + 'moment_2'] = sp.stats.moment(x, 2)
        feature_dict[prefix + '_' + 'moment_3'] = sp.stats.moment(x, 3)
        feature_dict[prefix + '_' + 'moment_4'] = sp.stats.moment(x, 4)

        # tsfresh
        feature_dict[prefix + '_' +
                     'abs_energy'] = feature_calculators.abs_energy(x)
        feature_dict[
            prefix + '_' +
            'abs_sum_of_changes'] = feature_calculators.absolute_sum_of_changes(
                x)
        feature_dict[
            prefix + '_' +
            'count_above_mean'] = feature_calculators.count_above_mean(x)
        feature_dict[
            prefix + '_' +
            'count_below_mean'] = feature_calculators.count_below_mean(x)
        feature_dict[prefix + '_' +
                     'mean_abs_change'] = feature_calculators.mean_abs_change(
                         x)
        feature_dict[prefix + '_' +
                     'mean_change'] = feature_calculators.mean_change(x)
        feature_dict[
            prefix + '_' +
            'var_larger_than_std_dev'] = feature_calculators.variance_larger_than_standard_deviation(
                x)
        feature_dict[prefix + '_' +
                     'range_minf_m4000'] = feature_calculators.range_count(
                         x, -np.inf, -4000)
        feature_dict[prefix + '_' +
                     'range_m4000_m3000'] = feature_calculators.range_count(
                         x, -4000, -3000)
        feature_dict[prefix + '_' +
                     'range_m3000_m2000'] = feature_calculators.range_count(
                         x, -3000, -2000)
        feature_dict[prefix + '_' +
                     'range_m2000_m1000'] = feature_calculators.range_count(
                         x, -2000, -1000)
        feature_dict[prefix + '_' +
                     'range_m1000_0'] = feature_calculators.range_count(
                         x, -1000, 0)
        feature_dict[prefix + '_' +
                     'range_0_p1000'] = feature_calculators.range_count(
                         x, 0, 1000)
        feature_dict[prefix + '_' +
                     'range_p1000_p2000'] = feature_calculators.range_count(
                         x, 1000, 2000)
        feature_dict[prefix + '_' +
                     'range_p2000_p3000'] = feature_calculators.range_count(
                         x, 2000, 3000)
        feature_dict[prefix + '_' +
                     'range_p3000_p4000'] = feature_calculators.range_count(
                         x, 3000, 4000)
        feature_dict[prefix + '_' +
                     'range_p4000_pinf'] = feature_calculators.range_count(
                         x, 4000, np.inf)

        feature_dict[
            prefix + '_' +
            'ratio_unique_values'] = feature_calculators.ratio_value_number_to_time_series_length(
                x)
        feature_dict[
            prefix + '_' +
            'first_loc_min'] = feature_calculators.first_location_of_minimum(x)
        feature_dict[
            prefix + '_' +
            'first_loc_max'] = feature_calculators.first_location_of_maximum(x)
        feature_dict[
            prefix + '_' +
            'last_loc_min'] = feature_calculators.last_location_of_minimum(x)
        feature_dict[
            prefix + '_' +
            'last_loc_max'] = feature_calculators.last_location_of_maximum(x)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 10)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 100)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_1000'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 1000)
        feature_dict[
            prefix + '_' +
            'autocorrelation_1'] = feature_calculators.autocorrelation(x, 1)
        feature_dict[
            prefix + '_' +
            'autocorrelation_2'] = feature_calculators.autocorrelation(x, 2)
        feature_dict[
            prefix + '_' +
            'autocorrelation_3'] = feature_calculators.autocorrelation(x, 3)
        feature_dict[
            prefix + '_' +
            'autocorrelation_4'] = feature_calculators.autocorrelation(x, 4)
        feature_dict[
            prefix + '_' +
            'autocorrelation_5'] = feature_calculators.autocorrelation(x, 5)
        feature_dict[
            prefix + '_' +
            'autocorrelation_6'] = feature_calculators.autocorrelation(x, 6)
        feature_dict[
            prefix + '_' +
            'autocorrelation_7'] = feature_calculators.autocorrelation(x, 7)
        feature_dict[
            prefix + '_' +
            'autocorrelation_8'] = feature_calculators.autocorrelation(x, 8)
        feature_dict[
            prefix + '_' +
            'autocorrelation_9'] = feature_calculators.autocorrelation(x, 9)
        feature_dict[
            prefix + '_' +
            'autocorrelation_10'] = feature_calculators.autocorrelation(x, 10)
        feature_dict[
            prefix + '_' +
            'autocorrelation_50'] = feature_calculators.autocorrelation(x, 50)
        feature_dict[
            prefix + '_' +
            'autocorrelation_100'] = feature_calculators.autocorrelation(
                x, 100)
        feature_dict[
            prefix + '_' +
            'autocorrelation_1000'] = feature_calculators.autocorrelation(
                x, 1000)
        feature_dict[prefix + '_' + 'c3_1'] = feature_calculators.c3(x, 1)
        feature_dict[prefix + '_' + 'c3_2'] = feature_calculators.c3(x, 2)
        feature_dict[prefix + '_' + 'c3_3'] = feature_calculators.c3(x, 3)
        feature_dict[prefix + '_' + 'c3_4'] = feature_calculators.c3(x, 4)
        feature_dict[prefix + '_' + 'c3_5'] = feature_calculators.c3(x, 5)
        feature_dict[prefix + '_' + 'c3_10'] = feature_calculators.c3(x, 10)
        feature_dict[prefix + '_' + 'c3_100'] = feature_calculators.c3(x, 100)
        for c in range(1, 34):
            feature_dict[prefix + '_' + 'fft_{0}_real'.format(c)] = list(
                feature_calculators.fft_coefficient(x, [{
                    'coeff': c,
                    'attr': 'real'
                }]))[0][1]
            feature_dict[prefix + '_' + 'fft_{0}_imag'.format(c)] = list(
                feature_calculators.fft_coefficient(x, [{
                    'coeff': c,
                    'attr': 'imag'
                }]))[0][1]
            feature_dict[prefix + '_' + 'fft_{0}_ang'.format(c)] = list(
                feature_calculators.fft_coefficient(x, [{
                    'coeff': c,
                    'attr': 'angle'
                }]))[0][1]
        feature_dict[
            prefix + '_' +
            'long_strk_above_mean'] = feature_calculators.longest_strike_above_mean(
                x)
        feature_dict[
            prefix + '_' +
            'long_strk_below_mean'] = feature_calculators.longest_strike_below_mean(
                x)
        feature_dict[prefix + '_' + 'cid_ce_0'] = feature_calculators.cid_ce(
            x, 0)
        feature_dict[prefix + '_' + 'cid_ce_1'] = feature_calculators.cid_ce(
            x, 1)
        feature_dict[prefix + '_' +
                     'binned_entropy_5'] = feature_calculators.binned_entropy(
                         x, 5)
        feature_dict[prefix + '_' +
                     'binned_entropy_10'] = feature_calculators.binned_entropy(
                         x, 10)
        feature_dict[prefix + '_' +
                     'binned_entropy_20'] = feature_calculators.binned_entropy(
                         x, 20)
        feature_dict[prefix + '_' +
                     'binned_entropy_50'] = feature_calculators.binned_entropy(
                         x, 50)
        feature_dict[prefix + '_' +
                     'binned_entropy_80'] = feature_calculators.binned_entropy(
                         x, 80)
        feature_dict[
            prefix + '_' +
            'binned_entropy_100'] = feature_calculators.binned_entropy(x, 100)

        feature_dict[prefix + '_' +
                     'num_crossing_0'] = feature_calculators.number_crossing_m(
                         x, 0)
        feature_dict[prefix + '_' +
                     'num_peaks_1'] = feature_calculators.number_peaks(x, 1)
        feature_dict[prefix + '_' +
                     'num_peaks_3'] = feature_calculators.number_peaks(x, 3)
        feature_dict[prefix + '_' +
                     'num_peaks_5'] = feature_calculators.number_peaks(x, 5)
        feature_dict[prefix + '_' +
                     'num_peaks_10'] = feature_calculators.number_peaks(x, 10)
        feature_dict[prefix + '_' +
                     'num_peaks_50'] = feature_calculators.number_peaks(x, 50)
        feature_dict[prefix + '_' +
                     'num_peaks_100'] = feature_calculators.number_peaks(
                         x, 100)
        feature_dict[prefix + '_' +
                     'num_peaks_500'] = feature_calculators.number_peaks(
                         x, 500)

        feature_dict[prefix + '_' + 'spkt_welch_density_1'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 1
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_2'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 2
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_5'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 5
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_8'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 8
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_10'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 10
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_50'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 50
            }]))[0][1]
        feature_dict[prefix + '_' + 'spkt_welch_density_100'] = list(
            feature_calculators.spkt_welch_density(x, [{
                'coeff': 100
            }]))[0][1]

        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_1'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 1)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_2'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 2)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_3'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 3)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_4'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 4)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 10)
        feature_dict[
            prefix + '_' +
            'time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(
                x, 100)

        for r in range(20):
            feature_dict[prefix + '_' + 'symmetry_looking_' +
                         str(r)] = feature_calculators.symmetry_looking(
                             x, [{
                                 'r': r * 0.05
                             }])[0][1]

        for r in range(1, 20):
            feature_dict[
                prefix + '_' + 'large_standard_deviation_' +
                str(r)] = feature_calculators.large_standard_deviation(
                    x, r * 0.05)

        for r in range(1, 10):
            feature_dict[prefix + '_' + 'quantile_' +
                         str(r)] = feature_calculators.quantile(x, r * 0.1)

        for r in ['mean', 'median', 'var']:
            feature_dict[prefix + '_' + 'agg_autocorr_' +
                         r] = feature_calculators.agg_autocorrelation(
                             x, [{
                                 'f_agg': r,
                                 'maxlag': 40
                             }])[0][-1]

        #for r in range(1, 6):
        #    feature_dict[prefix+'_'+'number_cwt_peaks_'+str(r)] = feature_calculators.number_cwt_peaks(x, r)

        for r in range(1, 10):
            feature_dict[prefix + '_' + 'index_mass_quantile_' +
                         str(r)] = feature_calculators.index_mass_quantile(
                             x, [{
                                 'q': r * 0.1
                             }])[0][1]

        #for ql in [0., .2, .4, .6, .8]:
        #    for qh in [.2, .4, .6, .8, 1.]:
        #        if ql < qh:
        #            for b in [False, True]:
        #                for f in ["mean", "var"]:
        #                    feature_dict[prefix+'_'+'change_quantiles_'+str(ql)+'_'+str(qh)+'_'+str(b)+'_'+str(f)] = feature_calculators.change_quantiles(x, ql, qh, b, f)

        #for r in [.1, .3, .5, .7, .9]:
        #    feature_dict[prefix+'_'+'approximate_entropy_'+str(r)] = feature_calculators.approximate_entropy(x, 2, r)

        feature_dict[
            prefix + '_' +
            'max_langevin_fixed_point'] = feature_calculators.max_langevin_fixed_point(
                x, 3, 30)

        for r in ['pvalue', 'rvalue', 'intercept', 'slope', 'stderr']:
            feature_dict[prefix + '_' + 'linear_trend_' +
                         str(r)] = feature_calculators.linear_trend(
                             x, [{
                                 'attr': r
                             }])[0][1]

        for r in ['pvalue', 'teststat', 'usedlag']:
            feature_dict[prefix + '_' + 'augmented_dickey_fuller_' +
                         r] = feature_calculators.augmented_dickey_fuller(
                             x, [{
                                 'attr': r
                             }])[0][1]

        for r in [0.5, 1, 1.5, 2, 2.5, 3, 5, 6, 7, 10]:
            feature_dict[prefix + '_' + 'ratio_beyond_r_sigma_' +
                         str(r)] = feature_calculators.ratio_beyond_r_sigma(
                             x, r)

        #for attr in ["pvalue", "rvalue", "intercept", "slope", "stderr"]:
        #    feature_dict[prefix+'_'+'linear_trend_timewise_'+attr] = feature_calculators.linear_trend_timewise(x, [{'attr': attr}])[0][1]
        #for attr in ["rvalue", "intercept", "slope", "stderr"]:
        #    for i in [5, 10, 50]:
        #        for f in ["max", "min", "mean", "var"]:
        #            feature_dict[prefix+'_'+'agg_linear_trend_'+attr+'_'+str(i)+'_'+f] = feature_calculators.agg_linear_trend(x, [{'attr': attr, 'chunk_len': i, 'f_agg': f}])[0][-1]
        #for width in [2, 5, 10, 20]:
        #    for coeff in range(15):
        #        for w in [2, 5, 10, 20]:
        #            feature_dict[prefix+'_'+'cwt_coefficients_'+str(width)+'_'+str(coeff)+'_'+str(w)] = list(feature_calculators.cwt_coefficients(x, [{'widths': width, 'coeff': coeff, 'w': w}]))[0][1]
        #for r in range(10):
        #    feature_dict[prefix+'_'+'partial_autocorr_'+str(r)] = feature_calculators.partial_autocorrelation(x, [{'lag': r}])[0][1]
        # "ar_coefficient": [{"coeff": coeff, "k": k} for coeff in range(5) for k in [10]],
        # "fft_coefficient": [{"coeff": k, "attr": a} for a, k in product(["real", "imag", "abs", "angle"], range(100))],
        # "fft_aggregated": [{"aggtype": s} for s in ["centroid", "variance", "skew", "kurtosis"]],
        # "value_count": [{"value": value} for value in [0, 1, -1]],
        # "range_count": [{"min": -1, "max": 1}, {"min": 1e12, "max": 0}, {"min": 0, "max": 1e12}],
        # "friedrich_coefficients": (lambda m: [{"coeff": coeff, "m": m, "r": 30} for coeff in range(m + 1)])(3),
        #  "energy_ratio_by_chunks": [{"num_segments": 10, "segment_focus": i} for i in range(10)],
        return feature_dict
def TS_feature7(signal):
    lag_ts = 203  # fixed lag used for both calculators
    autocorrelation_value = ts.autocorrelation(signal, lag_ts)
    value_c3 = ts.c3(signal, lag_ts)

    return autocorrelation_value, value_c3
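Below is a minimal usage sketch for the two tsfresh calculators used by TS_feature7, assuming `ts` is an alias for tsfresh.feature_extraction.feature_calculators (as the snippet implies) and using a synthetic signal:

import numpy as np
from tsfresh.feature_extraction import feature_calculators as ts  # alias assumed by TS_feature7

rng = np.random.default_rng(0)
signal = rng.normal(size=1000)

# lag-203 autocorrelation and the c3 non-linearity measure of a synthetic signal
print(ts.autocorrelation(signal, 203), ts.c3(signal, 203))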
Exemple #22
0
    def features(self, x, y, seg_id):
        feature_dict = dict()
        feature_dict['target'] = y
        feature_dict['seg_id'] = seg_id

        # create features here
        # numpy
        feature_dict['mean'] = np.mean(x)
        feature_dict['max'] = np.max(x)
        feature_dict['min'] = np.min(x)
        feature_dict['std'] = np.std(x)
        feature_dict['var'] = np.var(x)
        feature_dict['ptp'] = np.ptp(x)
        feature_dict['percentile_10'] = np.percentile(x, 10)
        feature_dict['percentile_20'] = np.percentile(x, 20)
        feature_dict['percentile_30'] = np.percentile(x, 30)
        feature_dict['percentile_40'] = np.percentile(x, 40)
        feature_dict['percentile_50'] = np.percentile(x, 50)
        feature_dict['percentile_60'] = np.percentile(x, 60)
        feature_dict['percentile_70'] = np.percentile(x, 70)
        feature_dict['percentile_80'] = np.percentile(x, 80)
        feature_dict['percentile_90'] = np.percentile(x, 90)

        # scipy
        feature_dict['skew'] = sp.stats.skew(x)
        feature_dict['kurtosis'] = sp.stats.kurtosis(x)
        feature_dict['kstat_1'] = sp.stats.kstat(x, 1)
        feature_dict['kstat_2'] = sp.stats.kstat(x, 2)
        feature_dict['kstat_3'] = sp.stats.kstat(x, 3)
        feature_dict['kstat_4'] = sp.stats.kstat(x, 4)
        feature_dict['moment_1'] = sp.stats.moment(x, 1)
        feature_dict['moment_2'] = sp.stats.moment(x, 2)
        feature_dict['moment_3'] = sp.stats.moment(x, 3)
        feature_dict['moment_4'] = sp.stats.moment(x, 4)
        
        feature_dict['abs_energy'] = feature_calculators.abs_energy(x)
        feature_dict['abs_sum_of_changes'] = feature_calculators.absolute_sum_of_changes(x)
        feature_dict['count_above_mean'] = feature_calculators.count_above_mean(x)
        feature_dict['count_below_mean'] = feature_calculators.count_below_mean(x)
        feature_dict['mean_abs_change'] = feature_calculators.mean_abs_change(x)
        feature_dict['mean_change'] = feature_calculators.mean_change(x)
        feature_dict['var_larger_than_std_dev'] = feature_calculators.variance_larger_than_standard_deviation(x)
        feature_dict['range_minf_m4000'] = feature_calculators.range_count(x, -np.inf, -4000)
        feature_dict['range_m4000_m3000'] = feature_calculators.range_count(x, -4000, -3000)
        feature_dict['range_m3000_m2000'] = feature_calculators.range_count(x, -3000, -2000)
        feature_dict['range_m2000_m1000'] = feature_calculators.range_count(x, -2000, -1000)
        feature_dict['range_m1000_0'] = feature_calculators.range_count(x, -1000, 0)
        feature_dict['range_0_p1000'] = feature_calculators.range_count(x, 0, 1000)
        feature_dict['range_p1000_p2000'] = feature_calculators.range_count(x, 1000, 2000)
        feature_dict['range_p2000_p3000'] = feature_calculators.range_count(x, 2000, 3000)
        feature_dict['range_p3000_p4000'] = feature_calculators.range_count(x, 3000, 4000)
        feature_dict['range_p4000_pinf'] = feature_calculators.range_count(x, 4000, np.inf)

        feature_dict['ratio_unique_values'] = feature_calculators.ratio_value_number_to_time_series_length(x)
        feature_dict['first_loc_min'] = feature_calculators.first_location_of_minimum(x)
        feature_dict['first_loc_max'] = feature_calculators.first_location_of_maximum(x)
        feature_dict['last_loc_min'] = feature_calculators.last_location_of_minimum(x)
        feature_dict['last_loc_max'] = feature_calculators.last_location_of_maximum(x)
        feature_dict['time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(x, 10)
        feature_dict['time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(x, 100)
        feature_dict['time_rev_asym_stat_1000'] = feature_calculators.time_reversal_asymmetry_statistic(x, 1000)
        feature_dict['autocorrelation_5'] = feature_calculators.autocorrelation(x, 5)
        feature_dict['autocorrelation_10'] = feature_calculators.autocorrelation(x, 10)
        feature_dict['autocorrelation_50'] = feature_calculators.autocorrelation(x, 50)
        feature_dict['autocorrelation_100'] = feature_calculators.autocorrelation(x, 100)
        feature_dict['autocorrelation_1000'] = feature_calculators.autocorrelation(x, 1000)
        feature_dict['c3_5'] = feature_calculators.c3(x, 5)
        feature_dict['c3_10'] = feature_calculators.c3(x, 10)
        feature_dict['c3_100'] = feature_calculators.c3(x, 100)
        feature_dict['fft_1_real'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 1, 'attr': 'real'}]))[0][1]
        feature_dict['fft_1_imag'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 1, 'attr': 'imag'}]))[0][1]
        feature_dict['fft_1_ang'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 1, 'attr': 'angle'}]))[0][1]
        feature_dict['fft_2_real'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 2, 'attr': 'real'}]))[0][1]
        feature_dict['fft_2_imag'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 2, 'attr': 'imag'}]))[0][1]
        feature_dict['fft_2_ang'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 2, 'attr': 'angle'}]))[0][1]
        feature_dict['fft_3_real'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 3, 'attr': 'real'}]))[0][1]
        feature_dict['fft_3_imag'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 3, 'attr': 'imag'}]))[0][1]
        feature_dict['fft_3_ang'] = list(feature_calculators.fft_coefficient(x, [{'coeff': 3, 'attr': 'angle'}]))[0][1]
        feature_dict['long_strk_above_mean'] = feature_calculators.longest_strike_above_mean(x)
        feature_dict['long_strk_below_mean'] = feature_calculators.longest_strike_below_mean(x)
        feature_dict['cid_ce_0'] = feature_calculators.cid_ce(x, 0)
        feature_dict['cid_ce_1'] = feature_calculators.cid_ce(x, 1)
        feature_dict['binned_entropy_5'] = feature_calculators.binned_entropy(x, 5)
        feature_dict['binned_entropy_10'] = feature_calculators.binned_entropy(x, 10)
        feature_dict['binned_entropy_20'] = feature_calculators.binned_entropy(x, 20)
        feature_dict['binned_entropy_50'] = feature_calculators.binned_entropy(x, 50)
        feature_dict['binned_entropy_80'] = feature_calculators.binned_entropy(x, 80)
        feature_dict['binned_entropy_100'] = feature_calculators.binned_entropy(x, 100)

        feature_dict['num_crossing_0'] = feature_calculators.number_crossing_m(x, 0)
        feature_dict['num_peaks_10'] = feature_calculators.number_peaks(x, 10)
        feature_dict['num_peaks_50'] = feature_calculators.number_peaks(x, 50)
        feature_dict['num_peaks_100'] = feature_calculators.number_peaks(x, 100)
        feature_dict['num_peaks_500'] = feature_calculators.number_peaks(x, 500)

        feature_dict['spkt_welch_density_1'] = list(feature_calculators.spkt_welch_density(x, [{'coeff': 1}]))[0][1]
        feature_dict['spkt_welch_density_10'] = list(feature_calculators.spkt_welch_density(x, [{'coeff': 10}]))[0][1]
        feature_dict['spkt_welch_density_50'] = list(feature_calculators.spkt_welch_density(x, [{'coeff': 50}]))[0][1]
        feature_dict['spkt_welch_density_100'] = list(feature_calculators.spkt_welch_density(x, [{'coeff': 100}]))[0][1]

        feature_dict['time_rev_asym_stat_1'] = feature_calculators.time_reversal_asymmetry_statistic(x, 1)
        feature_dict['time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(x, 10)
        feature_dict['time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(x, 100)        

        return feature_dict
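The `list(...)[0][1]` indexing used above is how a single scalar is pulled out of a tsfresh "combiner" calculator, which yields (parameter-name, value) pairs. A small self-contained sketch with synthetic data (only numpy and tsfresh assumed):

import numpy as np
from tsfresh.feature_extraction import feature_calculators

x = np.random.default_rng(1).normal(size=4096)
# fft_coefficient returns an iterator of (name, value) pairs, one per parameter dict
pairs = list(feature_calculators.fft_coefficient(x, [{'coeff': 1, 'attr': 'real'},
                                                     {'coeff': 1, 'attr': 'imag'}]))
print(pairs)        # each entry is (parameter_string, value)
print(pairs[0][1])  # the scalar that ends up in the feature dict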
Exemple #23
0
def autocorrelation(lag):
    return lambda x: feats.autocorrelation(x, lag)
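A usage sketch for this lag-parameterised factory, redefined here so the sketch is self-contained; `feats` is assumed to be tsfresh.feature_extraction.feature_calculators:

import numpy as np
from tsfresh.feature_extraction import feature_calculators as feats

def autocorrelation(lag):
    return lambda x: feats.autocorrelation(x, lag)

x = np.sin(np.linspace(0, 20 * np.pi, 2000))
extractors = {f'autocorr_{lag}': autocorrelation(lag) for lag in (5, 10, 50)}
print({name: fn(x) for name, fn in extractors.items()})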
Exemple #24
0
def create_features2(seg):
    data_row = {}

    xcz = des_filter(seg, high=CUTOFF)

    zc = np.fft.fft(xcz)
    zc = zc[:MAX_FREQ]

    # FFT transform values
    realFFT = np.real(zc)
    imagFFT = np.imag(zc)

    freq_bands = list(range(0, MAX_FREQ, FREQ_STEP))
    magFFT = np.abs(zc)
    phzFFT = np.angle(zc)
    phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
    phzFFT[phzFFT == np.inf] = np.pi / 2.0
    phzFFT = np.nan_to_num(phzFFT)

    for freq in freq_bands:
        data_row['FFT_Mag_01q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.01)
        data_row['FFT_Mag_10q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.1)
        data_row['FFT_Mag_90q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.9)
        data_row['FFT_Mag_99q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.99)

        data_row['FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Mag_std%d' % freq] = np.std(magFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Mag_max%d' % freq] = np.max(magFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Mag_min%d' % freq] = np.min(magFFT[freq: freq + FREQ_STEP])

        data_row['FFT_Phz_mean%d' % freq] = np.mean(phzFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Phz_std%d' % freq] = np.std(phzFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Phz_max%d' % freq] = np.max(phzFFT[freq: freq + FREQ_STEP])
        data_row['FFT_Phz_min%d' % freq] = np.min(phzFFT[freq: freq + FREQ_STEP])

    data_row['FFT_Rmean'] = realFFT.mean()
    data_row['FFT_Rstd'] = realFFT.std()
    data_row['FFT_Rmax'] = realFFT.max()
    data_row['FFT_Rmin'] = realFFT.min()
    data_row['FFT_Imean'] = imagFFT.mean()
    data_row['FFT_Istd'] = imagFFT.std()
    data_row['FFT_Imax'] = imagFFT.max()
    data_row['FFT_Imin'] = imagFFT.min()

    data_row['FFT_Rmean_first_6000'] = realFFT[:6000].mean()
    data_row['FFT_Rstd_first_6000'] = realFFT[:6000].std()
    data_row['FFT_Rmax_first_6000'] = realFFT[:6000].max()
    data_row['FFT_Rmin_first_6000'] = realFFT[:6000].min()
    data_row['FFT_Rmean_first_18000'] = realFFT[:18000].mean()
    data_row['FFT_Rstd_first_18000'] = realFFT[:18000].std()
    data_row['FFT_Rmax_first_18000'] = realFFT[:18000].max()
    data_row['FFT_Rmin_first_18000'] = realFFT[:18000].min()

    del xcz
    del zc
    # gc.collect()

    sigs = [seg]
    for freq in range(0, MAX_FREQ + FREQ_STEP, FREQ_STEP):
        if freq == 0:
            xc_ = des_filter(seg, high=FREQ_STEP)
        elif freq == MAX_FREQ:
            xc_ = des_filter(seg, low=freq)
        else:
            xc_ = des_filter(seg, low=freq, high=freq + FREQ_STEP)
        sigs.append(pd.Series(xc_))

    for window in [50, 200, 1000]:
        roll_mean = seg.rolling(window).mean().dropna()
        roll_std = seg.rolling(window).std().dropna()
        sigs.append(pd.Series(roll_mean))
        sigs.append(pd.Series(roll_std))

    for span in [30, 300, 3000]:
        exp_mean = seg.ewm(span).mean().dropna()
        exp_std = seg.ewm(span).std().dropna()
        sigs.append(pd.Series(exp_mean))
        sigs.append(pd.Series(exp_std))

    for i, sig in enumerate(sigs):

        data_row['mean_%d' % i] = sig.mean()
        data_row['std_%d' % i] = sig.std()
        data_row['max_%d' % i] = sig.max()
        data_row['min_%d' % i] = sig.min()

        data_row['mean_change_abs_%d' % i] = np.mean(np.diff(sig))
        data_row['mean_change_rate_%d' % i] = np.mean(np.nonzero((np.diff(sig) / sig[:-1]))[0])
        data_row['abs_max_%d' % i] = np.abs(sig).max()
        data_row['abs_min_%d' % i] = np.abs(sig).min()

        data_row['std_first_50000_%d' % i] = sig[:50000].std()
        data_row['std_last_50000_%d' % i] = sig[-50000:].std()
        data_row['std_first_10000_%d' % i] = sig[:10000].std()
        data_row['std_last_10000_%d' % i] = sig[-10000:].std()

        data_row['avg_first_50000_%d' % i] = sig[:50000].mean()
        data_row['avg_last_50000_%d' % i] = sig[-50000:].mean()
        data_row['avg_first_10000_%d' % i] = sig[:10000].mean()
        data_row['avg_last_10000_%d' % i] = sig[-10000:].mean()

        data_row['min_first_50000_%d' % i] = sig[:50000].min()
        data_row['min_last_50000_%d' % i] = sig[-50000:].min()
        data_row['min_first_10000_%d' % i] = sig[:10000].min()
        data_row['min_last_10000_%d' % i] = sig[-10000:].min()

        data_row['max_first_50000_%d' % i] = sig[:50000].max()
        data_row['max_last_50000_%d' % i] = sig[-50000:].max()
        data_row['max_first_10000_%d' % i] = sig[:10000].max()
        data_row['max_last_10000_%d' % i] = sig[-10000:].max()

        data_row['max_to_min_%d' % i] = sig.max() / np.abs(sig.min())
        data_row['max_to_min_diff_%d' % i] = sig.max() - np.abs(sig.min())
        data_row['count_big_%d' % i] = len(sig[np.abs(sig) > 500])
        data_row['sum_%d' % i] = sig.sum()

        data_row['mean_change_rate_first_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:50000]) / sig[:50000][:-1]))[0])
        data_row['mean_change_rate_last_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-50000:]) / sig[-50000:][:-1]))[0])
        data_row['mean_change_rate_first_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:10000]) / sig[:10000][:-1]))[0])
        data_row['mean_change_rate_last_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-10000:]) / sig[-10000:][:-1]))[0])

        for p in [1, 5, 10, 25, 50, 75, 90, 95, 99]:
            data_row['percentile_p{}_{}'.format(p, i)] = np.percentile(sig, p)
            data_row['abd_percentile_p{}_{}'.format(p, i)] = np.percentile(np.abs(sig), p)

        data_row['trend_%d' % i] = add_trend_feature(sig)
        data_row['abs_trend_%d' % i] = add_trend_feature(sig, abs_values=True)
        data_row['abs_mean_%d' % i] = np.abs(sig).mean()
        data_row['abs_std_%d' % i] = np.abs(sig).std()

        data_row['mad_%d' % i] = sig.mad()
        data_row['kurt_%d' % i] = sig.kurtosis()
        data_row['skew_%d' % i] = sig.skew()
        data_row['med_%d' % i] = sig.median()

        # data_row['Hilbert_mean_%d' % i] = np.abs(hilbert(sig)).mean()
        data_row['Hann_window50_%d' % i] = (convolve(sig, hann(50), mode='same') / sum(hann(50))).mean()
        data_row['Hann_window500_%d' % i] = (convolve(sig, hann(500), mode='same') / sum(hann(500))).mean()

        data_row['classic_sta_lta0_mean_%d' % i] = classic_sta_lta(sig, 50, 1000).mean()
        data_row['classic_sta_lta1_mean_%d' % i] = classic_sta_lta(sig, 500, 10000).mean()
        data_row['classic_sta_lta2_mean_%d' % i] = classic_sta_lta(sig, 5000, 100000).mean()
        data_row['classic_sta_lta3_mean_%d' % i] = classic_sta_lta(sig, 3333, 6666).mean()
        data_row['classic_sta_lta4_mean_%d' % i] = classic_sta_lta(sig, 10000, 25000).mean()

        no_of_std = 2
        for w in [10, 100, 500]:
            signal_mean = sig.rolling(window=w).mean()
            signal_std = sig.rolling(window=w).std()
            data_row['high_bound_mean_win{}_{}'.format(w, i)] = (signal_mean + no_of_std * signal_std).mean()
            data_row['low_bound_mean_win{}_{}'.format(w, i)] = (signal_mean - no_of_std * signal_std).mean()

        data_row['range_inf_4000_%d' % i] = feature_calculators.range_count(sig, -np.inf, -4000)
        data_row['range_4000_inf_%d' % i] = feature_calculators.range_count(sig, 4000, np.inf)
        for l, h in [[-4000, -2000], [-2000, 0], [0, 2000], [2000, 4000]]:
            data_row['range_{}_{}_{}'.format(np.abs(l), np.abs(h), i)] = feature_calculators.range_count(sig, l, h)

        data_row['iqr0_%d' % i] = np.subtract(*np.percentile(sig, [75, 25]))
        data_row['iqr1_%d' % i] = np.subtract(*np.percentile(sig, [95, 5]))
        data_row['ave10_%d' % i] = stats.trim_mean(sig, 0.1)
        data_row['num_cross_0_%d' % i] = feature_calculators.number_crossing_m(sig, 0)
        data_row['ratio_value_number_%d' % i] = feature_calculators.ratio_value_number_to_time_series_length(sig)
        # data_row['var_larger_than_std_dev_%d' % i] = feature_calculators.variance_larger_than_standard_deviation(sig)
        data_row['ratio_unique_values_%d' % i] = feature_calculators.ratio_value_number_to_time_series_length(sig)
        data_row['abs_energy_%d' % i] = feature_calculators.abs_energy(sig)
        data_row['abs_sum_of_changes_%d' % i] = feature_calculators.absolute_sum_of_changes(sig)
        data_row['count_above_mean_%d' % i] = feature_calculators.count_above_mean(sig)
        data_row['count_below_mean_%d' % i] = feature_calculators.count_below_mean(sig)
        data_row['mean_abs_change_%d' % i] = feature_calculators.mean_abs_change(sig)
        data_row['mean_change_%d' % i] = feature_calculators.mean_change(sig)
        data_row['first_loc_min_%d' % i] = feature_calculators.first_location_of_minimum(sig)
        data_row['first_loc_max_%d' % i] = feature_calculators.first_location_of_maximum(sig)
        data_row['last_loc_min_%d' % i] = feature_calculators.last_location_of_minimum(sig)
        data_row['last_loc_max_%d' % i] = feature_calculators.last_location_of_maximum(sig)
        data_row['long_strk_above_mean_%d' % i] = feature_calculators.longest_strike_above_mean(sig)
        data_row['long_strk_below_mean_%d' % i] = feature_calculators.longest_strike_below_mean(sig)
        # data_row['cid_ce_0_%d' % i] = feature_calculators.cid_ce(sig, 0)
        # data_row['cid_ce_1_%d' % i] = feature_calculators.cid_ce(sig, 1)

        for j in [10, 50, ]:
            data_row['peak_num_p{}_{}'.format(j, i)] = feature_calculators.number_peaks(sig, j)
        for j in [1, 10, 50, 100]:
            data_row['spkt_welch_density_coeff{}_{}'.format(j, i)] = \
            list(feature_calculators.spkt_welch_density(sig, [{'coeff': j}]))[0][1]
        for j in [5, 10, 100]:
            data_row['c3_c{}_{}'.format(j, i)] = feature_calculators.c3(sig, j)
        for j in [5, 10, 50, 100, 1000]:
            data_row['autocorrelation_auto{}_{}'.format(j, i)] = feature_calculators.autocorrelation(sig, j)
        for j in [10, 100, 1000]:
            data_row['time_rev_asym_stat_t{}_{}'.format(j, i)] = feature_calculators.time_reversal_asymmetry_statistic(
                sig, j)
        for j in range(1, 5):
            data_row['kstat_k{}_{}'.format(j, i)] = stats.kstat(sig, j)
            data_row['moment_m{}_{}'.format(j, i)] = stats.moment(sig, j)
        for j in range(1, 3):
            data_row['kstatvar_k{}_{}'.format(j, i)] = stats.kstatvar(sig, j)
        for j in [5, 10, 50, 100]:
            data_row['binned_entropy_b{}_{}'.format(j, i)] = feature_calculators.binned_entropy(sig, j)

    return data_row
def get_features(sig, sensor_id):
    """Analysis of a signal. Grabs temporal and frequential features.
    Returns a pandas dataframe"""

    fourier = fftpack.fft(sig.values)
    real, imag = np.real(fourier), np.imag(fourier)

    # Temporal data
    features = {}
    features[f"{sensor_id}_mean"] = [sig.mean()]
    features[f"{sensor_id}_var"] = [sig.var()]
    features[f"{sensor_id}_skew"] = [sig.skew()]
    features[f"{sensor_id}_delta"] = [sig.max() - sig.min()]
    features[f"{sensor_id}_mad"] = [sig.mad()]
    features[f"{sensor_id}_kurtosis"] = [sig.kurtosis()]
    features[f"{sensor_id}_sem"] = [sig.sem()]
    features[f"{sensor_id}_q5"] = [np.quantile(sig, 0.05)]
    features[f"{sensor_id}_q25"] = [np.quantile(sig, 0.25)]
    features[f"{sensor_id}_q75"] = [np.quantile(sig, 0.75)]
    features[f"{sensor_id}_q95"] = [np.quantile(sig, 0.95)]
    grad_rol_max = [maximum_filter1d(np.gradient(np.abs(sig.values)), 50)]
    delta = np.max(grad_rol_max) - np.min(grad_rol_max)
    features[f"{sensor_id}_grmax_delta"] = delta

    # Frequencial
    features[f"{sensor_id}_real_mean"] = [real.mean()]
    features[f"{sensor_id}_real_var"] = [real.var()]
    features[f"{sensor_id}_real_delta"] = [real.max() - real.min()]

    features[f"{sensor_id}_imag_mean"] = [imag.mean()]
    features[f"{sensor_id}_imag_var"] = [imag.var()]
    features[f"{sensor_id}_imag_delta"] = [imag.max() - imag.min()]

    features[f"{sensor_id}_nb_peak"] = fc.number_peaks(sig.values, 2)
    features[f"{sensor_id}_median_roll_std"] = np.median(
        pd.Series(sig).rolling(50).std().dropna().values)
    features[f"{sensor_id}_autocorr5"] = fc.autocorrelation(sig, 5)

    # Added 16
    features[f"{sensor_id}_nb_peak_3"] = fc.number_peaks(sig.values, 3)
    features[f"{sensor_id}_absquant95"] = np.quantile(np.abs(sig), 0.95)

    try:
        # Mel-frequency cepstral coefficients
        mfcc_mean = mfcc(sig.values).mean(axis=1)
        for i in range(20):
            features[f"{sensor_id}_mfcc_mean_{i}"] = mfcc_mean[i]
        # Contrast spectral
        spec_contrast = spectral_contrast(sig.values).mean(axis=1)
        for i in range(7):
            features[f"{sensor_id}_lib_spec_cont_{i}"] = spec_contrast[i]
        features[f"{sensor_id}_zero_cross"] = zero_crossing_rate(sig)[0].mean()
        # Added 16
        features[f"{sensor_id}_percentile_roll20_std_50"] = np.percentile(
            sig.rolling(20).std().dropna().values, 50)

    except Exception:
        # the spectral features above are optional; skip them if extraction fails
        pass


# =============================================================================
# fftrhann20000 = np.sum(np.abs(np.fft.fft(np.hanning(len(z))*z)[:20000]))
# fftrhann20000_denoise = np.sum(np.abs(np.fft.fft(np.hanning(len(z))*den_sample)[:20000]))
# fftrhann20000_diff_rate = (fftrhann20000 - fftrhann20000_denoise)/fftrhann20000
# X['LGBM_fftrhann20000_diff_rate'] = fftrhann20000_diff_rate
# =============================================================================
    return pd.DataFrame.from_dict(features)
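A self-contained sketch of the core tsfresh/pandas calls used in get_features, on a synthetic signal; `fc` is assumed to be tsfresh.feature_extraction.feature_calculators, as in the snippet:

import numpy as np
import pandas as pd
from tsfresh.feature_extraction import feature_calculators as fc

sig = pd.Series(np.random.default_rng(2).normal(size=5000))
print(fc.number_peaks(sig.values, 2))                     # peak count with support 2
print(np.median(sig.rolling(50).std().dropna().values))   # median of the rolling std
print(fc.autocorrelation(sig.values, 5))                  # lag-5 autocorrelation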
Exemple #26
0
def transform_pack3(df):
    """ augment X from tsfresh features"""
    x = df.values
    output = {}

    output['kstat_1'] = stats.kstat(x, 1)
    output['kstat_2'] = stats.kstat(x, 2)
    output['kstat_3'] = stats.kstat(x, 3)
    output['kstat_4'] = stats.kstat(x, 4)
    output['abs_energy'] = feature_calculators.abs_energy(x)
    output['abs_sum_of_changes'] = feature_calculators.absolute_sum_of_changes(
        x)
    output['count_above_mean'] = feature_calculators.count_above_mean(x)
    output['count_below_mean'] = feature_calculators.count_below_mean(x)
    output['range_minf_m4000'] = feature_calculators.range_count(
        x, -np.inf, -4000)
    output['range_m4000_m3000'] = feature_calculators.range_count(
        x, -4000, -3000)
    output['range_m3000_m2000'] = feature_calculators.range_count(
        x, -3000, -2000)
    output['range_m2000_m1000'] = feature_calculators.range_count(
        x, -2000, -1000)
    output['range_m1000_0'] = feature_calculators.range_count(x, -1000, 0)
    output['range_0_p1000'] = feature_calculators.range_count(x, 0, 1000)
    output['range_p1000_p2000'] = feature_calculators.range_count(
        x, 1000, 2000)
    output['range_p2000_p3000'] = feature_calculators.range_count(
        x, 2000, 3000)
    output['range_p3000_p4000'] = feature_calculators.range_count(
        x, 3000, 4000)
    output['range_p4000_pinf'] = feature_calculators.range_count(
        x, 4000, np.inf)

    output[
        'ratio_unique_values'] = feature_calculators.ratio_value_number_to_time_series_length(
            x)
    output['first_loc_min'] = feature_calculators.first_location_of_minimum(x)
    output['first_loc_max'] = feature_calculators.first_location_of_maximum(x)
    output['last_loc_min'] = feature_calculators.last_location_of_minimum(x)
    output['last_loc_max'] = feature_calculators.last_location_of_maximum(x)
    output[
        'time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 10)
    output[
        'time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 100)
    output[
        'time_rev_asym_stat_1000'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 1000)

    output['autocorrelation_10'] = feature_calculators.autocorrelation(x, 10)
    output['autocorrelation_100'] = feature_calculators.autocorrelation(x, 100)
    output['autocorrelation_1000'] = feature_calculators.autocorrelation(
        x, 1000)
    output['autocorrelation_5000'] = feature_calculators.autocorrelation(
        x, 5000)

    output['c3_5'] = feature_calculators.c3(x, 5)
    output['c3_10'] = feature_calculators.c3(x, 10)
    output['c3_100'] = feature_calculators.c3(x, 100)

    output[
        'long_strk_above_mean'] = feature_calculators.longest_strike_above_mean(
            x)
    output[
        'long_strk_below_mean'] = feature_calculators.longest_strike_below_mean(
            x)
    output['cid_ce_0'] = feature_calculators.cid_ce(x, 0)
    output['cid_ce_1'] = feature_calculators.cid_ce(x, 1)
    output['binned_entropy_10'] = feature_calculators.binned_entropy(x, 10)
    output['binned_entropy_50'] = feature_calculators.binned_entropy(x, 50)
    output['binned_entropy_80'] = feature_calculators.binned_entropy(x, 80)
    output['binned_entropy_100'] = feature_calculators.binned_entropy(x, 100)

    tmp = np.abs(x)
    output['num_crossing_0'] = feature_calculators.number_crossing_m(tmp, 0)
    output['num_crossing_10'] = feature_calculators.number_crossing_m(tmp, 10)
    output['num_crossing_100'] = feature_calculators.number_crossing_m(
        tmp, 100)
    output['num_peaks_10'] = feature_calculators.number_peaks(tmp, 10)
    output['num_peaks_50'] = feature_calculators.number_peaks(tmp, 50)
    output['num_peaks_100'] = feature_calculators.number_peaks(tmp, 100)
    output['num_peaks_500'] = feature_calculators.number_peaks(tmp, 500)

    output['spkt_welch_density_1'] = list(
        feature_calculators.spkt_welch_density(x, [{
            'coeff': 1
        }]))[0][1]
    output['spkt_welch_density_10'] = list(
        feature_calculators.spkt_welch_density(x, [{
            'coeff': 10
        }]))[0][1]
    output['spkt_welch_density_50'] = list(
        feature_calculators.spkt_welch_density(x, [{
            'coeff': 50
        }]))[0][1]
    output['spkt_welch_density_100'] = list(
        feature_calculators.spkt_welch_density(x, [{
            'coeff': 100
        }]))[0][1]

    output[
        'time_rev_asym_stat_1'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 1)
    output[
        'time_rev_asym_stat_10'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 10)
    output[
        'time_rev_asym_stat_100'] = feature_calculators.time_reversal_asymmetry_statistic(
            x, 100)

    return output
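A sketch of the banded range_count features computed above: counts of samples falling into fixed amplitude bins, with open-ended bins at both extremes (only numpy and tsfresh assumed; the bin edges mirror the snippet):

import numpy as np
from tsfresh.feature_extraction import feature_calculators

x = np.random.default_rng(3).normal(scale=1500, size=10000)
borders = list(range(-4000, 4001, 1000))
counts = {f'range_{lo}_{hi}': feature_calculators.range_count(x, lo, hi)
          for lo, hi in zip(borders, borders[1:])}
counts['range_minf_m4000'] = feature_calculators.range_count(x, -np.inf, -4000)
counts['range_p4000_pinf'] = feature_calculators.range_count(x, 4000, np.inf)
print(counts)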
def generate_features(x):
    # collection of features
    feature_collection = {}

    # collection of intervals
    feature_intervals = {
        'k_static': list(range(1, 5)),
        'variable_k_static': [1, 2]
    }

    for interval in [50, 10, 100, 20]:
        feature_collection[f'discrimination_power_{interval}'] = feature_calculators.c3(x, interval)

    for interval in [500, 10000, 1000, 10, 50, 100]:
        standard_dev = pd.DataFrame(x).rolling(interval).std().dropna().values

        for sub_interval in [50, 60, 70, 75, 1, 40, 80, 90, 95, 99, 5, 10, 20, 25, 30]:
            feature_collection[f'{interval}_{sub_interval}_standard_percentile'] = np.percentile(standard_dev, sub_interval)

    for interval in feature_intervals['k_static']:
        feature_collection[f'{interval}_k_static'] = stats.kstat(x, interval)

    feature_collection['median_abs_dev'] = stats.median_abs_deviation(x, scale='normal')  # MAD with normal-consistency scaling

    for interval in feature_intervals['variable_k_static']:
        feature_collection[f'{interval}_variable_k_static'] = stats.kstatvar(x, interval)

    feature_collection['kurtosis'] = stats.kurtosis(x)

    for interval in feature_intervals['k_static']:
        feature_collection[f'{interval}_moments'] = stats.moment(x, interval)

    feature_collection['median'] = statistics.median(x)

    feature_collection['skewness'] = stats.skew(x)

    for interval in [1000, 5000, 10000, 5, 10, 50, 100, 500]:
        feature_collection[f'{interval}_correlation'] = feature_calculators.autocorrelation(x, interval)

    for interval in [50, 10, 100, 20]:
        feature_collection[f'{interval}_peak_number'] = feature_calculators.number_peaks(x, interval)

    # geometric and harmonic means
    x_val = x[x.to_numpy().nonzero()[0]]
    feature_collection['geometric_mean'] = stats.gmean(np.abs(x_val))
    feature_collection['harmonic_mean'] = stats.hmean(np.abs(x_val))

    # basic stats
    feature_collection['mean'] = x.mean()
    feature_collection['std'] = x.std()
    feature_collection['max'] = max(x)
    feature_collection['min'] = min(x)

    # basic stats on absolute values
    feature_collection['mean_change_abs'] = (np.diff(x)).mean()
    feature_collection['abs_max'] = max(np.abs(x))
    feature_collection['abs_mean'] = np.mean(np.abs(x))
    feature_collection['abs_std'] = np.abs(x).std()

    percentile_divisions = [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]

    for p in percentile_divisions:
        feature_collection[f'{p}th_abs_percentile'] = np.percentile(np.abs(x), p)
        feature_collection[f'{p}th_percentile'] = np.percentile(x, p)

    feature_collection['maximum_absoluteMinimum_ratio'] = max(x) / np.abs(min(x))
    feature_collection['diff_maximum_and_minimum'] = max(x) - np.abs(min(x))
    feature_collection['x_sum'] = x.sum()
    feature_collection['count_x_greater_than_500_BIG'] = len(x[np.abs(x) > 500])

    feature_collection['max_to_min'] = x.max() / np.abs(x.min())
    feature_collection['max_to_min_diff'] = x.max() - np.abs(x.min())
    feature_collection['count_big'] = len(x[np.abs(x) > 500])
    feature_collection['sum'] = x.sum()

    feature_collection['valid_mean_change_rate'] = change_rate_calculation(x)

    # calc_change_rate on slices of data
    for slice, movement_direction in product([50000, 10000, 1000], ['last', 'first']):
        if movement_direction == 'last':
            x_sliced = x[-slice:]
            feature_collection[f'from_{movement_direction}_slice_{slice}_valid_mean_change_rate'] = change_rate_calculation(x_sliced)
        elif movement_direction == 'first':
            x_sliced = x[:slice]
            feature_collection[f'from_{movement_direction}_slice_{slice}_valid_mean_change_rate'] = change_rate_calculation(x_sliced)

    for slice_length, direction in product([50000, 10000, 1000], ['last', 'first']):
        if direction == 'first':
            feature_collection[f'mean_change_rate_{direction}_{slice_length}'] = change_rate_calculation(x[:slice_length])
        elif direction == 'last':
            feature_collection[f'mean_change_rate_{direction}_{slice_length}'] = change_rate_calculation(x[-slice_length:])

    feature_collection['linear_trend'] = trend_adding_feature(x)
    feature_collection['absolute_linear_trend'] = trend_adding_feature(x, absolute=True)

    for slice, threshold_limit in product([50000, 100000, 150000], [5, 10, 20, 50, 100]):
        x_sliced = np.abs(x[-slice:])
        feature_collection[f'count_{slice}_greater_than_threshold_{threshold_limit}'] = (x_sliced > threshold_limit).sum()
        feature_collection[f'count_{slice}_less_than_threshold_{threshold_limit}'] = (x_sliced < threshold_limit).sum()

    # aggregations on various slices of data
    for type_of_aggregation, movement_direction, slice in product(['std', 'mean', 'max', 'min'], ['last', 'first'], [50000, 10000, 1000]):
        if movement_direction == 'last':
            feature_collection[f'from_{movement_direction}_slice_{slice}_typeOfAggregation{type_of_aggregation}'] = pd.DataFrame(x[-slice:]).agg(type_of_aggregation)[0]
        elif movement_direction == 'first':
            feature_collection[f'from_{movement_direction}_slice_{slice}_typeOfAggregation{type_of_aggregation}'] = pd.DataFrame(x[:slice]).agg(type_of_aggregation)[0]

    return feature_collection
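A self-contained sketch of the rolling-std percentile pattern used in generate_features; the window sizes and percentiles below are a small illustrative subset, not the full grid from the snippet:

import numpy as np
import pandas as pd

x = pd.Series(np.random.default_rng(4).normal(size=20000))
features = {}
for window in (10, 100, 1000):
    roll_std = x.rolling(window).std().dropna().values
    for p in (5, 50, 95):
        features[f'{window}_{p}_standard_percentile'] = np.percentile(roll_std, p)
print(len(features), sorted(features)[:3])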
Exemple #28
0
def compute_standard_features_block(xc, seg_id, X, fs, prefix=''):
    
    # Generic stats
    X.loc[seg_id, prefix + 'mean'] = xc.mean()
    X.loc[seg_id, prefix + 'std'] = xc.std()
    X.loc[seg_id, prefix + 'max'] = xc.max()
    X.loc[seg_id, prefix + 'min'] = xc.min()
    X.loc[seg_id, prefix + 'hmean'] = stats.hmean(np.abs(xc[np.nonzero(xc)[0]]))
    X.loc[seg_id, prefix + 'gmean'] = stats.gmean(np.abs(xc[np.nonzero(xc)[0]])) 
    X.loc[seg_id, prefix + 'mad'] = xc.mad()
    X.loc[seg_id, prefix + 'kurt'] = xc.kurtosis()
    X.loc[seg_id, prefix + 'skew'] = xc.skew()
    X.loc[seg_id, prefix + 'med'] = xc.median()

    for p in [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]:
        X.loc[seg_id, prefix + f'percentile_{p}'] = np.percentile(xc, p)
        X.loc[seg_id, prefix + f'abs_percentile_{p}'] = np.percentile(np.abs(xc), p)

    X.loc[seg_id, prefix + 'num_crossing_0'] = feature_calculators.number_crossing_m(xc, 0)

    for p in [95,99]:
        X.loc[seg_id, prefix + f'binned_entropy_{p}'] = feature_calculators.binned_entropy(xc, p)

    # Andrew stats
    X.loc[seg_id, prefix + 'mean_diff'] = np.mean(np.diff(xc))
    X.loc[seg_id, prefix + 'mean_abs_diff'] = np.mean(np.abs(np.diff(xc)))
    X.loc[seg_id, prefix + 'mean_change_rate'] = change_rate(xc, method='original')
    X.loc[seg_id, prefix + 'mean_change_rate_v2'] = change_rate(xc, method='modified')
    X.loc[seg_id, prefix + 'abs_max'] = np.abs(xc).max()
    X.loc[seg_id, prefix + 'abs_min'] = np.abs(xc).min()
    X.loc[seg_id, prefix + 'mean_change_abs'] = np.mean(np.diff(xc))

    # Classical stats by segment
    for agg_type, slice_length, direction in product(['std', 'min', 'max', 'mean'], [1000, 10000, 50000], ['first', 'last']):
        if direction == 'first':
            X.loc[seg_id, prefix + f'{agg_type}_{direction}_{slice_length}'] = xc[:slice_length].agg(agg_type)
        elif direction == 'last':
            X.loc[seg_id, prefix + f'{agg_type}_{direction}_{slice_length}'] = xc[-slice_length:].agg(agg_type)

    X.loc[seg_id, prefix + 'avg_first_50000'] = xc[:50000].mean()
    X.loc[seg_id, prefix + 'avg_last_50000'] = xc[-50000:].mean()
    X.loc[seg_id, prefix + 'avg_first_10000'] = xc[:10000].mean()
    X.loc[seg_id, prefix + 'avg_last_10000'] = xc[-10000:].mean()

    # k-statistic and moments
    for i in range(1, 5):
        X.loc[seg_id, prefix + f'kstat_{i}'] = stats.kstat(xc, i)
        X.loc[seg_id, prefix + f'moment_{i}'] = stats.moment(xc, i)

    for i in [1, 2]:
        X.loc[seg_id, prefix + f'kstatvar_{i}'] = stats.kstatvar(xc, i)

    X.loc[seg_id, prefix + 'range_minf_m4000'] = feature_calculators.range_count(xc, -np.inf, -4000)
    X.loc[seg_id, prefix + 'range_p4000_pinf'] = feature_calculators.range_count(xc, 4000, np.inf)
    for i, j in zip(borders, borders[1:]):
        X.loc[seg_id, prefix + f'range_{i}_{j}'] = feature_calculators.range_count(xc, i, j)
    X.loc[seg_id, prefix + 'ratio_unique_values'] = feature_calculators.ratio_value_number_to_time_series_length(xc)

    X.loc[seg_id, prefix + 'max_to_min'] = xc.max() / np.abs(xc.min())
    X.loc[seg_id, prefix + 'max_to_min_diff'] = xc.max() - np.abs(xc.min())
    X.loc[seg_id, prefix + 'count_big'] = len(xc[np.abs(xc) > 500])
    X.loc[seg_id, prefix + 'sum'] = xc.sum()

    # calc_change_rate on slices of data
    for slice_length, direction in product([1000, 10000, 50000], ['first', 'last']):
        if direction == 'first':
            X.loc[seg_id, prefix + f'mean_change_rate_{direction}_{slice_length}'] = change_rate(xc[:slice_length], method='original')
            X.loc[seg_id, prefix + f'mean_change_rate_{direction}_{slice_length}_v2'] = change_rate(xc[:slice_length], method='modified')
        elif direction == 'last':
            X.loc[seg_id, prefix + f'mean_change_rate_{direction}_{slice_length}'] = change_rate(xc[-slice_length:], method='original')
            X.loc[seg_id, prefix + f'mean_change_rate_{direction}_{slice_length}_v2'] = change_rate(xc[-slice_length:], method='modified')

    X.loc[seg_id, prefix + 'q95'] = np.quantile(xc, 0.95)
    X.loc[seg_id, prefix + 'q99'] = np.quantile(xc, 0.99)
    X.loc[seg_id, prefix + 'q05'] = np.quantile(xc, 0.05)
    X.loc[seg_id, prefix + 'q01'] = np.quantile(xc, 0.01)

    X.loc[seg_id, prefix + 'abs_q95'] = np.quantile(np.abs(xc), 0.95)
    X.loc[seg_id, prefix + 'abs_q99'] = np.quantile(np.abs(xc), 0.99)
    X.loc[seg_id, prefix + 'abs_q05'] = np.quantile(np.abs(xc), 0.05)
    X.loc[seg_id, prefix + 'abs_q01'] = np.quantile(np.abs(xc), 0.01)

    X.loc[seg_id, prefix + 'trend'] = add_trend_feature(xc)
    X.loc[seg_id, prefix + 'abs_trend'] = add_trend_feature(xc, abs_values=True)
    X.loc[seg_id, prefix + 'abs_mean'] = np.abs(xc).mean()
    X.loc[seg_id, prefix + 'abs_std'] = np.abs(xc).std()

    X.loc[seg_id, prefix + 'Hilbert_mean'] = np.abs(hilbert(xc)).mean()
    X.loc[seg_id, prefix + 'Hann_window_mean'] = (convolve(xc, hann(150), mode='same') / sum(hann(150))).mean()
    for hw in [50, 150, 1500, 15000]:
        X.loc[seg_id, prefix + f'Hann_window_mean_{hw}'] = (convolve(xc, hann(hw), mode='same') / sum(hann(hw))).mean()

    sta_lta_method = 'original'
    classic_sta_lta1 = sta_lta_ratio(xc, 500, 10000, method=sta_lta_method)
    classic_sta_lta2 = sta_lta_ratio(xc, 5000, 100000, method=sta_lta_method)
    classic_sta_lta3 = sta_lta_ratio(xc, 3333, 6666, method=sta_lta_method)
    classic_sta_lta4 = sta_lta_ratio(xc, 10000, 25000, method=sta_lta_method)
    classic_sta_lta5 = sta_lta_ratio(xc, 50, 1000, method=sta_lta_method)
    classic_sta_lta6 = sta_lta_ratio(xc, 100, 5000, method=sta_lta_method)
    classic_sta_lta7 = sta_lta_ratio(xc, 333, 666, method=sta_lta_method)
    classic_sta_lta8 = sta_lta_ratio(xc, 4000, 10000, method=sta_lta_method)

    X.loc[seg_id, prefix + 'classic_sta_lta1_mean'] = classic_sta_lta1.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta2_mean'] = classic_sta_lta2.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta3_mean'] = classic_sta_lta3.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta4_mean'] = classic_sta_lta4.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta5_mean'] = classic_sta_lta5.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta6_mean'] = classic_sta_lta6.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta7_mean'] = classic_sta_lta7.mean()
    X.loc[seg_id, prefix + 'classic_sta_lta8_mean'] = classic_sta_lta8.mean()

    X.loc[seg_id, prefix + 'classic_sta_lta1_q95'] = np.quantile(classic_sta_lta1, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta2_q95'] = np.quantile(classic_sta_lta2, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta3_q95'] = np.quantile(classic_sta_lta3, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta4_q95'] = np.quantile(classic_sta_lta4, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta5_q95'] = np.quantile(classic_sta_lta5, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta6_q95'] = np.quantile(classic_sta_lta6, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta7_q95'] = np.quantile(classic_sta_lta7, 0.95)
    X.loc[seg_id, prefix + 'classic_sta_lta8_q95'] = np.quantile(classic_sta_lta8, 0.95)

    X.loc[seg_id, prefix + 'classic_sta_lta1_q05'] = np.quantile(classic_sta_lta1, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta2_q05'] = np.quantile(classic_sta_lta2, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta3_q05'] = np.quantile(classic_sta_lta3, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta4_q05'] = np.quantile(classic_sta_lta4, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta5_q05'] = np.quantile(classic_sta_lta5, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta6_q05'] = np.quantile(classic_sta_lta6, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta7_q05'] = np.quantile(classic_sta_lta7, 0.05)
    X.loc[seg_id, prefix + 'classic_sta_lta8_q05'] = np.quantile(classic_sta_lta8, 0.05)

    sta_lta_method = 'modified'
    classic_sta_lta1 = sta_lta_ratio(xc, 500, 10000, method=sta_lta_method)
    classic_sta_lta2 = sta_lta_ratio(xc, 5000, 100000, method=sta_lta_method)
    classic_sta_lta3 = sta_lta_ratio(xc, 3333, 6666, method=sta_lta_method)
    classic_sta_lta4 = sta_lta_ratio(xc, 10000, 25000, method=sta_lta_method)
    classic_sta_lta5 = sta_lta_ratio(xc, 50, 1000, method=sta_lta_method)
    classic_sta_lta6 = sta_lta_ratio(xc, 100, 5000, method=sta_lta_method)
    classic_sta_lta7 = sta_lta_ratio(xc, 333, 666, method=sta_lta_method)
    classic_sta_lta8 = sta_lta_ratio(xc, 4000, 10000, method=sta_lta_method)

    X.loc[seg_id, prefix + 'modified_sta_lta1_mean'] = classic_sta_lta1.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta2_mean'] = classic_sta_lta2.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta3_mean'] = classic_sta_lta3.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta4_mean'] = classic_sta_lta4.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta5_mean'] = classic_sta_lta5.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta6_mean'] = classic_sta_lta6.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta7_mean'] = classic_sta_lta7.mean()
    X.loc[seg_id, prefix + 'modified_sta_lta8_mean'] = classic_sta_lta8.mean()

    X.loc[seg_id, prefix + 'modified_sta_lta1_q95'] = np.quantile(classic_sta_lta1, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta2_q95'] = np.quantile(classic_sta_lta2, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta3_q95'] = np.quantile(classic_sta_lta3, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta4_q95'] = np.quantile(classic_sta_lta4, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta5_q95'] = np.quantile(classic_sta_lta5, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta6_q95'] = np.quantile(classic_sta_lta6, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta7_q95'] = np.quantile(classic_sta_lta7, 0.95)
    X.loc[seg_id, prefix + 'modified_sta_lta8_q95'] = np.quantile(classic_sta_lta8, 0.95)

    X.loc[seg_id, prefix + 'modified_sta_lta1_q05'] = np.quantile(classic_sta_lta1, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta2_q05'] = np.quantile(classic_sta_lta2, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta3_q05'] = np.quantile(classic_sta_lta3, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta4_q05'] = np.quantile(classic_sta_lta4, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta5_q05'] = np.quantile(classic_sta_lta5, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta6_q05'] = np.quantile(classic_sta_lta6, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta7_q05'] = np.quantile(classic_sta_lta7, 0.05)
    X.loc[seg_id, prefix + 'modified_sta_lta8_q05'] = np.quantile(classic_sta_lta8, 0.05)

    X.loc[seg_id, prefix + 'Moving_average_700_mean'] = xc.rolling(window=700).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'Moving_average_1500_mean'] = xc.rolling(window=1500).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'Moving_average_3000_mean'] = xc.rolling(window=3000).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'Moving_average_6000_mean'] = xc.rolling(window=6000).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'Moving_average_30000_mean'] = xc.rolling(window=30000).mean().mean(skipna=True)

    ewma = pd.Series.ewm
    X.loc[seg_id, prefix + 'exp_Moving_average_300_mean'] = ewma(xc, span=300).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_3000_mean'] = ewma(xc, span=3000).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_6000_mean'] = ewma(xc, span=6000).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_30000_mean'] = ewma(xc, span=30000).mean().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_50000_mean'] = ewma(xc, span=50000).mean().mean(skipna=True)

    X.loc[seg_id, prefix + 'exp_Moving_average_300_std'] = ewma(xc, span=300).mean().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_3000_std'] = ewma(xc, span=3000).mean().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_6000_std'] = ewma(xc, span=6000).mean().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_30000_std'] = ewma(xc, span=30000).mean().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_average_50000_std'] = ewma(xc, span=50000).mean().std(skipna=True)

    X.loc[seg_id, prefix + 'exp_Moving_std_300_mean'] = ewma(xc, span=300).std().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_3000_mean'] = ewma(xc, span=3000).std().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_6000_mean'] = ewma(xc, span=6000).std().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_30000_mean'] = ewma(xc, span=30000).std().mean(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_50000_mean'] = ewma(xc, span=50000).std().mean(skipna=True)
    
    X.loc[seg_id, prefix + 'exp_Moving_std_300_std'] = ewma(xc, span=300).std().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_3000_std'] = ewma(xc, span=3000).std().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_6000_std'] = ewma(xc, span=6000).std().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_30000_std'] = ewma(xc, span=30000).std().std(skipna=True)
    X.loc[seg_id, prefix + 'exp_Moving_std_50000_std'] = ewma(xc, span=50000).std().std(skipna=True)

    no_of_std = 2
    X.loc[seg_id, prefix + 'MA_700MA_std_mean'] = xc.rolling(window=700).std().mean()
    X.loc[seg_id, prefix + 'MA_700MA_BB_high_mean'] = (X.loc[seg_id, prefix + 'Moving_average_700_mean'] + no_of_std * X.loc[seg_id, prefix + 'MA_700MA_std_mean']).mean()
    X.loc[seg_id, prefix + 'MA_700MA_BB_low_mean'] = (X.loc[seg_id, prefix + 'Moving_average_700_mean'] - no_of_std * X.loc[seg_id, prefix + 'MA_700MA_std_mean']).mean()
    X.loc[seg_id, prefix + 'MA_400MA_std_mean'] = xc.rolling(window=400).std().mean()
    X.loc[seg_id, prefix + 'MA_400MA_BB_high_mean'] = (X.loc[seg_id, prefix + 'Moving_average_700_mean'] + no_of_std * X.loc[seg_id, prefix + 'MA_400MA_std_mean']).mean()
    X.loc[seg_id, prefix + 'MA_400MA_BB_low_mean'] = (X.loc[seg_id, prefix + 'Moving_average_700_mean'] - no_of_std * X.loc[seg_id, prefix + 'MA_400MA_std_mean']).mean()
    X.loc[seg_id, prefix + 'MA_1000MA_std_mean'] = xc.rolling(window=1000).std().mean()

    X.loc[seg_id, prefix + 'iqr'] = np.subtract(*np.percentile(xc, [75, 25]))
    X.loc[seg_id, prefix + 'iqr1'] = np.subtract(*np.percentile(xc, [95, 5]))

    X.loc[seg_id, prefix + 'q999'] = np.quantile(xc, 0.999)
    X.loc[seg_id, prefix + 'q001'] = np.quantile(xc, 0.001)
    X.loc[seg_id, prefix + 'ave10'] = stats.trim_mean(xc, 0.1)

    X.loc[seg_id, prefix + 'freq_cross_first_50000'] = freq_from_crossings(xc.values[:50000], fs)
    X.loc[seg_id, prefix + 'freq_cross_last_50000'] = freq_from_crossings(xc.values[-50000:], fs)
    X.loc[seg_id, prefix + 'freq_cross_first_10000'] = freq_from_crossings(xc.values[:10000], fs)
    X.loc[seg_id, prefix + 'freq_cross_last_10000'] = freq_from_crossings(xc.values[-10000:], fs)

    for peak in [10, 20, 50, 100]:
        X.loc[seg_id, prefix + f'num_peaks_{peak}'] = feature_calculators.number_peaks(xc, peak)

    for c in [1, 5, 10, 50, 100]:
        X.loc[seg_id, prefix + f'spkt_welch_density_{c}'] = list(feature_calculators.spkt_welch_density(xc, [{'coeff': c}]))[0][1]
        X.loc[seg_id, prefix + f'time_rev_asym_stat_{c}'] = feature_calculators.time_reversal_asymmetry_statistic(xc, c) 

    for autocorr_lag in [5, 10, 50, 100, 500, 1000, 5000, 10000]:
        X.loc[seg_id, prefix + f'autocorrelation_{autocorr_lag}'] = feature_calculators.autocorrelation(xc, autocorr_lag)
        X.loc[seg_id, prefix + f'c3_{autocorr_lag}'] = feature_calculators.c3(xc, autocorr_lag)

    for windows in [10, 50, 100, 500, 1000, 10000]:
        x_roll_std = xc.rolling(windows).std().dropna().values
        x_roll_mean = xc.rolling(windows).mean().dropna().values

        for p in [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]:
            X.loc[seg_id, prefix + f'percentile_roll_std_{p}_window_{windows}'] = np.percentile(x_roll_std, p)
            X.loc[seg_id, prefix + f'percentile_roll_mean_{p}_window_{windows}'] = np.percentile(x_roll_mean, p)

        X.loc[seg_id, prefix + 'ave_roll_std_' + str(windows)] = x_roll_std.mean()
        X.loc[seg_id, prefix + 'std_roll_std_' + str(windows)] = x_roll_std.std()
        X.loc[seg_id, prefix + 'max_roll_std_' + str(windows)] = x_roll_std.max()
        X.loc[seg_id, prefix + 'min_roll_std_' + str(windows)] = x_roll_std.min()
        X.loc[seg_id, prefix + 'q01_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.01)
        X.loc[seg_id, prefix + 'q05_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.05)
        X.loc[seg_id, prefix + 'q95_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.95)
        X.loc[seg_id, prefix + 'q99_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.99)
        X.loc[seg_id, prefix + 'av_change_abs_roll_std_' + str(windows)] = np.mean(np.abs(np.diff(x_roll_std)))
        X.loc[seg_id, prefix + 'av_change_rate_roll_std_' + str(windows)] = change_rate(pd.Series(x_roll_std), method='original')
        X.loc[seg_id, prefix + 'av_change_rate_roll_std_' + str(windows) + 'v2'] = change_rate(pd.Series(x_roll_std), method='modified')
        X.loc[seg_id, prefix + 'abs_max_roll_std_' + str(windows)] = np.abs(x_roll_std).max()
        X.loc[seg_id, prefix + 'ave_roll_mean_' + str(windows)] = x_roll_mean.mean()
        X.loc[seg_id, prefix + 'std_roll_mean_' + str(windows)] = x_roll_mean.std()
        X.loc[seg_id, prefix + 'max_roll_mean_' + str(windows)] = x_roll_mean.max()
        X.loc[seg_id, prefix + 'min_roll_mean_' + str(windows)] = x_roll_mean.min()
        X.loc[seg_id, prefix + 'q01_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.01)
        X.loc[seg_id, prefix + 'q05_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.05)
        X.loc[seg_id, prefix + 'q95_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.95)
        X.loc[seg_id, prefix + 'q99_roll_mean_' + str(windows)] = np.quantile(x_roll_mean, 0.99)
        X.loc[seg_id, prefix + 'av_change_abs_roll_mean_' + str(windows)] = np.mean(np.abs(np.diff(x_roll_mean)))
        X.loc[seg_id, prefix + 'av_change_rate_roll_mean_' + str(windows)] = change_rate(pd.Series(x_roll_mean), method='original')
        X.loc[seg_id, prefix + 'av_change_rate_roll_mean_' + str(windows) + '_v2'] = change_rate(pd.Series(x_roll_mean), method='modified')
        X.loc[seg_id, prefix + 'abs_max_roll_mean_' + str(windows)] = np.abs(x_roll_mean).max()

    for p in [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]:
        X.loc[seg_id, prefix + f'percentile_roll_std_{p}'] = X.loc[seg_id, prefix + f'percentile_roll_std_{p}_window_10000']
        X.loc[seg_id, prefix + f'percentile_roll_mean_{p}'] = X.loc[seg_id, prefix + f'percentile_roll_mean_{p}_window_10000']
    def features(self, x, y, seg_id):
        feature_dict = dict()
        feature_dict['target'] = y
        feature_dict['seg_id'] = seg_id

        # create features here

        # lists with parameters to iterate over them
        percentiles = [
            1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99
        ]
        hann_windows = [50, 150, 1500, 15000]
        spans = [300, 3000, 30000, 50000]
        windows = [10, 50, 100, 500, 1000, 10000]
        borders = list(range(-4000, 4001, 1000))
        peaks = [10, 20, 50, 100]
        coefs = [1, 5, 10, 50, 100]
        lags = [10, 100, 1000, 10000]
        autocorr_lags = [5, 10, 50, 100, 500, 1000, 5000, 10000]

        # basic stats
        feature_dict['mean'] = x.mean()
        feature_dict['std'] = x.std()
        feature_dict['max'] = x.max()
        feature_dict['min'] = x.min()

        # basic stats on absolute values
        feature_dict['mean_change_abs'] = np.mean(np.diff(x))
        feature_dict['abs_max'] = np.abs(x).max()
        feature_dict['abs_mean'] = np.abs(x).mean()
        feature_dict['abs_std'] = np.abs(x).std()

        # geometric and harmonic means
        feature_dict['hmean'] = stats.hmean(np.abs(x[np.nonzero(x)[0]]))
        feature_dict['gmean'] = stats.gmean(np.abs(x[np.nonzero(x)[0]]))

        # k-statistic and moments
        for i in range(1, 5):
            feature_dict[f'kstat_{i}'] = stats.kstat(x, i)
            feature_dict[f'moment_{i}'] = stats.moment(x, i)

        for i in [1, 2]:
            feature_dict[f'kstatvar_{i}'] = stats.kstatvar(x, i)

        # aggregations on various slices of data
        for agg_type, slice_length, direction in product(
            ['std', 'min', 'max', 'mean'], [1000, 10000, 50000],
            ['first', 'last']):
            if direction == 'first':
                feature_dict[
                    f'{agg_type}_{direction}_{slice_length}'] = x[:
                                                                  slice_length].agg(
                                                                      agg_type)
            elif direction == 'last':
                feature_dict[f'{agg_type}_{direction}_{slice_length}'] = x[
                    -slice_length:].agg(agg_type)

        feature_dict['max_to_min'] = x.max() / np.abs(x.min())
        feature_dict['max_to_min_diff'] = x.max() - np.abs(x.min())
        feature_dict['count_big'] = len(x[np.abs(x) > 500])
        feature_dict['sum'] = x.sum()

        feature_dict['mean_change_rate'] = calc_change_rate(x)
        # calc_change_rate on slices of data
        for slice_length, direction in product([1000, 10000, 50000],
                                               ['first', 'last']):
            if direction == 'first':
                feature_dict[
                    f'mean_change_rate_{direction}_{slice_length}'] = calc_change_rate(
                        x[:slice_length])
            elif direction == 'last':
                feature_dict[
                    f'mean_change_rate_{direction}_{slice_length}'] = calc_change_rate(
                        x[-slice_length:])

        # percentiles on original and absolute values
        for p in percentiles:
            feature_dict[f'percentile_{p}'] = np.percentile(x, p)
            feature_dict[f'abs_percentile_{p}'] = np.percentile(np.abs(x), p)

        feature_dict['trend'] = add_trend_feature(x)
        feature_dict['abs_trend'] = add_trend_feature(x, abs_values=True)

        feature_dict['mad'] = x.mad()
        feature_dict['kurt'] = x.kurtosis()
        feature_dict['skew'] = x.skew()
        feature_dict['med'] = x.median()

        feature_dict['Hilbert_mean'] = np.abs(hilbert(x)).mean()

        for hw in hann_windows:
            feature_dict[f'Hann_window_mean_{hw}'] = (
                convolve(x, hann(hw), mode='same') / sum(hann(hw))).mean()

        feature_dict['classic_sta_lta1_mean'] = classic_sta_lta(x, 500,
                                                                10000).mean()
        feature_dict['classic_sta_lta2_mean'] = classic_sta_lta(
            x, 5000, 100000).mean()
        feature_dict['classic_sta_lta3_mean'] = classic_sta_lta(x, 3333,
                                                                6666).mean()
        feature_dict['classic_sta_lta4_mean'] = classic_sta_lta(
            x, 10000, 25000).mean()
        feature_dict['classic_sta_lta5_mean'] = classic_sta_lta(x, 50,
                                                                1000).mean()
        feature_dict['classic_sta_lta6_mean'] = classic_sta_lta(x, 100,
                                                                5000).mean()
        feature_dict['classic_sta_lta7_mean'] = classic_sta_lta(x, 333,
                                                                666).mean()
        feature_dict['classic_sta_lta8_mean'] = classic_sta_lta(
            x, 4000, 10000).mean()

        # exponential rolling statistics
        ewma = pd.Series.ewm
        for s in spans:
            feature_dict[f'exp_Moving_average_{s}_mean'] = (ewma(
                x, span=s).mean(skipna=True)).mean(skipna=True)
            feature_dict[f'exp_Moving_average_{s}_std'] = (ewma(
                x, span=s).mean(skipna=True)).std(skipna=True)
            feature_dict[f'exp_Moving_std_{s}_mean'] = (ewma(
                x, span=s).std(skipna=True)).mean(skipna=True)
            feature_dict[f'exp_Moving_std_{s}_std'] = (ewma(
                x, span=s).std(skipna=True)).std(skipna=True)

        feature_dict['iqr'] = np.subtract(*np.percentile(x, [75, 25]))
        feature_dict['iqr1'] = np.subtract(*np.percentile(x, [95, 5]))
        feature_dict['ave10'] = stats.trim_mean(x, 0.1)

        for slice_length, threshold in product([50000, 100000, 150000],
                                               [5, 10, 20, 50, 100]):
            feature_dict[f'count_big_{slice_length}_threshold_{threshold}'] = (
                np.abs(x[-slice_length:]) > threshold).sum()
            feature_dict[
                f'count_big_{slice_length}_less_threshold_{threshold}'] = (
                    np.abs(x[-slice_length:]) < threshold).sum()

        # tsfresh features take too long to calculate, so I comment them out for now

#         feature_dict['abs_energy'] = feature_calculators.abs_energy(x)
#         feature_dict['abs_sum_of_changes'] = feature_calculators.absolute_sum_of_changes(x)
#         feature_dict['count_above_mean'] = feature_calculators.count_above_mean(x)
#         feature_dict['count_below_mean'] = feature_calculators.count_below_mean(x)
#         feature_dict['mean_abs_change'] = feature_calculators.mean_abs_change(x)
#         feature_dict['mean_change'] = feature_calculators.mean_change(x)
#         feature_dict['var_larger_than_std_dev'] = feature_calculators.variance_larger_than_standard_deviation(x)
        feature_dict['range_minf_m4000'] = feature_calculators.range_count(
            x, -np.inf, -4000)
        feature_dict['range_p4000_pinf'] = feature_calculators.range_count(
            x, 4000, np.inf)

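        # `borders` (defined elsewhere in the kernel) is an increasing list of
        # bin edges; each consecutive pair (i, j) below counts the samples that
        # fall in the half-open interval [i, j) via range_count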
        for i, j in zip(borders, borders[1:]):
            feature_dict[f'range_{i}_{j}'] = feature_calculators.range_count(
                x, i, j)

#         feature_dict['ratio_unique_values'] = feature_calculators.ratio_value_number_to_time_series_length(x)
#         feature_dict['first_loc_min'] = feature_calculators.first_location_of_minimum(x)
#         feature_dict['first_loc_max'] = feature_calculators.first_location_of_maximum(x)
#         feature_dict['last_loc_min'] = feature_calculators.last_location_of_minimum(x)
#         feature_dict['last_loc_max'] = feature_calculators.last_location_of_maximum(x)

#         for lag in lags:
#             feature_dict[f'time_rev_asym_stat_{lag}'] = feature_calculators.time_reversal_asymmetry_statistic(x, lag)
        for autocorr_lag in autocorr_lags:
            feature_dict[
                f'autocorrelation_{autocorr_lag}'] = feature_calculators.autocorrelation(
                    x, autocorr_lag)
            feature_dict[f'c3_{autocorr_lag}'] = feature_calculators.c3(
                x, autocorr_lag)


#         for coeff, attr in product([1, 2, 3, 4, 5], ['real', 'imag', 'angle']):
#             feature_dict[f'fft_{coeff}_{attr}'] = list(feature_calculators.fft_coefficient(x, [{'coeff': coeff, 'attr': attr}]))[0][1]

#         feature_dict['long_strk_above_mean'] = feature_calculators.longest_strike_above_mean(x)
#         feature_dict['long_strk_below_mean'] = feature_calculators.longest_strike_below_mean(x)
#         feature_dict['cid_ce_0'] = feature_calculators.cid_ce(x, 0)
#         feature_dict['cid_ce_1'] = feature_calculators.cid_ce(x, 1)

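        # note: the percentile list is reused here as the max_bins argument of
        # binned_entropy, i.e. p = 50 means "entropy over 50 equal-width bins",
        # not the 50th percentile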
        for p in percentiles:
            feature_dict[
                f'binned_entropy_{p}'] = feature_calculators.binned_entropy(
                    x, p)

        feature_dict['num_crossing_0'] = feature_calculators.number_crossing_m(
            x, 0)

        for peak in peaks:
            feature_dict[
                f'num_peaks_{peak}'] = feature_calculators.number_peaks(
                    x, peak)

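        # spkt_welch_density returns (name, value) pairs for the requested Welch
        # spectral coefficients, hence the [0][1] below to pull out the single
        # value; the same coefficient doubles as the lag passed to
        # time_reversal_asymmetry_statistic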
        for c in coefs:
            feature_dict[f'spkt_welch_density_{c}'] = list(
                feature_calculators.spkt_welch_density(x, [{
                    'coeff': c
                }]))[0][1]
            feature_dict[
                f'time_rev_asym_stat_{c}'] = feature_calculators.time_reversal_asymmetry_statistic(
                    x, c)

        # statistics on rolling windows of various sizes
        for w in windows:
            x_roll_std = x.rolling(w).std().dropna().values
            x_roll_mean = x.rolling(w).mean().dropna().values

            feature_dict[f'ave_roll_std_{w}'] = x_roll_std.mean()
            feature_dict[f'std_roll_std_{w}'] = x_roll_std.std()
            feature_dict[f'max_roll_std_{w}'] = x_roll_std.max()
            feature_dict[f'min_roll_std_{w}'] = x_roll_std.min()

            for p in percentiles:
                feature_dict[
                    f'percentile_roll_std_{p}_window_{w}'] = np.percentile(
                        x_roll_std, p)

            feature_dict[f'av_change_abs_roll_std_{w}'] = np.mean(
                np.diff(x_roll_std))
            # mean relative change of the rolling std; the original np.nonzero
            # version averaged index positions instead of the change rates
            roll_std_rate = np.diff(x_roll_std) / x_roll_std[:-1]
            roll_std_rate = roll_std_rate[np.isfinite(roll_std_rate)]
            feature_dict[f'av_change_rate_roll_std_{w}'] = (
                roll_std_rate.mean() if roll_std_rate.size else np.nan)
            feature_dict[f'abs_max_roll_std_{w}'] = np.abs(x_roll_std).max()

            feature_dict[f'ave_roll_mean_{w}'] = x_roll_mean.mean()
            feature_dict[f'std_roll_mean_{w}'] = x_roll_mean.std()
            feature_dict[f'max_roll_mean_{w}'] = x_roll_mean.max()
            feature_dict[f'min_roll_mean_{w}'] = x_roll_mean.min()

            for p in percentiles:
                feature_dict[
                    f'percentile_roll_mean_{p}_window_{w}'] = np.percentile(
                        x_roll_mean, p)

            feature_dict[f'av_change_abs_roll_mean_{w}'] = np.mean(
                np.diff(x_roll_mean))
            # same fix for the rolling mean: average the finite change rates
            roll_mean_rate = np.diff(x_roll_mean) / x_roll_mean[:-1]
            roll_mean_rate = roll_mean_rate[np.isfinite(roll_mean_rate)]
            feature_dict[f'av_change_rate_roll_mean_{w}'] = (
                roll_mean_rate.mean() if roll_mean_rate.size else np.nan)
            feature_dict[f'abs_max_roll_mean_{w}'] = np.abs(x_roll_mean).max()

        return feature_dict
Exemple #30
0
def feature_extract(X_train,
                    i,
                    X_element,
                    y_train=None,
                    y_element=None,
                    is_TrainDataSet=True):
    if is_TrainDataSet:
        y_train.loc[i, 'time_to_failure'] = y_element

    X_element = X_element.reshape(-1)

    xcdm = X_element - np.mean(X_element)

    b, a = des_bw_filter_lp(cutoff=18000)
    xcz = sg.lfilter(b, a, xcdm)
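    # des_bw_filter_lp is defined earlier in this kernel (not shown); a minimal
    # sketch, assuming a low-order Butterworth low-pass designed with
    # scipy.signal.butter against a Nyquist frequency supplied by the caller,
    # would be:
    def _des_bw_filter_lp_sketch(cutoff, nyq, order=4):
        b, a = sg.butter(order, cutoff / nyq, btype='low')  # normalized cutoff in (0, 1)
        return b, a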

    zc = np.fft.fft(xcz)
    zc = zc[:MAX_FREQ_IDX]

    # FFT transform values
    realFFT = np.real(zc)
    imagFFT = np.imag(zc)

    freq_bands = list(range(0, MAX_FREQ_IDX, FREQ_STEP))
    magFFT = np.sqrt(realFFT**2 + imagFFT**2)
    phzFFT = np.arctan(imagFFT / realFFT)
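    # imagFFT / realFFT yields +/-inf where realFFT == 0 and NaN for 0/0;
    # np.arctan already maps +/-inf to +/-pi/2, so the clamps below are a
    # belt-and-braces guard, and nan_to_num zeroes any remaining NaN phases
    # (np.arctan2(imagFFT, realFFT) would avoid the issue altogether, but is
    # not what this example uses)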
    phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
    phzFFT[phzFFT == np.inf] = np.pi / 2.0
    phzFFT = np.nan_to_num(phzFFT)

    for freq in freq_bands:
        X_train.loc[i, 'FFT_Mag_01q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.01)
        X_train.loc[i, 'FFT_Mag_10q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.1)
        X_train.loc[i, 'FFT_Mag_90q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.9)
        X_train.loc[i, 'FFT_Mag_99q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.99)
        X_train.loc[i, 'FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq:freq +
                                                                 FREQ_STEP])
        X_train.loc[i, 'FFT_Mag_std%d' % freq] = np.std(magFFT[freq:freq +
                                                               FREQ_STEP])
        X_train.loc[i, 'FFT_Mag_max%d' % freq] = np.max(magFFT[freq:freq +
                                                               FREQ_STEP])

        X_train.loc[i, 'FFT_Phz_mean%d' % freq] = np.mean(phzFFT[freq:freq +
                                                                 FREQ_STEP])
        X_train.loc[i, 'FFT_Phz_std%d' % freq] = np.std(phzFFT[freq:freq +
                                                               FREQ_STEP])

    X_train.loc[i, 'FFT_Rmean'] = realFFT.mean()
    X_train.loc[i, 'FFT_Rstd'] = realFFT.std()
    X_train.loc[i, 'FFT_Rmax'] = realFFT.max()
    X_train.loc[i, 'FFT_Rmin'] = realFFT.min()
    X_train.loc[i, 'FFT_Imean'] = imagFFT.mean()
    X_train.loc[i, 'FFT_Istd'] = imagFFT.std()
    X_train.loc[i, 'FFT_Imax'] = imagFFT.max()
    X_train.loc[i, 'FFT_Imin'] = imagFFT.min()

    X_train.loc[i, 'FFT_Rmean_first_6000'] = realFFT[:6000].mean()
    X_train.loc[i, 'FFT_Rstd_first_6000'] = realFFT[:6000].std()
    X_train.loc[i, 'FFT_Rmax_first_6000'] = realFFT[:6000].max()
    X_train.loc[i, 'FFT_Rmin_first_6000'] = realFFT[:6000].min()
    X_train.loc[i, 'FFT_Rmean_first_18000'] = realFFT[:18000].mean()
    X_train.loc[i, 'FFT_Rstd_first_18000'] = realFFT[:18000].std()
    X_train.loc[i, 'FFT_Rmax_first_18000'] = realFFT[:18000].max()
    X_train.loc[i, 'FFT_Rmin_first_18000'] = realFFT[:18000].min()

    peaks = [10, 20, 50, 100]
    for peak in peaks:
        X_train.loc[
            i, 'num_peaks_{}'.format(peak)] = feature_calculators.number_peaks(
                X_element, peak)

    autocorr_lags = [5, 10, 50, 100, 500, 1000, 5000, 10000]
    for autocorr_lag in autocorr_lags:
        X_train.loc[i, 'autocorrelation_{}'.format(
            autocorr_lag)] = feature_calculators.autocorrelation(
                X_element, autocorr_lag)
        X_train.loc[i, 'c3_{}'.format(autocorr_lag)] = feature_calculators.c3(
            X_element, autocorr_lag)

    X_train.loc[i, 'ave'] = X_element.mean()
    X_train.loc[i, 'std'] = X_element.std()
    X_train.loc[i, 'max'] = X_element.max()
    X_train.loc[i, 'min'] = X_element.min()

    # geometric and harmonic means (zeros excluded so hmean/gmean stay defined)
    X_train.loc[i, 'hmean'] = stats.hmean(
        np.abs(X_element[np.nonzero(X_element)[0]]))
    X_train.loc[i, 'gmean'] = stats.gmean(
        np.abs(X_element[np.nonzero(X_element)[0]]))

    # nth k-statistic and nth moment
    for ii in range(1, 5):
        X_train.loc[i, 'kstat_{}'.format(ii)] = stats.kstat(X_element, ii)
        X_train.loc[i, 'moment_{}'.format(ii)] = stats.moment(X_element, ii)

    for ii in [1, 2]:
        # .format must be called on the string, not written inside the literal;
        # the original wrote both values to a column literally named
        # 'kstatvar_{}.format(ii)'
        X_train.loc[i, 'kstatvar_{}'.format(ii)] = stats.kstatvar(X_element, ii)

    X_train.loc[i, 'max_to_min'] = X_element.max() / np.abs(X_element.min())
    X_train.loc[i,
                'max_to_min_diff'] = X_element.max() - np.abs(X_element.min())
    X_train.loc[i, 'count_big'] = len(X_element[np.abs(X_element) > 500])
    X_train.loc[i, 'sum'] = X_element.sum()

    X_train.loc[i, 'av_change_abs'] = np.mean(np.diff(X_element))

    tmp = np.diff(X_element) / X_element[:-1]
    tmp = tmp[~np.isnan(tmp)]
    tmp = tmp[~np.isinf(tmp)]
    X_train.loc[i, 'av_change_rate'] = np.mean(tmp)

    X_train.loc[i, 'abs_max'] = np.abs(X_element).max()
    X_train.loc[i, 'abs_min'] = np.abs(X_element).min()

    X_train.loc[i, 'std_first_50000'] = X_element[:50000].std()
    X_train.loc[i, 'std_last_50000'] = X_element[-50000:].std()
    X_train.loc[i, 'std_first_10000'] = X_element[:10000].std()
    X_train.loc[i, 'std_last_10000'] = X_element[-10000:].std()

    X_train.loc[i, 'avg_first_50000'] = X_element[:50000].mean()
    X_train.loc[i, 'avg_last_50000'] = X_element[-50000:].mean()
    X_train.loc[i, 'avg_first_10000'] = X_element[:10000].mean()
    X_train.loc[i, 'avg_last_10000'] = X_element[-10000:].mean()

    X_train.loc[i, 'min_first_50000'] = X_element[:50000].min()
    X_train.loc[i, 'min_last_50000'] = X_element[-50000:].min()
    X_train.loc[i, 'min_first_10000'] = X_element[:10000].min()
    X_train.loc[i, 'min_last_10000'] = X_element[-10000:].min()

    X_train.loc[i, 'max_first_50000'] = X_element[:50000].max()
    X_train.loc[i, 'max_last_50000'] = X_element[-50000:].max()
    X_train.loc[i, 'max_first_10000'] = X_element[:10000].max()
    X_train.loc[i, 'max_last_10000'] = X_element[-10000:].max()

    percentiles = [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]
    for p in percentiles:
        X_train.loc[i, 'percentile_{}'.format(p)] = np.percentile(X_element, p)
        X_train.loc[i, 'abs_percentile_{}'.format(p)] = np.percentile(
            np.abs(X_element), p)

    windows = [10, 50, 100, 500, 1000, 10000]
    X_element_df = pd.DataFrame(X_element)
    for w in windows:
        x_roll_std = X_element_df.rolling(w).std().dropna().values
        x_roll_mean = X_element_df.rolling(w).mean().dropna().values
        x_roll_std = x_roll_std.reshape(-1)
        x_roll_mean = x_roll_mean.reshape(-1)

        X_train.loc[i, 'ave_roll_std_{}'.format(w)] = x_roll_std.mean()
        X_train.loc[i, 'std_roll_std_{}'.format(w)] = x_roll_std.std()
        X_train.loc[i, 'max_roll_std_{}'.format(w)] = x_roll_std.max()
        X_train.loc[i, 'min_roll_std_{}'.format(w)] = x_roll_std.min()

        for p in percentiles:
            X_train.loc[i, 'percentile_roll_std_{}_window_{}'.
                        format(p, w)] = np.percentile(x_roll_std, p)

        X_train.loc[i, 'av_change_abs_roll_std_{}'.format(w)] = np.mean(
            np.diff(x_roll_std))

        tmp = np.diff(x_roll_std) / x_roll_std[:-1]
        tmp = tmp[~np.isnan(tmp)]
        tmp = tmp[~np.isinf(tmp)]
        X_train.loc[i, 'av_change_rate_roll_std_{}'.format(w)] = np.mean(tmp)
        X_train.loc[i, 'abs_max_roll_std_{}'.format(w)] = np.abs(
            x_roll_std).max()

        X_train.loc[i, 'ave_roll_mean_{}'.format(w)] = x_roll_mean.mean()
        X_train.loc[i, 'std_roll_mean_{}'.format(w)] = x_roll_mean.std()
        X_train.loc[i, 'max_roll_mean_{}'.format(w)] = x_roll_mean.max()
        X_train.loc[i, 'min_roll_mean_{}'.format(w)] = x_roll_mean.min()

        for p in percentiles:
            X_train.loc[i, 'percentile_roll_mean_{}_window_{}'.
                        format(p, w)] = np.percentile(x_roll_mean, p)

        X_train.loc[i, 'av_change_abs_roll_mean_{}'.format(w)] = np.mean(
            np.diff(x_roll_mean))

        tmp = np.diff(x_roll_mean) / x_roll_mean[:-1]
        tmp = tmp[~np.isnan(tmp)]
        tmp = tmp[~np.isinf(tmp)]
        X_train.loc[i, 'av_change_rate_roll_mean_{}'.format(w)] = np.mean(tmp)
        X_train.loc[i, 'abs_max_roll_mean_{}'.format(w)] = np.abs(
            x_roll_mean).max()
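# A minimal usage sketch for feature_extract (hypothetical names; the original
# kernel defines MAX_FREQ_IDX, FREQ_STEP and des_bw_filter_lp and iterates over
# 150 000-sample acoustic segments):
#
#     X_train = pd.DataFrame(dtype=np.float64)
#     y_train = pd.DataFrame(dtype=np.float64, columns=['time_to_failure'])
#     for i, (segment, ttf) in enumerate(segments):  # segments: iterable of (ndarray, float) pairs
#         feature_extract(X_train, i, segment, y_train, ttf, is_TrainDataSet=True)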
Exemple #31
0
def feat_extraction(dataset):

    feat_dataset = pd.DataFrame(index=np.arange(len(dataset)))

    #Calculated columns
    feat_dataset['CGM_Min'] = dataset.min(axis=1)
    feat_dataset['CGM_Max'] = dataset.max(axis=1)

    ##ENTROPY
    feat_dataset['CGM_Entropy'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Entropy'][i] = ts.sample_entropy(
            np.array(dataset.iloc[i, :]))

    ##RMS
    feat_dataset['CGM_RMS'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_RMS'][i] = np.sqrt(np.mean(dataset.iloc[i, :]**2))

    #Correlation
    feat_dataset['CGM_Correlation'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Correlation'][i] = ts.autocorrelation(
            np.array(dataset.iloc[i, :]), 1)

    ##Number_of_Peaks
    feat_dataset['CGM_Peaks'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Peaks'][i] = ts.number_peaks(
            np.array(dataset.iloc[i, :]), 2)

    #CGM Velocity
    feat_dataset['CGM_Velocity'] = np.nan
    for i in range(len(dataset)):
        c_list = dataset.loc[i, :].tolist()
        sum_ = []
        for j in range(1, len(c_list)):
            sum_.append(abs(c_list[j] - c_list[j - 1]))
        feat_dataset['CGM_Velocity'][i] = np.round(np.mean(sum_), 2)

    #MinMax
    feat_dataset['CGM_MinMax'] = feat_dataset['CGM_Max'] - feat_dataset['CGM_Min']

    # Skewness
    feat_dataset['CGM_Skewness'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Skewness'][i] = ts.skewness(dataset.loc[i, :])

    #CGM_Displacement
    feat_dataset['CGM_Displacement'] = np.nan
    for i in range(len(dataset)):
        c_list = dataset.loc[i, :].tolist()
        sum_ = []
        for j in range(1, len(c_list)):
            sum_.append(abs(c_list[j] - c_list[j - 1]))
        feat_dataset['CGM_Displacement'][i] = np.round(np.sum(sum_), 2)

    #CGM_Kurtosis
    feat_dataset['CGM_Kurtosis'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Kurtosis'][i] = ts.kurtosis(
            np.array(dataset.iloc[i, :]))

    # Recurrence: ratio of distinct values to series length
    feat_dataset['CGM_Recur'] = np.nan
    for i in range(len(dataset)):
        feat_dataset['CGM_Recur'][
            i] = ts.ratio_value_number_to_time_series_length(
                np.array(dataset.iloc[i, :]))

    #Remove calculated columns
    del feat_dataset['CGM_Max']
    del feat_dataset['CGM_Min']

    feat_dataset = feat_dataset[[
        'CGM_Entropy', 'CGM_RMS', 'CGM_Correlation', 'CGM_Peaks',
        'CGM_Velocity', 'CGM_MinMax', 'CGM_Skewness', 'CGM_Displacement',
        'CGM_Kurtosis', 'CGM_Recur'
    ]]

    return feat_dataset
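# A minimal usage sketch (synthetic values; the assumed input is a purely
# numeric DataFrame with one CGM series per row):
#
#     demo = pd.DataFrame(np.random.randint(80, 250, size=(5, 30)))
#     print(feat_extraction(demo))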