def fft_agg(x):
    """Return centroid, variance, skew and kurtosis of the absolute FFT spectrum of *x*."""
    aggregations = ("centroid", "variance", "skew", "kurtosis")
    param = [{"aggtype": agg} for agg in aggregations]
    # fc.fft_aggregated yields ("aggtype_<name>", value) pairs; keep values only.
    return list(dict(fc.fft_aggregated(x, param)).values())
# Example #2 (score: 0)
def extract_heartrate_tsfresh(transformed: np.ndarray) -> np.ndarray:
    """Extract a fixed set of tsfresh statistics from the heart-rate channel.

    :param transformed: iterable of 2-D windows; the heart-rate signal is
        assumed to be the last column of each window (``x[:, -1]``) --
        TODO confirm against the caller.
    :return: array of shape (n_windows, 8), one feature vector per window.
    """
    ecg_features = []
    print("Extracting TSFRESH statistics from heart rate signals...")

    for x in tqdm(transformed):
        signal = x[:, -1]
        vchange_quantiles_abs = change_quantiles(signal, 0, 0.8, True, "var")
        # fft_aggregated yields ("aggtype_<name>", value) pairs; take the value.
        vfft_aggregated_k = list(
            fft_aggregated(signal, [{"aggtype": "kurtosis"}]))[0][1]
        vmean_abs_change = mean_abs_change(signal)
        vabsolute_sum_of_changes = absolute_sum_of_changes(signal)
        vfft_aggregated_s = list(
            fft_aggregated(signal, [{"aggtype": "skew"}]))[0][1]
        vfft_aggregated_c = list(
            fft_aggregated(signal, [{"aggtype": "centroid"}]))[0][1]
        vvariance = variance(signal)
        vvariation_coefficient = variation_coefficient(signal)

        new_tsfresh = np.array([
            vchange_quantiles_abs,
            vfft_aggregated_k,
            vmean_abs_change,
            vabsolute_sum_of_changes,
            vfft_aggregated_s,
            vfft_aggregated_c,
            vvariance,
            vvariation_coefficient,
        ])

        # BUG FIX: np.concatenate over a 1-D array of scalars raises
        # "zero-dimensional arrays cannot be concatenated"; the feature
        # vector is already 1-D, so append it as-is.
        ecg_features.append(new_tsfresh)

    return np.array(ecg_features)
# Example #3 (score: 0)
    def fft_aggregated(self, x, param=None):
        """
        Wrapper around tsfresh's ``feature_calculators.fft_aggregated``.

        Returns aggregated statistics (centroid, variance, skew, kurtosis)
        of the absolute Fourier transform spectrum of ``x``.

        :param x: the time series to calculate the feature of
        :type x: pandas.Series
        :param param: dictionaries {"aggtype": s} with s one of "centroid",
            "variance", "skew", "kurtosis"; defaults to centroid only
        :type param: list
        :return: the different feature values
        :rtype: pandas.Series
        """
        chosen = [{'aggtype': 'centroid'}] if param is None else param
        result = feature_calculators.fft_aggregated(x, chosen)
        logging.debug("fft aggregated by tsfresh calculated")
        return list(result)
def get_fft(arr):
    """Return centroid, variance, skew and kurtosis of *arr*'s absolute FFT
    spectrum as a numpy array, with NaNs replaced by zero."""
    attrs = [{'aggtype': kind}
             for kind in ('centroid', 'variance', 'skew', 'kurtosis')]

    # fft_aggregated yields ("aggtype_<name>", value) pairs; keep the values.
    values = np.array([value for _, value in fft_aggregated(arr, attrs)])
    return np.nan_to_num(values)
def main():
    """Read every gait-force .txt file in the current directory and write one
    summary row per file into Summary.xlsx (sheet 'SummaryPatients'):
    energy, kurtosis, skewness, median, mean, variance and four FFT
    aggregates, for the left and right foot."""
    dirname = os.path.realpath('.')
    excel_path = dirname + '\\Summary.xlsx'
    workbook = openpyxl.load_workbook(excel_path)
    worksheet = workbook['SummaryPatients']

    fft_params = [{"aggtype": "centroid"}, {"aggtype": "variance"},
                  {"aggtype": "skew"}, {"aggtype": "kurtosis"}]

    file_num = 1
    # r"\*.txt": raw string avoids the invalid "\*" escape sequence warning
    # (same runtime value as before).
    for filename in glob.glob(dirname + r"\*.txt"):
        times = []
        total_force_l = []
        total_force_r = []
        ids = []
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(filename, 'r') as data:
            for line in data:
                fields = line.split()
                ids.append(1)
                times.append(float(fields[0]))
                # columns 17/18 hold total force per foot -- TODO confirm
                total_force_l.append(float(fields[17]))
                total_force_r.append(float(fields[18]))

        total_data = {
            "id": ids,
            "time": times,
            "totalForceL": total_force_l,
            "totalForceR": total_force_r,
        }

        row = str(file_num + 1)
        worksheet['A' + row] = file_num
        # 'Pt' in the file name marks a patient recording, otherwise control.
        worksheet['B' + row] = 1 if 'Pt' in filename else 0

        # Columns C..N: one statistic per pair of columns, left foot then
        # right foot, in the original order.
        stats = (tf.abs_energy, tf.kurtosis, tf.skewness, tf.median,
                 tf.mean, tf.variance)
        columns = iter('CDEFGHIJKLMN')
        for stat in stats:
            worksheet[next(columns) + row] = stat(total_data["totalForceL"])
            worksheet[next(columns) + row] = stat(total_data["totalForceR"])

        # Columns O..R / S..V: FFT aggregates (centroid, variance, skew,
        # kurtosis) for the left / right foot. fft_aggregated yields
        # ("aggtype_<name>", value) pairs; only the value is stored, as in
        # the original counter-based loops (which shadowed the builtins
        # `int` and `list`).
        for col, (_, value) in zip(
                'OPQR', tf.fft_aggregated(total_data["totalForceL"],
                                          fft_params)):
            worksheet[col + row] = value
        for col, (_, value) in zip(
                'STUV', tf.fft_aggregated(total_data["totalForceR"],
                                          fft_params)):
            worksheet[col + row] = value

        file_num += 1

    workbook.save(excel_path)
# Example #6 (score: 0)
 def function(x):
     # Closure helper: extract a single aggregated FFT statistic from x.
     # NOTE(review): relies on `self.aggtype` from an enclosing scope not
     # shown here, so this def must live inside a method that binds `self`.
     param = [{"aggtype": self.aggtype}]
     # fft_aggregated yields ("aggtype_<name>", value) pairs; return the value.
     return list(fft_aggregated(x, param=param))[0][1]
def generate_time_series_feats(x_dataset, dataset_name="raw", test=False):
    """Compute and persist a battery of tsfresh features for each time series.

    :param x_dataset: DataFrame with one time series per row.
    :param dataset_name: prefix used in the saved feature names (e.g. "raw").
    :param test: forwarded to features_exists/save_features to select the
        train/test feature store.
    """
    make_dir_if_not_exists(os.path.join(FEATURES_PATH, 'tsfeats'))
    time_length = x_dataset.shape[1]

    # Scalar features: each maps a 1-D series to a single value.
    features_function_dict = {
        "mean": mean,
        "median": median,
        "length": length,
        "minimum": minimum,
        "maximum": maximum,
        "variance": variance,
        "skewness": skewness,
        "kurtosis": kurtosis,
        "sum_values": sum_values,
        "abs_energy": abs_energy,
        "mean_change": mean_change,
        "mean_abs_change": mean_abs_change,
        "count_below_mean": count_below_mean,
        "count_above_mean": count_above_mean,
        "has_duplicate_min": has_duplicate_min,
        "has_duplicate_max": has_duplicate_max,
        "standard_deviation": standard_deviation,
        "absolute_sum_of_changes": absolute_sum_of_changes,
        "last_location_of_minimum": last_location_of_minimum,
        "last_location_of_maximum": last_location_of_maximum,
        "first_location_of_maximum": first_location_of_maximum,
        "longest_strike_below_mean": longest_strike_below_mean,
        "longest_strike_above_mean": longest_strike_above_mean,
        "sum_of_reoccurring_values": sum_of_reoccurring_values,
        "first_location_of_minimum": first_location_of_minimum,
        "sum_of_reoccurring_data_points": sum_of_reoccurring_data_points,
        "variance_larger_than_standard_deviation":
        variance_larger_than_standard_deviation,
        "ratio_value_number_to_time_series_length":
        ratio_value_number_to_time_series_length,
        "percentage_of_reoccurring_values_to_all_values":
        percentage_of_reoccurring_values_to_all_values,
        "binned_entropy_max300": lambda x: binned_entropy(x, 300),
        "binned_entropy_max400": lambda x: binned_entropy(x, 400),
        "cid_ce_true": lambda x: cid_ce(x, True),
        "cid_ce_false": lambda x: cid_ce(x, False),
        "percentage_of_reoccurring_datapoints_to_all_datapoints":
        percentage_of_reoccurring_datapoints_to_all_datapoints,
    }

    # BUG FIX: dict.iteritems() and the bare `print` statement are Python 2
    # only (a SyntaxError under Python 3); use .items() and the print()
    # function. end=" " keeps the progress message and its status on one
    # line, matching the old trailing-comma behaviour.
    for feature_name, function_call in features_function_dict.items():
        print("{:.<70s}".format("- Processing feature: %s" % feature_name),
              end=" ")
        feature_name = 'tsfeats/%s_%s' % (dataset_name, feature_name)
        if not features_exists(feature_name, test):
            feats = x_dataset.apply(function_call, axis=1, raw=True).values
            save_features(feats, feature_name, test)
            print("Done")
        else:
            print("Already generated")

    # Parameter sets for the multi-valued tsfresh calculators below.
    ar_param_k100 = [{"coeff": i, "k": 100} for i in range(100 + 1)]
    ar_param_k500 = [{"coeff": i, "k": 500} for i in range(500 + 1)]
    agg50_mean_linear_trend = [{"attr": val, "chunk_len": 50, "f_agg": "mean"}
                               for val in ("pvalue", "rvalue", "intercept",
                                           "slope", "stderr")]
    aug_dickey_fuler_params = [{"attr": "teststat"}, {"attr": "pvalue"},
                               {"attr": "usedlag"}]
    energy_ratio_num10_focus5 = [{"num_segments": 10, "segment_focus": 5}]
    fft_aggr_spectrum = [{"aggtype": "centroid"}, {"aggtype": "variance"},
                         {"aggtype": "skew"}, {"aggtype": "kurtosis"}]
    # (time_length + 1) // 2 coefficients, as in the original code.
    fft_coefficient_real = [{"coeff": i, "attr": "real"}
                            for i in range((time_length + 1) // 2)]
    fft_coefficient_imag = [{"coeff": i, "attr": "imag"}
                            for i in range((time_length + 1) // 2)]
    fft_coefficient_abs = [{"coeff": i, "attr": "abs"}
                           for i in range((time_length + 1) // 2)]
    fft_coefficient_angle = [{"coeff": i, "attr": "angle"}
                             for i in range((time_length + 1) // 2)]
    linear_trend_params = [{"attr": val}
                           for val in ("pvalue", "rvalue", "intercept",
                                       "slope", "stderr")]

    # Multi-valued features: each returns a dict of named values per series.
    other_feats_dict = {
        "ar_coeff100": lambda x: dict(ar_coefficient(x, ar_param_k100)),
        "ar_coeff500": lambda x: dict(ar_coefficient(x, ar_param_k500)),
        "agg50_mean_lin_trend":
        lambda x: dict(agg_linear_trend(x, agg50_mean_linear_trend)),
        "aug_dickey_fuler":
        lambda x: dict(augmented_dickey_fuller(x, aug_dickey_fuler_params)),
        "energy_ratio_num10_focus5":
        lambda x: dict(energy_ratio_by_chunks(x, energy_ratio_num10_focus5)),
        "fft_aggr_spectrum":
        lambda x: dict(fft_aggregated(x, fft_aggr_spectrum)),
        "fft_coeff_real":
        lambda x: dict(fft_coefficient(x, fft_coefficient_real)),
        "fft_coeff_imag":
        lambda x: dict(fft_coefficient(x, fft_coefficient_imag)),
        "fft_coeff_abs":
        lambda x: dict(fft_coefficient(x, fft_coefficient_abs)),
        "fft_coeff_angle":
        lambda x: dict(fft_coefficient(x, fft_coefficient_angle)),
        "linear_trend": lambda x: dict(linear_trend(x, linear_trend_params)),
    }

    # BUG FIX: Python 2 iteritems()/print statement, as above.
    for feature_name, function_call in other_feats_dict.items():
        print("{:.<70s}".format("- Processing features: %s" % feature_name),
              end=" ")
        feature_name = 'tsfeats/%s_%s' % (dataset_name, feature_name)
        if not features_exists(feature_name, test):
            feats_dict = x_dataset.apply(function_call, axis=1,
                                         raw=True).values.tolist()
            feats = pd.DataFrame.from_dict(feats_dict)
            save_features(feats.values, feature_name, test)
            print("Done")
        else:
            print("Already generated")

    # Auto-correlations as features.
    print("- Processing Auto-correlation features...")
    corr_dataset = x_dataset.apply(autocorrelation_all, axis=1, raw=True)
    save_features(corr_dataset.values,
                  '%s_auto_correlation_all' % dataset_name, test)

    print("- Processing ARIMA(5,5,1) Features...")
    arima_features = parallelize_row(x_dataset.values,
                                     generate_arima_feats,
                                     n_jobs=2)
    assert arima_features.shape[0] == x_dataset.shape[0]  # Assert the axis
    save_features(arima_features, '%s_arima_5_5_1' % dataset_name, test)
# Example #8 (score: 0)
# Quick visual check of the first 1000 samples of the signal.
plt.plot(timeline[:1000], data[:1000])
plt.xlabel('time')
plt.ylabel('data')
plt.show()

range_data = data[:600]

mean_abs_change = feature_calculators.mean_abs_change(data)
# Mean of the differences between consecutive points
# Equal to np.mean(np.abs(np.diff(x)))

first_location_of_maximum = feature_calculators.first_location_of_maximum(data)
# Position at which the maximum value is observed

fft_aggregated = feature_calculators.fft_aggregated(data, [{
    'aggtype': 'skew'
}])
# Fourier transform (aggregated spectrum statistic; skew here)

number_peaks = feature_calculators.number_peaks(data[:1000], 50)
# Number of peaks

index_mass_quantile = feature_calculators.index_mass_quantile(
    data[:1000], [{
        'q': 0.5
    }, {
        'q': 0.1
    }])
# Percentile (quantile) processing

# NOTE(review): the call below is truncated in the source; the remaining
# arguments continue past the end of this chunk.
linear_trend = feature_calculators.linear_trend(range_data,
# Example #9 (score: 0)
def main():
    """Read a single gait-force file and collect summary statistics for the
    left and right foot into the extracted_features dict.

    NOTE(review): this snippet may be truncated at the end of the chunk;
    it also shadows the builtins `id`, `int` and `list`, and never closes
    the file handle — kept byte-identical here, flagged for a later fix.
    """
    dirname = os.path.realpath('.')
    # Windows-style path to the input recording.
    filename = dirname + '\\GaPt07_01.txt'

    # NOTE(review): file handle is never closed; should use `with open(...)`.
    data = open(filename, 'r')

    totalData = {}

    time = []
    totalForceL = []
    totalForceR = []
    # `id` shadows the builtin of the same name.
    id = []
    for line in data:
        tempForce = line.split()
        id.append(1)
        time.append(float(tempForce[0]))
        # Columns 17/18 appear to hold the total force per foot -- TODO confirm.
        totalForceL.append(float(tempForce[17]))
        totalForceR.append(float(tempForce[18]))

    totalData["id"] = id
    totalData["time"] = time
    totalData["totalForceL"] = totalForceL
    totalData["totalForceR"] = totalForceR

    dataPandas = pd.DataFrame.from_dict(totalData)

    extracted_features = {}

    #extract_featuresL = extract_features(dataPandas, column_id="id", column_kind=None, column_value=None)
    extracted_features["absEnergyL"] = tf.abs_energy(totalData["totalForceL"])
    extracted_features["absEnergyR"] = tf.abs_energy(totalData["totalForceR"])
    extracted_features["kurtosisL"] = tf.kurtosis(totalData["totalForceL"])
    extracted_features["kurtosisR"] = tf.kurtosis(totalData["totalForceR"])
    extracted_features["skewnessL"] = tf.skewness(totalData["totalForceL"])
    extracted_features["skewnessR"] = tf.skewness(totalData["totalForceR"])
    extracted_features["medianL"] = tf.median(totalData["totalForceL"])
    extracted_features["medianR"] = tf.median(totalData["totalForceR"])
    extracted_features["meanL"] = tf.mean(totalData["totalForceL"])
    extracted_features["meanR"] = tf.mean(totalData["totalForceR"])
    extracted_features["varianceL"] = tf.variance(totalData["totalForceL"])
    extracted_features["varianceR"] = tf.variance(totalData["totalForceR"])

    # FFT aggregates for the left foot. fft_aggregated yields
    # ("aggtype_<name>", value) pairs.
    temp = tf.fft_aggregated(totalData["totalForceL"], [{
        "aggtype": "centroid"
    }, {
        "aggtype": "variance"
    }, {
        "aggtype": "skew"
    }, {
        "aggtype": "kurtosis"
    }])
    # `int` and `list` below shadow builtins (counter and loop item).
    int = 0
    for list in temp:
        # NOTE(review): this stores the whole (name, value) pair, whereas
        # the sibling snippet stores only list[1] — likely a bug; confirm
        # which form downstream code expects.
        if int == 0:
            extracted_features["fftCentroidL"] = list
        if int == 1:
            extracted_features["fftVarianceL"] = list
        if int == 2:
            extracted_features["fftSkewL"] = list
        if int == 3:
            extracted_features["fftKurtosisL"] = list
        int += 1

    # Same FFT aggregates for the right foot.
    temp2 = tf.fft_aggregated(totalData["totalForceR"], [{
        "aggtype": "centroid"
    }, {
        "aggtype": "variance"
    }, {
        "aggtype": "skew"
    }, {
        "aggtype": "kurtosis"
    }])
    int = 0
    for list in temp2:
        if int == 0:
            extracted_features["fftCentroidR"] = list
        if int == 1:
            extracted_features["fftVarianceR"] = list
        if int == 2:
            extracted_features["fftSkewR"] = list
        if int == 3:
            extracted_features["fftKurtosisR"] = list
        int += 1