import lmoments3 as lm


def lstats(x, n):
    '''
    n is the number of L-moment ratios to include, starting at l2 so that the
    mean (l1) is excluded. For n=4 the function returns [l2, t3, t4].
    '''
    lratios = lm.lmom_ratios(x, nmom=n)
    return lratios[1:n]
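A minimal usage sketch, assuming `lmoments3` is imported as `lm` as above; the expected values are taken from the test_samlmu example further down this page.

testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
print(lstats(testdata, 4))  # -> roughly [1.1418, 0.2739, 0.0234], i.e. [l2, t3, t4]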
    def test_n50_nmom3(self):
        start_i = 0
        for i in range(10000):
            l1, l2, t3 = lm.lmom_ratios(self.record[start_i:start_i + self.n], nmom=3)
            t2 = l2 / l1
            start_i += self.n
Example #3
    def test_samlmu(self):
        testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
        expected = [3.23636364, 1.14181818, 0.27388535, 0.02335456, -0.04246285]
        result = lm.lmom_ratios(testdata)
        assert_almost_equal(result, expected)
Example #4
import sys

import numpy as np
import scipy.stats
import lmoments3 as lm


def smrstat(x, vmin=-np.inf, vmax=np.inf):
	'''
	Summary statistics.
	Input: one array of interest.
	Output: mean, variance, skewness, kurtosis, min, max, percentile80,
	percentile90, percentile95, percentile99, percentile99.9, LCV, Lskew, Lkurtosis
	'''

	ind = np.where((~np.isnan(x)) & (x > vmin) & (x < vmax))
	if ind[0].size > 0:
		x_0 = np.asarray(x)[ind]
	else:
		sys.exit(' Array without valid numbers.')

	ferr=np.zeros((14),'f')*np.nan
	ferr[0] = np.mean(x_0)
	ferr[1] = np.var(x_0)
	ferr[2] = scipy.stats.skew(x_0)
	ferr[3] = scipy.stats.kurtosis(x_0)
	ferr[4] = np.min(x_0)
	ferr[5] = np.max(x_0)
	ferr[6] = np.percentile(x_0,80)
	ferr[7] = np.percentile(x_0,90)
	ferr[8] = np.percentile(x_0,95)
	ferr[9] = np.percentile(x_0,99)
	ferr[10] = np.percentile(x_0,99.9)
	# Hosking & Wallis L-moment ratios
	# pip install git+https://github.com/OpenHydrology/lmoments3.git
	hwlm = lm.lmom_ratios(x_0, nmom=5)
	ferr[11] = hwlm[1]/hwlm[0]
	ferr[12] = hwlm[2]
	ferr[13] = hwlm[3]

	return ferr
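A hypothetical usage sketch for smrstat on the small test series used elsewhere on this page (assumes the imports listed above the function):

x = np.array([2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5])
stats = smrstat(x)
print(stats[0])   # mean, ~3.24 for this sample
print(stats[11])  # L-CV = l2 / l1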
Example #5
File: gevfit.py  Project: guziy/RPN
def get_initial_params_using_lm(vals):
    import lmoments3
    from lmoments3 import distr

    sorted_vals = list(sorted(vals))

    the_moments = lmoments3.lmom_ratios(sorted_vals, nmom=3)
    ksi, mu, sigma = distr.gev.lmom_fit(sorted_vals, the_moments[0:3]).values()
    return [sigma, mu, -ksi]  # -ksi because they are using -ksi convention
Example #6
def get_initial_params_using_lm(vals):
    import lmoments3
    from lmoments3 import distr

    sorted_vals = list(sorted(vals))

    the_moments = lmoments3.lmom_ratios(sorted_vals, nmom=3)
    ksi, mu, sigma = distr.gev.lmom_fit(sorted_vals, the_moments[0:3]).values()
    return [sigma, mu, -ksi]  # -ksi because they are using -ksi convention
Example #7
def test_lm():
    x = [
        360.228515625, 513.506103515625, 273.85031127929688,
        340.94839477539062, 244.13925170898438, 283.414306640625,
        394.42819213867188, 284.3604736328125, 281.26956176757812,
        241.46173095703125, 489.75482177734375, 236.31536865234375,
        407.55133056640625, 244.6295166015625, 432.40670776367188,
        260.501953125, 517.23052978515625, 317.6553955078125,
        407.61935424804688, 275.0709228515625, 330.369140625,
        285.92086791992188, 247.9954833984375, 344.34811401367188,
        379.55596923828125, 330.80569458007812, 312.35330200195312,
        251.79550170898438, 372.66928100585938, 239.72474670410156
    ]

    #    print(get_initial_params_using_lm(x))
    print(np.mean(x))
    pars = [128.28104749, 578.4927539, 0.62410911]
    data = [
        588.4747314453125, 693.6640625, 519.03155517578125, 716.58013916015625,
        686.29168701171875, 432.65786743164062, 682.72113037109375,
        730.12603759765625, 698.971923828125, 491.75332641601562,
        597.258544921875, 487.13619995117188, 482.33123779296875,
        573.57861328125, 801.67169189453125, 616.41668701171875,
        690.954833984375, 671.31646728515625, 680.87554931640625,
        534.18414306640625, 427.86019897460938, 236.22953796386719,
        691.40972900390625, 599.84637451171875, 545.3563232421875,
        553.059814453125, 549.1295166015625, 658.3983154296875,
        719.122802734375, 636.84906005859375
    ]

    import lmoments3
    from lmoments3 import distr

    the_moments = lmoments3.lmom_ratios(sorted(data), 5)
    pars = distr.gev.lmom_fit(sorted(data), lmom_ratios=the_moments)

    print("Fitted params using lmoments: ", pars)
    xi, mu, sigma = pars.values()
    print(objective_function_stationary_high([sigma, mu, -xi], data))

    print("Fitted using MLE: ", distr.gev.fit(sorted(data)))

    print(
        "Fitted using custom method (Huziy et al 2013), not using l-moments: ",
        optimize_stationary_for_period(np.array(sorted(data))))
    print(
        "Fitted using custom method (Huziy et al 2013), using l-moments: ",
        optimize_stationary_for_period(np.array(sorted(data)),
                                       use_lmoments=True))

    from scipy.stats import genextreme

    print("Fitted using scipy.stats.genextreme: ",
          genextreme.fit(np.array(sorted(data))))
    print("10 year high flow return level: ",
          get_high_ret_level_stationary([sigma, mu, -xi, 0], 10))
    print("10 year high flow return level: ",
          get_high_ret_level_stationary([sigma, mu, -0.5, 0], 10))
Example #8
    def _l_cv_and_skew(self, catchment):
        """
        Calculate L-CV and L-SKEW for a gauged catchment. Uses `lmoments3` library.

        Methodology source: Science Report SC050050, para. 6.7.5
        """
        z = self._dimensionless_flows(catchment)
        l1, l2, t3 = lm.lmom_ratios(z, nmom=3)
        return l2 / l1, t3
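As a quick worked check, using the l1, l2, t3 values expected in test_samlmu elsewhere on this page: L-CV = l2 / l1 and L-SKEW = t3.

l1, l2, t3 = 3.23636364, 1.14181818, 0.27388535
l_cv = l2 / l1   # ~0.3528
l_skew = t3      # ~0.2739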
Example #9
    def _l_cv_and_skew(self, catchment):
        """
        Calculate L-CV and L-SKEW for a gauged catchment. Uses `lmoments3` library.

        Methodology source: Science Report SC050050, para. 6.7.5
        """
        z = self._dimensionless_flows(catchment)
        l1, l2, t3 = lm.lmom_ratios(z, nmom=3)
        return l2 / l1, t3
def hudgins_features(emg_vec, config):
    """ Extract the four Hudgins features.

    Parameters
    ----------
    emg_vec : ndarray
        3d array of epoched EMG data (trial_samples x emg_channels x epochs)
    config : yaml
        configuration file

    Return
    ------
    features1, features2 : ndarray
        feature arrays (samples x features) for the two feature sets
    """

    # A 3d array with dimensions representing trial_samples x emg_channels x epochs
    data_shape = emg_vec.shape
    # initialize the feature array - samples x features
    features1 = np.zeros(
        (data_shape[0], config['n_electrodes'] * config['n_features']))
    features2 = np.zeros(
        (data_shape[0], config['n_electrodes'] * (config['n_features'] + 1)))

    for i in range(data_shape[0]):
        for j in range(data_shape[1]):
            rawEMGSignal = emg_vec[i, j, :]

            # feature set 1
            features1[i,
                      4 * j] = pysiology.electromyography.getWL(rawEMGSignal)
            features1[i, 4 * j + 1] = pysiology.electromyography.getZC(
                rawEMGSignal, config['threshold'])
            features1[i, 4 * j + 2] = pysiology.electromyography.getSSC(
                rawEMGSignal, config['threshold'])
            features1[i, 4 * j +
                      3] = pysiology.electromyography.getMAV(rawEMGSignal)

            # New feature set 2 based on the paper by Phinyomark, Angkoon, Rami N Khushaba, and Erik Scheme.
            # "Feature extraction and selection for myoelectric control based on wearable EMG sensors." Sensors 18.5 (2018): 1615.
            features2[i, 5 * j] = pysiology.electromyography.getWAMP(
                rawEMGSignal, config['threshold'])  # Willison amplitude
            features2[i, 5 * j + 1] = np.mean(
                np.sqrt(
                    np.absolute(rawEMGSignal)))  # Mean value of square root
            features2[i, 5 * j + 2] = np.log10(
                np.sqrt(np.absolute(np.sum(np.square(
                    np.diff(rawEMGSignal))))))  # Maximum fractal length
            features2[i, 5 * j + 3:5 * j + 5] = np.array(
                lm.lmom_ratios(rawEMGSignal,
                               nmom=2)).reshape(1, 2)  # L-moments with order 2
    return features1, features2
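A hypothetical call sketch; the array shape and config keys below are illustrative assumptions, and the function relies on numpy, pysiology, and lmoments3 being imported as np, pysiology, and lm as in the snippet above.

config = {'n_electrodes': 8, 'n_features': 4, 'threshold': 0.01}
emg_vec = np.random.randn(100, config['n_electrodes'], 256)  # trials x channels x samples
features1, features2 = hudgins_features(emg_vec, config)
print(features1.shape, features2.shape)  # (100, 32) (100, 40)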
Example #11
File: gevfit.py  Project: guziy/RPN
def test_lm():
    x = [360.228515625, 513.506103515625, 273.85031127929688, 340.94839477539062,
         244.13925170898438, 283.414306640625, 394.42819213867188, 284.3604736328125,
         281.26956176757812, 241.46173095703125, 489.75482177734375, 236.31536865234375,
         407.55133056640625, 244.6295166015625, 432.40670776367188, 260.501953125,
         517.23052978515625, 317.6553955078125, 407.61935424804688, 275.0709228515625,
         330.369140625, 285.92086791992188, 247.9954833984375, 344.34811401367188,
         379.55596923828125, 330.80569458007812, 312.35330200195312, 251.79550170898438,
         372.66928100585938, 239.72474670410156]

    #    print(get_initial_params_using_lm(x))
    print(np.mean(x))
    pars = [128.28104749, 578.4927539, 0.62410911]
    data = [588.4747314453125, 693.6640625, 519.03155517578125, 716.58013916015625,
            686.29168701171875, 432.65786743164062, 682.72113037109375, 730.12603759765625,
            698.971923828125, 491.75332641601562, 597.258544921875, 487.13619995117188, 482.33123779296875,
            573.57861328125, 801.67169189453125, 616.41668701171875, 690.954833984375, 671.31646728515625,
            680.87554931640625, 534.18414306640625, 427.86019897460938, 236.22953796386719, 691.40972900390625,
            599.84637451171875,
            545.3563232421875, 553.059814453125, 549.1295166015625, 658.3983154296875, 719.122802734375,
            636.84906005859375]

    import lmoments3
    from lmoments3 import distr

    the_moments = lmoments3.lmom_ratios(sorted(data), 5)
    pars = distr.gev.lmom_fit(sorted(data), lmom_ratios=the_moments)

    print("Fitted params using lmoments: ", pars)
    xi, mu, sigma = pars.values()
    print(objective_function_stationary_high([sigma, mu, -xi], data))

    print("Fitted using MLE: ", distr.gev.fit(sorted(data)))

    print("Fitted using custom method (Huziy et al 2013), not using l-moments: ", optimize_stationary_for_period(
        np.array(sorted(data))))
    print("Fitted using custom method (Huziy et al 2013), using l-moments: ",
          optimize_stationary_for_period(np.array(sorted(data)), use_lmoments=True))

    from scipy.stats import genextreme

    print("Fitted using scipy.stats.genextreme: ", genextreme.fit(np.array(sorted(data))))
    print("10 year high flow return level: ", get_high_ret_level_stationary([sigma, mu, -xi, 0], 10))
    print("10 year high flow return level: ", get_high_ret_level_stationary([sigma, mu, -0.5, 0], 10))
Example #12
    def __run_a_simulation(self):
        simulatedDurationStatisticsDF = pd.DataFrame()
        for row in self.__regionalDurationStatisticsDF.itertuples():
            years = row.nyears
            # Generate synthetic AMS
            ams = self.__kapDist.rvs(years)
            ratios = lm.lmom_ratios(ams, nmom=5)
            data = {'nyears':years,
                    'l1':ratios[0], 
                    'l2':ratios[1], 
                    't':ratios[1]/ratios[0],
                    't3':ratios[2], 
                    't4':ratios[3], 
                    't5':ratios[4]}
            simulatedDurationStatisticsDF = simulatedDurationStatisticsDF.append(data, ignore_index=True)
        # Calculate the regionalized moments
        simulatedRlmoments = getWeightedLMoments(simulatedDurationStatisticsDF)

        v = self.get_V1(simulatedDurationStatisticsDF, simulatedRlmoments)
        self.__v_array.append(v)
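A standalone sketch of the same idea: fit a kappa distribution by L-moments, draw a synthetic AMS, and recompute its L-moment ratios. The sample data and sample size are illustrative assumptions, the kappa fit is assumed to converge for them, and the class's private helpers are not reproduced here.

import lmoments3 as lm
from lmoments3 import distr

observed = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
kap_params = distr.kap.lmom_fit(observed)   # kappa parameters from L-moments
kap_dist = distr.kap(**kap_params)          # frozen distribution (plays the role of __kapDist)
ams = kap_dist.rvs(50)                      # synthetic 50-year annual maximum series
l1, l2, t3, t4, t5 = lm.lmom_ratios(ams, nmom=5)
print(l2 / l1, t3, t4)                      # L-CV, L-skewness, L-kurtosis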
Example #13
    def lmom_fit(self, data=[], lmom_ratios=[]):
        """
        Fit the distribution function to the given data or given L-moments.

        :param data: Data to use in calculating the distribution parameters
        :type data: array_like
        :param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
        :type lmom_ratios: array_like
        :returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
        :rtype: :class:`OrderedDict`
        """
        n_min = self.numargs + 2
        if len(data) > 0:
            if len(data) <= n_min:
                raise ValueError("At least {} data points must be provided.".format(n_min))
            lmom_ratios = lm.lmom_ratios(data, nmom=n_min)
        elif not lmom_ratios:
            raise Exception("Either `data` or `lmom_ratios` must be provided.")
        elif len(lmom_ratios) < n_min:
            raise ValueError("At least {} number of L-moments must be provided.".format(n_min))

        return self._lmom_fit(lmom_ratios)
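A short usage sketch of this API for the GEV distribution, mirroring the calls in the test_lm examples above; the sample data is the small test series used elsewhere on this page.

from lmoments3 import distr

data = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
params = distr.gev.lmom_fit(data)   # ordered dict of shape (c), loc, scale
c, loc, scale = params.values()
frozen = distr.gev(**params)        # frozen scipy-style distribution
print(frozen.ppf(0.99))             # e.g. a 1-in-100 quantile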
Example #14
    def lmom_fit(self, data=[], lmom_ratios=[]):
        """
        Fit the distribution function to the given data or given L-moments.

        :param data: Data to use in calculating the distribution parameters
        :type data: array_like
        :param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
        :type lmom_ratios: array_like
        :returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
        :rtype: :class:`OrderedDict`
        """
        n_min = self.numargs + 2
        if len(data) > 0:
            if len(data) <= n_min:
                raise ValueError("At least {} data points must be provided.".format(n_min))
            lmom_ratios = lm.lmom_ratios(data, nmom=n_min)
        elif not lmom_ratios:
            raise Exception("Either `data` or `lmom_ratios` must be provided.")
        elif len(lmom_ratios) < n_min:
            raise ValueError("At least {} number of L-moments must be provided.".format(n_min))

        return self._lmom_fit(lmom_ratios)
def lscale(x):
    return lm.lmom_ratios(x)[1]
Example #16
    def setUpClass(cls):
        cls.testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
        cls.lmu = lm.lmom_ratios(cls.testdata)
        if cls.dist:
            cls.distr_f = getattr(distr, cls.dist)
        super(DistributionTestCase, cls).setUpClass()
def mean(x):
    return lm.lmom_ratios(x)[0]
    def setUpClass(cls):
        cls.testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
        cls.lmu = lm.lmom_ratios(cls.testdata)
        if cls.dist:
            cls.distr_f = getattr(distr, cls.dist)
        super(DistributionTestCase, cls).setUpClass()
    def test_n50_nmom4(self):
        start_i = 0
        for i in range(10000):
            l1, l2, t3, t4 = lm.lmom_ratios(self.record[start_i:start_i + self.n], nmom=4)
            t2 = l2 / l1
            start_i += self.n
def lskewness(x):
    return lm.lmom_ratios(x)[2]
def extract_emg_features(data, config, scale=False):
    """ Load the EMG data and extract the features
    Parameters
    ----------
    data : dictionary
        epoched emg data
    config : yaml
        configuration file
    scale : bool
        use min-max scaling if scale=True
        
    Return
    ------
    Data : dictionary
        dictionary of feature and label data from all the subjects
    """

    Data = collections.defaultdict(dict)

    for subject in tqdm(config['subjects']):
        emg_vec = data['subject_' + subject]['EMG']
        labels = data['subject_' + subject]['labels']

        # A 3d array with dimensions representing trial_samples x emg_channels x epochs
        data_shape = emg_vec.shape
        # initialize the feature array - samples x features
        features1 = np.zeros(
            (data_shape[0], config['n_electrodes'] * config['n_features']))
        features2 = np.zeros(
            (data_shape[0],
             config['n_electrodes'] * (config['n_features'] + 1)))

        for i in range(data_shape[0]):
            for j in range(data_shape[1]):
                rawEMGSignal = emg_vec[i, j, :]

                # feature set 1
                features1[i, 4 *
                          j] = pysiology.electromyography.getWL(rawEMGSignal)
                features1[i, 4 * j + 1] = pysiology.electromyography.getZC(
                    rawEMGSignal, config['threshold'])
                features1[i, 4 * j + 2] = pysiology.electromyography.getSSC(
                    rawEMGSignal, config['threshold'])
                features1[i, 4 * j +
                          3] = pysiology.electromyography.getMAV(rawEMGSignal)

                # Previously used - feature set 2
                # The getHIST feature returns ZC and WAMP for each bin. The 'WAMP' from all the bins are added and selected as a single feature
                # temp_Hist = pysiology.electromyography.getHIST(rawEMGSignal, nseg=9, threshold=config['threshold'])
                # Hist_sum  = 0
                # for val in temp_Hist:
                #     Hist_sum += temp_Hist[val]['WAMP']
                # features2[i,4*j]     = pysiology.electromyography.getRMS(rawEMGSignal)
                # features2[i,4*j+1]   = pysiology.electromyography.getWL(rawEMGSignal)
                # features2[i,4*j+2]   = sampEntropy(rawEMGSignal)
                # features2[i,4*j+3]   = Hist_sum

                # New feature set 2 based on the paper by Phinyomark, Angkoon, Rami N Khushaba, and Erik Scheme.
                # "Feature extraction and selection for myoelectric control based on wearable EMG sensors." Sensors 18.5 (2018): 1615.
                features2[i, 5 * j] = pysiology.electromyography.getWAMP(
                    rawEMGSignal, config['threshold'])  # Willison amplitude
                features2[i, 5 * j + 1] = np.mean(
                    np.sqrt(np.absolute(
                        rawEMGSignal)))  # Mean value of square root
                features2[i, 5 * j + 2] = np.log10(
                    np.sqrt(
                        np.absolute(np.sum(np.square(np.diff(
                            rawEMGSignal))))))  # Maximum fractal length
                features2[i, 5 * j + 3:5 * j + 5] = np.array(
                    lm.lmom_ratios(rawEMGSignal, nmom=2)).reshape(
                        1, 2)  # L-moments with order 2

                # features[i,6*j+5]   = sampen2(normalize_data(copy_rawEMGSignal))[2][1] # m = 2, r = 0.2 * std

        if scale:
            # print('Min-Max scaling the emg-features')
            # Min-Max scaling
            min_max_scaler = preprocessing.MinMaxScaler()
            features1 = min_max_scaler.fit_transform(features1)
            features2 = min_max_scaler.fit_transform(features2)

        Data['subject_' + subject]['features1'] = features1
        Data['subject_' + subject]['features2'] = features2

        Data['subject_' + subject]['labels'] = labels

    return Data
Example #22
    636,
    343,
    586,
    221,
    298,
    2200,
    237,
    275,
    980,
)

import matplotlib.pyplot as plt

plt.plot(x, y, '.')
#%%
# lmom_ratios already returns ratios for orders >= 3: l1, l2, t3, t4
l1, l2, t3, t4 = lm.lmom_ratios(x, nmom=4)

#%%
from scipy.stats import pearson3, kappa4
import matplotlib.pyplot as plt
import numpy as np

p = kappa4(h=t4, loc=l1, k=t3, scale=l2)

x = sorted(x)
vals1 = np.linspace(0, 1, 31)
vals = np.linspace(0, 1, 1000)
ys = p.ppf(vals)
plt.plot(vals, ys, vals1, x)
Example #23
    def test_samlmu(self):
        testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
        expected = [3.23636364, 1.14181818, 0.27388535, 0.02335456, -0.04246285]
        result = lm.lmom_ratios(testdata)
        assert_almost_equal(result, expected)
def lkurtosis(x):
    return lm.lmom_ratios(x)[3]
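Tying the small helpers together: lm.lmom_ratios uses nmom=5 by default (as test_samlmu shows), so indices 0-3 are l1, l2, t3, t4. A quick check, assuming lmoments3 is imported as lm and the helpers mean, lscale, lskewness, and lkurtosis defined above are in scope:

# Expected values taken from the test_samlmu example on this page.
testdata = [2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5]
print(mean(testdata), lscale(testdata), lskewness(testdata), lkurtosis(testdata))
# -> roughly 3.2364 1.1418 0.2739 0.0234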