Example #1
def plotFilteredDataEnvelope(noisy_signal, filtData):
    
    filtDataEnvelope = np.abs(hilbert(filtData))
    noisy_signal_envelope = np.abs(hilbert(noisy_signal))

    tscale = 1.0E9
    
    f, axarr = plt.subplots(3)
    axarr[0].plot(t*tscale, noisy_signal)
    axarr[0].plot(t*tscale, noisy_signal_envelope)
    axarr[0].set_xlabel("Time (ns)")
    axarr[0].set_ylabel("Voltage (noise units)")

    axarr[1].plot(t*tscale, filtData)
    axarr[1].plot(t*tscale, filtDataEnvelope)
    axarr[1].set_xlabel("Time (ns)")
    axarr[1].set_ylabel("Voltage (noise units)")

    rv=rice(3.6)
    x = np.linspace(rv.ppf(0.01), rv.ppf(0.99), 100)
    axarr[2].hist(filtDataEnvelope,
                  density=True, histtype='stepfilled', alpha=0.2)
    axarr[2].plot(x, rv.pdf(x))
    axarr[2].set_title("PDF of Signal+Noise")
    axarr[2].set_xlabel("Envelope amplitude (noise units)")
    axarr[2].set_ylabel("Probability")

    f.tight_layout()
    plt.show()
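The helper above relies on module-level names (np, plt, hilbert, rice, and the time base t) that are not shown. A minimal sketch of a setup that makes it callable might look like the following; the sample rate, pulse shape, and noise level are assumptions, not values from the original project.

# Hypothetical setup for plotFilteredDataEnvelope; every value here is assumed.
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hilbert
from scipy.stats import rice

fs = 5.0e9                           # sample rate in Hz (assumed)
t = np.arange(0, 200e-9, 1.0 / fs)   # 200 ns record, matching the ns axis labels

rng = np.random.default_rng(0)
amplitude, noise = 3.6, 1.0          # amplitude in "noise units"; matches rice(3.6) above
sig = amplitude * np.sin(2 * np.pi * 300e6 * t) * np.exp(-((t - 100e-9) / 30e-9) ** 2)
noisy_signal = sig + rng.normal(0.0, noise, size=t.size)

# filtData would come from whatever band-pass filter the original pipeline used;
# passing the raw noisy signal twice at least exercises the plotting code.
plotFilteredDataEnvelope(noisy_signal, noisy_signal)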
Example #2
    def FadingModel(self, Time=1, Graphs=False, Results=False):
        """
		"""
        from scipy.stats import rice
        from scipy import integrate
        # Used unqualified below but not imported in the original snippet:
        from numpy import linspace
        import matplotlib.pyplot as plt

        #CALCULATING THE RICE CONTINUOUS DISTRIBUTION
        shape = 0.775
        #MeanPowerGain = (1/Time) * integrate(pow(rice.logcdf(t, shape), 2), (t, 0, Time))
        h = lambda x: pow(rice.logpdf(x, shape), 2)
        MeanPowerGain, err = integrate.quad(h, 0, Time)

        #PLOTTING THE PROBABILITY DENSITY FUNCTION
        if Graphs is True:
            fig, ax = plt.subplots(1, 1)
            x = linspace(rice.ppf(0.01, shape), rice.ppf(0.99, shape), 100)
            ax.plot(x,
                    rice.pdf(x, shape),
                    'r-',
                    lw=5,
                    alpha=0.6,
                    label='Rice PDF')

            rv = rice(shape)
            ax.plot(x, rv.pdf(x), 'k-', lw=2, label='Frozen PDF')

            r = rice.rvs(shape, size=1000)
            ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
            ax.legend(loc='best', frameon=False)

        #PRINTING RESULTS
        if Results is True:
            print("Fading - Mean Power Gain: {}".format(
                (1 / Time) * MeanPowerGain))
        return (1 / Time) * MeanPowerGain
Example #3
 def test_fit(self):
     data = stats.rice(5, loc=0, scale=2).rvs(size=37)
     params = self.dist.fit(data)
     check_params(
         (params.R, 10.100674084593422),
         (params.sigma, 1.759817171541185),
         (params.loc, 0),
     )
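self.dist here is a project-specific Rice wrapper that is not shown. For reference, scipy's own rice.fit can be run on the same kind of sample; the seed and the fixed floc=0 below are assumptions made only to keep the illustration deterministic.

# Rough equivalent using scipy's built-in fit; numbers will differ from the
# project's own fitter above.
import numpy as np
from scipy import stats

np.random.seed(0)                      # assumed seed, only for reproducibility
data = stats.rice(5, loc=0, scale=2).rvs(size=37)
b_hat, loc_hat, scale_hat = stats.rice.fit(data, floc=0)
print(b_hat * scale_hat, scale_hat)    # roughly comparable to params.R and params.sigma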
Example #4
 def test_fit(self):
     data = stats.rice(5, loc=0, scale=2).rvs(size=37)
     params = self.dist.fit(data)
     check_params(
         (params.R, 10.100674084593422),
         (params.sigma, 1.759817171541185),
         (params.loc, 0),
     )
Example #5
    def get_distro(rice_b, scale):
        """Get the distribution at given parameters with normal backup."""

        return (
            rice(b=rice_b, scale=scale)
            if rice_b < 50.0 else
            norm(loc=scale * numpy.sqrt(rice_b**2 + 1.0), scale=scale)
        )
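get_distro switches to a normal approximation once rice_b exceeds 50 because scipy.stats.rice becomes numerically delicate at large b, and because Rice(b, scale) is then close to Normal(scale*sqrt(b**2 + 1), scale). A quick check of that approximation; the test values are arbitrary choices, not from the original code.

# Sanity check of the large-b normal approximation used in get_distro.
import numpy as np
from scipy.stats import norm, rice

b, scale = 60.0, 0.5                      # arbitrary values above the 50.0 cutoff
approx = norm(loc=scale * np.sqrt(b**2 + 1.0), scale=scale)
exact = rice(b=b, scale=scale)
x = np.linspace(approx.ppf(0.01), approx.ppf(0.99), 5)
# Expect a small discrepancy; the approximation error shrinks as b grows.
print(np.max(np.abs(exact.cdf(x) - approx.cdf(x))))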
Example #6
def plotSignalNoiseEnvelope(sig, gaus, noisy_signal):

    signal_envelope = np.abs(hilbert(sig))
    noisy_signal_envelope = np.abs(hilbert(noisy_signal))
    noise_envelope = np.abs(hilbert(gaus))

    # OK, make our plots. Let's scale up the time.
    tscale = 1.0E9
    
    f, axarr = plt.subplots(4)

    axarr[0].plot(t*tscale, sig)
    axarr[0].plot(t*tscale, signal_envelope)
    axarr[0].set_title("Signal")
    axarr[0].set_xlabel("Time (ns)")
    axarr[0].set_ylabel("Voltage (noise units)")

    axarr[1].plot(t*tscale, gaus)
    axarr[1].plot(t*tscale, noise_envelope)
    axarr[1].set_title("Noise")
    axarr[1].set_xlabel("Time (ns)")
    axarr[1].set_ylabel("Voltage (noise units)")

    axarr[2].plot(t*tscale, noisy_signal)
    axarr[2].plot(t*tscale, noisy_signal_envelope)
    axarr[2].set_title("Signal+Noise")
    axarr[2].set_xlabel("Time (ns)")
    axarr[2].set_ylabel("Voltage (noise units)")

    # Now plot the distribution of the data plus the analytic PDF.
    # Get the x-space that makes the most sense, 100 points from 1-99%
    rv = rice(amplitude/noise)
    x = np.linspace(rv.ppf(0.01), rv.ppf(0.99), 100)

    # Histogram the data.
    axarr[3].hist(noisy_signal_envelope,
                  density=True, histtype='stepfilled', alpha=0.2)
    # And plot the PDF on top.
    axarr[3].plot(x, rv.pdf(x))

    axarr[3].set_title("PDF of Signal+Noise")
    axarr[3].set_xlabel("Envelope amplitude (noise units)")
    axarr[3].set_ylabel("Probability")

    f.tight_layout()

    plt.show()
Example #7
    def test_rician_high_noise(self):
        # Generate rician distribution with high noise
        # High noise is where the correction term counts!
        sigma = 20

        # Try out ours
        pM = rician(self.M, self.A, sigma)

        # Compare to scipy implementation
        rv = rice(self.A)
        pM_scipy = rv.pdf(self.M / (sigma**2))
        # Should fail with no correction
        self.assertFalse(np.allclose(pM, pM_scipy))

        # Now correct
        pM_scipy *= self.correction(sigma)
        self.assertTrue(np.allclose(pM, pM_scipy))
Example #8
    def test_rician_low_noise(self):
        # Generate rician distribution with low noise
        sigma = .8  # too small will blow up the correction term for scipy rice.pdf function (see sigma**4 term in denominator)

        # Try out ours
        pM = rician(self.M, self.A, sigma)

        # Compare to scipy implementation
        rv = rice(self.A)
        pM_scipy = rv.pdf(self.M / (sigma**2)) * self.correction(sigma)

        # # Take a gander
        # plt.plot(pM,label='Rician')
        # plt.plot(pM_scipy,label='Rice (scipy)')
        # plt.plot(pM - pM_scipy)
        # plt.legend()
        # plt.show()

        self.assertTrue(np.allclose(pM, pM_scipy))
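The two tests above depend on a project-specific correction term. Independent of that, scipy can evaluate the textbook Rician PDF directly by putting the noise level into scale; the sketch below checks that equivalence with the standard formula, not with the project's rician function.

# scipy.stats.rice with b = A/sigma and scale = sigma evaluates the textbook
# Rician PDF p(M) = M/sigma^2 * exp(-(M^2 + A^2)/(2 sigma^2)) * I0(M*A/sigma^2).
import numpy as np
from scipy.stats import rice
from scipy.special import i0e

A, sigma = 3.0, 0.8
M = np.linspace(0.1, 8.0, 50)

scipy_pdf = rice.pdf(M, b=A / sigma, scale=sigma)
# i0e(z) = exp(-z) * I0(z); written this way to avoid overflow in I0.
textbook = (M / sigma**2) * np.exp(-((M - A) ** 2) / (2 * sigma**2)) * i0e(M * A / sigma**2)
print(np.allclose(scipy_pdf, textbook))   # True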
Example #9
    def generate_rician(image):

        ssim_noise = 0

        if torch.is_tensor(image):
            image = image.numpy()

        rician_image = np.zeros_like(image)

        while ssim_noise <= 0.97 or ssim_noise >= 0.99:
            b = random.uniform(0, 1)
            rv = rice(b)
            rician_image = rv.pdf(image)
            ssim_noise = ssim(image[0],
                              rician_image[0],
                              data_range=rician_image[0].max() -
                              rician_image[0].min())

        #print('ssim : {:.2f}'.format(ssim_noise))

        return rician_image
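Note that rv.pdf(image) maps each pixel value through the Rice density rather than adding noise to it; whether that is the intent is not clear from the snippet. For comparison, a common way to apply Rician noise to a magnitude image is to take the magnitude of a complex Gaussian perturbation, sketched below with an assumed noise level sigma.

# Conventional Rician corruption of a magnitude image (assumed sigma), shown
# only for comparison with the PDF-mapping approach above.
import numpy as np

def add_rician_noise(image, sigma=0.05, seed=None):
    rng = np.random.default_rng(seed)
    real = image + rng.normal(0.0, sigma, size=image.shape)
    imag = rng.normal(0.0, sigma, size=image.shape)
    return np.sqrt(real**2 + imag**2)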
Example #10
File: extraSetGen2.py  Project: ccurro/snn
# python extraSetGen2.py | awk 'BEGIN {srand()} !/^$/ { if (rand() <= .5) print $0 > "r.train"; else print $0 > "r.test"}'
import numpy as np
from scipy.stats import rayleigh
from scipy.stats import rice

def genSequence(dist, length):
	# Cumulative sum of scaled draws from dist (a simple random walk).
	a = 0.1
	x = [0]
	for i in range(length):
		x.append(x[-1] + a*dist.rvs(size=1)[0])

	return x

nExamples = 500

for i in range(0,nExamples):
	seq = genSequence(rice(1),10)
	for e in seq:
		print('{:0.3f}'.format(e),end=' ')
	print('1')

	seq = genSequence(rayleigh,10)
	for e in seq:
		print('{:0.3f}'.format(e),end=' ')
	print('0')
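Each printed line is eleven cumulative-sum samples followed by a 0/1 label (Rice vs. Rayleigh). A possible way to read the split files back is sketched below; the file names follow the awk command above, while the parsing itself is an assumption about how downstream code consumes them.

# Read back the space-separated sequences written by the awk split above.
import numpy as np

def load_split(path):
    rows = np.loadtxt(path)          # each row: 11 samples + trailing 0/1 label
    return rows[:, :-1], rows[:, -1].astype(int)

# X_train, y_train = load_split("r.train")
# X_test, y_test = load_split("r.test")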
Example #11
def all_dists():
    # Distribution parameters were taken from the scipy.stats official
    # documentation examples.
    # Total - 89
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
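One way a table of frozen distributions like this tends to be used is as a test fixture: draw a sample from each entry and run a quick goodness-of-fit check against its own CDF. A sketch of that use (sample size and seed are arbitrary, and some entries such as levy_stable or gausshyper sample slowly):

# Smoke-test every frozen distribution against its own CDF with a KS test.
import numpy as np
from scipy import stats

np.random.seed(0)
for name, dist in all_dists().items():
    sample = dist.rvs(size=200)
    stat, pvalue = stats.kstest(sample, dist.cdf)
    print(f"{name:16s} KS p-value = {pvalue:.3f}")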
Example #12
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import rice

fig, ax = plt.subplots(1, 1)

b = 0.775
mean, var, skew, kurt = rice.stats(b, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(rice.ppf(0.01, b), rice.ppf(0.99, b), 100)
ax.plot(x, rice.pdf(x, b), 'r-', lw=5, alpha=0.6, label='rice pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = rice(b)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = rice.ppf([0.001, 0.5, 0.999], b)
np.allclose([0.001, 0.5, 0.999], rice.cdf(vals, b))
# True

# Generate random numbers:

r = rice.rvs(b, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()

# Names used below but not defined in the snippet; _logger is defined here in
# the usual way, which is an assumption about the original module.
import logging

import numpy
from scipy.stats import norm, rice
from scipy.optimize import root_scalar

_logger = logging.getLogger(__name__)


def rice_from_error_bars(value, abs_plus_error, abs_minus_error):
    """Return a Rice distribution reproducing the given mode & standard dev."""

    def get_distro(rice_b, scale):
        """Get the distribution at given parameters with normal backup."""

        return (
            rice(b=rice_b, scale=scale)
            if rice_b < 50.0 else
            norm(loc=scale * numpy.sqrt(rice_b**2 + 1.0), scale=scale)
        )

    def lower_diff(rice_b, scale):
        """Return the error of quantile of value - abs_minus_error."""

        return (
            get_distro(rice_b, scale).cdf(value - abs_minus_error)
            -
            norm.cdf(-1.0)
        )

    def upper_diff(scale, rice_b):
        """Return the error of quantile of value + abs_plus_error."""

        return (
            get_distro(rice_b, scale).cdf(value + abs_plus_error)
            -
            norm.cdf(1.0)
        )

    def find_b(scale):
        """Return the b parameter that matches the lower bound given scale."""

        b_min = 0.0
        b_min_lower_diff = lower_diff(b_min, scale)
        if abs(b_min_lower_diff) < 1e-8:
            return b_min
        _logger.debug('b_min_lower_diff(%s) = %s',
                      repr(scale),
                      repr(b_min_lower_diff))
        assert b_min_lower_diff >= 0

        b_max = (value + abs_plus_error) / scale
        while lower_diff(b_max, scale) > 0:
            b_max *= 10.0
        solution = root_scalar(f=lower_diff,
                               args=(scale,),
                               bracket=(b_min, b_max))
        assert solution.converged

        return solution.root

    def scale_equation(scale):
        """The equation to solve in order to find the scale."""

        return upper_diff(scale, find_b(scale))

    def find_scale():
        """Return the scale that matches the upper bound at b matching lower."""

        scale_max = (value - abs_minus_error) / rice.ppf(norm.cdf(-1.0), b=0)

        if scale_max <= 0 or scale_equation(scale_max) > 0:
            return None

        scale_min = 0.1 * min(abs_minus_error, abs_plus_error)

        while scale_equation(scale_min) < 0:
            scale_max = scale_min
            scale_min /= 10.0

        solution = root_scalar(f=scale_equation,
                               bracket=(scale_min, scale_max))
        assert solution.converged
        return solution.root

    scale = find_scale()
    if scale is None:
        _logger.warning(
            'Lower and upper limits of %s and %s cannot be matched to 16-th '
            'and 84-th percentiles of a Rice distribution. Matching upper '
            'limit and assuming b=0',
            repr(value - abs_minus_error),
            repr(value + abs_plus_error)
        )
        return rice(
            b=0.0,
            scale=((value + abs_plus_error) / rice.ppf(norm.cdf(1.0), b=0))
        )
    rice_b = find_b(scale)
    return rice(b=rice_b, scale=scale)
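Usage sketch: the returned distribution should place value - abs_minus_error and value + abs_plus_error at roughly the 16th and 84th percentiles. The numbers below are arbitrary, chosen only to exercise the function.

# Example call with arbitrary numbers; the quantile check mirrors the function's goal.
from scipy.stats import norm

distro = rice_from_error_bars(value=5.0, abs_plus_error=1.2, abs_minus_error=0.9)
print(distro.cdf(5.0 - 0.9), norm.cdf(-1.0))   # both near 0.159
print(distro.cdf(5.0 + 1.2), norm.cdf(1.0))    # both near 0.841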