Example #1
    def calculate_bitrate(self, lightpath, bert=1e-3, bn=12.5e9):
        # work with the linear GSNR: the thresholds below are linear,
        # so converting to dB here would break every comparison
        snr = lightpath.snr
        Rs = lightpath.rs

        if lightpath.transceiver.lower() == 'fixed-rate':
            # PM-QPSK threshold (note the square on erfcinv, matching
            # snrt1 in the flex-rate branch below)
            snrt = 2 * erfcinv(2 * bert)**2 * (Rs / bn)
            rb = np.piecewise(snr, [snr < snrt, snr >= snrt], [0, 100])

        elif lightpath.transceiver.lower() == 'flex-rate':
            snrt1 = 2 * erfcinv(2 * bert)**2 * (Rs / bn)
            snrt2 = (14 / 3) * erfcinv((3 / 2) * bert)**2 * (Rs / bn)
            snrt3 = 10 * erfcinv((8 / 3) * bert)**2 * (Rs / bn)

            cond1 = snr < snrt1
            cond2 = snrt1 <= snr < snrt2
            cond3 = snrt2 <= snr < snrt3
            cond4 = snr >= snrt3

            rb = np.piecewise(snr, [cond1, cond2, cond3, cond4],
                              [0, 100, 200, 400])
        elif lightpath.transceiver.lower() == 'shannon':
            # bn/Rs rescales the GSNR to the symbol rate, consistent
            # with the thresholds above
            rb = 2 * Rs * np.log2(1 + snr * (bn / Rs)) * 1e-9
        else:
            rb = 0

        lightpath.bitrate = float(rb)
        return float(rb)
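
A quick numeric check of the thresholds above (an editor's sketch, not part of the original snippet; Rs = 32e9 Bd, Bn = 12.5e9 Hz and a target BER of 1e-3 are assumed illustrative values):

import numpy as np
from scipy.special import erfcinv

Rs, bn, bert = 32e9, 12.5e9, 1e-3  # assumed values, not from the snippet
snrt1 = 2 * erfcinv(2 * bert)**2 * (Rs / bn)
snrt2 = (14 / 3) * erfcinv((3 / 2) * bert)**2 * (Rs / bn)
snrt3 = 10 * erfcinv((8 / 3) * bert)**2 * (Rs / bn)
for rb, t in ((100, snrt1), (200, snrt2), (400, snrt3)):
    print(rb, 'Gbps needs GSNR >=', round(10 * np.log10(t), 2), 'dB')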
Example #2
 def sbm(self, T, alpha, sigma=1):
     '''Creates a scaled Brownian motion trajectory'''
     msd = (sigma**2)*np.arange(T+1)**alpha
     deltas = np.sqrt(msd[1:]-msd[:-1])
     # inverse-CDF sampling of N(0, 1) increments, scaled per step
     dx = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
     dy = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
     return np.concatenate((np.cumsum(dx)-dx[0], np.cumsum(dy)-dy[0]))
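
The np.sqrt(2)*erfcinv(2 - 2*u) expression is inverse-CDF sampling of a standard normal (erfcinv(2 - 2u) equals erfinv(2u - 1)); a minimal check of the identity in use:

import numpy as np
from scipy.special import erfcinv

u = np.random.rand(100_000)
z = np.sqrt(2) * erfcinv(2 - 2 * u)  # standard normal draws via inverse CDF
print(round(z.mean(), 2), round(z.std(), 2))  # ~0.0, ~1.0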
Example #3
    def estimate_ewald_eta(self, precision: float) -> float:
        # estimate the Ewald sum's eta for the nuclei interaction energy;
        # the precision is assumed to be a relative precision, and this
        # formula is obtained by estimating the sum as an integral

        vol = float(self.volume().detach())
        eta0 = np.sqrt(np.pi) / vol**(1. / 3)
        eta = eta0 * erfcinv(0.5 * precision) / erfcinv(precision)
        return round(eta * 10) / 10  # round to 1 d.p.
Example #4
def test_erfcinv():
    value = np.random.rand(1).item()
    assert (roundScaler(NumCpp.erfc_inv_Scaler(value), NUM_DECIMALS_ROUND) ==
            roundScaler(sp.erfcinv(value), NUM_DECIMALS_ROUND))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(roundArray(NumCpp.erfc_inv_Array(cArray), NUM_DECIMALS_ROUND),
                          roundArray(sp.erfcinv(data), NUM_DECIMALS_ROUND))
Example #5
def bit_rate_flex(gsnr, Rs=None):
    if Rs is None:
        Rs = param.Rs
    if gsnr >= (10 * ((sci_spe.erfcinv(
        (8 / 3) * param.BERt))**2) * Rs / param.Bn):
        bit_rate = 4e11  # 400Gbps, PM-16QAM
    elif gsnr >= ((14 / 3) * ((sci_spe.erfcinv(
        (3 / 2) * param.BERt))**2) * Rs / param.Bn):
        bit_rate = 2e11  # 200Gbps, PM-8QAM
    elif gsnr >= (2 * ((sci_spe.erfcinv(2 * param.BERt))**2) * Rs / param.Bn):
        bit_rate = 1e11  # 100Gbps, PM-QPSK
    else:
        bit_rate = 0  # 0Gbps
    return bit_rate
Example #6
def sample_trunc_gaussian(mu=1, sigma=1, lower=0, size=1):
    """Sample a Gaussian truncated below at `lower` via inverse-CDF sampling."""
    sqrt2 = np.sqrt(2)
    Phialpha = 0.5 * erfc(-(lower - mu) / (sqrt2 * sigma))

    if np.isscalar(mu):
        arg = Phialpha + np.random.uniform(size=size) * (1 - Phialpha)
        return np.squeeze(mu - sigma * sqrt2 * erfcinv(2 * arg))
    else:
        Phialpha = Phialpha[:, np.newaxis]
        arg = Phialpha + np.random.uniform(size=(mu.size,
                                                 size)) * (1 - Phialpha)

        return np.squeeze(mu[:, np.newaxis] -
                          sigma[:, np.newaxis] * sqrt2 * erfcinv(2 * arg))
Example #7
    def from_unit_cube(self, x):

        mu = self.mu.value
        sigma = self.sigma.value
        lower_bound = self.lower_bound.value
        upper_bound = self.upper_bound.value

        sqrt_two = 1.414213562

        # guard against x == 0 or x == 1, where the inverse CDF diverges
        if x < 1e-16 or (1 - x) < 1e-16:
            return -1e32

        # precalculate the arguments to the CDF

        lower_arg = (lower_bound - mu) / sigma
        upper_arg = (upper_bound - mu) / sigma

        theta_lower = 0.5 + 0.5 * erf(lower_arg / sqrt_two)

        theta_upper = 0.5 + 0.5 * erf(upper_arg / sqrt_two)

        # now precalculate the argument to the Inv. CDF

        arg = theta_lower + x * (theta_upper - theta_lower)

        return mu + sigma * sqrt_two * erfcinv(2 * (1 - arg))
Example #8
def outliers(x, axis=0, single_mad=None, p_threshold=1e-3):
    """Robustly detect outliers assuming a normal distribution.
    
    A modified Z-score is first computed from the data. A threshold
    Z is then computed according to p_threshold, and values that exceed it
    are rejected. p_threshold is the probability of rejection for strictly
    normally distributed data, i.e. the probability of a "false outlier".

    Parameters
    ----------
    x : array_like
        The data.
    axis : int
        Axis or axes along which to compute the Z scores. E.g. axis=0
        computes row-wise Z scores and rejects based on those.
    single_mad : bool
        Use a single MAD estimate computed over all the data. If False, MAD
        will be computed along the given axis (e.g. separately for each
        variable).
    p_threshold : float
        Probability of rejection for strictly normal data.

    Returns
    -------
    idx : tuple
        Indices of rejected values (as in np.where output)
    """
    zs = modified_zscore(x, axis=axis, single_mad=single_mad)
    z_threshold = np.sqrt(2) * erfcinv(p_threshold)
    logger.debug('Z threshold: %.2f' % z_threshold)
    return np.where(abs(zs) > z_threshold)
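
For reference, the two-tailed Z threshold this formula yields for a few p_threshold values (a quick check using only the line above):

import numpy as np
from scipy.special import erfcinv

for p in (0.05, 1e-3, 1e-6):
    print(p, round(np.sqrt(2) * erfcinv(p), 2))  # ~1.96, ~3.29, ~4.89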
Example #9
def renormalize(dists, mu, sigma):
    # map each dist from [1, 5] to (0, 1), apply a probit-style transform,
    # then rescale onto the target mu/sigma (in place)
    for i in range(len(dists)):
        dist = dists[i]
        normalized = (dist - 1) / 4
        normalized = -erfcinv(normalized * 2) * 2
        normalized = normalized * sigma + mu
        dists[i] = normalized
Example #10
    def from_unit_cube(self, x):

        mu = self.mu.value
        sigma = self.sigma.value
        lower_bound = self.lower_bound.value
        upper_bound = self.upper_bound.value

        sqrt_two = 1.414213562

        # guard against x == 0 or x == 1, where the inverse CDF diverges
        if x < 1e-16 or (1 - x) < 1e-16:
            return -1e32

        # precalculate the arguments to the CDF

        lower_arg = old_div((lower_bound - mu), sigma)
        upper_arg = old_div((upper_bound - mu), sigma)

        theta_lower = 0.5 + 0.5 * erf(old_div(lower_arg, sqrt_two))

        theta_upper = 0.5 + 0.5 * erf(old_div(upper_arg, sqrt_two))

        # now precalculate the argument to the Inv. CDF

        arg = theta_lower + x * (theta_upper - theta_lower)

        out = mu + sigma * sqrt_two * erfcinv(2 * (1 - arg))
        
        return np.clip(out, lower_bound, upper_bound)
Example #11
 def __init__(
     self,
     gp_kernel,
     gp_meanf=None,
     likelihood=gpflow.likelihoods.Gaussian(variance=1.0e-3),
     optimiser=tf.optimizers.Adam(0.01),
     varsigma=erfcinv(0.01),
     points=None,
     gpflow_model=None,
     natgrad_learning_rate=1.0,
     train_iterations=VGP_TRAIN_ITERATIONS,
 ):
     """
     :param likelihood: likelihood for VGP model
     :type likelihood: `gpflow.likelihoods.base.ScalarLikelihood`
     :param natgrad_learning_rate: step length (gamma) for Natural gradient
     :type natgrad_learning_rate: float
     :param train_iterations: number of iterations for VGP Adam vs NatGrad
         training loop
     :type train_iterations: int
     """
     super().__init__(
         gp_kernel=gp_kernel,
         gp_meanf=gp_meanf,
         optimiser=optimiser,
         varsigma=varsigma,
         points=points,
         gpflow_model=gpflow_model,
     )
     assert isinstance(likelihood, gpflow.likelihoods.base.ScalarLikelihood)
     self.likelihood = likelihood
     self.natgrad_optimiser = gpflow.optimizers.NaturalGradient(
         natgrad_learning_rate)
     self.train_iters = train_iterations
Example #12
 def g1(x):  # x = [rho, delta]
     betap = x[1]  # setting Beta' = delta for no good reason; can try other values
     epsilon = adv_comp(q, x[0], x[1], method="CDP")
     lhs = (math.exp(epsilon) - 1 + 3 * (betap + x[1]) / beta)
     rhs = math.sqrt(float(1) / (x[0] * (n * n))) * erfcinv(betap / q)
     return lhs + rhs
Example #13
def sample_truncated_gaussian(mu=0, sigma=1, lb=-np.inf, ub=np.inf):
    """
    Sample a truncated normal with the specified params. This
    is not the most stable way but it works as long as the
    truncation region is not too far from the mean.
    """
    # Broadcast arrays to be of the same shape
    mu, sigma, lb, ub = np.broadcast_arrays(mu, sigma, lb, ub)
    shp = mu.shape
    if np.allclose(sigma, 0.0):
        return mu

    cdflb = normal_cdf(lb, mu, sigma)
    cdfub = normal_cdf(ub, mu, sigma)

    # Sample uniformly from the CDF
    cdfsamples = cdflb + np.random.rand(*shp) * (cdfub - cdflb)

    # Clip the CDF samples so that we can invert them
    cdfsamples = np.clip(cdfsamples, 1e-15, 1 - 1e-15)
    zs = -np.sqrt(2) * special.erfcinv(2 * cdfsamples)

    # Transform the standard normal samples
    xs = sigma * zs + mu
    xs = np.clip(xs, lb, ub)

    return xs
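
A usage sketch; normal_cdf is not shown in this example, so a minimal erfc-based stand-in is assumed here:

import numpy as np
from scipy import special

def normal_cdf(x, mu, sigma):
    # assumed stand-in for the helper the example relies on
    return 0.5 * special.erfc(-(x - mu) / (np.sqrt(2) * sigma))

xs = sample_truncated_gaussian(mu=np.zeros(1000), sigma=1.0, lb=-1.0, ub=2.0)
print(xs.min() >= -1.0 and xs.max() <= 2.0)  # True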
Example #14
def outdoor_radius(sigma, n, ro, gama, eta, bw, figura, mcs):
    #prob_cobertura_borda = float()
    #Ms = 4*n/sigma - 3
    Ms = 4.6
    Q = 1 - (erfc(Ms/(sigma*sqrt(2))))/2
    #prob_cobertura_borda = 1 - Q

    sigma_sir = sqrt(2*sigma*sigma*(1-ro))
    Q_inv = sqrt(2)*erfcinv(2*Q)
    M_in = -1*Q_inv*sigma_sir

    m_in = pow(10, M_in/10)
    D_in = 10*log10(m_in*gama*eta)

    setor_gain = 4.77
    sinr_mod = [1, 6, 14]

    for n, modulation in enumerate(mcs):
        sens = sinr_mod[n] + figura + 10*log10(180000) - 174 + D_in
        loss = link_budget(uplink_pot_tx,
                            sens,
                            Ms,
                            uplink_Gtx + uplink_Grx + uplink_TMA,
                            uplink_rx_loss + uplink_tx_loss,
                            0)
        mcs[modulation].append(max_radius(loss, 2.6))

    return mcs, m_in
Example #15
def random_rot(shape, limit=180):
    """
    Modification of the Gaussian method to limit the angle directly.
    by Daniel Rebain
    shape: (N, )
    limit: max_angle
    rot: (N, 3, 3)
    """
    limit = limit / 180.0 * np.pi

    vp = np.random.randn(*shape, 3)
    d2 = np.sum(vp**2, axis=-1)
    c2theta = np.cos(0.5 * limit)**2
    wp_limit = np.sqrt(c2theta * d2 / (1.0 - c2theta))

    comp_widths = erfc(wp_limit / np.pi)
    # widths = 1.0 - comp_widths
    inv_x = comp_widths * np.random.rand(*shape)
    inv_x = np.clip(inv_x, np.finfo(float).tiny, 2.0)
    wp = erfcinv(inv_x) * np.pi

    wp *= 2.0 * np.random.randint(2, size=shape) - 1.0
    q = np.concatenate([vp, wp[:, None]], axis=-1)
    q /= np.linalg.norm(q, axis=-1, keepdims=True)
    rot = Rotation.from_quat(q).as_matrix()
    return rot
Example #16
def randn_tail(shape, limits):
    comp_widths = erfc(limits / np.pi)
    widths = 1.0 - comp_widths
    inv_x = comp_widths * np.random.rand(*shape)
    inv_x = np.clip(inv_x, np.finfo(float).tiny, 2.0)
    x = erfcinv(inv_x) * np.pi
    return x
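
Because inv_x never exceeds erfc(limits/pi), erfcinv maps every draw back into the tail at or beyond the limits; a quick check of that behaviour:

import numpy as np
from scipy.special import erfc, erfcinv

limits = np.full(10_000, 2.0)
x = randn_tail((10_000,), limits)
print(x.min() >= 2.0)  # True: all samples lie in the tail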
Example #17
def invphi(input):
    """
    Inverse of Phi function.
    @param input: Input value.
    @return: inverse of Phi(input).
    """
    return -1 * np.sqrt(2) * erfcinv(input / 0.5)
Example #18
def tn(l, u):

    # samples a column vector of length=length(l)=length(u)
    # from the standard multivariate normal distribution,
    # truncated over the region [l,u], where -a<l<u<a for some
    # 'a' and l and u are column vectors;
    # uses acceptance rejection and inverse-transform method;
    tol = 2  # controls switch between methods

    # threshold can be tuned for maximum speed for each platform
    # case: abs(u-l)>tol, uses accept-reject from randn

    I = np.abs(u - l) > tol
    x = np.copy(l)  # copy so the caller's array is not mutated in place

    if np.any(I):
        tl = l[I]
        tu = u[I]
        x[I] = trnd(tl, tu)

    # case: abs(u-l)<tol, uses inverse-transform

    I = ~I
    if np.any(I):

        tl = l[I]
        tu = u[I]
        pl = erfc(tl / np.sqrt(2)) / 2
        pu = erfc(tu / np.sqrt(2)) / 2

        x[I] = np.sqrt(2) * erfcinv(
            2 * (pl - (pl - pu) * np.random.uniform(size=len(tl))))

    return x
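
A usage sketch exercising only the inverse-transform branch (every interval is narrower than tol, so the trnd helper, which is not shown here, is never called):

import numpy as np
from scipy.special import erfc, erfcinv

l = np.array([0.0, 1.0, -1.5])
u = np.array([0.5, 1.8, -0.5])
x = tn(l, u)
print(np.all((x >= l) & (x <= u)))  # True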
Example #19
    def _cluster_numeric_attribute(self, attrib):
        """Run K-means on a single attribute"""

        # "Attribute values(s) corresponding to..."
        x_s = []

        # Step 2: "Compute mean and std dev..."
        attr_mean = np.mean(attrib)

        # using non-default ddof=1 gives same as Khan's Java and Gnumeric
        attr_sd = np.std(attrib, ddof=1)

        # print("m=" + str(mn) + " sd=" + str(sd))

        # Step 3: "Compute percentile..."
        for i in range(0, self._num_clusters):
            percentile = (2 * (i + 1) - 1) / (2 * self._num_clusters)
            z_s = math.sqrt(2) * erfcinv(2 * percentile)
            x_s.append(z_s * attr_sd + attr_mean)

        attr_data = attrib.reshape(-1, 1)
        seeds = np.array(x_s).reshape(-1, 1)

        # Step 6?
        return self._k_means_clustering(attr_data, seeds, self._num_clusters)
Example #20
def sig2sigma(sig):
   from scipy.special import erfc,erfcinv
   if sig > 1e-15: return erfcinv(sig)*2**0.5
   def inverfc(x,*args):
      return erfc(x/2**0.5)-args[0]
   from scipy.optimize import fsolve
   return fsolve(inverfc,[8],(sig,))
Example #21
def sample_truncated_gaussian(mu=0, sigma=1, lb=-np.inf, ub=np.inf):
    """
    Sample a truncated normal with the specified params. This
    is not the most stable way but it works as long as the
    truncation region is not too far from the mean.
    """
    # Broadcast arrays to be of the same shape
    mu, sigma, lb, ub = np.broadcast_arrays(mu, sigma, lb, ub)
    shp = mu.shape
    if np.allclose(sigma, 0.0):
        return mu

    cdflb = normal_cdf(lb, mu, sigma)
    cdfub = normal_cdf(ub, mu, sigma)

    # Sample uniformly from the CDF
    cdfsamples = cdflb + np.random.rand(*shp) * (cdfub - cdflb)

    # Clip the CDF samples so that we can invert them
    cdfsamples = np.clip(cdfsamples, 1e-15, 1 - 1e-15)
    zs = -np.sqrt(2) * special.erfcinv(2 * cdfsamples)

    # Transform the standard normal samples
    xs = sigma * zs + mu
    xs = np.clip(xs, lb, ub)

    return xs
Example #22
 def forward_cpu(self, x):
     if not available_cpu:
         raise ImportError('SciPy is not available. Forward computation'
                            ' of erfcinv in CPU cannot be done. ' +
                           str(_import_error))
     self.retain_outputs((0,))
     return utils.force_array(special.erfcinv(x[0]), dtype=x[0].dtype),
Example #23
def Reject_Outliers_With_Median(x, m=3.):
    """
    Reject data in x whose values are greater than 'm' times 
    median deviation from the global median (MAD)  

    ...

    Attributes
    ----------
    x : float
        series data
    m : float
        number of deviation from MAD (default:3)

    Returns
    -------
    x : list
        filtered signal x with nan in place of outliers 
    """

    N = len(x)
    median = np.median(x)

    dev = np.abs(x - median)
    MAD = -1 / (math.sqrt(2) * special.erfcinv(1.5)) * np.median(dev)
    mdev = dev / MAD
    # print(MAD)

    for i in range(0, N):
        if mdev[i] > m:
            x[i] = np.nan

    return x
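
The scale factor applied to the median deviation is the usual MAD-to-sigma constant 1/Phi^{-1}(3/4) ~ 1.4826, which makes the MAD a consistent estimator of sigma for Gaussian data:

import math
from scipy import special

k = -1 / (math.sqrt(2) * special.erfcinv(1.5))
print(round(k, 4))  # 1.4826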
Example #24
    def transform_uniform(self, r):
        """
        Transformation from hypercube to physical parameters. The MultiNest native space is a unit hypercube
        in which all the parameters are uniformly distributed in [0, 1]. The user is required to transform
        the hypercube parameters to physical parameters. This transformation is described in Sec 5.1
        of arXiv:0809.3437.

        These functions are based on the prior transformations provided here:
        https://github.com/JohannesBuchner/MultiNest/blob/master/src/priors.f90

        Parameters
        ----------
        r : float
            Hypercube value

        Returns
        -------
        r2 : float
            Transformed parameter value
        """

        # Calculate transformation
        u = self.mu + self.sigma * np.sqrt(2.0) * erfcinv(2.0*(1.0 - r))

        return u
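
A numeric check of the transformation: r = 0.5 maps to mu, and r = 0.975 maps to roughly mu + 1.96*sigma, as expected for a Gaussian prior:

import numpy as np
from scipy.special import erfcinv

mu, sigma = 0.0, 1.0
for r in (0.5, 0.975):
    print(r, round(mu + sigma * np.sqrt(2.0) * erfcinv(2.0 * (1.0 - r)), 3))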
Example #25
def band_radius_calculator(gap_prob=None, sensitivity=None):
    """Creates a function that behaves like :func:`band_radius` for a fixed
    set of parameters (intended for bulk usage).

    Keyword Args:
        gap_prob (float): as in :func:`band_radius`.
        sensitivity (float): as in :func:`band_radius`.

    Returns:
        function: A function with signature ``f(len0, len1, diag)``.
    """
    assert sensitivity > 0 and sensitivity < 1
    assert gap_prob > 0 and gap_prob < 1

    epsilon = 1. - sensitivity
    adjusted_epsilon = epsilon * 2 / 3

    C = 2 * erfcinv(adjusted_epsilon) * sqrt(gap_prob * (1 - gap_prob))

    def calculator(len0, len1, diag):
        e_len = expected_alignment_length(len0, len1, diag, gap_prob=gap_prob)
        radius = C * sqrt(e_len)
        return max(1, int(radius))

    return calculator
Example #26
def truncated_normal_sample(m, s, l, random_state=None):
    """
    Return random number from distribution with density
    p(x)=K*exp(-(x-m)^2/s-l'x), x>=0.
    m and l are vectors and s is scalar
    Adapted from randr function at http://mikkelschmidt.dk/code/gibbsnmf.html
    which is Copyright 2007 Mikkel N. Schmidt, [email protected], www.mikkelschmidt.dk
    """
    if isinstance(random_state, np.random.RandomState):
        rs = random_state
    else:
        rs = np.random.RandomState(seed=random_state)
    sqrt_2s = np.sqrt(2 * s)
    ls = l * s
    lsm = ls - m
    A = lsm / sqrt_2s
    a = A > 26
    x = np.zeros(m.shape)
    y = rs.random_sample(m.shape)
    x[a] = -np.log(y[a]) / (lsm[a] / s)
    na = np.logical_not(a)
    R = erfc(abs(A[na]))
    x[na] = erfcinv(y[na] * R - (A[na] < 0) * (2 * y[na] + R - 2)) * sqrt_2s + m[na] - ls[na]
    x[np.isnan(x)] = 0
    x[x < 0] = 0
    x[np.isinf(x)] = 0
    return x.real
Example #27
 def forward_cpu(self, x):
     if not available_cpu:
         raise ImportError('SciPy is not available. Forward computation'
                           ' of erfcinv in CPU cannot be done. ' +
                           str(_import_error))
     self.retain_outputs((0,))
     return utils.force_array(special.erfcinv(x[0]), dtype=x[0].dtype),
Example #28
def compute_false_alarm_threshold(period_days, duration_hrs):
    """Compute the stat, significance needed to invalidate the null hypothesis

    An event should be considered statistically significant if its
    peak in the convolved lightcurves is greater than the value computed
    by this function.

    Note that this number is computed on a per-TCE basis. If you are looking
    at many TCEs you will need a stronger threshold. (For example, if
    you use this function to conclude that there is a less than 0.1% chance
    a given event is a false alarm due to Gaussian noise, you expect to
    see one such false alarm in 1,000 TCEs. See Coughlin et al. for the
    formula to ensure less than 1 false alarm over many TCEs.)

    Parameters
    ----------
    period_days : float
        Orbital period
    duration_hrs : float
        Duration of transit in hours.

    Returns
    -------
    fa : float
        **TODO** What exactly is returned. Is this the 1 sigma false
        alarm threshold?
    """
    duration_days = duration_hrs / 24.0

    fa = spspec.erfcinv(duration_days / period_days)
    fa *= np.sqrt(2)
    return fa
Example #29
def total_coll_prob(car, obst,
                    obst_mu, obst_cov, 
                    gamma_mu, gamma_sd,
                    delta=0.99, n_intervals=5):
    
    """
    Calculates collision probability between car at x,y,theta = (0,0,0) and some
    uncertain obstacle. Returns an upper-bound collision probability using combined 
    body and circular over-approximation.

    Parameters
    ----------
    
        car, obst
            Shapely convex polygons of the car and obstacle, with the object origin 
            at (0,0) and at a zero angle orientation
        obst_mu, obst_cov
            mean (2,) and covariance (2,2) of the obstacle position
        gamma_mu, gamma_sd
            mean and standard deviation of obstacle orientation
        delta
            (optional) the proportion of probability to be explained by 'accurate' 
            probability calculations (the remainder is calculated using circular 
            over-approximation).
        n_intervals
            the number of discrete obstacle orientation ranges used for the 'accurate'
            probability calculations.

    Returns
    -------
        collision probability upper bound
    """
    
    cdf = lambda z: 0.5*(1 + math.erf(z/np.sqrt(2)))
    probit = lambda p: -np.sqrt(2)*special.erfcinv(2*p)

    # gamma confidence interval to consider 
    # (pick min and max such that p(gamma between min & max) = delta)
    min_gamma = probit((1-delta)/2)*gamma_sd + gamma_mu   
    max_gamma = (gamma_mu - min_gamma) + gamma_mu      
    #print 'gamma range:', min_gamma, max_gamma

    gamma_ranges = np.linspace(min_gamma, max_gamma, n_intervals+1)
    p_gamma = np.empty(n_intervals) # p that gamma is in range i
    p_rect = np.empty(n_intervals)  # p of collision | gamma range i
    p_coll = np.empty(n_intervals)  # p of collision + gamma range i
    for i in range(n_intervals):
        g_min = gamma_ranges[i]
        g_max = gamma_ranges[i+1]
        p_gamma[i] = cdf((g_max-gamma_mu)/gamma_sd) - cdf((g_min-gamma_mu)/gamma_sd)
        p_rect[i] = rect_coll_prob_ub(car, obst, obst_mu, obst_cov, (g_min, g_max))[0]
        p_coll[i] = p_gamma[i] * p_rect[i]
    # Approximate the circular bound probability with p = 1
    p_circ = 1 #circ_coll_prob(car, obst, obst_mu, obst_cov)[0]
    #print 'p_circ:', p_circ
    
    assert np.allclose(p_gamma.sum(), delta)
    
    return p_coll.sum() + (1-delta)*p_circ
Example #30
def bit_rate_fixed(gsnr, Rs=None):
    if Rs is None:
        Rs = param.Rs
    if gsnr >= (2 * ((sci_spe.erfcinv(2 * param.BERt))**2) * Rs / param.Bn):
        bit_rate = 1e11  # 100Gbps, PM-QPSK
    else:
        bit_rate = 0  # 0Gbps
    return bit_rate
Example #31
 def GaussianPrior(self, r, mu, sigma):
     """Uniform[0:1]  ->  Gaussian[mean=mu,variance=sigma**2]"""
     from math import sqrt
     from scipy.special import erfcinv
     if (r <= 1.0e-16 or (1.0 - r) <= 1.0e-16):
         return -1.0e32
     else:
         return mu + sigma * sqrt(2.0) * erfcinv(2.0 * (1.0 - r))
Example #32
def test_normal():
    for i in range(TESTS):
        prime_idx = randint(0, MAX_BASE)
        hammer = Hammersley(prime_idx)
        h1 = hammer.get_array(SIZE)
        hammer.set_idx(0)
        h2 = hammer.get_normal_array(SIZE)
        assert np.allclose(erfcinv(h1 * 2.0), h2)
Example #33
def BER2Q(BER):
    BER = np.asarray(BER, dtype=float)  # accept scalars and lists alike
    q = np.zeros(BER.shape)
    idx2Cal = BER < 0.5
    q[idx2Cal] = 20 * np.log10(np.sqrt(2) * erfcinv(BER[idx2Cal] / .5))
    return q
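
A quick check of the conversion: BER = 1e-3 corresponds to Q ~ 3.09 in linear units, i.e. ~9.8 dB:

import numpy as np
from scipy.special import erfcinv

ber = 1e-3
print(round(20 * np.log10(np.sqrt(2) * erfcinv(ber / .5)), 2))  # ~9.8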
Example #34
def _invphi(input):
    """Inverse phi function.

    Args:
        input (float or ndarray): Float (scalar or array) value.

    Returns:
        float: invphi(input)
    """
    return -1 * np.sqrt(2) * erfcinv(input / 0.5)
Example #35
def induceRankCorr(R, Cstar):
    """Induces rank correlation Cstar onto a sample R [N x k].
    Note that it is easy to specify correlations that are not possible to generate.
    Results generated with a given Cstar should be checked.

    Iman, R. L., and W. J. Conover. 1982. A Distribution-free Approach to Inducing Rank
    Correlation Among Input Variables. Communications in Statistics: Simulation and
    Computations 11:311-334.
    
    Parameters
    ----------
    R : ndarray [N x k]
        Matrix of random samples (with no pre-existing correlation)
    Cstar : ndarray [k x k]
        Desired positive, symmetric correlation matrix with ones along the diagonal.
    
    Returns
    -------
    corrR : ndarray [N x k]
        A correlated matrix of samples."""

    """Define inverse complimentary error function (erfcinv in matlab)
    x is on interval [0,2]
    its also defined in scipy.special"""
    #erfcinv = lambda x: -stats.norm.ppf(x/2)/sqrt(2)

    C = Cstar
    N, k = R.shape
    """Calculate the sample correlation matrix T"""
    T = np.corrcoef(R.T)

    """Calculate lower triangular cholesky
        decomposition of Cstar (i.e. P*P' = C)"""
    P = cholesky(C).T

    """Calculate lower triangular cholesky decomposition of T, i.e. Q*Q' = T"""
    Q = cholesky(T).T

    """S*T*S' = C"""
    S = P.dot(inv(Q))

    """Replace values in samples with corresponding
    rank-indices and convert to van der Waerden scores"""

    RvdW = -np.sqrt(2) * special.erfcinv(2*((_columnRanks(R)+1)/(N+1)))

    """Matrix RBstar has a correlation matrix exactly equal to C"""
    RBstar = RvdW.dot(S.T)
    
    """Match up the rank pairing in R according to RBstar"""
    ranks = _columnRanks(RBstar)
    sortedR = np.sort(R, axis=0)
    corrR = np.zeros(R.shape)
    for j in np.arange(k):
        corrR[:, j] = sortedR[ranks[:, j], j]

    return corrR
Example #36
    def GaussianPrior(self, r, mu, sigma):
        """Uniform[0:1]  ->  Gaussian[mean=mu,variance=sigma**2]"""
        from math import sqrt
        from scipy.special import erfcinv

        if (r <= 1.0e-16 or (1.0 - r) <= 1.0e-16):
            return -1.0e32
        else:
            return mu + sigma * sqrt(2.0) * erfcinv(2.0 * (1.0 - r))
Example #37
    def rate_adaptive_policy(self, ctf, cur_tx_power, cur_tx_constraint, snr_db, cur_ber):
        ber = max(self.required_ber, 1e-7)

        #    a=0.0004
        #    b=0.00001
        #
        #
        #    ber_err = ber-cur_ber
        #    self.ber_state = max(1e-12,self.ber_state + (a+b)*ber_err - b*self.last_ber_err)
        #    self.last_ber_err = ber_err
        #
        #    ber = min(.5,max(1e-12,self.ber_state))
        #
        #    print "current ber",cur_ber
        #    print "ber_err",ber_err
        #    print "ber state",self.ber_state

        gamma = (2.0 / 3.0) * (erfcinv(ber) ** 2.0) * 3.2
        # gamma = ((2./3.)*(erfcinv(ber))**2.0)
        print "input ber", ber, "required ber", self.required_ber
        print "snr gap (dB) for req. ber", 10 * log10(gamma)

        N = self.subcarriers

        (b, e) = levin_campello(self.mod_map, N, cur_tx_constraint, snr_db, ctf, gamma, cur_tx_power)

        b = numarray.array(b)
        e = numarray.array(e)
        a = numarray.array(zeros(len(self.assignment_map)))

        if sum(b < 0) > 0:
            print "WARNING: bit loading < 0"
            b[b < 0] = 0

        a[b > 0] = 1

        txpow = sum(e)
        e = e / txpow * N

        print "txpow", txpow
        print "tx amplitude", sqrt(txpow)
        #    print numarray.array(map(lambda x: "%.2f" % (x), e))
        #    print numarray.array(map(lambda x: "%d" % (x),b))

        # return

        self.tx_amplitude = sqrt(txpow)
        self.mod_map = list(b)
        self.pa_vector = list(e)
        self.assignment_map = list(a)

        frame_length_samples = 12 * self.block_length  # FIXME constant
        bits_per_frame = sum(b) * 9  # FIXME constant
        frame_duration = frame_length_samples / self.bandwidth
        self.data_rate = bits_per_frame / frame_duration
        print "Datarate", self.data_rate
Example #38
            def myprior(cube, ndim, nparams):

                for ss, sig in enumerate(model.ptasignals):

                    # short hand
                    ii = sig['parindex']

                    if sig['prior'] == 'gaussian':
                        m, s = sig['mu'], sig['sigma']
                        cube[ii] = m + s*np.sqrt(2) * ss.erfcinv(2*(1-cube[ii]))
                    else:
                        cube[ii] = model.pmin[ii] + cube[ii] * (model.pmax[ii]-model.pmin[ii])
Example #39
def get_error_f(z, inverse=False, stand=False):
    '''
    Error-function wrapper: P(|N| < x) = erf(x / sqrt(2)), with
    z = (x - mu) / std; z can range from 0.95 to 0.05
    (TBD: at 0.5 should the result be 0.5?).

    :param z: input value
    :param inverse: if True, return erfcinv(z), a standardised value
    :param stand: if True, standardise z first
    :return: 2/sqrt(pi)*integral(exp(-t**2), t=0..z)
    '''
    if stand:
        z = (z - np.mean(z)) / (np.std(z) * np.sqrt(2))
    if inverse:
        return erfcinv(z)  # returns standardised value
    return erf(z)  # returns probability
Example #40
  def build_operators(self):

    """construct transport operators"""

    self.D1  = dmatrix(self.z,3,1,1)
    self.D2  = dmatrix(self.z,3,1,2)
    self.chi = self.chi_ref* \
               np.exp(2.0*(erfcinv(2*self.z_ref)**2-erfcinv(2*self.z)**2))
    XD2 = 0.5*spdiags(self.chi,0,self.nz,self.nz)*self.D2

    # note the first and last rows of XD2 are zero (since chi = 0 at z = 0,1)
    # this is convenient because it won't interfere with BCs in the rhs vector

    ns1 = self.nspec+1
    nn  = ns1*self.nz 
    Is  = eye(ns1)
    id,jd = XD2.nonzero()
    self.D = lil_matrix((nn,nn))
    for i,j in zip(id,jd):
      ii=i*ns1
      jj=j*ns1
      self.D[ii:ii+ns1,jj:jj+ns1] = XD2[i,j]*Is

    self.D = self.D.tocsr()  # tocsr() returns a new matrix rather than converting in place
Example #41
def effpi(effk, sep):
    """ The efficiency for mis-tagging a charged pion as kaon

    Parameters
    -----------------------
    effk: float
          The efficiency for tagging a charged kaon
    sep:  float
          The separation of the two gaussians corresponding to the kaon and pion distributions of
          some observable that discriminates between them
    """
    from scipy.special import erfc
    from scipy.special import erfcinv

    return 1-0.5*erfc(-sep + erfcinv(2-2*effk))
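
A sanity check: with zero separation the two species are indistinguishable, so the pion mis-tag rate collapses to the kaon efficiency:

from scipy.special import erfc, erfcinv

effk = 0.9
print(round(1 - 0.5 * erfc(-0.0 + erfcinv(2 - 2 * effk)), 6))  # 0.9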
Example #42
def sig2sigma(sig, two_tailed=True, logprob=False):
    """Convert tail probability to "sigma" units, i.e., find the value of the 
       argument for the normal distribution beyond which the integrated tail
       probability is sig.  Note that the default is to interpret this number
       as the two-tailed value, as this is the quantity that goes to 0
       when sig goes to 1.

       args
       ----
       sig     the chance probability

       kwargs
       ------
       two_tailed [True] interpret sig as two-tailed or one-tailed
                          probability in converting
       logprob [False] if True, the argument is the natural logarithm
                       of the probability
    """
    from scipy.special import erfc, erfcinv
    from scipy.optimize import fsolve

    sig = to_array(sig)
    if logprob:
        logsig = sig.copy()
        sig = np.exp(sig)
    results = np.empty_like(sig)

    if np.any((sig > 1) | (not logprob and sig <= 0)):
        raise ValueError("Probability must be between 0 and 1.")

    if not two_tailed:
        sig *= 2

    def inverfc(x, *args):
        return erfc(x / 2 ** 0.5) - args[0]

    for isig, mysig in enumerate(sig):
        if mysig < 1e-120:  # approx on asymptotic erfc
            if logprob:
                x0 = (-2 * (logsig + np.log(np.pi ** 0.5))) ** 0.5
            else:
                x0 = (-2 * np.log(mysig * (np.pi) ** 0.5)) ** 0.5
            results[isig] = x0 - np.log(x0) / (1 + 2 * x0)
        elif mysig > 1e-15:
            results[isig] = erfcinv(mysig) * 2 ** 0.5
        else:
            results[isig] = fsolve(inverfc, [8], (mysig,))
    return from_array(results)
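
The moderate-probability fast path reduces to erfcinv(sig)*sqrt(2); a couple of familiar values as a check:

from scipy.special import erfcinv

print(erfcinv(0.05) * 2**0.5)  # ~1.96 (two-tailed 5%)
print(erfcinv(0.10) * 2**0.5)  # ~1.64 (one-tailed 5%, after the internal doubling)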
Example #43
            def myprior(cube, ndim, nparams):

                for sct, sig in enumerate(model.ptasignals):

                    # short hand
                    parind = sig['parindex']
                    npars = sig['npars']

                    if npars:
                        for ct, ii in enumerate(range(parind, parind+npars)):
                            if sig['prior'][ct] == 'gaussian':
                                m, s = sig['mu'][ct], sig['sigma'][ct]
                                cube[ii] = m + s*np.sqrt(2) * ss.erfcinv(2*(1-cube[ii]))
                            else:
                                cube[ii] = model.pmin[ii] + cube[ii] * \
                                        (model.pmax[ii]-model.pmin[ii])
Example #44
    def _generate_block_binary(self, k, corr=.9):
        '''Generate a multivariate Bernoulli with parameter rho
        and non-null block correlation
        '''
        from math import sqrt
        from scipy.special import erfcinv
        sig = np.eye(self.K)
        sb = self.K // k

        # P(X_i = 1)= rho
        t = sqrt(2) * erfcinv(2 * self.rho)

        for i in range(k):
            ss = sig[i * sb:(i + 1) * sb, i * sb:(i + 1) * sb].shape[0]
            sig[i * sb:(i + 1) * sb, i * sb:(i + 1) * sb] = \
                (1 - corr) * np.eye(ss) + corr * np.ones((ss, ss))
        return sig, t
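
The threshold t is chosen so that P(Z > t) = rho for standard normal Z; a quick check with scipy.stats:

from math import sqrt
from scipy.special import erfcinv
from scipy.stats import norm

rho = 0.1
t = sqrt(2) * erfcinv(2 * rho)
print(round(t, 4), round(norm.sf(t), 4))  # ~1.2816, ~0.1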
Example #45
def unbiased_sigma(N_indep):
    """Calculate an unbiased sigma for using in sigma clipping.

    The formula below for cliplim is pretty subtle. Kappa, sigma
    clipping should be such that the noise is not biased by
    it. Consequently, the clipping boundaries should be such that
    exactly half an independent pixel should exceed it if the map were
    source free. A rigid boundary of 3 sigma is appropriate only if the
    number of independent pixels is about 185 (the number of
    independent pixels equals the number of pixels divided by the
    beamsize in pixels).

    The condition that kappa, sigma clipping may not bias the noise is
    translated in the formula below, using Gaussian statistics. A
    disadvantage of this is that more iterations of kappa, sigma
    clipping are needed, compared to 3 sigma clipping. However, the
    noise values derived are generally significantly different (lower)
    compared to 3 sigma clipping.
    """

    return 1.4142135623730951 * erfcinv(0.5 / N_indep)
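
A check of the figure quoted in the docstring: about 185 independent pixels gives back the rigid 3 sigma boundary:

from scipy.special import erfcinv

print(round(1.4142135623730951 * erfcinv(0.5 / 185), 2))  # ~3.0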
Example #46
def band_radius(len0, len1, diag, gap_prob=None, sensitivity=None):
    """Calculates the smallest band radius in the dynamic programming table
    such that an overlap alignment, with the given gap probability, stays
    entirely within the diagonal band centered at the given diagonal. This is
    given by:

    .. math::
        r = 2\\sqrt{g(1-g)K}
            \\mathrm{erf}^{-1}\\left(1-\\frac{2\\epsilon}{3}\\right)

    where :math:`g` is the gap probability, :math:`1-\\epsilon` is the desired
    sensitivity, and :math:`K` is the expected length of the alignment (cf.
    :func:`expected_alignment_length`).

    Args:
        len0 (int): Length of the first sequence (the "vertical" sequence in
            the table).
        len1 (int): Length of the second sequence (the "horizontal" sequence in
            the table).
        diag (int): Starting diagonal of alignments to consider.
        gap_prob (float): Probability of indels occurring at any position of
            an alignment.
        sensitivity (float): The probability that an alignment with given gap
            probability remains entirely within the band.
    Returns:
        int: The smallest band radius guaranteeing the required sensitivity.

    """
    assert sensitivity > 0 and sensitivity < 1
    assert gap_prob > 0 and gap_prob < 1

    epsilon = 1. - sensitivity
    adjusted_epsilon = epsilon * 2 / 3
    e_len = expected_alignment_length(len0, len1, diag, gap_prob=gap_prob)
    radius = 2 * erfcinv(adjusted_epsilon) * sqrt(
        gap_prob * (1 - gap_prob) * e_len
    )
    return max(1, int(radius))
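
The code's erfcinv(2*epsilon/3) is the erf^{-1}(1 - 2*epsilon/3) of the docstring formula, via the identity erfcinv(x) = erfinv(1 - x); a one-line check:

import numpy as np
from scipy.special import erfcinv, erfinv

eps = 0.1
print(np.isclose(erfcinv(2 * eps / 3), erfinv(1 - 2 * eps / 3)))  # True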
Example #47
def sample_truncnorm(mu=0, sigma=1, lb=-np.inf, ub=np.inf):
    """ Sample a truncated normal with the specified params
    """
    # Broadcast arrays to be of the same shape
    mu, sigma, lb, ub = np.broadcast_arrays(mu, sigma, lb, ub)
    shp = mu.shape
    if np.allclose(sigma, 0.0):
        return mu

    cdflb = normal_cdf(lb, mu, sigma)
    cdfub = normal_cdf(ub, mu, sigma)

    # Sample uniformly from the CDF
    cdfsamples = cdflb + np.random.rand(*shp)*(cdfub-cdflb)

    # Clip the CDF samples so that we can invert them
    cdfsamples = np.clip(cdfsamples, 1e-15, 1-1e-15)

    zs = -np.sqrt(2) * erfcinv(2*cdfsamples)

    assert np.all(np.isfinite(zs))

    return sigma * zs + mu
Example #48
    def from_unit_cube(self, x):
        """
        Used by multinest

        :param x: 0 < x < 1
        :return: the transformed (physical) parameter value
        """

        mu = self.mu.value
        sigma = self.sigma.value

        sqrt_two = 1.414213562

        if x < 1e-16 or (1 - x) < 1e-16:

            res = -1e32

        else:

            res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))

        return res
Example #49
    def work(self):
        self.query_sounder()
        print self.ac_vector

        self.ac_vector = [0.0 + 0.0j] * self.ac_vlen
        if self.ac_vlen >= 8:
            self.ac_vector[3] = 0.3267
            self.ac_vector[4] = 0.8868
            self.ac_vector[5] = 0.3267

        rxperf = self.get_rx_perf_meas()

        if self.store_ctrl_events:
            self.process_received_events(rxperf)
        
        if len(rxperf) != 0:
            rxperf = rxperf[len(rxperf) - 1]
            current_ber = rxperf.ber
            snr_mean = rxperf.snr
            ctf = rxperf.ctf
        else:
            print 'Receiver not running or power constraint is too low for receiver...'
            current_ber = 1
            snr_mean = 0
            ctf = 0

        # this is for the lab exercise special case with only half the subcarriers
        if self.options.lab_special_case:
            nl = range(self.subcarriers/4)
            nr = range(3*self.subcarriers/4,self.subcarriers)
            self.null_indeces = nl + nr
            self.pa_vector = [2]*self.subcarriers
            self.mod_map = [self.modulation]*self.subcarriers
            self.assignment_map = [1]*self.subcarriers
            for x in self.null_indeces:
                self.assignment_map[x] = 0
                self.mod_map[x] = 0
                self.pa_vector[x] = 0
        else:
            self.pa_vector = [1.0] * self.subcarriers
            self.mod_map = [self.modulation] * self.subcarriers
            self.assignment_map = [1] * self.subcarriers

        self.tx_amplitude = self.constraint

        frame_length_samples = 12 * self.block_length  # FIXME constant
        bits_per_frame = self.modulation * self.subcarriers * 9  # FIXME constant
        frame_duration = frame_length_samples / self.bandwidth
        self.data_rate = bits_per_frame / frame_duration

        c_ber = max(current_ber, 1e-7)

        snr_mean_lin = 10 ** (snr_mean / 10.0)
        print 'Current SNR:', snr_mean
        print 'Current BER:', c_ber
        snr_func_lin = 2.0 * erfcinv(c_ber) ** 2.0
        snr_func = 10 * log10(snr_func_lin)
        print 'Func. SNR:', snr_func
        delta = self._delta = snr_mean_lin / snr_func_lin
        print 'Current delta', delta
        self._agg_rate = 2
Example #50
# Perform chi-squared fit to determine best-fit amplitude to HD curve
hc_sqr = np.sum(crosspower*hdcoeff / (crosspowererr*crosspowererr)) / \
            np.sum(hdcoeff*hdcoeff / (crosspowererr*crosspowererr))

hc_sqrerr = 1.0 / np.sqrt(np.sum(hdcoeff * hdcoeff / (crosspowererr * crosspowererr)))

# get reduced chi-squared value
chisqr = np.sum(((crosspower - hc_sqr*hdcoeff) / crosspowererr)**2)
redchisqr = np.sum(chisqr) / len(crosspower)


print 'Results of Search\n'

print '------------------------------------\n'

print 'A_gw^2 = {0}'.format(hc_sqr)
print 'std. dev. = {0}'.format(hc_sqrerr)
print 'Reduced Chi-squared = {0}'.format(redchisqr)

up = np.sqrt(hc_sqr + np.sqrt(2)*hc_sqrerr*ss.erfcinv(2*(1-0.95)))
print '2-sigma upper limit based on chi-squared fit is A_gw < {0}'.format(up)

Example #51
                    if 'IMH' in data_tag or 'IH' in data_tag:
                        hypo_tag = 'hypo_NMH'
                    try:
                        res = indict['results'][data_tag][hypo_tag][0]
                    except:
                        res = indict['results'][data_tag][hypo_tag]
                    chi2 = res['chisquare'][0]
                    free_chi2s_livetime[data_tag][livetime]['false_h_best'].append(chi2)

    # Calculate significance

    for data_tag in free_chi2s_livetime.keys():
        num = free_chi2s_livetime[data_tag][livetime]['true_h_fiducial'][0]+free_chi2s_livetime[data_tag][livetime]['false_h_best'][0]
        denom = np.sqrt(8) * np.sqrt(free_chi2s_livetime[data_tag][livetime]['false_h_best'][0])
        alpha = 0.5 * math.erfc(num/denom)
        n = math.sqrt(2.0)*special.erfcinv(2.0*alpha)
        free_significances[data_tag].append(n)

    # Get chisquare values for prior octant true_h_fiducial distributions
    for trueinfile in sorted(os.listdir(prior_true_h_fid_dir)):
        if os.path.isfile(prior_true_h_fid_dir+trueinfile):
            indict = from_json(prior_true_h_fid_dir+trueinfile)
            livetime_val = indict['template_settings']['params']['livetime']['value']
            if livetime_val == livetime:
                for data_tag in indict['results'].keys():
                    if 'NMH' in data_tag or 'NH' in data_tag:
                        hypo_tag = 'hypo_IMH'
                    if 'IMH' in data_tag or 'IH' in data_tag:
                        hypo_tag = 'hypo_NMH'
                    try:
                        res = indict['results'][data_tag][hypo_tag][0]
Example #52
def my_norminv(p,mu,sigma):
    
    x0 = -sqrt(2)*erfcinv(2*p)
    x = sigma*x0 + mu
    
    return x
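
A usage check (assuming the imports the snippet relies on, sqrt from math and erfcinv from scipy.special, are in scope):

from math import sqrt
from scipy.special import erfcinv

print(round(my_norminv(0.975, 0.0, 1.0), 3))  # ~1.96, the two-sided 5% quantile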
Example #53
def plotErfcProfiles():
    ############## chi vs. erfc profile (using interpolated chist) #############
    from scipy.special import erfcinv
    fig, ax = [], []
    for i in range(4):
        fig.append( plt.figure( figsize=figTiny ) )
        ax.append( fig[-1].add_subplot( 111,
            xlabel = r"Mischungsbruch $Z_\mathrm{ULF}$",
            ylabel = r"Skalare Dissipationsrate $\chi / s^{-1}$",
            xlim   = [0,1]
        ) )
    ax[0].plot( [0,0], 'k--', label=r'analytisch' )
    ax[1].plot( [0,0], 'k--', label=r'analytisch$\cdot 1.2$' )
    ax[2].plot( [0,0], 'k--', label=r'analytisch' )
    ax[3].plot( [0,0], 'r-' , label=r'analytisch$\cdot 1.2$' )
    ax[3].plot( [0,0], 'b-' , label=r'analytisch' )
    iComp = abs( chist_intp_list - 1.5 ).argmin()

    counter = -1
    for i in iSelected:
        counter += 1
        data, hdict = readUlfFile( 'results/v_'+str(v_list[i]) )
        ZUlf = data[:,hdict["Z"]]

        # simulated / experimental chi
        for j in range(len(ax)):
            if j==3:
                continue
            ax[j].plot( ZUlf, calcChi(data,hdict), '.', color=colors[counter],
                        label=chist_sr[i] )
        # chi-erfc-profile
        chianal = chist_list[i]*np.exp(2.*( erfcinv(2.*Zstanal)**2 -
                                            erfcinv(2.*ZUlf   )**2 ))
        ax[0].plot( ZUlf, chianal, '--', color=colors[counter] )
        # chi-1.2*erfc-profile
        ax[1].plot( ZUlf, 1.2*chianal, '--', color=colors[counter] )
        # chi_interpolated-erfc-profile
        ax[2].plot( ZUlf, chianal / chist_list[i] * chist_intp_list[i], '--', color=colors[counter] )

    for i in [iComp]:
        counter += 1
        data, hdict = readUlfFile( 'results/v_'+str(v_list[i]) )
        ZUlf = data[:,hdict["Z"]]

        # chi-erfc-profile
        chianal = chist_list[i]*np.exp(2.*( erfcinv(2.*Zstanal)**2 -
                                            erfcinv(2.*ZUlf   )**2 ))

        ax[3].plot( ZUlf, 1.2*chianal, 'r-' )
        ax[3].plot( ZUlf, chianal / chist_list[i] * chist_intp_list[i], 'b-' )

        # simulated / experimental chi
        ax[3].plot( ZUlf, calcChi(data,hdict), 'k.', markersize=2.0,
                    label=chist_sr[i] )

    #ax[3].set_title( chist_sr[iComp] )
    ax[3].set_xlim([0,0.1]) # roughly 2*Zstanal as upper limit
    ax[3].set_ylim( [0,5] )
    ax[3].plot( [Zstanal,Zstanal], ax[3].get_ylim(), 'k--' , label=r"$Z_\mathrm{stoch}$" )

        #chianal = strainrate_list[i]/np.pi*np.exp(-2.*erfcinv(2.*ZUlf)**2)
    filenames = [
        "chianal"     , # 1
        "chianal1.2"  , # 2
        "chianal-intp", # 3
        "chianal-zoom"  # 4
    ]
    for i in range(len(fig)):
        finishPlot( fig[i], ax[i], filenames[i] )
Example #54
    ax3.set_ylim(ymax=140)
    ax3.minorticks_on()
    extra = ax3.set_ylabel(r'number of pulsar pairs')
    ax3.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
        nbins=8, integer=False, prune='both'))

    #plt.tight_layout(pad=0.3, rect=[0, 0, 1.3, 1])
    f.subplots_adjust(hspace=0.0)


    #plt.errorbar(xi*180/np.pi, rho, sig, fmt='.')
    #plt.xlabel('Angular Separation [degrees]')
    #plt.ylabel('Correlation Coefficient')
    plt.savefig(outdir+'/hd.png', bbox_inches='tight')
    print 'A_gw = {0}'.format(np.sqrt(Opt))
    print 'A_95 = {0}'.format(np.sqrt(Opt + np.sqrt(2)*Sig*ss.erfcinv(2*(1-0.95))))
    print 'SNR = {0}'.format(Opt/Sig)
    x = {}
    x['A_gw'] = np.sqrt(Opt)
    x['A_95'] = np.sqrt(Opt + np.sqrt(2)*Sig*ss.erfcinv(2*(1-0.95)))
    x['SNR'] = Opt/Sig
    with open(outdir + '/os_out.json', 'w') as f:
        json.dump(x, f)

    
elif args.pipeline == 'Fstat':
    from scipy.interpolate import interp1d

    # call likelihood once to set noise
    model.mark6LogLikelihood(p0, incCorrelations=False, incJitter=incJitterEquad,
                             varyNoise=True, fixWhite=False)
Example #55
 def pair_extent(self, cnum1, cnum2, thresh=1e-8):
     # parenthesised so that both shells are required to be uncontracted
     if not (self.shell_nprimative[cnum1] == 0 and self.shell_nprimative[cnum2] == 0):
         raise NotImplementedError("Contracted extents not implemented")
     exp1 = self.shells[cnum1].exp(0)
     exp2 = self.shells[cnum2].exp(0)
     return math.sqrt(2/(exp1+exp2))*erfcinv(thresh)
Example #56
  def margin_adaptive_policy(self,sinr_sc,cur_tx_power,cur_bit_constraint,snr_db):
    ber = max(self.required_ber,1e-7)

    snrf = (2)*(erfcinv(ber)**2.0)   #*3.2
    snr_corr = snrf*self._delta

    #gamma = (2.0/3.0)*(erfcinv(ber)**2.0)   #*3.2

    #gamma = snr_corr/3.0    #*3.2
    gamma = snr_corr/(2**(self._agg_rate)-1)

    print "required ber",ber
    print "snr gap (dB) for req. ber",10*log10(gamma)

    N = self.subcarriers

    (b,e) = levin_campello_margin(self.mod_map, N, cur_bit_constraint,
                           snr_db ,sinr_sc, gamma, cur_tx_power)

    b = numarray.array(b)
    e = numarray.array(e)
    a = numarray.array(zeros(len(self.assignment_map)))

    if sum(b < 0) > 0:
      print "WARNING: bit loading < 0"
      b[b < 0] = 0

    a[b > 0] = 1

    txpow = sum(e)
    e = e / txpow * N

    print "txpow", txpow
    print "tx amplitude",sqrt(txpow)
    print numarray.array(map(lambda x: "%.2f" % (x), e))
    print numarray.array(map(lambda x: "%d" % (x),b))

    #return

    if self.options.usrp2:
        self.tx_amplitude = sqrt(txpow)*self.scale
    else:
        self.tx_amplitude = sqrt(txpow)
        
    self.mod_map = list(b)
    self.pa_vector = list(e)
    self.assignment_map = list(a)

    frame_length_samples = 12*self.block_length # FIXME constant
    bits_per_frame = sum(b)*9                   # FIXME constant
    frame_duration = frame_length_samples/self.bandwidth
    self.data_rate = bits_per_frame/frame_duration
    print "Datarate",self.data_rate

        ####New adaptation -> Experimental ########################
    # Calculating the aggregate rate per used subcarrier
    agg_rate =self._agg_rate = sum(b)/sum(a)
    print "Aggregate rate:", agg_rate

    rxperf = self.get_rx_perf_meas()
    if len(rxperf) == 0:
      return

    rxperf = rxperf[len(rxperf)-1]
    current_ber = rxperf.ber
    snr_mean = rxperf.snr
    #ctf = rxperf.ctf
    
    sinr_sc_wpil = rxperf.est_sinr_sc
    
    #self.bh = default_block_header
    
    
    sinr_sc = [0]*self.subcarriers
    i = 0
    for x in range(len(sinr_sc_wpil)):
      if not x in self.shifted_pilots:
        sinr_sc[i] = sinr_sc_wpil[x]
        i=i+1

    
    sinr_sc_lin = 10**(numarray.array(sinr_sc)/10.0)
    
    #Taking care of only used subcarriers
    str_corr = sum(sinr_sc_lin*a)/sum(a) #Improve lin <-> square
    print"STR CORR:", 10*log10(str_corr)

    ##
    c_ber = max(current_ber, 1e-7)
    snr_mean_lin = 10**(snr_mean/10.0)
    print "Current SNR:", snr_mean
    print "Current BER:", c_ber
    snr_func_lin = 2.0*(erfcinv(c_ber)**2.0)
    snr_func = 10*log10(snr_func_lin)
    print "Func. SNR:", snr_func
    #delta = self._delta = snr_mean_lin/snr_func_lin*str_corr
    delta = self._delta = str_corr/snr_func_lin
    print "Current delta", delta
Example #57

smooth_term   = np.sqrt((2.0*num_smooth)/(num_smooth*num_smooth))
featured_term = np.sqrt((2.0*num_featured)/ (num_featured*num_featured))

cval_smooth    = dist_smooth/smooth_term
cval_smooth_nb = dist_smooth_nb/smooth_term
cval_featured    = dist_featured/featured_term
cval_featured_nb = dist_featured_nb/featured_term

p_smooth    = special.kolmogorov(cval_smooth)
p_smooth_nb = special.kolmogorov(cval_smooth_nb)
p_featured    = special.kolmogorov(cval_featured)
p_featured_nb = special.kolmogorov(cval_featured_nb)

sigma_smooth    = special.erfcinv(p_smooth)*np.sqrt(2.)
sigma_smooth_nb = special.erfcinv(p_smooth_nb)*np.sqrt(2.)
sigma_featured    = special.erfcinv(p_featured)*np.sqrt(2.)
sigma_featured_nb = special.erfcinv(p_featured_nb)*np.sqrt(2.)


c_2sig = 1.36
dcrit_smooth_2sig   = c_2sig*smooth_term
dcrit_featured_2sig = c_2sig*featured_term

c_3sig = 1.63
dcrit_smooth_3sig   = c_3sig*smooth_term
dcrit_featured_3sig = c_3sig*featured_term


print("Smooth: %.2f effective objects" % num_smooth )
Example #58
  def work(self):
    self.query_sounder()
    print self.ac_vector
    
    self.ac_vector = [0.0+0.0j]*self.ac_vlen
    if self.ac_vlen >= 8:
      self.ac_vector[0] = (2*10**(-0.452))
      self.ac_vector[3] = (10**(-0.651))
      self.ac_vector[7] = (10**(-1.151))
      print self.ac_vector

    rxperf = self.get_rx_perf_meas()
    if len(rxperf) == 0:
      return
  
    if self.store_ctrl_events:
      self.process_received_events(rxperf)
      
      
    if self.options.automode:
#      self.logger.warning("Automatic Measurement mode activated")

      self.logger.debug("Auto state is %d"%(self.auto_state))
      if self.auto_state == 0:

        self.state1_cntr = 0
        self.strategy_mode = ofdm_ti.PA_Ctrl.reset
        self.constraint = 2000
        self.required_ber = 1e-3
        self.auto_state = 1
      elif self.auto_state == 1:
        self.state1_cntr += 1
        if self.state1_cntr > 10:
          self.strategy_mode = ofdm_ti.PA_Ctrl.rate_adaptive
          self.constraint = 4000
          self.auto_state = 2
          self.store_ctrl_events = True
          self.start_measurement()
      elif self.auto_state == 2:
        if len(self.bervec) > 20000:
          self.end_measurement()
          self.store_ctrl_events = False
          self.auto_state = 0  

    rxperf = rxperf[len(rxperf)-1]
    current_ber = rxperf.ber
    snr_mean = rxperf.snr
    #ctf = rxperf.ctf
    sinr_sc_wpil = rxperf.est_sinr_sc
    
    #self.bh = default_block_header
    
    
    sinr_sc = [0]*self.subcarriers
    i = 0
    for x in range(len(sinr_sc_wpil)):
      if not x in self.shifted_pilots:
        sinr_sc[i] = sinr_sc_wpil[x]
        i=i+1
    
    
    
    #sinr_sc_lin = 10**(numarray.array(sinr_sc)/10.0)
    #print "Current LINEAR_SINR_PER_SC:", sinr_sc_lin

    print "Received performance measure estimate:"
    print repr(rxperf)
    print "======================================"

    if self.options.usrp2:
        cur_tx_power = (self.tx_amplitude*self.scale)**2
    else:
        cur_tx_power = self.tx_amplitude**2 
    #cur_tx_constraint = self.constraint**2  # dito

    # Input:
    #  self.required_ber
    #  self.constraint
    #  self.current_ber
    #  self.ac_vector (if sounder connected)

    if self.is_reset_mode():
      print "Current mode is reset mode"
      self.pa_vector = [1.0]*self.subcarriers
      self.mod_map = [2]*self.subcarriers
      self.assignment_map = [1] * self.subcarriers
      
      if self.options.usrp2:
        self.tx_amplitude = self.scale*self.constraint
      else:
        self.tx_amplitude = self.constraint

      frame_length_samples = 12*self.block_length # FIXME constant
      bits_per_frame = 2*self.subcarriers*9       # FIXME constant
      frame_duration = frame_length_samples/self.bandwidth
      self.data_rate = bits_per_frame/frame_duration

      ###################################
      c_ber = max(current_ber, 1e-7)

      snr_mean_lin = 10**(snr_mean/10.0)
      print "Current SNR:", snr_mean
      print "Current BER:", c_ber
      snr_func_lin = 2.0*(erfcinv(c_ber)**2.0)# ??????
      snr_func = 10*log10(snr_func_lin)
      print "Func. SNR:", snr_func
      delta = self._delta = snr_mean_lin/snr_func_lin
      print "Current delta", delta
      self._agg_rate = 2


      #################################

      pass
    elif self.is_margin_adaptive_policy():
      print "Current mode is margin adaptive mode"
      cur_bit_constraint = ceil(self.constraint*self.block_length/self.bandwidth*12/9)
      self.margin_adaptive_policy(sinr_sc, cur_tx_power, cur_bit_constraint, snr_mean)
      #pass
    elif self.is_rate_adaptive_policy():
      print "Current mode is rate adaptive mode"
      cur_tx_constraint = self.constraint**2
      self.rate_adaptive_policy(sinr_sc, cur_tx_power, cur_tx_constraint, snr_mean)

    if self.auto_state == 2 and self.options.automode:
      self.logger.debug("####################### Already collected %d ################################"%(len(self.bervec)))
Example #59
    def synchronizeAnalysis(self):
        """ Method to analyse the synchronization of recieved responses. """
        self.logger.debug("Entering synchronizeAnalysis")

        measurement_file = "../test data/120215_asphalt.db"

        alpha = self.loadAbsorptionCoefficient(measurement_file)

        sample_rate = 44100.0

        gen_signal = alpha.generator_signals[0]
        mic_signal = alpha.microphone_signals[0]

        # Show Generator Impulse
        generator_impulse = gen_signal[19375:19450]

        fig = figure()
        ax = fig.add_subplot(111)
        ax.axhline(y=0, linestyle="-", color="black", linewidth=1)
        ax.plot(generator_impulse)
        ax.set_xlabel("Samples")
        ax.set_ylabel("Amplitude")
        ax.text(26, 0.22, "pre-ringing", ha="center", va="bottom", size=10)
        ax.annotate("", xy=(19, 0), xycoords="data", xytext=(26, 0.2),
                    arrowprops=dict(arrowstyle="->"))
        ax.annotate("", xy=(33, 0.08), xycoords="data", xytext=(26, 0.2),
                    arrowprops=dict(arrowstyle="->"))

        peak_y = max(generator_impulse)
        peak_x = where(generator_impulse == peak_y)[0][0]

        ax.annotate("(%d, %.2f)" % (peak_x, peak_y), xy=(peak_x, peak_y),
                    xycoords="data", xytext=(peak_x + 2, peak_y + 0.1),
                    arrowprops=dict(arrowstyle="->"))
        line = Line2D([19, 19], [0, 0.2], color="black", linestyle="--", lw=1)
        ax.add_line(line)
        line = Line2D([33, 33], [0, 0.2], color="black", linestyle="--", lw=1)
        ax.add_line(line)

        ax.set_xlim([0, 70])
        ax.yaxis.set_label_coords(-0.1, 0.5)
        savefig("Analysis/Images/generator_impulse_with_preringing.eps")
        # Show Generator Impulse with a Phase Shift
        cla()
        generator_impulse = hilbert(generator_impulse)
        ax = fig.add_subplot(111)
        ax.axhline(y=0, linestyle="-", color="black", linewidth=1)
        ax.plot(generator_impulse)
        ax.set_xlabel("Samples")
        ax.set_ylabel("Amplitude")

        peak_y = max(generator_impulse)
        peak_x = where(generator_impulse == peak_y)[0][0]

        ax.annotate("(%d, %.2f)" % (peak_x, peak_y), xy=(peak_x, peak_y),
                    xycoords="data", xytext=(peak_x + 2, peak_y + 0.1),
                    arrowprops=dict(arrowstyle="->"))
        ax.set_xlim([0, 70])
        ax.yaxis.set_label_coords(-0.1, 0.5)
        savefig("Analysis/Images/generator_impulse_phase_shifted.eps")

        # Show the Microphone Impulse Response
        mic_impulse = mic_signal[19470:20427]

        cla()
        ax.axhline(y=0, linestyle="-", color="black", linewidth=1)
        ax = fig.add_subplot(111)
        ax.plot(mic_impulse)

        ax.text(50, 0.01, "onset", ha="center", size=10)
        ax.annotate("", xy=(73, 0),
                    xycoords="data", xytext=(50, 0.01),
                    arrowprops=dict(arrowstyle="->"))
        ax.text(70, -0.019, "19 samples", ha="right", va="bottom", size=10)
        ax.annotate("", xy=(92, -0.02), xycoords="data", xytext=(115, -0.02),
                    arrowprops=dict(arrowstyle="->"))
        ax.annotate("", xy=(73, -0.02), xycoords="data", xytext=(50, -0.02),
                    arrowprops=dict(arrowstyle="->"))

        line = Line2D([73, 73], [0, -0.035], color="black", linestyle="--",
                        lw=1)
        ax.add_line(line)
        line = Line2D([92, 92], [0, -0.035], color="black", linestyle="--",
                        lw=1)
        ax.add_line(line)

        ax.set_xlim([0, 300])
        ax.set_xlabel("Samples")
        ax.set_ylabel("Amplitude")
        ax.yaxis.set_label_coords(-0.1, 0.5)
        ax.yaxis.set_ticks(arange(-0.04, 0.04, 0.02))
        savefig("Analysis/Images/microphone_impulse.eps")

        # Plot the Difference Microphone Response
        mic_impulse = abs((mic_impulse[1:] - mic_impulse[:-1]))
        d_mic = mic_signal[1:] - mic_signal[:-1]
        mic_noise = d_mic[abs(d_mic) > 0][:1000]
        max_noise = max(abs(mic_noise))
        std_noise = std(abs(mic_noise))

        mic_threshold = max_noise + 2.5 * std_noise
        onset = where(mic_impulse > mic_threshold)[0][0] - 1
        print onset
        cla()
        ax.axhline(y=0, linestyle="-", color="black", linewidth=1)
        ax.axhline(y=mic_threshold, linestyle="--", color="black", lw=1)
        ax.axvline(x=onset, linestyle="-.", color="grey", lw=1)
        ax = fig.add_subplot(111)
        ax.plot(mic_impulse)
        ax.set_xlim([0, 300])
        ax.text(30, 0.001, "onset at 73", ha="center", size=10)
        ax.annotate("", xy=(73, mic_impulse[onset]),
                    xycoords="data", xytext=(30, 0.001),
                    arrowprops=dict(arrowstyle="->"))
        ax.text(30, mic_threshold + 0.0001, "threshold", ha="center", size=10)
        xlabel("Samples")
        ylabel("Amplitude")
        ax.yaxis.set_label_coords(-0.1, 0.5)
        ax.yaxis.set_ticks(arange(0, 0.008, 0.002))
        savefig("Analysis/Images/onset_detection.eps")

        # Extreme Value Distribution
        from scipy.stats import norm
        from scipy.special import erf, erfc, erfcinv
        cla()
        # inverse normal CDF; note erfcinv (erf alone does not invert the CDF)
        icdf = lambda x: -sqrt(2) * erfcinv(2 * x)

        n = 10
        alpha = icdf(1 - 1 / (n * exp(1)))
        beta = icdf(1 - 1 / n)

        x = arange(-10, 30, 0.1)
        evd = (1 / beta) * exp(-(x - alpha) / beta) * exp(-exp(-(x - alpha) / beta))
        plot(x, evd)
        xlabel("Maximum Value")
        ylabel("Probability")
        savefig("Analysis/Images/extreme_value_distribution.eps")
        # Mean Extreme Value
        cla()

        gamma = 0.57721566490153286060651209008240243104215933593992
        M = lambda n: sqrt(2) * ((-1 + gamma) * (erfcinv(2 - 2 / float(n))) - gamma * erfcinv(2 - 2 / (n * exp(1))))
        n = range(2, 1000)
        mean_max = [M(_) for _ in n]
        plot(n, mean_max)
        xlabel("Samples")
        ylabel("Expected Maximum Value")
        savefig("Analysis/Images/expected_maximum_value.eps")

        cla()
        eps = finfo(float).eps
        N = 1000
        multiplier = arange(0, 5, 0.1)
        samples = ((1 - norm.cdf(M(N) + multiplier)) ** -1) / 44100.0

        semilogy(multiplier, samples)

        samples_1 = ((1 - norm.cdf(M(N) + 1)) ** -1) / 44100.0
        samples_25 = ((1 - norm.cdf(M(N) + 2.5)) ** -1) / 44100.0

        line = Line2D([1, 1], [eps, samples_1], color="black", linestyle="-.", lw=1)
        ax.add_line(line)
        line = Line2D([0, 1], [samples_1, samples_1], color="black", linestyle="-.",
                        lw=1)
        ax.add_line(line)
        ax.text(0.5, samples_1 + 1, "39 seconds", ha="center", va="bottom", size=10)

        line = Line2D([2.5, 2.5], [eps, samples_25], color="black", linestyle="-.", lw=1)
        ax.add_line(line)
        line = Line2D([0, 2.5], [samples_25, samples_25], color="black", linestyle="-.",
                        lw=1)
        ax.add_line(line)
        ax.text(1.25, samples_25 + 5000, "62 hours", ha="center", va="bottom", size=10)

        xlabel("Multiplier")
        ylabel("Seconds")
        savefig("Analysis/Images/maximum_value_probability_multiplier.eps")