Example #1
import numpy as np
from scipy.stats import gamma


def Reff(data, si_mean, si_sd, tau=7, conf=0.95, mu=5):
    """ effective reproduction number
    assuming exponential distribution for prior Reff
    input:
      data = daily number of incidence
      si_mean = mean of serial interval
      si_sd = standard deviation of serial interval
      tau = length of time window (integer in days)
      conf = confidence level of estimated Reff
      mu = mean of prior ditribution of Reff
    return:
      R = daily Reff of shape (3,len(data))
      R[0:3] = median, min, max
    reference:
      A. Cori et al
        American Journal of Epidemiology 178 (2013) 1505
          Web Appendix 1
    """
    N = len(data)
    w = si_distr(N, si_mean, si_sd)            # discretised serial-interval distribution (defined elsewhere)
    L = np.convolve(data, w)[:N]               # total infectiousness Lambda_t
    u = np.ones(tau)                           # sliding window of tau days
    a = 1 + np.convolve(data, u)[:N]           # posterior shape
    b = mu / (1 + mu * np.convolve(L, u)[:N])  # posterior scale
    return np.vstack([gamma.median(a, 0, b), gamma.interval(conf, a, 0, b)])
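
The helper si_distr is not shown on this page. A minimal sketch, assuming it discretises a gamma serial-interval distribution with the given mean and standard deviation over N days, followed by an illustrative call:

def si_distr(N, si_mean, si_sd):
    # hypothetical helper: gamma shape/scale recovered from mean and sd
    k = (si_mean / si_sd) ** 2
    theta = si_sd ** 2 / si_mean
    edges = np.arange(N + 1)
    w = np.diff(gamma.cdf(edges, k, scale=theta))  # daily probability mass
    return w / w.sum()

# illustrative call on synthetic incidence data
data = np.random.poisson(50, size=60)
R = Reff(data, si_mean=4.7, si_sd=2.9, tau=7)
print(R.shape)  # (3, 60): median, lower, upper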
Example #2
    def calibration_test(self, x, y_norm):
        # method of a normal/gamma mixture-density model; the class
        # definition (and the numpy/torch/scipy imports) is not shown here
        mean, var, shape, rate, mixture_var = self(x)
        y = y_norm * self.y_std + self.y_mean  # undo target normalisation

        y_norm = y_norm.detach().numpy()
        y = y.detach().numpy()

        confidence_values = np.expand_dims(np.arange(0.1, 1, 0.1), axis=1)

        norm_lower, norm_upper = norm.interval(
            confidence_values,
            loc=mean.detach().numpy(),
            scale=np.sqrt(var.detach().numpy()),
        )
        gamma_lower, gamma_upper = gamma.interval(confidence_values,
                                                  shape.detach().numpy(),
                                                  scale=1 /
                                                  rate.detach().numpy())

        # each check has shape (n_confidence_levels, n_samples)
        normal_check = np.logical_and(norm_lower < y_norm[np.newaxis, :],
                                      y_norm[np.newaxis, :] < norm_upper)
        gamma_check = np.logical_and(gamma_lower < y[np.newaxis, :],
                                     y[np.newaxis, :] < gamma_upper)

        # per sample, keep the check of the dominant mixture component
        use_gamma = mixture_var.detach().numpy().squeeze() >= 0.5
        output = np.where(use_gamma[np.newaxis, :], gamma_check, normal_check)

        return output
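
A sketch of how the returned indicator array could be turned into a calibration curve (model, x_test and y_test_norm are placeholder names, not from the source):

coverage = model.calibration_test(x_test, y_test_norm).mean(axis=1)
for c, frac in zip(np.arange(0.1, 1.0, 0.1), coverage):
    print(f"nominal {c:.1f} -> empirical {frac:.2f}")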
Example #3
import numpy as np
from scipy.stats import gamma


def calibration_test(y, shape, rate):
    # indicator of whether each y lies inside the central gamma interval
    # at confidence levels 0.1, 0.2, ..., 0.9
    confidence_values = np.expand_dims(np.arange(0.1, 1, 0.1), axis=1)
    lower_bounds, upper_bounds = gamma.interval(confidence_values, shape, scale=1 / rate)
    lower_check = lower_bounds < y[np.newaxis, :]
    upper_check = y[np.newaxis, :] < upper_bounds

    return np.logical_and(lower_check, upper_check).T
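
Continuing the example above, an illustrative check that the empirical coverage matches the nominal levels when the parameters are correct:

y = np.random.gamma(2.0, 3.0, size=1000)  # true shape 2, scale 3
inside = calibration_test(y, shape=np.full(1000, 2.0), rate=np.full(1000, 1 / 3.0))
print(inside.shape)         # (1000, 9) after the transpose
print(inside.mean(axis=0))  # should be close to 0.1, 0.2, ..., 0.9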
Example #4
import numpy as np
from scipy.stats import gamma


def dist_pdf(z, loc, scale, a, tail=0, tail_len=0):
    """
    Separation probability density function for confinement cell modelling.

    Used for modelling neutron reflectometry data collected using the
    confinment cell of Prescott et al. 2016-2020.

    It is produced by the normalised summation of
    a typical gamma distribution (parameterised by shape and scale varaiables)
    and a custom tail distribution (linear decay).

    Parameters
    ----------
    z : np.array
        Seperations at which the function is evaluated.
    loc : float
        The separation at which the PDF starts (i.e. shits PDF to lower /
        higher seperations).
    scale : float
        The width of the gamma-component of the distribution.
    a : float, optional
        The shape of the gamma-component of the distribution. Lower a
        result in 'exponential' PDFs, whilst higher a results in more
        'normal' PDFs.
    tail : float, optional
        The weighting of the tail-component. Higher values result in a
        higher tail. Values of zero result in no tail. The default is 0.
        The default is 0.
    tail_len : float, optional
        The length of the tail-component. The default is 0.

    Returns
    -------
    np.array
        Probability density at separations provided.

    """
    pdf1 = gamma.pdf(z, loc=loc, scale=scale, a=a)

    tpeak = loc + (a - 1) * scale  # mode of the gamma component
    tcut = tail_len + loc + (a - 1) * scale
    tstart = gamma.interval(0.999, loc=loc, scale=scale,
                            a=a)[0]  # lower bound of the central 99.9% interval
    # linear tail: ramps up from tstart to the peak, decays to zero at tcut
    pdf2 = np.ones_like(z)
    pdf2[z < tpeak] = (z[z < tpeak] - tstart) / (tpeak - tstart)
    pdf2[z > tpeak] = (tcut - z[z > tpeak]) / (tcut - tpeak)
    pdf2[z > tcut] = 0
    pdf2[z < tstart] = 0

    pdf = pdf1 + pdf2 * tail

    pdf[z < 100] = 0  # hard cutoff below a separation of 100

    return pdf / np.trapz(pdf, z)  # normalise so the density integrates to 1
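
An illustrative evaluation on a separation grid (the parameter values are made up for the sketch):

z = np.linspace(0, 1500, 3001)
p = dist_pdf(z, loc=150, scale=40, a=3, tail=0.001, tail_len=300)
print(np.trapz(p, z))  # ~1.0 after normalisation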
Example #5
from scipy.stats import gamma


def poisson_interval(mu, cl=0.683):
    # central (Garwood-style) Poisson confidence interval for an observed
    # count mu, returned as (distance below, distance above) for error bars
    return (mu - gamma.interval(cl, mu)[0] if mu > 0. else 0.,
            gamma.interval(cl, mu + 1.)[1] - mu)
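
For example, asymmetric error bars for histogram bin counts:

for n in [0, 3, 12]:
    lo, hi = poisson_interval(n)
    print(f"n={n}: -{lo:.2f} / +{hi:.2f}")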