Example 1
def test():
    from scitbx.random import variate, poisson_distribution
    from libtbx.test_utils import approx_equal

    # Test the expression in dials_regression/doc/notes/scaling/scaling.tex
    # (as of revision 1537) for d<Ih>/dp. Simulate 10 measurements of a
    # reflection with different scales. Here the scale increases linearly between
    # each equally-spaced measurement, but the form of the scale variation
    # doesn't affect the derivative calculation.
    nobs = 10

    # known scale factors
    K = [1 + e / 40.0 for e in range(nobs)]
    g = [1.0 / e for e in K]

    # intensities
    means = [100 * e for e in K]
    I = [variate(poisson_distribution(e))() for e in means]

    # weights (inverse variances of I)
    w = [1.0 / e for e in means]

    mrgI = av_I(I, w, g)

    for iparam in range(nobs):
        dmrgI_dp = grad_av_I(I, w, g, iparam)
        fd_dmrgI_dp = fd_grad_av_I(I, w, g, iparam)

        assert approx_equal(dmrgI_dp, fd_dmrgI_dp)

    # Now test the complete expression for the first derivative of the residuals
    # of the HRS target.
    for iparam in range(nobs):
        dr_dp = grad_r(I, w, g, iparam)
        fd_dr_dp = fd_grad_r(I, w, g, iparam)

        assert approx_equal(dr_dp, fd_dr_dp)
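The helpers av_I, grad_av_I, fd_grad_av_I, grad_r and fd_grad_r are defined elsewhere in the original test module. A minimal sketch of the first three, assuming the standard inverse-variance weighted merged intensity <Ih> = sum(w g I) / sum(w g^2) from the HRS target and a central finite difference for the check (the exact expressions live in the scaling.tex document referenced above); grad_r and fd_grad_r follow the same pattern for the residuals:

def av_I(I, w, g):
    # merged intensity <Ih> = sum(w g I) / sum(w g^2)
    num = sum(wl * gl * Il for wl, gl, Il in zip(w, g, I))
    den = sum(wl * gl * gl for wl, gl in zip(w, g))
    return num / den

def grad_av_I(I, w, g, iparam):
    # analytical d<Ih>/dg_p by the quotient rule
    num = sum(wl * gl * Il for wl, gl, Il in zip(w, g, I))
    den = sum(wl * gl * gl for wl, gl in zip(w, g))
    dnum = w[iparam] * I[iparam]        # d(num)/dg_p
    dden = 2.0 * w[iparam] * g[iparam]  # d(den)/dg_p
    return (dnum * den - num * dden) / den ** 2

def fd_grad_av_I(I, w, g, iparam, delta=1.0e-7):
    # central finite-difference approximation to d<Ih>/dg_p
    gp, gm = list(g), list(g)
    gp[iparam] += delta
    gm[iparam] -= delta
    return (av_I(I, w, gp) - av_I(I, w, gm)) / (2.0 * delta)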
Example 2
def background(image, mean_bg):
    from scitbx.random import variate, poisson_distribution
    dy, dx = image.focus()
    g = variate(poisson_distribution(mean=mean_bg))
    for j in range(dy):
        for i in range(dx):
            image[j, i] += next(g)
    return image
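A usage sketch: build a zeroed scitbx flex image and add Poisson background with a mean of 10 counts per pixel (the tolerance is deliberately loose, just a smoke test):

from scitbx.array_family import flex

image = flex.double(flex.grid(64, 64), 0)
image = background(image, mean_bg=10)
# the sample mean over 64 * 64 pixels should sit close to the Poisson mean
assert abs(flex.mean(image) - 10) < 0.5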
Example 3
def data_for_error_model_test(background_variance=1,
                              multiplicity=100,
                              b=0.05,
                              a=1.0):
    """Model a set of poisson-distributed observations on a constant-variance
    background."""

    # First create a miller array of observations (in asu)
    from cctbx import miller
    from cctbx import crystal
    from dials.array_family import flex  # provides flex.reflection_table

    ms = miller.build_set(
        crystal_symmetry=crystal.symmetry(space_group_symbol="P212121",
                                          unit_cell=(12, 12, 25, 90, 90, 90)),
        anomalous_flag=False,
        d_min=1.0,
    )
    assert ms.size() == 2150
    mean_intensities = 5.0 * (ms.d_spacings().data()**4)
    # ^ get a good range of intensities, with high intensity at low
    # miller index, mean = 285.2, median = 13.4

    # When applying b, use the fact that I' - Imean = alpha * (I - Imean) gives
    # the same distribution as sigma' = alpha * sigma, where
    # alpha = (1 + b^2 * I) ^ 0.5. i.e. this is the way to increase the
    # deviations of I - Imean while keeping the same 'poisson' sigmas, such
    # that the sigmas need to be inflated by the error model with the given a, b.
    import scitbx
    from scitbx.random import variate, poisson_distribution

    # Note: if a and b are both set this is probably not quite right, but it
    # is okay for small a and b for the purposes of a test

    scitbx.random.set_random_seed(0)
    intensities = flex.int()
    variances = flex.double()
    miller_index = flex.miller_index()
    for i, idx in zip(mean_intensities, ms.indices()):
        g = variate(poisson_distribution(mean=i))
        for _ in range(multiplicity):
            intensity = next(g)
            if b > 0.0:
                alpha = (1.0 + (b**2 * intensity))**0.5
                intensities.append(
                    int((alpha * intensity) + ((1.0 - alpha) * i)))
            else:
                intensities.append(intensity)
            variances.append((intensity + background_variance) / (a**2))
            miller_index.append(idx)

    reflections = flex.reflection_table()
    reflections["intensity"] = intensities.as_double()
    reflections["variance"] = variances.as_double()
    reflections["miller_index"] = miller_index
    reflections["inverse_scale_factor"] = flex.double(intensities.size(), 1.0)
    reflections["id"] = flex.int(intensities.size(), 1)

    return reflections
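A usage sketch, assuming the dials flex.reflection_table API used above: each of the 2150 reflections in the asu is simulated multiplicity times, so the returned table has 2150 * multiplicity rows:

refl = data_for_error_model_test(multiplicity=10, b=0.0)
assert refl.size() == 2150 * 10
assert set(refl["id"]) == {1}  # a single dataset id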
Example 4
def exercise_variate_generators():
    import itertools
    import math
    import scitbx.random
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.math import basic_statistics
    from scitbx.random import (variate, normal_distribution,
                               bernoulli_distribution, gamma_distribution,
                               poisson_distribution)
    for i in range(10):
        scitbx.random.set_random_seed(0)
        g = variate(normal_distribution())
        assert approx_equal(g(), -0.917787219374)
        assert approx_equal(
            g(10),
            (1.21838707856, 1.732426915, 0.838038157555, -0.296895169923,
             0.246451144946, -0.635474652255, -0.0980626986425, 0.36458295417,
             0.534073780268, -0.665073136294))

    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 0, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    assert approx_equal(stat.skew, 0, eps=0.005)
    assert approx_equal(stat.kurtosis, 3, eps=0.005)

    bernoulli_seq = variate(bernoulli_distribution(0.1))
    for b in itertools.islice(bernoulli_seq, 10):
        assert b in (True, False)
    bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
    assert approx_equal(bernoulli_sample.count(True) / len(bernoulli_sample),
                        0.1,
                        eps=0.01)

    # Boost 1.64 changed the exponential distribution to use the Ziggurat
    # algorithm (boost_version is defined in the enclosing test module)
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution())
    if boost_version < 106400:
        assert approx_equal(g(), 0.79587450456577546)
        assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
    else:
        assert approx_equal(g(), 0.864758191783)
        assert approx_equal(g(2), (1.36660841837, 2.26740986094))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 1, eps=0.005)
    assert approx_equal(stat.skew, 2, eps=0.01)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution(alpha=2, beta=3))
    assert approx_equal(g(), 16.670850592722729)
    assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 6, eps=0.005)
    assert approx_equal(stat.skew, 2 / math.sqrt(2), eps=0.05)
    assert approx_equal(stat.biased_variance, 18, eps=0.05)

    mean = 10.0
    pv = variate(poisson_distribution(mean))
    draws = pv(1000000).as_double()
    m = flex.mean(draws)
    v = flex.mean(draws * draws) - m * m
    assert approx_equal(m, mean, eps=0.05)
    assert approx_equal(v, mean, eps=0.05)
Example 5
def model_background(shoebox, mean_bg):
  from scitbx.random import variate, poisson_distribution
  dz, dy, dx = shoebox.focus()
  g = variate(poisson_distribution(mean = mean_bg))
  for k in range(dz):
    for j in range(dy):
      for i in range(dx):
        shoebox[k, j, i] += next(g)
  return
Example 6
def model_background(shoebox, mean_bg):
    from scitbx.random import variate, poisson_distribution
    dz, dy, dx = shoebox.focus()
    g = variate(poisson_distribution(mean=mean_bg))
    for k in range(dz):
        for j in range(dy):
            for i in range(dx):
                shoebox[k, j, i] += next(g)
    return
Example 7
def exercise_variate_generators():
  from scitbx.random \
       import variate, normal_distribution, bernoulli_distribution, \
              gamma_distribution, poisson_distribution
  for i in range(10):
    scitbx.random.set_random_seed(0)
    g = variate(normal_distribution())
    assert approx_equal(g(), -1.2780081289048213)
    assert approx_equal(g(10),
      (-0.40474189234755492, -0.41845505596083288,
       -1.8825790263067721, -1.5779112018107659,
       -1.1888174422378859, -1.8619619179878537,
       -0.53946818661388318, -1.2400941724410812,
       0.64511959841907285, -0.59934120033270688))

  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            0, eps=0.005)
  assert approx_equal(stat.biased_variance, 1, eps=0.005)
  assert approx_equal(stat.skew,            0, eps=0.005)
  assert approx_equal(stat.kurtosis,        3, eps=0.005)

  bernoulli_seq = variate(bernoulli_distribution(0.1))
  for b in itertools.islice(bernoulli_seq, 10):
    assert b in (True, False)
  bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
  assert approx_equal(
    bernoulli_sample.count(True)/len(bernoulli_sample),
    0.1,
    eps = 0.01)

  scitbx.random.set_random_seed(0)
  g = variate(gamma_distribution())
  assert approx_equal(g(), 0.79587450456577546)
  assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            1, eps=0.005)
  assert approx_equal(stat.skew,            2, eps=0.005)
  assert approx_equal(stat.biased_variance, 1, eps=0.005)
  scitbx.random.set_random_seed(0)
  g = variate(gamma_distribution(alpha=2, beta=3))
  assert approx_equal(g(), 16.670850592722729)
  assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            6, eps=0.005)
  assert approx_equal(stat.skew,            2/math.sqrt(2), eps=0.05)
  assert approx_equal(stat.biased_variance, 18, eps=0.05)

  mean = 10.0
  pv = variate(poisson_distribution(mean))
  draws = pv(1000000).as_double()
  m = flex.mean(draws)
  v = flex.mean(draws*draws) - m*m
  assert approx_equal(m,mean,eps=0.05)
  assert approx_equal(v,mean,eps=0.05)
Example 8
def simulate(n, size):
    from scitbx.array_family import flex
    from scitbx.random import variate, poisson_distribution

    shoeboxes = []

    B = 10

    # Generate shoeboxes with uniform random background
    for _ in range(n):
        sbox = flex.double(flex.grid(size), 0)
        g = variate(poisson_distribution(mean=B))
        for k in range(size[0]):
            for j in range(size[1]):
                for i in range(size[2]):
                    sbox[k, j, i] += next(g)
        shoeboxes.append(sbox)

    # Calculate the Intensity (should be zero)
    import random

    I_cal = []
    mean = []
    for i in range(len(shoeboxes)):
        nn = len(shoeboxes[i])
        mm = int(1.0 * nn)
        index = flex.size_t(random.sample(range(nn), mm))
        assert len(set(index)) == mm
        data = shoeboxes[i].select(index)
        II = flex.sum(data)
        # II = flex.mean(data)
        BB = mm * B
        # BB = B
        I_cal.append(II - BB)
        m = flex.mean(data)
        mean.append(m)
    I_cal = flex.double(I_cal)

    mv = flex.mean_and_variance(flex.double(mean))
    print(mv.mean() - B, mv.unweighted_sample_variance())
    v1 = B / (size[0] * size[1] * size[2])
    v2 = B * (size[0] * size[1] * size[2])
    print(v1)
    print(v2)
    print(I_cal[0])

    from math import sqrt

    Z = (I_cal - 0) / sqrt(v2)

    # Return the mean and variance of Z
    mv = flex.mean_and_variance(Z)
    return mv.mean(), mv.unweighted_sample_variance()
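Each shoebox sums npix independent Poisson(B) pixels, so the background-subtracted sum I_cal has mean 0 and variance v2 = B * npix, and Z should be approximately standard normal. A quick check with loose tolerances (n=100 and size=(5, 5, 5) are arbitrary choices for this sketch; the function also prints its diagnostics):

mean_Z, var_Z = simulate(100, (5, 5, 5))
assert abs(mean_Z) < 0.5        # Z centred on zero
assert abs(var_Z - 1.0) < 0.5   # with roughly unit variance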
Example 9
def exercise_variate_generators():
    from scitbx.random \
         import variate, normal_distribution, bernoulli_distribution, \
                gamma_distribution, poisson_distribution
    for i in range(10):
        scitbx.random.set_random_seed(0)
        g = variate(normal_distribution())
        assert approx_equal(g(), -1.2780081289048213)
        assert approx_equal(
            g(10),
            (-0.40474189234755492, -0.41845505596083288, -1.8825790263067721,
             -1.5779112018107659, -1.1888174422378859, -1.8619619179878537,
             -0.53946818661388318, -1.2400941724410812, 0.64511959841907285,
             -0.59934120033270688))

    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 0, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    assert approx_equal(stat.skew, 0, eps=0.005)
    assert approx_equal(stat.kurtosis, 3, eps=0.005)

    bernoulli_seq = variate(bernoulli_distribution(0.1))
    for b in itertools.islice(bernoulli_seq, 10):
        assert b in (True, False)
    bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
    assert approx_equal(bernoulli_sample.count(True) / len(bernoulli_sample),
                        0.1,
                        eps=0.01)

    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution())
    assert approx_equal(g(), 0.79587450456577546)
    assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 1, eps=0.005)
    assert approx_equal(stat.skew, 2, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution(alpha=2, beta=3))
    assert approx_equal(g(), 16.670850592722729)
    assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 6, eps=0.005)
    assert approx_equal(stat.skew, 2 / math.sqrt(2), eps=0.05)
    assert approx_equal(stat.biased_variance, 18, eps=0.05)

    mean = 10.0
    pv = variate(poisson_distribution(mean))
    draws = pv(1000000).as_double()
    m = flex.mean(draws)
    v = flex.mean(draws * draws) - m * m
    assert approx_equal(m, mean, eps=0.05)
    assert approx_equal(v, mean, eps=0.05)
Example 10
def test():
  from scitbx.array_family import flex
  from scitbx.random import variate, poisson_distribution

  numbers = variate(poisson_distribution(mean = 1000))
  data = flex.double()
  for j in range(1000):
    data.append(next(numbers))

  _x, _y = npp_ify(data)
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()

  _x, _y = npp_ify(data, input_mean_variance=(1000, 1000))
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()
Example 11
File: NPP.py Project: xia2/xia2
def test():
  numbers = variate(poisson_distribution(mean = 1000))
  data = flex.double()
  for j in range(1000):
    data.append(next(numbers))

  _x, _y = npp_ify(data)
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()

  _x, _y = npp_ify(data, input_mean_variance=(1000, 1000))
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()
Example 12
def simulate(n, size):
  from scitbx.array_family import flex
  from scitbx.random import variate, poisson_distribution
  shoeboxes = []

  B = 10

  # Generate shoeboxes with uniform random background
  for _ in range(n):
    sbox = flex.double(flex.grid(size), 0)
    g = variate(poisson_distribution(mean = B))
    for k in range(size[0]):
      for j in range(size[1]):
        for i in range(size[2]):
          sbox[k, j, i] += next(g)
    shoeboxes.append(sbox)

  # Calculate the Intensity (should be zero)
  import random
  I_cal = []
  mean = []
  for i in range(len(shoeboxes)):
    nn = len(shoeboxes[i])
    mm = int(1.0 * nn)
    index = flex.size_t(random.sample(range(nn), mm))
    assert len(set(index)) == mm
    data = shoeboxes[i].select(index)
    II = flex.sum(data)
    #II = flex.mean(data)
    BB = mm * B
    #BB = B
    I_cal.append(II - BB)
    m = flex.mean(data)
    mean.append(m)
  I_cal = flex.double(I_cal)

  mv = flex.mean_and_variance(flex.double(mean))
  print(mv.mean() - B, mv.unweighted_sample_variance())
  v1 = B / (size[0] * size[1] * size[2])
  v2 = B * (size[0] * size[1] * size[2])
  print(v1)
  print(v2)
  print(I_cal[0])

  from math import sqrt
  Z = (I_cal - 0) / sqrt(v2)


  # Return the mean and variance of Z
  mv = flex.mean_and_variance(Z)
  return mv.mean(), mv.unweighted_sample_variance()
Example 13
def random_background_plane2(sbox, a, b, c, d):
    """Draw values from Poisson distribution for each position where the mean for
    that distribition is equal to a + b * i + c * j + d * k where a, b, c, d are
    floating point values and i, j, k are the shoebox indices in directions x, y
    and z respectively."""

    from scitbx.random import poisson_distribution, variate

    dz, dy, dx = sbox.focus()

    if b == c == d == 0.0:
        g = variate(poisson_distribution(mean=a))
        for k in range(dz):
            for j in range(dy):
                for i in range(dx):
                    sbox[k, j, i] += next(g)
    else:
        for k in range(dz):
            for j in range(dy):
                for i in range(dx):
                    pixel = a + b * (i + 0.5) + c * (j + 0.5) + d * (k + 0.5)
                    g = variate(poisson_distribution(mean=pixel))
                    sbox[k, j, i] += next(g)
    return
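A usage sketch: averaging the plane over the pixel centres gives an expected mean of a + b * dx / 2 + c * dy / 2 + d * dz / 2, so the sample mean of the filled shoebox should land near that value (loose Poisson tolerance):

from scitbx.array_family import flex

sbox = flex.double(flex.grid(4, 8, 8), 0)  # dz, dy, dx
random_background_plane2(sbox, a=10, b=0.5, c=0.5, d=1.0)
dz, dy, dx = sbox.focus()
expected = 10 + 0.5 * dx / 2.0 + 0.5 * dy / 2.0 + 1.0 * dz / 2.0
assert abs(flex.mean(sbox) - expected) < 1.0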
Example 14
def random_background_plane2(sbox, a, b, c, d):
  '''Draw values from a Poisson distribution at each position, where the mean
  of that distribution is equal to a + b * i + c * j + d * k; a, b, c, d are
  floating point values and i, j, k are the shoebox indices in the x, y and z
  directions respectively.'''

  from scitbx.random import variate, poisson_distribution

  dz, dy, dx = sbox.focus()

  if b == c == d == 0.0:
    g = variate(poisson_distribution(mean = a))
    for k in range(dz):
      for j in range(dy):
        for i in range(dx):
          sbox[k, j, i] += next(g)
  else:
    for k in range(dz):
      for j in range(dy):
        for i in range(dx):
          pixel = a + b * (i+0.5) + c * (j+0.5) + d * (k+0.5)
          g = variate(poisson_distribution(mean = pixel))
          sbox[k, j, i] += next(g)
  return
Example 15
def generate_parameters(p=None, n_images=None, n_cpu=None):

    # determine number of images per thread
    if n_images > n_cpu:
        n_images_per_cpu = int(math.floor(n_images / n_cpu))
        n_cpu = int(math.ceil(n_images / n_images_per_cpu))
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        remaining_images = n_images
        for i in range(len(parameters)):
            if remaining_images > n_images_per_cpu:
                parameters[i].model_properties.n_images = n_images_per_cpu
                remaining_images -= n_images_per_cpu
            else:
                parameters[i].model_properties.n_images = remaining_images
    else:
        n_cpu = n_images
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        for i in range(n_cpu):
            parameters[i].model_properties.n_images = 1

    # jumble random state for each thread
    r = random.Random()
    r.setstate(p.model_properties.random_state)
    pv = list()
    for m in p.mean:
        pv.append(variate(poisson_distribution(m)))
    for i in range(len(parameters)):
        n_jump = 0
        parameters[i].n_particles = []
        for j in range(len(p.mean)):
            if p.particle_count_noise:
                parameters[i].n_particles.append(
                    pv[j](parameters[i].model_properties.n_images))
            else:
                parameters[i].n_particles.append(
                    flex.int(parameters[i].model_properties.n_images,
                             p.mean[j]))
            for k in range(parameters[i].model_properties.n_images):
                n_jump += 7 * parameters[i].n_particles[j][k]
            n_jump += parameters[i].model_properties.n_images
        parameters[i].model_properties.random_state = r.getstate()
        # NB: random.Random.jumpahead exists only in Python 2 (it was removed
        # in Python 3), so this state-jumbling trick is Python 2 specific.
        r.jumpahead(n_jump)
    p.model_properties.random_state = r.getstate()

    return n_cpu, parameters
Example 16
def generate_parameters(p=None, n_images=None, n_cpu=None, mean=None):
    pv = variate(poisson_distribution(mean))

    r = random.Random()
    r.seed()
    if n_images > n_cpu:
        n_images_per_cpu = int(math.floor(n_images / n_cpu))
        n_cpu = int(math.ceil(n_images / n_images_per_cpu))
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        remaining_images = n_images
        for i in range(len(parameters)):
            if remaining_images > n_images_per_cpu:
                parameters[i].n_images = n_images_per_cpu
                remaining_images -= n_images_per_cpu
            else:
                parameters[i].n_images = remaining_images
            n_particles = [0 for k in range(parameters[i].n_images)]
            n_jump = 0
            for j in range(parameters[i].n_images):
                n_particles[j] = pv()
                n_jump += 6 * n_particles[j]
            parameters[i].n_particles = copy.deepcopy(n_particles)
            parameters[i].random_state = r.getstate()
            r.jumpahead(n_jump)  # Python 2 only: removed in Python 3
    else:
        n_cpu = n_images
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        for i in range(n_cpu):
            parameters[i].n_images = 1
            n_particles = [0 for k in range(parameters[i].n_images)]
            n_jump = 0
            for j in range(parameters[i].n_images):
                n_particles[j] = pv()
                n_jump += 6 * n_particles[j]
            parameters[i].n_particles = copy.deepcopy(n_particles)
            parameters[i].random_state = r.getstate()
            r.jumpahead(n_jump)  # Python 2 only: removed in Python 3

    return n_cpu, parameters
Example 17
def poisson_source(howmany, counts):
  from scitbx.random import variate, poisson_distribution
  g = variate(poisson_distribution(mean=counts))
  return [next(g) for j in range(howmany)]
Example 18
if __name__ == '__main__':

    # Test the expression in dials_regression/doc/notes/scaling/scaling.tex
    # (as of revision 1537) for d<Ih>/dp. Simulate 10 measurements of a
    # reflection with different scales. Here the scale increases linearly between
    # each equally-spaced measurement, but the form of the scale variation
    # doesn't affect the derivative calculation.
    nobs = 10

    # known scale factors
    K = [1 + e / 40. for e in range(nobs)]
    g = [1. / e for e in K]

    # intensities
    means = [100 * e for e in K]
    I = [variate(poisson_distribution(e))() for e in means]

    # weights (inverse variances of I)
    w = [1. / e for e in means]

    mrgI = av_I(I, w, g)

    for iparam in range(nobs):
        dmrgI_dp = grad_av_I(I, w, g, iparam)
        fd_dmrgI_dp = fd_grad_av_I(I, w, g, iparam)

        assert approx_equal(dmrgI_dp, fd_dmrgI_dp)
    print "OK"

    # Now test the complete expression for the first derivative of the residuals
    # of the HRS target.
Example 19
if __name__ == '__main__':

  # Test the expression in dials_regression/doc/notes/scaling/scaling.tex
  # (as of revision 1537) for d<Ih>/dp. Simulate 10 measurements of a
  # reflection with different scales. Here the scale increases linearly between
  # each equally-spaced measurement, but the form of the scale variation
  # doesn't affect the derivative calculation.
  nobs = 10

  # known scale factors
  K = [1 + e/40. for e in range(nobs)]
  g = [1./e for e in K]

  # intensities
  means = [100 * e for e in K]
  I = [variate(poisson_distribution(e))() for e in means]

  # weights (inverse variances of I)
  w = [1./e for e in means]

  mrgI = av_I(I, w, g)

  for iparam in range(nobs):
    dmrgI_dp = grad_av_I(I, w, g, iparam)
    fd_dmrgI_dp = fd_grad_av_I(I, w, g, iparam)

    assert approx_equal(dmrgI_dp, fd_dmrgI_dp)
  print "OK"

  # Now test the complete expression for the first derivative of the residuals
  # of the HRS target.
Example 20
from __future__ import print_function
from scitbx.array_family import flex
from scitbx.random import variate, uniform_distribution, poisson_distribution
import math

rate = 100
nn = 1000000
ss = 3

scale = variate(uniform_distribution(min=-ss, max=ss))
intensity = variate(poisson_distribution(mean=rate))

d = flex.double(nn)

for j in range(nn):
    x = next(scale)
    d[j] = math.exp(-x * x) * next(intensity)

h = flex.histogram(d, data_min=0, data_max=2 * rate, n_slots=100)

total = 0
for c, s in zip(h.slot_centers(), h.slots()):
    total += s
    print(c, s, total / nn)
Example 21
def poisson_source(howmany, counts):
    from scitbx.random import variate, poisson_distribution

    g = variate(poisson_distribution(mean=counts))
    return [next(g) for j in range(howmany)]
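A one-line usage sketch: draw five Poisson variates with mean 100.

counts = poisson_source(5, 100)
assert len(counts) == 5 and all(c >= 0 for c in counts)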