def exercise_matrix_x_vector():
  from scitbx.random import variate, uniform_distribution
  for m,n in [(5,5), (3,5), (5,3)]:
    random_vectors = variate(
      sparse.vector_distribution(
        n, density=0.4,
        elements=uniform_distribution(min=-2, max=2)))
    random_matrices = variate(
      sparse.matrix_distribution(
        m, n, density=0.3,
        elements=uniform_distribution(min=-2, max=2)))
    for n_test in range(50):
      a = next(random_matrices)
      x = next(random_vectors)
      y = a*x
      aa = a.as_dense_matrix()
      xx = x.as_dense_vector()
      yy1 = y.as_dense_vector()
      yy2 = aa.matrix_multiply(xx)
      assert approx_equal(yy1,yy2)

  for m,n in [(5,5), (3,5), (5,3)]:
    random_matrices = variate(
      sparse.matrix_distribution(
        m, n, density=0.4,
        elements=uniform_distribution(min=-2, max=2)))
    for n_test in range(50):
      a = next(random_matrices)
      x = flex.random_double(n)
      y = a*x
      aa = a.as_dense_matrix()
      yy = aa.matrix_multiply(x)
      assert approx_equal(y, yy)
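
A note on the variate calling conventions used throughout these examples: a variate can be called with no arguments for a single draw, called with a count to get a flex array of draws, or iterated like any Python generator. A minimal sketch, assuming scitbx is installed:

def exercise_variate_call_conventions():
  # Sketch only: demonstrates the three draw styles used in these examples.
  import itertools
  from scitbx.random import variate, uniform_distribution
  g = variate(uniform_distribution(min=0.0, max=1.0))
  single = g()                           # one draw
  batch = g(5)                           # flex array of 5 draws
  stream = list(itertools.islice(g, 3))  # variates are iterable
  assert 0.0 <= single <= 1.0
  assert len(batch) == 5 and len(stream) == 3
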
def exercise_random():
    from scitbx.random import variate, uniform_distribution

    g = random_matrices = variate(
        sparse.matrix_distribution(5,
                                   3,
                                   density=0.4,
                                   elements=uniform_distribution(min=-1,
                                                                 max=0.5)))
    for a in itertools.islice(g, 10):
        assert a.n_rows == 5 and a.n_cols == 3
        assert approx_equal(a.non_zeroes, a.n_rows * a.n_cols * 0.4, eps=1)
        for j in range(a.n_cols):
            for i, x in a.col(j):
                assert -1 <= x < 0.5, (i, j, x)

    g = random_vectors = variate(
        sparse.vector_distribution(6,
                                   density=0.3,
                                   elements=uniform_distribution(min=-2,
                                                                 max=2)))
    for v in itertools.islice(g, 10):
        assert v.size == 6
        assert approx_equal(v.non_zeroes, v.size * 0.3, eps=1)
        for i, x in v:
            assert -2 <= x < 2, (i, x)
Example #3
def exercise_variate_generators():
    from scitbx.random \
         import variate, normal_distribution, bernoulli_distribution, \
                gamma_distribution, poisson_distribution
    for i in range(10):
        scitbx.random.set_random_seed(0)
        g = variate(normal_distribution())
        assert approx_equal(g(), -0.917787219374)
        assert approx_equal(
            g(10),
            (1.21838707856, 1.732426915, 0.838038157555, -0.296895169923,
             0.246451144946, -0.635474652255, -0.0980626986425, 0.36458295417,
             0.534073780268, -0.665073136294))

    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 0, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    assert approx_equal(stat.skew, 0, eps=0.005)
    assert approx_equal(stat.kurtosis, 3, eps=0.005)

    bernoulli_seq = variate(bernoulli_distribution(0.1))
    for b in itertools.islice(bernoulli_seq, 10):
        assert b in (True, False)
    bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
    assert approx_equal(bernoulli_sample.count(True) / len(bernoulli_sample),
                        0.1,
                        eps=0.01)

    # Boost 1.64 changes the exponential distribution to use Ziggurat algorithm
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution())
    if (boost_version < 106400):
        assert approx_equal(g(), 0.79587450456577546)
        assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
    else:
        assert approx_equal(g(), 0.864758191783)
        assert approx_equal(g(2), (1.36660841837, 2.26740986094))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 1, eps=0.005)
    assert approx_equal(stat.skew, 2, eps=0.01)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution(alpha=2, beta=3))
    assert approx_equal(g(), 16.670850592722729)
    assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 6, eps=0.005)
    assert approx_equal(stat.skew, 2 / math.sqrt(2), eps=0.05)
    assert approx_equal(stat.biased_variance, 18, eps=0.05)

    mean = 10.0
    pv = variate(poisson_distribution(mean))
    draws = pv(1000000).as_double()
    m = flex.mean(draws)
    v = flex.mean(draws * draws) - m * m
    assert approx_equal(m, mean, eps=0.05)
    assert approx_equal(v, mean, eps=0.05)
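
The reseeding pattern above (calling scitbx.random.set_random_seed(0) before building each variate) is what makes the hard-coded expected values reproducible. A standalone sketch of the same idea, assuming scitbx is installed:

def exercise_reseeding():
    # Sketch only: the same seed makes a fresh variate replay the same stream.
    import scitbx.random
    from scitbx.random import variate, normal_distribution
    scitbx.random.set_random_seed(0)
    first = list(variate(normal_distribution())(5))
    scitbx.random.set_random_seed(0)
    second = list(variate(normal_distribution())(5))
    assert first == second
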
Example #4
def exercise_variate_generators():
  from scitbx.random \
       import variate, normal_distribution, bernoulli_distribution, \
              gamma_distribution, poisson_distribution
  for i in range(10):
    scitbx.random.set_random_seed(0)
    g = variate(normal_distribution())
    assert approx_equal(g(), -1.2780081289048213)
    assert approx_equal(g(10),
      (-0.40474189234755492, -0.41845505596083288,
       -1.8825790263067721, -1.5779112018107659,
       -1.1888174422378859, -1.8619619179878537,
       -0.53946818661388318, -1.2400941724410812,
       0.64511959841907285, -0.59934120033270688))

  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            0, eps=0.005)
  assert approx_equal(stat.biased_variance, 1, eps=0.005)
  assert approx_equal(stat.skew,            0, eps=0.005)
  assert approx_equal(stat.kurtosis,        3, eps=0.005)

  bernoulli_seq = variate(bernoulli_distribution(0.1))
  for b in itertools.islice(bernoulli_seq, 10):
    assert b in (True, False)
  bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
  assert approx_equal(
    bernoulli_sample.count(True)/len(bernoulli_sample),
    0.1,
    eps = 0.01)

  scitbx.random.set_random_seed(0)
  g = variate(gamma_distribution())
  assert approx_equal(g(), 0.79587450456577546)
  assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            1, eps=0.005)
  assert approx_equal(stat.skew,            2, eps=0.005)
  assert approx_equal(stat.biased_variance, 1, eps=0.005)
  scitbx.random.set_random_seed(0)
  g = variate(gamma_distribution(alpha=2, beta=3))
  assert approx_equal(g(), 16.670850592722729)
  assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
  stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
  assert approx_equal(stat.mean,            6, eps=0.005)
  assert approx_equal(stat.skew,            2/math.sqrt(2), eps=0.05)
  assert approx_equal(stat.biased_variance, 18, eps=0.05)

  mean = 10.0
  pv = variate(poisson_distribution(mean))
  draws = pv(1000000).as_double()
  m = flex.mean(draws)
  v = flex.mean(draws*draws) - m*m
  assert approx_equal(m,mean,eps=0.05)
  assert approx_equal(v,mean,eps=0.05)
Example #5
def exercise_variate_generators():
    from scitbx.random \
         import variate, normal_distribution, bernoulli_distribution, \
                gamma_distribution, poisson_distribution
    for i in range(10):
        scitbx.random.set_random_seed(0)
        g = variate(normal_distribution())
        assert approx_equal(g(), -1.2780081289048213)
        assert approx_equal(
            g(10),
            (-0.40474189234755492, -0.41845505596083288, -1.8825790263067721,
             -1.5779112018107659, -1.1888174422378859, -1.8619619179878537,
             -0.53946818661388318, -1.2400941724410812, 0.64511959841907285,
             -0.59934120033270688))

    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 0, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    assert approx_equal(stat.skew, 0, eps=0.005)
    assert approx_equal(stat.kurtosis, 3, eps=0.005)

    bernoulli_seq = variate(bernoulli_distribution(0.1))
    for b in itertools.islice(bernoulli_seq, 10):
        assert b in (True, False)
    bernoulli_sample = flex.bool(itertools.islice(bernoulli_seq, 10000))
    assert approx_equal(bernoulli_sample.count(True) / len(bernoulli_sample),
                        0.1,
                        eps=0.01)

    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution())
    assert approx_equal(g(), 0.79587450456577546)
    assert approx_equal(g(2), (0.89856038848394115, 1.2559307580473893))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 1, eps=0.005)
    assert approx_equal(stat.skew, 2, eps=0.005)
    assert approx_equal(stat.biased_variance, 1, eps=0.005)
    scitbx.random.set_random_seed(0)
    g = variate(gamma_distribution(alpha=2, beta=3))
    assert approx_equal(g(), 16.670850592722729)
    assert approx_equal(g(2), (10.03662877519449, 3.9357158398972873))
    stat = basic_statistics(flex.double(itertools.islice(g, 1000000)))
    assert approx_equal(stat.mean, 6, eps=0.005)
    assert approx_equal(stat.skew, 2 / math.sqrt(2), eps=0.05)
    assert approx_equal(stat.biased_variance, 18, eps=0.05)

    mean = 10.0
    pv = variate(poisson_distribution(mean))
    draws = pv(1000000).as_double()
    m = flex.mean(draws)
    v = flex.mean(draws * draws) - m * m
    assert approx_equal(m, mean, eps=0.05)
    assert approx_equal(v, mean, eps=0.05)
Example #6
def scale_down_array_py(image, scale_factor):
    '''Scale the data in image in a manner which retains the statistical
    structure of the input data. Input data type must be integers; negative
    values assumed to be flags of some kind (i.e. similar to Pilatus data)
    and hence preserved as input.'''

    from scitbx.random import variate, uniform_distribution
    from scitbx.array_family import flex

    assert (scale_factor <= 1)
    assert (scale_factor >= 0)

    # construct a random number generator on the half-open range [0, 1)
    dist = variate(uniform_distribution(0.0, 1.0))
    scaled_image = flex.int(len(image), 0)

    for j, pixel in enumerate(image):
        if pixel < 0:
            scaled_image[j] = pixel
        else:
            for c in range(pixel):
                if next(dist) < scale_factor:
                    scaled_image[j] += 1

    return scaled_image
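
Hypothetical usage of scale_down_array_py (the values here are illustrative): negative pixels pass through as flags, and positive counts can only shrink.

from scitbx.array_family import flex
image = flex.int([100, 0, -1, 7])
scaled = scale_down_array_py(image, 0.5)
assert scaled[2] == -1          # flag value preserved verbatim
assert 0 <= scaled[0] <= 100    # count thinned, never increased
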
def test():
    # Test the expression in dials_regression/doc/notes/scaling/scaling.tex
    # (as of revision 1537) for d<Ih>/dp. Simulate 10 measurements of a
    # reflection with different scales. Here the scale increases linearly between
    # each equally-spaced measurement, but the form of the scale variation
    # doesn't affect the derivative calculation.
    nobs = 10

    # known scale factors
    K = [1 + e / 40.0 for e in range(nobs)]
    g = [1.0 / e for e in K]

    # intensities
    means = [100 * e for e in K]
    I = [variate(poisson_distribution(e))() for e in means]

    # weights (inverse variances of I)
    w = [1.0 / e for e in means]

    mrgI = av_I(I, w, g)

    for iparam in range(nobs):
        dmrgI_dp = grad_av_I(I, w, g, iparam)
        fd_dmrgI_dp = fd_grad_av_I(I, w, g, iparam)

        assert approx_equal(dmrgI_dp, fd_dmrgI_dp)

    # Now test the complete expression for the first derivative of the residuals
    # of the HRS target.
    for iparam in range(nobs):
        dr_dp = grad_r(I, w, g, iparam)
        fd_dr_dp = fd_grad_r(I, w, g, iparam)

        assert approx_equal(dr_dp, fd_dr_dp)
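
The fd_grad_av_I and fd_grad_r helpers are not shown in this listing; a generic central-difference gradient of the kind such checks rely on might look like this (a hypothetical helper, not the project's actual code):

def fd_gradient(f, x, iparam, eps=1.0e-6):
    # Central finite difference of f with respect to x[iparam].
    xp, xm = list(x), list(x)
    xp[iparam] += eps
    xm[iparam] -= eps
    return (f(xp) - f(xm)) / (2.0 * eps)
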
Example #8
def exercise(flags, space_group_info):
    # Prepare a structure compatible with the ShelX model
    xs = random_structure.xray_structure(space_group_info,
                                         elements="random",
                                         n_scatterers=10,
                                         use_u_iso=True,
                                         random_u_iso=True,
                                         use_u_aniso=True)
    xs.apply_symmetry_sites()
    xs.apply_symmetry_u_stars()
    for isotropic, sc in zip(variate(bernoulli_distribution(0.4)),
                             xs.scatterers()):
        sc.flags.set_grad_site(True)
        if isotropic:
            sc.flags.set_use_u_iso(True)
            sc.flags.set_use_u_aniso(False)
            sc.flags.set_grad_u_iso(True)
        else:
            sc.flags.set_use_u_iso(False)
            sc.flags.set_use_u_aniso(True)
            sc.flags.set_grad_u_aniso(True)

    not_origin_centric = (xs.space_group().is_centric()
                          and not xs.space_group().is_origin_centric())

    try:
        ins = list(
            shelx.writer.generator(xs,
                                   full_matrix_least_squares_cycles=4,
                                   weighting_scheme_params=(0, 0),
                                   sort_scatterers=False))
    except AssertionError:
        if (not_origin_centric):
            print(
                "Omitted %s\n  because it is centric but not origin centric" %
                xs.space_group().type().hall_symbol())
            return
        raise
    else:
        if (not_origin_centric):
            raise Exception_expected

    ins = io.StringIO("".join(ins))
    xs1 = xs.from_shelx(file=ins)
    xs.crystal_symmetry().is_similar_symmetry(xs1.crystal_symmetry(),
                                              relative_length_tolerance=1e-3,
                                              absolute_angle_tolerance=1e-3)
    uc = xs.unit_cell()
    uc1 = xs1.unit_cell()
    for sc, sc1 in zip(xs.scatterers(), xs1.scatterers()):
        assert sc.label.upper() == sc1.label.upper()
        assert sc.scattering_type == sc1.scattering_type
        assert sc.flags.bits == sc1.flags.bits
        assert approx_equal(sc.site, sc1.site, eps=1e-6)
        if sc.flags.use_u_iso():
            assert approx_equal(sc.u_iso, sc1.u_iso, eps=1e-5)
        else:
            assert approx_equal(adptbx.u_star_as_u_cif(uc, sc.u_star),
                                adptbx.u_star_as_u_cif(uc1, sc1.u_star),
                                eps=1e-5)
Example #10
def apply_gaussian_noise(image, params):
    from scitbx.random import variate, normal_distribution
    import numpy
    G = variate(normal_distribution(mean=2.0, sigma=0.5))
    gaussian_noise = flex.double(G(image.linearintdata.size()))
    #image.linearintdata += flex.int(gaussian_noise.as_numpy_array().astype(numpy.int32))
    image.linearintdata += gaussian_noise
Example #11
def scale_down_array_py(image, scale_factor):
  '''Scale the data in image in a manner which retains the statistical structure
  of the input data. Input data type must be integers; negative values assumed
  to be flags of some kind (i.e. similar to Pilatus data) and hence preserved
  as input.'''

  from scitbx.random import variate, uniform_distribution
  from scitbx.array_family import flex

  assert (scale_factor <= 1)
  assert (scale_factor >= 0)

  # construct a random number generator on the half-open range [0, 1)
  dist = variate(uniform_distribution(0.0, 1.0))
  scaled_image = flex.int(len(image), 0)

  for j, pixel in enumerate(image):
    if pixel < 0:
      scaled_image[j] = pixel
    else:
      for c in range(pixel):
        if next(dist) < scale_factor:
          scaled_image[j] += 1

  return scaled_image
Example #12
def measurement_process(counts, dqe):
  from scitbx.random import variate, uniform_distribution
  g = variate(uniform_distribution(min=0.0, max=1.0))
  result = 0
  for j in range(counts):
    if next(g) < dqe:
      result += 1
  return result
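
Because each count survives independently with probability dqe, measurement_process is binomial thinning. A quick sanity check (a sketch, with a loosely chosen tolerance of several sigma for Binomial(10000, 0.8)):

n_detected = measurement_process(10000, 0.8)
assert abs(n_detected - 8000) < 300
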
Example #13
def randomize(values, amount):
    from scitbx.random import variate, normal_distribution
    from dials.array_family import flex
    g = variate(normal_distribution(mean=0, sigma=amount))
    shift = flex.double(values.size())
    for j in range(values.size()):
        shift[j] = next(g)
    return values + shift
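
Hypothetical usage of randomize, assuming the dials flex array family is available: the result is the input plus independent N(0, amount) shifts.

from dials.array_family import flex
values = flex.double([1.0, 2.0, 3.0])
jittered = randomize(values, amount=0.1)
assert len(jittered) == len(values)
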
Example #14
def background(image, mean_bg):
    from scitbx.random import variate, poisson_distribution
    dy, dx = image.focus()
    g = variate(poisson_distribution(mean=mean_bg))
    for j in range(dy):
        for i in range(dx):
            image[j, i] += next(g)
    return image
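
Hypothetical usage of background on a small 2D flex image: every pixel gains an independent Poisson(mean_bg) count, so no pixel can decrease.

from scitbx.array_family import flex
image = flex.double(flex.grid(4, 5), 0.0)
image = background(image, mean_bg=10)
assert flex.min(image) >= 0
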
Example #15
def random_positions(n, amount):
    from scitbx.random import variate, normal_distribution
    from dials.array_family import flex
    g = variate(normal_distribution(mean=0, sigma=amount))
    xy = flex.vec2_double(n)
    for j in range(n):
        xy[j] = (next(g), next(g))
    return xy
Example #16
def data_for_error_model_test(background_variance=1,
                              multiplicity=100,
                              b=0.05,
                              a=1.0):
    """Model a set of poisson-distributed observations on a constant-variance
    background."""

    ## First create a miller array of observations (in asu)
    from cctbx import miller
    from cctbx import crystal

    ms = miller.build_set(
        crystal_symmetry=crystal.symmetry(space_group_symbol="P212121",
                                          unit_cell=(12, 12, 25, 90, 90, 90)),
        anomalous_flag=False,
        d_min=1.0,
    )
    assert ms.size() == 2150
    mean_intensities = 5.0 * (ms.d_spacings().data()**4)
    # ^ get a good range of intensities, with high intensity at low
    # miller index, mean = 285.2, median = 13.4

    # when applying b, use fact that I' - Imean = alpha(I - Imean), will
    # give the same distribution as sigma' = alpha sigma,
    # where alpha = (1 + (b^2 I)) ^ 0.5. i.e. this is the way to increase the
    # deviations of I-Imean and keep the same 'poisson' sigmas, such that the
    # sigmas need to be inflated by the error model with the given a, b.
    import scitbx
    from scitbx.random import variate, poisson_distribution

    # Note, if a and b both set, probably not quite right, but okay for small
    # a and b for the purpose of a test

    scitbx.random.set_random_seed(0)
    intensities = flex.int()
    variances = flex.double()
    miller_index = flex.miller_index()
    for i, idx in zip(mean_intensities, ms.indices()):
        g = variate(poisson_distribution(mean=i))
        for _ in range(multiplicity):
            intensity = next(g)
            if b > 0.0:
                alpha = (1.0 + (b**2 * intensity))**0.5
                intensities.append(
                    int((alpha * intensity) + ((1.0 - alpha) * i)))
            else:
                intensities.append(intensity)
            variances.append((intensity + background_variance) / (a**2))
            miller_index.append(idx)

    reflections = flex.reflection_table()
    reflections["intensity"] = intensities.as_double()
    reflections["variance"] = variances.as_double()
    reflections["miller_index"] = miller_index
    reflections["inverse_scale_factor"] = flex.double(intensities.size(), 1.0)
    reflections["id"] = flex.int(intensities.size(), 1)

    return reflections
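
A numeric check of the inflation identity quoted in the comments above (a simplified sketch in pure Python, holding the mean fixed): scaling deviations from the mean by alpha multiplies the standard deviation by the same alpha.

import random
import statistics

b, mean = 0.05, 100.0
alpha = (1.0 + b * b * mean) ** 0.5
raw = [random.gauss(mean, mean ** 0.5) for _ in range(100000)]
inflated = [alpha * x + (1.0 - alpha) * mean for x in raw]
# stdev(inflated) / stdev(raw) should approach alpha
assert abs(statistics.stdev(inflated) / statistics.stdev(raw) - alpha) < 0.01
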
Example #17
def model_background(shoebox, mean_bg):
  from scitbx.random import variate, poisson_distribution
  dz, dy, dx = shoebox.focus()
  g = variate(poisson_distribution(mean = mean_bg))
  for k in range(dz):
    for j in range(dy):
      for i in range(dx):
        shoebox[k, j, i] += next(g)
  return
Example #18
def prepare_simulation_with_noise(sim,
                                  transmittance,
                                  apply_noise,
                                  ordered_intensities=None,
                                  half_data_flag=0):
    result = intensity_data()
    result.frame = sim["frame_lookup"]
    result.miller = sim['miller_lookup']
    raw_obs_no_noise = transmittance * sim['observed_intensity']
    if apply_noise:
        import scitbx.random
        from scitbx.random import variate, normal_distribution
        # bernoulli_distribution, gamma_distribution, poisson_distribution
        scitbx.random.set_random_seed(321)
        g = variate(normal_distribution())
        noise = flex.sqrt(raw_obs_no_noise) * g(len(raw_obs_no_noise))
        # adds in Gauss noise to signal
    else:
        noise = flex.double(len(raw_obs_no_noise), 0.)

    raw_obs = raw_obs_no_noise + noise

    # apply selection after random numbers have been applied
    if half_data_flag in [1, 2]:
        half_data_selection = (sim["frame_lookup"] % 2) == (half_data_flag % 2)
        result.frame = sim["frame_lookup"].select(half_data_selection)
        result.miller = sim['miller_lookup'].select(half_data_selection)
        raw_obs = raw_obs.select(half_data_selection)

    mean_signal = flex.mean(raw_obs)

    sigma_obs = flex.sqrt(flex.abs(raw_obs))
    mean_sigma = flex.mean(sigma_obs)
    print("<I> / <sigma>", (mean_signal / mean_sigma))

    scale_factor = mean_signal / 10.
    print("Mean signal is", mean_signal,
          "Applying a constant scale factor of ", scale_factor)

    #most important line; puts input data on a numerically reasonable scale
    result.raw_obs = raw_obs / scale_factor
    scaled_sigma = sigma_obs / scale_factor

    result.exp_var = scaled_sigma * scaled_sigma

    #ordered intensities gets us the unit cell & miller indices to
    # gain a static array of (sin theta over lambda)**2
    if ordered_intensities is not None:
        uc = ordered_intensities.unit_cell()
        stol_sq = flex.double()
        for i in range(len(result.miller)):
            this_hkl = ordered_intensities.indices()[result.miller[i]]
            stol_sq_item = uc.stol_sq(this_hkl)
            stol_sq.append(stol_sq_item)
        result.stol_sq = stol_sq
    return result
Example #19
def model_background(shoebox, mean_bg):
    from scitbx.random import variate, poisson_distribution
    dz, dy, dx = shoebox.focus()
    g = variate(poisson_distribution(mean=mean_bg))
    for k in range(dz):
        for j in range(dy):
            for i in range(dx):
                shoebox[k, j, i] += next(g)
    return
Example #20
def measurement_process(counts, dqe):
    from scitbx.random import variate, uniform_distribution

    g = variate(uniform_distribution(min=0.0, max=1.0))
    result = 0
    for j in range(counts):
        if next(g) < dqe:
            result += 1
    return result
def __init__(self, space_group_info, **kwds):
    libtbx.adopt_optional_init_args(self, kwds)
    self.space_group_info = space_group_info
    self.structure = random_structure.xray_structure(
        space_group_info,
        elements=self.elements,
        volume_per_atom=20.,
        min_distance=1.5,
        general_positions_only=True,
        use_u_aniso=False,
        u_iso=adptbx.b_as_u(10),
    )
    self.structure.set_inelastic_form_factors(1.54, "sasaki")
    self.scale_factor = 0.05 + 10 * flex.random_double()
    fc = self.structure.structure_factors(anomalous_flag=True,
                                          d_min=self.d_min,
                                          algorithm="direct").f_calc()
    fo = fc.as_amplitude_array()
    fo.set_observation_type_xray_amplitude()
    if self.use_students_t_errors:
        nu = random.uniform(1, 10)
        normal_g = variate(normal_distribution())
        gamma_g = variate(gamma_distribution(0.5 * nu, 2))
        errors = normal_g(fc.size()) / flex.sqrt(2 * gamma_g(fc.size()))
    else:
        # use gaussian errors
        g = variate(normal_distribution())
        errors = g(fc.size())
    fo2 = fo.as_intensity_array()
    self.fo2 = fo2.customized_copy(
        data=(fo2.data() + errors) * self.scale_factor,
        sigmas=flex.double(fc.size(), 1),
    )
    self.fc = fc
    xs_i = self.structure.inverse_hand()
    self.fc_i = xs_i.structure_factors(anomalous_flag=True,
                                       d_min=self.d_min,
                                       algorithm="direct").f_calc()
    fo2_twin = self.fc.customized_copy(
        data=self.fc.data() + self.fc_i.data()).as_intensity_array()
    self.fo2_twin = fo2_twin.customized_copy(
        data=(errors + fo2_twin.data()) * self.scale_factor,
        sigmas=self.fo2.sigmas())
Example #23
def exercise_matrix_x_matrix():
  from scitbx.random import variate, uniform_distribution
  mat = lambda m,n: variate(
    sparse.matrix_distribution(
      m, n, density=0.4,
      elements=uniform_distribution(min=-10, max=10)))()
  a,b = mat(3,4), mat(4,2)
  c = a*b
  aa, bb, cc = [ m.as_dense_matrix() for m in (a,b,c) ]
  cc1 = aa.matrix_multiply(bb)
  assert approx_equal(cc, cc1)
Example #25
def simulate(n, size):
    from scitbx.array_family import flex
    from scitbx.random import variate, poisson_distribution

    shoeboxes = []

    B = 10

    # Generate shoeboxes with uniform random background
    for l in range(n):
        sbox = flex.double(flex.grid(size), 0)
        g = variate(poisson_distribution(mean=B))
        for k in range(size[0]):
            for j in range(size[1]):
                for i in range(size[2]):
                    sbox[k, j, i] += next(g)
        shoeboxes.append(sbox)

    # Calculate the Intensity (should be zero)
    import random

    I_cal = []
    mean = []
    for i in range(len(shoeboxes)):
        nn = len(shoeboxes[i])
        mm = int(1.0 * nn)
        index = flex.size_t(random.sample(range(nn), mm))
        assert len(set(index)) == mm
        data = shoeboxes[i].select(index)
        II = flex.sum(data)
        # II = flex.mean(data)
        BB = mm * B
        # BB = B
        I_cal.append(II - BB)
        m = flex.mean(data)
        mean.append(m)
    I_cal = flex.double(I_cal)

    mv = flex.mean_and_variance(flex.double(mean))
    print(mv.mean() - B, mv.unweighted_sample_variance())
    v1 = B / (size[0] * size[1] * size[2])
    v2 = B * (size[0] * size[1] * size[2])
    print(v1)
    print(v2)
    print(I_cal[0])

    from math import sqrt

    Z = (I_cal - 0) / sqrt(v2)

    # Return the mean and standard deviation
    mv = flex.mean_and_variance(Z)
    return mv.mean(), mv.unweighted_sample_variance()
Example #26
def prepare_simulation_with_noise(sim, transmittance,
                                  apply_noise,
                                  ordered_intensities=None,
                                  half_data_flag=0):
  result = intensity_data()
  result.frame = sim["frame_lookup"]
  result.miller = sim['miller_lookup']
  raw_obs_no_noise = transmittance * sim['observed_intensity']
  if apply_noise:
    import scitbx.random
    from scitbx.random import variate, normal_distribution
         # bernoulli_distribution, gamma_distribution, poisson_distribution
    scitbx.random.set_random_seed(321)
    g = variate(normal_distribution())
    noise = flex.sqrt(raw_obs_no_noise) * g(len(raw_obs_no_noise))
    # adds in Gauss noise to signal
  else:
    noise = flex.double(len(raw_obs_no_noise),0.)

  raw_obs = raw_obs_no_noise + noise

  if half_data_flag in [1,2]:  # apply selection after random numbers have been applied
    half_data_selection = (sim["frame_lookup"]%2)==(half_data_flag%2)
    result.frame  = sim["frame_lookup"].select(half_data_selection)
    result.miller = sim['miller_lookup'].select(half_data_selection)
    raw_obs       = raw_obs.select(half_data_selection)

  mean_signal = flex.mean(raw_obs)

  sigma_obs = flex.sqrt(flex.abs(raw_obs))
  mean_sigma = flex.mean(sigma_obs)
  print "<I> / <sigma>", (mean_signal/ mean_sigma)

  scale_factor = mean_signal/10.
  print "Mean signal is",mean_signal,"Applying a constant scale factor of ",scale_factor

  #most important line; puts input data on a numerically reasonable scale
  result.raw_obs = raw_obs / scale_factor
  scaled_sigma = sigma_obs / scale_factor

  result.exp_var = scaled_sigma * scaled_sigma

  #ordered intensities gets us the unit cell & miller indices to
  # gain a static array of (sin theta over lambda)**2
  if ordered_intensities is not None:
    uc = ordered_intensities.unit_cell()
    stol_sq = flex.double()
    for i in range(len(result.miller)):
      this_hkl = ordered_intensities.indices()[result.miller[i]]
      stol_sq_item = uc.stol_sq(this_hkl)
      stol_sq.append(stol_sq_item)
    result.stol_sq = stol_sq
  return result
Example #27
def exercise(flags, space_group_info):
    # Prepare a structure compatible with the ShelX model
    xs = random_structure.xray_structure(
        space_group_info, elements="random", n_scatterers=10, use_u_iso=True, random_u_iso=True, use_u_aniso=True
    )
    xs.apply_symmetry_sites()
    xs.apply_symmetry_u_stars()
    for isotropic, sc in zip(variate(bernoulli_distribution(0.4)), xs.scatterers()):
        sc.flags.set_grad_site(True)
        if isotropic:
            sc.flags.set_use_u_iso(True)
            sc.flags.set_use_u_aniso(False)
            sc.flags.set_grad_u_iso(True)
        else:
            sc.flags.set_use_u_iso(False)
            sc.flags.set_use_u_aniso(True)
            sc.flags.set_grad_u_aniso(True)

    not_origin_centric = xs.space_group().is_centric() and not xs.space_group().is_origin_centric()

    try:
        ins = list(
            shelx.writer.generator(
                xs, full_matrix_least_squares_cycles=4, weighting_scheme_params=(0, 0), sort_scatterers=False
            )
        )
    except AssertionError:
        if not_origin_centric:
            print("Omitted %s\n  because it is centric but not origin centric" % xs.space_group().type().hall_symbol())
            return
        raise
    else:
        if not_origin_centric:
            raise Exception_expected

    ins = io.StringIO("".join(ins))
    xs1 = xs.from_shelx(file=ins)
    xs.crystal_symmetry().is_similar_symmetry(
        xs1.crystal_symmetry(), relative_length_tolerance=1e-3, absolute_angle_tolerance=1e-3
    )
    uc = xs.unit_cell()
    uc1 = xs1.unit_cell()
    for sc, sc1 in zip(xs.scatterers(), xs1.scatterers()):
        assert sc.label.upper() == sc1.label.upper()
        assert sc.scattering_type == sc1.scattering_type
        assert sc.flags.bits == sc1.flags.bits
        assert approx_equal(sc.site, sc1.site, eps=1e-6)
        if sc.flags.use_u_iso():
            assert approx_equal(sc.u_iso, sc1.u_iso, eps=1e-5)
        else:
            assert approx_equal(
                adptbx.u_star_as_u_cif(uc, sc.u_star), adptbx.u_star_as_u_cif(uc1, sc1.u_star), eps=1e-5
            )
Example #28
def test():
  numbers = variate(poisson_distribution(mean = 1000))
  data = flex.double()
  for j in range(1000):
    data.append(next(numbers))

  _x, _y = npp_ify(data)
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()

  _x, _y = npp_ify(data, input_mean_variance=(1000, 1000))
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()
Example #29
File: NPP.py Project: xia2/xia2
def test():
  numbers = variate(poisson_distribution(mean = 1000))
  data = flex.double()
  for j in range(1000):
    data.append(next(numbers))

  _x, _y = npp_ify(data)
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()

  _x, _y = npp_ify(data, input_mean_variance=(1000, 1000))
  fit = flex.linear_regression(_x, _y)
  fit.show_summary()
Example #30
def simulate(n, size):
  from scitbx.array_family import flex
  from scitbx.random import variate, poisson_distribution
  shoeboxes = []

  B = 10

  # Generate shoeboxes with uniform random background
  for l in range(n):
    sbox = flex.double(flex.grid(size),0)
    g = variate(poisson_distribution(mean = B))
    for k in range(size[0]):
      for j in range(size[1]):
        for i in range(size[2]):
          sbox[k, j, i] += next(g)
    shoeboxes.append(sbox)

  # Calculate the Intensity (should be zero)
  import random
  I_cal = []
  mean = []
  for i in range(len(shoeboxes)):
    nn = len(shoeboxes[i])
    mm = int(1.0 * nn)
    index = flex.size_t(random.sample(range(nn), mm))
    assert(len(set(index)) == mm)
    data = shoeboxes[i].select(index)
    II = flex.sum(data)
    #II = flex.mean(data)
    BB = mm * B
    #BB = B
    I_cal.append(II - BB)
    m = flex.mean(data)
    mean.append(m)
  I_cal = flex.double(I_cal)

  mv = flex.mean_and_variance(flex.double(mean))
  print(mv.mean() - B, mv.unweighted_sample_variance())
  v1 = B / (size[0] * size[1] * size[2])
  v2 = B * (size[0] * size[1] * size[2])
  print(v1)
  print(v2)
  print(I_cal[0])

  from math import sqrt
  Z = (I_cal - 0) / sqrt(v2)


  # Return the mean and standard deviation
  mv = flex.mean_and_variance(Z)
  return mv.mean(), mv.unweighted_sample_variance()
Example #31
def exercise_random():
  from scitbx.random import variate, uniform_distribution

  g = random_matrices = variate(
      sparse.matrix_distribution(
        5, 3, density=0.4,
        elements=uniform_distribution(min=-1, max=0.5)))
  for a in itertools.islice(g, 10):
    assert a.n_rows == 5 and a.n_cols == 3
    assert approx_equal(a.non_zeroes, a.n_rows*a.n_cols*0.4, eps=1)
    for j in range(a.n_cols):
      for i,x in a.col(j):
        assert -1 <= x < 0.5, (i,j, x)

  g = random_vectors = variate(
      sparse.vector_distribution(
        6, density=0.3,
        elements=uniform_distribution(min=-2, max=2)))
  for v in itertools.islice(g, 10):
    assert v.size == 6
    assert approx_equal(v.non_zeroes, v.size*0.3, eps=1)
    for i,x in v:
      assert -2 <= x < 2, (i, x)
def random_background_plane2(sbox, a, b, c, d):
    """Draw values from Poisson distribution for each position where the mean for
    that distribution is equal to a + b * i + c * j + d * k where a, b, c, d are
    floating point values and i, j, k are the shoebox indices in directions x, y
    and z respectively."""

    from scitbx.random import poisson_distribution, variate

    dz, dy, dx = sbox.focus()

    if b == c == d == 0.0:
        g = variate(poisson_distribution(mean=a))
        for k in range(dz):
            for j in range(dy):
                for i in range(dx):
                    sbox[k, j, i] += next(g)
    else:
        for k in range(dz):
            for j in range(dy):
                for i in range(dx):
                    pixel = a + b * (i + 0.5) + c * (j + 0.5) + d * (k + 0.5)
                    g = variate(poisson_distribution(mean=pixel))
                    sbox[k, j, i] += next(g)
    return
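
Hypothetical usage of random_background_plane2 on a small shoebox: a constant level a plus gradients b, c, d along the x, y and z indices.

from scitbx.array_family import flex
sbox = flex.double(flex.grid(2, 3, 4), 0.0)
random_background_plane2(sbox, a=10.0, b=0.5, c=0.5, d=0.5)
assert flex.min(sbox) >= 0
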
def exercise_a_b_a_tr():
  from scitbx.random import variate, uniform_distribution
  for m,n in [(5,5), (3,5), (5,3)]:
    random_matrices = variate(
      sparse.matrix_distribution(
        m, n, density=0.6,
        elements=uniform_distribution(min=-3, max=10)))
    for n_test in range(50):
      b = flex.random_double(n*(n+1)//2)
      a = next(random_matrices)
      c = a.self_times_symmetric_times_self_transpose(b)
      aa = a.as_dense_matrix()
      bb = b.matrix_packed_u_as_symmetric()
      cc = c.matrix_packed_u_as_symmetric()
      assert approx_equal(
        cc,
        aa.matrix_multiply(bb.matrix_multiply(aa.matrix_transpose())))
def generate_parameters(p=None, n_images=None, n_cpu=None):

    # determine number of images per thread
    if (n_images > n_cpu):
        n_images_per_cpu = int(math.floor(n_images / n_cpu))
        n_cpu = int(math.ceil(n_images / n_images_per_cpu))
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        remaining_images = n_images
        for i in range(len(parameters)):
            if (remaining_images > n_images_per_cpu):
                parameters[i].model_properties.n_images = n_images_per_cpu
                remaining_images -= n_images_per_cpu
            else:
                parameters[i].model_properties.n_images = remaining_images
    else:
        n_cpu = n_images
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        for i in range(n_cpu):
            parameters[i].model_properties.n_images = 1

    # jumble random state for each thread
    r = random.Random()
    r.setstate(p.model_properties.random_state)
    pv = list()
    for m in p.mean:
        pv.append(variate(poisson_distribution(m)))
    for i in range(len(parameters)):
        n_jump = 0
        parameters[i].n_particles = []
        for j in range(len(p.mean)):
            if (p.particle_count_noise):
                parameters[i].n_particles.append\
                  (pv[j](parameters[i].model_properties.n_images))
            else:
                parameters[i].n_particles.append(
                    flex.int(parameters[i].model_properties.n_images,
                             p.mean[j]))
            for k in range(parameters[i].model_properties.n_images):
                n_jump += 7 * parameters[i].n_particles[j][k]
            n_jump += parameters[i].model_properties.n_images
        parameters[i].model_properties.random_state = r.getstate()
        r.jumpahead(n_jump)  # note: Random.jumpahead exists in Python 2 only
    p.model_properties.random_state = r.getstate()

    return n_cpu, parameters
def generate_parameters(p=None, n_images=None, n_cpu=None, mean=None):
    pv = variate(poisson_distribution(mean))

    r = random.Random()
    r.seed()
    if (n_images > n_cpu):
        n_images_per_cpu = int(math.floor(n_images / n_cpu))
        n_cpu = int(math.ceil(n_images / n_images_per_cpu))
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        remaining_images = n_images
        for i in range(len(parameters)):
            if (remaining_images > n_images_per_cpu):
                parameters[i].n_images = n_images_per_cpu
                remaining_images -= n_images_per_cpu
            else:
                parameters[i].n_images = remaining_images
            n_particles = [0 for k in range(parameters[i].n_images)]
            n_jump = 0
            for j in range(parameters[i].n_images):
                n_particles[j] = pv()
                n_jump += 6 * n_particles[j]
            parameters[i].n_particles = copy.deepcopy(n_particles)
            parameters[i].random_state = r.getstate()
            r.jumpahead(n_jump)  # note: Random.jumpahead exists in Python 2 only
    else:
        n_cpu = n_images
        parameters = [copy.deepcopy(p) for i in range(n_cpu)]
        for i in range(n_cpu):
            parameters[i].n_images = 1
            n_particles = [0 for k in range(parameters[i].n_images)]
            n_jump = 0
            for j in range(parameters[i].n_images):
                n_particles[j] = pv()
                n_jump += 6 * n_particles[j]
            parameters[i].n_particles = copy.deepcopy(n_particles)
            parameters[i].random_state = r.getstate()
            r.jumpahead(n_jump)

    return n_cpu, parameters
Example #37
def centroidify(width, shift, count):
    g = variate(normal_distribution(mean=shift, sigma=width))
    values = flex.double([next(g) for c in range(count)])
    hist = flex.histogram(data=values, n_slots=20, data_min=-10, data_max=10)
    true_mean = flex.sum(values) / values.size()
    true_variance = sum([(v - true_mean)**2
                         for v in values]) / (values.size() - 1)
    total = 1.0 * flex.sum(hist.slots())

    hist_mean = sum(
        [c * v for c, v in zip(hist.slot_centers(), hist.slots())]) / total

    # equation 6
    hist_var = sum([(v / total)**2 * (1.0 / 12.0) for v in hist.slots()])

    # print input settings
    print("%8.5f %4.1f %4d" % (width**2 / count, shift, count), end=" ")

    # true variance / mean of distribution
    print("%6.3f %8.5f" % (true_mean, true_variance / values.size()), end=" ")

    # putative values of same derived from histogram
    print("%6.3f %8.5f" % (hist_mean, hist_var))
def exercise_a_tr_diag_a_and_a_diag_a_tr():
    from scitbx.random import variate, uniform_distribution
    for m, n in [(5, 5), (3, 5), (5, 3)]:
        random_matrices = variate(
            sparse.matrix_distribution(m,
                                       n,
                                       density=0.6,
                                       elements=uniform_distribution(min=-3,
                                                                     max=10)))
        w = flex.double_range(0, m)
        ww = flex.double(m * m)
        ww.resize(flex.grid(m, m))
        ww.matrix_diagonal_set_in_place(diagonal=w)
        for n_test in range(50):
            a = next(random_matrices)
            b = a.self_transpose_times_diagonal_times_self(w)
            aa = a.as_dense_matrix()
            bb = b.as_dense_matrix()
            assert approx_equal(
                bb,
                aa.matrix_transpose().matrix_multiply(ww).matrix_multiply(aa))
            c = a.transpose().self_times_diagonal_times_self_transpose(w)
            cc = c.as_dense_matrix()
            assert approx_equal(cc, bb)
Example #39
    def main(self):
        # FIXME import simulation code
        import six.moves.cPickle as pickle
        import math
        from dials.util.command_line import Importer
        from dials.algorithms.integration import ReflectionPredictor
        from libtbx.utils import Sorry

        # Parse the command line
        params, options, args = self.parser.parse_args()

        importer = Importer(args)
        if len(importer.imagesets) == 0 and len(importer.crystals) == 0:
            self.config().print_help()
            return
        if len(importer.imagesets) != 1:
            raise Sorry('need 1 sweep: %d given' % len(importer.imagesets))
        if len(importer.crystals) != 1:
            raise Sorry('need 1 crystal: %d given' % len(importer.crystals))
        sweep = importer.imagesets[0]
        crystal = importer.crystals[0]

        # generate predictions for possible reflections => generate a
        # reflection list

        predict = ReflectionPredictor()
        predicted = predict(sweep, crystal)

        # sort with James's reflection table: should this not go somewhere central?
        from dials.scratch.jmp.container.reflection_table import ReflectionTable

        # calculate shoebox sizes: take parameters from params & transform
        # from reciprocal space to image space to decide how big a shoe box to use

        table = ReflectionTable()
        table['miller_index'] = predicted.miller_index()
        indexer = table.index_map('miller_index')

        candidates = []

        unique = sorted(indexer)

        for h, k, l in unique:

            try:
                for _h in h - 1, h + 1:
                    if not indexer[(_h, k, l)]:
                        raise ValueError('missing')
                for _k in k - 1, k + 1:
                    if not indexer[(h, _k, l)]:
                        raise ValueError('missing')
                for _l in l - 1, l + 1:
                    if not indexer[(h, k, _l)]:
                        raise ValueError('missing')
                candidates.append((h, k, l))
            except ValueError:
                continue

        from dials.algorithms.simulation.utils import build_prediction_matrix

        from dials.algorithms.simulation.generate_test_reflections import \
         master_phil
        from libtbx.phil import command_line
        cmd = command_line.argument_interpreter(master_params=master_phil)
        working_phil = cmd.process_and_fetch(args=args[2:])
        params = working_phil.extract()

        node_size = params.rs_node_size
        window_size = params.rs_window_size
        reference = params.integrated_data_file
        scale = params.integrated_data_file_scale

        if reference:
            counts_database = {}
            from iotbx import mtz
            m = mtz.object(reference)
            mi = m.extract_miller_indices()
            i = m.extract_reals('IMEAN').data
            s = m.space_group().build_derived_point_group()
            for j in range(len(mi)):
                for op in s.all_ops():
                    hkl = tuple(map(int, op * mi[j]))
                    counts = max(0, int(math.floor(i[j] * scale)))
                    counts_database[hkl] = counts
                    counts_database[(-hkl[0], -hkl[1], -hkl[2])] = counts
        else:

            def constant_factory(value):
                import itertools
                return itertools.repeat(value).__next__

            from collections import defaultdict
            counts_database = defaultdict(constant_factory(params.counts))

        from dials.model.data import ReflectionList

        useful = ReflectionList()
        d_matrices = []

        for h, k, l in candidates:
            hkl = predicted[indexer[(h, k, l)][0]]
            _x = hkl.image_coord_px[0]
            _y = hkl.image_coord_px[1]
            _z = hkl.frame_number

            # build prediction matrix
            mhkl = predicted[indexer[(h - 1, k, l)][0]]
            phkl = predicted[indexer[(h + 1, k, l)][0]]
            hmkl = predicted[indexer[(h, k - 1, l)][0]]
            hpkl = predicted[indexer[(h, k + 1, l)][0]]
            hkml = predicted[indexer[(h, k, l - 1)][0]]
            hkpl = predicted[indexer[(h, k, l + 1)][0]]
            d = build_prediction_matrix(hkl, mhkl, phkl, hmkl, hpkl, hkml,
                                        hkpl)
            d_matrices.append(d)

            # construct the shoebox parameters: outline the ellipsoid
            x, y, z = [], [], []

            for dh in (1, 0, 0), (0, 1, 0), (0, 0, 1):
                dxyz = -1 * window_size * d * dh
                x.append(dxyz[0] + _x)
                y.append(dxyz[1] + _y)
                z.append(dxyz[2] + _z)
                dxyz = window_size * d * dh
                x.append(dxyz[0] + _x)
                y.append(dxyz[1] + _y)
                z.append(dxyz[2] + _z)

            hkl.bounding_box = (int(math.floor(min(x))),
                                int(math.floor(max(x)) + 1),
                                int(math.floor(min(y))),
                                int(math.floor(max(y)) + 1),
                                int(math.floor(min(z))),
                                int(math.floor(max(z)) + 1))
            try:
                counts = counts_database[hkl.miller_index]
                useful.append(hkl)
            except KeyError:
                continue

        from dials.algorithms import shoebox
        shoebox.allocate(useful)

        from dials.util.command_line import ProgressBar
        p = ProgressBar(title='Generating shoeboxes')

        # now for each reflection perform the simulation
        for j, refl in enumerate(useful):
            p.update(j * 100.0 / len(useful))
            d = d_matrices[j]

            from scitbx.random import variate, normal_distribution
            g = variate(normal_distribution(mean=0, sigma=node_size))
            counts = counts_database[refl.miller_index]
            dhs = g(counts)
            dks = g(counts)
            dls = g(counts)
            self.map_to_image_space(refl, d, dhs, dks, dls)

        p.finished('Generated %d shoeboxes' % len(useful))

        # now for each reflection add background
        from dials.algorithms.simulation.generate_test_reflections import \
         random_background_plane

        p = ProgressBar(title='Generating background')
        for j, refl in enumerate(useful):
            p.update(j * 100.0 / len(useful))
            if params.background:
                random_background_plane(refl.shoebox, params.background, 0.0,
                                        0.0, 0.0)
            else:
                random_background_plane(refl.shoebox, params.background_a,
                                        params.background_b,
                                        params.background_c,
                                        params.background_d)

        p.finished('Generated %d backgrounds' % len(useful))
        if params.output.all:
            with open(params.output.all, 'wb') as fh:
                pickle.dump(useful, fh, pickle.HIGHEST_PROTOCOL)
Example #40
from __future__ import print_function
from scitbx.array_family import flex
from scitbx.random import variate, uniform_distribution, poisson_distribution
import math

rate = 100
nn = 1000000
ss = 3

scale = variate(uniform_distribution(min=-ss, max=ss))
intensity = variate(poisson_distribution(mean=rate))

d = flex.double(nn)

for j in range(nn):
    x = next(scale)
    d[j] = math.exp(-x * x) * next(intensity)

h = flex.histogram(d, data_min=0, data_max=2 * rate, n_slots=100)

total = 0
for c, s in zip(h.slot_centers(), h.slots()):
    total += s
    print(c, s, total / nn)
Example #41
if __name__ == '__main__':

    # Test the expression in dials_regression/doc/notes/scaling/scaling.tex
    # (as of revision 1537) for d<Ih>/dp. Simulate 10 measurements of a
    # reflection with different scales. Here the scale increases linearly between
    # each equally-spaced measurement, but the form of the scale variation
    # doesn't affect the derivative calculation.
    nobs = 10

    # known scale factors
    K = [1 + e / 40. for e in range(nobs)]
    g = [1. / e for e in K]

    # intensities
    means = [100 * e for e in K]
    I = [variate(poisson_distribution(e))() for e in means]

    # weights (inverse variances of I)
    w = [1. / e for e in means]

    mrgI = av_I(I, w, g)

    for iparam in range(nobs):
        dmrgI_dp = grad_av_I(I, w, g, iparam)
        fd_dmrgI_dp = fd_grad_av_I(I, w, g, iparam)

        assert approx_equal(dmrgI_dp, fd_dmrgI_dp)
    print "OK"

    # Now test the complete expression for the first derivative of the residuals
    # of the HRS target.
Example #42
def poisson_source(howmany, counts):
  from scitbx.random import variate, poisson_distribution
  g = variate(poisson_distribution(mean=counts))
  return [next(g) for j in range(howmany)]
Example #44
      except KeyError:
        continue

    from dials.algorithms import shoebox
    shoebox.allocate(useful)

    from dials.util.command_line import ProgressBar
    p = ProgressBar(title = 'Generating shoeboxes')

    # now for each reflection perform the simulation
    for j, refl in enumerate(useful):
      p.update(j * 100.0 / len(useful))
      d = d_matrices[j]

      from scitbx.random import variate, normal_distribution
      g = variate(normal_distribution(mean = 0, sigma = node_size))
      counts = counts_database[refl.miller_index]
      dhs = g(counts)
      dks = g(counts)
      dls = g(counts)
      self.map_to_image_space(refl, d, dhs, dks, dls)

    p.finished('Generated %d shoeboxes' % len(useful))

    # now for each reflection add background
    from dials.algorithms.simulation.generate_test_reflections import \
     random_background_plane

    p = ProgressBar(title = 'Generating background')
    for j, refl in enumerate(useful):
      p.update(j * 100.0 / len(useful))
Example #45
def poisson_source(howmany, counts):
    from scitbx.random import variate, poisson_distribution

    g = variate(poisson_distribution(mean=counts))
    return [next(g) for j in range(howmany)]
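
A quick check of poisson_source (a sketch with a generous tolerance): the sample mean of the draws approaches the requested mean.

draws = poisson_source(100000, 50)
assert abs(sum(draws) / len(draws) - 50) < 1.0
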