Example #1
def tst_nsd():
  moving1 = flex.vec3_double()
  moving2 = flex.vec3_double()
  fixed  = flex.vec3_double()
  max_noise = 0
  for ii in range(10):
    noise = flex.random_double(3)*2-1.0
    if noise.norm() > max_noise:
      max_noise = noise.norm()
    xyz = flex.random_double(3)*5
    fixed.append( list(xyz) )
    moving1.append(  list(xyz + noise/10) )
    moving2.append(  list(xyz + noise/2) )

  ne = nsd_engine(fixed)
  a = ne.nsd(fixed)
  b = ne.nsd(moving1)
  c = ne.nsd(moving2)
  assert abs(a)<1e-6
  assert(b<=c)

  matrix = euler.zyz_matrix(0.7,1.3,2.1)
  fixed_r = matrix*moving1+(8,18,28)
  fitter = nsd_rigid_body_fitter( fixed,fixed_r)
  nxyz = fitter.best_shifted()
  dd = nxyz[0:fixed.size()]-fixed
  dd = dd.norms()
  dd = flex.max(dd)
  assert (dd<2.00*max_noise/10)
Example #2
  def test_axis(self):

    axis = (0.37394394059075464, 0.49642290523592875, 0.7834093619893614)
    angle = flex.random_double() * math.pi
    rotmat = scitbx.matrix.sqr(
      scitbx.math.r3_rotation_axis_and_angle_as_matrix( axis, angle )
      )

    missets = [
      scitbx.matrix.sqr(
        scitbx.math.r3_rotation_axis_and_angle_as_matrix(
          flex.random_double_point_on_sphere(),
          ( flex.random_double() - 0.5 )* self.MAX_ERROR,
          )
        )
      for i in range( self.SAMPLE_SIZE )
      ]
    matrices = [ misset * rotmat for misset in missets ]
    aver_lie = scitbx.math.r3_rotation_average_rotation_via_lie_algebra(
      matrices = matrices,
      )
    aver_quat = scitbx.matrix.sqr(
      scitbx.math.r3_rotation_average_rotation_matrix_from_matrices(
        *matrices
        )
      )
    diff = aver_quat.transpose() * aver_lie
    self.assertAlmostEqual( ( diff - TestElement.IDENTITY ).norm_sq(), 0, 7 )
Example #3
def exercise(args):
    verbose = "--verbose" in args
    if not verbose:
        out = StringIO()
    else:
        out = sys.stdout
    for i_trial in xrange(100):
        ops = []
        for i in xrange(3):
            ops.append(matrix.sqr(flex.random_double(size=9, factor=4) - 2))
        sites = []
        for i in xrange(2):
            sites.append(matrix.col(flex.random_double(size=3, factor=4) - 2))
        hkl = matrix.row(flex.random_double(size=3, factor=4) - 2)
        ca = cos_alpha(sites=sites, ops=ops, hkl=hkl)
        grads_fin = d_cos_alpha_d_sites_finite(sites=sites, ops=ops, hkl=hkl)
        print >> out, "grads_fin:", list(grads_fin)
        grads_ana = ca.d_sites()
        print >> out, "grads_ana:", list(grads_ana)
        assert approx_equal(grads_ana, grads_fin)
        curvs_fin = d2_cos_alpha_d_sites_finite(sites=sites, ops=ops, hkl=hkl)
        print >> out, "curvs_fin:", list(curvs_fin)
        curvs_ana = ca.d2_sites()
        print >> out, "curvs_ana:", list(curvs_ana)
        assert approx_equal(curvs_ana, curvs_fin, 1.0e-5)
        print >> out
    print "OK"
Example #4
def exercise(args):
  verbose =  "--verbose" in args
  if (not verbose):
    out = StringIO()
  else:
    out = sys.stdout
  for i_trial in xrange(10):
    for n_sites in xrange(2,5+1):
      ops = []
      for i in xrange(3):
        ops.append(matrix.sqr(flex.random_double(size=9, factor=2)-1))
      sites = []
      for i in xrange(n_sites):
        sites.append(matrix.col(flex.random_double(size=3, factor=4)-2))
      hkl = matrix.row(flex.random_double(size=3, factor=4)-2)
      sf = exp_i_hx(sites=sites, ops=ops, hkl=hkl)
      for obs_factor in [1, 1.1]:
        obs = abs(sf.f()) * obs_factor
        grads_fin = d_exp_i_hx_d_sites_finite(
          sites=sites, ops=ops, obs=obs, hkl=hkl)
        print >> out, "grads_fin:", list(grads_fin)
        tf = least_squares(obs=obs, calc=sf.f())
        grads_ana = sf.d_target_d_sites(target=tf)
        print >> out, "grads_ana:", list(grads_ana)
        compare_derivatives(grads_ana, grads_fin)
        curvs_fin = d2_exp_i_hx_d_sites_finite(
          sites=sites, ops=ops, obs=obs, hkl=hkl)
        print >> out, "curvs_fin:", list(curvs_fin)
        curvs_ana = sf.d2_target_d_sites(target=tf)
        print >> out, "curvs_ana:", list(curvs_ana)
        compare_derivatives(curvs_ana, curvs_fin)
        print >> out
  print "OK"
Example #5
  def embed(self,n_dimensions,n_points):
    x = []
    for ii in range(n_points):
      x.append( flex.random_double(n_dimensions)*100 )

    l = float(self.l)
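    # each cycle below visits the points in random order, picks one random
    # contact per point and moves the pair toward its target distance td;
    # the step size l is reduced by dl after every cycle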
    for mm in range(self.max_cycle):
      atom_order = flex.sort_permutation( flex.random_double(len(x))  )
      strain = 0.0
      for ii in atom_order:
        n_contacts = len(self.dmat[ii])
        jj_index = flex.sort_permutation( flex.random_double( n_contacts ) )[0]
        jj_info = self.dmat[ii][jj_index]
        jj = jj_info[0]
        td = jj_info[1]
        xi = x[ii]
        xj = x[jj]
        cd = smath.sqrt( flex.sum( (xi-xj)*(xi-xj) ) )
        new_xi = xi + l*0.5*(td-cd)/(cd+self.eps)*(xi-xj)
        new_xj = xj + l*0.5*(td-cd)/(cd+self.eps)*(xj-xi)
        strain += abs(cd-td)
        x[ii] = new_xi
        x[jj] = new_xj
      l = l-self.dl
    return x,strain/len(x)
Example #6
def exercise(args):
  verbose =  "--verbose" in args
  if (not verbose):
    out = StringIO()
  else:
    out = sys.stdout
  for i_trial in xrange(100):
    ops = []
    for i in xrange(3):
      ops.append(matrix.sqr(flex.random_double(size=9, factor=4)-2))
    u = matrix.col((flex.random_double(size=6, factor=2)-1)*1.e-3)
    hkl = matrix.row(flex.random_double(size=3, factor=4)-2)
    dw = debye_waller(u=u, ops=ops, hkl=hkl)
    grads_fin = d_debye_waller_d_u_finite(u=u, ops=ops, hkl=hkl)
    print >> out, "grads_fin:", list(grads_fin)
    grads_ana = dw.d_u()
    print >> out, "grads_ana:", list(grads_ana)
    compare_derivatives(grads_ana, grads_fin)
    curvs_fin = d2_debye_waller_d_u_finite(u=u, ops=ops, hkl=hkl)
    print >> out, "curvs_fin:", list(curvs_fin)
    curvs_ana = dw.d2_u()
    print >> out, "curvs_ana:", list(curvs_ana)
    compare_derivatives(curvs_ana, curvs_fin)
    print >> out
  print "OK"
Example #7
def exercise_complex_to_complex_3d():
  print "complex_to_complex_3d"
  for n_complex,n_repeats in [((100,80,90),2), ((200,160,180),1)]:
    print "  dimensions:", n_complex
    print "  repeats:", n_repeats
    np = n_complex[0]*n_complex[1]*n_complex[2]
    d0 = (flex.random_double(size=np)*2-1) * flex.polar(
      1, flex.random_double(size=np)*2-1)
    d0.reshape(flex.grid(n_complex))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftw3tbx.complex_to_complex_3d_in_place(data=d, exp_sign=-1)
      fftw3tbx.complex_to_complex_3d_in_place(data=d, exp_sign=+1)
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    rw = d / np
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftpack.complex_to_complex_3d(n_complex).forward(d)
      fftpack.complex_to_complex_3d(n_complex).backward(d)
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
    rp = d / np
    #
    assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #8
def exercise_savitzky_golay_smoothing():

  plot = False

  def rms(flex_double):
    return math.sqrt(flex.mean(flex.pow2(flex_double)))

  for sigma_frac in (0.005, 0.01, 0.05, 0.1):
    mean = random.randint(-5,5)
    scale = flex.random_double() * 10
    sigma = flex.random_double() * 5 + 1
    gaussian = curve_fitting.gaussian(scale, mean, sigma)

    x = flex.double(frange(-20,20,0.1))
    y = gaussian(x)
    rand_norm = scitbx.random.normal_distribution(
      mean=0, sigma=sigma_frac*flex.max_absolute(y))
    g = scitbx.random.variate(rand_norm)
    noise = g(y.size())
    y_noisy = y + noise
    # according to Numerical Recipes, the best results are obtained where the
    # full window width is between 1 and 2 times the number of points at FWHM
    # for polynomials of degree 4
    half_window = int(round(0.5 * 2.355 * sigma * 10))
    y_filtered = savitzky_golay_filter(x, y_noisy, half_window=half_window, degree=4)[1]
    extracted_noise = y_noisy - y_filtered
    rms_noise = rms(noise)
    rms_extracted_noise = rms(extracted_noise)

    assert is_below_limit(
      value=abs(rand_norm.sigma - rms_noise)/rand_norm.sigma,
      limit=0.15)
    assert is_below_limit(
      value=abs(rand_norm.sigma - rms_extracted_noise)/rand_norm.sigma,
      limit=0.15)

    diff = y_filtered - y
    assert is_below_limit(
      value=(rms(diff)/ rand_norm.sigma),
      limit=0.4)

    if plot:
      from matplotlib import pyplot
      pyplot.plot(x, y)
      pyplot.plot(x, noise)
      pyplot.scatter(x, y_noisy, marker="x")
      pyplot.plot(x, y_filtered)
      pyplot.show()
      pyplot.plot(x, extracted_noise)
      pyplot.plot(x, noise)
      pyplot.show()

  return
Example #9
    def __init__(self):
        from scitbx.array_family import flex

        # Create an image
        self.image = flex.random_double(2000 * 2000)
        self.image.reshape(flex.grid(2000, 2000))
        self.mask = flex.random_bool(2000 * 2000, 0.99)
        self.mask.reshape(flex.grid(2000, 2000))
        self.gain = flex.random_double(2000 * 2000) + 0.5
        self.gain.reshape(flex.grid(2000, 2000))
        self.size = (3, 3)
        self.min_count = 2
Example #10
  def tst_with_noisy_flat_background(self):

    from dials.algorithms.integration.fit import fit_profile
    from scitbx.array_family import flex
    from tst_profile_helpers import gaussian

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c0 = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    n = flex.random_double(9 * 9 * 9)
    b = flex.double(flex.grid(9, 9, 9), 5) + n
    m = flex.bool(flex.grid(9,9,9), True)
    c = c0 + b

    # Fit
    fit = fit_profile(p, m, c, b)
    I = fit.intensity()
    V = fit.variance()

    # Test intensity is the same
    eps = 1e-7
    assert(abs(I - flex.sum(c0)) < eps)
    assert(abs(V - (flex.sum(c0) + flex.sum(b))) < eps)

    print 'OK'
Example #11
 def run_dssa(self):
   start_matrix=[]
   for ii in range(self.n):
     start_matrix.append( flex.random_double(self.n)*2.0-1.0 )
   start_matrix.append( flex.double(self.n,0) )
   self.optimizer = dssa.dssa( dimension=self.n, matrix=start_matrix, evaluator=self, tolerance=1e-5, further_opt=True )
   self.x = self.optimizer.get_solution()
Example #12
def exercise_matrix_x_vector():
  from scitbx.random import variate, uniform_distribution
  for m,n in [(5,5), (3,5), (5,3)]:
    random_vectors = variate(
      sparse.vector_distribution(
        n, density=0.4,
        elements=uniform_distribution(min=-2, max=2)))
    random_matrices = variate(
      sparse.matrix_distribution(
        m, n, density=0.3,
        elements=uniform_distribution(min=-2, max=2)))
    for n_test in xrange(50):
      a = random_matrices.next()
      x = random_vectors.next()
      y = a*x
      aa = a.as_dense_matrix()
      xx = x.as_dense_vector()
      yy1 = y.as_dense_vector()
      yy2 = aa.matrix_multiply(xx)
      assert approx_equal(yy1,yy2)

  for m,n in [(5,5), (3,5), (5,3)]:
    random_matrices = variate(
      sparse.matrix_distribution(
        m, n, density=0.4,
        elements=uniform_distribution(min=-2, max=2)))
    for n_test in xrange(50):
      a = random_matrices.next()
      x = flex.random_double(n)
      y = a*x
      aa = a.as_dense_matrix()
      yy = aa.matrix_multiply(x)
      assert approx_equal(y, yy)
Example #13
def exercise_ellipsoidal_truncation(space_group_info, n_sites=100, d_min=1.5):
    xrs = random_structure.xray_structure(
        space_group_info=space_group_info,
        elements=(("O", "N", "C") * (n_sites // 3 + 1))[:n_sites],
        volume_per_atom=50,
        min_distance=1.5,
    )
    f_obs = abs(xrs.structure_factors(d_min=d_min).f_calc())
    # exercise reciprocal_space_vector()
    for mi, d in zip(f_obs.indices(), f_obs.d_spacings().data()):
        rsv = flex.double(f_obs.unit_cell().reciprocal_space_vector(mi))
        assert approx_equal(d, 1.0 / math.sqrt(rsv.dot(rsv)))
    ##
    print f_obs.unit_cell()
    f = flex.random_double(f_obs.data().size()) * flex.mean(f_obs.data()) / 10
    #
    f_obs1 = f_obs.customized_copy(data=f_obs.data(), sigmas=f_obs.data() * f)
    print "datat in:", f_obs1.data().size()
    r = f_obs1.ellipsoidal_truncation_by_sigma(sigma_cutoff=1)
    print "data left:", r.data().size()
    r.miller_indices_as_pdb_file(file_name="indices1.pdb", expand_to_p1=False)
    r.miller_indices_as_pdb_file(file_name="indices2.pdb", expand_to_p1=True)
    #
    f_obs.miller_indices_as_pdb_file(file_name="indices3.pdb", expand_to_p1=False)
    f_obs.miller_indices_as_pdb_file(file_name="indices4.pdb", expand_to_p1=True)
    print "*" * 25
Example #14
 def run_simplex(self):
   start_matrix=[]
   for ii in range(self.n):
     start_matrix.append( flex.random_double(self.n)*2.0-1.0 )
   start_matrix.append( flex.double(self.n,0) )
   self.optimizer = simplex.simplex_opt( dimension=self.n, matrix=start_matrix, evaluator=self, tolerance=1e-5 )
   self.x = self.optimizer.get_solution()
Example #15
def another_example(np=41,nt=5):
  x = flex.double( range(np) )/(np-1)
  y = 0.99*flex.exp(-x*x*0.5)
  y = -flex.log(1.0/y-1)
  w = y*y/1.0
  d = (flex.random_double(np)-0.5)*w
  y_obs = y+d

  y = 1.0/( 1.0 + flex.exp(-y) )

  fit_w = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
                                              x,
                                              y_obs,
                                              w )
  fit_w_f = chebyshev_polynome(
    nt, fit_w.low_limit, fit_w.high_limit, fit_w.coefs)


  fit_nw = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
                                              x,
                                              y_obs)
  fit_nw_f = chebyshev_polynome(
    nt, fit_nw.low_limit, fit_nw.high_limit, fit_nw.coefs)
  print
  print "Coefficients from weighted lsq"
  print list( fit_w.coefs )
  print "Coefficients from non-weighted lsq"
  print list( fit_nw.coefs )
  assert flex.max( flex.abs(fit_nw.coefs-fit_w.coefs) ) > 0
Example #16
  def tst_mean_and_variance_filter(self):
    from dials.algorithms.image.filter import mean_and_variance_filter
    from scitbx.array_family import flex
    from random import randint

    # Create an image
    image = flex.random_double(2000 * 2000)
    image.reshape(flex.grid(2000, 2000))

    # Calculate the summed area table
    mean_and_variance = mean_and_variance_filter(image, (3, 3))
    mean = mean_and_variance.mean()
    variance = mean_and_variance.variance()
    sample_variance = mean_and_variance.sample_variance()

    # For a selection of random points, ensure that the mean and sample
    # variance match those computed directly from the pixels under the kernel
    eps = 1e-7
    for i in range(10000):
      i = randint(10, 1990)
      j = randint(10, 1990)
      m1 = mean[j,i]
      v1 = variance[j,i]
      sv1 = sample_variance[j,i]
      p = image[j-3:j+4,i-3:i+4]
      mv = flex.mean_and_variance(p.as_1d())
      m2 = mv.mean()
      sv2 = mv.unweighted_sample_variance()
      assert(abs(m1 - m2) <= eps)
      assert(abs(sv1 - sv2) <= eps)

    # Test passed
    print 'OK'
Example #17
def test_setup(config):
  import cctbx.miller
  from iotbx import mtz, pdb
  from scitbx.array_family import flex

  mtz_name = config['mtz_filename']
  mtz_file = mtz.object(mtz_name)

  pdb_name = config['pdb_name']
  pdb_inp = pdb.input(file_name=pdb_name)
  structure = pdb_inp.xray_structure_simple()
  miller = structure.structure_factors(d_min=2.85).f_calc()
  miller_sub = miller[20000:20002]

  flex.random_generator.seed(82364)
  size = miller.size()
  rand_sel_1 = flex.random_bool(size, 0.5)
  rand_sel_2 = flex.random_bool(size, 0.5)
  miller_1 = miller.select(rand_sel_1).randomize_phases()
  miller_2 = miller.select(rand_sel_2).randomize_phases()
  rand_doub_1 = flex.random_double(miller_1.size(), 0.1) + 0.015
  rand_doub_2 = flex.random_double(miller_2.size(), 0.1) + 0.015
  sigmas_1 = rand_doub_1 * miller_1.amplitudes().data()
  sigmas_2 = rand_doub_2 * miller_2.amplitudes().data()
  miller_1.set_sigmas(sigmas_1)
  miller_2.set_sigmas(sigmas_2)
  miller_1.set_observation_type_xray_amplitude()
  miller_2.set_observation_type_xray_amplitude()
  miller_1.as_intensity_array().i_over_sig_i()
  miller_2.as_intensity_array().i_over_sig_i()

  binner = miller.setup_binner(n_bins=20)
  indices = miller.indices()

  mtch_indcs = miller_1.match_indices(miller_2)
  mset = miller.set()

  # doc = wikify_all_methods(cctbx.miller.binning, config)
  # doc = wikify_all_methods(type(mset), config)
  # doc = wikify_all_methods(type(binner), config)
  # doc = wikify_all_methods(type(miller), config)
  # doc = wikify_all_methods(type(miller.data()), config)
  # doc = wikify_all_methods(type(indices), config)
  # doc = wikify_all_methods(type(mtch_indcs), config,
  #                          module=["cctbx", "miller"])

  return (mtch_indcs, ["cctbx", "miller"])
Example #18
def exercise_complex_to_complex_3d () :
  from scitbx.array_family import flex
  from cudatbx import cufft
  from scitbx import fftpack
  import time
  import sys
  print ""
  print "complex_to_complex_3d"
  for n_complex,n_repeats in [((100,80,90),16), ((200,160,180),16)]:
    print "  dimensions:", n_complex
    print "  repeats:", n_repeats
    np = n_complex[0]*n_complex[1]*n_complex[2]
    d0 = flex.polar(
      flex.random_double(size=np)*2-1,
      flex.random_double(size=np)*2-1)
    d0.reshape(flex.grid(n_complex))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    # XXX extra CuFFT to initialize device - can we avoid this somehow?
    d = d0.deep_copy()
    cufft.complex_to_complex_3d(n_complex).forward(d)
    cufft.complex_to_complex_3d(n_complex).backward(d)
    # benchmarking run
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      cufft.complex_to_complex_3d(n_complex).forward(d)
      cufft.complex_to_complex_3d(n_complex).backward(d)
    print "    cufft:    %6.2f seconds" % ((time.time()-t0-overhead)/n_repeats)
    rw = d / np
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftpack.complex_to_complex_3d(n_complex).forward(d)
      fftpack.complex_to_complex_3d(n_complex).backward(d)
    print "    fftpack:  %6.2f seconds" % ((time.time()-t0-overhead)/n_repeats)
    sys.stdout.flush()
    rp = d / np
    #
    print ""
    assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #19
 def get_flex_image(self, brightness, **kwargs):
     # no kwargs supported at present
     rawdata = flex.random_double(200 * 250)
     rawdata.reshape(flex.grid(250, 200))
     self.data = rawdata
     return GenericFlexImage(
         rawdata=rawdata, size1_readout=250, size2_readout=200, brightness=brightness, saturation=256.0
     )
Example #20
 def load_coefs(self, coefs=None):
   if coefs is None:
     self.coefs = (flex.random_double(self.n)-0.5)*2.0
   else:
     assert len(coefs)==self.n
     self.coefs = coefs
   # no means to refresh the coefficients yet in an elegant manner
   self.polynome = chebyshev_polynome(self.n, -1.0, +1.0, self.coefs)
Example #21
def exercise(method):
  assert method in ["kearsley", "kabsch"]
  # global shifts
  for n_sites in [1,3,7,10,30]:
    reference = flex.vec3_double(flex.random_double(n_sites*3)*10-5)
    other = reference + list(flex.random_double(3)*100-50)
    for i_trial in xrange(10):
      s = least_squares_fit(reference, other, method)
      assert approx_equal(reference, s.other_sites_best_fit())
      c = random_rotation()
      s = least_squares_fit(reference, tuple(c)*other, method)
      if method == "kearsley": # Kabsch fails in special cases
        assert approx_equal(s.r.determinant(), 1)
      assert approx_equal(reference, s.other_sites_best_fit())
      assert approx_equal(s.rt().r, s.r)
      assert approx_equal(s.rt().t, s.t)
      assert approx_equal(reference, s.rt() * s.other_sites)
Example #22
  def test_random_matrices(self):

    for i in range( self.SAMPLE_SIZE ):
      axis = flex.random_double_point_on_sphere()
      angle = flex.random_double() * ( math.pi - self.SINGULARITY_GUARD )
      rotmat = scitbx.matrix.sqr(
        scitbx.math.r3_rotation_axis_and_angle_as_matrix( axis, angle )
        )
      self.run_tests_with( matrix = rotmat )
Example #23
 def run_simplex(self, start, max_iter=500):
     dim = 3
     starting_matrix = [start]
     for ii in range(dim):
         starting_matrix.append(start + (flex.random_double(dim) * 2 - 1) * self.dx)
     optimizer = simplex.simplex_opt(
         dimension=dim, matrix=starting_matrix, evaluator=self, max_iter=max_iter, tolerance=1e-5
     )
     result = optimizer.get_solution()
     return result
Example #24
 def tst_no_mask(self):
   from scitbx.array_family import flex
   from dials.algorithms.background import NormalDiscriminator
   discriminate = NormalDiscriminator(min_data=10)
   shoebox_d = flex.random_double(5 * 5 * 5) * 100
   shoebox = flex.int([int(s) for s in shoebox_d])
   shoebox.reshape(flex.grid((5, 5, 5)))
   mask = discriminate(shoebox)
   self.is_correct(shoebox, mask, 3.0, 10)
   print 'OK'
Example #25
  def evolve(self):
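    # one generation of a differential-evolution style update: for each member,
    # build a trial vector x1 + use_f*(x2 - x3) from three distinct random
    # parents, cross it over with the current member, and keep the trial if
    # its score improves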
    for ii in xrange(self.population_size):
      rnd = flex.random_double(self.population_size-1)
      permut = flex.sort_permutation(rnd)
      # make parent indices
      i1=permut[0]
      if (i1>=ii):
        i1+=1
      i2=permut[1]
      if (i2>=ii):
        i2+=1
      i3=permut[2]
      if (i3>=ii):
        i3+=1
      #
      x1 = self.population[ i1 ]
      x2 = self.population[ i2 ]
      x3 = self.population[ i3 ]

      if self.f is None:
        use_f = random.random()/2.0 + 0.5
      else:
        use_f = self.f

      vi = x1 + use_f*(x2-x3)
      # prepare the offspring vector please
      rnd = flex.random_double(self.vector_length)
      permut = flex.sort_permutation(rnd)
      test_vector = self.population[ii].deep_copy()
      # first the parameters that are certain to cross over
      for jj in xrange( self.vector_length  ):
        if (jj<self.n_cross):
          test_vector[ permut[jj] ] = vi[ permut[jj] ]
        else:
          if (rnd[jj]>self.cr):
            test_vector[ permut[jj] ] = vi[ permut[jj] ]
      # get the score please
      test_score = self.evaluator.target( test_vector )
      # check if the score is lower
      if test_score < self.scores[ii] :
        self.scores[ii] = test_score
        self.population[ii] = test_vector
Example #26
def exercise_real_to_complex_3d():
  print "real_to_complex_3d"
  for n_real,n_repeats in [((100,80,90),8),
                           ((200,160,180),2),
                           ((300,240,320),1)]:
    print "  dimensions:", n_real
    print "  repeats:", n_repeats
    fft = fftpack.real_to_complex_3d(n_real)
    m_real = fft.m_real()
    np = n_real[0]*n_real[1]*n_real[2]
    mp = m_real[0]*m_real[1]*m_real[2]
    d0 = flex.random_double(size=mp)*2-1
    d0.reshape(flex.grid(m_real).set_focus(n_real))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftw3tbx.real_to_complex_3d_in_place(data=d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftw3tbx.complex_to_real_3d_in_place(data=c, n=n_real)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rw = d / np
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftpack.real_to_complex_3d(n_real).forward(d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftpack.real_to_complex_3d(n_real).backward(c)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rp = d / np
      #
      assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #27
def exercise_00():
  x = flex.random_double(1000)
  y = flex.random_double(1000)
  xa = flex.double()
  ya = flex.double()
  ba = flex.bool()
  for x_, y_ in zip(x,y):
    scale1 = random.choice([1.e-6, 1.e-3, 0.1, 1, 1.e+3, 1.e+6])
    scale2 = random.choice([1.e-6, 1.e-3, 0.1, 1, 1.e+3, 1.e+6])
    b = random.choice([True, False])
    x_ = x_*scale1
    y_ = y_*scale2
    v1 = fw_ext.expectEFW(eosq=x_, sigesq=y_, centric=b)
    v2 = fw_ext.expectEsqFW(eosq=x_, sigesq=y_, centric=b)
    assert type(v1) == type(1.)
    assert type(v2) == type(1.)
    xa.append(x_)
    ya.append(y_)
    ba.append(b)
  fw_ext.is_FrenchWilson(F=xa, SIGF=ya, is_centric=ba, eps=0.001)
Example #28
  def read(self):
    # it is intended that the file name be used to read in the raw data;
    # in this example we just use random numbers:
    rawdata = 256*flex.random_double(self.size1*self.size2)
    rawdata.reshape(flex.grid(self.size1,self.size2))
    # this could equally well have been a imported numpy array
    #  import numpy
    #  rawdata2 = 256*numpy.random.rand(self.size1,self.size2)
    #  rawdata = flex.double(rawdata2) #conversion of numpy to cctbx-flex type

    self.linearintdata = rawdata.iround()
Example #29
 def make_random_population(self):
   for ii in xrange(self.vector_length):
     delta  = self.evaluator.domain[ii][1]-self.evaluator.domain[ii][0]
     offset = self.evaluator.domain[ii][0]
     random_values = flex.random_double(self.population_size)
     random_values = random_values*delta+offset
     # now please place these values in the proper places in the
     # vectors of the population we generated
     for vector, item in zip(self.population,random_values):
       vector[ii] = item
   if self.seeded is not False:
     self.population[0] = self.seeded
Example #30
 def __init__(self,n):
   self.n = n
   self.starting_simplex=[]
   for ii in range(self.n+1):
     self.starting_simplex.append(flex.random_double(self.n))
   self.optimizer = simplex_opt( dimension=self.n,
                                 matrix  = self.starting_simplex,
                                 evaluator = self,
                                 tolerance=1e-10)
   self.x = self.optimizer.get_solution()
   for ii in xrange(self.n):
     assert approx_equal(self.x[ii],ii+1,1e-5)
Example #31
 def __init__(self,
              n_macro_cycle,
              sites,
              u_iso,
              finite_grad_differences_test,
              use_geometry_restraints,
              shake_site_mean_distance=1.5,
              d_min=2,
              shake_angles_sigma=0.035,
              shake_translation_sigma=0.5):
     """ create temp test files and data for tests """
     adopt_init_args(self, locals())
     self.test_files_names = []  # collect names of files for cleanup
     # 1 NCS copy: starting template to generate whole asu; place into P1 box
     pdb_inp = iotbx.pdb.input(source_info=None, lines=ncs_1_copy)
     mtrix_object = pdb_inp.process_MTRIX_records()
     ph = pdb_inp.construct_hierarchy()
     xrs = pdb_inp.xray_structure_simple()
     xrs_one_ncs = xrs.orthorhombic_unit_cell_around_centered_scatterers(
         buffer_size=8)
     ph.adopt_xray_structure(xrs_one_ncs)
     of = open("one_ncs_in_asu.pdb", "w")
     print(mtrix_object.as_pdb_string(), file=of)
     print(
         ph.as_pdb_string(crystal_symmetry=xrs_one_ncs.crystal_symmetry()),
         file=of)
     of.close()
     # 1 NCS copy -> full asu (expand NCS). This is the answer-structure
     xrs_asu, pdb_str, dummy1, dummy2, dummy3 = step_1(
         file_name="one_ncs_in_asu.pdb",
         crystal_symmetry=xrs_one_ncs.crystal_symmetry(),
         write_name="full_asu.pdb")
      # force ASU non-rounded coordinates into the xray structure
     assert xrs_asu.crystal_symmetry().is_similar_symmetry(
         xrs_one_ncs.crystal_symmetry())
     # Generate Fobs from answer structure
     f_obs = abs(
         xrs_asu.structure_factors(d_min=d_min,
                                   algorithm="direct").f_calc())
     r_free_flags = f_obs.generate_r_free_flags()
     mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F-obs")
     mtz_dataset.add_miller_array(miller_array=r_free_flags,
                                  column_root_label="R-free-flags")
     mtz_object = mtz_dataset.mtz_object()
     mtz_object.write(file_name="data.mtz")
     # Shake structure - subject to refinement input
     xrs_shaken = xrs_one_ncs.deep_copy_scatterers()
     if sites:
         xrs_shaken.shake_sites_in_place(
             mean_distance=shake_site_mean_distance)
     if self.u_iso:
         u_random = flex.random_double(xrs_shaken.scatterers().size())
         xrs_shaken = xrs_shaken.set_u_iso(values=u_random)
     ph.adopt_xray_structure(xrs_shaken)
     of = open("one_ncs_in_asu_shaken.pdb", "w")
     print(mtrix_object.as_pdb_string(), file=of)
     print(ph.as_pdb_string(crystal_symmetry=xrs.crystal_symmetry()),
           file=of)
     of.close()
     self.f_obs = f_obs
     self.r_free_flags = r_free_flags
     self.xrs_one_ncs = xrs_one_ncs
     # Get restraints manager
     self.grm = None
     self.iso_restraints = None
     if (self.use_geometry_restraints):
         pdb_inp = iotbx.pdb.input(lines=pdb_str, source_info=None)
         model = mmtbx.model.manager(model_input=pdb_inp, log=null_out())
         self.grm = model.get_restraints_manager()
         if (self.u_iso):
             temp = mmtbx.refinement.adp_refinement.adp_restraints_master_params
             self.iso_restraints = temp.extract().iso
Example #32
def example():
    x_obs = (flex.double(range(100)) + 1.0) / 101.0
    y_ideal = flex.sin(x_obs * 6.0 * 3.1415) + flex.exp(x_obs)
    y_obs = y_ideal + (flex.random_double(size=x_obs.size()) - 0.5) * 0.5
    w_obs = flex.double(x_obs.size(), 1)
    print("Trying to determine the best number of terms ")
    print(" via cross validation techniques")
    print()
    n_terms = chebyshev_lsq_fit.cross_validate_to_determine_number_of_terms(
        x_obs, y_obs, w_obs, min_terms=5, max_terms=20, n_goes=20, n_free=20)
    print("Fitting with", n_terms, "terms")
    print()
    fit = chebyshev_lsq_fit.chebyshev_lsq_fit(n_terms, x_obs, y_obs)
    print("Least Squares residual: %7.6f" % (fit.f))
    print("  R2-value            : %7.6f" % (fit.f / flex.sum(y_obs * y_obs)))
    print()
    fit_funct = chebyshev_polynome(n_terms, fit.low_limit, fit.high_limit,
                                   fit.coefs)

    y_fitted = fit_funct.f(x_obs)
    abs_deviation = flex.max(flex.abs((y_ideal - y_fitted)))
    print("Maximum deviation between fitted and error free data:")
    print("    %4.3f" % (abs_deviation))
    abs_deviation = flex.mean(flex.abs((y_ideal - y_fitted)))
    print("Mean deviation between fitted and error free data:")
    print("    %4.3f" % (abs_deviation))
    print()
    abs_deviation = flex.max(flex.abs((y_obs - y_fitted)))
    print("Maximum deviation between fitted and observed data:")
    print("    %4.3f" % (abs_deviation))
    abs_deviation = flex.mean(flex.abs((y_obs - y_fitted)))
    print("Mean deviation between fitted and observed data:")
    print("    %4.3f" % (abs_deviation))
    print()
    print("Showing 10 points")
    print("   x    y_obs y_ideal y_fit")
    for ii in range(10):
        print("%6.3f %6.3f %6.3f %6.3f" \
              %(x_obs[ii*9], y_obs[ii*9], y_ideal[ii*9], y_fitted[ii*9]))

    try:
        from iotbx import data_plots
    except ImportError:
        pass
    else:
        print("Preparing output for loggraph in a file called")
        print("   chebyshev.loggraph")
        chebyshev_plot = data_plots.plot_data(plot_title='Chebyshev fitting',
                                              x_label='x values',
                                              y_label='y values',
                                              x_data=x_obs,
                                              y_data=y_obs,
                                              y_legend='Observed y values',
                                              comments='Chebyshev fit')
        chebyshev_plot.add_data(y_data=y_ideal, y_legend='Error free y values')
        chebyshev_plot.add_data(y_data=y_fitted,
                                y_legend='Fitted chebyshev approximation')
        output_logfile = open('chebyshev.loggraph', 'w')
        f = StringIO()
        data_plots.plot_data_loggraph(chebyshev_plot, f)
        output_logfile.write(f.getvalue())
Example #33
 def random_image(self):
   ind = flex.sort_permutation( flex.random_double(self.N) )[0]
   return self.rot_img[ ind ]
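Note: sorting a vector of uniform deviates and taking the first index, as above, is just a roundabout way of drawing one uniform random index in [0, self.N). A minimal equivalent sketch (assuming import random at module level; self.N and self.rot_img are the attributes used above):

 def random_image(self):
   # equivalent: draw a uniform random index directly
   ind = random.randrange(self.N)
   return self.rot_img[ind]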
Example #34
def normal_variate(mu=0.0, sigma=1.0, N=100):
    "Normal variate via Box-Muller transform"
    U1 = flex.random_double(size=N)
    U2 = flex.random_double(size=N)
    return flex.sqrt(-2.0 * flex.log(U1)) * flex.cos(
        2.0 * math.pi * U2) * sigma + mu
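The function above combines two uniform deviates with the Box-Muller identity sqrt(-2 ln U1) * cos(2 pi U2), then scales by sigma and shifts by mu. A quick sanity check might look like the sketch below (the sample size and tolerances are arbitrary assumptions; math and flex are taken to be imported as in the example):

def check_normal_variate():
    # sample mean and standard deviation should land near mu and sigma
    samples = normal_variate(mu=2.0, sigma=3.0, N=100000)
    mv = flex.mean_and_variance(samples)
    assert abs(mv.mean() - 2.0) < 0.05
    assert abs(math.sqrt(mv.unweighted_sample_variance()) - 3.0) < 0.05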
Example #35
def t_variate(a=1.0, mu=0.0, sigma=1.0, N=100):
    "T-variate via Baley's one-liner"
    U1 = flex.random_double(size=N)
    U2 = flex.random_double(size=N)
    return (flex.sqrt(a * (flex.pow(U1, -2.0 / a) - 1.0)) *
            flex.cos(2.0 * math.pi * U2) * sigma + mu)
Example #36
def pseudo_normalized_abs_delta_i(N=100):
    x = flex.random_double(size=N)
    x = -0.5 * flex.log(1.0 - x)
    return (x)
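Because -0.5 * log(1 - U) with U uniform on [0, 1) follows an exponential distribution with mean 0.5, the generator above can be spot-checked as in the sketch below (sample size and tolerance are assumptions):

def check_pseudo_normalized_abs_delta_i():
    # an exponential distribution with rate 2 has mean 0.5
    x = pseudo_normalized_abs_delta_i(N=100000)
    assert abs(flex.mean(x) - 0.5) < 0.02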
Example #37
def test_plot_rij_histogram():
    rij_matrix = flex.random_double(16)
    d = plots.plot_rij_histogram(rij_matrix)
    assert "cosym_rij_histogram" in d
    assert sum(d["cosym_rij_histogram"]["data"][0]["y"]) == 16
Example #38
def generate_image(xsize, ysize):
    from scitbx.array_family import flex
    image = flex.random_double(xsize * ysize)
    image.reshape(flex.grid(ysize, xsize))
    return image
Example #39
 def sigma(self, m, n):
   p = min(m,n)
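   # about half of the singular values fall in [0, 1/big) and the rest in
   # [big, 2*big), giving a deliberately ill-conditioned spectrum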
   sigma = flex.random_double(p - p//2, factor=1/self.big)
   sigma.extend(flex.random_double(p//2, factor=self.big) + self.big)
   return sigma
Example #40
 def random_increment(self):
     random = 2.0 * flex.random_double(len(
         self.x)) - 1.0  # values from -1 to 1
     sum_sq = flex.sum(random * random)
     normalized = random / math.sqrt(sum_sq)
     return normalized * self.L
Example #41
    def read_default(self):
        if (self.n > 8):
            for ii in range(self.n):
                self.bounds[ii] = flex.random_double(4) * 0
        print "& default constraints are applied"
        if (self.n == 4):
            self.bounds[0] = flex.double([
                -13.6492076687, 4.47249489477, -0.648531500588, 1.00508578883
            ])
            self.bounds[1] = flex.double([
                -11.7770483671, 3.80867469991, -0.464755522251, 0.805970604608
            ])
            self.bounds[2] = flex.double([
                -8.26619615161, 2.65922048539, -0.467162236722, 0.684465258322
            ])
            self.bounds[3] = flex.double([
                -3.88576804298, 1.20291988782, -0.201727072497, 0.321418789576
            ])

        if (self.n == 5):  #coef 5
            self.bounds[0] = flex.double([
                -4.10870916383, 14.5435070049, -0.0237872167003, 1.04148458907
            ])
            self.bounds[1] = flex.double([
                -4.34557410092, 13.2260007625, -0.0505775399547, 1.04786892835
            ])
            self.bounds[2] = flex.double([
                -3.02921642669, 9.47967940636, -0.0255496685307, 0.73961261201
            ])
            self.bounds[3] = flex.double([
                -2.43568258085, 5.45508644475, -0.0428255444417, 0.54548555217
            ])
            self.bounds[4] = flex.double([
                -1.05904411075, 2.18286060437, -0.015965263936, 0.23150527771
            ])
        if (self.n == 6):  #coef 6
            self.bounds[0] = flex.double(
                [-9.98987114005, 5.75104320728, -0.203361142977, 1.2797496454])
            self.bounds[1] = flex.double(
                [-9.0881925762, 5.21730938463, -0.168699949349, 1.12217804255])
            self.bounds[2] = flex.double([
                -7.65751454288, 4.48953026585, -0.169312093081, 1.01238401591
            ])
            self.bounds[3] = flex.double([
                -5.15246914011, 3.06792122304, -0.110692433408, 0.671756096776
            ])
            self.bounds[4] = flex.double([
                -3.13981698125, 2.01054589994, -0.0893939874168, 0.471697111647
            ])
            self.bounds[5] = flex.double([
                -1.28037493374, 0.876578929774, -0.0363599791812,
                0.195285480471
            ])
        if (self.n == 7):  #coef 7
            self.bounds[0] = flex.double([
                -20.3241896393, 7.50025274559, -0.0925684548913, 1.90794419639
            ])
            self.bounds[1] = flex.double([
                -19.271644653, 7.20831756624, -0.0935785997935, 1.87184076462
            ])
            self.bounds[2] = flex.double([
                -15.8255430056, 5.98498321626, -0.0770293463687, 1.52852426121
            ])
            self.bounds[3] = flex.double([
                -11.5624283884, 4.56748518877, -0.0646443019148, 1.20681535555
            ])
            self.bounds[4] = flex.double([
                -7.02554681447, 2.89348936331, -0.0418172018712, 0.751875159574
            ])
            self.bounds[5] = flex.double([
                -3.64460485882, 1.63779480883, -0.0272670458923, 0.462530654903
            ])
            self.bounds[6] = flex.double([
                -1.53005568061, 0.649058372991, -0.0112777025715,
                0.182214074339
            ])
        if (self.n == 8):  #coef 8
            self.bounds[0] = flex.double([
                -15.5696141957, 26.7417847653, -0.429101838698, 2.58512688665
            ])
            self.bounds[1] = flex.double([
                -14.6649384853, 25.1585349149, -0.379700422122, 2.40814186005
            ])
            self.bounds[2] = flex.double([
                -12.8078616893, 21.2070970566, -0.380471864759, 2.14461149339
            ])
            self.bounds[3] = flex.double([
                -9.80561153093, 15.6616904481, -0.291756240601, 1.63458073134
            ])
            self.bounds[4] = flex.double([
                -6.89022655824, 10.1990490377, -0.254789020103, 1.19989926802
            ])
            self.bounds[5] = flex.double([
                -4.04173244902, 5.58238627602, -0.156095989293, 0.710323105559
            ])
            self.bounds[6] = flex.double(
                [-2.11816666574, 2.5721593076, -0.112548787807, 0.41572981972])
            self.bounds[7] = flex.double([
                -0.826062377037, 0.8598617704, -0.0440222198406, 0.158790415553
            ])
Example #42
def exercise_simple():
  pdb_str="""
ATOM     47  N   TYR A   7       8.292   1.817   6.147  1.00 14.70           N
ATOM     48  CA  TYR A   7       9.159   2.144   7.299  1.00 15.18           C
ATOM     49  C   TYR A   7      10.603   2.331   6.885  1.00 15.91           C
ATOM     50  O   TYR A   7      11.041   1.811   5.855  1.00 15.76           O
ATOM     51  CB  TYR A   7       9.061   1.065   8.369  1.00 15.35           C
ATOM     52  CG  TYR A   7       7.665   0.929   8.902  1.00 14.45           C
ATOM     53  CD1 TYR A   7       6.771   0.021   8.327  1.00 15.68           C
ATOM     54  CD2 TYR A   7       7.210   1.756   9.920  1.00 14.80           C
ATOM     55  CE1 TYR A   7       5.480  -0.094   8.796  1.00 13.46           C
ATOM     56  CE2 TYR A   7       5.904   1.649  10.416  1.00 14.33           C
ATOM     57  CZ  TYR A   7       5.047   0.729   9.831  1.00 15.09           C
ATOM     58  OH  TYR A   7       3.766   0.589  10.291  1.00 14.39           O
ATOM     59  OXT TYR A   7      11.358   2.999   7.612  1.00 17.49           O
TER
"""
  pdb_in = iotbx.pdb.input(source_info=None,lines=pdb_str)
  hierarchy = pdb_in.construct_hierarchy()
  xrs = pdb_in.xray_structure_simple()
  xrs.scattering_type_registry(
    d_min=1.5,
    table="n_gaussian")
  xrs.set_inelastic_form_factors(
    photon=1.54,
    table="sasaki")
  file_base = "tmp_mmtbx_cmdline"
  with open(file_base+".pdb", "w") as f:
    f.write(hierarchy.as_pdb_string(crystal_symmetry=xrs))
  fc = abs(xrs.structure_factors(d_min=1.5).f_calc())
  flags = fc.generate_r_free_flags()
  mtz = fc.as_mtz_dataset(column_root_label="F")
  mtz.add_miller_array(flags, column_root_label="FreeR_flag")
  mtz.mtz_object().write(file_base+".mtz")
  with open(file_base+".fa", "w") as f:
    f.write(">Tyr\nY\n")
  base_args = [ file_base + ext for ext in [".pdb",".mtz",".fa"] ]
  cmdline = mmtbx.command_line.load_model_and_data(
    args=base_args+["wavelength=1.54"],
    master_phil=mmtbx.command_line.generate_master_phil_with_inputs(""),
    out=StringIO(),
    create_log_buffer=True)
  assert (cmdline.params.input.xray_data.file_name is not None)
  assert (cmdline.sequence is not None)
  r_factor = cmdline.fmodel.r_work()
  assert (r_factor < 0.002)
  cmdline.save_data_mtz("tmp_mmtbx_cmdline_data.mtz")
  assert os.path.isfile("tmp_mmtbx_cmdline_data.mtz")
  model = cmdline.create_model_manager()
  # energy input
  cmdline = mmtbx.command_line.load_model_and_data(
    args=base_args+["energy=8050"],
    master_phil=mmtbx.command_line.generate_master_phil_with_inputs(""),
    out=StringIO(),
    create_log_buffer=True)
  assert approx_equal(cmdline.params.input.wavelength, 1.54018, eps=0.0001)
  # UNMERGED DATA INPUT
  log = cmdline.start_log_file("tst_mmtbx_cmdline.log")
  log.close()
  fc2 = xrs.structure_factors(d_min=1.3).f_calc().generate_bijvoet_mates()
  fc2 = fc2.randomize_amplitude_and_phase(amplitude_error=0.01,
    phase_error_deg=5, random_seed=12345).customized_copy(
      sigmas=flex.random_double(fc2.size(), 10))
  i_obs = abs(fc2).f_as_f_sq()
  i_obs = i_obs.expand_to_p1().customized_copy(
    crystal_symmetry=fc2).set_observation_type_xray_intensity()
  with open(file_base + ".sca", "w") as f:
    no_merge_original_index.writer(i_obs, file_object=f)
  master_phil = mmtbx.command_line.generate_master_phil_with_inputs(
    phil_string="",
    enable_unmerged_data=True)
  cmdline = mmtbx.command_line.load_model_and_data(
    args=[ file_base + ext for ext in [".pdb",".mtz",".fa",] ] +
      ["unmerged_data.file_name=%s.sca" % file_base ],
    master_phil=master_phil,
    out=StringIO(),
    create_log_buffer=True)
  assert (cmdline.unmerged_i_obs is not None)
  # test with unknown scatterers
  pdb_in = iotbx.pdb.input(source_info=None,lines=pdb_str+"""\
ATOM     59  UNK UNL A   7       0.000   0.000   0.000  1.00 20.00           X
""")
  hierarchy = pdb_in.construct_hierarchy()
  file_base = "tmp_mmtbx_cmdline"
  with open(file_base+".pdb", "w") as f:
    f.write(hierarchy.as_pdb_string(crystal_symmetry=xrs))
  try :
    cmdline = mmtbx.command_line.load_model_and_data(
      args=[ file_base + ext for ext in [".pdb",".mtz",".fa",] ],
      master_phil=master_phil,
      out=StringIO(),
      process_pdb_file=False,
      create_log_buffer=True)
  except Sorry :
    pass
  else :
    raise Exception_expected
  cmdline = mmtbx.command_line.load_model_and_data(
    args=[ file_base + ext for ext in [".pdb",".mtz",".fa",] ],
    master_phil=master_phil,
    out=StringIO(),
    process_pdb_file=False,
    create_log_buffer=True,
    remove_unknown_scatterers=True)