Example #1
from scitbx import fftpack
from scitbx.array_family import flex


def kernapply(x, k, circular=False):
  """Convolve a sequence x with a Kernel k."""

  x = flex.double(x).deep_copy()
  lenx = len(x)
  # build the kernel as a wrap-around filter: coefficients k.coef[0..m] at
  # the start of w and k.coef[1..m] mirrored onto the end
  w = flex.double(lenx, 0.0)
  w.set_selected(flex.size_t_range(k.m + 1), k.coef)
  sel = lenx - 1 - flex.size_t_range(k.m)
  w.set_selected(sel, k.coef[1:])

  # do convolution in the Fourier domain
  fft = fftpack.real_to_complex(lenx)
  n = fft.n_real()
  m = fft.m_real()
  x.extend(flex.double(m-n, 0.))
  w.extend(flex.double(m-n, 0.))
  conv = fft.forward(x) * fft.forward(w)

  # extend result by the reverse conjugate, omitting the DC offset and Nyquist
  # frequencies. If fft.n_real() is odd there is no Nyquist term.
  end = fft.n_complex() - (fft.n_real() + 1) % 2
  conv.extend(flex.conj(conv[1:end]).reversed())

  # transform back, take real part and scale
  fft = fftpack.complex_to_complex(len(conv))
  result = fft.backward(conv).parts()[0] / n

  if circular:
    return result
  else:
    return result[(k.m):(lenx-k.m)]
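
# Usage sketch (hypothetical): mirroring R's kernapply, the Kernel object is
# assumed to carry a half-width m and a flex.double of m + 1 coefficients
# coef, with coef[0] the central weight.
if __name__ == "__main__":
  class Kernel(object):
    def __init__(self, coef):
      self.coef = flex.double(coef)
      self.m = len(coef) - 1

  k = Kernel([1.0 / 3, 1.0 / 3])    # 3-point moving average
  data = [float(i * i) for i in range(10)]
  print(list(kernapply(data, k)))   # smoothed interior points only
  print(list(kernapply(data, k, circular=True)))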
Example #2

import math as smath

from scitbx import math
from scitbx.array_family import flex


def good2n(nmax, coefs_list, ref_coefs, threshold=0.90, outfile=''):
    """Return the highest expansion order n at which the coefficients in
    coefs_list, truncated to order n, still correlate with ref_coefs above
    the given threshold on average."""
    # cc = 0.90 is equivalent to 5% mutation in real space at nmax <= 10
    max_indx = math.nlm_array(nmax).nlm().size()
    for nn in range(nmax, 1, -1):
        min_indx = math.nlm_array(nn - 1).nlm().size()
        coef_0 = ref_coefs[0:max_indx]
        mean_0 = abs(ref_coefs[0])
        sigma_0 = smath.sqrt(flex.sum(flex.norm(coef_0)) - mean_0**2)
        cc_array = flex.double()
        for coef in coefs_list:
            coef_1 = coef[0:max_indx]
            mean_1 = abs(coef[0])
            sigma_1 = smath.sqrt(flex.sum(flex.norm(coef_1)) - mean_1**2)
            cov_01 = abs(flex.sum(coef_0 * flex.conj(coef_1)))
            cov_01 = cov_01 - mean_0 * mean_1
            this_cc = cov_01 / sigma_1 / sigma_0
            cc_array.append(this_cc)
            if outfile:
                with open(outfile, "a") as out:
                    print(this_cc, file=out)
            print(this_cc)
        mean_cc = flex.mean(cc_array)
        if outfile:
            with open(outfile, "a") as out:
                print("level n: ", nn, mean_cc, file=out)
        print("level n: ", nn, mean_cc)
        if mean_cc >= threshold:
            return nn
        max_indx = min_indx
    return nn
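
# Usage sketch (synthetic data): build a random reference coefficient set of
# order nmax and compare a perturbed copy against it. flex.random_double and
# the flex.complex_double(re, im) constructor are scitbx facilities; the
# noise level here is an arbitrary choice.
if __name__ == "__main__":
    nmax = 10
    n_coef = math.nlm_array(nmax).nlm().size()
    ref = flex.complex_double(flex.random_double(n_coef),
                              flex.random_double(n_coef))
    noise = flex.complex_double(0.05 * (flex.random_double(n_coef) - 0.5),
                                0.05 * (flex.random_double(n_coef) - 0.5))
    n_good = good2n(nmax, [ref + noise], ref, threshold=0.90)
    print("usable order:", n_good)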
Example #3
import random

from scitbx import math
from scitbx.array_family import flex


def tst_zernike_grid(skip_iteration_probability=0.95):
  # THIS TEST TAKES A BIT OF TIME
  M = 20  # grid runs from -M to M along each axis
  N = 4   # maximum expansion order
  zga = math.zernike_grid(M, N, False)
  zgb = math.zernike_grid(M, N, False)

  xyz = zga.xyz()
  coefs = zga.coefs()
  nlm = zga.nlm()

  rng = random.Random()
  # check pairwise orthogonality of the basis functions by numerical
  # integration over the grid: load a single unit coefficient into each of
  # two grids and compare the resulting functions
  for ii in range(nlm.size()):
    for jj in range(ii + 1, nlm.size()):
      if rng.random() < skip_iteration_probability:
        continue
      coefsa = coefs * 0.0
      coefsb = coefs * 0.0
      coefsa[ii] = 1.0 + 1.0j
      coefsb[jj] = 1.0 + 1.0j
      zga.load_coefs(nlm, coefsa)
      zgb.load_coefs(nlm, coefsb)
      fa = zga.f()
      fb = zgb.f()

      # <fa, fb> should vanish for ii != jj; <fa, fa> and <fb, fb> should agree
      prodsum = flex.sum(fa * flex.conj(fb)) / xyz.size()
      prodsuma = flex.sum(fa * flex.conj(fa)) / xyz.size()
      prodsumb = flex.sum(fb * flex.conj(fb)) / xyz.size()

      t1 = abs(prodsum)
      t2 = abs(prodsuma)
      t3 = abs(prodsumb)
      t1 = 100.0 * (t1 / t2)            # cross term as a percentage of the norm
      t2 = 100.0 * (abs(t2 - t3) / t3)  # mismatch between the two norms, percent
      # Unfortunately this numerical integration scheme is not optimal. For
      # certain combinations of nlm we see significant non-orthogonality,
      # which is reduced when the number of grid points is increased. Similar
      # behaviour is seen in the radial part of the Zernike polynomial; if we
      # compile without the radial function, the spherical harmonics alone
      # show the same effect. For this reason the liberal limits set below
      # are acceptable.
      assert t1 < 2.0
      assert t2 < 5.0
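
# Usage sketch: run the orthogonality test directly. Lowering
# skip_iteration_probability exercises more (ii, jj) pairs at the cost of a
# longer runtime; the value below is an arbitrary choice for a quick check.
if __name__ == "__main__":
  tst_zernike_grid(skip_iteration_probability=0.99)
  print("OK")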