Example #1
  def plot_unitcells(self, experiments):
    if len(experiments) == 1:
      return
    all_a = flex.double()
    all_b = flex.double()
    all_c = flex.double()
    for crystal in experiments.crystals():
      a, b, c = crystal.get_unit_cell().parameters()[0:3]
      all_a.append(a); all_b.append(b); all_c.append(c)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    for ax, axis, data in zip(axes, ['A', 'B', 'C'], [all_a, all_b, all_c]):
      stats = flex.mean_and_variance(data)
      cutoff = 4*stats.unweighted_sample_standard_deviation()
      if cutoff < 0.5:
        cutoff = 0.5
      limits = stats.mean()-cutoff, stats.mean()+cutoff
      sel = (data >= limits[0]) & (data <= limits[1])
      subset = data.select(sel)
      h = flex.histogram(subset,n_slots=50)
      ax.plot(h.slot_centers().as_numpy_array(),h.slots().as_numpy_array(),'-')
      ax.set_title("%s axis histogram (showing %d of %d xtals). Mean: %7.2f Stddev: %7.2f"%(
        axis, len(subset), len(data), stats.mean(),
        stats.unweighted_sample_standard_deviation()))
      ax.set_ylabel("N lattices")
      ax.set_xlabel(r"$\AA$")
      ax.set_xlim(limits)
    plt.tight_layout()
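A minimal standalone sketch (synthetic data, not from the original source; assumes only scitbx) of the flex statistics and histogram pattern used above:

from scitbx.array_family import flex

data = flex.double([9.8, 10.1, 10.0, 9.9, 10.2, 10.3])
stats = flex.mean_and_variance(data)
cutoff = max(4 * stats.unweighted_sample_standard_deviation(), 0.5)
sel = (data >= stats.mean() - cutoff) & (data <= stats.mean() + cutoff)
h = flex.histogram(data.select(sel), n_slots=5)
print(list(h.slots()))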
Example #2
 def tst_for_dataset(self, creator, filename):
   from dials.array_family import flex
   from dials.algorithms.shoebox import MaskCode
   print filename
   rlist = flex.reflection_table.from_pickle(filename)
   shoebox = rlist['shoebox']
   background = [sb.background.deep_copy() for sb in shoebox]
   success = creator(shoebox)
   assert(success.count(True) == len(success))
   diff = []
   for i in range(len(rlist)):
     mask = flex.bool([(m & MaskCode.Foreground) != 0 for m in shoebox[i].mask])
     px1 = background[i].select(mask)
     px2 = shoebox[i].background.select(mask)
     den = max([flex.mean(px1), 1.0])
     diff.append(flex.mean(px2 - px1) / den)
   diff = flex.double(diff)
   mv = flex.mean_and_variance(diff)
   mean = mv.mean()
   sdev = mv.unweighted_sample_standard_deviation()
   try:
     assert(abs(mean) < 0.01)
   except Exception:
     print "Mean: %f, Sdev: %f", mean, sdev
     from matplotlib import pylab
     pylab.hist(diff)
     pylab.show()
     raise
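The core pattern in this test is boolean-mask selection on flex arrays; a minimal sketch with toy values (scitbx only):

from scitbx.array_family import flex

pixels = flex.double([1.0, 5.0, 2.0, 8.0])
mask = flex.bool([True, False, True, False])
foreground = pixels.select(mask)  # keeps 1.0 and 2.0
print(flex.mean(foreground))      # 1.5
Example #3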
  def _wx_img_w_cpp(self, np_2d_tmp, show_nums, np_2d_mask = None):

    xmax = np_2d_tmp.shape[0]
    ymax = np_2d_tmp.shape[1]

    if np_2d_mask is None:  # "== None" compares element-wise on a numpy array
      np_2d_mask = np.zeros( (xmax, ymax), 'double')

    transposed_data = np.zeros( (ymax, xmax), 'double')
    transposed_mask = np.zeros( (ymax, xmax), 'double')

    transposed_data[:,:] = np.transpose(np_2d_tmp)
    transposed_mask[:,:] = np.transpose(np_2d_mask)

    flex_data_in = flex.double(transposed_data)
    flex_mask_in = flex.double(transposed_mask)

    err_code = self.wx_bmp_arr.set_min_max(self.vl_min, self.vl_max)

    img_array_tmp = self.wx_bmp_arr.gen_bmp(flex_data_in, flex_mask_in, show_nums)
    np_img_array = img_array_tmp.as_numpy_array()

    height = np_img_array.shape[0]
    width = np_img_array.shape[1]
    img_array = np.empty( (height, width, 3),'uint8')
    img_array[:,:,:] = np_img_array[:,:,:]

    self._wx_image = wx.EmptyImage(width, height)
    self._wx_image.SetData( img_array.tostring() )

    data_to_become_bmp = (self._wx_image, width, height)

    return data_to_become_bmp
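The numpy-to-flex round trip this method relies on can be exercised in isolation; a short sketch mirroring the conversions used above (assumes numpy and scitbx):

import numpy as np
from scitbx.array_family import flex

np_img = np.arange(6, dtype='double').reshape(2, 3)
flex_img = flex.double(np_img)       # preserves the 2x3 grid
assert flex_img.all() == (2, 3)
assert (flex_img.as_numpy_array() == np_img).all()
Example #4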
def get_pix_coords(wavelength, A, mill_arr, detector, delta_i=0.02):
    """ Code copied from sim.py courtesy of Aaron and Tara """
    s0=col((0,0,-1/wavelength))
    q=flex.vec3_double([A*col(idx) for idx in  mill_arr.indices().as_vec3_double()])
    s0_hat=flex.vec3_double([s0.normalize()]*len(q))
    q_hat=q.each_normalize()
    #q_hat.cross(flex.vec3_double([s0_hat]*len(q_hat)))
    e1_hat = q_hat.cross(s0_hat)
    c0_hat = s0_hat.cross(e1_hat)
    q_len_sq = flex.double([col(v).length_sq() for v in q])
    a_side=q_len_sq*wavelength/2
    b_side = flex.sqrt(q_len_sq - a_side**2)  # b = sqrt(|q|^2 - a^2); the subtraction belongs inside the sqrt (cf. the original commented-out intent)
    r_vec=flex.vec3_double(-a_side*s0_hat+b_side*c0_hat)
    s1=r_vec+s0

    EQ=q+s0
    len_EQ=flex.double([col(v).length() for v in EQ])
    ratio=len_EQ*wavelength

    indices = flex.miller_index()
    coords =flex.vec2_double()
    for i in range(len(s1)):
        if 1 - delta_i < ratio[i] < 1 + delta_i:
            indices.append(mill_arr.indices()[i])
            pix = detector[0].get_ray_intersection_px(s1[i])
            if detector[0].is_coord_valid(pix):
                coords.append(pix)

    return coords, indices
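get_pix_coords leans on the element-wise vec3_double geometry helpers; a minimal sketch of those calls with toy vectors (scitbx only):

from scitbx.array_family import flex

v = flex.vec3_double([(1, 0, 0), (0, 2, 0)])
w = flex.vec3_double([(0, 1, 0), (0, 0, 1)])
print(list(v.each_normalize()))  # unit vectors
print(list(v.cross(w)))          # element-wise cross products
print(list(v.dot(w)))            # element-wise dot products: 0.0, 0.0
print(list(v.norms()))           # lengths: 1.0, 2.0
Example #5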
  def compute_functional_and_gradients_test_code(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150. # limits on the exponent, please
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.func*self.func)
    self.f = functional
    jacobian = self.refinery.jacobian_callable(values)
    self.gg_0 = flex.sum(2. * self.func * jacobian[0])
    self.gg_1 = flex.sum(2. * self.func * jacobian[1])
    self.gg_3 = flex.sum(2. * self.func * jacobian[3])
    self.gg_4 = flex.sum(2. * self.func * jacobian[4])
    DELTA = 1.E-7
    self.g = flex.double()
    for x in xrange(self.n):
      templist = list(self.x)
      templist[x]+=DELTA
      dvalues = flex.double(templist)

      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc*dfunc)
      #calculate by finite_difference
      self.g.append( ( dfunctional-functional )/DELTA )
    self.g[2]=0.

    print >> self.out, "rms %10.3f"%math.sqrt(flex.mean(self.func*self.func)),
    values.show(self.out)
    print >>self.out, "derivatives--> %15.5f    %15.5f    %9.7f   %5.2f   %5.2f"%tuple(self.g)
    print >>self.out, "  analytical-> %15.5f    %15.5f                %5.2f   %5.2f"%(
      self.gg_0,self.gg_1, self.gg_3,self.gg_4)
    self.g[0]=self.gg_0
    self.g[1]=self.gg_1
    self.g[3]=self.gg_3
    self.g[4]=self.gg_4
    return self.f, self.g
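The finite-difference check above generalises to any scalar objective; a self-contained sketch with a hypothetical toy objective f (not the refinery):

from scitbx.array_family import flex

def f(x):               # toy objective: sum of squares
  return flex.sum(x * x)

x = flex.double([1.0, 2.0, 3.0])
analytical = 2.0 * x    # d/dx_i sum(x_j^2) = 2 x_i
DELTA = 1.e-7
fd = flex.double()
f0 = f(x)
for i in range(len(x)):
  xp = x.deep_copy()
  xp[i] += DELTA
  fd.append((f(xp) - f0) / DELTA)
assert flex.max(flex.abs(fd - analytical)) < 1.e-5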
Example #6
  def decode(self, handle):
    '''Decode the reflection data.'''
    from dials.array_family import flex

    # Get the group containing the reflection data
    g = handle['entry/data_processing']

    # Create the list of reflections
    rl = flex.reflection_table(int(g.attrs['num_reflections']))

    # Decode all the columns
    for key in g:
      item = g[key]
      name = item.attrs['flex_type']
      if name == 'shoebox':
        flex_type = getattr(flex, name)
        data = item['data']
        mask = item['mask']
        background = item['background']
        col = flex_type(len(rl))
        for i in range(len(rl)):
          col[i].data = flex.double(data['%d' % i].value)
          col[i].mask = flex.int(mask['%d' % i].value)
          col[i].background = flex.double(background['%d' % i].value)

      else:
        flex_type = getattr(flex, name)
        col = self.decode_column(flex_type, item)
      rl[str(key)] = col

    # Return the list of reflections
    return rl
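A hand-built miniature of the kind of reflection table the decoder produces, with illustrative columns (assumes dials):

from dials.array_family import flex

rl = flex.reflection_table(3)  # three rows
rl['intensity'] = flex.double([10.0, 20.0, 30.0])
rl['miller_index'] = flex.miller_index([(1, 0, 0), (0, 1, 0), (0, 0, 1)])
assert rl.nrows() == 3 and rl.ncols() == 2
Example #7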
  def unstable_matrix_inversion_diagonal(self,Lower,Diag,Transpose):
    ### Can't use the Cholesky factorization to derive the Variance-Covariance matrix
    ### Demonstrate that inverting the matrix is numerically unstable
    Nx = len(self.helper.x)
    error_diagonal_elems = flex.double(Nx)
    for j_element in xrange(Nx):

      # now solve for the vector p = D * LT * x by substitution in eqn L * p = b
      # b is the column vector of zeroes except jth_element is 1.
      p = flex.double(Nx)
      p[j_element] = 1.
      for p_idx in xrange(j_element+1,Nx):
        for subs_idx in xrange(j_element,p_idx):
          p[p_idx] -= Lower(p_idx,subs_idx) * p[subs_idx]

      Pvec = col(p)

      # now solve for the vector q = LT * x by division in eqn D * q = b
      q = flex.double([ p[i] / Diag(i,i) for i in xrange(Nx)] )
      #  this is the unstable step.  We can't divide by tiny denominators
      Qvec = col(q)

      # now solve for the jth element of x in the eqn LT * x = q
      xelem = flex.double(Qvec.elems)

      for x_idx in xrange(Nx-1,j_element-1,-1):  #comment this in for production
      #for x_idx in xrange(Nx-1,-1,-1):
        for subs_idx in xrange(x_idx+1, Nx):
          xelem[x_idx] -= Transpose(x_idx,subs_idx) * xelem[subs_idx]
      Xvec = col(xelem) # got the whole vector; only need j_element for the error matrix diagonal
      error_diagonal_elems[j_element] = xelem[j_element]
    return col(error_diagonal_elems)
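A toy numeric sketch (synthetic numbers, not the real factorisation) of the instability the comments describe: dividing by a tiny diagonal element amplifies rounding error by 1/D.

from scitbx.array_family import flex

p = flex.double([1.0, 1.0 + 1.e-12])  # nearly equal up to rounding error
q = p / 1.e-12                        # division by a tiny pivot
print(q[1] - q[0])                    # the 1e-12 discrepancy is now ~1.0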
Example #8
  def __init__(self, strategies, n_bins=8, degrees_per_bin=5):
    from cctbx import crystal, miller
    import copy

    sg = strategies[0].experiment.crystal.get_space_group() \
      .build_derived_reflection_intensity_group(anomalous_flag=True)
    cs = crystal.symmetry(
      unit_cell=strategies[0].experiment.crystal.get_unit_cell(), space_group=sg)

    for i, strategy in enumerate(strategies):
      if i == 0:
        predicted = copy.deepcopy(strategy.predicted)
      else:
        predicted_ = copy.deepcopy(strategy.predicted)
        predicted_['dose'] += (flex.max(predicted['dose']) + 1)
        predicted.extend(predicted_)
    ms = miller.set(cs, indices=predicted['miller_index'], anomalous_flag=True)
    ma = miller.array(ms, data=flex.double(ms.size(),1),
                      sigmas=flex.double(ms.size(), 1))
    if 1:
      merging = ma.merge_equivalents()
      o = merging.array().customized_copy(
        data=merging.redundancies().data().as_double()).as_mtz_dataset('I').mtz_object()
      o.write('predicted.mtz')

    d_star_sq = ma.d_star_sq().data()

    binner = ma.setup_binner_d_star_sq_step(
      d_star_sq_step=(flex.max(d_star_sq)-flex.min(d_star_sq)+1e-8)/n_bins)

    dose = predicted['dose']
    range_width = 1
    range_min = flex.min(dose) - range_width
    range_max = flex.max(dose)
    n_steps = 2 + int((range_max - range_min) - range_width)

    binner_non_anom = ma.as_non_anomalous_array().use_binning(
      binner)
    self.n_complete = flex.size_t(binner_non_anom.counts_complete()[1:-1])

    from xia2.Modules.PyChef2 import ChefStatistics
    chef_stats = ChefStatistics(
      ma.indices(), ma.data(), ma.sigmas(),
      ma.d_star_sq().data(), dose, self.n_complete, binner,
      ma.space_group(), ma.anomalous_flag(), n_steps)

    def fraction_new(completeness):
      # Completeness so far at end of image
      completeness_end = completeness[1:]
      # Completeness so far at start of image
      completeness_start = completeness[:-1]
      # Fraction of unique reflections observed for the first time on each image
      return completeness_end - completeness_start

    self.dose = dose
    self.ieither_completeness = chef_stats.ieither_completeness()
    self.iboth_completeness = chef_stats.iboth_completeness()
    self.frac_new_ref = fraction_new(self.ieither_completeness) / degrees_per_bin
    self.frac_new_pairs = fraction_new(self.iboth_completeness) / degrees_per_bin
Example #9
def plot_multirun_stats(runs,
                        run_numbers,
                        d_min,
                        ratio_cutoff=1,
                        n_strong_cutoff=40,
                        run_tags=None,
                        run_statuses=None,
                        interactive=False,
                        compress_runs=True,
                        xsize=30,
                        ysize=10,
                        high_vis=False):
  # avoid shared mutable default arguments
  if run_tags is None: run_tags = []
  if run_statuses is None: run_statuses = []
  tset = flex.double()
  two_theta_low_set = flex.double()
  two_theta_high_set = flex.double()
  nset = flex.int()
  I_sig_I_low_set = flex.double()
  I_sig_I_high_set = flex.double()
  boundaries = []
  lengths = []
  runs_with_data = []
  offset = 0
  for idx in xrange(len(runs)):
    r = runs[idx]
    if len(r[0]) > 0:
      if compress_runs:
        tslice = r[0] - r[0][0] + offset
        offset += (r[0][-1] - r[0][0])
      else:
        tslice = r[0]
      last_end = r[0][-1]
      tset.extend(tslice)
      two_theta_low_set.extend(r[1])
      two_theta_high_set.extend(r[2])
      nset.extend(r[3])
      I_sig_I_low_set.extend(r[4])
      I_sig_I_high_set.extend(r[5])
      boundaries.append(tslice[0])
      boundaries.append(tslice[-1])
      lengths.append(len(tslice))
      runs_with_data.append(run_numbers[idx])
    else:
      boundaries.extend([None]*2)
  stats_tuple = get_run_stats(tset,
                              two_theta_low_set,
                              two_theta_high_set,
                              nset,
                              I_sig_I_low_set,
                              I_sig_I_high_set,
                              tuple(boundaries),
                              tuple(lengths),
                              runs_with_data,
                              ratio_cutoff=ratio_cutoff,
                              n_strong_cutoff=n_strong_cutoff)
  png = plot_run_stats(stats_tuple, d_min, run_tags=run_tags, run_statuses=run_statuses, interactive=interactive,
    xsize=xsize, ysize=ysize, high_vis=high_vis)
  return png
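Example #10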
 def get_partiality_array(self,values):
   Rh = self.get_Rh_array(values)
   Rs = flex.double(len(self.MILLER),1./values.DEFF)+flex.double(len(self.MILLER),values.ETA/2.)/self.DVEC
   Rs_sq = Rs * Rs
   Rh_sq = Rh * Rh
   numerator = Rs_sq - Rh_sq
   denominator = values.DEFF * Rs * Rs_sq
   partiality = numerator / denominator
   return partiality
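A worked numeric check of the expression above, partiality = (Rs^2 - Rh^2) / (DEFF * Rs^3), with illustrative values (the ETA term set to zero):

from scitbx.array_family import flex

DEFF = 2000.0
Rs = flex.double(3, 1.0 / DEFF)          # constant reciprocal-lattice point size
Rh = flex.double([0.0, 2.5e-4, 5.0e-4])  # distance from the Ewald sphere
partiality = (Rs * Rs - Rh * Rh) / (DEFF * Rs * Rs * Rs)
print(list(partiality))                  # 1.0, 0.75, 0.0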
Example #11
  def __call__(self, params, options):
    ''' Import the integrate.hkl file. '''

    from iotbx.xds import integrate_hkl
    from dials.array_family import flex
    from dials.util.command_line import Command
    from cctbx import sgtbx

    # Get the unit cell to calculate the resolution
    uc = self._experiment.crystal.get_unit_cell()

    # Read the INTEGRATE.HKL file
    Command.start('Reading INTEGRATE.HKL')
    handle = integrate_hkl.reader()
    handle.read_file(self._integrate_hkl)
    hkl    = flex.miller_index(handle.hkl)
    xyzcal = flex.vec3_double(handle.xyzcal)
    xyzobs = flex.vec3_double(handle.xyzobs)
    iobs   = flex.double(handle.iobs)
    sigma  = flex.double(handle.sigma)
    rlp = flex.double(handle.rlp)
    peak = flex.double(handle.peak) * 0.01
    Command.end('Read %d reflections from INTEGRATE.HKL file.' % len(hkl))

    # Derive the reindex matrix
    rdx = self.derive_reindex_matrix(handle)
    print 'Reindex matrix:\n%d %d %d\n%d %d %d\n%d %d %d' % (rdx.elems)

    # Reindex the reflections
    Command.start('Reindexing reflections')
    cb_op = sgtbx.change_of_basis_op(sgtbx.rt_mx(sgtbx.rot_mx(rdx.elems)))
    hkl = cb_op.apply(hkl)
    Command.end('Reindexed %d reflections' % len(hkl))

    # Create the reflection list
    Command.start('Creating reflection table')
    table = flex.reflection_table()
    table['id'] = flex.int(len(hkl), 0)
    table['panel'] = flex.size_t(len(hkl), 0)
    table['miller_index'] = hkl
    table['xyzcal.px'] = xyzcal
    table['xyzobs.px.value'] = xyzobs
    table['intensity.cor.value'] = iobs
    table['intensity.cor.variance'] = sigma**2
    table['intensity.prf.value'] = iobs * peak / rlp
    table['intensity.prf.variance'] = (sigma * peak / rlp)**2
    table['lp'] = 1.0 / rlp
    table['d'] = flex.double([uc.d(h) for h in hkl])
    Command.end('Created table with {0} reflections'.format(len(table)))

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'integrate_hkl.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
Example #12
File: tst_vs_R.py  Project: dials/dials
  def __init__(self, x, detrend=True, spans=None):

    if spans is None: spans = robjects.r("NULL")
    dat = robjects.FloatVector(list(x))
    pgram = robjects.r['spec.pgram']
    result = pgram(dat, spans=spans, detrend=detrend, taper=0, fast=False, plot=False)
    self.result = result
    self.spec = flex.double(result.rx2('spec'))
    self.freq = flex.double(result.rx2('freq'))
    return
Example #13
File: model.py  Project: dials/dials
 def from_dict(cls, obj):
   ''' Convert the profile model from a dictionary. '''
   if obj['__id__'] != "gaussian_rs":
     raise RuntimeError('expected __id__ gaussian_rs, got %s' % obj['__id__'])
   n_sigma = obj['n_sigma']
   sigma_b = obj['sigma_b']
   sigma_m = obj['sigma_m']
   if isinstance(sigma_b, list):
     assert(len(sigma_b) == len(sigma_m))
     sigma_b = flex.double(sigma_b)
     sigma_m = flex.double(sigma_m)
   return cls(None, n_sigma, sigma_b, sigma_m, deg=True)
Example #14
  def tst_split_blocks_1_frame(self):
    from dials.array_family import flex
    from random import randint, uniform, seed
    from dials.algorithms.integration.integrator import JobList
    r = flex.reflection_table()
    r['value1'] = flex.double()
    r['value2'] = flex.int()
    r['value3'] = flex.double()
    r['bbox'] = flex.int6()
    r['id'] = flex.int()
    expected = []
    for i in range(100):
      x0 = randint(0, 100)
      x1 = x0 + randint(1, 10)
      y0 = randint(0, 100)
      y1 = y0 + randint(1, 10)
      z0 = randint(0, 100)
      z1 = z0 + randint(1, 10)
      v1 = uniform(0, 100)
      v2 = randint(0, 100)
      v3 = uniform(0, 100)
      r.append({
        'id' : 0,
        'value1' : v1,
        'value2' : v2,
        'value3' : v3,
        'bbox' : (x0, x1, y0, y1, z0, z1)
      })
      for z in range(z0, z1):
        expected.append({
          'id' : 0,
          'value1' : v1,
          'value2' : v2,
          'value3' : v3,
          'bbox' : (x0, x1, y0, y1, z, z+1),
          'partial_id' : i,
        })

    jobs = JobList()
    jobs.add((0,1), (0, 111), 1)

    jobs.split(r)
    assert(len(r) == len(expected))
    EPS = 1e-7
    for r1, r2 in zip(r, expected):
      assert(r1['bbox'] == r2['bbox'])
      assert(r1['partial_id'] == r2['partial_id'])
      assert(abs(r1['value1'] - r2['value1']) < EPS)
      assert(r1['value2'] == r2['value2'])
      assert(abs(r1['value3'] - r2['value3']) < EPS)

    print 'OK'
Example #15
  def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask = None):

    xmax = np_2d_tmp.shape[1]
    ymax = np_2d_tmp.shape[0]

    if np_2d_mask is None:
      np_2d_mask = np.zeros((ymax, xmax), 'double')

    transposed_data = np.zeros((ymax, xmax), 'double')
    transposed_mask = np.zeros((ymax, xmax), 'double')

    transposed_data[:,:] = np_2d_tmp
    transposed_mask[:,:] = np_2d_mask

    flex_data_in = flex.double(transposed_data)
    flex_mask_in = flex.double(transposed_mask)

    err_code = self.wx_bmp_arr.set_min_max(self.vl_min, self.vl_max)

    test_log = '''
    print "self.vl_min, self.vl_max = ", self.vl_min, self.vl_max
    print "err_code =", err_code
    print "before crash"
    print "palette =", palette
    '''

    if palette == "black2white":
      palette_num = 1
    elif palette == "white2black":
      palette_num = 2
    elif palette == "hot ascend":
      palette_num = 3
    else: # assuming "hot descend"
      palette_num = 4

    img_array_tmp = self.wx_bmp_arr.gen_bmp(flex_data_in, flex_mask_in, show_nums, palette_num)

    np_img_array = img_array_tmp.as_numpy_array()

    height = np_img_array.shape[0]
    width = np_img_array.shape[1]
    img_array = np.empty((height, width, 3),'uint8')
    img_array[:,:,:] = np_img_array[:,:,:]

    self._wx_image = wx.EmptyImage(width, height)
    self._wx_image.SetData(img_array.tostring())

    data_to_become_bmp = (self._wx_image, width, height)

    return data_to_become_bmp
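Example #16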
 def build_up(pfh, objective_only=False):
   values = pfh.parameterization(pfh.x)
   assert 0. < values.G , "G-scale value out of range ( < 0 ) within LevMar build_up"
   # XXX revisit these limits.  Seems like an ad hoc approach to have to set these limits
   # However, the assertions are necessary to avoid floating point exceptions at the C++ level
   # Regardless, these tests throw out ~30% of LM14 data, thus search for another approach
   assert -150. < values.BFACTOR < 150. ,"B-factor out of range (+/-150) within LevMar build_up"
   assert -0.5 < 180.*values.thetax/math.pi < 0.5 , "thetax out of range ( |rotx|>.5 degrees ) within LevMar build_up"
   assert -0.5 < 180.*values.thetay/math.pi < 0.5 , "thetay out of range ( |roty|>.5 degrees ) within LevMar build_up"
   assert 0.000001 < values.RS , "RLP size out of range (<0.000001) within LevMar build_up"
   assert values.RS < 0.001 , "RLP size out of range (>0.001) within LevMar build_up"
   residuals = pfh.refinery.fvec_callable(values)
   pfh.reset()
   if objective_only:
     pfh.add_residuals(residuals, weights=pfh.refinery.WEIGHTS)
   else:
     grad_r = pfh.refinery.jacobian_callable(values)
     jacobian = flex.double(
       flex.grid(len(pfh.refinery.MILLER), pfh.n_parameters))
     for j, der_r in enumerate(grad_r):
       jacobian.matrix_paste_column_in_place(der_r,j)
       #print >> pfh.out, "COL",j, list(der_r)
     pfh.add_equations(residuals, jacobian, weights=pfh.refinery.WEIGHTS)
   print >> pfh.out, "rms %10.3f"%math.sqrt(flex.mean(pfh.refinery.WEIGHTS*residuals*residuals)),
   values.show(pfh.out)
Example #17
  def tst_copy(self):
    import copy
    from dials.array_family import flex

    # Create a table
    table = flex.reflection_table([
      ('col1', flex.int(range(10)))])

    # Make a shallow copy of the table
    shallow = copy.copy(table)
    shallow['col2'] = flex.double(range(10))
    assert(table.ncols() == 2)
    assert(table.is_consistent())
    print 'OK'

    # Make a deep copy of the table
    deep = copy.deepcopy(table)
    deep['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(deep.ncols() == 3)
    assert(table.is_consistent())
    assert(deep.is_consistent())

    table2 = table.copy()
    table2['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(table2.ncols() == 3)
    assert(table.is_consistent())
    assert(table2.is_consistent())
    print 'OK'
Example #18
File: fill.py  Project: dials/dials_scratch
def generate_image(function, height, width):
  from dials.array_family import flex
  image = flex.double(flex.grid(height, width))
  for j in range(height):
    for i in range(width):
      image[j,i] = function(j,i)
  return image
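Example usage of the helper with a hypothetical ramp function (not from the source):

img = generate_image(lambda j, i: float(j + i), height=4, width=5)
assert img.all() == (4, 5)
assert img[3, 4] == 7.0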
Example #19
    def model(self, reflections, profiles):
        from dials.array_family import flex

        indices = flex.size_t(range(len(self)))
        weights = flex.double([1.0] * len(self))
        for profile in profiles:
            self.add(indices, weights, profile)
Example #20
    def plot_scale_vs_x_y(self):
        from scitbx.array_family import flex
        from math import ceil

        print "Getting scale"
        points = [(int(xyz[1] / 8), int(xyz[0] / 8)) for xyz in self.xyz]
        scale = [x / d for x, d in zip(self.i_xds, self.i_dials)]

        print "Creating Grid"
        image_size = self.sweep.get_detector()[0].get_image_size()[::-1]
        image_size = (int(ceil(image_size[0] / 8)), int(ceil(image_size[1] / 8)))
        grid = flex.double(flex.grid(image_size))
        count = flex.int(flex.grid(image_size))
        for p, s in zip(points, scale):
            grid[p] += s
            count[p] += 1
        for i in range(len(grid)):
            if count[i] > 0:
                grid[i] /= count[i]

        # grid_points = [(j,i) for j in range(image_size[0]) for i in range(image_size[1])]

        # grid = griddata(points, scale, grid_points)
        # grid.shape = image_size
        from matplotlib import pyplot

        fig, ax = pyplot.subplots()
        pyplot.title("scale vs x/y")
        cax = pyplot.imshow(grid.as_numpy_array())
        cbar = fig.colorbar(cax)
        pyplot.savefig("plot-scale-vs-xy.png")
        pyplot.close()
Example #21
  def _xl_unit_cell_derivatives(self, isel, parameterisation=None,
    reflections=None):

    # Get required data
    h = self._h.select(isel)
    B = self._B.select(isel)
    wl = self._wavelength.select(isel)

    # get derivatives of the B matrix wrt the parameters
    dB_dxluc_p = [None if der is None else flex.mat3_double(len(isel), der.elems) \
                  for der in parameterisation.get_ds_dp(use_none_as_null=True)]

    d2theta_dp = []

    # loop through the parameters
    for der in dB_dxluc_p:

      if der is None:
        d2theta_dp.append(None)
        continue

      r0 = B * h
      dr0 = der * h
      r0len = r0.norms()
      dr0len = dr0.dot(r0) / r0len

      # 2theta = 2 * arcsin( |r0| / (2 * |s0| ) )
      sintheta = 0.5 * r0len * wl
      fac = 1.0 / flex.sqrt(flex.double(len(wl), 1.0) - sintheta**2)
      val = fac * wl * dr0len

      d2theta_dp.append(val)

    return d2theta_dp
Example #22
def draw_palette_label(i_min, i_max):

    if i_max > 500:
        i_max = 500
        logger.debug("reshaping i_max in shown palette bitmap")

    if i_min < -3:
        i_min = -3
        logger.debug("reshaping i_min in shown palette bitmap")

    scale_size = int(i_max - i_min)
    np_img_arr = np.zeros((50, 503), dtype=np.double)
    m_point = int((i_max + i_min) / 2) + 3
    np_img_arr[0:50, :m_point] = i_min
    np_img_arr[0:50, m_point:] = i_max
    if scale_size > 10:
        try:
            ascending_img_arr = (
                np.arange(i_min, i_max, 1.0 / 50.0).reshape(scale_size, 50).T
            )

            lbound = int(i_min) + 3
            ubound = int(i_max) + 3
            np_img_arr[0:50, lbound:ubound] = ascending_img_arr[0:50, 0:scale_size]
        except BaseException as e:
            # We don't want to catch bare exceptions but don't know
            # what this was supposed to catch. Log it.
            logger.error("Caught unknown exception type %s: %s", type(e).__name__, e)
            logger.debug("something went wrong with the creation of palette bitmap")

    tmp_flex_arr = flex.double(np_img_arr)
    return tmp_flex_arr
Example #23
def residual_map_special_deltapsi_add_on( reflections,experiments,matches,hkllist, predicted,plot,eta_deg,deff ):

        detector = experiments[0].detector
        crystal = experiments[0].crystal
        unit_cell = crystal.get_unit_cell()
        pxlsz = detector[0].get_pixel_size()
        model_millers = reflections["miller_index"]
        dpsi = flex.double()
        for match in matches:

          obs_miller = hkllist[match["pred"]]
          model_index= model_millers.first_index(obs_miller)

          raw_delta_psi = reflections["delpsical.rad"][model_index]
          deltapsi_envelope = (unit_cell.d(obs_miller)/deff) + math.pi*eta_deg/180.
          normalized_delta_psi = raw_delta_psi/deltapsi_envelope

          dpsi.append( normalized_delta_psi )

        from matplotlib.colors import Normalize
        dnorm = Normalize()
        dnorm.autoscale(dpsi.as_numpy_array())

        CMAP = plot.get_cmap("bwr")
        for match,dcolor in zip(matches,dpsi):

          #print dcolor, dnorm(dcolor),  CMAP(dnorm(dcolor))
          #blue represents negative delta psi:  outside Ewald sphere; red, positive, inside Ewald sphere
          plot.plot([predicted[match["pred"]][1]/pxlsz[1]],[-predicted[match["pred"]][0]/pxlsz[0]],color=CMAP(dnorm(dcolor)),
          marker=".", markersize=5)
Example #24
  def calculate_weights(self, reflections):
    """Set weights to constant terms. If stills, the z weights are
    the 'delpsical.weights' attribute of the reflection table. Otherwise, use
    the usual 'xyzobs.mm.weights'"""

    wx = flex.double(len(reflections), self._wx)
    wy = flex.double(len(reflections), self._wy)
    wz = flex.double(len(reflections), self._wz)
    if self._stills:
      null = flex.double(len(reflections), 0)
      reflections['xyzobs.mm.weights'] = flex.vec3_double(wx, wy, null)
      reflections['delpsical.weights'] = wz
    else:
      reflections['xyzobs.mm.weights'] = flex.vec3_double(wx, wy, wz)

    return reflections
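A small sketch of the column packing used above: three flex.double arrays zipped into one flex.vec3_double (synthetic weights):

from dials.array_family import flex

wx = flex.double(2, 1.0)
wy = flex.double(2, 2.0)
wz = flex.double(2, 0.0)
assert list(flex.vec3_double(wx, wy, wz)) == [(1.0, 2.0, 0.0), (1.0, 2.0, 0.0)]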
Example #25
  def _create_block_columns(self):
    """Create a column to contain the block number."""

    from scitbx.array_family import flex
    self._reflections['block'] = flex.size_t(len(self._reflections))
    self._reflections['block_centre'] = flex.double(len(self._reflections))
    return
Example #26
File: test.py  Project: dials/dials_scratch
def plot_prob_for_zero(c, b, s):
  from math import log, exp, factorial
  from dials.array_family import flex
  L = flex.double(flex.grid(100, 100))
  MASK = flex.bool(flex.grid(100, 100))
  c = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
  b = [bb / sum(b) for bb in b]
  s = [ss / sum(s) for ss in s]
  for BB in range(0, 100):
    for SS in range(0, 100):
      B = 0 + BB / 10000.0
      S = 0 + SS / 40.0
      LL = 0
      for i in range(len(b)):
        if B*b[i] + S*s[i] <= 0:
          MASK[BB, SS] = True
          LL = -999999
          break
        else:
          LL += c[i]*log(B*b[i]+S*s[i]) - log(factorial(c[i])) - B*b[i] - S*s[i]

      L[BB, SS] = LL
  index = flex.max_index(L)
  i = index % 100
  j = index // 100
  B = 0 + j / 10000.0
  S = 0 + i / 40.0
  print flex.max(L), B, S
  from matplotlib import pylab
  import numpy
  im = numpy.ma.masked_array(flex.exp(L).as_numpy_array(), mask=MASK.as_numpy_array())
  pylab.imshow(im)
  pylab.show()
  exit(0)
Example #27
  def tst_serialize(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table = flex.reflection_table()
    table['col1'] = flex.int(c1)
    table['col2'] = flex.double(c2)
    table['col3'] = flex.std_string(c3)

    # Pickle, then unpickle
    import cPickle as pickle
    obj = pickle.dumps(table)
    new_table = pickle.loads(obj)
    assert(new_table.is_consistent())
    assert(new_table.nrows() == 10)
    assert(new_table.ncols() == 3)
    assert(all(a == b for a, b in zip(new_table['col1'], c1)))
    assert(all(a == b for a, b in zip(new_table['col2'], c2)))
    assert(all(a == b for a, b in zip(new_table['col3'], c3)))
    print 'OK'
Example #28
File: bfgs.py  Project: dials/dials_scratch
def show_image(c,b,s, BB=None, SS=None):

  import numpy.ma
  from dials.array_family import flex
  N = 100
  im = flex.double(flex.grid(N, N))
  mask = flex.bool(flex.grid(N, N))
  for j in range(N):
    for i in range(N):
      B = -1.0 + j * 10.0 / N
      S = -1.0 + i * 10.0 / N
      im[j,i], mask[j,i] = func(c,b,s,B,S)
      im[j,i] = -im[j,i]

  masked_im = numpy.ma.array(
    # im.as_numpy_array(),
    flex.exp(im).as_numpy_array(),
    mask = mask.as_numpy_array())
  mask2 = flex.bool(flex.grid(N, N))
  indices = []
  for i in range(len(mask)):
    if not mask[i]:
      indices.append(i)
  indices = flex.size_t(indices)
  ind = flex.max_index(im.select(indices))
  ind = indices[ind]
  maxy = -1.0 + (ind % N) * 10.0 / N
  maxx = -1.0 + (ind // N) * 10.0 / N
  from matplotlib import pylab
  pylab.imshow(masked_im, origin='lower', extent=[-1.0, 9.0, -1.0, 9.0])
  if BB is not None and SS is not None:  # the original referenced undefined YY/XX
    pylab.plot(SS, BB)
  pylab.scatter([maxy], [maxx])
  pylab.show()
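Example #29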
 def get_Rh_array(self, values):
   Rh = flex.double()
   eff_Astar = self.get_eff_Astar(values)
   for mill in self.MILLER:
     x = eff_Astar * matrix.col(mill)
     Svec = x + self.BEAM
     Rh.append(Svec.length() - (1./self.WAVE))
   return Rh
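Example #30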
  def result_for_cxi_merge(self, file_name):
    values = self.get_parameter_values()
    self.rs2_parameter_range_assertions(values)
    scaler = self.nave1_refinery.scaler_callable(self.get_parameter_values())

    partiality_array = self.refinery.get_partiality_array(values)
    p_scaler = flex.pow(partiality_array,
                        0.5*self.params.postrefinement.merge_partiality_exponent)

    fat_selection = (self.nave1_refinery.lorentz_callable(self.get_parameter_values()) >
                     self.params.postrefinement.rs_hybrid.partiality_threshold) # was 0.2 for rs2
    fat_count = fat_selection.count(True)
    scaler_s = scaler.select(fat_selection)
    p_scaler_s = p_scaler.select(fat_selection)

    #avoid empty database INSERT, if insufficient centrally-located Bragg spots:
    # in samosa, handle this at a higher level, but handle it somehow.
    if fat_count < 3:
      raise ValueError, "< 3 near-fulls after refinement"
    print >> self.out, "On total %5d the fat selection is %5d"%(
      len(self.observations_pair1_selected.indices()), fat_count)
    observations_original_index = \
      self.observations_original_index_pair1_selected.select(fat_selection)

    observations = self.observations_pair1_selected.customized_copy(
      indices = self.observations_pair1_selected.indices().select(fat_selection),
      data = (self.observations_pair1_selected.data().select(fat_selection)/scaler_s),
      sigmas = (self.observations_pair1_selected.sigmas().select(fat_selection)/(scaler_s * p_scaler_s))
    )
    matches = miller.match_multi_indices(
      miller_indices_unique=self.miller_set.indices(),
      miller_indices=observations.indices())

    I_weight = flex.double(len(observations.sigmas()), 1.)
    I_reference = flex.double([self.i_model.data()[pair[0]] for pair in matches.pairs()])
    SWC = simple_weighted_correlation(I_weight, I_reference, observations.data())
    print >> self.out, "CORR: NEW correlation is", SWC.corr
    self.final_corr = SWC.corr
    #another range assertion
    assert self.final_corr > 0.1,"correlation coefficient out of range (<= 0.1) after LevMar refinement"
    # XXX Specific to the hybrid_rs method, and likely these limits are problem-specific (especially G-max) so look for another approach
    #     or expose the limits as phil parameters.
    assert values.G < 0.5 , "G-scale value out of range ( > 0.5 XXX may be too strict ) after LevMar refinement"

    return observations_original_index,observations,matches
Example #31
def get_run_stats(
    timestamps,
    two_theta_low,
    two_theta_high,
    n_strong,
    resolutions,
    n_lattices,
    tuple_of_timestamp_boundaries,
    lengths,
    run_numbers,
    n_multiples=2,
    ratio_cutoff=1,
    n_strong_cutoff=40,
    i_sigi_cutoff=1,
    d_min=2,
):
    print("")
    print("%d shots" % len(timestamps))
    print("%d first lattices" % (n_lattices >= 1).count(True))
    print("%d multiple lattices" % (n_lattices >= 2).count(True))
    print("%d total lattices" % (flex.sum(n_lattices)))
    iterator = range(len(resolutions))
    # hit rate of drops (observe solvent) or crystals (observe strong spots)
    # since -1 is used as a flag for "did not store this value", and we want a quotient,
    # set the numerator value to 0 whenever either the numerator or denominator is -1
    invalid = (two_theta_low <= 0) | (two_theta_high < 0)  # <= to prevent /0; element-wise "|" (Python "or" would keep only the first flex.bool)
    numerator = two_theta_high.set_selected(invalid, 0)
    denominator = two_theta_low.set_selected(two_theta_low == 0,
                                             1)  # prevent /0
    drop_ratios = numerator / denominator
    drop_hits = drop_ratios >= ratio_cutoff
    xtal_hits = n_strong >= n_strong_cutoff
    half_idx_rate_window = min(50, max(int(len(timestamps) // 20), 1))
    half_hq_rate_window = 500
    indexed_sel = n_lattices > 0
    hq_sel = (resolutions > 0) & (resolutions <= d_min)
    # indexing and droplet hit rate in a sliding window
    idx_rate = flex.double()
    multiples_rate = flex.double()
    hq_rate = flex.double()
    drop_hit_rate = flex.double()
    for i in iterator:
        idx_min = max(0, i - half_idx_rate_window)
        idx_max = min(i + half_idx_rate_window, len(resolutions))
        n_lattices_local = n_lattices[idx_min:idx_max]
        shots_this_span = len(n_lattices_local)
        first_lattices_local = n_lattices_local >= 1
        idx_local_rate = first_lattices_local.count(True) / shots_this_span
        idx_rate.append(idx_local_rate)
        multiples_sel = n_lattices_local >= n_multiples
        multiples_local_rate = multiples_sel.count(True) / shots_this_span
        multiples_rate.append(multiples_local_rate)
        drop_sel = drop_hits[idx_min:idx_max]
        drop_local_rate = drop_sel.count(True) / shots_this_span
        drop_hit_rate.append(drop_local_rate)
        # different sliding window for "high quality" xtals
        hq_min = max(0, i - half_hq_rate_window)
        hq_max = min(i + half_hq_rate_window, len(resolutions))
        n_lattices_local_hq = n_lattices[hq_min:hq_max]
        first_lattices_local_hq = n_lattices_local_hq >= 1
        hq_high_sel = hq_sel[hq_min:hq_max].select(first_lattices_local_hq)
        n_first_lattices_local_hq = first_lattices_local_hq.count(True)
        if n_first_lattices_local_hq > 0:
            hq_rate.append(hq_high_sel.count(True) / n_first_lattices_local_hq)
        else:
            hq_rate.append(0)
    return (timestamps, drop_ratios, drop_hits, drop_hit_rate, n_strong,
            xtal_hits, idx_rate, multiples_rate, hq_rate, indexed_sel, hq_sel,
            resolutions, half_idx_rate_window * 2, lengths,
            tuple_of_timestamp_boundaries, run_numbers)
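The sliding-window rates above reduce to counting True values in a moving slice; a compact sketch with a toy hit list (scitbx only):

from scitbx.array_family import flex

hits = flex.bool([True, False, True, True, False, True])
half_window = 1
rate = flex.double()
for i in range(len(hits)):
    local = hits[max(0, i - half_window):min(i + half_window, len(hits))]
    rate.append(local.count(True) / float(len(local)))
print(list(rate))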
Example #32
File: test_model.py  Project: hattne/dials
def test_ArrayScalingModel(test_reflections, mock_exp):
    """Test the ArrayScalingModel class."""

    configdict = {
        "corrections": ["decay", "absorption", "modulation"],
        "n_res_param": 2,
        "n_time_param": 2,
        "resmin": 1.0,
        "res_bin_width": 1.0,
        "time_norm_fac": 1.0,
        "time_rot_interval": 1.0,
        "n_x_param": 2,
        "n_y_param": 2,
        "xmin": 0.0,
        "ymin": 0.0,
        "x_bin_width": 1.0,
        "y_bin_width": 2.0,
        "n_x_mod_param": 2,
        "n_y_mod_param": 2,
        "x_det_bin_width": 2.0,
        "y_det_bin_width": 2.0,
    }

    parameters_dict = {
        "decay": {
            "parameters": flex.double([1.2, 1.1, 1.0, 0.9]),
            "parameter_esds": None,
        },
        "absorption": {
            "parameters": flex.double([0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2]),
            "parameter_esds": None,
        },
        "modulation": {"parameters": flex.double(4, 0.9), "parameter_esds": None},
    }

    # Test standard factory initialisation
    arraymodel = ArrayScalingModel(parameters_dict, configdict)
    assert arraymodel.id_ == "array"
    assert "decay" in arraymodel.components
    assert "absorption" in arraymodel.components
    assert "modulation" in arraymodel.components
    assert list(arraymodel.components["decay"].parameters) == [1.2, 1.1, 1.0, 0.9]
    assert list(arraymodel.components["absorption"].parameters) == [
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
    ]
    assert list(arraymodel.components["modulation"].parameters) == 4 * [0.9]

    # Test configure reflection table
    _ = arraymodel.configure_components(test_reflections, mock_exp, [])

    # Test from_dict initialisation method for previous model case.
    init_dict = arraymodel.to_dict()
    new_array_model = ArrayScalingModel.from_dict(init_dict)
    assert new_array_model.id_ == "array"
    comps = new_array_model.components
    assert "modulation" in comps
    assert "absorption" in comps
    assert "decay" in comps
    assert list(comps["decay"].parameters) == [1.2, 1.1, 1.0, 0.9]
    assert list(comps["absorption"].parameters) == [
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
    ]
    assert list(comps["modulation"].parameters) == 4 * [0.9]

    # Test from_dict initialisation method for another case.
    array_dict = {
        "__id__": "array",
        "is_scaled": True,
        "decay": {
            "n_parameters": 4,
            "parameters": [0.5, 1.0, 0.4, 1.0],
            "null_parameter_value": 1.0,
            "est_standard_devs": [0.05, 0.1, 0.05, 0.1],
        },
        "configuration_parameters": {
            "corrections": ["decay"],
            "n_res_param": 2,
            "n_time_param": 2,
            "resmin": 1.0,
            "res_bin_width": 1.0,
            "time_norm_fac": 1.0,
            "time_rot_interval": 1.0,
        },
    }
    arraymodel = ArrayScalingModel.from_dict(array_dict)
    assert arraymodel.id_ == "array"
    comps = arraymodel.components
    assert "modulation" not in comps
    assert "absorption" not in comps
    assert "decay" in comps
    assert list(comps["decay"].parameters) == [0.5, 1.0, 0.4, 1.0]
    assert list(comps["decay"].parameter_esds) == [0.05, 0.1, 0.05, 0.1]

    new_dict = arraymodel.to_dict()
    assert new_dict == array_dict

    with pytest.raises(RuntimeError):
        array_dict["__id__"] = "physical"
        arraymodel = ArrayScalingModel.from_dict(array_dict)

    assert len(arraymodel.consecutive_refinement_order) == 3
    arraymodel.show()

    # test limit batch range
    configdict = {
        "corrections": ["decay", "absorption"],
        "n_res_param": 2,
        "n_time_param": 3,
        "resmin": 1.0,
        "res_bin_width": 1.0,
        "time_norm_fac": 0.1,
        "time_rot_interval": 10.0,
        "n_x_param": 2,
        "n_y_param": 2,
        "xmin": 0.0,
        "ymin": 0.0,
        "x_bin_width": 1.0,
        "y_bin_width": 2.0,
        "n_x_mod_param": 2,
        "n_y_mod_param": 2,
        "x_det_bin_width": 2.0,
        "y_det_bin_width": 2.0,
        "valid_image_range": (1, 20),
        "valid_osc_range": (0, 20),
    }

    parameters_dict = {
        "decay": {
            "parameters": flex.double([1.2, 1.1, 1.0, 0.9, 0.8, 0.7]),
            "parameter_esds": None,
        },
        "absorption": {
            "parameters": flex.double(
                [0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.3, 0.4, 0.3, 0.4]
            ),
            "parameter_esds": None,
        },
    }
    array = ArrayScalingModel(parameters_dict, configdict)
    array.limit_image_range((1, 10))
    assert list(array.components["decay"].parameters) == [1.2, 1.1, 1.0, 0.9]
    assert list(array.components["absorption"].parameters) == [
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
        0.1,
        0.2,
    ]
    assert array.configdict["n_time_param"] == 2
    assert array.configdict["valid_image_range"] == (1, 10)
    assert array.configdict["valid_osc_range"] == (0, 10)
Example #33
 def resolve_model_parameters(self):
     """Update parameters in the model."""
     self.model.components["b"].parameters = flex.double([self.x[0]])
  def integration_concept_detail(self, experiments, reflections, spots,image_number,cb_op_to_primitive,**kwargs):
    detector = experiments[0].detector
    crystal = experiments[0].crystal
    from cctbx.crystal import symmetry
    c_symmetry = symmetry(space_group = crystal.get_space_group(), unit_cell = crystal.get_unit_cell())

    self.image_number = image_number
    NEAR = 10
    pxlsz = detector[0].get_pixel_size()

    Predicted = self.get_predictions_accounting_for_centering(experiments,reflections,cb_op_to_primitive,**kwargs)

    FWMOSAICITY = self.inputai.getMosaicity()
    self.DOMAIN_SZ_ANG = kwargs.get("domain_size_ang",  self.__dict__.get("actual",0)  )
    refineflag = {True:0,False:1}[kwargs.get("domain_size_ang",0)==0]
    c_symmetry.show_summary(prefix="EXCURSION%1d REPORT FWMOS= %6.4f DOMAIN= %6.1f "%(refineflag,FWMOSAICITY,self.DOMAIN_SZ_ANG))
    from annlib_ext import AnnAdaptor
    self.cell = c_symmetry.unit_cell()

    query = flex.double()
    print len(self.predicted)

    for pred in self.predicted: # predicted spot coord in pixels
      query.append(pred[0]/pxlsz[0])
      query.append(pred[1]/pxlsz[1])

    self.reserve_hkllist_for_signal_search = self.hkllist

    reference = flex.double()

    assert self.length > NEAR  # Can't do spot/pred matching with too few spots
    for spot in spots:
      reference.append(spot.ctr_mass_x())
      reference.append(spot.ctr_mass_y())

    IS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    IS_adapt.query(query)
    idx_cutoff = float(min(self.mask_focus[image_number]))

    from rstbx.apps.slip_helpers import slip_callbacks
    cache_refinement_spots = getattr(slip_callbacks.slip_callback,"requires_refinement_spots",False)

    indexed_pairs_provisional = []
    correction_vectors_provisional = []
    c_v_p_flex = flex.vec3_double()
    this_setting_matched_indices = reflections["miller_index"]
    for j,item in enumerate(this_setting_matched_indices):
      this_setting_index = self.hkllist.first_index(item)
      if this_setting_index is not None:  # first_index returns None when not found; a bare truth test would also reject index 0
        Match = dict(spot=j,pred=this_setting_index)
        indexed_pairs_provisional.append(Match)
        vector = matrix.col(
            [reflections["xyzobs.px.value"][j][0] - self.predicted[Match["pred"]][0]/pxlsz[0],
             reflections["xyzobs.px.value"][j][1] - self.predicted[Match["pred"]][1]/pxlsz[1]])
        correction_vectors_provisional.append(vector)
        c_v_p_flex.append((vector[0],vector[1],0.))
    self.N_correction_vectors = len(correction_vectors_provisional)
    self.rmsd_px = math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))
    print "... %d provisional matches"%self.N_correction_vectors,
    print "r.m.s.d. in pixels: %6.3f"%(self.rmsd_px)

    if self.horizons_phil.integration.enable_residual_scatter:
      from matplotlib import pyplot as plt
      fig = plt.figure()
      for cv in correction_vectors_provisional:
        plt.plot([cv[1]],[-cv[0]],"r.")
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"res")
      plt.close()

    if self.horizons_phil.integration.enable_residual_map:
      from matplotlib import pyplot as plt
      PX = reflections["xyzobs.px.value"]
      fig = plt.figure()
      for match,cv in zip(indexed_pairs_provisional,correction_vectors_provisional):
        plt.plot([PX[match["spot"]][1]],[-PX[match["spot"]][0]],"r.")
        plt.plot([self.predicted[match["pred"]][1]/pxlsz[1]],[-self.predicted[match["pred"]][0]/pxlsz[0]],"g.")
        plt.plot([PX[match["spot"]][1], PX[match["spot"]][1] + 10.*cv[1]],
                 [-PX[match["spot"]][0], -PX[match["spot"]][0] - 10.*cv[0]],'r-')
      if kwargs.get("user-reentrant") != None and self.horizons_phil.integration.spot_prediction == "dials" \
             and self.horizons_phil.integration.enable_residual_map_deltapsi:
        from rstbx.apps.stills.util import residual_map_special_deltapsi_add_on
        residual_map_special_deltapsi_add_on(
          reflections = self.dials_spot_prediction,
          matches = indexed_pairs_provisional, experiments=experiments,
          hkllist = self.hkllist,
          predicted = self.predicted, plot=plt, eta_deg=FWMOSAICITY, deff=self.DOMAIN_SZ_ANG
          )
      plt.xlim([0,detector[0].get_image_size()[1]])
      plt.ylim([-detector[0].get_image_size()[0],0])
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"map")
      plt.close()

    indexed_pairs = indexed_pairs_provisional
    correction_vectors = correction_vectors_provisional
    ########### skip outlier rejection for this derived class

    ### However must retain the ability to write out correction vectors.
    if True: # at Aaron's request; test later
      correction_lengths = flex.double([v.length() for v in correction_vectors_provisional])
      clorder = flex.sort_permutation(correction_lengths)
      sorted_cl = correction_lengths.select(clorder)
      indexed_pairs = []
      correction_vectors = []
      self.correction_vectors = []
      for icand in xrange(len(sorted_cl)):
        # somewhat arbitrary sigma = 1.0 cutoff for outliers
        indexed_pairs.append(indexed_pairs_provisional[clorder[icand]])
        correction_vectors.append(correction_vectors_provisional[clorder[icand]])
        if cache_refinement_spots:
          self.spotfinder.images[self.frame_numbers[self.image_number]]["refinement_spots"].append(
          spots[reflections[indexed_pairs[-1]["spot"]]['spotfinder_lookup']])
        if kwargs.get("verbose_cv")==True:
            print "CV OBSCENTER %7.2f %7.2f REFINEDCENTER %7.2f %7.2f"%(
              float(self.inputpd["size1"])/2.,float(self.inputpd["size2"])/2.,
              self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1]),
            print "OBSSPOT %7.2f %7.2f PREDSPOT %7.2f %7.2f"%(
              reflections[indexed_pairs[-1]["spot"]]['xyzobs.px.value'][0],
              reflections[indexed_pairs[-1]["spot"]]['xyzobs.px.value'][1],
              self.predicted[indexed_pairs[-1]["pred"]][0]/pxlsz[0],
              self.predicted[indexed_pairs[-1]["pred"]][1]/pxlsz[1]),
            the_hkl = self.hkllist[indexed_pairs[-1]["pred"]]
            print "HKL %4d %4d %4d"%the_hkl,"%2d"%self.setting_id,
            radial, azimuthal = spots[indexed_pairs[-1]["spot"]].get_radial_and_azimuthal_size(
              self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1])
            print "RADIALpx %5.3f AZIMUTpx %5.3f"%(radial,azimuthal)

        # Store a list of correction vectors in self.
        radial, azimuthal = spots[indexed_pairs[-1]['spot']].get_radial_and_azimuthal_size(
          self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1])
        self.correction_vectors.append(
          dict(obscenter=(float(self.inputpd['size1']) / 2,
                          float(self.inputpd['size2']) / 2),
               refinedcenter=(self.inputai.xbeam() / pxlsz[0],
                              self.inputai.ybeam() / pxlsz[1]),
               obsspot=(reflections[indexed_pairs[-1]['spot']]['xyzobs.px.value'][0],
                        reflections[indexed_pairs[-1]['spot']]['xyzobs.px.value'][1]),
               predspot=(self.predicted[indexed_pairs[-1]['pred']][0] / pxlsz[0],
                         self.predicted[indexed_pairs[-1]['pred']][1] / pxlsz[1]),
               hkl=(self.hkllist[indexed_pairs[-1]['pred']][0],
                    self.hkllist[indexed_pairs[-1]['pred']][1],
                    self.hkllist[indexed_pairs[-1]['pred']][2]),
               setting_id=self.setting_id,
               radial=radial,
               azimuthal=azimuthal))


    self.inputpd["symmetry"] = c_symmetry
    self.inputpd["symmetry"].show_summary(prefix="SETTING ")


    if self.horizons_phil.integration.model == "user_supplied":
      # Not certain of whether the reentrant_* dictionary keys create a memory leak
      if kwargs.get("user-reentrant",None)==None:
        kwargs["reentrant_experiments"] = experiments
        kwargs["reentrant_reflections"] = reflections
        from cxi_user import post_outlier_rejection
        self.indexed_pairs = indexed_pairs
        self.spots = spots
        post_outlier_rejection(self,image_number,cb_op_to_primitive,self.horizons_phil,kwargs)
        return
    ########### finished with user-supplied code


    correction_lengths=flex.double([v.length() for v in correction_vectors])

    self.r_residual = pxlsz[0]*flex.mean(correction_lengths)

    #assert len(indexed_pairs)>NEAR # must have enough indexed spots
    if (len(indexed_pairs) <= NEAR):
      raise Sorry("Not enough indexed spots, only found %d, need %d" % (len(indexed_pairs), NEAR))

    reference = flex.double()
    for item in indexed_pairs:
      reference.append(spots[item["spot"]].ctr_mass_x())
      reference.append(spots[item["spot"]].ctr_mass_y())

    PS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    PS_adapt.query(query)

    self.BSmasks = []
    # do not use null: self.null_correction_mapping( predicted=self.predicted,
    self.positional_correction_mapping( predicted=self.predicted,
                                        correction_vectors = correction_vectors,
                                        PS_adapt = PS_adapt,
                                        IS_adapt = IS_adapt,
                                        spots = spots)

    # which spots are close enough to interfere with background?
    MAXOVER=6
    OS_adapt = AnnAdaptor(data=query,dim=2,k=MAXOVER) #six near nbrs
    OS_adapt.query(query)
    if self.mask_focus[image_number] is None:
      raise Sorry("No observed/predicted spot agreement; no Spotfinder masks; skip integration")
    nbr_cutoff = 2.0* max(self.mask_focus[image_number])
    FRAME = int(nbr_cutoff/2)
    #print "The overlap cutoff is %d pixels"%nbr_cutoff
    nbr_cutoff_sq = nbr_cutoff * nbr_cutoff

    #print "Optimized C++ section...",
    self.set_frame(FRAME)
    self.set_background_factor(kwargs["background_factor"])
    self.set_nbr_cutoff_sq(nbr_cutoff_sq)
    self.set_guard_width_sq(self.horizons_phil.integration.guard_width_sq)
    self.set_detector_gain(self.horizons_phil.integration.detector_gain)
    flex_sorted = flex.int()
    for item in self.sorted:
      flex_sorted.append(item[0])
      flex_sorted.append(item[1])

    if self.horizons_phil.integration.mask_pixel_value is not None:
      self.set_mask_pixel_val(self.horizons_phil.integration.mask_pixel_value)

    image_obj = self.imagefiles.imageindex(self.frame_numbers[self.image_number])
    image_obj.read()
    rawdata = image_obj.linearintdata # assume image #1

    if self.inputai.active_areas is not None:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted,
                          tiles=self.inputai.active_areas.IT,
                          tile_id=self.inputai.active_areas.tile_id)
    else:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted)
    for i in xrange(len(self.predicted)): # loop over predicteds
      B_S_mask = {}
      keys = self.get_bsmask(i)
      for k in xrange(0,len(keys),2):
        B_S_mask[(keys[k],keys[k+1])]=True
      self.BSmasks.append(B_S_mask)
    #print "Done"
    return
Example #35
    def run(self):
        from libtbx.test_utils import approx_equal
        from dials.array_family import flex

        def approx_equal_dict(a, b, k):
            return approx_equal(a[k], b[k])

        # Do summation by all different methods
        result1 = self.integrate("3d")
        result2 = self.integrate("flat3d")
        result3 = self.integrate("2d")
        result4 = self.integrate("single2d")
        assert (len(result1) >= len(self.rlist))
        assert (len(result2) >= len(self.rlist))
        assert (len(result3) >= len(self.rlist))
        assert (len(result4) >= len(self.rlist))

        # result1 and result2 should be the same
        assert (len(result1) == len(result2))
        for r1, r2 in zip(result1, result2):
            assert (r1['partial_id'] == r2['partial_id'])
            assert (r1['bbox'] == r2['bbox'])
            assert (r1['entering'] == r2['entering'])
            assert (r1['flags'] == r2['flags'])
            assert (r1['id'] == r2['id'])
            assert (r1['miller_index'] == r2['miller_index'])
            assert (r1['panel'] == r2['panel'])
            assert (approx_equal_dict(r1, r2, 'd'))
            assert (approx_equal_dict(r1, r2, 'intensity.sum.value'))
            assert (approx_equal_dict(r1, r2, 'intensity.sum.variance'))
            assert (approx_equal_dict(r1, r2, 'lp'))
            assert (approx_equal_dict(r1, r2, 'partiality'))
            assert (approx_equal_dict(r1, r2, 's1'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.mm'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.px'))
            assert (approx_equal_dict(r1, r2, 'zeta'))
        print 'OK'

        # result3 and result4 should be the same
        assert (len(result3) == len(result4))
        for r3, r4 in zip(result3, result4):
            assert (r3['partial_id'] == r4['partial_id'])
            assert (r3['bbox'] == r4['bbox'])
            assert (r3['entering'] == r4['entering'])
            assert (r3['flags'] == r4['flags'])
            assert (r3['id'] == r4['id'])
            assert (r3['miller_index'] == r4['miller_index'])
            assert (r3['panel'] == r4['panel'])
            assert (approx_equal_dict(r3, r4, 'd'))
            assert (approx_equal_dict(r3, r4, 'intensity.sum.value'))
            assert (approx_equal_dict(r3, r4, 'intensity.sum.variance'))
            assert (approx_equal_dict(r3, r4, 'lp'))
            assert (approx_equal_dict(r3, r4, 'partiality'))
            assert (approx_equal_dict(r3, r4, 's1'))
            assert (approx_equal_dict(r3, r4, 'xyzcal.mm'))
            assert (approx_equal_dict(r3, r4, 'xyzcal.px'))
            assert (approx_equal_dict(r3, r4, 'xyzobs.px.value'))
            assert (approx_equal_dict(r3, r4, 'xyzobs.px.variance'))
            assert (approx_equal_dict(r3, r4, 'zeta'))
        print("OK")

        # result3 should add up to result1
        assert (len(result3) >= len(result1))
        expected1 = self.rlist.copy()
        expected1['intensity.sum.value'] = flex.double(len(self.rlist), 0)
        expected1['intensity.sum.variance'] = flex.double(len(self.rlist), 0)
        for r1 in result1:
            pid = r1['partial_id']
            r2 = expected1[pid]
            assert (r1['entering'] == r2['entering'])
            assert (r1['id'] == r2['id'])
            assert (r1['miller_index'] == r2['miller_index'])
            assert (r1['panel'] == r2['panel'])
            assert (approx_equal_dict(r1, r2, 's1'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.mm'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.px'))
            expected1['intensity.sum.value'][pid] += r1['intensity.sum.value']
            expected1['intensity.sum.variance'][pid] += r1[
                'intensity.sum.variance']
        expected3 = self.rlist.copy()
        expected3['intensity.sum.value'] = flex.double(len(self.rlist), 0)
        expected3['intensity.sum.variance'] = flex.double(len(self.rlist), 0)
        for r1 in result3:
            pid = r1['partial_id']
            r2 = expected3[pid]
            assert (r1['entering'] == r2['entering'])
            assert (r1['id'] == r2['id'])
            assert (r1['miller_index'] == r2['miller_index'])
            assert (r1['panel'] == r2['panel'])
            assert (approx_equal_dict(r1, r2, 's1'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.mm'))
            assert (approx_equal_dict(r1, r2, 'xyzcal.px'))
            expected3['intensity.sum.value'][pid] += r1['intensity.sum.value']
            expected3['intensity.sum.variance'][pid] += r1[
                'intensity.sum.variance']
        for r1, r3 in zip(expected1, expected3):
            assert (approx_equal_dict(r1, r3, 'intensity.sum.value'))
            assert (approx_equal_dict(r1, r3, 'intensity.sum.variance'))
        print("OK")
Example #36
def mock_scaling_component(n_refl):
    """Return a mock component of a general model."""
    component = mock_component()
    component.calculate_scales.return_value = flex.double(n_refl, 1.0)
    component.n_refl = [n_refl]
    return component
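A usage sketch (not from the original suite) showing how the mocked component behaves when exercised; mock_component is assumed to be a simple MagicMock factory:

from unittest.mock import MagicMock

from dials.array_family import flex

def mock_component():
    """Hypothetical stand-in for the test suite's mock_component fixture."""
    return MagicMock()

component = mock_scaling_component(5)
# The mock reports a constant scale of 1.0 for every reflection.
assert list(component.calculate_scales()) == [1.0] * 5
assert component.n_refl == [5]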
Example #37
File: test_model.py (project: hattne/dials)
def test_PhysicalScalingModel(test_reflections, mock_exp):
    """Test the PhysicalScalingModel class."""
    configdict = {
        "corrections": ["scale", "decay", "absorption"],
        "s_norm_fac": 1.0,
        "scale_rot_interval": 2.0,
        "d_norm_fac": 1.0,
        "decay_rot_interval": 2.0,
        "lmax": 1,
        "abs_surface_weight": 1e6,
    }

    parameters_dict = {
        "scale": {"parameters": flex.double([1.2, 1.1]), "parameter_esds": None},
        "decay": {"parameters": flex.double([0.1, 0.2]), "parameter_esds": None},
        "absorption": {
            "parameters": flex.double([0.01, 0.01, 0.01]),
            "parameter_esds": None,
        },
    }

    # Test standard factory initialisation
    physicalmodel = PhysicalScalingModel(parameters_dict, configdict)
    assert physicalmodel.id_ == "physical"
    physicalmodel.show()  # test show works with no parameter esds.
    comps = physicalmodel.components
    assert "scale" in comps
    assert "absorption" in comps
    assert "decay" in comps
    assert list(comps["scale"].parameters) == [1.2, 1.1]
    assert list(comps["decay"].parameters) == [0.1, 0.2]
    assert list(comps["absorption"].parameters) == [0.01, 0.01, 0.01]

    # Test configure reflection table
    mock_params = Mock()
    mock_params.parameterisation.decay_restraint = 0.0
    physicalmodel.configure_components(test_reflections, mock_exp, mock_params)
    # Test normalise components.
    physicalmodel.components["scale"].update_reflection_data()
    physicalmodel.components["scale"].calculate_scales_and_derivatives()
    physicalmodel.components["decay"].update_reflection_data()
    physicalmodel.components["decay"].calculate_scales_and_derivatives()
    physicalmodel.normalise_components()
    assert list(physicalmodel.components["scale"].parameters) == pytest.approx(
        [1.0091065, 0.925014], 1e-4
    )

    # Test from_dict initialisation method.
    physical_dict = {
        "__id__": "physical",
        "is_scaled": True,
        "scale": {
            "n_parameters": 2,
            "parameters": [0.5, 1.0],
            "est_standard_devs": [0.05, 0.1],
            "null_parameter_value": 1,
        },
        "configuration_parameters": {
            "corrections": ["scale"],
            "s_norm_fac": 0.1,
            "scale_rot_interval": 10.0,
            "decay_restaint": 1e-1,
        },
    }
    physicalmodel = PhysicalScalingModel.from_dict(physical_dict)
    assert physicalmodel.id_ == "physical"
    assert "scale" in physicalmodel.components
    assert "absorption" not in physicalmodel.components
    assert "decay" not in physicalmodel.components
    assert list(physicalmodel.components["scale"].parameters) == [0.5, 1.0]
    assert list(physicalmodel.components["scale"].parameter_esds) == [0.05, 0.1]

    new_dict = physicalmodel.to_dict()
    assert new_dict == physical_dict

    # Test from_dict initialisation method for all components.
    physical_dict = {
        "__id__": "physical",
        "is_scaled": True,
        "scale": {
            "n_parameters": 2,
            "parameters": [0.5, 1.0],
            "est_standard_devs": [0.05, 0.1],
            "null_parameter_value": 1,
        },
        "decay": {
            "n_parameters": 2,
            "parameters": [0.1, 0.2],
            "est_standard_devs": [0.01, 0.01],
            "null_parameter_value": 0,
        },
        "absorption": {
            "n_parameters": 3,
            "parameters": [0.0, 0.1, 0.2],
            "est_standard_devs": [0.01, 0.02, 0.03],
            "null_parameter_value": 0,
        },
        "configuration_parameters": {
            "corrections": ["scale", "decay", "absorption"],
            "s_norm_fac": 0.1,
            "scale_rot_interval": 10.0,
            "d_norm_fac": 0.2,
            "decay_rot_interval": 20.0,
            "lmax": 1,
            "abs_surface_weight": 1e6,
        },
    }
    physicalmodel = PhysicalScalingModel.from_dict(physical_dict)
    assert physicalmodel.id_ == "physical"
    assert "scale" in physicalmodel.components
    assert "absorption" in physicalmodel.components
    assert "decay" in physicalmodel.components
    assert list(physicalmodel.components["scale"].parameters) == [0.5, 1.0]
    assert list(physicalmodel.components["scale"].parameter_esds) == [0.05, 0.1]
    assert list(physicalmodel.components["decay"].parameters) == [0.1, 0.2]
    assert list(physicalmodel.components["decay"].parameter_esds) == [0.01, 0.01]
    assert list(physicalmodel.components["absorption"].parameters) == [0.0, 0.1, 0.2]
    assert list(physicalmodel.components["absorption"].parameter_esds) == [
        0.01,
        0.02,
        0.03,
    ]

    new_dict = physicalmodel.to_dict()
    assert new_dict == physical_dict

    with pytest.raises(RuntimeError):
        physical_dict["__id__"] = "array"
        physicalmodel = PhysicalScalingModel.from_dict(physical_dict)

    assert len(physicalmodel.consecutive_refinement_order) == 2
    physicalmodel.show()

    # test limit batch range
    parameters_dict = {
        "scale": {
            "n_parameters": 11,
            "parameters": [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5],
            "parameter_esds": None,
        },
        "decay": {
            "n_parameters": 11,
            "parameters": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1],
            "parameter_esds": None,
        },
    }
    configdict = {
        "corrections": ["scale", "decay"],
        "s_norm_fac": 0.1,
        "scale_rot_interval": 10.0,
        "d_norm_fac": 0.1,
        "decay_rot_interval": 10.0,
        "valid_image_range": (1, 90),
        "valid_osc_range": (0, 90),
    }
    physical = PhysicalScalingModel(parameters_dict, configdict)
    physical.limit_image_range((1, 50))
    assert list(physical.components["scale"].parameters) == [
        0.5,
        1.0,
        1.5,
        2.0,
        2.5,
        3.0,
        3.5,
    ]
    assert list(physical.components["decay"].parameters) == [
        0.1,
        0.2,
        0.3,
        0.4,
        0.5,
        0.6,
        0.7,
    ]
    assert physical.configdict["valid_osc_range"] == (0, 50)
    assert physical.configdict["valid_image_range"] == (1, 50)

    # try edge cases
    # if restricted by > rot int, then reduce number of params and shift offset
    # if necessary
    physical = PhysicalScalingModel(
        copy.deepcopy(parameters_dict), copy.deepcopy(configdict)
    )
    physical.limit_image_range((7, 45))
    assert list(physical.components["scale"].parameters) == [
        1.0,
        1.5,
        2.0,
        2.5,
        3.0,
        3.5,
    ]
    assert list(physical.components["decay"].parameters) == [
        0.2,
        0.3,
        0.4,
        0.5,
        0.6,
        0.7,
    ]
    assert physical.configdict["valid_osc_range"] == (6, 45)
    assert physical.configdict["valid_image_range"] == (7, 45)

    # if not restricted by > rot int, then should 'squeeze' the parameters closer
    # in rotation angle, leaving the same number of params (as reducing number of
    # params would give parameter spacing greater than initially specified rot int)
    physical = PhysicalScalingModel(
        copy.deepcopy(parameters_dict), copy.deepcopy(configdict)
    )
    physical.limit_image_range((5, 45))
    assert list(physical.components["scale"].parameters) == [
        0.5,
        1.0,
        1.5,
        2.0,
        2.5,
        3.0,
        3.5,
    ]
    assert list(physical.components["decay"].parameters) == [
        0.1,
        0.2,
        0.3,
        0.4,
        0.5,
        0.6,
        0.7,
    ]
    assert physical.configdict["valid_osc_range"] == (4, 45)
    assert physical.configdict["valid_image_range"] == (5, 45)
Example #38
def set_param_vals(self, x):
    """Method for refinement engine access."""
    self.x = x
    self.model.components["b"].parameters = flex.double([self.x[0]])
    self.model.binner.update(self.model.parameters)
Example #39
        tsim = time.time() - t

        panel_images = np.array(sims[0])

        for pidx in range(len(DETECTOR)):
            SIM = nanoBragg(detector=DETECTOR,
                            beam=SPECTRUM_BEAM,
                            panel_id=pidx)
            SIM.beamsize_mm = 0.001
            SIM.exposure_s = 1
            SIM.flux = 1e12
            SIM.adc_offset_adu = 0
            # SIM.detector_psf_kernel_radius_pixels = 5
            # SIM.detector_psf_type = shapetype.Unknown  # for CSPAD
            SIM.detector_psf_fwhm_mm = 0
            SIM.quantum_gain = 1
            SIM.raw_pixels = flex.double(panel_images[pidx].ravel())
            SIM.add_noise()
            panel_images[pidx] = SIM.raw_pixels.as_numpy_array().reshape(
                panel_images[pidx].shape)
            SIM.free_all()
            del SIM

        writer.add_image(panel_images)

        if rank == 0:
            print("Done with shot %d / %d , time: %.4fs" %
                  (img_num + 1, num_imgs, tsim),
                  flush=True)
Example #40
    def write_columns(self, integrated_data):
        """Write the column definitions AND data to the current dataset."""

        # now create the actual data structures - first keep a track of the columns

        # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
        # LP MPART FLAG BGPKRATIOS

        # gather the required information for the reflection file

        nref = len(integrated_data["miller_index"])
        assert nref
        xdet, ydet, _ = [
            flex.double(x) for x in integrated_data["xyzobs.px.value"].parts()
        ]

        # now add column information...

        # FIXME add DIALS_FLAG which can include e.g. was partial etc.

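        # Standard CCP4 MTZ column types used below: H = Miller index,
        # J = intensity, Q = standard deviation, R = generic real,
        # B = batch number, Y = packed M/ISYM, I = integer.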
        type_table = {
            "H": "H",
            "K": "H",
            "L": "H",
            "I": "J",
            "SIGI": "Q",
            "IPR": "J",
            "SIGIPR": "Q",
            "BG": "R",
            "SIGBG": "R",
            "XDET": "R",
            "YDET": "R",
            "BATCH": "B",
            "BGPKRATIOS": "R",
            "WIDTH": "R",
            "MPART": "I",
            "M_ISYM": "Y",
            "FLAG": "I",
            "LP": "R",
            "FRACTIONCALC": "R",
            "ROT": "R",
            "QE": "R",
        }

        # derive index columns from original indices with
        #
        # from m.replace_original_index_miller_indices
        #
        # so all that is needed now is to make space for the reflections - fill with
        # zeros...

        self.mtz_file.adjust_column_array_sizes(nref)
        self.mtz_file.set_n_reflections(nref)
        dataset = self.current_dataset

        # assign H, K, L, M_ISYM space
        for column in "H", "K", "L", "M_ISYM":
            dataset.add_column(column, type_table[column]).set_values(
                flex.double(nref, 0.0).as_float())

        self.mtz_file.replace_original_index_miller_indices(
            integrated_data["miller_index_rebase"])

        dataset.add_column("BATCH", type_table["BATCH"]).set_values(
            integrated_data["batch"].as_double().as_float())

        # if intensity values used in scaling exist, then just export these as I, SIGI
        if "intensity.scale.value" in integrated_data:
            I_scaling = integrated_data["intensity.scale.value"]
            V_scaling = integrated_data["intensity.scale.variance"]
            # Trap negative variances
            assert V_scaling.all_gt(0)
            dataset.add_column("I", type_table["I"]).set_values(
                I_scaling.as_float())
            dataset.add_column("SIGI", type_table["SIGI"]).set_values(
                flex.sqrt(V_scaling).as_float())
            dataset.add_column("SCALEUSED", "R").set_values(
                integrated_data["inverse_scale_factor"].as_float())
            dataset.add_column("SIGSCALEUSED", "R").set_values(
                flex.sqrt(integrated_data["inverse_scale_factor_variance"]).
                as_float())
        else:
            if "intensity.prf.value" in integrated_data:
                if "intensity.sum.value" in integrated_data:
                    col_names = ("IPR", "SIGIPR")
                else:
                    col_names = ("I", "SIGI")
                I_profile = integrated_data["intensity.prf.value"]
                V_profile = integrated_data["intensity.prf.variance"]
                # Trap negative variances
                assert V_profile.all_gt(0)
                dataset.add_column(col_names[0], type_table["I"]).set_values(
                    I_profile.as_float())
                dataset.add_column(col_names[1],
                                   type_table["SIGI"]).set_values(
                                       flex.sqrt(V_profile).as_float())
            if "intensity.sum.value" in integrated_data:
                I_sum = integrated_data["intensity.sum.value"]
                V_sum = integrated_data["intensity.sum.variance"]
                # Trap negative variances
                assert V_sum.all_gt(0)
                dataset.add_column("I", type_table["I"]).set_values(
                    I_sum.as_float())
                dataset.add_column("SIGI", type_table["SIGI"]).set_values(
                    flex.sqrt(V_sum).as_float())
        if ("background.sum.value" in integrated_data
                and "background.sum.variance" in integrated_data):
            bg = integrated_data["background.sum.value"]
            varbg = integrated_data["background.sum.variance"]
            assert (varbg >= 0).count(False) == 0
            sigbg = flex.sqrt(varbg)
            dataset.add_column("BG",
                               type_table["BG"]).set_values(bg.as_float())
            dataset.add_column("SIGBG", type_table["SIGBG"]).set_values(
                sigbg.as_float())

        dataset.add_column("FRACTIONCALC",
                           type_table["FRACTIONCALC"]).set_values(
                               integrated_data["fractioncalc"].as_float())

        dataset.add_column("XDET",
                           type_table["XDET"]).set_values(xdet.as_float())
        dataset.add_column("YDET",
                           type_table["YDET"]).set_values(ydet.as_float())
        dataset.add_column("ROT", type_table["ROT"]).set_values(
            integrated_data["ROT"].as_float())
        if "lp" in integrated_data:
            dataset.add_column("LP", type_table["LP"]).set_values(
                integrated_data["lp"].as_float())
        if "qe" in integrated_data:
            dataset.add_column("QE", type_table["QE"]).set_values(
                integrated_data["qe"].as_float())
        elif "dqe" in integrated_data:
            dataset.add_column("QE", type_table["QE"]).set_values(
                integrated_data["dqe"].as_float())
        else:
            dataset.add_column("QE", type_table["QE"]).set_values(
                flex.double(nref, 1.0).as_float())
Example #41
def generate_simple_table(prf=True):
    """Generate a reflection table for testing intensity combination.
    The numbers are contrived to make sum intensities agree well at high
    intensity but terribly at low and vice versa for profile intensities."""
    reflections = flex.reflection_table()
    reflections["miller_index"] = flex.miller_index([
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
    ])
    reflections["inverse_scale_factor"] = flex.double(25, 1.0)
    # Contrive an example that should give the best cc12 when combined:
    # sum intensities agree well at high intensity but terribly at low,
    # and vice versa for profile intensities (profile is less consistent
    # at high intensity; sum is less consistent at low intensity).
    reflections["intensity.sum.value"] = flex.double([
        10000.0,
        11000.0,
        9000.0,
        8000.0,
        12000.0,
        500.0,
        5600.0,
        5500.0,
        2000.0,
        6000.0,
        100.0,
        50.0,
        150.0,
        75.0,
        125.0,
        30.0,
        10.0,
        2.0,
        35.0,
        79.0,
        1.0,
        10.0,
        20.0,
        10.0,
        5.0,
    ])
    reflections["intensity.sum.variance"] = flex.double([10000] * 5 +
                                                        [5000] * 5 +
                                                        [100] * 5 + [30] * 5 +
                                                        [10] * 5)
    reflections.set_flags(flex.bool(25, False),
                          reflections.flags.outlier_in_scaling)
    reflections.set_flags(flex.bool(25, True), reflections.flags.integrated)
    reflections["lp"] = flex.double(25, 0.5)
    if prf:
        reflections["intensity.prf.value"] = flex.double([
            10000.0,
            16000.0,
            12000.0,
            6000.0,
            9000.0,
            5000.0,
            2000.0,
            1500.0,
            1300.0,
            9000.0,
            100.0,
            80.0,
            120.0,
            90.0,
            100.0,
            30.0,
            40.0,
            50.0,
            30.0,
            30.0,
            10.0,
            12.0,
            9.0,
            8.0,
            10.0,
        ])
        reflections["intensity.prf.variance"] = flex.double([1000] * 5 +
                                                            [500] * 5 +
                                                            [10] * 5 +
                                                            [3] * 5 + [1] * 5)
    reflections = calculate_prescaling_correction(reflections)
    return reflections
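A quick way to see the contrived behaviour; this usage snippet is not part of the original test and assumes the imports of the surrounding module:

refl = generate_simple_table()
isig_sum = refl["intensity.sum.value"] / flex.sqrt(refl["intensity.sum.variance"])
isig_prf = refl["intensity.prf.value"] / flex.sqrt(refl["intensity.prf.variance"])
# Summation should look best for the strong (0, 0, 1) group and profile
# fitting for the weak (0, 0, 5) group.
print(list(isig_sum)[0:5], list(isig_prf)[20:25])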
Example #42
    def _multiplicity_mean_error_stddev(
        self, calculate_variances=False, keep_singles=False
    ):
        """"
        Calculate aggregate properties of grouped symmetry-equivalent reflections.

        Populate the reflection table of observations with the following
        properties:
          * ``multiplicity`` — Multiplicity of observations of a given reflection
          in the asymmetric unit;
          :type: `dials.array_family_flex_ext.int` array
          * ``intensity.mean.value`` — Mean of symmetry-equivalent reflections,
          weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array
          * ``intensity.mean.std_error`` — Standard error on the weighted mean;
          :type: `dials.array_family_flex_ext.double` array
          * (optional) ``intensity.mean.variance`` — variance of
          symmetry-equivalent reflections, weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array

        :param calculate_variances: Elect whether to calculate the weighted
        variances.  Defaults to False, to spare an expensive computation.
        :type calculate_variances: bool
        :param keep_singles: Choose whether to keep single-multiplicity
        reflections.
        :type keep_singles: bool
        """

        for key, rtable in self.rtables.items():
            # Sort the reflection table for speedier iteration.
            rtable.sort("miller_index.asu")
            # Record the positions of any multiplicity-1 reflections.
            if not keep_singles:
                singles = flex.size_t()
            # Record the multiplicities.
            multiplicity = flex.int()
            # For weighted averaging.
            weights = 1 / rtable["intensity.sum.variance"]
            sum_weights = flex.double()
            if calculate_variances:
                sum_square_weights = flex.double()
            # Calculate the weighted mean intensities.
            i_means = flex.double()
            # Calculate the standard deviations from unbiased weighted variances.
            variances = flex.double()

            # Iterate over the reflections, grouping by equivalent Miller index,
            # to calculate multiplicities, weighted mean intensities, etc.
            # Some time is saved by only calculating variances when requested.
            # Initial values:
            prev_index = None
            count = 1
            # One big loop through the entire reflection table:
            for j in range(rtable.size()):
                index = rtable["miller_index.asu"][j]
                weight = weights[j]
                # Aggregate within a symmetry-equivalent group of reflections:
                if index == prev_index:
                    count += 1
                    i_sum += weight * rtable["intensity.sum.value"][j]
                    sum_weight += weight
                    if calculate_variances:
                        sum_square_weight += weight * weight
                # Record the aggregated values for the group:
                elif prev_index:
                    if count == 1 and not keep_singles:
                        singles.append(j - 1)
                    multiplicity.extend(flex.int(count, count))
                    i_means.extend(flex.double(count, i_sum / sum_weight))
                    sum_weights.extend(flex.double(count, sum_weight))
                    if calculate_variances:
                        sum_square_weights.extend(flex.double(count, sum_square_weight))
                    # And reinitialise:
                    prev_index = index
                    count = 1
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
                # Handle the first row:
                else:
                    prev_index = rtable["miller_index.asu"][j]
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
            # Record the aggregated values for the last group:
            if count == 1 and not keep_singles:
                singles.append(rtable.size() - 1)
            multiplicity.extend(flex.int(count, count))
            i_means.extend(flex.double(count, i_sum / sum_weight))
            sum_weights.extend(flex.double(count, sum_weight))
            if calculate_variances:
                sum_square_weights.extend(flex.double(count, sum_square_weight))

            # Discard singletons:
            if not keep_singles:
                singles_del = flex.bool(rtable.size(), True)
                singles_del.set_selected(singles, False)
                multiplicity, weights, sum_weights, i_means = [
                    a.select(singles_del)
                    for a in (multiplicity, weights, sum_weights, i_means)
                ]
                rtable.del_selected(singles)
                if calculate_variances:
                    sum_square_weights = sum_square_weights.select(singles_del)

            # Record the multiplicities in the reflection table.
            rtable["multiplicity"] = multiplicity
            # Record the weighted mean intensities in the reflection table.
            rtable["intensity.mean.value"] = i_means
            # Record the standard errors on the means in the reflection table.
            rtable["intensity.mean.std_error"] = flex.sqrt(1 / sum_weights)

            if calculate_variances:
                # Initialise values:
                prev_index = None
                for j in range(rtable.size()):
                    index = rtable["miller_index.asu"][j]
                    weight = weights[j]
                    residual = rtable["intensity.sum.value"][j] - i_means[j]
                    # Aggregate within a symmetry-equivalent group of reflections:
                    if index == prev_index:
                        count += 1
                        weighted_sum_square_residual += weight * residual * residual
                    # Record the aggregated value for the group:
                    elif prev_index:
                        # The weighted variance is undefined for multiplicity=1,
                        # use the measured variance instead in this case.
                        if count == 1:
                            variances.append(rtable["intensity.sum.variance"][j - 1])
                        else:
                            sum_weight = sum_weights[j - 1]
                            var_weight = 1 / (
                                sum_weight - sum_square_weights[j - 1] / sum_weight
                            )
                            variances.extend(
                                flex.double(
                                    count, weighted_sum_square_residual * var_weight
                                )
                            )
                        # Reinitialise:
                        prev_index = index
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                    # Handle the first row:
                    else:
                        prev_index = rtable["miller_index.asu"][j]
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                # Record the aggregated values for the last group:
                # The weighted variance is undefined for multiplicity=1,
                # use the measured variance instead in this case.
                if count == 1:
                    variances.append(rtable["intensity.sum.variance"][-1])
                else:
                    sum_weight = sum_weights[-1]
                    var_weight = 1 / (sum_weight - sum_square_weights[-1] / sum_weight)
                    variances.extend(
                        flex.double(count, weighted_sum_square_residual * var_weight)
                    )
                # Record the variances in the reflection table.
                rtable["intensity.mean.variance"] = variances

            self.rtables[key] = rtable
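For reference (not part of the original class), the quantities recorded above can be reproduced for a single group of equivalents in plain Python; the observations below are invented:

intensities = [100.0, 110.0, 95.0]      # symmetry-equivalent observations
variances = [10.0, 12.0, 9.0]           # their measurement variances
weights = [1.0 / v for v in variances]  # inverse-variance weights

sum_w = sum(weights)
sum_w2 = sum(w * w for w in weights)
# Weighted mean intensity and the standard error on that mean.
i_mean = sum(w * i for w, i in zip(weights, intensities)) / sum_w
std_error = (1.0 / sum_w) ** 0.5
# Unbiased weighted sample variance, matching the var_weight factor above.
var = sum(w * (i - i_mean) ** 2 for w, i in zip(weights, intensities)) / (
    sum_w - sum_w2 / sum_w
)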
Example #43
def parameters(self, parameters):
    assert len(parameters) == 2
    self.components["a"].parameters = flex.double([parameters[0]])
    self.components["b"].parameters = flex.double([parameters[1]])
Example #44
def plot_multirun_stats(runs,
                        run_numbers,
                        d_min,
                        n_multiples=2,
                        ratio_cutoff=1,
                        n_strong_cutoff=40,
                        i_sigi_cutoff=1,
                        run_tags=[],
                        run_statuses=[],
                        minimalist=False,
                        interactive=False,
                        easy_run=False,
                        compress_runs=True,
                        xsize=30,
                        ysize=10,
                        high_vis=False,
                        title=None):
    tset = flex.double()
    two_theta_low_set = flex.double()
    two_theta_high_set = flex.double()
    nset = flex.int()
    resolutions_set = flex.double()
    n_lattices = flex.int()
    boundaries = []
    lengths = []
    runs_with_data = []
    offset = 0
    for idx in range(len(runs)):
        r = runs[idx]
        if len(r[0]) > 0:
            if compress_runs:
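                # Compress the time axis: shift this run's timestamps to start
                # where the previous run ended; the extra 1/120 s presumably
                # matches the 120 Hz repetition rate of the source.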
                tslice = r[0] - r[0][0] + offset
                offset += (r[0][-1] - r[0][0] + 1 / 120.)
            else:
                tslice = r[0]
            last_end = r[0][-1]
            tset.extend(tslice)
            two_theta_low_set.extend(r[1])
            two_theta_high_set.extend(r[2])
            nset.extend(r[3])
            resolutions_set.extend(r[4])
            n_lattices.extend(r[5])
            boundaries.append(tslice[0])
            boundaries.append(tslice[-1])
            lengths.append(len(tslice))
            runs_with_data.append(run_numbers[idx])
        else:
            boundaries.extend([None] * 2)
    stats_tuple = get_run_stats(tset,
                                two_theta_low_set,
                                two_theta_high_set,
                                nset,
                                resolutions_set,
                                n_lattices,
                                tuple(boundaries),
                                tuple(lengths),
                                runs_with_data,
                                n_multiples=n_multiples,
                                ratio_cutoff=ratio_cutoff,
                                n_strong_cutoff=n_strong_cutoff,
                                i_sigi_cutoff=i_sigi_cutoff,
                                d_min=d_min)
    if easy_run:
        from libtbx import easy_run, easy_pickle
        easy_pickle.dump(
            "plot_run_stats_tmp.pickle",
            (stats_tuple, d_min, n_multiples, run_tags, run_statuses,
             minimalist, interactive, xsize, ysize, high_vis, title))
        result = easy_run.fully_buffered(
            command=
            "cctbx.xfel.plot_run_stats_from_stats_pickle plot_run_stats_tmp.pickle"
        )
        try:
            png = result.stdout_lines[-1]
            if png == "None":
                return None
        except Exception:
            return None
    else:
        png = plot_run_stats(stats_tuple,
                             d_min,
                             n_multiples=n_multiples,
                             run_tags=run_tags,
                             run_statuses=run_statuses,
                             minimalist=minimalist,
                             interactive=interactive,
                             xsize=xsize,
                             ysize=ysize,
                             high_vis=high_vis,
                             title=title)
    return png
Example #45
    def create(cls, params, experiment, reflection_table, for_multi=False):
        """Perform reflection_table preprocessing and create a SingleScaler."""

        cls.ensure_experiment_identifier(experiment, reflection_table)

        logger.info(
            "The scaling model type being applied is %s. \n",
            experiment.scaling_model.id_,
        )
        try:
            reflection_table = cls.filter_bad_reflections(
                reflection_table,
                partiality_cutoff=params.cut_data.partiality_cutoff,
                min_isigi=params.cut_data.min_isigi,
                intensity_choice=params.reflection_selection.intensity_choice,
            )
        except ValueError:
            raise BadDatasetForScalingException

        # combine partial measurements of same reflection, to handle those reflections
        # that were split by dials.integrate  - changes size of reflection table.
        reflection_table = sum_partial_reflections(reflection_table)

        if "inverse_scale_factor" not in reflection_table:
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0
            )
        elif (
            reflection_table["inverse_scale_factor"].count(0.0)
            == reflection_table.size()
        ):
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0
            )
        reflection_table = choose_initial_scaling_intensities(
            reflection_table, params.reflection_selection.intensity_choice
        )

        excluded_for_scaling = reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling
        )
        user_excluded = reflection_table.get_flags(
            reflection_table.flags.user_excluded_in_scaling
        )
        reasons = Reasons()
        reasons.add_reason("user excluded", user_excluded.count(True))
        reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True))
        n_excluded = (excluded_for_scaling | user_excluded).count(True)
        if n_excluded == reflection_table.size():
            logger.info("All reflections were determined to be unsuitable for scaling.")
            logger.info(reasons)
            raise BadDatasetForScalingException(
                """Unable to use this dataset for scaling"""
            )
        else:
            logger.info(
                "Excluding %s/%s reflections\n%s",
                n_excluded,
                reflection_table.size(),
                reasons,
            )

        if params.reflection_selection.method == "intensity_ranges":
            reflection_table = quasi_normalisation(reflection_table, experiment)
        if (
            params.reflection_selection.method in (None, Auto, "auto", "quasi_random")
        ) or (
            experiment.scaling_model.id_ == "physical"
            and "absorption" in experiment.scaling_model.components
        ):
            if experiment.scan:
                # calculate phi (degrees) from the frame (z) component of the
                # observed centroid, then the crystal frame vectors
                reflection_table["phi"] = (
                    reflection_table["xyzobs.px.value"].parts()[2]
                    * experiment.scan.get_oscillation()[1]
                )
                reflection_table = calc_crystal_frame_vectors(
                    reflection_table, experiment
                )

        return SingleScaler(params, experiment, reflection_table, for_multi)
Example #46
def calculate_scales(self, block_id=0):
    """Calculate and return inverse scales for a given block."""
    scales = flex.exp(
        flex.double(self._n_refl[block_id], self._parameters[0])
        / (2.0 * (self._d_values[block_id] * self._d_values[block_id]))
    )
    return scales
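A minimal numeric illustration of the formula above (a B-factor-like decay), with invented values for the single parameter and the resolution:

import math

B = 0.2  # invented value standing in for self._parameters[0]
d = 1.5  # invented resolution in Angstroms (a self._d_values entry)
inverse_scale = math.exp(B / (2.0 * d * d))  # exp(B / (2 d^2)) ~ 1.045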
Example #47
def test(args=[]):
    # Python and cctbx imports
    from math import pi
    from scitbx import matrix
    from libtbx.phil import parse
    from libtbx.test_utils import approx_equal

    # Import for surgery on reflection_tables
    from dials.array_family import flex

    # Get module to build models using PHIL
    import dials.test.algorithms.refinement.setup_geometry as setup_geometry

    # We will set up a mock scan and a mock experiment list
    from dxtbx.model import ScanFactory
    from dxtbx.model.experiment_list import ExperimentList, Experiment

    # Crystal parameterisations
    from dials.algorithms.refinement.parameterisation.crystal_parameters import \
        CrystalOrientationParameterisation, CrystalUnitCellParameterisation

    # Symmetry constrained parameterisation for the unit cell
    from cctbx.uctbx import unit_cell
    from rstbx.symmetry.constraints.parameter_reduction import \
        symmetrize_reduce_enlarge

    # Reflection prediction
    from dials.algorithms.spot_prediction import IndexGenerator
    from dials.algorithms.refinement.prediction import ScansRayPredictor, \
      ExperimentsPredictor
    from dials.algorithms.spot_prediction import ray_intersection
    from cctbx.sgtbx import space_group, space_group_symbols

    #############################
    # Setup experimental models #
    #############################

    master_phil = parse("""
      include scope dials.test.algorithms.refinement.geometry_phil
      include scope dials.test.algorithms.refinement.minimiser_phil
      """,
                        process_includes=True)

    # build models, with a larger crystal than default in order to get enough
    # reflections on the 'still' image
    param = """
  geometry.parameters.crystal.a.length.range=40 50;
  geometry.parameters.crystal.b.length.range=40 50;
  geometry.parameters.crystal.c.length.range=40 50;
  geometry.parameters.random_seed = 42"""
    models = setup_geometry.Extract(master_phil,
                                    cmdline_args=args,
                                    local_overrides=param)

    crystal = models.crystal
    mydetector = models.detector
    mygonio = models.goniometer
    mybeam = models.beam

    # Build a mock scan for a 1.5 degree wedge. Only used for generating indices near
    # the Ewald sphere
    sf = ScanFactory()
    myscan = sf.make_scan(image_range=(1, 1),
                          exposure_times=0.1,
                          oscillation=(0, 1.5),
                          epochs=range(1),
                          deg=True)
    sweep_range = myscan.get_oscillation_range(deg=False)
    im_width = myscan.get_oscillation(deg=False)[1]
    assert approx_equal(im_width, 1.5 * pi / 180.)

    # Build experiment lists
    stills_experiments = ExperimentList()
    stills_experiments.append(
        Experiment(beam=mybeam,
                   detector=mydetector,
                   crystal=crystal,
                   imageset=None))
    scans_experiments = ExperimentList()
    scans_experiments.append(
        Experiment(beam=mybeam,
                   detector=mydetector,
                   crystal=crystal,
                   goniometer=mygonio,
                   scan=myscan,
                   imageset=None))

    ##########################################################
    # Parameterise the models (only for perturbing geometry) #
    ##########################################################

    xlo_param = CrystalOrientationParameterisation(crystal)
    xluc_param = CrystalUnitCellParameterisation(crystal)

    ################################
    # Apply known parameter shifts #
    ################################

    # rotate crystal (=5 mrad each rotation)
    xlo_p_vals = []
    p_vals = xlo_param.get_param_vals()
    xlo_p_vals.append(p_vals)
    new_p_vals = [a + b for a, b in zip(p_vals, [5., 5., 5.])]
    xlo_param.set_param_vals(new_p_vals)

    # change unit cell (1.0 Angstrom offsets to the cell lengths,
    # 0.5 degrees added to the gamma angle)
    xluc_p_vals = []
    p_vals = xluc_param.get_param_vals()
    xluc_p_vals.append(p_vals)
    cell_params = crystal.get_unit_cell().parameters()
    cell_params = [
        a + b for a, b in zip(cell_params, [1.0, 1.0, -1.0, 0.0, 0.0, 0.5])
    ]
    new_uc = unit_cell(cell_params)
    newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
    S = symmetrize_reduce_enlarge(crystal.get_space_group())
    S.set_orientation(orientation=newB)
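    # forward_independent_parameters() yields the symmetry-reduced cell
    # parameters; the parameterisation presumably works in units of 1e-5,
    # hence the scaling before handing the values back.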
    X = tuple([e * 1.e5 for e in S.forward_independent_parameters()])
    xluc_param.set_param_vals(X)

    # keep track of the target crystal model to compare with refined
    from copy import deepcopy
    target_crystal = deepcopy(crystal)

    #############################
    # Generate some reflections #
    #############################

    # All indices in a 2.0 Angstrom sphere for crystal
    resolution = 2.0
    index_generator = IndexGenerator(
        crystal.get_unit_cell(),
        space_group(space_group_symbols(1).hall()).type(), resolution)
    indices = index_generator.to_array()

    # Build a ray predictor and predict rays close to the Ewald sphere by using
    # the narrow rotation scan
    ref_predictor = ScansRayPredictor(scans_experiments, sweep_range)
    obs_refs = ref_predictor(indices, experiment_id=0)

    # Take only those rays that intersect the detector
    intersects = ray_intersection(mydetector, obs_refs)
    obs_refs = obs_refs.select(intersects)

    # Add in flags and ID columns by copying into standard reflection table
    tmp = flex.reflection_table.empty_standard(len(obs_refs))
    tmp.update(obs_refs)
    obs_refs = tmp

    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.
    px_size = mydetector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.)**2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.)**2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.)**2)
    obs_refs['xyzobs.mm.variance'] = flex.vec3_double(var_x, var_y, var_phi)

    # Re-predict using the stills reflection predictor
    stills_ref_predictor = ExperimentsPredictor(stills_experiments)
    obs_refs_stills = stills_ref_predictor(obs_refs)

    # Set 'observed' centroids from the predicted ones
    obs_refs_stills['xyzobs.mm.value'] = obs_refs_stills['xyzcal.mm']

    ###############################
    # Undo known parameter shifts #
    ###############################

    xlo_param.set_param_vals(xlo_p_vals[0])
    xluc_param.set_param_vals(xluc_p_vals[0])

    # make a refiner
    from dials.algorithms.refinement.refiner import phil_scope
    params = phil_scope.fetch(source=parse('')).extract()

    # Change this to get a plot
    do_plot = False
    if do_plot:
        params.refinement.refinery.journal.track_parameter_correlation = True

    from dials.algorithms.refinement.refiner import RefinerFactory
    # decrease bin_size_fraction to terminate on RMSD convergence
    params.refinement.target.bin_size_fraction = 0.01
    params.refinement.parameterisation.beam.fix = "all"
    params.refinement.parameterisation.detector.fix = "all"
    refiner = RefinerFactory.from_parameters_data_experiments(
        params, obs_refs_stills, stills_experiments, verbosity=0)

    # run refinement
    history = refiner.run()

    # regression tests
    assert len(history["rmsd"]) == 9

    refined_crystal = refiner.get_experiments()[0].crystal
    uc1 = refined_crystal.get_unit_cell()
    uc2 = target_crystal.get_unit_cell()
    assert uc1.is_similar_to(uc2)

    if do_plot:
        plt = refiner.parameter_correlation_plot(
            len(history["parameter_correlation"]) - 1)
        plt.show()
Example #48
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        from dxtbx.model import ExperimentList
        from scitbx.math import five_number_summary
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        assert len(reflections) == 1
        reflections = reflections[0]
        print("Found", len(reflections), "reflections", "and",
              len(experiments), "experiments")

        filtered_reflections = flex.reflection_table()
        filtered_experiments = ExperimentList()

        skipped_reflections = flex.reflection_table()
        skipped_experiments = ExperimentList()

        if params.detector is not None:
            culled_reflections = flex.reflection_table()
            culled_experiments = ExperimentList()
            detector = experiments.detectors()[params.detector]
            for expt_id, experiment in enumerate(experiments):
                refls = reflections.select(reflections['id'] == expt_id)
                if experiment.detector is detector:
                    culled_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(culled_experiments) - 1)
                    culled_reflections.extend(refls)
                else:
                    skipped_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(skipped_experiments) - 1)
                    skipped_reflections.extend(refls)

            print(
                "RMSD filtering %d experiments using detector %d, out of %d" %
                (len(culled_experiments), params.detector, len(experiments)))
            reflections = culled_reflections
            experiments = culled_experiments

        difference_vector_norms = (reflections['xyzcal.mm'] -
                                   reflections['xyzobs.mm.value']).norms()

        if params.max_delta is not None:
            sel = difference_vector_norms <= params.max_delta
            reflections = reflections.select(sel)
            difference_vector_norms = difference_vector_norms.select(sel)

        data = flex.double()
        counts = flex.double()
        for i in range(len(experiments)):
            dvns = difference_vector_norms.select(reflections['id'] == i)
            counts.append(len(dvns))
            if len(dvns) == 0:
                data.append(0)
                continue
            rmsd = math.sqrt(flex.sum_sq(dvns) / len(dvns))
            data.append(rmsd)
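        # Convert per-image RMSDs from mm to microns for reporting.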
        data *= 1000
        subset = data.select(counts > 0)
        print(len(subset), "experiments with > 0 reflections")

        if params.show_plots:
            h = flex.histogram(subset, n_slots=40)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(h.slot_centers().as_numpy_array(),
                    h.slots().as_numpy_array(), '-')
            plt.title("Histogram of %d image RMSDs" % len(subset))

            fig = plt.figure()
            plt.boxplot(subset, vert=False)
            plt.title("Boxplot of %d image RMSDs" % len(subset))
            plt.show()

        outliers = counts == 0
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(subset)
        print(
            "Five number summary of RMSDs (microns): min %.1f, q1 %.1f, med %.1f, q3 %.1f, max %.1f"
            % (min_x, q1_x, med_x, q3_x, max_x))
        iqr_x = q3_x - q1_x
        cut_x = params.iqr_multiplier * iqr_x
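        # Tukey-style one-sided fence: flag images whose RMSD exceeds
        # Q3 + multiplier * IQR.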
        outliers.set_selected(data > q3_x + cut_x, True)
        # Deliberately no lower fence: don't throw away images that are
        # outliers in the 'good' (low-RMSD) direction.

        for i in range(len(experiments)):
            if outliers[i]:
                continue
            refls = reflections.select(reflections['id'] == i)
            refls['id'] = flex.int(len(refls), len(filtered_experiments))
            filtered_reflections.extend(refls)
            filtered_experiments.append(experiments[i])

        zeroes = counts == 0
        n_zero = len(counts.select(zeroes))
        print(
            "Removed %d bad experiments and %d experiments with zero reflections, out of %d (%.1f%%)"
            % (len(experiments) - len(filtered_experiments) - n_zero, n_zero,
               len(experiments), 100 *
               ((len(experiments) - len(filtered_experiments)) /
                len(experiments))))

        if params.detector is not None:
            crystals = filtered_experiments.crystals()
            for expt_id, experiment in enumerate(skipped_experiments):
                if experiment.crystal in crystals:
                    filtered_experiments.append(experiment)
                    refls = skipped_reflections.select(
                        skipped_reflections['id'] == expt_id)
                    refls['id'] = flex.int(len(refls),
                                           len(filtered_experiments) - 1)
                    filtered_reflections.extend(refls)

        if params.delta_psi_filter is not None:
            delta_psi = filtered_reflections['delpsical.rad'] * 180 / math.pi
            sel = (delta_psi <= params.delta_psi_filter) & (
                delta_psi >= -params.delta_psi_filter)
            n_before = len(filtered_reflections)
            filtered_reflections = filtered_reflections.select(sel)
            print("Filtering by delta psi, removing %d out of %d reflections" %
                  (n_before - len(filtered_reflections), n_before))

        print("Final experiment count", len(filtered_experiments))

        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(filtered_experiments)
        dump.as_json(params.output.filtered_experiments)

        filtered_reflections.as_pickle(params.output.filtered_reflections)
Example #49
    def tst_split_blocks_non_overlapping(self):
        from dials.array_family import flex
        from random import randint, uniform, seed
        from dials.algorithms.integration.integrator import JobList

        from scitbx.array_family import shared
        blocks = shared.tiny_int_2([(0, 10), (10, 20), (20, 30), (30, 35),
                                    (35, 40), (40, 50), (50, 60), (60, 70),
                                    (70, 80), (80, 90), (90, 100), (100, 110)])

        jobs = JobList((0, 1), blocks)

        r = flex.reflection_table()
        r['value1'] = flex.double()
        r['value2'] = flex.int()
        r['value3'] = flex.double()
        r['bbox'] = flex.int6()
        r['id'] = flex.int()
        expected = []
        for i in range(100):
            x0 = randint(0, 100)
            x1 = x0 + randint(1, 10)
            y0 = randint(0, 100)
            y1 = y0 + randint(1, 10)
            z0 = randint(0, 100)
            z1 = z0 + randint(1, 10)
            v1 = uniform(0, 100)
            v2 = randint(0, 100)
            v3 = uniform(0, 100)
            r.append({
                'id': 0,
                'value1': v1,
                'value2': v2,
                'value3': v3,
                'bbox': (x0, x1, y0, y1, z0, z1)
            })

            for j in range(len(blocks)):
                b0 = blocks[j][0]
                b1 = blocks[j][1]
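                # The three clauses below jointly test whether [z0, z1)
                # overlaps [b0, b1); together they are equivalent to
                # z0 < b1 and z1 > b0.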
                if ((z0 >= b0 and z1 <= b1) or (z0 < b1 and z1 >= b1)
                        or (z0 < b0 and z1 > b0)):
                    z00 = max(b0, z0)
                    z11 = min(b1, z1)
                    expected.append({
                        'id': 0,
                        'value1': v1,
                        'value2': v2,
                        'value3': v3,
                        'bbox': (x0, x1, y0, y1, z00, z11),
                        'partial_id': i,
                    })

        jobs.split(r)
        assert (len(r) == len(expected))
        EPS = 1e-7
        for r1, r2 in zip(r, expected):
            assert (r1['bbox'] == r2['bbox'])
            assert (r1['partial_id'] == r2['partial_id'])
            assert (abs(r1['value1'] - r2['value1']) < EPS)
            assert (r1['value2'] == r2['value2'])
            assert (abs(r1['value3'] - r2['value3']) < EPS)

        print("OK")
Example #50
File: test_model.py (project: hattne/dials)
def mock_errormodel():
    """Mock error model."""
    em = MagicMock()
    em.refined_parameters = [1.0, 0.1]
    em.update_variances.return_value = flex.double([1.0, 1.1, 1.0, 1.0])
    return em
Example #51
    def run(self):
        from dials.algorithms.integration.integrator import ReflectionManager
        from dials.algorithms.integration.integrator import JobList
        from dials.array_family import flex
        jobs = JobList()
        jobs.add((0, 1), self.array_range, self.block_size)

        # Create the executor
        executor = ReflectionManager(jobs, self.reflections)

        # Ensure the tasks make sense
        jobs = [executor.job(i) for i in range(len(executor))]
        assert (len(executor) == 12)
        assert (not executor.finished())
        assert (len(jobs) == 12)
        for i in range(12):
            assert (jobs[i].frames() == (i * 10, i * 10 + 20))

        # Get the task splits and check each matches the expected partition
        data = [executor.split(i) for i in range(12)]
        for i in range(12):
            assert (len(data[i]) == len(self.processed[i]))

        # Add some results
        for i in range(12):
            data[i]["data"] = flex.double(len(data[i]), i + 1)

        # Accumulate the data again
        assert (not executor.finished())
        for i in range(12):
            executor.accumulate(i, data[i])
        assert (executor.finished())
        assert (executor.finished())

        # Get results and check they're as expected
        data = executor.data()
        result = data["data"]
        bbox = data["bbox"]
        for i in range(len(self.processed)):
            for j in range(len(self.processed[i])):
                assert (result[self.processed[i][j]] == i + 1)

        # Test passed
        print("OK")
Example #52
def model_reflection_rt0(reflection, experiment, params):
    import math
    import random
    from scitbx import matrix
    from dials.array_family import flex

    d2r = math.pi / 180.0

    hkl = reflection['miller_index']
    xyz = reflection['xyzcal.px']
    xyz_mm = reflection['xyzcal.mm']

    if params.debug:
        print('hkl = %d %d %d' % hkl)
        print('xyz px = %f %f %f' % xyz)
        print('xyz mm = %f %f %f' % xyz_mm)
        if reflection['entering']:
            print('entering')
        else:
            print('exiting')

    Amat = matrix.sqr(experiment.crystal.get_A_at_scan_point(int(xyz[2])))
    p0_star = Amat * hkl

    angles = predict_angles(p0_star, experiment)

    assert (angles)

    if params.debug:
        print('angles = %f %f' % angles)

    angle = angles[0] if (
        abs(angles[0] - xyz_mm[2]) < abs(angles[1] - xyz_mm[2])) else angles[1]

    p = experiment.detector[reflection['panel']]
    n = matrix.col(p.get_normal())
    s1 = matrix.col(reflection['s1'])
    t = p.get_thickness() / math.cos(s1.angle(n))
    if params.debug:
        print('dqe = %f' % reflection['dqe'])

    if params.physics:
        i0 = reflection['intensity.sum.value'] / reflection['dqe']
    else:
        i0 = reflection['intensity.sum.value']

    if params.min_isum:
        if i0 < params.min_isum:
            return

    s1 = reflection['s1']
    a = matrix.col(experiment.goniometer.get_rotation_axis())
    s0 = matrix.col(experiment.beam.get_s0())

    if params.debug:
        print('s1 = %f %f %f' % s1)

    pixels = reflection['shoebox']
    pixels.flatten()
    data = pixels.data
    dz, dy, dx = data.focus()

    # since now 2D data
    data.reshape(flex.grid(dy, dx))

    if params.show:
        print('Observed reflection (flattened in Z):')
        print()
        for j in range(dy):
            for i in range(dx):
                print('%5d' % data[(j, i)], end=' ')
            print()

    if params.sigma_m > 0:
        sigma_m = params.sigma_m * d2r
    else:
        sigma_m = experiment.profile.sigma_m() * d2r

    if params.sigma_b > 0:
        sigma_b = params.sigma_b * d2r
    else:
        sigma_b = experiment.profile.sigma_b() * d2r

    r0 = xyz_mm[2]

    detector = experiment.detector

    patch = flex.double(dy * dx, 0)
    patch.reshape(flex.grid(dy, dx))

    bbox = reflection['bbox']

    scale = params.scale
    if params.show:
        print('%d rays' % int(round(i0 * scale)))
    for i in range(int(round(i0 * scale))):
        if params.sigma_l:
            l_scale = random.gauss(1, params.sigma_l)
            b = random_vector_cone(s0 * l_scale, sigma_b)
        else:
            b = random_vector_cone(s0, sigma_b)
        if params.sigma_cell:
            cell_scale = random.gauss(1, params.sigma_cell)
            p0 = random_vector_cone(cell_scale * Amat * hkl, sigma_m)
        else:
            p0 = random_vector_cone(Amat * hkl, sigma_m)
        if params.rs_node_size > 0:
            ns = params.rs_node_size
            dp0 = matrix.col(
                (random.gauss(0, ns), random.gauss(0, ns), random.gauss(0, ns)))
            p0 += dp0
        angles = predict_angles(p0, experiment, b)
        if angles is None:
            # scattered ray ended up in blind region
            continue
        r = angles[0] if reflection['entering'] else angles[1]
        p = p0.rotate(a, r)
        s1 = p + b

        if params.physics:
            model_path_through_sensor(detector, reflection, s1, patch, scale)

        else:
            panel, xy = detector.get_ray_intersection(s1)

            # FIXME eventually stop using this function...
            x, y = detector[panel].millimeter_to_pixel(xy)
            if x < bbox[0] or x >= bbox[1]:
                continue
            if y < bbox[2] or y >= bbox[3]:
                continue
            x -= bbox[0]
            y -= bbox[2]
            # FIXME in here try to work out probability distribution along path
            # length through the detector sensitive surface i.e. deposit fractional
            # counts along pixels (and allow for DQE i.e. photon passing right through
            # the detector)
            patch[(int(y), int(x))] += 1.0 / scale

    if params.show:
        print('Simulated reflection (flattened in Z):')
        print()
        for j in range(dy):
            for i in range(dx):
                print('%5d' % int(patch[(j, i)]), end=' ')
            print()

    cc = profile_correlation(data, patch)
    print('Correlation coefficient: %.3f isum: %.1f' % (cc, i0))

    return cc
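
random_vector_cone is used above but not defined in this snippet; the assumed behaviour is to perturb a vector by a Gaussian-distributed polar angle, uniform in azimuth about the original direction. A hedged sketch using scitbx vector utilities (the helper name and the sampling details are assumptions, not the original implementation):

import math
import random

from scitbx import matrix


def random_vector_cone_sketch(vector, sigma):
    # Gaussian polar offset (radians) and a uniform azimuth about the vector.
    theta = random.gauss(0.0, sigma)
    phi = random.uniform(0.0, 2.0 * math.pi)
    # Tilt about an arbitrary perpendicular axis, then spin the tilt
    # direction uniformly around the original vector.
    axis = vector.ortho().normalize()
    tilted = vector.rotate_around_origin(axis, theta)
    return tilted.rotate_around_origin(vector.normalize(), phi)


b = random_vector_cone_sketch(matrix.col((0.0, 0.0, -1.0)), 0.001)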
Example #53
0
    def tst_split_blocks_overlapping(self):
        from dials.array_family import flex
        from random import randint, uniform
        from dials.algorithms.integration.integrator import JobList
        from scitbx.array_family import shared
        blocks = shared.tiny_int_2([(0, 10), (5, 15), (10, 20), (15, 25),
                                    (20, 30), (25, 35), (30, 40), (35, 45),
                                    (40, 50), (45, 55), (50, 60), (55, 65),
                                    (60, 70), (65, 75), (70, 80), (75, 85),
                                    (80, 90), (85, 95), (90, 100), (95, 105),
                                    (100, 110)])

        jobs = JobList((0, 1), blocks)

        r = flex.reflection_table()
        r['value1'] = flex.double()
        r['value2'] = flex.int()
        r['value3'] = flex.double()
        r['bbox'] = flex.int6()
        r['id'] = flex.int()
        expected = []
        for i in range(100):
            x0 = randint(0, 100)
            x1 = x0 + randint(1, 10)
            y0 = randint(0, 100)
            y1 = y0 + randint(1, 10)
            z0 = randint(0, 90)
            z1 = z0 + randint(1, 20)
            v1 = uniform(0, 100)
            v2 = randint(0, 100)
            v3 = uniform(0, 100)
            r.append({
                'id': 0,
                'value1': v1,
                'value2': v2,
                'value3': v3,
                'bbox': (x0, x1, y0, y1, z0, z1)
            })
            expected.append({
                'id': 0,
                'value1': v1,
                'value2': v2,
                'value3': v3,
                'bbox': (x0, x1, y0, y1, z0, z1)
            })

        jobs.split(r)
        assert (len(r) > 100)
        for r1 in r:
            v1 = r1['value1']
            v2 = r1['value2']
            v3 = r1['value3']
            bbox = r1['bbox']
            pid = r1['partial_id']

            z0 = bbox[4]
            z1 = bbox[5]
            success = False
            for i in range(len(blocks)):
                b0 = blocks[i][0]
                b1 = blocks[i][1]
                if z0 >= b0 and z1 <= b1:
                    success = True
                    break
            assert (success)

            v11 = expected[pid]['value1']
            v22 = expected[pid]['value2']
            v33 = expected[pid]['value3']
            bb = expected[pid]['bbox']
            assert (v11 == v1)
            assert (v22 == v2)
            assert (v33 == v3)
            assert (bb[0] == bbox[0])
            assert (bb[1] == bbox[1])
            assert (bb[2] == bbox[2])
            assert (bb[3] == bbox[3])

        print("OK")
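
The assertion loop above checks that every partial produced by JobList.split lands wholly inside one of the overlapping z-blocks. The underlying splitting idea can be sketched in plain Python (illustrative only, not the JobList implementation):

def split_over_blocks(z0, z1, blocks):
    """Yield the sub-intervals of [z0, z1) clipped to each covering block."""
    for b0, b1 in blocks:
        lo, hi = max(z0, b0), min(z1, b1)
        if lo < hi:
            yield (lo, hi)


blocks = [(0, 10), (5, 15), (10, 20)]
parts = list(split_over_blocks(3, 12, blocks))
# Every partial fits inside a single block, mirroring the assertion above.
assert all(
    any(b0 <= p0 and p1 <= b1 for b0, b1 in blocks) for p0, p1 in parts)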
Example #54
0
    def _create_summation_matrix(self):
        """Create a summation matrix to allow sums into intensity bins.

        This routine bins the reflections into bins equally spaced in
        log(intensity), to give a representative sample across the full
        intensity range. To avoid undersampling, each bin is required to
        contain at least 100 reflections, unless the dataset is too small
        to allow this."""
        n = self.Ih_table.size
        self.binning_info["n_reflections"] = n
        summation_matrix = sparse.matrix(n, self.n_bins)
        Ih = self.Ih_table.Ih_values * self.Ih_table.inverse_scale_factors
        size_order = flex.sort_permutation(Ih, reverse=True)
        Imax = max(Ih)
        Imin = max(1.0, min(Ih))  # avoid log issues
        spacing = (log(Imax) - log(Imin)) / float(self.n_bins)
        boundaries = [Imax] + [
            exp(log(Imax) - (i * spacing)) for i in range(1, self.n_bins + 1)
        ]
        boundaries[-1] = min(Ih) - 0.01
        self.binning_info["bin_boundaries"] = boundaries
        self.binning_info["refl_per_bin"] = flex.double()

        n_cumul = 0
        if Ih.size() > 100 * self.min_reflections_required:
            self.min_reflections_required = int(Ih.size() / 100.0)
        min_per_bin = min(self.min_reflections_required,
                          int(n / (3.0 * self.n_bins)))
        for i in range(len(boundaries) - 1):
            maximum = boundaries[i]
            minimum = boundaries[i + 1]
            sel1 = Ih <= maximum
            sel2 = Ih > minimum
            sel = sel1 & sel2
            isel = sel.iselection()
            n_in_bin = isel.size()
            if n_in_bin < min_per_bin:  # need more in this bin
                m = n_cumul + min_per_bin
                if m < n:  # still some refl left to use
                    idx = size_order[m]
                    intensity = Ih[idx]
                    boundaries[i + 1] = intensity
                    minimum = boundaries[i + 1]
                    sel = sel1 & (Ih > minimum)
                    isel = sel.iselection()
                    n_in_bin = isel.size()
            self.binning_info["refl_per_bin"].append(n_in_bin)
            for j in isel:
                summation_matrix[j, i] = 1
            n_cumul += n_in_bin
        cols_to_del = []
        for i, col in enumerate(summation_matrix.cols()):
            if col.non_zeroes < min_per_bin - 5:
                cols_to_del.append(i)
        n_new_cols = summation_matrix.n_cols - len(cols_to_del)
        if n_new_cols == self.n_bins:
            for i in range(len(boundaries) - 1):
                maximum = boundaries[i]
                minimum = boundaries[i + 1]
                sel1 = Ih <= maximum
                sel2 = Ih > minimum
                sel = sel1 & sel2
                m = flex.mean(Ih.select(sel))
                self.binning_info["mean_intensities"].append(m)
            return summation_matrix
        new_sum_matrix = sparse.matrix(summation_matrix.n_rows, n_new_cols)
        next_col = 0
        refl_per_bin = flex.double()
        new_bounds = []
        for i, col in enumerate(summation_matrix.cols()):
            if i not in cols_to_del:
                new_sum_matrix[:, next_col] = col
                next_col += 1
                new_bounds.append(boundaries[i])
                refl_per_bin.append(self.binning_info["refl_per_bin"][i])
        self.binning_info["refl_per_bin"] = refl_per_bin
        new_bounds.append(boundaries[-1])
        self.binning_info["bin_boundaries"] = new_bounds
        for i in range(len(new_bounds) - 1):
            maximum = new_bounds[i]
            minimum = new_bounds[i + 1]
            sel1 = Ih <= maximum
            sel2 = Ih > minimum
            sel = sel1 & sel2
            m = flex.mean(Ih.select(sel))
            self.binning_info["mean_intensities"].append(m)
        return new_sum_matrix
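
The boundary construction above follows directly from equal spacing in log(intensity); isolated from the class, and assuming Imax > Imin, it reduces to:

from math import exp, log


def log_spaced_boundaries(Imax, Imin, n_bins):
    """Boundaries from Imax down to Imin, equally spaced in log(I)."""
    Imin = max(1.0, Imin)  # avoid log issues, as in the method above
    spacing = (log(Imax) - log(Imin)) / float(n_bins)
    return [Imax] + [exp(log(Imax) - i * spacing) for i in range(1, n_bins + 1)]


bounds = log_spaced_boundaries(1e5, 1.0, 10)
assert all(hi > lo for hi, lo in zip(bounds, bounds[1:]))  # strictly decreasing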
Example #55
0
def tv_alg(image, mask, tolerance=1e-3, max_iter=10):
    from scipy.optimize import fmin_tnc
    from dials.array_family import flex
    from math import sqrt

    F = image
    L = flex.double(mask.accessor())
    for i in range(len(mask)):
        if mask[i]:
            L[i] = 5
        else:
            L[i] = 0
    U = flex.double(mask.accessor(), 0)
    for i in range(len(U)):
        U[i] = F[i]

    def func(U):

        U1 = flex.double(F.accessor())
        for i in range(len(F)):
            U1[i] = U[i]
        U = U1

        sum1 = 0
        for f, u, l in zip(F, U, L):
            sum1 += l * (f - u)**2
        sum1 *= 0.5

        del_U = del_op(U)
        sum2 = 0
        for du in del_U:
            sum2 += sqrt(du[0]**2 + du[1]**2)

        Y = sum1 + sum2

        FP = fprime(U)
        print(Y)
        return Y, FP

    def fprime(U):

        U1 = flex.double(F.accessor())
        for i in range(len(F)):
            U1[i] = U[i]
        U = U1

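        # Note: calc_c is declared with signature (U, j, i) but called below
        # as calc_c(U, i, j); the swapped names are consistent with the
        # (row, col) indexing used, so U[i, j] inside calc_c addresses the
        # same element as U[j, i] in the enclosing loops.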
        def calc_c(U, j, i):
            return (2 * U[i, j]**2 - 2 * U[i, j] *
                    (U[i - 1, j] + U[i, j - 1]) +
                    (U[i - 1, j]**2 + U[i, j - 1]**2))

        DU = flex.double(F.accessor(), 0)
        for j in range(DU.all()[0]):
            for i in range(DU.all()[1]):
                if 0 < i < DU.all()[1] - 1 and 0 < j < DU.all()[0] - 1:
                    C1 = calc_c(U, i, j)  # C_{i,j}
                    C2 = calc_c(U, i + 1, j)  # C_{i+1,j}
                    C3 = calc_c(U, i, j + 1)  # C_{i,j+1}
                    DU[j, i] = ((2 * U[j, i] + U[j, i - 1] + U[j - 1, i]) /
                                sqrt(C1 + 0.001) +
                                (U[j, i] - U[j, i + 1]) / sqrt(C2 + 0.001) +
                                (U[j, i] - U[j + 1, i]) / sqrt(C3 + 0.001) -
                                L[j, i] * (F[j, i] - U[j, i]))
        # print(sum(DU))
        return list(DU)

    x0 = U

    min_F = min(F)
    max_F = max(F)
    bounds = [(min_F, max_F) for x in x0]

    # U = fmin_bfgs(func, x0, epsilon=1e-5*sum(image), maxiter=max_iter)
    U, nfeval, rc = fmin_tnc(func, list(x0), bounds=bounds, disp=3)

    U1 = flex.double(F.accessor())
    for i in range(len(F)):
        U1[i] = U[i]
    U = U1

    return U
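
func above evaluates a weighted total-variation objective, E(U) = 0.5 * sum(L * (F - U)**2) + sum(|grad U|). A compact NumPy sketch of the same objective, with forward differences standing in for the del_op gradient operator defined elsewhere (an assumption about its behaviour):

import numpy as np


def tv_objective(U, F, L):
    """Weighted TV objective: data fidelity plus total variation."""
    fidelity = 0.5 * np.sum(L * (F - U) ** 2)
    # Forward differences with a replicated edge approximate the image
    # gradient that del_op is assumed to compute.
    du_y = np.diff(U, axis=0, append=U[-1:, :])
    du_x = np.diff(U, axis=1, append=U[:, -1:])
    return fidelity + np.sum(np.sqrt(du_y ** 2 + du_x ** 2))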
Example #56
0


if __name__ == "__main__":

    from dials.array_family import flex

    height = 50
    width = 50
    image = flex.double(flex.grid(height, width))

    X = []
    Y = []
    for j in range(height):
        for i in range(width):
            X.append(i)
            Y.append(j)
            image[j, i] = ice_background(j, i, height, width)

    x0 = 5
    x1 = 15
    y0 = 5
    y1 = 15

    mask = flex.bool(flex.grid(height, width), True)
Example #57
0
 def calculate_scales(self, block_id=0):
     """Calculate and return inverse scales for a given block."""
     return flex.double(self.n_refl[block_id], self._parameters[0])
Example #58
0
File: test_model.py  Project: hattne/dials
import pytest

from dials.array_family import flex

# Import path assumed for the KB scaling model in the dials codebase.
from dials.algorithms.scaling.model.model import KBScalingModel


def test_KBScalingModel():
    """Test for the KB Scaling Model."""

    # Test standard initialisation method.
    configdict = {"corrections": ["scale", "decay"]}
    parameters_dict = {
        "scale": {
            "parameters": flex.double([1.2]),
            "parameter_esds": flex.double([0.1]),
        },
        "decay": {
            "parameters": flex.double([0.01]),
            "parameter_esds": flex.double([0.02]),
        },
    }
    KBmodel = KBScalingModel(parameters_dict, configdict)
    assert KBmodel.id_ == "KB"
    assert "scale" in KBmodel.components
    assert "decay" in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [1.2]
    assert list(KBmodel.components["decay"].parameters) == [0.01]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.1]
    assert list(KBmodel.components["decay"].parameter_esds) == [0.02]

    # Test from_dict initialisation method.
    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "configuration_parameters": {"corrections": ["scale"]},
    }
    KBmodel = KBScalingModel.from_dict(KB_dict)
    assert KBmodel.is_scaled is True
    assert "scale" in KBmodel.components
    assert "decay" not in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [0.5]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.05]

    new_dict = KBmodel.to_dict()
    assert new_dict == KB_dict

    # Test again with all parameters
    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "decay": {
            "n_parameters": 1,
            "parameters": [0.2],
            "est_standard_devs": [0.02],
            "null_parameter_value": 0,
        },
        "configuration_parameters": {"corrections": ["scale", "decay"]},
    }
    KBmodel = KBScalingModel.from_dict(KB_dict)
    assert KBmodel.is_scaled is True
    assert "scale" in KBmodel.components
    assert "decay" in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [0.5]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.05]
    assert list(KBmodel.components["decay"].parameters) == [0.2]
    assert list(KBmodel.components["decay"].parameter_esds) == [0.02]

    new_dict = KBmodel.to_dict()
    assert new_dict == KB_dict

    with pytest.raises(RuntimeError):
        KB_dict["__id__"] = "physical"
        KBmodel = KBScalingModel.from_dict(KB_dict)

    assert KBmodel.consecutive_refinement_order == [["scale", "decay"]]
    KBmodel.show()
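
For reference, the scale and decay components tested here combine multiplicatively per reflection. A hedged sketch, assuming the common convention g = K * exp(B / (2 * d**2)) for the decay term (the helper below is illustrative, not the DIALS API):

from math import exp


def kb_inverse_scales(K, B, d_spacings):
    # Assumed convention: g_i = K * exp(B / (2 * d_i**2)).
    return [K * exp(B / (2.0 * d * d)) for d in d_spacings]


g = kb_inverse_scales(K=1.2, B=0.01, d_spacings=[1.0, 2.0, 3.0])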
Example #59
0
 def __init__(self, initial_value=1.00):
     self.parameters = flex.double([initial_value])
     self._n_params = 1
Example #60
0
File: gen.py  Project: dials/dials_scratch
        # print "---"
        a = list(poisson(1, 100))

        # a[4] = 1000
        # a[5] = 100

        mean_m, weight_m, res_m = m_estimate(a)
        # print mean_m
        # from matplotlib import pylab
        # pylab.plot(res_m)
        # pylab.show()
        means1.append(sum(a) / len(a))
        means3.append(mtl(a))
        mean_m = glm3(a)

        X = flex.double([1] * len(a))
        X.reshape(flex.grid(len(a), 1))
        Y = flex.double(a)
        B = flex.double([0])
        P = flex.double([1] * len(a))
        v = glmc(X, Y, B, P, max_iter=100)

        print(k, mean_m, exp(v.parameters()[0]))
        means4.append(mean_m)

    from matplotlib import pylab

    print("MOM1: ", sum(means1) / len(means1))
    print("MOM3: ", sum(means3) / len(means3))
    print("MOM4: ", sum(means4) / len(means4))
    pylab.plot(means1, color="black")