Code example #1
 def calculate_gradients(self, apm):
     "calculate the gradient vector"
     a = self.error_model.components["a"].parameters[0]
     b = apm.x[0]
     Ih_table = self.error_model.binner.Ih_table
     I_hl = Ih_table.intensities
     g_hl = Ih_table.inverse_scale_factors
     weights = self.error_model.binner.weights
     bin_vars = self.error_model.binner.bin_variances
     sum_matrix = self.error_model.binner.summation_matrix
     bin_counts = self.error_model.binner.binning_info["refl_per_bin"]
     dsig_dc = (b * flex.pow2(I_hl) * (a**2) /
                (self.error_model.binner.sigmaprime * flex.pow2(g_hl)))
     ddelta_dsigma = (-1.0 * self.error_model.binner.delta_hl /
                      self.error_model.binner.sigmaprime)
     deriv = ddelta_dsigma * dsig_dc
     dphi_by_dvar = -2.0 * (flex.double(bin_vars.size(), 0.5) - bin_vars +
                            (1.0 / (2.0 * flex.pow2(bin_vars))))
     term1 = 2.0 * self.error_model.binner.delta_hl * deriv * sum_matrix
     term2a = self.error_model.binner.delta_hl * sum_matrix
     term2b = deriv * sum_matrix
     grad = dphi_by_dvar * ((term1 / bin_counts) -
                            (2.0 * term2a * term2b / flex.pow2(bin_counts)))
     gradients = flex.double([flex.sum(grad * weights) / flex.sum(weights)])
     return gradients
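A gradient like this is easiest to trust once it has been checked against a finite difference (code example #16 below applies the same idea to an error-model target). A minimal self-contained sketch of that check, assuming only a scalar objective f and a parameter list x:

def fd_gradient(f, x, delta=1.0e-6):
    """Central finite-difference gradient of a scalar function f at x."""
    g = []
    for i in range(len(x)):
        xp = list(x)
        xm = list(x)
        xp[i] += 0.5 * delta
        xm[i] -= 0.5 * delta
        g.append((f(xp) - f(xm)) / delta)
    return g

# f(a, b) = a**2 + 3*b has gradient (2*a, 3)
g = fd_gradient(lambda v: v[0] ** 2 + 3 * v[1], [2.0, 1.0])
assert abs(g[0] - 4.0) < 1e-5 and abs(g[1] - 3.0) < 1e-5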
Code example #2
File: delta_cchalf.py Project: kmdalton/dials
    def compute_overall_stats(self):
        # Create lookups for elements by miller index
        index_lookup = defaultdict(list)
        for i, h in enumerate(self.reflection_table["miller_index"]):
            index_lookup[h].append(i)

        # Compute the Overall Sum(X) and Sum(X^2) for each unique reflection

        for h in index_lookup:
            sel = flex.size_t(index_lookup[h])
            intensities = self.reflection_table["intensity"].select(sel)
            n = intensities.size()
            sum_x = flex.sum(intensities)
            sum_x2 = flex.sum(flex.pow2(intensities))
            self.reflection_sums[h] = ReflectionSum(sum_x, sum_x2, n)

        # Compute some numbers
        self._num_datasets = len(set(self.reflection_table["dataset"]))
        self._num_groups = len(set(self.reflection_table["group"]))
        self._num_reflections = self.reflection_table.size()
        self._num_unique = len(self.reflection_sums)

        logger.info(
            """
Summary of input data:
# Datasets: %s
# Groups: %s
# Reflections: %s
# Unique reflections: %s""",
            self._num_datasets,
            self._num_groups,
            self._num_reflections,
            self._num_unique,
        )
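The ReflectionSum triplets stored above (sum_x, sum_x2, n) are enough to recover each reflection's mean and variance later without revisiting the raw intensities. A minimal sketch of the standard identities, assuming the same three fields:

def mean_and_variance(sum_x, sum_x2, n):
    """Sample mean and unbiased variance from running sums."""
    mean = sum_x / n
    var = (sum_x2 - n * mean * mean) / (n - 1) if n > 1 else 0.0
    return mean, var

# intensities [1, 2, 3]: sum_x = 6, sum_x2 = 14 -> mean 2.0, variance 1.0
assert mean_and_variance(6.0, 14.0, 3) == (2.0, 1.0)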
Code example #3
def check_experiment(experiment, reflections):

    # predict reflections in place
    from dials.algorithms.spot_prediction import StillsReflectionPredictor
    sp = StillsReflectionPredictor(experiment)
    UB = experiment.crystal.get_A()
    try:
        sp.for_reflection_table(reflections, UB)
    except RuntimeError:
        return False

    # calculate unweighted RMSDs
    x_obs, y_obs, _ = reflections['xyzobs.px.value'].parts()
    delpsi = reflections['delpsical.rad']
    x_calc, y_calc, _ = reflections['xyzcal.px'].parts()

    # calculate residuals and assign columns
    x_resid = x_calc - x_obs
    x_resid2 = x_resid**2
    y_resid = y_calc - y_obs
    y_resid2 = y_resid**2
    delpsical2 = delpsi**2
    r_x = flex.sum(x_resid2)
    r_y = flex.sum(y_resid2)
    r_z = flex.sum(delpsical2)

    # rmsd calculation
    n = len(reflections)
    rmsds = (sqrt(r_x / n), sqrt(r_y / n), sqrt(r_z / n))

    # check positional RMSDs are within 5 pixels
    if rmsds[0] > 5: return False
    if rmsds[1] > 5: return False

    return True
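The positional check above uses a plain unweighted RMSD, i.e. the square root of the mean squared residual per axis. The same calculation in isolation:

import math

def rmsd(residuals):
    """Unweighted root-mean-square deviation of a list of residuals."""
    return math.sqrt(sum(r * r for r in residuals) / len(residuals))

assert abs(rmsd([3.0, -4.0]) - math.sqrt(12.5)) < 1e-12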
Code example #4
    def compute_functional_and_gradients(self):
        values = self.parameterization(self.x)
        assert -150. < values.BFACTOR < 150.  # limits on the exponent, please
        self.func = self.refinery.fvec_callable(values)
        functional = flex.sum(self.func * self.func)
        self.f = functional
        DELTA = 1.E-7
        self.g = flex.double()
        for x in range(self.n):
            templist = list(self.x)
            templist[x] += DELTA
            dvalues = flex.double(templist)

            dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
            dfunctional = flex.sum(dfunc * dfunc)
            #calculate by finite_difference
            self.g.append((dfunctional - functional) / DELTA)
        self.g[2] = 0.

        print("rms %10.3f; " % math.sqrt(flex.mean(self.func * self.func)),
              file=self.out,
              end='')
        values.show(self.out)

        return self.f, self.g
Code example #5
  def tst_identical_partial(self):

    from dials.algorithms.integration.fit import ProfileFitter
    from scitbx.array_family import flex
    from numpy.random import seed
    seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c = p.deep_copy()
    b = flex.double(flex.grid(9, 9, 9), 0)
    m = flex.bool(flex.grid(9,9,9), True)

    # Get the partial profiles
    pp = p[0:5,:,:]
    mp = m[0:5,:,:]
    cp = c[0:5,:,:]
    bp = b[0:5,:,:]

    # Fit
    fit = ProfileFitter(cp, bp, mp, pp)
    I = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    # Test intensity is the same
    eps = 1e-7
    assert(abs(I[0] - flex.sum(p)) < eps)
    assert(abs(V[0] - flex.sum(p)) < eps)

    print('OK')
Code example #6
def test_identical_partial():
    np.random.seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c = p.deep_copy()
    b = flex.double(flex.grid(9, 9, 9), 0)
    m = flex.bool(flex.grid(9, 9, 9), True)

    # Get the partial profiles
    pp = p[0:5, :, :]
    mp = m[0:5, :, :]
    cp = c[0:5, :, :]
    bp = b[0:5, :, :]

    # Fit
    fit = ProfileFitter(cp, bp, mp, pp)
    intensity = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    # Test intensity is the same
    eps = 1e-7
    assert intensity[0] == pytest.approx(flex.sum(p), abs=eps)
    assert V[0] == pytest.approx(flex.sum(p), abs=eps)
Code example #7
  def tst_with_no_background(self):

    from dials.algorithms.integration.fit import ProfileFitter
    from scitbx.array_family import flex
    from numpy.random import seed
    seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c = add_poisson_noise(100 * p)
    b = flex.double(flex.grid(9, 9, 9), 0)
    m = flex.bool(flex.grid(9,9,9), True)

    # Fit
    fit = ProfileFitter(c, b, m, p)
    I = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    # Test intensity is the same
    eps = 1e-3
    assert(abs(I[0] - flex.sum(c)) < eps)
    assert(abs(V[0] - I[0]) < eps)

    print('OK')
Code example #8
 def rmsds(self, Ih_table, apm):
     """Calculate RMSDs for the matches. Also calculate R-factors."""
     R = flex.double([])
     n = 0
     if Ih_table.free_Ih_table:
         work_blocks = Ih_table.blocked_data_list[:-1]
         free_block = Ih_table.blocked_data_list[-1]
         self.rmsd_names = [
             "RMSD_I", "RMSD_I (no restraints)", "Free RMSD_I"
         ]
         self.rmsd_units = ["a.u", "a.u", "a.u"]
     else:
         work_blocks = Ih_table.blocked_data_list
         self.rmsd_names = ["RMSD_I"]
         self.rmsd_units = ["a.u"]
     for block in work_blocks:
         R.extend((self.calculate_residuals(block)**2) * block.weights)
         n += block.size
     unrestr_R = copy(R)
     if self.param_restraints:
         restraints = self.restraints_calculator.calculate_restraints(apm)
         if restraints:
             R.extend(restraints[0])
         else:
             self.param_restraints = False
     self._rmsds = [(flex.sum(R) / n)**0.5]
     if Ih_table.free_Ih_table:
         self._rmsds.append((flex.sum(unrestr_R) / n)**0.5)
         Rmsdfree = (self.calculate_residuals(free_block)**
                     2) * free_block.weights
         self._rmsds.append((flex.sum(Rmsdfree) / free_block.size)**0.5)
     return self._rmsds
Code example #9
    def merge_reflections(reflections, min_multiplicity):
        '''Merge intensities of multiply-measured symmetry-reduced HKLs. The input reflection table must be sorted by symmetry-reduced HKLs.'''
        merged_reflections = reflection_table_utils.merged_reflection_table()
        for refls in reflection_table_utils.get_next_hkl_reflection_table(
                reflections=reflections):
            if refls.size() == 0:
                break  # generated "refls" tables are only empty once the input "reflections" are exhausted

            hkl = refls[0]['miller_index_asymmetric']
            # This assert is time-consuming when using a small number of cores
            #assert not (hkl in merged_reflections['miller_index']) # i.e. assert that the input reflection table came in sorted

            refls = refls.select(refls['intensity.sum.variance'] > 0.0)

            if refls.size() >= min_multiplicity:
                weighted_intensity_array = refls[
                    'intensity.sum.value'] / refls['intensity.sum.variance']
                weights_array = flex.double(
                    refls.size(), 1.0) / refls['intensity.sum.variance']

                weighted_mean_intensity = flex.sum(
                    weighted_intensity_array) / flex.sum(weights_array)
                standard_error_of_weighted_mean_intensity = 1.0 / math.sqrt(
                    flex.sum(weights_array))

                merged_reflections.append({
                    'miller_index': hkl,
                    'intensity': weighted_mean_intensity,
                    'sigma': standard_error_of_weighted_mean_intensity,
                    'multiplicity': refls.size()
                })
        return merged_reflections
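The merge above is a standard inverse-variance weighted mean, with the standard error of the weighted mean given by 1/sqrt(sum of weights). A small numeric sketch of the same formulas, independent of the reflection-table machinery:

import math

def weighted_merge(intensities, variances):
    """Inverse-variance weighted mean and its standard error."""
    weights = [1.0 / v for v in variances]
    mean = sum(i * w for i, w in zip(intensities, weights)) / sum(weights)
    return mean, 1.0 / math.sqrt(sum(weights))

# two measurements, the second twice as precise: the mean is pulled towards it
mean, sigma = weighted_merge([10.0, 13.0], [2.0, 1.0])
assert abs(mean - 12.0) < 1e-12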
Code example #10
File: error_modifier_ev11.py Project: dials/cctbx
  def calculate_delta_statistics(self):
    '''Calculate min, max, mean, and stddev for the normalized deltas'''
    delta_min = flex.min(self.deltas) if self.deltas.size() > 0 else float('inf')
    delta_max = flex.max(self.deltas) if self.deltas.size() > 0 else float('-inf')
    delta_sum = flex.sum(self.deltas) if self.deltas.size() > 0 else 0.0

    comm = self.mpi_helper.comm
    MPI = self.mpi_helper.MPI

    # global min and max
    self.global_delta_min = comm.allreduce(delta_min, MPI.MIN)
    self.global_delta_max = comm.allreduce(delta_max, MPI.MAX)

    # global mean
    self.global_delta_count = comm.allreduce(self.deltas.size(), MPI.SUM)
    if self.global_delta_count < 20:
      raise ValueError("Too few reflections available for ev11 algorithm")
    global_delta_sum = comm.allreduce(delta_sum, MPI.SUM)
    self.global_delta_mean = global_delta_sum / self.global_delta_count

    # global standard deviation
    array_of_global_delta_means = flex.double(self.deltas.size(), self.global_delta_mean)
    array_of_diffs = self.deltas - array_of_global_delta_means
    array_of_square_diffs = array_of_diffs * array_of_diffs
    sum_of_square_diffs = flex.sum(array_of_square_diffs)
    global_sum_of_square_diffs = comm.allreduce(sum_of_square_diffs, MPI.SUM)
    self.global_delta_stddev = math.sqrt(global_sum_of_square_diffs / (self.global_delta_count - 1))
    if self.mpi_helper.rank == 0:
      self.logger.main_log("Global delta statistics (count,min,max,mean,stddev): (%d,%f,%f,%f,%f)"%(self.global_delta_count, self.global_delta_min, self.global_delta_max, self.global_delta_mean, self.global_delta_stddev))
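The allreduce calls only combine per-rank partial sums; on a single process the same statistics collapse to an ordinary two-pass computation. A serial sketch (not the dials/cctbx implementation) of what is being aggregated:

import math

def delta_statistics(deltas):
    """Count, min, max, mean and sample standard deviation of deltas."""
    n = len(deltas)
    mean = sum(deltas) / n
    stddev = math.sqrt(sum((d - mean) ** 2 for d in deltas) / (n - 1))
    return n, min(deltas), max(deltas), mean, stddev

assert delta_statistics([1.0, 2.0, 3.0])[3:] == (2.0, 1.0)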
Code example #11
 def calculate_gradients(self, apm):
     "calculate the gradient vector"
     x = apm.x
     R = self.error_model.sortedy - (x[1] * self.error_model.sortedx) - x[0]
     gradient = flex.double([
         -2.0 * flex.sum(R), -2.0 * flex.sum(R * self.error_model.sortedx)
     ])
     return gradient
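The two components follow from differentiating the sum of squared residuals R = y - m*x - c with respect to the intercept (x[0]) and slope (x[1]): d/dc = -2*sum(R) and d/dm = -2*sum(R*x). A quick finite-difference check of the same algebra in plain Python:

xs = [0.0, 1.0, 2.0]
ys = [1.0, 3.0, 4.0]

def ssq(c, m):
    """Sum of squared residuals of a straight-line fit."""
    return sum((y - m * x - c) ** 2 for x, y in zip(xs, ys))

c, m = 0.5, 1.5
R = [y - m * x - c for x, y in zip(xs, ys)]
analytic = (-2.0 * sum(R), -2.0 * sum(r * x for r, x in zip(R, xs)))
h = 1.0e-7
fd = ((ssq(c + h, m) - ssq(c - h, m)) / (2 * h),
      (ssq(c, m + h) - ssq(c, m - h)) / (2 * h))
assert all(abs(a - b) < 1e-5 for a, b in zip(analytic, fd))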
Code example #12
File: sim.py Project: dials/dials_scratch
def compute_profile(experiments, reflection, reference, N):
    from dials.array_family import flex
    from dials.algorithms.profile_model.gaussian_rs import CoordinateSystem
    from dials.algorithms.profile_model.modeller import GridSampler
    from dials_scratch.jmp.sim import compute_profile_internal
    from random import uniform

    sbox = reflection["shoebox"]
    bbox = sbox.bbox
    zs = sbox.zsize()
    ys = sbox.ysize()
    xs = sbox.xsize()

    profile = flex.double(flex.grid(zs, ys, xs))

    m2 = experiments[0].goniometer.get_rotation_axis_datum()
    s0 = experiments[0].beam.get_s0()
    s1 = reflection["s1"]
    phi = reflection["xyzcal.mm"][2]
    detector = experiments[0].detector
    scan = experiments[0].scan

    cs = CoordinateSystem(m2, s0, s1, phi)

    scan_range = scan.get_array_range()
    image_size = detector[0].get_image_size()
    grid_size = (3, 3, 40)
    assert grid_size[0] * grid_size[1] * grid_size[2] == len(reference[0])

    sampler = GridSampler(image_size, scan_range, grid_size)

    xyz = reflection["xyzcal.px"]
    index = sampler.nearest(0, xyz)

    for g in reference[0]:
        assert abs(flex.sum(g) - 1.0) < 1e-7

    grid = reference[0][index]

    sigma_d = experiments[0].profile.sigma_b(deg=False)
    sigma_m = experiments[0].profile.sigma_m(deg=False)
    delta_d = 3.0 * sigma_d
    delta_m = 3.0 * sigma_m

    profile = compute_profile_internal(grid, bbox, zs, ys, xs, N, delta_d,
                                       delta_m, detector, scan, cs)

    # from dials_scratch.jmp.viewer import show_image_stack_multi_view
    # show_image_stack_multi_view(profile.as_numpy_array(), vmax=max(profile))
    sum_p = flex.sum(profile)
    print("Partiality: %f" % sum_p)
    try:
        assert sum_p > 0, "sum_p == 0"
    except Exception as e:
        print(e)
        return None

    return profile
Code example #13
        def target(self, log_sigma):
            """The target for minimization."""
            sigma_m = math.exp(log_sigma[0])

            # Tiny value
            TINY = 1e-10
            assert sigma_m > TINY

            # Calculate the two components to the fraction
            a = scitbx.math.erf(self.e1 / sigma_m)
            b = scitbx.math.erf(self.e2 / sigma_m)
            n = self.n
            K = self.K

            # Calculate the fraction of observed reflection intensity
            zi = (a - b) / 2.0

            # Set any points <= 0 to 1e-10 (otherwise will get a floating
            # point error in log calculation below).
            assert zi.all_ge(0)
            mask = zi < TINY
            assert mask.count(True) < len(mask)
            zi.set_selected(mask, TINY)

            # Compute the likelihood
            #
            # The likelihood here is a result of the sum of two log likelihood
            # functions:
            #
            # The first is the same as the one in Kabsch2010 as applied to the
            # reflection as a whole. This results in the term log(Z)
            #
            # The second is the likelihood for each reflection, modelled as a Poisson
            # distribution with shape given by sigma M. This gives sum(ci log(zi)) -
            # sum(ci)*log(sum(zi))
            #
            # If the reflection is recorded on 1 frame, the second component is zero
            # and so the likelihood is dominated by the first term which can be seen
            # as a prior for sigma, which accounts for which reflections were actually
            # recorded.
            #
            L = 0
            for j, (i0,
                    i1) in enumerate(zip(self.indices[:-1], self.indices[1:])):
                selection = flex.size_t(range(i0, i1))
                zj = zi.select(selection)
                nj = n.select(selection)
                kj = K[j]
                Z = flex.sum(zj)
                # L += flex.sum(nj * flex.log(zj)) - kj * Z
                # L += flex.sum(nj * flex.log(zj)) - kj * math.log(Z)
                L += flex.sum(
                    nj * flex.log(zj)) - kj * math.log(Z) + math.log(Z)
            logger.debug("Sigma M: %f, log(L): %f", sigma_m * 180 / math.pi, L)

            # Return the negative log likelihood for minimization
            return -L
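Stripped of the flex selections, each reflection j contributes sum_i(n_i*log(z_i)) - K_j*log(Z) + log(Z) with Z = sum_i(z_i). A plain-Python restatement of one reflection's term (a sketch using the same symbols, not the dials code path):

import math

def reflection_log_likelihood(z, n, K):
    """One reflection's term: sum(n_i*log(z_i)) - K*log(Z) + log(Z)."""
    Z = sum(z)
    return (sum(ni * math.log(zi) for zi, ni in zip(z, n))
            - K * math.log(Z) + math.log(Z))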
Code example #14
def create_datastructures_for_structural_model(reflections, experiments,
                                               cif_file):
    """Read a cif file, calculate intensities and scale them to the average
    intensity of the reflections. Return an experiment and reflection table to
    be used for the structural model in scaling."""

    # read model, compute Fc, square to F^2
    ic = intensity_array_from_cif_file(cif_file)
    exp = deepcopy(experiments[0])
    params = Mock()
    params.parameterisation.decay_term.return_value = False
    params.parameterisation.scale_term.return_value = True
    exp.scaling_model = KBSMFactory.create(params, [], [])
    exp.scaling_model.set_scaling_model_as_scaled(
    )  # Set as scaled to fix scale.

    # Now put the calculated I's on roughly a common scale with the data.
    miller_indices = flex.miller_index([])
    intensities = flex.double([])

    for refl in reflections:
        miller_indices.extend(refl["miller_index"])
        intensities.extend(refl["intensity.prf.value"])
    miller_set = miller.set(
        crystal_symmetry=crystal.symmetry(
            space_group=experiments[0].crystal.get_space_group()),
        indices=miller_indices,
        anomalous_flag=True,
    )
    idata = miller.array(miller_set, data=intensities)

    match = idata.match_indices(ic)
    pairs = match.pairs()

    icalc = flex.double()
    iobs = flex.double()
    miller_idx = flex.miller_index()
    for p in pairs:
        # Note : will create miller_idx duplicates in i_calc - problem?
        iobs.append(idata.data()[p[0]])
        icalc.append(ic.data()[p[1]])
        miller_idx.append(ic.indices()[p[1]])

    icalc *= flex.sum(iobs) / flex.sum(icalc)

    rt = flex.reflection_table()
    rt["intensity"] = icalc
    rt["miller_index"] = miller_idx

    used_ids = experiments.identifiers()
    unique_id = get_next_unique_id(len(used_ids), used_ids)
    exp.identifier = str(unique_id)
    rt.experiment_identifiers()[unique_id] = str(unique_id)
    rt["id"] = flex.int(rt.size(), unique_id)

    return exp, rt
Code example #15
def estimate_ice_rings_width(imagesets, steps):

    from cctbx import miller, sgtbx, uctbx
    from matplotlib import pyplot as plt

    imageset = imagesets[0]
    detector = imageset.get_detector()
    beam = imageset.get_beam()

    from dials.util import masking

    params = masking.phil_scope.extract()
    params.resolution_range = []
    params.ice_rings.filter = True

    import numpy

    widths = flex.double(numpy.geomspace(start=0.0001, stop=0.01, num=steps))
    total_intensity = flex.double()
    n_pixels = flex.double()
    for width in widths:
        params.ice_rings.width = width
        generator = masking.MaskGenerator(params)
        mask = generator.generate(imageset)
        image = imageset.get_corrected_data(0)
        tot_intensity = 0
        n_pix = 0
        for im, m in zip(image, mask):
            im = im.as_1d()
            m = m.as_1d()
            print(m.count(True), m.count(False))
            print(flex.sum(im), flex.sum(im.select(m)),
                  flex.sum(im.select(~m)))
            tot_intensity += flex.sum(im.select(m))
            n_pix += m.count(True)
        total_intensity.append(tot_intensity)
        n_pixels.append(n_pix)
    average_intensity = total_intensity / n_pixels

    fig, axes = plt.subplots(nrows=2, figsize=(12, 8), sharex=True)
    axes[0].plot(widths,
                 average_intensity,
                 label="average intensity",
                 marker="+")
    axes[1].plot(widths, total_intensity, label="total intensity", marker="+")
    axes[0].set_ylabel("Average intensity per pixel")
    axes[1].set_xlabel("Ice ring width (1/d^2)")
    axes[1].set_ylabel("Total intensity")
    for ax in axes:
        ax.set_xlim(0, flex.max(widths))
    plt.savefig("ice_rings_width.png")
    plt.clf()
    return
Code example #16
File: test_error_model.py Project: huwjenkins/dials
def calculate_gradient_fd(target, parameterisation):
    """Calculate gradient array with finite difference approach."""
    delta = 1.0e-6
    parameterisation.set_param_vals([parameterisation.x[0] - (0.5 * delta)])
    target.predict(parameterisation)
    R_low = target.calculate_residuals(parameterisation)
    parameterisation.set_param_vals([parameterisation.x[0] + delta])
    target.predict(parameterisation)
    R_upper = target.calculate_residuals(parameterisation)
    parameterisation.set_param_vals([parameterisation.x[0] - (0.5 * delta)])
    target.predict(parameterisation)
    gradients = [(flex.sum(R_upper) - flex.sum(R_low)) / delta]
    return gradients
Code example #17
 def compute_functional_and_gradients(self):
   values = self.parameterization(self.x)
   assert -150. < values.BFACTOR < 150. # limits on the exponent, please
   self.func = self.refinery.fvec_callable(values)
   functional = flex.sum(self.func*self.func)
   self.f = functional
   jacobian = self.refinery.jacobian_callable(values)
   self.g = flex.double(self.n)
   for ix in range(self.n):
     self.g[ix] = flex.sum(2. * self.func * jacobian[ix])
   print("rms %10.3f" % math.sqrt(flex.mean(self.func*self.func)), file=self.out, end=' ')
   values.show(self.out)
   return self.f, self.g
Code example #18
 def compute_functional_and_gradients(self):
     values = self.parameterization(self.x)
     assert -150. < values.BFACTOR < 150.  # limits on the exponent, please
     self.func = self.refinery.fvec_callable(values)
     functional = flex.sum(self.func * self.func)
     self.f = functional
     jacobian = self.refinery.jacobian_callable(values)
     self.g = flex.double(self.n)
     for ix in range(self.n):
         self.g[ix] = flex.sum(2. * self.func * jacobian[ix])
     print("rms %10.3f" % math.sqrt(
         flex.mean(self.func * self.func)), file=self.out, end=' ')
     values.show(self.out)
     return self.f, self.g
Code example #19
 def compute_functional_and_gradients(self):
   values = self.parameterization(self.x)
   assert -150. < values.BFACTOR < 150,"B-factor out of range (+/-150) within rs2 functional and gradients"
   self.func = self.refinery.fvec_callable(values)
   functional = flex.sum(self.refinery.WEIGHTS*self.func*self.func)
   self.f = functional
   jacobian = self.refinery.jacobian_callable(values)
   self.g = flex.double(self.n)
   for ix in range(self.n):
     self.g[ix] = flex.sum(2. * self.refinery.WEIGHTS * self.func * jacobian[ix])
   print("rms %10.3f" % math.sqrt(flex.sum(self.refinery.WEIGHTS*self.func*self.func) /
                                  flex.sum(self.refinery.WEIGHTS)), file=self.out, end=' ')
   values.show(self.out)
   return self.f, self.g
Code example #20
def generate_3_profiles():
    p1 = gaussian((40, 9, 9), 1, (10.5, 4, 4), (2, 2, 2))
    p2 = gaussian((40, 9, 9), 1, (20.5, 4, 4), (2, 2, 2))
    p3 = gaussian((40, 9, 9), 1, (30.5, 4, 4), (2, 2, 2))
    p1 = p1 / flex.sum(p1)
    p2 = p2 / flex.sum(p2)
    p3 = p3 / flex.sum(p3)
    p1.reshape(flex.grid(1, 40, 9, 9))
    p2.reshape(flex.grid(1, 40, 9, 9))
    p3.reshape(flex.grid(1, 40, 9, 9))
    p = flex.double(flex.grid(3, 40, 9, 9))
    p[0:1, :, :, :] = p1
    p[1:2, :, :, :] = p2
    p[2:3, :, :, :] = p3
    return p
Code example #21
  def get_weighted_rmsd(self, reflections):
    n = len(reflections)
    if n == 0:
      return 0
    #weights = 1/reflections['intensity.sum.variance']
    reflections = reflections.select(reflections['xyzobs.mm.variance'].norms() > 0)
    weights = 1/reflections['xyzobs.mm.variance'].norms()

    un_rmsd = math.sqrt( flex.sum(reflections['difference_vector_norms']**2)/n)
    print "Uweighted RMSD (mm)", un_rmsd

    w_rmsd = math.sqrt( flex.sum( weights*(reflections['difference_vector_norms']**2) )/flex.sum(weights))
    print "Weighted RMSD (mm)", w_rmsd

    return un_rmsd
Code example #22
    def _beam_direction_variance_list(self,
                                      detector,
                                      reflections,
                                      centroid_definition="s1"):
        """Calculate the variance in beam direction for each spot.

        Params:
            detector The detector model
            reflections The list of reflections
            centroid_definition ENUM com or s1

        Returns:
            The list of variances
        """
        # Get the reflection columns
        shoebox = reflections["shoebox"]
        xyz = reflections["xyzobs.px.value"]

        # Loop through all the reflections
        variance = []

        if centroid_definition == "com":
            # Calculate the beam vector at the centroid
            s1_centroid = []
            for r in range(len(reflections)):
                panel = shoebox[r].panel
                s1_centroid.append(detector[panel].get_pixel_lab_coord(
                    xyz[r][0:2]))
        else:
            s1_centroid = reflections["s1"]

        for r in range(len(reflections)):
            # Get the coordinates and values of valid shoebox pixels
            # FIXME maybe I note in Kabsch (2010) s3.1 step (v) is
            # background subtraction, appears to be missing here.
            mask = shoebox[r].mask != 0
            values = shoebox[r].values(mask)
            s1 = shoebox[r].beam_vectors(detector, mask)

            angles = s1.angle(s1_centroid[r], deg=False)

            if flex.sum(values) > 1:
                variance.append(
                    flex.sum(values * flex.pow2(angles)) /
                    (flex.sum(values) - 1))

        # Return a list of variances
        return flex.double(variance)
Code example #23
def Rt(xyr, xym):
    '''Implement https://en.wikipedia.org/wiki/Procrustes_analysis to match
    moving to reference.'''

    from dials.array_family import flex
    import math

    n = xyr.size()
    assert xym.size() == n

    xr, yr = xyr.parts()
    xm, ym = xym.parts()

    # compute centre of mass shift
    xr0, yr0 = flex.sum(xr) / xr.size(), flex.sum(yr) / yr.size()
    xm0, ym0 = flex.sum(xm) / xm.size(), flex.sum(ym) / ym.size()
    dx, dy = xr0 - xm0, yr0 - ym0

    xr, yr = xr - xr0, yr - yr0
    xm, ym = xm - xm0, ym - ym0

    # compute angle theta
    tan_theta = sum([_xm * _yr - _ym * _xr for _xr, _yr, _xm, _ym in
                     zip(xr, yr, xm, ym)]) / \
        sum([_xm * _xr + _ym * _yr for _xr, _yr, _xm, _ym in
             zip(xr, yr, xm, ym)])
    theta = math.atan(tan_theta)

    # compose Rt matrix, verify that the RMSD is small between xyr and Rt * xym
    from scitbx import matrix
    R = matrix.sqr(
        (math.cos(theta), -math.sin(theta), math.sin(theta), math.cos(theta)))

    rmsd = 0
    for j, (_xm, _ym) in enumerate(zip(xm, ym)):
        _xr, _yr = xr[j], yr[j]
        _xmr, _ymr = R * (_xm, _ym)
        rmsd += (_xmr - _xr)**2 + (_ymr - _yr)**2

    rmsd /= n

    # now compute additional t component due to rotation about origin not centre
    # of mass

    t0 = matrix.col((xm0, ym0)) - R * (xm0, ym0)
    t = t0 + matrix.col((dx, dy))

    return R, t.elems, math.sqrt(rmsd), n
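A hypothetical round-trip check for Rt: build the reference set by rotating and translating a moving set, then confirm the fit reproduces the transform with negligible RMSD. This sketch assumes scitbx is available and relies only on the conventions visible above (R maps moving onto reference; matrix.sqr times a 2-tuple yields a column vector):

import math
from scitbx import matrix
from scitbx.array_family import flex

theta = 0.1
R0 = matrix.sqr((math.cos(theta), -math.sin(theta),
                 math.sin(theta), math.cos(theta)))
t0 = matrix.col((0.3, -0.7))
xym = flex.vec2_double([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (2.0, 1.0)])
xyr = flex.vec2_double([(R0 * p + t0).elems for p in xym])
R, t, rmsd, n = Rt(xyr, xym)
assert rmsd < 1.0e-6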
Code example #24
 def compute_functional_gradients(self, Ih_table):
     """Return the functional and gradients."""
     resids = self.calculate_residuals(Ih_table)
     gradients = self.calculate_gradients(Ih_table)
     weights = Ih_table.weights
     functional = flex.sum(resids**2 * weights)
     return functional, gradients
Code example #25
def stats_single_image(imageset, reflections, i=None, resolution_analysis=True,
                       plot=False):
  reflections = map_to_reciprocal_space(reflections, imageset)
  if plot and i is not None:
    filename = "i_over_sigi_vs_resolution_%d.png" %(i+1)
    hist_filename = "spot_count_vs_resolution_%d.png" %(i+1)
    extra_filename = "log_sum_i_sigi_vs_resolution_%d.png" %(i+1)
    distl_method_1_filename = "distl_method_1_%d.png" %(i+1)
    distl_method_2_filename = "distl_method_2_%d.png" %(i+1)
  else:
    filename = None
    hist_filename = None
    extra_filename = None
    distl_method_1_filename = None
    distl_method_2_filename = None

  d_star_sq = flex.pow2(reflections['rlp'].norms())
  d_spacings = uctbx.d_star_sq_as_d(d_star_sq)

  #plot_ordered_d_star_sq(reflections, imageset)
  reflections_all = reflections
  ice_sel = ice_rings_selection(reflections_all)
  reflections_no_ice = reflections_all.select(~ice_sel)
  n_spots_total = len(reflections_all)
  n_spots_no_ice = len(reflections_no_ice)
  n_spot_4A = (d_spacings > 4).count(True)
  intensities = reflections_no_ice['intensity.sum.value']
  total_intensity = flex.sum(intensities)
  #print i
  if hist_filename is not None:
    resolution_histogram(
      reflections, imageset, plot_filename=hist_filename)
  if extra_filename is not None:
    log_sum_i_sigi_vs_resolution(
      reflections, imageset, plot_filename=extra_filename)
  if resolution_analysis and n_spots_no_ice > 10:
    estimated_d_min = estimate_resolution_limit(
      reflections_all, imageset, ice_sel=ice_sel, plot_filename=filename)
    d_min_distl_method_1, noisiness_method_1 \
      = estimate_resolution_limit_distl_method1(
        reflections_all, imageset, ice_sel, plot_filename=distl_method_1_filename)
    d_min_distl_method_2, noisiness_method_2 = \
      estimate_resolution_limit_distl_method2(
        reflections_all, imageset, ice_sel, plot_filename=distl_method_2_filename)
  else:
    estimated_d_min = -1.0
    d_min_distl_method_1 = -1.0
    noisiness_method_1 = -1.0
    d_min_distl_method_2 = -1.0
    noisiness_method_2 = -1.0

  return group_args(n_spots_total=n_spots_total,
                    n_spots_no_ice=n_spots_no_ice,
                    n_spots_4A=n_spot_4A,
                    total_intensity=total_intensity,
                    estimated_d_min=estimated_d_min,
                    d_min_distl_method_1=d_min_distl_method_1,
                    noisiness_method_1=noisiness_method_1,
                    d_min_distl_method_2=d_min_distl_method_2,
                    noisiness_method_2=noisiness_method_2)
Code example #26
  def tst_with_flat_background(self):

    from dials.algorithms.integration.fit import ProfileFitter
    from scitbx.array_family import flex
    from numpy.random import seed
    seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c0 = add_poisson_noise(100 * p)
    b = flex.double(flex.grid(9, 9, 9), 10)
    m = flex.bool(flex.grid(9,9,9), True)
    b0 = add_poisson_noise(b)
    c = c0 + b0

    # Fit
    fit = ProfileFitter(c, b, m, p)
    I = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    Iknown = 201.67417836585147
    Vknown = 7491.6743173001205

    # Test intensity is the same
    eps = 1e-3
    assert(abs(I[0] - Iknown) < eps)
    assert(abs(V[0] - Vknown) < eps)

    print('OK')
Code example #27
def outlier_rejection(reflections):
  # http://scripts.iucr.org/cgi-bin/paper?ba0032
  if len(reflections) == 1:
    return reflections
  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  i_max = flex.max_index(intensities)

  sel = flex.bool(len(reflections), True)
  sel[i_max] = False

  i_test = intensities[i_max]
  var_test = variances[i_max]

  intensities_subset = intensities.select(sel)
  var_subset = variances.select(sel)

  var_prior = var_test + 1/flex.sum(1/var_subset)
  p_prior = 1/math.sqrt(2*math.pi * var_prior) * math.exp(
    -(i_test - flex.mean(intensities_subset))**2/(2 * var_prior))
  #print p_prior

  if p_prior > 1e-10:
    return reflections

  return outlier_rejection(reflections.select(sel))
Code example #28
 def df(I):
   mask = flex.bool(flex.grid(9,9,9), False)
   for k in range(9):
     for j in range(9):
       for i in range(9):
         dx = 5 * (i - 4.5) / 4.5
         dy = 5 * (j - 4.5) / 4.5
         dz = 5 * (k - 4.5) / 4.5
         dd = sqrt(dx**2 + dy**2 + dz**2)
         if dd <= 3:
           mask[k,j,i] = True
   mask = mask.as_1d() & (ref_P.as_1d() > 0)
   p = ref_P.as_1d().select(mask)
   c = max_P.as_1d().select(mask)
   b = 0
   return flex.sum(p) - flex.sum(c*c / (I*I*p))
Code example #29
def test_target_fixedIh(mock_multi_apm_withoutrestraints, mock_Ih_table):
    """Test the target function for targeted scaling (where Ih is fixed)."""

    target = ScalingTargetFixedIH()
    Ih_table = mock_Ih_table.blocked_data_list[0]
    R, _ = target.compute_residuals(Ih_table)
    expected_residuals = flex.double([-1.0, 0.0, 1.0])
    assert list(R) == list(expected_residuals)
    _, G = target.compute_functional_gradients(Ih_table)
    assert list(G) == [-44.0]
    # Add in finite difference check

    J = target.calculate_jacobian(Ih_table)
    assert J.n_cols == 1
    assert J.n_rows == 3
    assert J.non_zeroes == 3
    assert J[0, 0] == -11.0
    assert J[1, 0] == -22.0
    assert J[2, 0] == -33.0

    expected_rmsd = (flex.sum(expected_residuals**2) /
                     len(expected_residuals))**0.5
    assert target._rmsds is None
    rmsd = target.rmsds(mock_Ih_table, mock_multi_apm_withoutrestraints)
    assert target._rmsds == pytest.approx([expected_rmsd])
Code example #30
File: check_mosaic.py Project: lizhen-dlut/xia2
def mosaic_profile_xyz(profile):
    nz, ny, nx = profile.focus()

    x = flex.double(nx, 0.0)
    for j in range(nx):
        x[j] = flex.sum(profile[:, :, j:j + 1])

    y = flex.double(ny, 0.0)
    for j in range(ny):
        y[j] = flex.sum(profile[:, j:j + 1, :])

    z = flex.double(nz, 0.0)
    for j in range(nz):
        z[j] = flex.sum(profile[j:j + 1, :, :])

    return x, y, z
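Each loop collapses the 3D profile onto a single axis by summing one-slice views. If the profile is moved to numpy, these are just axis sums; a sketch of the equivalence (assuming a well-formed nz x ny x nx array):

import numpy as np

profile = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)  # nz, ny, nx
x = profile.sum(axis=(0, 1))  # matches flex.sum(profile[:, :, j:j + 1]) per j
y = profile.sum(axis=(0, 2))
z = profile.sum(axis=(1, 2))
assert x.shape == (4,) and y.shape == (3,) and z.shape == (2,)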
Code example #31
    def tst_with_identical_non_negative_profiles(self):

        from scitbx.array_family import flex

        # Generate identical non-negative profiles
        reflections, profiles, profile = self.generate_identical_non_negative_profiles()

        # Create the reference learner
        modeller = Modeller(self.n, self.grid_size, self.threshold)

        # Do the modelling
        modeller.model(reflections, profiles)
        modeller.finalize()

        # Normalize the profile
        profile = self.normalize_profile(profile)

        # Check that all the reference profiles are the same
        eps = 1e-10
        for index in range(len(modeller)):
            reference = modeller.data(index)
            for k in range(self.grid_size[2]):
                for j in range(self.grid_size[1]):
                    for i in range(self.grid_size[0]):
                        assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
            assert abs(flex.sum(reference) - 1.0) <= eps

        print "OK"
Code example #32
def test_with_flat_background_partial():
    np.random.seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c0 = add_poisson_noise(100 * p)
    b = flex.double(flex.grid(9, 9, 9), 1)
    m = flex.bool(flex.grid(9, 9, 9), True)
    c = c0 + add_poisson_noise(b)

    # Get the partial profiles
    pp = p[0:5, :, :]
    mp = m[0:5, :, :]
    cp = c[0:5, :, :]
    bp = b[0:5, :, :]

    # Fit
    fit = ProfileFitter(cp, bp, mp, pp)
    intensity = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    Iknown = 99.06932141277105
    Vknown = 504.06932141277105

    # Test intensity is the same
    eps = 1e-7
    assert intensity[0] == pytest.approx(Iknown, abs=eps)
    assert V[0] == pytest.approx(Vknown, abs=eps)
Code example #33
def outlier_rejection(reflections):
    # http://scripts.iucr.org/cgi-bin/paper?ba0032
    if len(reflections) == 1:
        return reflections
    intensities = reflections["intensity.sum.value"]
    variances = reflections["intensity.sum.variance"]

    i_max = flex.max_index(intensities)

    sel = flex.bool(len(reflections), True)
    sel[i_max] = False

    i_test = intensities[i_max]
    var_test = variances[i_max]

    intensities_subset = intensities.select(sel)
    var_subset = variances.select(sel)

    var_prior = var_test + 1 / flex.sum(1 / var_subset)
    p_prior = (1 / math.sqrt(2 * math.pi * var_prior) *
               math.exp(-((i_test - flex.mean(intensities_subset))**2) /
                        (2 * var_prior)))

    if p_prior > 1e-10:
        return reflections

    return outlier_rejection(reflections.select(sel))
Code example #34
def test_with_flat_background():
    np.random.seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c0 = add_poisson_noise(100 * p)
    b = flex.double(flex.grid(9, 9, 9), 10)
    m = flex.bool(flex.grid(9, 9, 9), True)
    b0 = add_poisson_noise(b)
    c = c0 + b0

    # Fit
    fit = ProfileFitter(c, b, m, p)
    intensity = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    Iknown = 201.67417836585147
    Vknown = 7491.6743173001205

    # Test intensity is the same
    eps = 1e-3
    assert intensity[0] == pytest.approx(Iknown, abs=eps)
    assert V[0] == pytest.approx(Vknown, abs=eps)
Code example #35
def test_target_fixedIh():
    """Test the target function for targeted scaling (where Ih is fixed)."""

    target = ScalingTargetFixedIH()
    Ih_table = mock_Ih_table().blocked_data_list[0]
    R, _ = target.compute_residuals(Ih_table)
    expected_residuals = flex.double([-1.0, 0.0, 1.0])
    assert list(R) == pytest.approx(list(expected_residuals))
    _, G = target.compute_functional_gradients(Ih_table)
    assert list(G) == pytest.approx([-44.0])
    # Add in finite difference check

    Ih_table = mock_Ih_table().blocked_data_list[0]
    J = target.calculate_jacobian(Ih_table)
    assert J.n_cols == 1
    assert J.n_rows == 3
    assert J.non_zeroes == 3
    assert J[0, 0] == pytest.approx(-11.0)
    assert J[1, 0] == pytest.approx(-22.0)
    assert J[2, 0] == pytest.approx(-33.0)

    expected_rmsd = (flex.sum(flex.pow2(expected_residuals)) /
                     len(expected_residuals))**0.5
    assert target._rmsds is None
    target.param_restraints = False  # don't try to use apm to get restraints
    assert target.rmsds(mock_Ih_table(), [])
    assert target._rmsds == pytest.approx([expected_rmsd])
Code example #36
 def calculate_intensity_statistics(self, reflections):
   '''Calculate statistics for hkl intensities distributed over resolution bins'''
   for refls in reflection_table_utils.get_next_hkl_reflection_table(reflections=reflections):
     assert refls.size() > 0
     hkl = refls[0]['miller_index_asymmetric']
     if hkl in self.hkl_resolution_bins:
       i_bin = self.hkl_resolution_bins[hkl]
       multiplicity = refls.size()
       self.n_sum[i_bin] += 1
       self.m_sum[i_bin] += multiplicity
       if multiplicity > 1:
         self.mm_sum[i_bin] += 1
         weighted_intensity_array = refls['intensity.sum.value'] / refls['intensity.sum.variance']
         weights_array = flex.double(refls.size(), 1.0) / refls['intensity.sum.variance']
         self.I_sum[i_bin]     += flex.sum(weighted_intensity_array) / flex.sum(weights_array)
         self.Isig_sum[i_bin]  += flex.sum(weighted_intensity_array) / math.sqrt(flex.sum(weights_array))
Code example #37
    def tst_with_systematically_offset_profiles(self):
        from dials.algorithms.image.centroid import centroid_image
        from scitbx import matrix
        from scitbx.array_family import flex

        # Generate identical non-negative profiles
        reflections, profiles = self.generate_systematically_offset_profiles()

        # Create the reference learner
        modeller = Modeller(self.n, self.grid_size, self.threshold)

        # Do the modelling
        modeller.model(reflections, profiles)
        modeller.finalize()

        # Check that all the reference profiles are the same
        eps = 1e-10
        profile = None
        for index in range(len(modeller)):
            reference = modeller.data(index)
            if profile is not None:
                for k in range(self.grid_size[2]):
                    for j in range(self.grid_size[1]):
                        for i in range(self.grid_size[0]):
                            assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
            else:
                profile = reference
            assert abs(flex.sum(reference) - 1.0) <= eps

        print "OK"
Code example #38
File: two_theta_refiner.py Project: dials/dials
  def _rmsds_core(self, reflections):
    """calculate unweighted RMSDs for the specified reflections"""

    resid_2theta = flex.sum(reflections['2theta_resid2'])
    n = len(reflections)

    rmsds = (sqrt(resid_2theta / n), )
    return rmsds
Code example #39
  def check_reference(self, reference):
    ''' Check the reference spots. '''
    from dials.array_family import flex
    from dials.algorithms.image.centroid import centroid_image
    from math import sqrt

    # Get a load of stuff
    I_sim = reference['intensity.sim']
    I_exp = reference['intensity.exp']
    I_cal = reference['intensity.prf.value']
    I_var = reference['intensity.prf.variance']

    # Get the transformed shoeboxes
    profiles = reference['rs_shoebox']
    n_sigma = 3
    n_sigma2 = 5
    grid_size = 4
    step_size = n_sigma2 / (grid_size + 0.5)
    eps = 1e-7
    for i in range(len(profiles)):
      data = profiles[i].data
      #dmax = flex.max(data)
      #data = 100 * data / dmax
      #p = data.as_numpy_array()
      #p = p.astype(numpy.int)
      #print p
      print(flex.sum(data), I_exp[i], I_cal[i])
      #assert(abs(flex.sum(data) - I_exp[i]) < eps)
      centroid = centroid_image(data)
      m = centroid.mean()
      v = centroid.variance()
      s1 = tuple(sqrt(vv) for vv in v)
      s2 = tuple(ss * step_size for ss in s1)
      assert(all(abs(mm - (grid_size + 0.5)) < 0.25 for mm in m))
      assert(all(abs(ss2 - n_sigma / n_sigma2) < 0.25 for ss2 in s2))

    # Calculate Z
    Z = (I_cal - I_exp) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

    from matplotlib import pylab
    pylab.hist((I_cal - I_exp) / I_exp)
    pylab.show()
Code example #40
  def check_profiles(self, learner):
    ''' Check the reference profiles. '''
    from dials.array_family import flex
    from dials.algorithms.image.centroid import centroid_image
    from math import sqrt

    # Get the reference locator
    locator = learner.locate()
    np = locator.size()
    assert(np == 9)
    assert(flex.sum(learner.counts()) == 10000)

    #profile = locator.profile(0)
    #pmax = flex.max(profile)
    #profile = 100 * profile / pmax
    #profile = profile.as_numpy_array()
    #import numpy
    #profile = profile.astype(numpy.int)
    #print profile

    # Check all the profiles
    eps = 1e-7
    n_sigma = 3
    n_sigma2 = 5
    grid_size = 4
    step_size = n_sigma2 / (grid_size + 0.5)
    for i in range(np):
      profile = locator.profile(i)
      assert(abs(flex.sum(profile) - 1.0) < eps)
      centroid = centroid_image(profile)
      m = centroid.mean()
      v = centroid.variance()
      s1 = tuple(sqrt(vv) for vv in v)
      s2 = tuple(ss * step_size for ss in s1)
      assert(all(abs(mm - (grid_size + 0.5)) < 0.25 for mm in m))
      assert(all(abs(ss2 - n_sigma / n_sigma2) < 0.25 for ss2 in s2))

    # Check all the profiles have good correlation coefficients
    cor = locator.correlations()
    assert(all(cor > 0.99))

    # Test passed
    print('OK')
Code example #41
  def compute_functional_and_gradients(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150. # limits on the exponent, please
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.func*self.func)
    self.f = functional
    DELTA = 1.E-7
    self.g = flex.double()
    for x in range(self.n):
      templist = list(self.x)
      templist[x]+=DELTA
      dvalues = flex.double(templist)

      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc*dfunc)
      #calculate by finite_difference
      self.g.append( ( dfunctional-functional )/DELTA )
    self.g[2]=0.
    print("rms %10.3f" % math.sqrt(flex.mean(self.func*self.func)), file=self.out, end=' ')
    values.show(self.out)
    return self.f, self.g
Code example #42
  def compute_functional_and_gradients_test_code(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150. # limits on the exponent, please
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.func*self.func)
    self.f = functional
    jacobian = self.refinery.jacobian_callable(values)
    self.gg_0 = flex.sum(2. * self.func * jacobian[0])
    self.gg_1 = flex.sum(2. * self.func * jacobian[1])
    self.gg_3 = flex.sum(2. * self.func * jacobian[3])
    self.gg_4 = flex.sum(2. * self.func * jacobian[4])
    DELTA = 1.E-7
    self.g = flex.double()
    for x in range(self.n):
      templist = list(self.x)
      templist[x]+=DELTA
      dvalues = flex.double(templist)

      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc*dfunc)
      #calculate by finite_difference
      self.g.append( ( dfunctional-functional )/DELTA )
    self.g[2]=0.

    print("rms %10.3f" % math.sqrt(flex.mean(self.func*self.func)), file=self.out, end=' ')
    values.show(self.out)
    print("derivatives--> %15.5f    %15.5f    %9.7f   %5.2f   %5.2f" % tuple(self.g), file=self.out)
    print("  analytical-> %15.5f    %15.5f                %5.2f   %5.2f" % (
      self.gg_0, self.gg_1, self.gg_3, self.gg_4), file=self.out)
    self.g[0]=self.gg_0
    self.g[1]=self.gg_1
    self.g[3]=self.gg_3
    self.g[4]=self.gg_4
    return self.f, self.g
Code example #43
def check_experiment(experiment, reflections):

  # predict reflections in place
  from dials.algorithms.spot_prediction import StillsReflectionPredictor
  sp = StillsReflectionPredictor(experiment)
  UB = experiment.crystal.get_U() * experiment.crystal.get_B()
  try:
    sp.for_reflection_table(reflections, UB)
  except RuntimeError:
    return False

  # calculate unweighted RMSDs
  x_obs, y_obs, _ = reflections['xyzobs.px.value'].parts()
  delpsi = reflections['delpsical.rad']
  x_calc, y_calc, _ = reflections['xyzcal.px'].parts()

  # calculate residuals and assign columns
  x_resid = x_calc - x_obs
  x_resid2 = x_resid**2
  y_resid = y_calc - y_obs
  y_resid2 = y_resid**2
  delpsical2 = delpsi**2
  r_x = flex.sum(x_resid2)
  r_y = flex.sum(y_resid2)
  r_z = flex.sum(delpsical2)

  # rmsd calculation
  n = len(reflections)
  rmsds = (sqrt(r_x / n),
           sqrt(r_y / n),
           sqrt(r_z / n))

  # check positional RMSDs are within 5 pixels
  print(rmsds)
  if rmsds[0] > 5: return False
  if rmsds[1] > 5: return False

  return True
Code example #44
def log_sum_i_sigi_vs_resolution(reflections, imageset, plot_filename=None):
  d_star_sq = flex.pow2(reflections['rlp'].norms())
  hist = get_histogram(d_star_sq)

  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  sel = variances > 0
  intensities = intensities.select(sel)
  variances = variances.select(sel)

  i_over_sigi = intensities/flex.sqrt(variances)
  #log_i_over_sigi = flex.log(i_over_sigi)

  slots = []
  for slot in hist.slot_infos():
    sel = (d_star_sq > slot.low_cutoff) & (d_star_sq < slot.high_cutoff)
    if sel.count(True) > 0:
      slots.append(math.log(flex.sum(i_over_sigi.select(sel))))
    else:
      slots.append(0)

  if plot_filename is not None:
    if pyplot is None:
      raise Sorry("matplotlib must be installed to generate a plot.")
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    #ax.bar(hist.slot_centers()-0.5*hist.slot_width(), hist.slots(),
    ax.scatter(hist.slot_centers()-0.5*hist.slot_width(), slots, s=20, color='blue', marker='o', alpha=0.5)
    ax.set_xlabel("d_star_sq")
    ax.set_ylabel("ln(sum(I/sigI))")

    ax_ = ax.twiny() # ax2 is responsible for "top" axis and "right" axis
    xticks = ax.get_xticks()
    xlim = ax.get_xlim()
    xticks_d = [
      uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks ]
    xticks_ = [ds2/(xlim[1]-xlim[0]) for ds2 in xticks]
    ax_.set_xticks(xticks)
    ax_.set_xlim(ax.get_xlim())
    ax_.set_xlabel(r"Resolution ($\AA$)")
    ax_.set_xticklabels(["%.1f" %d for d in xticks_d])
    #pyplot.show()
    pyplot.savefig(plot_filename)
    pyplot.close()
Code example #45
  def plot_data_by_two_theta(self, reflections, tag):
    n_bins = 30
    arbitrary_padding = 1
    sorted_two_theta = flex.sorted(reflections['two_theta_obs'])
    bin_low = [sorted_two_theta[int((len(sorted_two_theta)/n_bins) * i)] for i in range(n_bins)]
    bin_high = [bin_low[i+1] for i in range(n_bins-1)]
    bin_high.append(sorted_two_theta[-1]+arbitrary_padding)

    title = "%sBinned data by two theta (n reflections per bin: %.1f)"%(tag, len(sorted_two_theta)/n_bins)

    x = flex.double()
    x_centers = flex.double()
    n_refls = flex.double()
    rmsds = flex.double()
    radial_rmsds = flex.double()
    transverse_rmsds = flex.double()
    rt_ratio = flex.double()
    #delta_two_theta = flex.double()
    rmsd_delta_two_theta = flex.double()

    for i in range(n_bins):
      x_centers.append(((bin_high[i]-bin_low[i])/2) + bin_low[i])
      refls = reflections.select((reflections['two_theta_obs'] >= bin_low[i]) & (reflections['two_theta_obs'] < bin_high[i]))
      n = len(refls)
      n_refls.append(n)
      rmsds.append(1000*math.sqrt(flex.sum_sq(refls['difference_vector_norms'])/n))
      radial_rmsds.append(1000*math.sqrt(flex.sum_sq(refls['radial_displacements'])/n))
      transverse_rmsds.append(1000*math.sqrt(flex.sum_sq(refls['transverse_displacements'])/n))
      rt_ratio.append(radial_rmsds[-1]/transverse_rmsds[-1])
      rmsd_delta_two_theta.append(math.sqrt(flex.sum_sq(refls['two_theta_obs']-refls['two_theta_cal'])/n))
      #delta_two_theta.append(flex.mean(refls['two_theta_obs']-refls['two_theta_cal']))
    assert len(reflections) == flex.sum(n_refls)

    self.plot_multi_data(x_centers,
                         [rt_ratio, (rmsds, radial_rmsds, transverse_rmsds), rmsd_delta_two_theta],
                         "Two theta (degrees)",
                         ["R/T RMSD ratio",
                          ("Overall RMSD","Radial RMSD","Transverse RMSD"),
                          "RMSD delta two theta"],
                         ["R/T RMSD ratio",
                          "Overall, radial, transverse RMSD (microns)",
                          "Delta two theta RMSD (degrees)"],
                         title)
Code example #46
def run(args):
  from dials.array_family import flex
  from dials.util.command_line import Importer
  importer = Importer(args, check_format=True)
  assert len(importer.datablocks) == 1
  imageset = importer.datablocks[0].extract_imagesets()[0]

  total_counts = flex.int()
  for im in imageset:
    total_counts.append(flex.sum(im))

  with open("total_counts_per_image.txt", "wb") as f:
    for i, count in enumerate(total_counts):
      print >> f, "%i %i" %(i, count)

  from matplotlib import pyplot
  pyplot.plot(range(total_counts.size()), total_counts)
  pyplot.xlabel("Image number")
  pyplot.ylabel("Total counts")
  pyplot.show()
Code example #47
  def check_profiles(self, learner):
    ''' Check the reference profiles. '''
    from dials.array_family import flex
    from dials.algorithms.image.centroid import centroid_image
    from math import sqrt

    # Get the reference locator
    locator = learner.locate()
    np = locator.size()
    assert(np == 9)
    cexp = [10000, 3713, 3817, 5023, 3844, 3723, 3768, 4886, 3781]
    assert(all(c1 == c2 for c1, c2 in zip(cexp, learner.counts())))

    #profile = locator.profile(0)
    #pmax = flex.max(profile)
    #profile = 100 * profile / pmax
    #profile = profile.as_numpy_array()
    #import numpy
    #profile = profile.astype(numpy.int)
    #print profile


    # Check all the profiles
    eps = 1e-7
    n_sigma = 3
    grid_size = 4
    step_size = n_sigma / (grid_size + 0.5)
    for i in range(np):
      profile = locator.profile(i)
      assert(abs(flex.sum(profile) - 1.0) < eps)
      centroid = centroid_image(profile)
      m = centroid.mean()
      v = centroid.variance()
      s1 = tuple(sqrt(vv) for vv in v)
      s2 = tuple(ss * step_size for ss in s1)
      assert(all(abs(mm - (grid_size + 0.5)) < 0.25 for mm in m))
      assert(all(abs(ss2 - 1.0) < 0.25 for ss2 in s2))

    # Test passed
    print('OK')
Code example #48
def residual(two_thetas_obs, miller_indices, wavelength, unit_cell):
  two_thetas_calc = unit_cell.two_theta(miller_indices, wavelength, deg=True)
  return flex.sum(flex.pow2(two_thetas_obs - two_thetas_calc))
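A hypothetical call, assuming cctbx is available: unit_cell.two_theta takes a flex.miller_index array and a wavelength (here with deg=True, matching the function above), so the residual of a model against its own predictions is zero by construction.

from cctbx import uctbx
from cctbx.array_family import flex

uc = uctbx.unit_cell((78.0, 78.0, 37.0, 90.0, 90.0, 90.0))
hkl = flex.miller_index([(1, 0, 0), (0, 1, 1), (1, 1, 2)])
two_thetas_obs = uc.two_theta(hkl, 1.0, deg=True)
assert residual(two_thetas_obs, hkl, 1.0, uc) < 1.0e-12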
Code example #49
  from dials.util.command_line import Importer
  from dials.array_family import flex
  import sys

  # Get the imageset
  importer = Importer(sys.argv[1:])
  assert(len(importer.datablocks) == 1)
  imagesets = importer.datablocks[0].extract_imagesets()
  assert(len(imagesets) == 1)
  imageset = imagesets[0]

  # Create a mask
  image = imageset[0]
  mask = image >= 0

  # Loop through all the images and sum the pixels
  counts = []
  for i, image in enumerate(imageset):
    print('Processing image %d' % i)
    counts.append(flex.sum(image.select(mask.as_1d())))

  # Write the counts to file
  with open("counts_per_image.txt", "w") as outfile:
    for item in counts:
      outfile.write('%s\n' % str(item))

  from matplotlib import pylab
  pylab.plot(counts)
  pylab.show()
Code example #50
File: test.py Project: dials/dials_scratch
    zc = zcal[index]
    sbox = shoebox[index]
    s1c = matrix.col(detector[0].get_pixel_lab_coord((xc, yc)))
    phic = scan.get_angle_from_array_index(zc)
    for j in range(sbox.data.all()[1]):
      for i in range(sbox.data.all()[2]):
        x = sbox.bbox[0] + i
        y = sbox.bbox[2] + j
        s1ij = matrix.col(detector[0].get_pixel_lab_coord((x,y)))
        da = s1c.angle(s1ij)
        for k in range(sbox.data.all()[0]):
          z = sbox.bbox[4] + k
          phi = scan.get_angle_from_array_index(z)
          db = phic - phi
          c = sbox.data[k,j,i]
          diff_angle_d.extend([da] * int(1000.0*c / flex.sum(sbox.data)))
          diff_angle_m.extend([db] * int(1000.0*c/ flex.sum(sbox.data)))
          # counts.append(sbox.data[k,j,i])
    print(index)

  print(min(diff_angle_d), max(diff_angle_d))
  print(min(diff_angle_m), max(diff_angle_m))

  m = sum(diff_angle_m) / len(diff_angle_m)
  v = sum([(d - m)**2 for d in diff_angle_m]) / len(diff_angle_m)
  from math import sqrt
  print(m, sqrt(v))

  from matplotlib import pylab
  pylab.hist(diff_angle_d, bins=100)
  pylab.show()
Code example #51
  def test_for_reference(self):
    from dials.algorithms.integration import ProfileFittingReciprocalSpace
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    from dials.algorithms.statistics import \
      kolmogorov_smirnov_test_standard_normal
    from math import erf, sqrt, pi
    from copy import deepcopy
    from dials.algorithms.simulation.reciprocal_space import Simulator
    from os.path import basename

    # Integrate
    integration = ProfileFittingReciprocalSpace(
      grid_size=4,
      threshold=0.00,
      frame_interval=100,
      n_sigma=5,
      mask_n_sigma=3,
      sigma_b=0.024 * pi / 180.0,
      sigma_m=0.044 * pi / 180.0
    )

    # Integrate the reference profiles
    integration(self.experiment, self.reference)

    p = integration.learner.locate().profile(0)
    m = integration.learner.locate().mask(0)

    locator = integration.learner.locate()

    cor = locator.correlations()
    for j in range(cor.all()[0]):
      print(' '.join([str(cor[j,i]) for i in range(cor.all()[1])]))
    #exit(0)
    #from matplotlib import pylab
    #pylab.imshow(cor.as_numpy_array(), interpolation='none', vmin=-1, vmax=1)
    #pylab.show()


    #n = locator.size()
    #for i in range(n):
      #c = locator.coord(i)
      #p = locator.profile(i)
      #vmax = flex.max(p)
      #from matplotlib import pylab
      #for j in range(9):
        #pylab.subplot(3, 3, j+1)
        #pylab.imshow(p.as_numpy_array()[j], vmin=0, vmax=vmax,
        #interpolation='none')
      #pylab.show()

    #print "NRef: ", n
    #x = []
    #y = []
    #for i in range(n):
      #c = locator.coord(i)
      #x.append(c[0])
      #y.append(c[1])
    #from matplotlib import pylab
    #pylab.scatter(x,y)
    #pylab.show()

    #exit(0)
    import numpy
    #pmax = flex.max(p)
    #scale = 100 / pmax
    #print "Scale: ", 100 / pmax
    #p = p.as_numpy_array() *100 / pmax
    #p = p.astype(numpy.int)
    #print p
    #print m.as_numpy_array()

    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference['rs_shoebox']
    eps = 1e-7
    for p in profiles:
      assert(abs(flex.sum(p.background) - 0) < eps)
    print 'OK'

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated)
    I_cal = self.reference['intensity.sum.value']
    I_var = self.reference['intensity.sum.variance']
    B_sim = self.reference['background.sim'].as_double()
    I_sim = self.reference['intensity.sim'].as_double()
    I_exp = self.reference['intensity.exp']
    P_cor = self.reference['profile.correlation']
    X_pos, Y_pos, Z_pos = self.reference['xyzcal.px'].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    max_ind = flex.max_index(I_cal)
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]['rs_shoebox'].data
    max_C = self.reference[max_ind]['xyzcal.px']
    max_S = self.reference[max_ind]['shoebox'].data


    min_ind = flex.min_index(P_cor)
    min_I = I_cal[min_ind]
    min_P = self.reference[min_ind]['rs_shoebox'].data
    min_C = self.reference[min_ind]['xyzcal.px']
    min_S = self.reference[min_ind]['shoebox'].data

    ##for k in range(max_S.all()[0]):
    #if False:
      #for j in range(max_S.all()[1]):
        #for i in range(max_S.all()[2]):
          #max_S[k,j,i] = 0
          #if (abs(i - max_S.all()[2] // 2) < 2 and
              #abs(j - max_S.all()[1] // 2) < 2 and
              #abs(k - max_S.all()[0] // 2) < 2):
            #max_S[k,j,i] = 100

    #p = max_P.as_numpy_array() * 100 / flex.max(max_P)
    #p = p.astype(numpy.int)
    #print p

    #from dials.scratch.jmp.misc.test_transform import test_transform
    #grid_size = 4
    #ndiv = 5
    #sigma_b = 0.024 * pi / 180.0
    #sigma_m = 0.044 * pi / 180.0
    #n_sigma = 4.0
    #max_P2 = test_transform(
      #self.experiment,
      #self.reference[max_ind]['shoebox'],
      #self.reference[max_ind]['s1'],
      #self.reference[max_ind]['xyzcal.mm'][2],
      #grid_size,
      #sigma_m,
      #sigma_b,
      #n_sigma,
      #ndiv)
    #max_P = max_P2

    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    print "Max Index: ", max_ind, max_I, flex.sum(max_P), flex.sum(max_S)
    print "Coord: ", max_C, "Ref Coord: ", ref_C

    print "Min Index: ", min_ind, min_I, flex.sum(min_P), flex.sum(min_S)
    print "Coord: ", min_C, "Ref Coord: ", ref_C

    #vmax = flex.max(max_P)
    #print sum(max_S)
    #print sum(max_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
      #pylab.subplot(3, 3, j+1)
      #pylab.imshow(max_P.as_numpy_array()[j], vmin=0, vmax=vmax,
      #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #vmax = flex.max(min_P)
    #print sum(min_S)
    #print sum(min_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
      #pylab.subplot(3, 3, j+1)
      #pylab.imshow(min_P.as_numpy_array()[j], vmin=0, vmax=vmax,
      #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #for k in range(max_S.all()[0]):
      #print ''
      #print 'Slice %d' % k
      #for j in range(max_S.all()[1]):
        #print ' '.join(["%-4d" % int(max_S[k,j,i]) for i in range(max_S.all()[2])])

    print "Testing"

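    # f(I) is a Poisson-weighted least-squares residual between the observed
    # profile (max_P) and the reference profile (ref_P) scaled by intensity I,
    # restricted to a sphere of radius 3 grid units; df and d2f below are its
    # first and second derivatives with respect to I.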
    def f(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True

      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      return flex.sum((c - I * p)**2 / (I * p))

    def df(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True
      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      b = 0
      return flex.sum(p) - flex.sum(c*c / (I*I*p))
      #return flex.sum(p - p*c*c / ((b + I*p)**2))
      #return flex.sum(3*p*p + (c*c*p*p - 4*b*p*p) / ((b + I*p)**2))
      #return flex.sum(p - c*c / (I*I*p))
      #return flex.sum(p * (-c+p*I)*(c+p*I)/((p*I)**2))

    def d2f(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True

      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      return flex.sum(2*c*c*p*p / (p*I)**3)

    I = 10703  # flex.sum(max_P)
    mask = ref_P.as_1d() > 0
    p = ref_P.as_1d().select(mask)
    c = max_P.as_1d().select(mask)
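    # Newton-Raphson refinement of the intensity: I <- I - f'(I) / f''(I).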
    for i in range(10):
      I = I - df(I) / d2f(I)
      #v = I*p
      #I = flex.sum(c * p / v) / flex.sum(p*p / v)
      print I


    ff = []
    for I in range(9500, 11500):
      ff.append(f(I))
    print sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500
    from matplotlib import pylab
    pylab.plot(range(9500,11500), ff)
    pylab.show()
    #exit(0)

    #I = 10000
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)


    #I = 10100
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    #exit(0)


    print flex.sum(self.reference[0]['rs_shoebox'].data)
    print I_cal[0]

    # Calculate the z score
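    # If the fitted intensities and their variance estimates are unbiased,
    # Z = (I_cal - I_sim) / sigma should be close to standard normal
    # (mean ~0, variance ~1).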
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var))

    print len(I_cal)

    from matplotlib import pylab
    from mpl_toolkits.mplot3d import Axes3D
    #fig = pylab.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.scatter(X_pos, Y_pos, P_cor)

    #pylab.scatter(X_pos, P_cor)
    #pylab.scatter(Y_pos, P_cor)
    #pylab.scatter(Z_pos, P_cor)
    #pylab.hist(P_cor,100)
    #pylab.scatter(P_cor, (I_cal - I_exp) / I_exp)
    pylab.hist(Z, 100)
    #pylab.hist(I_cal,100)
    #pylab.hist(I_cal - I_sim, 100)
    pylab.show()
Code example #52
0
  def test_for_reference(self):
    from dials.array_family import flex
    from math import sqrt, pi

    # Integrate
    integration = self.experiments[0].profile.fitting_class()(self.experiments[0])

    # Integrate the reference profiles
    integration(self.experiments, self.reference)
    locator = integration.learner.locate()
    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference['rs_shoebox']
    eps = 1e-7
    for p in profiles:
      assert(abs(flex.sum(p.background) - 0) < eps)
    print 'OK'

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated, all=False)
    assert(mask.count(True) > 0)
    I_cal = self.reference['intensity.prf.value']
    I_var = self.reference['intensity.prf.variance']
    B_sim = self.reference['background.sim.a'].as_double()
    I_sim = self.reference['intensity.sim'].as_double()
    I_exp = self.reference['intensity.exp']
    P_cor = self.reference['profile.correlation']
    X_pos, Y_pos, Z_pos = self.reference['xyzcal.px'].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    max_ind = flex.max_index(flex.abs(I_cal-I_sim))
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]['rs_shoebox'].data
    max_C = self.reference[max_ind]['xyzcal.px']
    max_S = self.reference[max_ind]['shoebox'].data

    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    #def f(I):
      #mask = flex.bool(flex.grid(9,9,9), False)
      #for k in range(9):
        #for j in range(9):
          #for i in range(9):
            #dx = 5 * (i - 4.5) / 4.5
            #dy = 5 * (j - 4.5) / 4.5
            #dz = 5 * (k - 4.5) / 4.5
            #dd = sqrt(dx**2 + dy**2 + dz**2)
            #if dd <= 3:
              #mask[k,j,i] = True

      #mask = mask.as_1d() & (ref_P.as_1d() > 0)
      #p = ref_P.as_1d().select(mask)
      #c = max_P.as_1d().select(mask)
      #return flex.sum((c - I * p)**2 / (I * p))

    #ff = []
    #for I in range(9500, 11500):
      #ff.append(f(I))
    #print 'Old I: ', sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500

    #from matplotlib import pylab
    #pylab.plot(range(9500, 11500), ff)
    #pylab.show()

    #def estI(I):
      #mask = flex.bool(flex.grid(9,9,9), False)
      #for k in range(9):
        #for j in range(9):
          #for i in range(9):
            #dx = 5 * (i - 4.5) / 4.5
            #dy = 5 * (j - 4.5) / 4.5
            #dz = 5 * (k - 4.5) / 4.5
            #dd = sqrt(dx**2 + dy**2 + dz**2)
            #if dd <= 3:
              #mask[k,j,i] = True

      #mask = mask.as_1d() & (ref_P.as_1d() > 0)
      #p = ref_P.as_1d().select(mask)
      #c = max_P.as_1d().select(mask)
      #v = I * p
      #return flex.sum(c * p / v) / flex.sum(p*p/v)

    #def iterI(I0):
      #I = estI(I0)
      #print I
      #if abs(I - I0) < 1e-3:
        #return I
      #return iterI(I)

    #newI = iterI(10703)#flex.sum(max_P))
    #print "New I: ", newI

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var))
Code example #53
0
File: find_spots_server.py Project: dials/dials
        # Compute the profile model
        experiments = ProfileModelFactory.create(params, experiments, reference)

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        #print len(reflections)

        stats['integrated_intensity'] = flex.sum(reflections['intensity.sum.value'])
      except Exception, e:
        logger.error(e)
        stats['error'] = str(e)
      finally:
        t4 = time.time()
        logger.info('Integration took %.2f seconds' %(t4-t3))

  return stats

class handler(server_base.BaseHTTPRequestHandler):
  def do_GET(s):
    '''Respond to a GET request.'''
    s.send_response(200)
    s.send_header('Content-type', 'text/xml')
    s.end_headers()
Code example #54
0
  def __init__(self,measurements_orig, params, i_model, miller_set, result, out):
    measurements = measurements_orig.deep_copy()

    # Now manipulate the data to conform to unit cell, asu, and space group
    # of reference.  The resolution will be cut later.
    # Only works if there is NOT an indexing ambiguity!
    observations = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      ).map_to_asu()

    observations_original_index = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      )

    # Ensure that match_multi_indices() will return identical results
    # when a frame's observations are matched against the
    # pre-generated Miller set, self.miller_set, and the reference
    # data set, self.i_model.  The implication is that the same match
    # can be used to map Miller indices to array indices for intensity
    # accumulation, and for determination of the correlation
    # coefficient in the presence of a scaling reference.

    assert len(i_model.indices()) == len(miller_set.indices()) \
        and  (i_model.indices() ==
              miller_set.indices()).count(False) == 0

    matches = miller.match_multi_indices(
      miller_indices_unique=miller_set.indices(),
      miller_indices=observations.indices())

    pair1 = flex.int([pair[1] for pair in matches.pairs()])
    pair0 = flex.int([pair[0] for pair in matches.pairs()])
    # narrow things down to the set that matches, only
    observations_pair1_selected = observations.customized_copy(
      indices = flex.miller_index([observations.indices()[p] for p in pair1]),
      data = flex.double([observations.data()[p] for p in pair1]),
      sigmas = flex.double([observations.sigmas()[p] for p in pair1]),
    )
    observations_original_index_pair1_selected = observations_original_index.customized_copy(
      indices = flex.miller_index([observations_original_index.indices()[p] for p in pair1]),
      data = flex.double([observations_original_index.data()[p] for p in pair1]),
      sigmas = flex.double([observations_original_index.sigmas()[p] for p in pair1]),
    )
###################
    I_observed = observations_pair1_selected.data()
    MILLER = observations_original_index_pair1_selected.indices()
    ORI = result["current_orientation"][0]
    Astar = matrix.sqr(ORI.reciprocal_matrix())
    WAVE = result["wavelength"]
    BEAM = matrix.col((0.0,0.0,-1./WAVE))
    BFACTOR = 0.

    #calculation of correlation here
    I_reference = flex.double([i_model.data()[pair[0]] for pair in matches.pairs()])
    use_weights = False # New facility for getting variance-weighted correlation

    if use_weights:
       #variance weighting
      I_weight = flex.double(
        [1./(observations_pair1_selected.sigmas()[pair[1]])**2 for pair in matches.pairs()])
    else:
      I_weight = flex.double(len(observations_pair1_selected.sigmas()), 1.)

    """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
       include_negatives = True
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)

       include_negatives = False
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)
    """
    if params.include_negatives:
      SWC = simple_weighted_correlation(I_weight, I_reference, I_observed)
    else:
      non_positive = ( observations_pair1_selected.data() <= 0 )
      SWC = simple_weighted_correlation(I_weight.select(~non_positive),
            I_reference.select(~non_positive), I_observed.select(~non_positive))

    print >> out, "Old correlation is", SWC.corr
    if params.postrefinement.algorithm=="rs":
      Rhall = flex.double()
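      # Rh is the signed distance of each reciprocal-lattice point from the
      # Ewald sphere; the RMS of Rh gives an empirical starting value for RS,
      # the reciprocal of the effective mosaic domain size.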
      for mill in MILLER:
        H = matrix.col(mill)
        Xhkl = Astar*H
        Rh = ( Xhkl + BEAM ).length() - (1./WAVE)
        Rhall.append(Rh)
      Rs = math.sqrt(flex.mean(Rhall*Rhall))

      RS = 1./10000. # reciprocal effective domain size of 1 micron
      RS = Rs        # try this empirically determined approximate, monochrome, a-mosaic value
      current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

      parameterization_class = rs_parameterization
      refinery = rs_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed)

    elif params.postrefinement.algorithm=="eta_deff":
      eta_init = 2. * result["ML_half_mosaicity_deg"][0] * math.pi/180.
      D_eff_init = 2.*result["ML_domain_size_ang"][0]
      current = flex.double([SWC.slope, BFACTOR, eta_init, 0., 0.,D_eff_init,])

      parameterization_class = eta_deff_parameterization
      refinery = eta_deff_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed)

    func = refinery.fvec_callable(parameterization_class(current))
    functional = flex.sum(func*func)
    print >> out, "functional",functional
    self.current = current
    self.parameterization_class = parameterization_class
    self.refinery = refinery
    self.out = out
    self.params = params
    self.miller_set = miller_set
    self.observations_pair1_selected = observations_pair1_selected
    self.observations_original_index_pair1_selected = observations_original_index_pair1_selected
Code example #55
0
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detectors, computing detector statistics at the per-panel level (IE one statistic per panel)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the center of the reflection to the beam center
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the radial and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[pg.get_name()] = -1
        radial_rmsds[pg.get_name()] = -1
        transverse_rmsds[pg.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']  # [:71610]
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [pg_bc_dists[k] for k in sorted_keys]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()
Code example #56
0
File: tst_transform.py Project: biochem-fan/dials
    def tst_conservation_of_counts(self):

        from scitbx import matrix
        from random import uniform
        from dials.algorithms.profile_model.gaussian_rs import CoordinateSystem
        from dials.algorithms.profile_model.gaussian_rs import transform
        from scitbx.array_family import flex

        assert len(self.detector) == 1

        s0 = self.beam.get_s0()
        m2 = self.gonio.get_rotation_axis()
        s0_length = matrix.col(self.beam.get_s0()).length()

        # Create an s1 map
        s1_map = transform.beam_vector_map(self.detector[0], self.beam, True)

        for i in range(100):

            # Get random x, y, z
            x = uniform(300, 1800)
            y = uniform(300, 1800)
            z = uniform(0, 9)

            # Get random s1, phi, panel
            s1 = matrix.col(self.detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
            phi = self.scan.get_angle_from_array_index(z, deg=False)
            panel = 0

            # Calculate the bounding box
            bbox = self.calculate_bbox(s1, z, panel)
            x0, x1, y0, y1, z0, z1 = bbox

            # Create the coordinate system
            cs = CoordinateSystem(m2, s0, s1, phi)

            # The grid index generator
            step_size = self.delta_divergence / self.grid_size
            grid_index = transform.GridIndexGenerator(cs, x0, y0, (step_size, step_size), self.grid_size, s1_map)

            # Create the image
            # image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
            image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))
            mask = flex.bool(flex.grid(image.all()), False)
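            # Flag a pixel as usable only if all four of its corners map
            # inside the reciprocal-space grid; the first and last frames of
            # the shoebox are always excluded (k runs over 1 .. z1-z0-2).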
            for j in range(y1 - y0):
                for i in range(x1 - x0):
                    inside = False
                    gx00, gy00 = grid_index(j, i)
                    gx01, gy01 = grid_index(j, i + 1)
                    gx10, gy10 = grid_index(j + 1, i)
                    gx11, gy11 = grid_index(j + 1, i + 1)
                    mingx = min([gx00, gx01, gx10, gx11])
                    maxgx = max([gx00, gx01, gx10, gx11])
                    mingy = min([gy00, gy01, gy10, gy11])
                    maxgy = max([gy00, gy01, gy10, gy11])
                    if mingx >= 0 and maxgx < 2 * self.grid_size + 1 and mingy >= 0 and maxgy < 2 * self.grid_size + 1:
                        inside = True
                    for k in range(1, z1 - z0 - 1):
                        mask[k, j, i] = inside

            # Transform the image to the grid
            transformed = transform.TransformForward(self.spec, cs, bbox, 0, image.as_double(), mask)
            grid = transformed.profile()

            # Get the sums and ensure they're the same
            eps = 1e-7
            sum_grid = flex.sum(grid)
            sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
            assert abs(sum_grid - sum_image) <= eps

        # Test passed
        print "OK"
Code example #57
0
File: ascii_art.py Project: dials/dials
def spot_counts_per_image_plot(reflections, char='*', width=60, height=10):
  from dials.array_family import flex

  if len(reflections) == 0:
    return '\n'

  assert isinstance(char, basestring)
  assert len(char) == 1

  x,y,z = reflections['xyzobs.px.value'].parts()

  min_z = flex.min(z)
  max_z = flex.max(z)

  # image numbers to display on x-axis label
  xlab = (int(round(min_z + 0.5)), int(round(max_z + 0.5)))
  # estimate the total number of images
  image_count = xlab[1] - xlab[0] + 1

  z_range = max_z - min_z + 1
  if z_range <= 1:
    return '%i spots found on 1 image' % len(reflections)

  width = int(min(z_range, width))
  z_step = z_range / width
  z_bound = min_z + z_step - 0.5
# print [round(i * 10) / 10 for i in sorted(z)]

  counts = flex.double()

  sel = (z < z_bound)
  counts.append(sel.count(True))
# print 0, ('-', z_bound), sel.count(True)
  for i in range(1, width-1):
    sel = ((z >= z_bound) & (z < (z_bound + z_step)))
    counts.append(sel.count(True))
#   print i, (z_bound, z_bound + z_step), sel.count(True)
    z_bound += z_step
  sel = (z >= z_bound)
# print i + 1, (z_bound, '-'), sel.count(True)
  counts.append(sel.count(True))

  max_count = flex.max(counts)
  total_counts = flex.sum(counts)
  assert total_counts == len(z)
  counts *= (height/max_count)
  counts = counts.iround()

  rows = []
  rows.append('%i spots found on %i images (max %i / bin)' %(
    total_counts, image_count, max_count))

  for i in range(height, 0, -1):
    row = []
    for j, c in enumerate(counts):
      if c > (i - 1):
        row.append(char)
      else:
        row.append(' ')
    rows.append(''.join(row))

  padding = width - len(str(xlab[0])) - len(str(xlab[1]))
  rows.append('%i%s%i' % (xlab[0],
    (' ' if padding < 7 else 'image').center(padding),
    xlab[1]))
  return '\n'.join(rows)
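
# Usage sketch (not part of the original file; the filename is hypothetical —
# any reflection table with an 'xyzobs.px.value' column will do):
if __name__ == '__main__':
  from dials.array_family import flex
  refl = flex.reflection_table.from_pickle('strong.pickle')
  print spot_counts_per_image_plot(refl, char='*', width=60, height=10)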
Code example #58
0
File: tst_transform.py Project: biochem-fan/dials
    def tst_conservation_of_counts(self):

        from scitbx import matrix
        from random import uniform, seed
        from dials.algorithms.profile_model.gaussian_rs import CoordinateSystem
        from dials.algorithms.profile_model.gaussian_rs import transform
        from scitbx.array_family import flex

        seed(0)

        assert len(self.detector) == 1

        s0 = self.beam.get_s0()
        m2 = self.gonio.get_rotation_axis()
        s0_length = matrix.col(self.beam.get_s0()).length()

        # Create an s1 map
        s1_map = transform.beam_vector_map(self.detector[0], self.beam, True)

        for i in range(100):

            # Get random x, y, z
            x = uniform(300, 1800)
            y = uniform(300, 1800)
            z = uniform(500, 600)

            # Get random s1, phi, panel
            s1 = matrix.col(self.detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
            phi = self.scan.get_angle_from_array_index(z, deg=False)
            panel = 0

            # Calculate the bounding box
            bbox = self.calculate_bbox(s1, z, panel)
            x0, x1, y0, y1, z0, z1 = bbox

            # Create the coordinate system
            cs = CoordinateSystem(m2, s0, s1, phi)
            if abs(cs.zeta()) < 0.1:
                continue

            # The grid index generator
            step_size = self.delta_divergence / self.grid_size
            grid_index = transform.GridIndexGenerator(cs, x0, y0, (step_size, step_size), self.grid_size, s1_map)

            # Create the image
            # image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
            image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))
            mask = flex.bool(flex.grid(image.all()), False)
            for j in range(y1 - y0):
                for i in range(x1 - x0):
                    inside = False
                    gx00, gy00 = grid_index(j, i)
                    gx01, gy01 = grid_index(j, i + 1)
                    gx10, gy10 = grid_index(j + 1, i)
                    gx11, gy11 = grid_index(j + 1, i + 1)
                    mingx = min([gx00, gx01, gx10, gx11])
                    maxgx = max([gx00, gx01, gx10, gx11])
                    mingy = min([gy00, gy01, gy10, gy11])
                    maxgy = max([gy00, gy01, gy10, gy11])
                    if mingx >= 0 and maxgx < 2 * self.grid_size + 1 and mingy >= 0 and maxgy < 2 * self.grid_size + 1:
                        inside = True
                    for k in range(1, z1 - z0 - 1):
                        mask[k, j, i] = inside

            # Transform the image to the grid
            transformed = transform.TransformForwardNoModel(self.spec, cs, bbox, 0, image.as_double(), mask)
            grid = transformed.profile()

            # Get the sums and ensure they're the same
            eps = 1e-7
            sum_grid = flex.sum(grid)
            sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
            assert abs(sum_grid - sum_image) <= eps

            mask = flex.bool(flex.grid(image.all()), True)
            transformed = transform.TransformForwardNoModel(self.spec, cs, bbox, 0, image.as_double(), mask)
            grid = transformed.profile()

            # Boost the bbox to make sure all intensity is included
            x0, x1, y0, y1, z0, z1 = bbox
            bbox2 = (x0 - 10, x1 + 10, y0 - 10, y1 + 10, z0 - 10, z1 + 10)

            # Do the reverse transform
            transformed = transform.TransformReverseNoModel(self.spec, cs, bbox2, 0, grid)
            image2 = transformed.profile()

            # Check the sum of pixels are the same
            sum_grid = flex.sum(grid)
            sum_image = flex.sum(image2)
            assert abs(sum_grid - sum_image) <= eps

            # Do the reverse transform
            transformed = transform.TransformReverseNoModel(self.spec, cs, bbox, 0, grid)
            image2 = transformed.profile()

            from dials.algorithms.statistics import pearson_correlation_coefficient

            cc = pearson_correlation_coefficient(image.as_1d().as_double(), image2.as_1d())
            assert cc >= 0.99
            # if cc < 0.99:
            #   print cc, bbox
            #   from matplotlib import pylab
            #   pylab.plot(image.as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
            #   pylab.show()
            #   pylab.plot(image2.as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
            #   pylab.show()
            #   pylab.plot((image.as_double()-image2).as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
            #   pylab.show()

        # Test passed
        print "OK"
Code example #59
0
File: fit.py Project: dials/dials_scratch
    #pylab.imshow(scale_data.as_numpy_array(), vmin=0, vmax=2, interpolation='none')
    #pylab.colorbar()
    #pylab.savefig("scale_%d.png" % frame)
    #pylab.clf()
    ##pylab.show()
    #exit(0)

    #pylab.hist(scale_data.as_1d().select(scale_mask.as_1d()).as_numpy_array(),
    #           bins=100)
    #pylab.show()

    sd1 = scale_data.as_1d()
    sm1 = scale_mask.as_1d()
    scale_min = flex.min(sd1.select(sm1))
    scale_max = flex.max(sd1.select(sm1))
    scale_avr = flex.sum(sd1.select(sm1)) / sm1.count(True)

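    # Per-pixel background estimate: the static background model scaled by
    # the fitted per-frame scale image.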
    background = model_data * scale_data

    reflections['shoebox'].select(indices).apply_pixel_data(
      data.as_double(),
      background,
      raw_mask,
      frame,
      1)

    subset = reflections.select(indices3)
    if len(subset) > 0:
      subset.compute_summed_intensity()
      subset.compute_centroid(experiments)
      reflections.set_selected(indices3, subset)
Code example #60
0
File: mb.py Project: dials/dials_scratch
    #  (3,3),
    #  6,3,0,2)
    #new_mask = flex.bool(mask.accessor())
    #threshold(data, mask, new_mask)
    #mask = mask & (~new_mask)

    import cPickle as pickle
    pickle.dump((data, mask), open("first_image.pickle", "wb"))
    exit(0)
    m = (mask == True).as_1d().as_int()
    x = data.as_double() * m.as_double()
    sum_background += x
    sum_sq_background += x * x
    count += m

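    # Running average of the masked background counts accumulated so far.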
    average = flex.sum(sum_background) / flex.sum(count)

    print "Image %d: selected %d reflections, avr=%f" % (
      frame,
      len(subset),
      average)

    # from matplotlib import pylab
    # pylab.imshow((count > 0).as_numpy_array())
    # pylab.show()

  average = flex.double(len(sum_background))
  variance = flex.double(len(sum_background))
  count_mask = count > 1
  indices = flex.size_t(range(len(mask))).select(count_mask.as_1d())
  from matplotlib import pylab