Code example #1
def filter_ice(reflections, steps):
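    """Assess the effect of ice-ring filter width on a reflection table.

    Scans a range of PowderRingFilter widths, counts the spots (and, if
    present, sums the intensity) remaining after removing reflections on
    hexagonal-ice powder rings, and saves the plot to ice_ring_filtering.png.
    """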

    from cctbx import miller, sgtbx, uctbx
    from dials.array_family import flex
    from matplotlib import pyplot as plt

    d_spacings = 1 / reflections["rlp"].norms()
    d_star_sq = uctbx.d_as_d_star_sq(d_spacings)

    from dials.algorithms.spot_finding.per_image_analysis import ice_rings_selection

    from dials.algorithms.integration import filtering

    ice_uc = uctbx.unit_cell((4.498, 4.498, 7.338, 90, 90, 120))
    ice_sg = sgtbx.space_group_info(number=194).group()
    ice_generator = miller.index_generator(ice_uc, ice_sg.type(), False,
                                           flex.min(d_spacings))
    ice_indices = ice_generator.to_array()
    ice_d_spacings = flex.sorted(ice_uc.d(ice_indices))
    ice_d_star_sq = uctbx.d_as_d_star_sq(ice_d_spacings)

    cubic_ice_uc = uctbx.unit_cell((6.358, 6.358, 6.358, 90, 90, 90))
    cubic_ice_sg = sgtbx.space_group_info(number=227).group()
    cubic_ice_generator = miller.index_generator(cubic_ice_uc,
                                                 cubic_ice_sg.type(), False,
                                                 flex.min(d_spacings))
    cubic_ice_indices = cubic_ice_generator.to_array()
    cubic_ice_d_spacings = flex.sorted(cubic_ice_uc.d(cubic_ice_indices))
    cubic_ice_d_star_sq = uctbx.d_as_d_star_sq(cubic_ice_d_spacings)

    import numpy

    widths = flex.double(numpy.geomspace(start=0.0001, stop=0.01, num=steps))
    n_spots = flex.double()
    total_intensity = flex.double()
    for width in widths:
        d_min = flex.min(d_spacings)

        ice_filter = filtering.PowderRingFilter(ice_uc, ice_sg, d_min, width)
        ice_sel = ice_filter(d_spacings)

        n_spots.append(ice_sel.count(False))
        if "intensity.sum.value" in reflections:
            total_intensity.append(
                flex.sum(reflections["intensity.sum.value"].select(~ice_sel)))

    fig, axes = plt.subplots(nrows=2, figsize=(12, 8), sharex=True)
    axes[0].plot(widths, n_spots, label="#spots", marker="+")
    if total_intensity.size():
        axes[1].plot(widths,
                     total_intensity,
                     label="total intensity",
                     marker="+")
    axes[0].set_ylabel("# spots remaining")
    axes[1].set_xlabel("Ice ring width (1/d^2)")
    axes[1].set_ylabel("Total intensity")
    for ax in axes:
        ax.set_xlim(0, flex.max(widths))
    plt.savefig("ice_ring_filtering.png")
    plt.clf()
    return
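A minimal driver sketch for the function above, assuming a recent DIALS installation where flex.reflection_table.from_file is available and a reflection table that already carries an "rlp" column; the file name strong.refl is a placeholder:

from dials.array_family import flex

# Load a reflection table (placeholder file name); it must provide an "rlp"
# column, and optionally "intensity.sum.value" for the intensity plot.
reflections = flex.reflection_table.from_file("strong.refl")

# Scan 50 filter widths and write ice_ring_filtering.png.
filter_ice(reflections, steps=50)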
Code example #2
File: filtering.py  Project: jbeilstenedmands/dials
    def __init__(self, unit_cell, space_group, d_min, width):
        """
        Initialise the filter.

        :param unit_cell: The unit_cell of the powder rings
        :param space_group: The space group of the powder rings
        :param d_min: The maximum resolution to filter to
        :param width: The resolution width to filter around
        """
        assert d_min > 0
        assert width > 0

        # Correct unit cell
        unit_cell = space_group.average_unit_cell(unit_cell)

        self.half_width = width / 2.0
        d_min = uctbx.d_star_sq_as_d(
            uctbx.d_as_d_star_sq(d_min) + self.half_width)

        # Generate a load of indices
        generator = index_generator(unit_cell, space_group.type(), False,
                                    d_min)
        indices = generator.to_array()

        # Compute d spacings and sort by resolution
        self.d_star_sq = flex.sorted(unit_cell.d_star_sq(indices))
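Code example #1 shows how this filter is meant to be used: the constructed object is called on an array of d-spacings and returns a boolean selection that is True for reflections lying within half a width of a powder ring. A hedged sketch of that usage (the width value is an arbitrary choice from the range scanned in code example #1):

from cctbx import sgtbx, uctbx
from dials.algorithms.integration import filtering
from dials.array_family import flex

# d-spacings of the observed spots, e.g. 1 / reflections["rlp"].norms()
d_spacings = flex.double([3.91, 3.67, 3.44, 2.67, 2.25, 1.92])

# Hexagonal ice unit cell and space group, as in code example #1
ice_uc = uctbx.unit_cell((4.498, 4.498, 7.338, 90, 90, 120))
ice_sg = sgtbx.space_group_info(number=194).group()

# The width is in units of 1/d^2 (see the x-axis label in code example #1)
ice_filter = filtering.PowderRingFilter(ice_uc, ice_sg, flex.min(d_spacings), 0.004)
ice_sel = ice_filter(d_spacings)            # True where a spot sits on an ice ring
d_spacings_kept = d_spacings.select(~ice_sel)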
Code example #3
    def __init__(self,
                 d_spacings,
                 target_n_per_bin=25,
                 max_slots=40,
                 min_slots=20):
        d_spacings = flex.double(list(set(d_spacings)))
        d_spacings_sorted = flex.sorted(d_spacings, reverse=True)
        d_star_cubed_sorted = flex.pow(1 / d_spacings_sorted, 3)

        # choose the bin volume such that the lowest resolution shell contains
        # 5% of the spots or target_n_per_bin, whichever is greater, capped at
        # 25% of the spots
        low_res_count = int(
            math.ceil(
                min(
                    max(target_n_per_bin, 0.05 * len(d_spacings)),
                    0.25 * len(d_spacings),
                )))
        bin_step = d_star_cubed_sorted[low_res_count] - d_star_cubed_sorted[0]
        assert bin_step > 0
        n_slots = int(
            math.ceil(
                (d_star_cubed_sorted[-1] - d_star_cubed_sorted[0]) / bin_step))

        if max_slots is not None:
            n_slots = min(n_slots, max_slots)
        if min_slots is not None:
            n_slots = max(n_slots, min_slots)
        bin_step = (d_star_cubed_sorted[-1] - d_star_cubed_sorted[0]) / n_slots

        self.bins = []
        ds3_max = d_star_cubed_sorted[0]
        for i in range(n_slots):
            ds3_min = d_star_cubed_sorted[0] + (i + 1) * bin_step
            self.bins.append(Slot(1 / ds3_min**(1 / 3), 1 / ds3_max**(1 / 3)))
            ds3_max = ds3_min
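The constructor above bins reflections into slots of equal width in d*^3, which is proportional to reciprocal-space volume, so for roughly uniformly distributed spots each slot holds a similar number of reflections; the first, lowest-resolution slot is sized to hold the larger of target_n_per_bin and 5% of the spots (capped at 25%). A small illustrative sketch (plain Python, hypothetical numbers) of what equal d*^3 steps look like when converted back to d-spacings:

# Equal steps in d*^3 give bin edges that are wide in d at low resolution and
# narrow at high resolution, i.e. roughly equal reciprocal-space volume per bin.
d_max, d_min, n_slots = 50.0, 2.0, 5
ds3_lo, ds3_hi = (1 / d_max) ** 3, (1 / d_min) ** 3
step = (ds3_hi - ds3_lo) / n_slots
edges_d = [(ds3_lo + i * step) ** (-1 / 3) for i in range(n_slots + 1)]
print([round(d, 2) for d in edges_d])   # approximately [50.0, 3.42, 2.71, 2.37, 2.15, 2.0]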
Code example #4
File: filtering.py  Project: kek-pf-mx/dials
    def __init__(self, unit_cell, space_group, d_min, width):
        '''
        Initialise the filter.

        :param unit_cell: The unit_cell of the powder rings
        :param space_group: The space group of the powder rings
        :param d_min: The maximum resolution to filter to
        :param width: The resolution width to filter around
        '''
        from cctbx.miller import index_generator
        from dials.array_family import flex
        assert (d_min > 0)
        assert (width > 0)

        # Correct unit cell
        unit_cell = space_group.average_unit_cell(unit_cell)

        # Generate a load of indices
        generator = index_generator(unit_cell, space_group.type(), False,
                                    d_min)
        indices = generator.to_array()

        # Compute d spacings and sort by resolution
        self.d_star_sq = flex.sorted(unit_cell.d_star_sq(indices))
        self.half_width = width / 2.0
Code example #5
File: filtering.py  Project: biochem-fan/dials
  def __init__(self, unit_cell, space_group, d_min, width):
    '''
    Initialise the filter.

    :param unit_cell: The unit_cell of the powder rings
    :param space_group: The space group of the powder rings
    :param d_min: The maximum resolution to filter to
    :param width: The resolution width to filter around

    '''
    from cctbx.miller import index_generator
    from dials.array_family import flex
    assert(d_min > 0)
    assert(width > 0)

    # Correct unit cell
    unit_cell = space_group.average_unit_cell(unit_cell)

    # Generate a load of indices
    generator = index_generator(unit_cell, space_group.type(), False, d_min)
    indices = generator.to_array()

    # Compute d spacings and sort by resolution
    self.d_spacings = flex.sorted(unit_cell.d(indices))
    self.half_width = width / 2.0
Code example #6
  def __init__(self, d_spacings, target_n_per_bin=25, max_slots=40, min_slots=20):
    d_spacings_sorted = flex.sorted(d_spacings, reverse=True)
    d_star_cubed_sorted = flex.pow(1/d_spacings_sorted, 3)

    # choose the bin volume such that the lowest resolution shell contains 5% of
    # the spots or target_n_per_bin, whichever is greater, capped at 25% of the
    # spots
    low_res_count = int(
      math.ceil(min(max(target_n_per_bin, 0.05*len(d_spacings)),
                    0.25*len(d_spacings))))
    bin_step = d_star_cubed_sorted[low_res_count] - d_star_cubed_sorted[0]
    n_slots = int(
      math.ceil((d_star_cubed_sorted[-1] - d_star_cubed_sorted[0])/bin_step))

    #n_slots = len(d_spacings_sorted)//target_n_per_bin
    if max_slots is not None:
      n_slots = min(n_slots, max_slots)
    if min_slots is not None:
      n_slots = max(n_slots, min_slots)
    bin_step = (d_star_cubed_sorted[-1] - d_star_cubed_sorted[0])/n_slots

    self.bins = []
    ds3_max = d_star_cubed_sorted[0]
    for i in range(n_slots):
      ds3_min = d_star_cubed_sorted[0] + (i+1) * bin_step
      self.bins.append(slot(1/ds3_min**(1/3), 1/ds3_max**(1/3)))
      ds3_max = ds3_min
Code example #7
File: error_modifier_ev11.py  Project: dials/cctbx
  def distribute_deltas_over_ranks(self):
    '''Use alltoall to accumulate all deltas of one delta bin at a single rank'''
    new_delta_bins = self.mpi_helper.comm.alltoall(self.delta_bins)

    self.deltas = flex.double()
    for delta_bin in new_delta_bins:
      self.deltas.extend(delta_bin)

    self.deltas = flex.sorted(self.deltas)

    self.logger.log("New deltas count: %d"%self.deltas.size())
Code example #8
 def __init__(self, d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5):
     n_slots = len(d_star_sq) // target_n_per_bin
     if max_slots is not None:
         n_slots = min(n_slots, max_slots)
     if min_slots is not None:
         n_slots = max(n_slots, min_slots)
     self.bins = []
     n_per_bin = len(d_star_sq) / n_slots
     d_star_sq_sorted = flex.sorted(d_star_sq)
     d_sorted = uctbx.d_star_sq_as_d(d_star_sq_sorted)
     d_max = d_sorted[0]
     for i in range(n_slots):
         d_min = d_sorted[nint((i + 1) * n_per_bin) - 1]
         self.bins.append(Slot(d_min, d_max))
         d_max = d_min
Code example #9
 def __init__(self, d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5):
   from libtbx.math_utils import nearest_integer as nint
   n_slots = len(d_star_sq)//target_n_per_bin
   if max_slots is not None:
     n_slots = min(n_slots, max_slots)
   if min_slots is not None:
     n_slots = max(n_slots, min_slots)
   self.bins = []
   n_per_bin = len(d_star_sq)/n_slots
   d_star_sq_sorted = flex.sorted(d_star_sq)
   d_sorted = uctbx.d_star_sq_as_d(d_star_sq_sorted)
   d_max = d_sorted[0]
   for i in range(n_slots):
     d_min = d_sorted[nint((i+1)*n_per_bin)-1]
     self.bins.append(slot(d_min, d_max))
     d_max = d_min
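Code examples #8 and #9 above take the complementary approach to #3 and #6: rather than equal-volume slots, each slot holds approximately the same number of reflections, with bin edges read directly off the sorted d values. A plain-Python sketch of the same edge selection, using hypothetical values and a stand-in for libtbx.math_utils.nearest_integer:

from math import floor

def nint(x):
    # Stand-in for libtbx.math_utils.nearest_integer, used only for illustration
    return int(floor(x + 0.5))

d_sorted = [4.0, 3.5, 3.1, 2.8, 2.6, 2.4, 2.3, 2.2, 2.1, 2.0]  # descending d
n_slots = 5
n_per_bin = len(d_sorted) / n_slots
edges = [d_sorted[nint((i + 1) * n_per_bin) - 1] for i in range(n_slots)]
print(edges)   # [3.5, 2.8, 2.4, 2.2, 2.0]: each slot spans about 2 reflections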
Code example #10
 def calculate_sorted_deviations(self, parameters):
     """Sort the x,y data."""
     sigmaprime = calc_sigmaprime(parameters, self.filtered_Ih_table)
     delta_hl = calc_deltahl(self.filtered_Ih_table,
                             self.filtered_Ih_table.calc_nh(), sigmaprime)
     norm = normal_distribution()
     n = len(delta_hl)
     if n <= 10:
         a = 3 / 8
     else:
         a = 0.5
     self.sortedy = flex.sorted(flex.double(delta_hl))
     self.sortedx = flex.double(
         [norm.quantile((i + 1 - a) / (n + 1 - (2 * a))) for i in range(n)])
     central_sel = (self.sortedx < 1.5) & (self.sortedx > -1.5)
     self.sortedx = self.sortedx.select(central_sel)
     self.sortedy = self.sortedy.select(central_sel)
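The sorted x values above are the standard normal plotting positions (i + 1 - a) / (n + 1 - 2a), with a = 3/8 for n <= 10 and a = 0.5 otherwise. A standalone sketch of the same calculation, with scipy swapped in for the normal_distribution object used here:

import numpy as np
from scipy.stats import norm

def normal_plotting_positions(n):
    # Approximate expected order statistics of n standard normal samples
    a = 3 / 8 if n <= 10 else 0.5
    probs = (np.arange(1, n + 1) - a) / (n + 1 - 2 * a)
    return norm.ppf(probs)

# Plot sorted, normalised deviations against these values; points close to the
# line y = x indicate the deviations follow a standard normal distribution.
x = normal_plotting_positions(1000)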
Code example #11
  def plot_data_by_two_theta(self, reflections, tag):
    n_bins = 30
    arbitrary_padding = 1
    sorted_two_theta = flex.sorted(reflections['two_theta_obs'])
    bin_low = [sorted_two_theta[int((len(sorted_two_theta)/n_bins) * i)] for i in xrange(n_bins)]
    bin_high = [bin_low[i+1] for i in xrange(n_bins-1)]
    bin_high.append(sorted_two_theta[-1]+arbitrary_padding)

    title = "%sBinned data by two theta (n reflections per bin: %.1f)"%(tag, len(sorted_two_theta)/n_bins)

    x = flex.double()
    x_centers = flex.double()
    n_refls = flex.double()
    rmsds = flex.double()
    radial_rmsds = flex.double()
    transverse_rmsds = flex.double()
    rt_ratio = flex.double()
    #delta_two_theta = flex.double()
    rmsd_delta_two_theta = flex.double()

    for i in xrange(n_bins):
      x_centers.append(((bin_high[i]-bin_low[i])/2) + bin_low[i])
      refls = reflections.select((reflections['two_theta_obs'] >= bin_low[i]) & (reflections['two_theta_obs'] < bin_high[i]))
      n = len(refls)
      n_refls.append(n)
      rmsds.append(1000*math.sqrt(flex.sum_sq(refls['difference_vector_norms'])/n))
      radial_rmsds.append(1000*math.sqrt(flex.sum_sq(refls['radial_displacements'])/n))
      transverse_rmsds.append(1000*math.sqrt(flex.sum_sq(refls['transverse_displacements'])/n))
      rt_ratio.append(radial_rmsds[-1]/transverse_rmsds[-1])
      rmsd_delta_two_theta.append(math.sqrt(flex.sum_sq(refls['two_theta_obs']-refls['two_theta_cal'])/n))
      #delta_two_theta.append(flex.mean(refls['two_theta_obs']-refls['two_theta_cal']))
    assert len(reflections) == flex.sum(n_refls)

    self.plot_multi_data(x_centers,
                         [rt_ratio, (rmsds, radial_rmsds, transverse_rmsds), rmsd_delta_two_theta],
                         "Two theta (degrees)",
                         ["R/T RMSD ratio",
                          ("Overall RMSD","Radial RMSD","Transverse RMSD"),
                          "RMSD delta two theta"],
                         ["R/T RMSD ratio",
                          "Overall, radial, transverse RMSD (microns)",
                          "Delta two theta RMSD (degrees)"],
                         title)
Code example #12
File: cc_half_algorithm.py  Project: kmdalton/dials
    def read_mtzfile(filename, batch_offset=None):
        """
        Read the mtz file
        """
        miller_arrays = mtz.object(file_name=filename).as_miller_arrays(
            merge_equivalents=False)

        # Select the desired columns
        intensities = None
        batches = None
        for array in miller_arrays:
            if array.info().labels == ["I", "SIGI"]:
                intensities = array
            if array.info().labels == ["BATCH"]:
                batches = array
        if not intensities:
            raise KeyError(
                "Intensities not found in mtz file, expected labels I, SIGI")
        if not batches:
            raise KeyError("Batch values not found")
        if batches.data().size() != intensities.data().size():
            raise ValueError("Batch and intensity array sizes do not match")

        # Get the unit cell and space group
        unit_cell = intensities.unit_cell()
        space_group = intensities.crystal_symmetry().space_group()

        # The reflection data
        table = flex.reflection_table()
        table["miller_index"] = intensities.indices()
        table["intensity"] = intensities.data()
        table["variance"] = flex.pow2(intensities.sigmas())

        # Create unit cell list
        zeroed_batches = batches.data() - flex.min(batches.data())
        dataset = flex.int(table.size(), 0)
        sorted_batches = flex.sorted(zeroed_batches)
        sel_perm = flex.sort_permutation(zeroed_batches)

        if not batch_offset:
            previous = 0
            potential_batch_offsets = flex.double()
            for i, b in enumerate(sorted_batches):
                if b - previous > 1:
                    potential_batch_offsets.append(b - previous)
                previous = b
            potential = flex.sorted(potential_batch_offsets)
            # potential is a list of low numbers (where images may not have any spots)
            # and larger numbers between batches.
            if len(potential) == 1:
                batch_offset = potential[0]
                logger.info(
                    """
Using a batch offset of %s to split datasets.
Batch offset can be specified with mtz.batch_offset=
""",
                    batch_offset,
                )
            elif len(potential) > 1:
                diffs = flex.double([
                    potential[i + 1] - p for i, p in enumerate(potential[:-1])
                ])
                i = flex.sort_permutation(diffs)[-1]
                batch_offset = int(potential[i + 1] - (0.2 * diffs[i]))
                logger.info(
                    """
Using an approximate batch offset of %s to split datasets.
Batch offset can be specified with mtz.batch_offset=
""",
                    batch_offset,
                )
            else:
                batch_offset = 1

        previous = 0
        dataset_no = 0
        for i, b in enumerate(sorted_batches):
            if b - previous > batch_offset - 1:
                dataset_no += 1
            dataset[i] = dataset_no
            previous = b

        table["dataset"] = flex.int(table.size(), 0)
        table["dataset"].set_selected(sel_perm, dataset)

        return table, unit_cell, space_group
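A hedged usage sketch for the reader above (in the original file it is defined inside a class, so it would be reached through that class; the file name scaled_unmerged.mtz is a placeholder):

from dials.array_family import flex

table, unit_cell, space_group = read_mtzfile("scaled_unmerged.mtz")
# The returned table carries "miller_index", "intensity", "variance" and
# "dataset" columns as set up above.
n_datasets = flex.max(table["dataset"]) + 1
print("Read %d reflections in %d dataset(s)" % (table.size(), n_datasets))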
Code example #13
    def _calc_obs_data(self, reflections, experiments):
        """Calculates a set of low resolution observations to try to match to
        indices. Each observation will record its d* value as well as
        tolerated d* bands and a 'clock angle'"""

        spot_d_star = reflections["rlp"].norms()
        if self._params.candidate_spots.limit_resolution_by == "n_spots":
            n_spots = self._params.candidate_spots.n_spots
            n_spots = min(n_spots, len(reflections) - 1)
            d_star_max = flex.sorted(spot_d_star)[n_spots - 1]
            self._params.candidate_spots.d_min = 1.0 / d_star_max

        # First select low resolution spots only
        spot_d_star = reflections["rlp"].norms()
        d_star_max = 1.0 / self._params.candidate_spots.d_min
        sel = spot_d_star <= d_star_max
        self.spots = reflections.select(sel)
        self.spots["d_star"] = spot_d_star.select(sel)

        # XXX In what circumstance might there be more than one experiment?
        detector = experiments.detectors()[0]
        beam = experiments.beams()[0]

        # Lab coordinate of the beam centre, using the first spot's panel
        panel = detector[self.spots[0]["panel"]]
        bc = panel.get_ray_intersection(beam.get_s0())
        bc_lab = panel.get_lab_coord(bc)

        # Lab coordinate of each spot
        spot_lab = flex.vec3_double(len(self.spots))
        pnl_ids = set(self.spots["panel"])
        for pnl in pnl_ids:
            sel = self.spots["panel"] == pnl
            panel = detector[pnl]
            obs = self.spots["xyzobs.mm.value"].select(sel)
            x_mm, y_mm, _ = obs.parts()
            spot_lab.set_selected(
                sel, panel.get_lab_coord(flex.vec2_double(x_mm, y_mm)))

        # Radius vectors for each spot
        radius = spot_lab - bc_lab

        # Usually the radius vectors would all be in a single plane, but this might
        # not be the case if the spots are on different panels. To put them on the
        # same plane, project onto fast/slow of the panel used to get the beam
        # centre
        df = flex.vec3_double(len(self.spots), detector[0].get_fast_axis())
        ds = flex.vec3_double(len(self.spots), detector[0].get_slow_axis())
        clock_dirs = (radius.dot(df) * df +
                      radius.dot(ds) * ds).each_normalize()

        # From this, find positive angles of each vector around a clock, using the
        # fast axis as 12 o'clock
        angs = clock_dirs.angle(detector[0].get_fast_axis())
        dots = clock_dirs.dot(detector[0].get_slow_axis())
        sel = dots < 0  # select directions in the second half of the clock face
        angs.set_selected(sel, (TWO_PI - angs.select(sel)))
        self.spots["clock_angle"] = angs

        # Project radius vectors onto fast/slow of the relevant panels
        df = flex.vec3_double(len(self.spots))
        ds = flex.vec3_double(len(self.spots))
        for pnl in pnl_ids:
            sel = self.spots["panel"] == pnl
            panel = detector[pnl]
            df.set_selected(sel, panel.get_fast_axis())
            ds.set_selected(sel, panel.get_slow_axis())
        panel_dirs = (radius.dot(df) * df +
                      radius.dot(ds) * ds).each_normalize()

        # Calc error along each panel direction with simple error propagation
        # that assumes no covariance between x and y centroid errors.
        x = panel_dirs.dot(df)
        y = panel_dirs.dot(ds)
        x2, y2 = flex.pow2(x), flex.pow2(y)
        r2 = x2 + y2
        sig_x2, sig_y2, _ = self.spots["xyzobs.mm.variance"].parts()
        var_r = (x2 / r2) * sig_x2 + (y2 / r2) * sig_y2
        sig_r = flex.sqrt(var_r)

        # Pixel coordinates at limits of the band
        tol = self._params.candidate_spots.d_star_tolerance
        outer_spot_lab = spot_lab + panel_dirs * (tol * sig_r)
        inner_spot_lab = spot_lab - panel_dirs * (tol * sig_r)

        # Set d* at band limits
        inv_lambda = 1.0 / beam.get_wavelength()
        s1_outer = outer_spot_lab.each_normalize() * inv_lambda
        s1_inner = inner_spot_lab.each_normalize() * inv_lambda
        self.spots["d_star_outer"] = (s1_outer - beam.get_s0()).norms()
        self.spots["d_star_inner"] = (s1_inner - beam.get_s0()).norms()
        self.spots["d_star_band2"] = flex.pow2(self.spots["d_star_outer"] -
                                               self.spots["d_star_inner"])
Code example #14
        def plotit(reflections, experiments):
            """
      Make the plots for a set of reflections and experiments.
      """
            detector = experiments.detectors()[0]
            beam = experiments.beams()[
                0]  # only used to compute resolution of 2theta
            reflections = reflections.select(
                reflections['intensity.sum.variance'] > 0)

            # Setup up deltaXY and two theta bins
            reflections['difference_vector_norms'] = (
                reflections['xyzcal.mm'] -
                reflections['xyzobs.mm.value']).norms()
            reflections = setup_stats(
                detector, experiments, reflections,
                two_theta_only=True)  # add two theta to reflection table
            sorted_two_theta = flex.sorted(reflections['two_theta_obs'])
            bin_low = [
                sorted_two_theta[int((len(sorted_two_theta) / n_bins) * i)]
                for i in range(n_bins)
            ]
            bin_high = [bin_low[i + 1] for i in range(n_bins - 1)]
            bin_high.append(sorted_two_theta[-1] + arbitrary_padding)

            x_centers = flex.double()
            n_refls = flex.int()
            rmsds = flex.double()
            p25r = flex.double()
            p50r = flex.double()
            p75r = flex.double()
            p25i = flex.double()
            p50i = flex.double()
            p75i = flex.double()
            print("# 2theta Res N dXY IsigI")

            # Compute stats for each bin
            for i in range(n_bins):
                refls = reflections.select(
                    (reflections['two_theta_obs'] >= bin_low[i])
                    & (reflections['two_theta_obs'] < bin_high[i]))
                # Only compute deltaXY stats on reflections with I/sigI at least 5
                i_sigi = refls['intensity.sum.value'] / flex.sqrt(
                    refls['intensity.sum.variance'])
                refls = refls.select(i_sigi >= 5)
                n = len(refls)
                if n < 10: continue
                min_r, q1_r, med_r, q3_r, max_r = five_number_summary(
                    1000 * refls['difference_vector_norms'])

                n_refls.append(n)

                rmsds_ = 1000 * math.sqrt(
                    flex.sum_sq(refls['difference_vector_norms']) / n)

                min_i, q1_i, med_i, q3_i, max_i = five_number_summary(i_sigi)
                p25i.append(q1_i)
                p50i.append(med_i)
                p75i.append(q3_i)
                # x_center
                c = ((bin_high[i] - bin_low[i]) / 2) + bin_low[i]
                # resolution
                d = beam.get_wavelength() / (2 * math.sin(math.pi * c /
                                                          (2 * 180)))
                x_centers.append(c)
                rmsds.append(rmsds_)
                print("%d % 5.1f % 5.1f % 8d %.1f %.1f" %
                      (i, c, d, n, med_r, med_i))
                p25r.append(q1_r)
                p50r.append(med_r)
                p75r.append(q3_r)

            # After binning, plot the results
            for plot in figures:
                ax1 = figures[plot]['ax1']
                ax2 = figures[plot]['ax2']
                if plot == 'isigi':
                    line, = ax1.plot(x_centers.as_numpy_array(),
                                     p50i.as_numpy_array(), '-')
                    line.set_label('Median')
                    ax1.fill_between(x_centers.as_numpy_array(),
                                     p25i.as_numpy_array(),
                                     p75i.as_numpy_array(),
                                     interpolate=True,
                                     alpha=0.50,
                                     color=line.get_color())
                    line, = ax2.plot(x_centers.as_numpy_array(),
                                     n_refls.as_numpy_array(),
                                     '-',
                                     color=line.get_color())
                    line.set_label('Median')
                elif plot == 'deltaXY':
                    line, = ax1.plot(x_centers.as_numpy_array(),
                                     p50r.as_numpy_array(), '-')
                    line.set_label('Median')
                    ax1.fill_between(x_centers.as_numpy_array(),
                                     p25r.as_numpy_array(),
                                     p75r.as_numpy_array(),
                                     interpolate=True,
                                     alpha=0.50,
                                     color=line.get_color())
                    line, = ax2.plot(x_centers.as_numpy_array(),
                                     n_refls.as_numpy_array(),
                                     '-',
                                     color=line.get_color())
                    line.set_label('Median')
                ax1.legend()
                ax2.legend()
Code example #15
File: plots.py  Project: dwpaley/dials
def normal_probability_plot(data):
    """Plot the distribution of normal probabilities of errors."""
    norm = distributions.normal_distribution()

    n = len(data["delta_hl"])
    if n <= 10:
        a = 3 / 8
    else:
        a = 0.5

    y = flex.sorted(flex.double(data["delta_hl"]))
    x = [norm.quantile((i + 1 - a) / (n + 1 - (2 * a))) for i in range(n)]

    H, xedges, yedges = np.histogram2d(np.array(x),
                                       y.as_numpy_array(),
                                       bins=(200, 200))
    nonzeros = np.nonzero(H)
    z = np.empty(H.shape)
    z[:] = np.nan
    z[nonzeros] = H[nonzeros]

    # also make a histogram
    histy = flex.histogram(y, n_slots=100)
    # make a gaussian for reference also
    n = y.size()
    width = histy.slot_centers()[1] - histy.slot_centers()[0]
    gaussian = [
        n * width * math.exp(-(sc**2) / 2.0) / ((2.0 * math.pi)**0.5)
        for sc in histy.slot_centers()
    ]

    return {
        "normal_distribution_plot": {
            "data": [
                {
                    "x": xedges.tolist(),
                    "y": yedges.tolist(),
                    "z": z.transpose().tolist(),
                    "type": "heatmap",
                    "name": "normalised deviations",
                    "colorbar": {
                        "title": "Number of reflections",
                        "titleside": "right",
                    },
                    "colorscale": "Jet",
                },
                {
                    "x": [-5, 5],
                    "y": [-5, 5],
                    "type": "scatter",
                    "mode": "lines",
                    "name": "z = m",
                    "color": "rgb(0,0,0)",
                },
            ],
            "layout": {
                "title": "Normal probability plot with error model applied",
                "xaxis": {
                    "anchor": "y",
                    "title": "Order statistic medians, m"
                },
                "yaxis": {
                    "anchor": "x",
                    "title": "Ordered responses, z"
                },
            },
            "help":
            """\
This plot shows the normalised deviations (of each reflection from the
group-weighted mean), sorted in order and plotted against the expected order
based on a normal distribution model. A true normal distribution of deviations
would give the straight line indicated. If the errors are well described by
this model, the ordered responses should closely fit the straight line to
high absolute values of x (>3), where there is typically a deviation away from
the line due to wide tails of the distribution.
""",
        },
        "nor_dev_hist": {
            "data": [
                {
                    "x": list(histy.slot_centers()),
                    "y": list(histy.slots()),
                    "type": "bar",
                    "name": "dataset normalised deviations",
                },
                {
                    "x": list(histy.slot_centers()),
                    "y": gaussian,
                    "type": "scatter",
                    "name": "Ideal normal distribution",
                },
            ],
            "layout": {
                "title": "Normal deviations with error model applied",
                "xaxis": {
                    "anchor": "y",
                    "title": "Normalised deviation"
                },
                "yaxis": {
                    "anchor": "x",
                    "title": "Number of reflections"
                },
            },
            "help":
            """\
This plot shows the distribution of normalised deviations (of each reflection
from the group-weighted mean), for the reflections used to minimise the error
model. A true normal distribution is indicated.
""",
        },
    }
Code example #16
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detector, computing statistics at the panel-group level (i.e. one statistic per panel group)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the center of the reflection to the beam center
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the radial and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[pg.get_name()] = -1
        radial_rmsds[pg.get_name()] = -1
        transverse_rmsds[pg.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      bin_low = []
      bin_high = []
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']#[:71610]
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [sorted_values[i] for i in xrange(len(sorted_values)) if pg_bc_dists.keys()[i] in rmsds]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()