Example no. 1
def exercise_flood_fill():
  # imports assumed by this snippet (typical cctbx locations):
  #   from cctbx import uctbx, maptbx, masks
  #   from cctbx.array_family import flex
  #   from scitbx import matrix, math
  #   from libtbx.test_utils import approx_equal
  for uc in (uctbx.unit_cell('10 10 10 90 90 90'),
             uctbx.unit_cell('9 10 11 87 91 95')):
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(5,5,5))
    corner_cube = (0,4,20,24,100,104,120,124) # cube across all 8 corners
    channel = (12,37,38,39,42,43,62,63,67,68,87,112)
    data = flex.int(flex.grid(gridding.n_real()))
    for i in (corner_cube + channel): data[i] = 1
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(0) == 105
    for i in corner_cube: assert data[i] == 2
    for i in channel: assert data[i] == 3
    assert approx_equal(flood_fill.centres_of_mass(),
                        ((-0.5, -0.5, -0.5), (-2.5, 7/3, 2.5)))
    assert approx_equal(flood_fill.centres_of_mass_frac(),
                        ((-0.1, -0.1, -0.1), (-0.5, 7/15, 0.5)))
    assert approx_equal(flood_fill.centres_of_mass_cart(),
                        uc.orthogonalize(flood_fill.centres_of_mass_frac()))
    assert flood_fill.n_voids() == 2
    assert approx_equal(flood_fill.grid_points_per_void(), (8, 12))
    if 0:
      from crys3d import wx_map_viewer
      wx_map_viewer.display(raw_map=data.as_double(), unit_cell=uc, wires=False)
    #
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(10,10,10))
    data = flex.int(flex.grid(gridding.n_real()))
    # parallelogram
    points = [(2,4,5),(3,4,5),(4,4,5),(5,4,5),(6,4,5),
              (3,5,5),(4,5,5),(5,5,5),(6,5,5),(7,5,5),
              (4,6,5),(5,6,5),(6,6,5),(7,6,5),(8,6,5)]
    points_frac = flex.vec3_double()
    for p in points:
      data[p] = 1
      points_frac.append([p[i]/gridding.n_real()[i] for i in range(3)])
    points_cart = uc.orthogonalize(points_frac)
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(2) == 15
    assert approx_equal(flood_fill.centres_of_mass_frac(), ((0.5,0.5,0.5),))
    pai_cart = math.principal_axes_of_inertia(
      points=points_cart, weights=flex.double(points_cart.size(),1.0))
    F = matrix.sqr(uc.fractionalization_matrix())
    O = matrix.sqr(uc.orthogonalization_matrix())
    assert approx_equal(
      pai_cart.center_of_mass(), flood_fill.centres_of_mass_cart()[0])
    assert approx_equal(
      flood_fill.covariance_matrices_cart()[0],
      (F.transpose() * matrix.sym(
        sym_mat3=flood_fill.covariance_matrices_frac()[0]) * F).as_sym_mat3())
    assert approx_equal(
      pai_cart.inertia_tensor(), flood_fill.inertia_tensors_cart()[0])
    assert approx_equal(pai_cart.eigensystem().vectors(),
                        flood_fill.eigensystems_cart()[0].vectors())
    assert approx_equal(pai_cart.eigensystem().values(),
                        flood_fill.eigensystems_cart()[0].values())
  return
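The test above exercises essentially the whole flood_fill API. For reference, the core call pattern it relies on can be reduced to a few lines; the sketch below (grid size and marked points chosen purely for illustration, assuming a working cctbx installation) builds an integer map, marks a few connected grid points and reads back the void statistics.

# minimal flood_fill usage sketch; grid points are illustrative only
from cctbx import uctbx, maptbx, masks
from cctbx.array_family import flex

uc = uctbx.unit_cell('10 10 10 90 90 90')
gridding = maptbx.crystal_gridding(unit_cell=uc, pre_determined_n_real=(5, 5, 5))
data = flex.int(flex.grid(gridding.n_real()))
for i in (0, 1, 2):          # three connected grid points along the last axis
  data[i] = 1
flood_fill = masks.flood_fill(data, uc)   # relabels the region in place (values >= 2)
print(flood_fill.n_voids())                       # 1
print(list(flood_fill.grid_points_per_void()))    # [3]
print(list(flood_fill.centres_of_mass_frac()))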
Example no. 2
    def find_peaks(self):
        grid_real_binary = self.grid_real.deep_copy()
        rmsd = math.sqrt(
            flex.mean(
                flex.pow2(grid_real_binary.as_1d() -
                          flex.mean(grid_real_binary.as_1d()))))
        grid_real_binary.set_selected(
            grid_real_binary < (self.params.rmsd_cutoff) * rmsd, 0)
        grid_real_binary.as_1d().set_selected(grid_real_binary.as_1d() > 0, 1)
        grid_real_binary = grid_real_binary.iround()
        from cctbx import masks
        flood_fill = masks.flood_fill(grid_real_binary, self.fft_cell)
        if flood_fill.n_voids() < 4:
            # Require at least a peak at the origin and one peak for each basis vector
            raise Sorry(
                "Indexing failed: fft3d peak search failed to find sufficient number of peaks."
            )
        # the peak at the origin might have a significantly larger volume than the
        # rest so exclude this peak from determining maximum volume
        isel = (flood_fill.grid_points_per_void() > int(
            self.params.fft3d.peak_volume_cutoff *
            flex.max(flood_fill.grid_points_per_void()[1:]))).iselection()

        if self.params.optimise_initial_basis_vectors:
            self.volumes = flood_fill.grid_points_per_void().select(isel)
            sites_cart = flood_fill.centres_of_mass_cart().select(isel)
            sites_cart_optimised = optimise_basis_vectors(
                self.reflections['rlp'].select(
                    self.reflections_used_for_indexing), sites_cart)

            self.sites = self.fft_cell.fractionalize(sites_cart_optimised)

            diffs = (sites_cart_optimised - sites_cart)
            norms = diffs.norms()
            flex.min_max_mean_double(norms).show()
            perm = flex.sort_permutation(norms, reverse=True)
            for p in perm[:10]:
                logger.debug("%s %s %s", sites_cart[p], sites_cart_optimised[p], norms[p])

            # only use those vectors which haven't shifted too far from starting point
            sel = norms < (5 * self.fft_cell.parameters()[0] /
                           self.gridding[0])
            self.sites = self.sites.select(sel)
            self.volumes = self.volumes.select(sel)
            #diff = (self.sites - flood_fill.centres_of_mass_frac().select(isel))
            #flex.min_max_mean_double(diff.norms()).show()

        else:
            self.sites = flood_fill.centres_of_mass_frac().select(isel)
            self.volumes = flood_fill.grid_points_per_void().select(isel)
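The first half of find_peaks is an RMSD-based binarisation of the FFT map before flood filling. The standalone sketch below reproduces just that step on a synthetic map; the cell, grid size and rmsd_cutoff value are illustrative only, not the DIALS defaults.

import math
from cctbx import masks, uctbx
from cctbx.array_family import flex

fft_cell = uctbx.unit_cell((10, 10, 10, 90, 90, 90))
grid_real = flex.double(flex.grid(6, 6, 6), 0.0)
grid_real[0] = 5.0                     # one strong peak, everything else flat
rmsd_cutoff = 3.0                      # illustrative threshold

binary = grid_real.deep_copy()
rmsd = math.sqrt(flex.mean(flex.pow2(binary.as_1d() - flex.mean(binary.as_1d()))))
binary.set_selected(binary < rmsd_cutoff * rmsd, 0)     # zero out weak density
binary.as_1d().set_selected(binary.as_1d() > 0, 1)      # keep strong density as 1
binary = binary.iround()

flood_fill = masks.flood_fill(binary, fft_cell)
print(flood_fill.n_voids(), list(flood_fill.grid_points_per_void()))   # 1 [1]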
Example no. 3
    def _find_peaks(self, grid_real, d_min):
        grid_real_binary = grid_real.deep_copy()
        rmsd = math.sqrt(
            flex.mean(
                flex.pow2(grid_real_binary.as_1d() -
                          flex.mean(grid_real_binary.as_1d()))))
        grid_real_binary.set_selected(
            grid_real_binary < (self._params.rmsd_cutoff) * rmsd, 0)
        grid_real_binary.as_1d().set_selected(grid_real_binary.as_1d() > 0, 1)
        grid_real_binary = grid_real_binary.iround()
        from cctbx import masks

        # real space FFT grid dimensions
        cell_lengths = [self._n_points * d_min / 2 for i in range(3)]
        self._fft_cell = uctbx.unit_cell(cell_lengths + [90] * 3)

        flood_fill = masks.flood_fill(grid_real_binary, self._fft_cell)
        if flood_fill.n_voids() < 4:
            # Require at least a peak at the origin and one peak for each basis vector
            raise indexing.DialsIndexError(
                "Indexing failed: fft3d peak search failed to find sufficient number of peaks."
            )

        # the peak at the origin might have a significantly larger volume than the
        # rest so exclude any anomalously large peaks from determining minimum volume
        from scitbx.math import five_number_summary

        outliers = flex.bool(flood_fill.n_voids(), False)
        grid_points_per_void = flood_fill.grid_points_per_void()
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(
            grid_points_per_void)
        iqr_multiplier = 5
        iqr_x = q3_x - q1_x
        cut_x = iqr_multiplier * iqr_x
        outliers.set_selected(
            grid_points_per_void.as_double() > (q3_x + cut_x), True)
        # print q3_x + cut_x, outliers.count(True)
        isel = (grid_points_per_void > int(
            self._params.peak_volume_cutoff *
            flex.max(grid_points_per_void.select(~outliers)))).iselection()

        sites = flood_fill.centres_of_mass_frac().select(isel)
        volumes = flood_fill.grid_points_per_void().select(isel)
        return sites, volumes
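The interquartile-range test in _find_peaks deserves a note: the origin peak can be so much larger than the basis-vector peaks that using it to set the volume cutoff would discard the real peaks, so voids with volume above Q3 + 5*IQR are excluded before peak_volume_cutoff is applied. An isolated sketch with made-up volumes:

from scitbx.array_family import flex
from scitbx.math import five_number_summary

grid_points_per_void = flex.int((40, 45, 38, 42, 41, 400))   # last entry: origin peak
peak_volume_cutoff = 0.15                                    # illustrative value

outliers = flex.bool(grid_points_per_void.size(), False)
min_x, q1_x, med_x, q3_x, max_x = five_number_summary(grid_points_per_void)
cut_x = 5 * (q3_x - q1_x)
outliers.set_selected(grid_points_per_void.as_double() > (q3_x + cut_x), True)   # flags the 400

isel = (grid_points_per_void > int(
    peak_volume_cutoff * flex.max(grid_points_per_void.select(~outliers)))).iselection()
print(list(isel))   # every void clears the cutoff of int(0.15 * 45) = 6 grid points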
Example no. 4
File: fft3d.py  Project: dials/dials
  def find_peaks(self):
    grid_real_binary = self.grid_real.deep_copy()
    rmsd = math.sqrt(
      flex.mean(flex.pow2(grid_real_binary.as_1d()-flex.mean(grid_real_binary.as_1d()))))
    grid_real_binary.set_selected(grid_real_binary < (self.params.rmsd_cutoff)*rmsd, 0)
    grid_real_binary.as_1d().set_selected(grid_real_binary.as_1d() > 0, 1)
    grid_real_binary = grid_real_binary.iround()
    from cctbx import masks
    flood_fill = masks.flood_fill(grid_real_binary, self.fft_cell)
    if flood_fill.n_voids() < 4:
      # Require at least a peak at the origin and one peak for each basis vector
      raise Sorry("Indexing failed: fft3d peak search failed to find sufficient number of peaks.")
    # the peak at the origin might have a significantly larger volume than the
    # rest so exclude this peak from determining maximum volume
    isel = (flood_fill.grid_points_per_void() > int(
        self.params.fft3d.peak_volume_cutoff * flex.max(
          flood_fill.grid_points_per_void()[1:]))).iselection()

    if self.params.optimise_initial_basis_vectors:
      self.volumes = flood_fill.grid_points_per_void().select(isel)
      sites_cart = flood_fill.centres_of_mass_cart().select(isel)
      sites_cart_optimised = optimise_basis_vectors(
        self.reflections['rlp'].select(self.reflections_used_for_indexing),
        sites_cart)

      self.sites = self.fft_cell.fractionalize(sites_cart_optimised)

      diffs = (sites_cart_optimised - sites_cart)
      norms = diffs.norms()
      flex.min_max_mean_double(norms).show()
      perm = flex.sort_permutation(norms, reverse=True)
      for p in perm[:10]:
        logger.debug("%s %s %s", sites_cart[p], sites_cart_optimised[p], norms[p])

      # only use those vectors which haven't shifted too far from starting point
      sel = norms < (5 * self.fft_cell.parameters()[0]/self.gridding[0])
      self.sites = self.sites.select(sel)
      self.volumes = self.volumes.select(sel)
      #diff = (self.sites - flood_fill.centres_of_mass_frac().select(isel))
      #flex.min_max_mean_double(diff.norms()).show()

    else:
      self.sites = flood_fill.centres_of_mass_frac().select(isel)
      self.volumes = flood_fill.grid_points_per_void().select(isel)
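When optimise_initial_basis_vectors is set, the method above keeps only those candidate vectors that did not move more than five grid spacings during optimisation. A toy illustration of that final selection (optimise_basis_vectors itself is a DIALS internal and is not reproduced; the numbers are made up):

from scitbx.array_family import flex

cell_length = 180.0            # fft_cell a-axis length, illustrative
n_grid = 256                   # FFT gridding along a, illustrative
max_shift = 5 * cell_length / n_grid

sites_cart = flex.vec3_double([(10, 0, 0), (0, 20, 0)])
sites_cart_optimised = flex.vec3_double([(10.5, 0, 0), (0, 30, 0)])

norms = (sites_cart_optimised - sites_cart).norms()
sel = norms < max_shift
print(list(sel))   # [True, False]: the second vector shifted too far and is dropped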
Example no. 5
def exercise_flood_fill():
    for uc in (uctbx.unit_cell('10 10 10 90 90 90'),
               uctbx.unit_cell('9 10 11 87 91 95')):
        gridding = maptbx.crystal_gridding(unit_cell=uc,
                                           pre_determined_n_real=(5, 5, 5))
        corner_cube = (0, 4, 20, 24, 100, 104, 120, 124)  # cube across all 8 corners
        channel = (12, 37, 38, 39, 42, 43, 62, 63, 67, 68, 87, 112)
        data = flex.int(flex.grid(gridding.n_real()))
        for i in (corner_cube + channel):
            data[i] = 1
        flood_fill = masks.flood_fill(data, uc)
        assert data.count(0) == 105
        for i in corner_cube:
            assert data[i] == 2
        for i in channel:
            assert data[i] == 3
        assert approx_equal(flood_fill.centres_of_mass(),
                            ((-0.5, -0.5, -0.5), (-2.5, 7 / 3, 2.5)))
        assert approx_equal(flood_fill.centres_of_mass_frac(),
                            ((-0.1, -0.1, -0.1), (-0.5, 7 / 15, 0.5)))
        assert approx_equal(
            flood_fill.centres_of_mass_cart(),
            uc.orthogonalize(flood_fill.centres_of_mass_frac()))
        assert flood_fill.n_voids() == 2
        assert approx_equal(flood_fill.grid_points_per_void(), (8, 12))
        if 0:
            from crys3d import wx_map_viewer
            wx_map_viewer.display(raw_map=data.as_double(),
                                  unit_cell=uc,
                                  wires=False)
        #
        gridding = maptbx.crystal_gridding(unit_cell=uc,
                                           pre_determined_n_real=(10, 10, 10))
        data = flex.int(flex.grid(gridding.n_real()))
        # parallelogram
        points = [(2, 4, 5), (3, 4, 5), (4, 4, 5), (5, 4, 5), (6, 4, 5),
                  (3, 5, 5), (4, 5, 5), (5, 5, 5), (6, 5, 5), (7, 5, 5),
                  (4, 6, 5), (5, 6, 5), (6, 6, 5), (7, 6, 5), (8, 6, 5)]
        points_frac = flex.vec3_double()
        for p in points:
            data[p] = 1
            points_frac.append([p[i] / gridding.n_real()[i] for i in range(3)])
        points_cart = uc.orthogonalize(points_frac)
        flood_fill = masks.flood_fill(data, uc)
        assert data.count(2) == 15
        assert approx_equal(flood_fill.centres_of_mass_frac(),
                            ((0.5, 0.5, 0.5), ))
        pai_cart = math.principal_axes_of_inertia(points=points_cart,
                                                  weights=flex.double(
                                                      points_cart.size(), 1.0))
        F = matrix.sqr(uc.fractionalization_matrix())
        O = matrix.sqr(uc.orthogonalization_matrix())
        assert approx_equal(pai_cart.center_of_mass(),
                            flood_fill.centres_of_mass_cart()[0])
        assert approx_equal(
            flood_fill.covariance_matrices_cart()[0],
            (F.transpose() *
             matrix.sym(sym_mat3=flood_fill.covariance_matrices_frac()[0]) *
             F).as_sym_mat3())
        assert approx_equal(pai_cart.inertia_tensor(),
                            flood_fill.inertia_tensors_cart()[0])
        assert approx_equal(pai_cart.eigensystem().vectors(),
                            flood_fill.eigensystems_cart()[0].vectors())
        assert approx_equal(pai_cart.eigensystem().values(),
                            flood_fill.eigensystems_cart()[0].values())
    return
Example no. 6
    def integrate(self):
        from scitbx.array_family import flex
        from scitbx import matrix

        nslow, nfast = self.raw_data.focus()

        binary_map = self.get_signal_mask().as_1d().as_int()
        binary_map.reshape(flex.grid(1, nslow, nfast))

        # find connected regions of spots, reusing code written for density
        # modification; this is used to determine the integration masks for
        # the reflections
        from cctbx import masks
        from cctbx import uctbx

        uc = uctbx.unit_cell((1, nslow, nfast, 90, 90, 90))
        flood_fill = masks.flood_fill(binary_map, uc)
        binary_map = binary_map.as_1d()

        coms = flood_fill.centres_of_mass()

        # now iterate through these blobs, find the intensity and error, and
        # find the Miller index

        UB = matrix.sqr(self.crystal.get_A())
        UBi = UB.inverse()

        winv = 1 / self.beam.get_wavelength()

        data = self.raw_data.as_double()
        background = self.make_background()

        from dials.array_family import flex

        reflections = flex.reflection_table()

        num_pixels_foreground = flex.int()
        background_mean = flex.double()
        background_sum_value = flex.double()
        background_sum_variance = flex.double()
        intensity_sum_value = flex.double()
        intensity_sum_variance = flex.double()
        miller_index = flex.miller_index()
        xyzcal_px = flex.vec3_double()
        bbox = flex.int6()
        dq = flex.double()

        fast = flex.int(self.raw_data.size(), -1)
        fast.reshape(self.raw_data.accessor())
        slow = flex.int(self.raw_data.size(), -1)
        slow.reshape(self.raw_data.accessor())

        nslow, nfast = fast.focus()
        for j in range(nslow):
            for i in range(nfast):
                fast[(j, i)] = i
                slow[(j, i)] = j

        for j in range(flood_fill.n_voids()):
            sel = binary_map == (j + 2)
            pixels = data.select(sel)
            if flex.min(pixels) < 0:
                continue

            bg_pixels = background.select(sel)
            n = pixels.size()
            d = flex.sum(pixels)
            b = flex.sum(bg_pixels)
            s = d - b

            # FIXME is this the best centre of mass? If the spot is really
            # there, probably not; but if it is not (i.e. no spot) a background-
            # subtracted centre of mass is likely to be very noisy, so keeping
            # the background in should make this a little more stable.
            xy = coms[j][2], coms[j][1]

            fs = fast.select(sel)
            ss = slow.select(sel)

            fd = fs.as_double()
            sd = ss.as_double()

            p = matrix.col(
                self.panel.get_pixel_lab_coord(xy)).normalize() * winv
            q = p - matrix.col(self.beam.get_s0())
            hkl = UBi * q
            ihkl = [int(round(h)) for h in hkl]

            dq.append((q - UB * ihkl).length())

            # puzzle out the bounding boxes - hack: we have maps holding the
            # fast and slow position of every pixel; select from these, then
            # take the max and min of the selection
            f_min = flex.min(fs)
            f_max = flex.max(fs)

            s_min = flex.min(ss)
            s_max = flex.max(ss)

            bbox.append((f_min, f_max + 1, s_min, s_max + 1, 0, 1))

            num_pixels_foreground.append(n)
            background_mean.append(b / n)
            background_sum_value.append(b)
            background_sum_variance.append(b)
            intensity_sum_value.append(s)
            intensity_sum_variance.append(d + b)
            miller_index.append(ihkl)
            xyzcal_px.append((xy[0], xy[1], 0.0))

        reflections["num_pixels.foreground"] = num_pixels_foreground
        reflections["background.mean"] = background_mean
        reflections["background.sum.value"] = background_sum_value
        reflections["background.sum.variance"] = background_sum_variance
        reflections["intensity.sum.value"] = intensity_sum_value
        reflections["intensity.sum.variance"] = intensity_sum_variance
        reflections["miller_index"] = miller_index
        reflections["xyzcal.px"] = xyzcal_px
        reflections["id"] = flex.int(miller_index.size(), 0)
        reflections["panel"] = flex.size_t(miller_index.size(), 0)
        reflections["bbox"] = bbox
        reflections["dq"] = dq

        from dials.algorithms.shoebox import MaskCode

        reflections["shoebox"] = flex.shoebox(reflections["panel"],
                                              reflections["bbox"],
                                              allocate=True)

        reflections.extract_shoeboxes(self.imageset, verbose=True)

        # now iterate through these (how?!) and assign mask values
        fg = self.get_signal_mask()
        for reflection in reflections:
            s = reflection["shoebox"]
            b = reflection["bbox"]
            dz, dy, dx = s.mask.focus()
            for j in range(dy):
                for i in range(dx):
                    _i = b[0]
                    _j = b[2]
                    if fg[(j + _j, i + _i)]:
                        m = MaskCode.Valid | MaskCode.Foreground
                    else:
                        m = MaskCode.Valid | MaskCode.Background
                    s.mask[(0, j, i)] = m

        return reflections
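The key trick in integrate is to treat the 2D detector image as a degenerate 3D map: the binary signal mask is reshaped to a 1 x nslow x nfast grid with a dummy orthogonal cell so that masks.flood_fill labels each connected spot region, and the pixels of blob j can then be selected with binary_map == (j + 2). A small standalone sketch with synthetic data:

from cctbx import masks, uctbx
from scitbx.array_family import flex

nslow, nfast = 4, 6
signal = flex.bool(flex.grid(nslow, nfast), False)
for s, f in ((1, 1), (1, 2), (3, 4)):        # two separate "spots"
    signal[(s, f)] = True

binary_map = signal.as_1d().as_int()
binary_map.reshape(flex.grid(1, nslow, nfast))

uc = uctbx.unit_cell((1, nslow, nfast, 90, 90, 90))
flood_fill = masks.flood_fill(binary_map, uc)
binary_map = binary_map.as_1d()

for j in range(flood_fill.n_voids()):
    sel = binary_map == (j + 2)              # flood_fill relabels region j as value j + 2
    print(j, sel.count(True), flood_fill.centres_of_mass()[j])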