Example #1
from scitbx.array_family import flex
from scitbx import fftpack

def kernapply(x, k, circular=False):
  """Convolve a sequence x with a Kernel k"""

  x = flex.double(x).deep_copy()
  lenx = len(x)
  w = flex.double(lenx, 0.0)
  w.set_selected(flex.size_t_range(k.m + 1), k.coef)
  sel = lenx - 1 - flex.size_t_range(k.m)
  w.set_selected(sel, k.coef[1:])

  # do convolution in the Fourier domain
  fft = fftpack.real_to_complex(lenx)
  n = fft.n_real()
  m = fft.m_real()
  x.extend(flex.double(m-n, 0.))
  w.extend(flex.double(m-n, 0.))
  conv = fft.forward(x) * fft.forward(w)

  # extend result by the reverse conjugate, omitting the DC offset and Nyquist
  # frequencies. If fft.n_real() is odd there is no Nyquist term.
  end = fft.n_complex() - (fft.n_real() + 1) % 2
  conv.extend(flex.conj(conv[1:end]).reversed())

  # transform back, take real part and scale
  fft = fftpack.complex_to_complex(len(conv))
  result = fft.backward(conv).parts()[0] / n

  if circular:
    return result
  else:
    return result[(k.m):(lenx-k.m)]
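
A minimal usage sketch, hedged: it assumes a working cctbx/scitbx installation and the Kernel class defined alongside this function (as used in Example #12 below), so the names here are illustrative rather than guaranteed.

from scitbx.array_family import flex

k = Kernel('modified.daniell', 2)          # hypothetical: 5-point kernel, k.m == 2
x = flex.random_double(100)
trimmed = kernapply(x, k)                  # len(trimmed) == 100 - 2 * k.m
wrapped = kernapply(x, k, circular=True)   # len(wrapped) == 100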
Example #2
    def split_matches_into_blocks(self, nproc=1):
        """Return a list of the matches, split into blocks according to the
        gradient_calculation_blocksize parameter and the number of processes (if relevant).
        The number of blocks will be set such that the total number of reflections
        being processed by concurrent processes does not exceed gradient_calculation_blocksize"""

        self.update_matches()

        # Need to be able to track the indices of the original matches table for
        # scan-varying gradient calculations. A simple and robust (but slightly
        # expensive) way to do this is to add an index column to the matches table
        self._matches["imatch"] = flex.size_t_range(len(self._matches))

        if self._gradient_calculation_blocksize:
            nblocks = int(
                math.floor(
                    len(self._matches) * nproc /
                    self._gradient_calculation_blocksize))
        else:
            nblocks = nproc
        # ensure at least 100 reflections per block
        nblocks = min(nblocks, int(len(self._matches) / 100))
        nblocks = max(nblocks, 1)
        blocksize = int(math.floor(len(self._matches) / nblocks))
        blocks = []
        for block_num in range(nblocks - 1):
            start = block_num * blocksize
            end = (block_num + 1) * blocksize
            blocks.append(self._matches[start:end])
        start = (nblocks - 1) * blocksize
        end = len(self._matches)
        blocks.append(self._matches[start:end])
        return blocks
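
To trace the block arithmetic with concrete (hypothetical) numbers:

import math

nmatches, nproc, max_block = 10000, 4, 3000              # hypothetical sizes
nblocks = int(math.floor(nmatches * nproc / max_block))  # 13
nblocks = min(nblocks, nmatches // 100)                  # keep >= 100 matches per block
nblocks = max(nblocks, 1)
blocksize = int(math.floor(nmatches / nblocks))          # 769
# nproc concurrent blocks process ~ nproc * blocksize = 3076 matches,
# i.e. within rounding of the max_block target of 3000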
Example #3
    def predict_for_reflection_table(self, reflections, skip_derivatives=False):
        """perform prediction for all reflections in the supplied table"""

        if "entering" not in reflections:
            reflections.calculate_entering_flags(self._experiments)

        # can only predict for experiments that exist and within the scan range
        # any other reflections will be left unchanged
        inc = flex.size_t_range(len(reflections))
        to_keep = flex.bool(len(inc), False)

        for iexp, exp in enumerate(self._experiments):
            sel = reflections["id"] == iexp

            # keep all reflections if there is no rotation axis
            if exp.goniometer is None:
                to_keep.set_selected(sel, True)
                continue

            # trim reflections outside the scan range
            phi = reflections["xyzobs.mm.value"].parts()[2]
            phi_min, phi_max = exp.scan.get_oscillation_range(deg=False)
            passed = (phi >= phi_min) & (phi <= phi_max)
            to_keep.set_selected(sel, passed)

        # determine indices to include and predict on the subset
        inc = inc.select(to_keep)
        sub_refl = reflections.select(inc)
        preds = self._predict_core(sub_refl, skip_derivatives)

        # set updated subset back into place
        reflections.set_selected(inc, preds)

        return reflections
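
The select / set_selected round trip used above is the same pattern on any flex array; a minimal sketch:

from scitbx.array_family import flex

data = flex.double([1.0, 2.0, 3.0, 4.0])
inc = flex.size_t([1, 3])           # indices to update
sub = data.select(inc)              # [2.0, 4.0]
data.set_selected(inc, sub * 10.0)  # data is now [1.0, 20.0, 3.0, 40.0]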
Example #4
    def __init__(self, constraints, n_full_params):

        self._constraints = constraints

        # constraints should be a list of EqualShiftConstraint objects
        assert len(self._constraints) > 0

        self._n_full_params = n_full_params
        full_idx = flex.size_t_range(n_full_params)
        self._constrained_gps = [c.indices for c in self._constraints]
        self._constrained_idx = flex.size_t(
            [i for c in self._constrained_gps for i in c])
        keep = flex.bool(self._n_full_params, True)
        keep.set_selected(self._constrained_idx, False)
        self._unconstrained_idx = full_idx.select(keep)
        self._n_unconstrained_params = len(self._unconstrained_idx)
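
The complement-index pattern in this constructor, reduced to a standalone sketch:

from scitbx.array_family import flex

n_full_params = 8
full_idx = flex.size_t_range(n_full_params)   # 0..7
constrained_idx = flex.size_t([2, 5])
keep = flex.bool(n_full_params, True)
keep.set_selected(constrained_idx, False)
unconstrained_idx = full_idx.select(keep)     # 0, 1, 3, 4, 6, 7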
Example #5
  def test_MTRIX(self):
    '''Test MTRIX record processing'''
    # print sys._getframe().f_code.co_name
    cau_expected_results = [
    [1.0, 1.0, 1.0], [1.0, -1.0, 1.0], [-0.366025, 1.366025, 1.0], [-1.366025, 0.366025, 1.0],
    [1.0, 1.5, 1.0], [94.618, -5.253, 91.582],
    [94.618, -91.582, -5.253], [51.858229, 79.315053, 91.582], [-42.759771, 84.568053, 91.582],
    [94.618, -4.753, 91.582], [62.395, 51.344, 80.786],
    [62.395, -80.786, 51.344], [-13.267688, 79.70763, 80.786], [-75.662688, 28.36363, 80.786],
    [62.395, 51.844, 80.786], [39.954, 51.526, 72.372],
    [39.954, -72.372, 51.526], [-24.645804, 60.364163, 72.372], [-64.599804, 8.838163, 72.372],
    [39.954, 52.026, 72.372]]

    # use MTRIX data
    multimer_data = multimer(
      file_name='multimer_test_data.pdb',
      reconstruction_type='cau')
    cau_multimer_xyz = list(multimer_data.sites_cart())

    cau_multimer_xyz.sort()
    cau_expected_results.sort()
    assert approx_equal(cau_expected_results, cau_multimer_xyz, eps=0.001)

    # Test that the number of MTRIX records to process is correct
    self.assertEqual(multimer_data.number_of_transforms, 4)

    # Test getting non-rounded ASU
    source_xyz = multimer_data.get_ncs_hierarchy().atoms().extract_xyz()
    xyz = apply_transforms(
      ncs_coordinates = source_xyz,
      ncs_restraints_group_list = multimer_data.get_ncs_restraints_group_list(),
      total_asu_length = multimer_data.total_asu_length(),
      extended_ncs_selection = flex.size_t_range(source_xyz.size()),
      round_coordinates=False)
    cau_multimer_xyz = list(xyz)
    cau_multimer_xyz.sort()
    assert approx_equal(cau_expected_results, cau_multimer_xyz, eps=0.00001)

    # Test multimer without rounding
    multimer_data = multimer(
      file_name='multimer_test_data.pdb',
      round_coordinates=False,
      reconstruction_type='cau')
    cau_multimer_xyz = list(multimer_data.sites_cart())
    cau_multimer_xyz.sort()
    cau_expected_results.sort()
    assert approx_equal(cau_expected_results, cau_multimer_xyz, eps=0.00001)
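
Here flex.size_t_range(source_xyz.size()) is simply a select-everything NCS selection; the helper enumerates 0..n-1:

from scitbx.array_family import flex

sel = flex.size_t_range(4)
assert list(sel) == [0, 1, 2, 3]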
Example #6
 def rmsd_permutation(O, sites_cart_1, sites_cart_2):
     "simple, limited handling of flipped sites"
     assert sites_cart_1.size() == len(O.edge_sets)
     assert sites_cart_2.size() == len(O.edge_sets)
     from scitbx.array_family import flex
     result = flex.size_t_range(len(O.edge_sets))
     for i, esi in enumerate(O.edge_sets):
         if (len(esi) not in [2, 3]): continue
         n1 = flex.size_t()
         for j in esi:
             if (len(O.edge_sets[j]) == 1):
                 n1.append(j)
         if (len(n1) != 2): continue
         n1_rev = flex.size_t(reversed(n1))
         pair_1 = sites_cart_1.select(n1)
         rmsd_1 = pair_1.rms_difference(sites_cart_2.select(n1))
         rmsd_2 = pair_1.rms_difference(sites_cart_2.select(n1_rev))
         if (rmsd_2 < rmsd_1 * (1 - 1e-6)):
             result.set_selected(n1, n1_rev)
     return result
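
The flip test at the heart of this function, isolated as a sketch: swap a two-atom selection when the reversed pairing gives a strictly smaller rms difference.

from scitbx.array_family import flex

sites_1 = flex.vec3_double([(0, 0, 0), (1, 0, 0)])
sites_2 = flex.vec3_double([(1, 0, 0), (0, 0, 0)])  # same pair, flipped
n1 = flex.size_t([0, 1])
n1_rev = flex.size_t(reversed(n1))
rmsd_1 = sites_1.select(n1).rms_difference(sites_2.select(n1))
rmsd_2 = sites_1.select(n1).rms_difference(sites_2.select(n1_rev))
assert rmsd_2 < rmsd_1 * (1 - 1e-6)  # reversed pairing wins: permute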
Example #7
 def rmsd_permutation(O, sites_cart_1, sites_cart_2):
   "simple, limited handling of flipped sites"
   assert sites_cart_1.size() == len(O.edge_sets)
   assert sites_cart_2.size() == len(O.edge_sets)
   from scitbx.array_family import flex
   result = flex.size_t_range(len(O.edge_sets))
    for i, esi in enumerate(O.edge_sets):
      if (len(esi) not in [2, 3]): continue
     n1 = flex.size_t()
     for j in esi:
       if (len(O.edge_sets[j]) == 1):
         n1.append(j)
     if (len(n1) != 2): continue
     n1_rev = flex.size_t(reversed(n1))
     pair_1 = sites_cart_1.select(n1)
     rmsd_1 = pair_1.rms_difference(sites_cart_2.select(n1))
     rmsd_2 = pair_1.rms_difference(sites_cart_2.select(n1_rev))
      if (rmsd_2 < rmsd_1 * (1 - 1e-6)):
       result.set_selected(n1, n1_rev)
   return result
Example #8
  def predict_for_reflection_table(self, reflections):
    """perform prediction for all reflections in the supplied table"""

    # set the entering flags if this has not been done
    from dials.algorithms.refinement.reflection_manager import calculate_entering_flags
    if "entering" not in reflections:
      reflections['entering'] = calculate_entering_flags(reflections, self._experiments)

    # can only predict for experiments that exist and within the scan range
    # any other reflections will be left unchanged
    inc = flex.size_t_range(len(reflections))
    to_keep = flex.bool(len(inc), False)

    for iexp, exp in enumerate(self._experiments):
      sel = reflections['id'] == iexp

      # keep all reflections if there is no rotation axis
      if exp.goniometer is None:
        to_keep.set_selected(sel, True)
        continue

      # trim reflections outside the scan range
      phi = reflections['xyzobs.mm.value'].parts()[2]
      phi_min, phi_max = exp.scan.get_oscillation_range(deg=False)
      passed = (phi >= phi_min) & (phi <= phi_max)
      to_keep.set_selected(sel, passed)

    # determine indices to include and predict on the subset
    inc = inc.select(to_keep)
    sub_refl = reflections.select(inc)
    preds = self._predict_core(sub_refl)

    # set updated subset back into place
    reflections.set_selected(inc, preds)

    return reflections
Example #9
    def __init__(
        self,
        hierarchy=None,
        # XXX warning, ncs_phil_groups can be changed inside...
        ncs_phil_groups=None,
        params=None,
        log=None,
    ):
        """
    TODO:
    1. Transfer get_ncs_info_as_spec() to ncs/ncs.py:ncs

    Select method to build ncs_group_object

    order of implementation:
    1) ncs_phil_groups - user-supplied definitions are filtered
    2) hierarchy only - Performing NCS search

    Args:
    -----
      ncs_phil_groups:  iotbx.phil.parse(ncs_group_phil_str).extract().ncs_group
      chain_max_rmsd (float): limit of rms difference between chains to be considered
        as copies
      min_percent (float): threshold for similarity between chains, where
        similarity is defined as:
        (number of matching residues) / (number of residues in the longer chain)
      chain_similarity_threshold (float): minimum similarity between matching chains
      residue_match_radius (float): maximum allowed distance difference between
        pairs of matching atoms of two residues
    """

        self.number_of_ncs_groups = 0  # consider removing/replacing with function

        self.ncs_restraints_group_list = class_ncs_restraints_group_list()
        # keep hierarchy for writing (To have a source of atoms labels)
        self.hierarchy = hierarchy
        # residues common to NCS copies. Used for .spec representation
        self.common_res_dict = {}
        # Collect messages, recommendations and errors
        self.messages = ''  # Not used outside...
        self.old_i_seqs = None
        self.original_hierarchy = None
        self.truncated_hierarchy = None
        self.truncated_h_asc = None
        self.chains_info = None

        extension = ''
        # set search parameters
        self.params = params
        if self.params is None:
            self.params = input.get_default_params().ncs_search
        #
        if log is None:
            self.log = sys.stdout
        else:
            self.log = log

        if hierarchy:
            # for a in hierarchy.atoms():
            #   print "oo", a.i_seq, a.id_str()
            # print "====="
            hierarchy.atoms().reset_i_seq()
            self.original_hierarchy = hierarchy.deep_copy()
            self.original_hierarchy.reset_atom_i_seqs()
            if self.params.exclude_selection is not None:
                # pdb_hierarchy_inp.hierarchy.write_pdb_file("in_ncs_pre_before.pdb")
                cache = hierarchy.atom_selection_cache()
                sel = cache.selection("not (%s)" %
                                      self.params.exclude_selection)
                self.truncated_hierarchy = hierarchy.select(sel)
            else:
                # this could be to save iseqs but I'm not sure
                self.truncated_hierarchy = hierarchy.select(
                    flex.size_t_range(hierarchy.atoms_size()))
            self.old_i_seqs = self.truncated_hierarchy.atoms().extract_i_seq()
            # print "self.old_i_seqs", list(self.old_i_seqs)
            # self.truncated_hierarchy.atoms().reset_i_seq()
            self.truncated_hierarchy.reset_atom_i_seqs()
            self.truncated_h_asc = self.truncated_hierarchy.atom_selection_cache(
            )
            # self.truncated_hierarchy.write_pdb_file("in_ncs_pre_after.pdb")
            self.chains_info = ncs_search.get_chains_info(
                self.truncated_hierarchy)

            if self.truncated_hierarchy.atoms_size() == 0:
                return

        #
        # print "ncs_groups before validation", ncs_phil_groups
        validated_ncs_phil_groups = None
        validated_ncs_phil_groups = self.validate_ncs_phil_groups(
            pdb_h=self.truncated_hierarchy,
            ncs_phil_groups=ncs_phil_groups,
            asc=self.truncated_h_asc)
        if validated_ncs_phil_groups is None:
            # print "Last chance, building from hierarchy"
            self.build_ncs_obj_from_pdb_asu(pdb_h=self.truncated_hierarchy,
                                            asc=self.truncated_h_asc)

        # error handling
        if self.ncs_restraints_group_list.get_n_groups() == 0:
            print('========== WARNING! ============\n', file=self.log)
            print('  No NCS relations were found !!!\n', file=self.log)
            print('================================\n', file=self.log)
        if self.messages != '':
            print(self.messages, file=self.log)
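
A hedged sketch of the select-everything trick used for truncated_hierarchy (assuming iotbx is available and a single hypothetical ATOM record): selecting all atom indices produces a working copy of the hierarchy while the original i_seq values remain recoverable via extract_i_seq().

import iotbx.pdb
from scitbx.array_family import flex

pdb_str = "ATOM      1  CA  GLY A   1       0.000   0.000   0.000  1.00 20.00           C"
pdb_h = iotbx.pdb.input(source_info=None, lines=flex.split_lines(pdb_str)).construct_hierarchy()
copy_h = pdb_h.select(flex.size_t_range(pdb_h.atoms_size()))
assert copy_h.atoms_size() == pdb_h.atoms_size()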
Example #10
def tst_functional_gradient_calculator_invalid_arguments():
    """Check errors are raised as expected"""

    n_grp = 3
    n_dst = 5
    n_atm = 10

    target_uijs, target_weights, \
      base_uijs, base_sels, dataset_hash, \
      atomic_base, \
      real_group_amps, real_atomic_amps \
        = get_optimisation_test_set(n_grp, n_dst, n_atm)

    ########################################################
    # Check expected error messages are raised
    ########################################################

    # Starting values
    base_amplitudes_start = flex.double(n_grp * n_dst, 1.0)

    wgt_kw_args = dict(
        weight_sum_of_amplitudes=0.0,
        weight_sum_of_amplitudes_squared=0.0,
        weight_sum_of_squared_amplitudes=0.0,
    )

    # Should not error
    f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
        target_uijs=target_uijs,
        target_weights=target_weights,
        base_amplitudes=base_amplitudes_start,
        base_uijs=base_uijs,
        base_atom_indices=base_sels,
        base_dataset_hash=dataset_hash,
        atomic_uijs=atomic_base,
        **wgt_kw_args)
    f, g = f_g_calculator.compute_functional_and_gradients()

    # target_uijs

    msg = "invalid target_uijs: must be 2-dimensional flex array (currently 3)"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm, 5)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), '"{}" does not match "{}"'.format(
        msg, str(e.value))

    # target_weights

    msg = "invalid dimension of target_weights (dimension 3): must be same dimension as target_uijs (dimension 2)"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=flex.double(flex.grid((n_dst, n_atm, 5)), 1.0),
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible dimension of target_weights (axis 0): must be same size as target_uijs ({} != {})".format(
        n_dst, n_dst - 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible dimension of target_weights (axis 1): must be same size as target_uijs ({} != {})".format(
        n_atm, n_atm + 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst, n_atm + 1)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    # base components

    msg = "invalid input base components. base_amplitudes (length {}), base_uijs (length {}) and base_atom_indices (length {}) must all be the same length"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=flex.double(n_grp * n_dst - 1, 1.0),
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst - 1, n_grp * n_dst,
                      n_grp * n_dst) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:-1],
            base_atom_indices=base_sels[:-1],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst - 1,
                      n_grp * n_dst - 1) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:-1],
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst - 1,
                      n_grp * n_dst) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels[:-1],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst,
                      n_grp * n_dst - 1) == str(e.value)

    msg = "incompatible pair (element 2) in base_uijs/base_atom_indices: pairwise elements must be the same length ({} and {})".format(
        n_atm + 1, len(base_uijs[2]))
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:2] + [flex.sym_mat3_double(n_atm + 1)] +
            base_uijs[3:],
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible pair (element 2) in base_uijs/base_atom_indices: pairwise elements must be the same length ({} and {})".format(
        len(base_sels[2]), n_atm + 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels[:2] + [flex.size_t_range(n_atm + 1)] +
            base_sels[3:],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "invalid selection in base_atom_indices ({}): attempting to select atom outside of array (size {})".format(
        n_atm, n_atm)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:2] + [flex.sym_mat3_double(n_atm + 1)] +
            base_uijs[3:],
            base_atom_indices=base_sels[:2] + [flex.size_t_range(n_atm + 1)] +
            base_sels[3:],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    # dataset_hash

    msg = "invalid base_dataset_hash (length {}): must be same length as base_amplitudes, base_uijs & base_atom_indices (length {})".format(
        len(dataset_hash) - 1, len(dataset_hash))
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=flex.size_t(list(dataset_hash)[:-1]),
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    msg = "invalid value in base_dataset_hash ({}): attempts to select element outside range of target_uijs (size {})".format(
        n_dst - 1, n_dst - 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=flex.double(flex.grid((n_dst - 1, n_atm)),
                                       1.0),  # need to resize this also
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    msg = "Dataset index {} is not present in base_dataset_hash -- this dataset has no base elements associated with it.".format(
        n_dst)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst + 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=flex.double(flex.grid((n_dst + 1, n_atm)),
                                       1.0),  # need to resize this also
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    # atomic uijs

    msg = "invalid size of atomic_uijs ({}): must match 2nd dimension of target_uijs ({})".format(
        n_atm - 1, n_atm)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=flex.sym_mat3_double(n_atm - 1),
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    # atomic mask

    f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
        target_uijs=target_uijs,
        target_weights=target_weights,
        base_amplitudes=base_amplitudes_start,
        base_uijs=base_uijs,
        base_atom_indices=base_sels,
        base_dataset_hash=dataset_hash,
        atomic_uijs=atomic_base,
        **wgt_kw_args)
    # should not error
    f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst, True))
    f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst, False))
    # should error
    msg = "Input array (size {}) must be the same length as number of datasets ({})".format(
        n_dst - 1, n_dst)
    with raises(Exception) as e:
        f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst - 1, True))
    assert msg == str(e.value)
    msg = "Input array (size {}) must be the same length as number of datasets ({})".format(
        n_dst + 1, n_dst)
    with raises(Exception) as e:
        f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst + 1, True))
    assert msg == str(e.value)

    # setting amplitudes

    # should not error
    f_g_calculator.set_current_amplitudes(flex.double(n_grp * n_dst + n_atm))
    # should error
    msg = "Input array (size {}) must be the same length as current_amplitudes (size {})".format(
        n_grp * n_dst + n_atm - 1, n_grp * n_dst + n_atm)
    with raises(Exception) as e:
        f_g_calculator.set_current_amplitudes(
            flex.double(n_grp * n_dst + n_atm - 1))
    assert msg == str(e.value)

    print('OK')
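
The raises(...) context manager used throughout is pytest's; the assertion pattern compares an expected message against e.value:

from pytest import raises

with raises(ValueError) as e:
    raise ValueError("boom")
assert str(e.value) == "boom"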
Example #11
    def make_cif_block(self, experiments, reflections):
        """Write the data to a cif block"""
        # Select reflections
        selection = reflections.get_flags(reflections.flags.integrated, all=True)
        reflections = reflections.select(selection)

        # Filter out bad variances and other issues, but don't filter on ice rings
        # or alter partialities.

        # Assumes you want to apply the lp and dqe corrections to sum and prf
        # Do we want to combine partials?
        reflections = filter_reflection_table(
            reflections,
            self.params.intensity,
            combine_partials=False,
            partiality_threshold=0.0,
            d_min=self.params.mtz.d_min,
        )

        # Get the cif block
        cif_block = iotbx.cif.model.block()

        # Audit trail
        dials_version = dials.util.version.dials_version()
        cif_block["_audit.revision_id"] = 1
        cif_block["_audit.creation_method"] = dials_version
        cif_block["_audit.creation_date"] = datetime.date.today().isoformat()
        cif_block["_entry.id"] = "DIALS"
        # add software loop
        mmcif_software_header = (
            "_software.pdbx_ordinal",
            "_software.citation_id",
            "_software.name",  # as defined at [1]
            "_software.version",
            "_software.type",
            "_software.classification",
            "_software.description",
        )

        mmcif_citations_header = (
            "_citation.id",
            "_citation.journal_abbrev",
            "_citation.journal_volume",
            "_citation.journal_issue",
            "_citation.page_first",
            "_citation.page_last",
            "_citation.year",
            "_citation.title",
        )

        software_loop = iotbx.cif.model.loop(header=mmcif_software_header)
        citations_loop = iotbx.cif.model.loop(header=mmcif_citations_header)

        software_loop.add_row(
            (
                1,
                1,
                "DIALS",
                dials_version,
                "package",
                "data processing",
                "Data processing and integration within the DIALS software package",
            )
        )
        citations_loop.add_row(
            (
                1,
                "Acta Cryst. D",
                74,
                2,
                85,
                97,
                2018,
                "DIALS: implementation and evaluation of a new integration package",
            )
        )
        if "scale" in self.params.intensity:
            software_loop.add_row(
                (
                    2,
                    2,
                    "DIALS",
                    dials_version,
                    "program",
                    "data scaling",
                    "Data scaling and merging within the DIALS software package",
                )
            )
            citations_loop.add_row(
                (
                    2,
                    "Acta Cryst. D",
                    76,
                    4,
                    385,
                    399,
                    2020,
                    "Scaling diffraction data in the DIALS software package: algorithms and new approaches for multi-crystal scaling",
                )
            )
        cif_block.add_loop(software_loop)
        cif_block.add_loop(citations_loop)

        # Hard coding X-ray
        if self.params.mmcif.pdb_version == "v5_next":
            cif_block["_pdbx_diffrn_data_section.id"] = "dials"
            cif_block["_pdbx_diffrn_data_section.type_scattering"] = "x-ray"
            cif_block["_pdbx_diffrn_data_section.type_merged"] = "false"
            cif_block["_pdbx_diffrn_data_section.type_scaled"] = str(
                "scale" in self.params.intensity
            ).lower()

        # FIXME finish metadata addition - detector and source details needed
        # http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Categories/index.html

        # Add source information;
        # _diffrn_source.pdbx_wavelength_list = (list of wavelengths)
        # _diffrn_source.source = (general class of source e.g. synchrotron)
        # _diffrn_source.type = (specific beamline or instrument e.g DIAMOND BEAMLINE I04)

        wls = []
        epochs = []
        for exp in experiments:
            wls.append(round(exp.beam.get_wavelength(), 5))
            epochs.append(exp.scan.get_epochs()[0])
        unique_wls = set(wls)
        cif_block["_exptl_crystal.id"] = 1  # links to crystal_id
        cif_block["_diffrn.id"] = 1  # links to diffrn_id
        cif_block["_diffrn.crystal_id"] = 1
        cif_block["_diffrn_source.diffrn_id"] = 1
        cif_block["_diffrn_source.pdbx_wavelength_list"] = ", ".join(
            str(w) for w in unique_wls
        )

        # Add detector information;
        # _diffrn_detector.detector  = (general class e.g. PIXEL, PLATE etc)
        # _diffrn_detector.pdbx_collection_date = (Date of collection yyyy-mm-dd)
        # _diffrn_detector.type = (full name of detector e.g. DECTRIS PILATUS3 2M)
        # One date is required, so if multiple just use the first date.
        min_epoch = min(epochs)
        date_str = time.strftime("%Y-%m-%d", time.gmtime(min_epoch))
        cif_block["_diffrn_detector.diffrn_id"] = 1
        cif_block["_diffrn_detector.pdbx_collection_date"] = date_str

        # Write reflection data
        # Required columns
        header = (
            "_pdbx_diffrn_unmerged_refln.reflection_id",
            "_pdbx_diffrn_unmerged_refln.scan_id",
            "_pdbx_diffrn_unmerged_refln.image_id_begin",
            "_pdbx_diffrn_unmerged_refln.image_id_end",
            "_pdbx_diffrn_unmerged_refln.index_h",
            "_pdbx_diffrn_unmerged_refln.index_k",
            "_pdbx_diffrn_unmerged_refln.index_l",
        )

        extra_items = {
            "scales": ("_pdbx_diffrn_unmerged_refln.scale_value", "%5.3f"),
            "intensity.scale.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_meas",
                "%8.3f",
            ),
            "intensity.scale.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_sigma",
                "%8.3f",
            ),
            "intensity.sum.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_sum",
                "%8.3f",
            ),
            "intensity.sum.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_sum_sigma",
                "%8.3f",
            ),
            "intensity.prf.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_prf",
                "%8.3f",
            ),
            "intensity.prf.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_prf_sigma",
                "%8.3f",
            ),
            "angle": ("_pdbx_diffrn_unmerged_refln.scan_angle_reflection", "%7.4f"),
            "partiality": ("_pdbx_diffrn_unmerged_refln.partiality", "%7.4f"),
        }

        variables_present = []
        if "scale" in self.params.intensity:
            reflections["scales"] = 1.0 / reflections["inverse_scale_factor"]
            reflections["intensity.scale.sigma"] = flex.sqrt(
                reflections["intensity.scale.variance"]
            )
            variables_present.extend(
                ["scales", "intensity.scale.value", "intensity.scale.sigma"]
            )
        if "sum" in self.params.intensity:
            reflections["intensity.sum.sigma"] = flex.sqrt(
                reflections["intensity.sum.variance"]
            )
            variables_present.extend(["intensity.sum.value", "intensity.sum.sigma"])
        if "profile" in self.params.intensity:
            reflections["intensity.prf.sigma"] = flex.sqrt(
                reflections["intensity.prf.variance"]
            )
            variables_present.extend(["intensity.prf.value", "intensity.prf.sigma"])

        # Should always exist
        reflections["angle"] = reflections["xyzcal.mm"].parts()[2] * RAD2DEG
        variables_present.extend(["angle"])

        if "partiality" in reflections:
            variables_present.extend(["partiality"])

        for name in variables_present:
            if name in reflections:
                header += (extra_items[name][0],)
                self._fmt += " " + extra_items[name][1]

        if "scale" in self.params.intensity:
            # Write dataset_statistics - first make a miller array
            crystal_symmetry = cctbxcrystal.symmetry(
                space_group=experiments[0].crystal.get_space_group(),
                unit_cell=experiments[0].crystal.get_unit_cell(),
            )
            miller_set = miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=reflections["miller_index"],
                anomalous_flag=False,
            )
            i_obs = miller.array(miller_set, data=reflections["intensity.scale.value"])
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(reflections["intensity.scale.sigma"])
            i_obs.set_info(
                miller.array_info(source="DIALS", source_type="reflection_tables")
            )

            result = dataset_statistics(
                i_obs=i_obs,
                crystal_symmetry=crystal_symmetry,
                use_internal_variance=False,
                eliminate_sys_absent=False,
            )
            merged_block = iotbx.cif.model.block()
            merged_block["_reflns.pdbx_ordinal"] = 1
            merged_block["_reflns.pdbx_diffrn_id"] = 1
            merged_block["_reflns.entry_id"] = "DIALS"
            merged_data = result.as_cif_block()
            merged_block.update(merged_data)
            cif_block.update(merged_block)

        # Write the crystal information
        # if v5, thats all so return
        if self.params.mmcif.pdb_version == "v5":
            return cif_block
        # continue if v5_next
        cif_loop = iotbx.cif.model.loop(
            header=(
                "_pdbx_diffrn_unmerged_cell.ordinal",
                "_pdbx_diffrn_unmerged_cell.crystal_id",
                "_pdbx_diffrn_unmerged_cell.wavelength",
                "_pdbx_diffrn_unmerged_cell.cell_length_a",
                "_pdbx_diffrn_unmerged_cell.cell_length_b",
                "_pdbx_diffrn_unmerged_cell.cell_length_c",
                "_pdbx_diffrn_unmerged_cell.cell_angle_alpha",
                "_pdbx_diffrn_unmerged_cell.cell_angle_beta",
                "_pdbx_diffrn_unmerged_cell.cell_angle_gamma",
                "_pdbx_diffrn_unmerged_cell.Bravais_lattice",
            )
        )
        crystals = experiments.crystals()
        crystal_to_id = {crystal: i + 1 for i, crystal in enumerate(crystals)}
        for i, exp in enumerate(experiments):
            crystal = exp.crystal
            crystal_id = crystal_to_id[crystal]
            wavelength = exp.beam.get_wavelength()
            a, b, c, alpha, beta, gamma = crystal.get_unit_cell().parameters()
            latt_type = str(
                bravais_types.bravais_lattice(group=crystal.get_space_group())
            )
            cif_loop.add_row(
                (i + 1, crystal_id, wavelength, a, b, c, alpha, beta, gamma, latt_type)
            )
        # add the completed loop once, after all rows
        cif_block.add_loop(cif_loop)

        # Write the scan information
        cif_loop = iotbx.cif.model.loop(
            header=(
                "_pdbx_diffrn_scan.scan_id",
                "_pdbx_diffrn_scan.crystal_id",
                "_pdbx_diffrn_scan.image_id_begin",
                "_pdbx_diffrn_scan.image_id_end",
                "_pdbx_diffrn_scan.scan_angle_begin",
                "_pdbx_diffrn_scan.scan_angle_end",
            )
        )

        expid_to_scan_id = {exp.identifier: i + 1 for i, exp in enumerate(experiments)}

        for i, exp in enumerate(experiments):
            scan = exp.scan
            crystal_id = crystal_to_id[exp.crystal]
            image_range = scan.get_image_range()
            osc_range = scan.get_oscillation_range(deg=True)
            cif_loop.add_row(
                (
                    i + 1,
                    crystal_id,
                    image_range[0],
                    image_range[1],
                    osc_range[0],
                    osc_range[1],
                )
            )
        # add the completed loop once, after all rows
        cif_block.add_loop(cif_loop)

        _, _, _, _, z0, z1 = reflections["bbox"].parts()
        h, k, l = [
            hkl.iround() for hkl in reflections["miller_index"].as_vec3_double().parts()
        ]
        # make scan id consistent with header as defined above
        scan_id = flex.int(reflections.size(), 0)
        for id_ in reflections.experiment_identifiers().keys():
            expid = reflections.experiment_identifiers()[id_]
            sel = reflections["id"] == id_
            scan_id.set_selected(sel, expid_to_scan_id[expid])

        loop_values = [
            flex.size_t_range(1, len(reflections) + 1),
            scan_id,
            z0,
            z1,
            h,
            k,
            l,
        ] + [reflections[name] for name in variables_present]
        cif_loop = iotbx.cif.model.loop(data=dict(zip(header, loop_values)))
        cif_block.add_loop(cif_loop)

        return cif_block
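
The block / loop construction pattern in isolation, a minimal sketch using the same iotbx.cif.model calls as above (item and loop names here are made up for illustration):

import iotbx.cif.model

block = iotbx.cif.model.block()
block["_demo.name"] = "example"      # scalar item
loop = iotbx.cif.model.loop(header=("_demo_loop.id", "_demo_loop.value"))
loop.add_row((1, 0.5))
loop.add_row((2, 0.7))
block.add_loop(loop)                 # add once, after all rows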
Example #12
  def __init__(self, x, spans=None, demean=True, detrend=True):

    # Ensure x is copied as it will be changed in-place
    x = flex.double(x).deep_copy()
    n = len(x)

    if detrend:
      t = flex.size_t_range(n).as_double() + 1 - (n + 1) / 2
      inv_sumt2 = 1. / t.dot(t)
      x = x - flex.mean(x) - x.dot(t) * t * inv_sumt2
    elif demean:
      x -= flex.mean(x)

    # determine frequencies
    stop = ((n - (n % 2)) // 2) + 1
    self.freq = flex.double([i / n for i in range(1, stop)])

    fft = fftpack.real_to_complex(n)
    n = fft.n_real()
    m = fft.m_real()
    x.extend(flex.double(m-n, 0.))
    xf = fft.forward(x)

    # get abs length of complex and normalise by n to get the raw periodogram
    spec = flex.norm(xf) / n

    if spans is None:
      # if not doing smoothing, the spectrum is just the raw periodogram with
      # the uninteresting DC offset removed
      self.spec = spec[1:]
      return

    # for smoothing replace the DC offset term and extend the rest of the
    # sequence by its reverse conjugate, omitting the Nyquist term if it is
    # present
    spec[0] = spec[1]
    end = fft.n_complex() - (fft.n_real() + 1) % 2
    spec.extend(spec[1:end].reversed())

    try:
      # multiple integer spans
      nspans = len(spans)
      m = int(spans[0]) // 2
      multiple = True
    except TypeError:
      # single integer span
      m = int(spans) // 2
      multiple = False

    # construct smoothing kernel
    k = Kernel('modified.daniell', m)

    if multiple:
      for i in range(1, nspans):
        # construct kernel for convolution
        m1 = int(spans[i]) // 2
        k1 = Kernel('modified.daniell', m1)

        # expand coefficients of k to full kernel and zero pad for smoothing
        x1 = flex.double(k1.m, 0.0)
        x1.extend(k.coef.reversed())
        x1.extend(k.coef[1:])
        x1.extend(flex.double(k1.m, 0.0))

        # convolve kernels
        coef = kernapply(x1, k1, circular=True)
        m = len(coef)//2
        coef = coef[m:]
        k = Kernel(coef=coef)

    # apply smoothing kernel
    spec = kernapply(spec, k, circular=True)
    self.spec = spec[1:fft.n_complex()]

    return
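
A sketch of the detrend step above: subtracting the mean and the least-squares linear trend (on the centred time axis t) removes a pure linear ramp completely.

from scitbx.array_family import flex

n = 8
x = flex.double([0.5 * i + 3.0 for i in range(n)])     # pure linear trend
t = flex.size_t_range(n).as_double() + 1 - (n + 1) / 2
x_dt = x - flex.mean(x) - t * (x.dot(t) / t.dot(t))
assert max(abs(v) for v in x_dt) < 1e-10               # trend fully removed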
Example #13
def build_i_calc(work_params):
    from scitbx.array_family import flex
    d_min = work_params.d_min
    if (work_params.pdb_file is None):
        miller_set = work_params.unit_cell \
          .complete_miller_set_with_lattice_symmetry(
            d_min=d_min,
            anomalous_flag=True).expand_to_p1()
        if (work_params.intensity_symmetry is not None):
            miller_set = miller_set.customized_copy(
                space_group_info=work_params.intensity_symmetry,
                anomalous_flag=work_params.anomalous_flag
            ).unique_under_symmetry()
        mt = flex.mersenne_twister(seed=work_params.noise.random_seed)
        i_calc_asu = miller_set.array(data=mt.random_double(
            size=miller_set.indices().size()))
    else:
        import iotbx.pdb
        pdb_inp = iotbx.pdb.input(file_name=work_params.pdb_file)
        xs = pdb_inp.xray_structure_simple().change_basis(
            cb_op=work_params.change_of_basis_op_to_niggli_cell)
        assert xs.unit_cell().is_similar_to(other=work_params.unit_cell)
        _ = work_params.reset_b_factors_value
        if (_ is not None):
            from cctbx import adptbx
            u_iso = adptbx.b_as_u(_)
            xs.convert_to_isotropic()
            for sc in xs.scatterers():
                sc.u_iso = u_iso
        miller_set = work_params.unit_cell \
          .complete_miller_set_with_lattice_symmetry(
            d_min=d_min,
            anomalous_flag=work_params.anomalous_flag) \
              .expand_to_p1().customized_copy(
                space_group_info=xs.space_group_info()) \
                  .unique_under_symmetry() \
                  .remove_systematic_absences() \
                  .map_to_asu()
        i_calc_asu = miller_set.structure_factors_from_scatterers(
            xray_structure=xs).f_calc().intensities()
        if (i_calc_asu.data().size() != 0):
            i_calc_max = flex.max(i_calc_asu.data())
            if (i_calc_max > 0):
                i_calc_asu = i_calc_asu.array(data=i_calc_asu.data() *
                                              (1 / i_calc_max))
        i_calc_asu = i_calc_asu.customized_copy(
          space_group_info=i_calc_asu.space_group()
            .build_derived_reflection_intensity_group(anomalous_flag=True).info()) \
          .map_to_asu() \
          .complete_array(new_data_value=0)
    i_calc_asu = i_calc_asu.sort(by_value="resolution")
    assert not i_calc_asu.space_group().is_centric()
    assert i_calc_asu.space_group().n_ltr() == 1
    assert i_calc_asu.space_group_info().type().is_symmorphic()
    if (work_params.force_unit_spot_intensities):
        i_calc_asu = i_calc_asu.array(
            data=flex.double(i_calc_asu.indices().size(), 1))
    i_asu_array = i_calc_asu.customized_copy(
        data=flex.size_t_range(i_calc_asu.indices().size()))
    if (not i_asu_array.anomalous_flag()):
        i_asu_array = i_asu_array.generate_bijvoet_mates()
    i_asu_array = i_asu_array.expand_to_p1()
    asu_iselection = i_asu_array.data()
    i_calc_p1_anom = i_asu_array.customized_copy(
        data=i_calc_asu.data().select(asu_iselection))
    from libtbx import group_args
    return group_args(asu=i_calc_asu,
                      asu_iselection=asu_iselection,
                      p1_anom=i_calc_p1_anom)
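
The iselection trick above, reduced to plain flex arrays: tag each ASU item with its own index, let the expansion duplicate and reorder the tags, then pull the original data through the tags.

from scitbx.array_family import flex

asu_data = flex.double([10.0, 20.0, 30.0])
tags = flex.size_t_range(asu_data.size())        # 0, 1, 2
expanded_tags = flex.size_t([0, 2, 2, 1, 0])     # stand-in for expand_to_p1()
expanded_data = asu_data.select(expanded_tags)   # 10, 30, 30, 20, 10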
Example #14
    def write(self, experiments, reflections):
        """
        Write the experiments and reflections to file
        """

        # if mmcif filename is auto, then choose scaled.cif or integrated.cif
        if self.params.mmcif.hklout in (None, Auto, "auto"):
            if ("intensity.scale.value"
                    in reflections) and ("intensity.scale.variance"
                                         in reflections):
                filename = "scaled.cif"
                logger.info(
                    "Data appears to be scaled, setting mmcif.hklout = 'scaled.cif'"
                )
            else:
                filename = "integrated.cif"
                logger.info(
                    "Data appears to be unscaled, setting mmcif.hklout = 'integrated.cif'"
                )
        else:
            filename = self.params.mmcif.hklout

        # Select reflections
        selection = reflections.get_flags(reflections.flags.integrated,
                                          all=True)
        reflections = reflections.select(selection)

        # Filter out bad variances and other issues, but don't filter on ice rings
        # or alter partialities.

        # Assumes you want to apply the lp and dqe corrections to sum and prf
        # Do we want to combine partials?
        reflections = filter_reflection_table(
            reflections,
            self.params.intensity,
            combine_partials=False,
            partiality_threshold=0.0,
            d_min=self.params.mtz.d_min,
        )

        # Get the cif block
        cif_block = iotbx.cif.model.block()

        # Audit trail
        dials_version = dials.util.version.dials_version()
        cif_block["_audit.creation_method"] = dials_version
        cif_block["_audit.creation_date"] = datetime.date.today().isoformat()
        cif_block["_computing.data_reduction"] = (
            "%s (Winter, G. et al., 2018)" % dials_version)
        cif_block[
            "_publ.section_references"] = "Winter, G. et al. (2018) Acta Cryst. D74, 85-97."
        if "scale" in self.params.intensity:
            cif_block[
                "_publ.section_references"] += "\nBeilsten-Edmands, J. et al. (2020) Acta Cryst. D76, 385-399."

        # Hard coding X-ray
        cif_block["_pdbx_diffrn_data_section.id"] = "dials"
        cif_block["_pdbx_diffrn_data_section.type_scattering"] = "x-ray"
        cif_block["_pdbx_diffrn_data_section.type_merged"] = "false"
        cif_block["_pdbx_diffrn_data_section.type_scaled"] = str(
            "scale" in self.params.intensity).lower()

        # FIXME finish metadata addition - detector and source details needed
        # http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Categories/index.html

        # Add source information;
        # _diffrn_source.pdbx_wavelength_list = (list of wavelengths)
        # _diffrn_source.source = (general class of source e.g. synchrotron)
        # _diffrn_source.type = (specific beamline or instrument e.g DIAMOND BEAMLINE I04)

        wls = []
        epochs = []
        for exp in experiments:
            wls.append(round(exp.beam.get_wavelength(), 5))
            epochs.append(exp.scan.get_epochs()[0])
        unique_wls = set(wls)
        cif_block["_diffrn_source.pdbx_wavelength_list"] = ", ".join(
            str(w) for w in unique_wls)

        # Add detector information;
        # _diffrn_detector.detector  = (general class e.g. PIXEL, PLATE etc)
        # _diffrn_detector.pdbx_collection_date = (Date of collection yyyy-mm-dd)
        # _diffrn_detector.type = (full name of detector e.g. DECTRIS PILATUS3 2M)
        # One date is required, so if multiple just use the first date.
        min_epoch = min(epochs)
        date_str = time.strftime("%Y-%m-%d", time.gmtime(min_epoch))
        cif_block["_diffrn_detector.pdbx_collection_date"] = date_str

        # Write the crystal information
        cif_loop = iotbx.cif.model.loop(header=(
            "_pdbx_diffrn_unmerged_cell.ordinal",
            "_pdbx_diffrn_unmerged_cell.crystal_id",
            "_pdbx_diffrn_unmerged_cell.wavelength",
            "_pdbx_diffrn_unmerged_cell.cell_length_a",
            "_pdbx_diffrn_unmerged_cell.cell_length_b",
            "_pdbx_diffrn_unmerged_cell.cell_length_c",
            "_pdbx_diffrn_unmerged_cell.cell_angle_alpha",
            "_pdbx_diffrn_unmerged_cell.cell_angle_beta",
            "_pdbx_diffrn_unmerged_cell.cell_angle_gamma",
            "_pdbx_diffrn_unmerged_cell.Bravais_lattice",
        ))
        crystals = experiments.crystals()
        crystal_to_id = {crystal: i + 1 for i, crystal in enumerate(crystals)}
        for i, exp in enumerate(experiments):
            crystal = exp.crystal
            crystal_id = crystal_to_id[crystal]
            wavelength = exp.beam.get_wavelength()
            a, b, c, alpha, beta, gamma = crystal.get_unit_cell().parameters()
            latt_type = str(
                bravais_types.bravais_lattice(group=crystal.get_space_group()))
            cif_loop.add_row((i + 1, crystal_id, wavelength, a, b, c, alpha,
                              beta, gamma, latt_type))
        # add the completed loop once, after all rows
        cif_block.add_loop(cif_loop)

        # Write the scan information
        cif_loop = iotbx.cif.model.loop(header=(
            "_pdbx_diffrn_scan.scan_id",
            "_pdbx_diffrn_scan.crystal_id",
            "_pdbx_diffrn_scan.image_id_begin",
            "_pdbx_diffrn_scan.image_id_end",
            "_pdbx_diffrn_scan.scan_angle_begin",
            "_pdbx_diffrn_scan.scan_angle_end",
        ))
        for i, exp in enumerate(experiments):
            scan = exp.scan
            crystal_id = crystal_to_id[exp.crystal]
            image_range = scan.get_image_range()
            osc_range = scan.get_oscillation_range(deg=True)
            cif_loop.add_row((
                i + 1,
                crystal_id,
                image_range[0],
                image_range[1],
                osc_range[0],
                osc_range[1],
            ))
        # add the completed loop once, after all rows
        cif_block.add_loop(cif_loop)

        # Make a dict of unit_cell parameters
        unit_cell_parameters = {}
        if crystal.num_scan_points > 1:
            for i in range(crystal.num_scan_points):
                a, b, c, alpha, beta, gamma = crystal.get_unit_cell_at_scan_point(
                    i).parameters()
                unit_cell_parameters[i] = (a, b, c, alpha, beta, gamma)
        else:
            unit_cell_parameters[0] = (a, b, c, alpha, beta, gamma)

        # Write reflection data
        # Required columns
        header = (
            "_pdbx_diffrn_unmerged_refln.reflection_id",
            "_pdbx_diffrn_unmerged_refln.scan_id",
            "_pdbx_diffrn_unmerged_refln.image_id_begin",
            "_pdbx_diffrn_unmerged_refln.image_id_end",
            "_pdbx_diffrn_unmerged_refln.index_h",
            "_pdbx_diffrn_unmerged_refln.index_k",
            "_pdbx_diffrn_unmerged_refln.index_l",
        )

        extra_items = {
            "scales": ("_pdbx_diffrn_unmerged_refln.scale_value", "%5.3f"),
            "intensity.scale.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_meas",
                "%8.3f",
            ),
            "intensity.scale.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_sigma",
                "%8.3f",
            ),
            "intensity.sum.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_sum",
                "%8.3f",
            ),
            "intensity.sum.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_sum_sigma",
                "%8.3f",
            ),
            "intensity.prf.value": (
                "_pdbx_diffrn_unmerged_refln.intensity_prf",
                "%8.3f",
            ),
            "intensity.prf.sigma": (
                "_pdbx_diffrn_unmerged_refln.intensity_prf_sigma",
                "%8.3f",
            ),
            "angle":
            ("_pdbx_diffrn_unmerged_refln.scan_angle_reflection", "%7.4f"),
            "partiality": ("_pdbx_diffrn_unmerged_refln.partiality", "%7.4f"),
        }

        fmt = "%6i %2i %5i %5i %-2i %-2i %-2i"

        variables_present = []
        if "scale" in self.params.intensity:
            reflections["scales"] = 1.0 / reflections["inverse_scale_factor"]
            reflections["intensity.scale.sigma"] = flex.sqrt(
                reflections["intensity.scale.variance"])
            variables_present.extend(
                ["scales", "intensity.scale.value", "intensity.scale.sigma"])
        if "sum" in self.params.intensity:
            reflections["intensity.sum.sigma"] = flex.sqrt(
                reflections["intensity.sum.variance"])
            variables_present.extend(
                ["intensity.sum.value", "intensity.sum.sigma"])
        if "profile" in self.params.intensity:
            reflections["intensity.prf.sigma"] = flex.sqrt(
                reflections["intensity.prf.variance"])
            variables_present.extend(
                ["intensity.prf.value", "intensity.prf.sigma"])

        # Should always exist
        reflections["angle"] = reflections["xyzcal.mm"].parts()[2] * RAD2DEG
        variables_present.extend(["angle"])

        if "partiality" in reflections:
            variables_present.extend(["partiality"])

        for name in variables_present:
            if name in reflections:
                header += (extra_items[name][0], )
                fmt += " " + extra_items[name][1]

        loop_format_strings = {"_pdbx_diffrn_unmerged_refln": fmt}

        if "scale" in self.params.intensity:
            # Write dataset_statistics - first make a miller array
            crystal_symmetry = cctbxcrystal.symmetry(
                space_group=experiments[0].crystal.get_space_group(),
                unit_cell=experiments[0].crystal.get_unit_cell(),
            )
            miller_set = miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=reflections["miller_index"],
                anomalous_flag=False,
            )
            i_obs = miller.array(miller_set,
                                 data=reflections["intensity.scale.value"])
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(reflections["intensity.scale.sigma"])
            i_obs.set_info(
                miller.array_info(source="DIALS",
                                  source_type="reflection_tables"))

            result = dataset_statistics(
                i_obs=i_obs,
                crystal_symmetry=crystal_symmetry,
                use_internal_variance=False,
                eliminate_sys_absent=False,
            )

            cif_block.update(result.as_cif_block())

        _, _, _, _, z0, z1 = reflections["bbox"].parts()
        h, k, l = [
            hkl.iround()
            for hkl in reflections["miller_index"].as_vec3_double().parts()
        ]
        loop_values = [
            flex.size_t_range(1,
                              len(reflections) + 1),
            reflections["id"] + 1,
            z0,
            z1,
            h,
            k,
            l,
        ] + [reflections[name] for name in variables_present]
        cif_loop = iotbx.cif.model.loop(data=dict(zip(header, loop_values)))
        cif_block.add_loop(cif_loop)

        # Add the block
        self._cif["dials"] = cif_block

        # Print to file
        if self.params.mmcif.compress and not filename.endswith(
                "." + self.params.mmcif.compress):
            filename += "." + self.params.mmcif.compress
        if self.params.mmcif.compress == "gz":
            open_fn = gzip.open
        elif self.params.mmcif.compress == "bz2":
            open_fn = bz2.open
        elif self.params.mmcif.compress == "xz":
            open_fn = lzma.open
        else:
            open_fn = open
        with open_fn(filename, "wt") as fh:
            self._cif.show(out=fh, loop_format_strings=loop_format_strings)

        # Log
        logger.info("Wrote reflections to %s" % filename)
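
The compressor dispatch at the end writes text through whichever open function matches params.mmcif.compress; all three stdlib modules expose a text-mode open (file names here are illustrative):

import bz2
import gzip
import lzma

for open_fn, name in ((gzip.open, "demo.cif.gz"),
                      (bz2.open, "demo.cif.bz2"),
                      (lzma.open, "demo.cif.xz")):
    with open_fn(name, "wt") as fh:
        fh.write("data_demo\n")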
Example #15
def build_i_calc(work_params):
  from scitbx.array_family import flex
  d_min = work_params.d_min
  if (work_params.pdb_file is None):
    miller_set = work_params.unit_cell \
      .complete_miller_set_with_lattice_symmetry(
        d_min=d_min,
        anomalous_flag=True).expand_to_p1()
    if (work_params.intensity_symmetry is not None):
      miller_set = miller_set.customized_copy(
        space_group_info=work_params.intensity_symmetry,
        anomalous_flag=work_params.anomalous_flag).unique_under_symmetry()
    mt = flex.mersenne_twister(seed=work_params.noise.random_seed)
    i_calc_asu = miller_set.array(
      data=mt.random_double(size=miller_set.indices().size()))
  else:
    import iotbx.pdb
    pdb_inp = iotbx.pdb.input(file_name=work_params.pdb_file)
    xs = pdb_inp.xray_structure_simple().change_basis(
      cb_op=work_params.change_of_basis_op_to_niggli_cell)
    assert xs.unit_cell().is_similar_to(other=work_params.unit_cell)
    _ = work_params.reset_b_factors_value
    if (_ is not None):
      from cctbx import adptbx
      u_iso = adptbx.b_as_u(_)
      xs.convert_to_isotropic()
      for sc in xs.scatterers():
        sc.u_iso = u_iso
    miller_set = work_params.unit_cell \
      .complete_miller_set_with_lattice_symmetry(
        d_min=d_min,
        anomalous_flag=work_params.anomalous_flag) \
          .expand_to_p1().customized_copy(
            space_group_info=xs.space_group_info()) \
              .unique_under_symmetry() \
              .remove_systematic_absences() \
              .map_to_asu()
    i_calc_asu = miller_set.structure_factors_from_scatterers(
      xray_structure=xs).f_calc().intensities()
    if (i_calc_asu.data().size() != 0):
      i_calc_max = flex.max(i_calc_asu.data())
      if (i_calc_max > 0):
        i_calc_asu = i_calc_asu.array(data=i_calc_asu.data() * (1/i_calc_max))
    i_calc_asu = i_calc_asu.customized_copy(
      space_group_info=i_calc_asu.space_group()
        .build_derived_reflection_intensity_group(anomalous_flag=True).info()) \
      .map_to_asu() \
      .complete_array(new_data_value=0)
  i_calc_asu = i_calc_asu.sort(by_value="resolution")
  assert not i_calc_asu.space_group().is_centric()
  assert i_calc_asu.space_group().n_ltr() == 1
  assert i_calc_asu.space_group_info().type().is_symmorphic()
  if (work_params.force_unit_spot_intensities):
    i_calc_asu = i_calc_asu.array(
      data=flex.double(i_calc_asu.indices().size(), 1))
  i_asu_array = i_calc_asu.customized_copy(
    data=flex.size_t_range(i_calc_asu.indices().size()))
  if (not i_asu_array.anomalous_flag()):
    i_asu_array = i_asu_array.generate_bijvoet_mates()
  i_asu_array = i_asu_array.expand_to_p1()
  asu_iselection = i_asu_array.data()
  i_calc_p1_anom = i_asu_array.customized_copy(
    data=i_calc_asu.data().select(asu_iselection))
  from libtbx import group_args
  return group_args(
    asu=i_calc_asu,
    asu_iselection=asu_iselection,
    p1_anom=i_calc_p1_anom)