Example #1
def run(args):
  assert len(args) <= 1  # allow the optional flag checked below
  if '--exercise-retrieve-unless-exists' in args:
    exercise_retrieve_unless_exists()
  else:
    print('Skipping exercise_retrieve_unless_exists')
  exercise_misc()
  assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
  assert utils.flat_list(0) == [0]
  assert utils.flat_list([1,2,3]) == [1,2,3]
  assert utils.flat_list([1,[2,3,4],3]) == [1,2,3,4,3]
  assert utils.flat_list([1,[2,3,4],[[3,4],[5,6]]]) == [1,2,3,4,3,4,5,6]
  try:
    raise RuntimeError("Trial")
  except KeyboardInterrupt: raise
  except Exception:
    assert utils.format_exception() == "RuntimeError: Trial"
  else: raise Exception_expected
  try:
    assert 1 == 2
  except KeyboardInterrupt: raise
  except Exception:
    s = utils.format_exception()
    assert s.startswith("AssertionError: ")
    assert s.find("tst_utils.py line ") >= 0
  else: raise Exception_expected
  exercise_indented_display()
  exercise_approx_equal()
  exercise_file_utils()
  exercise_dir_utils()
  exercise_group_args()
  exercise_round2()
  print(utils.format_cpu_times())
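
The asserts above pin down the behaviour of libtbx.utils.flat_list: it flattens arbitrarily nested lists and wraps a bare scalar in a one-element list. Below is a minimal sketch of an equivalent flattener (a hypothetical reimplementation for illustration, not the libtbx source) that satisfies the same asserts:

def flat_list_sketch(nested):
  # Recursively flatten nested lists/tuples; wrap anything else in a one-element list.
  result = []
  if isinstance(nested, (list, tuple)):
    for item in nested:
      result.extend(flat_list_sketch(item))
  else:
    result.append(nested)
  return result

assert flat_list_sketch(0) == [0]
assert flat_list_sketch([1, [2, 3, 4], [[3, 4], [5, 6]]]) == [1, 2, 3, 4, 3, 4, 5, 6]
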
Example #2
def run(args):
    assert len(args) == 0
    exercise_forward_compatibility()
    exercise_misc()
    assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
    assert utils.flat_list(0) == [0]
    assert utils.flat_list([1, 2, 3]) == [1, 2, 3]
    assert utils.flat_list([1, [2, 3, 4], 3]) == [1, 2, 3, 4, 3]
    assert utils.flat_list([1, [2, 3, 4],
                            [[3, 4], [5, 6]]]) == [1, 2, 3, 4, 3, 4, 5, 6]
    try:
        raise RuntimeError("Trial")
    except KeyboardInterrupt:
        raise
    except Exception:
        assert utils.format_exception() == "RuntimeError: Trial"
    else:
        raise Exception_expected
    try:
        assert 1 == 2
    except KeyboardInterrupt:
        raise
    except Exception:
        s = utils.format_exception()
        assert s.startswith("AssertionError: ")
        assert s.find("tst_utils.py line ") >= 0
    else:
        raise Exception_expected
    exercise_indented_display()
    exercise_approx_equal()
    exercise_file_utils()
    exercise_dir_utils()
    print(utils.format_cpu_times())
Example #3
def run(args):
  assert len(args) <= 1  # allow the optional flag checked below
  if '--exercise-retrieve-unless-exists' in args:
    exercise_retrieve_unless_exists()
  else:
    print('Skipping exercise_retrieve_unless_exists')
  exercise_forward_compatibility()
  exercise_misc()
  assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
  assert utils.flat_list(0) == [0]
  assert utils.flat_list([1,2,3]) == [1,2,3]
  assert utils.flat_list([1,[2,3,4],3]) == [1,2,3,4,3]
  assert utils.flat_list([1,[2,3,4],[[3,4],[5,6]]]) == [1,2,3,4,3,4,5,6]
  try:
    raise RuntimeError("Trial")
  except KeyboardInterrupt: raise
  except Exception:
    assert utils.format_exception() == "RuntimeError: Trial"
  else: raise Exception_expected
  try:
    assert 1 == 2
  except KeyboardInterrupt: raise
  except Exception:
    s = utils.format_exception()
    assert s.startswith("AssertionError: ")
    assert s.find("tst_utils.py line ") >= 0
  else: raise Exception_expected
  exercise_indented_display()
  exercise_approx_equal()
  exercise_file_utils()
  exercise_dir_utils()
  print(utils.format_cpu_times())
Example #4
 def as_miller_arrays(self,
                      data_block_name=None,
                      crystal_symmetry=None,
                      force_symmetry=False,
                      merge_equivalents=True,
                      base_array_info=None):
     if base_array_info is None:
         base_array_info = miller.array_info(source=self.file_path,
                                             source_type="cif")
     if data_block_name is not None:
         arrays = self.build_miller_arrays(
             data_block_name=data_block_name,
             base_array_info=base_array_info).values()
     else:
         arrays = flat_list([
             arrays.values() for arrays in self.build_miller_arrays(
                 base_array_info=base_array_info).values()
         ])
     other_symmetry = crystal_symmetry
     for i, array in enumerate(arrays):
         if crystal_symmetry is not None:
             crystal_symmetry_from_file = array.crystal_symmetry()
             crystal_symmetry = crystal_symmetry_from_file.join_symmetry(
                 other_symmetry=other_symmetry, force=force_symmetry)
             arrays[i] = array.customized_copy(
                 crystal_symmetry=crystal_symmetry)
             arrays[i].set_info(array.info())
     return arrays
Example #5
    def AddUntrustedPolygon(self, vertices):
        if len(vertices) < 4:
            return
        vertices.append(vertices[0])
        vertices = [self._pyslip.ConvertView2Geo(v) for v in vertices]
        vertices = [
            self._pyslip.tiles.map_relative_to_picture_fast_slow(*v) for v in vertices
        ]

        point_ = []
        panel_id = None
        for p in vertices:
            p1, p0, p_id = self._pyslip.tiles.flex_image.picture_to_readout(p[1], p[0])
            assert p_id >= 0, "Point must be within a panel"
            if panel_id is not None:
                assert (
                    panel_id == p_id
                ), "All points must be contained within a single panel"
            panel_id = p_id
            point_.append((p0, p1))
        vertices = point_

        from libtbx.utils import flat_list

        from dials.util import masking

        region = masking.phil_scope.extract().untrusted[0]
        points = flat_list(vertices)
        region.polygon = [int(p) for p in points]
        region.panel = panel_id

        self.params.masking.untrusted.append(region)
Example #6
 def as_miller_arrays(self,
                      data_block_name=None,
                      crystal_symmetry=None,
                      force_symmetry=False,
                      merge_equivalents=True,
                      base_array_info=None,
                      anomalous=None):
     if base_array_info is None:
         base_array_info = miller.array_info(source=self.file_path,
                                             source_type="cif")
     if data_block_name is not None:
         arrays = list(
             self.build_miller_arrays(
                 data_block_name=data_block_name,
                 base_array_info=base_array_info).values())
     else:
         arrays = flat_list([
             list(arrays.values()) for arrays in self.build_miller_arrays(
                 base_array_info=base_array_info).values()
         ])
     other_symmetry = crystal_symmetry
     for i in range(len(arrays)):
         if crystal_symmetry is not None:
             crystal_symmetry_from_file = arrays[i].crystal_symmetry()
             crystal_symmetry = crystal_symmetry_from_file.join_symmetry(
                 other_symmetry=other_symmetry, force=force_symmetry)
             arrays[i] = arrays[i].customized_copy(
                 crystal_symmetry=crystal_symmetry, info=arrays[i].info())
         if anomalous is not None:
             arrays[i] = arrays[i].customized_copy(anomalous_flag=anomalous,
                                                   info=arrays[i].info())
     return arrays
Example #7
 def as_miller_arrays(self, data_block_name=None,
                      crystal_symmetry=None,
                      force_symmetry=False,
                      merge_equivalents=True,
                      base_array_info=None):
   if base_array_info is None:
     base_array_info = miller.array_info(
       source=self.file_path, source_type="cif")
   if data_block_name is not None:
     arrays = self.build_miller_arrays(
       data_block_name=data_block_name,
       base_array_info=base_array_info).values()
   else:
     arrays = flat_list([
       arrays.values() for arrays in
       self.build_miller_arrays(base_array_info=base_array_info).values()])
   other_symmetry=crystal_symmetry
   for i, array in enumerate(arrays):
     if crystal_symmetry is not None:
       crystal_symmetry_from_file = array.crystal_symmetry()
       crystal_symmetry = crystal_symmetry_from_file.join_symmetry(
         other_symmetry=other_symmetry,
         force=force_symmetry)
       arrays[i] = array.customized_copy(crystal_symmetry=crystal_symmetry)
       arrays[i].set_info(array.info())
   return arrays
Example #8
 def check(a, b, c, expected_free_vars, expected_sol):
   m = []
   t = []
   for i in range(3):
     m.append([a[i], b[i]])
     t.append(c[i])
   m_orig = matrix.rec(flat_list(m), (3,2))
   t_orig = list(t)
   free_vars = row_echelon.form_rational(m, t)
   assert free_vars == expected_free_vars
   sol = row_echelon.back_substitution_rational(m, t, free_vars, [3, 11])
   assert sol == expected_sol
   if (sol is not None):
     assert list(m_orig * sol) == t_orig
Example #9
 def check(a, b, c, expected_free_vars, expected_sol):
   m = []
   t = []
    for i in range(3):
     m.append([a[i], b[i]])
     t.append(c[i])
   m_orig = matrix.rec(flat_list(m), (3,2))
   t_orig = list(t)
   free_vars = row_echelon.form_rational(m, t)
   assert free_vars == expected_free_vars
   sol = row_echelon.back_substitution_rational(m, t, free_vars, [3, 11])
   assert sol == expected_sol
   if (sol is not None):
     assert list(m_orig * sol) == t_orig
Example #10
  def calculate_state_uncertainties(self, var_cov):
    """Given a variance-covariance array for the parameters of this model,
    propagate those estimated errors into the uncertainties of the model state"""

    # the gradients are in an n-element list, each element of which is an
    # object of length m with the same dimensions as the model state. The
    # elements of this object contain the gradient of the state element in
    # the equivalent position
    grads = self.get_ds_dp()

    if len(grads) == 0: return

    if self._is_multi_state:
      # in this case the gradients are an n-element list, each element of which
      # is a list of length of the number of states. Each element of the list
      # is an object of length m with the same dimensions as the model state.
      # Reshape this data so that the list over the number of states becomes
      # the outer level.
      reshaped = []
      for i in range(len(grads[0])):
        reshaped.append([g[i] for g in grads])
      grads = reshaped
    else:
      # we only have a single state, so put gradients in a 1-elt list to mimic
      # the structure of the multi-state case
      grads = [grads]

    # the jacobian is the m*n matrix of partial derivatives of the m state
    # elements wrt the n parameters
    from libtbx.utils import flat_list
    from scitbx.array_family import flex

    state_covs = []
    for grads_one_state in grads:
      jacobian_t = flex.double(flat_list(grads_one_state))
      jacobian_t.reshape(flex.grid(len(grads_one_state),
                                   len(grads_one_state[0].elems)))

      # propagation of errors takes the variance-covariance matrix of parameters,
      # along with the jacobian mapping changes in parameter values to changes
      # in the model state elements, to calculate an approximate variance-
      # covariance matrix of the state elements. That is, state_cov is the
      # matrix product: jacobian * var_cov * jacobian_t
      tmp = var_cov.matrix_multiply(jacobian_t)
      state_cov = jacobian_t.matrix_transpose_multiply(tmp).as_scitbx_matrix()
      state_covs.append(state_cov)

    return state_covs
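
The comment block above describes the propagation step as the matrix product jacobian * var_cov * jacobian_t, evaluated with scitbx flex arrays. A minimal standalone sketch of just that step on toy numbers (a hypothetical 2-parameter model with a 3-element state; assumes scitbx is available):

from scitbx.array_family import flex

# 2x2 variance-covariance matrix of the two parameters (toy values)
var_cov = flex.double([0.04, 0.00,
                       0.00, 0.01])
var_cov.reshape(flex.grid(2, 2))

# 2x3 transposed jacobian: row i holds d(state element)/d(parameter i)
jacobian_t = flex.double([1.0, 0.0, 0.5,
                          0.0, 2.0, 1.0])
jacobian_t.reshape(flex.grid(2, 3))

tmp = var_cov.matrix_multiply(jacobian_t)              # 2x3
state_cov = jacobian_t.matrix_transpose_multiply(tmp)  # 3x3 = jacobian * var_cov * jacobian_t
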
Example #11
    def calculate_state_uncertainties(self, var_cov):
        """Given a variance-covariance array for the parameters of this model,
    propagate those estimated errors into the uncertainties of the model state"""

        grads = []
        if self._is_multi_state:
            i = 0
            while True:
                try:
                    grads.append(self.get_ds_dp(multi_state_elt=i))
                    i += 1
                except IndexError:
                    break
        else:
            grads.append(self.get_ds_dp())

        if len(grads[0]) == 0: return

        # the jacobian is the m*n matrix of partial derivatives of the m state
        # elements wrt the n parameters
        from libtbx.utils import flat_list
        from scitbx.array_family import flex

        state_covs = []
        for grads_one_state in grads:
            jacobian_t = flex.double(flat_list(grads_one_state))
            jacobian_t.reshape(
                flex.grid(len(grads_one_state), len(grads_one_state[0].elems)))

            # propagation of errors takes the variance-covariance matrix of parameters,
            # along with the jacobian mapping changes in parameter values to changes
            # in the model state elements, to calculate an approximate variance-
            # covariance matrix of the state elements. That is, state_cov is the
            # matrix product: jacobian * var_cov * jacobian_t
            tmp = var_cov.matrix_multiply(jacobian_t)
            state_cov = jacobian_t.matrix_transpose_multiply(
                tmp).as_scitbx_matrix()
            state_covs.append(state_cov)

        return state_covs
Example #12
  def calculate_state_uncertainties(self, var_cov):
    """Given a variance-covariance array for the parameters of this model,
    propagate those estimated errors into the uncertainties of the model state"""

    grads = []
    if self._is_multi_state:
      i = 0
      while True:
        try:
          grads.append(self.get_ds_dp(multi_state_elt=i))
          i += 1
        except IndexError:
          break
    else:
      grads.append(self.get_ds_dp())

    if len(grads[0]) == 0: return

    # the jacobian is the m*n matrix of partial derivatives of the m state
    # elements wrt the n parameters
    from libtbx.utils import flat_list
    from scitbx.array_family import flex

    state_covs = []
    for grads_one_state in grads:
      jacobian_t = flex.double(flat_list(grads_one_state))
      jacobian_t.reshape(flex.grid(len(grads_one_state),
                                   len(grads_one_state[0].elems)))

      # propagation of errors takes the variance-covariance matrix of parameters,
      # along with the jacobian mapping changes in parameter values to changes
      # in the model state elements, to calculate an approximate variance-
      # covariance matrix of the state elements. That is, state_cov is the
      # matrix product: jacobian * var_cov * jacobian_t
      tmp = var_cov.matrix_multiply(jacobian_t)
      state_cov = jacobian_t.matrix_transpose_multiply(tmp).as_scitbx_matrix()
      state_covs.append(state_cov)

    return state_covs
Example #13
def exercise_rational():
  from scitbx.matrix import row_echelon
  from scitbx import matrix
  from libtbx.utils import flat_list
  from boost_adaptbx.boost import rational
  import random
  rng = random.Random(0)
  #
  m = [[0]]
  t = [0]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[0]]
  assert t == [0]
  assert free_vars == [0]
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol == [1]
  sol = row_echelon.back_substitution_rational(m, None, free_vars, [2])
  assert sol == [2]
  #
  m = [[0]]
  t = [1]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[0]]
  assert t == [1]
  assert free_vars == [0]
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol is None
  #
  m = [[1]]
  t = [2]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[1]]
  assert t == [2]
  assert free_vars == []
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol == [2]
  #
  def rr():
    return rational.int(rng.randrange(-5,6), rng.randrange(1,10))
  #
  for i_trial in range(10):
    for nr in [1,2,3]:
      for nc in [1,2,3]:
        m = []
        for ir in range(nr):
          m.append([rr() for ic in range(nc)])
        m_orig = matrix.rec(flat_list(m), (nr,nc))
        sol_orig = [rr() for ic in range(nc)]
        t_orig = list(m_orig * matrix.col(sol_orig))
        t = list(t_orig)
        free_vars = row_echelon.form_rational(m, t)
        sol = [None] * nc
        for ic in free_vars:
          sol[ic] = sol_orig[ic]
        sol = row_echelon.back_substitution_rational(m, t, free_vars, sol)
        assert sol is not None
        assert sol.count(None) == 0
        assert sol == sol_orig
        sol = [1] * nc
        sol = row_echelon.back_substitution_rational(m, None, free_vars, sol)
        assert sol is not None
        assert (m_orig * matrix.col(sol)).dot() == 0
  #
  for i_trial in range(10):
    from itertools import count
    for i in count(10):
      a = matrix.col([rr(), rr(), rr()])
      b = matrix.col([rr(), rr(), rr()])
      if (a.cross(b).dot() != 0):
        break
    else:
      raise RuntimeError
    p = rng.randrange(-5,6)
    q = rng.randrange(-5,6)
    def check(a, b, c, expected_free_vars, expected_sol):
      m = []
      t = []
      for i in range(3):
        m.append([a[i], b[i]])
        t.append(c[i])
      m_orig = matrix.rec(flat_list(m), (3,2))
      t_orig = list(t)
      free_vars = row_echelon.form_rational(m, t)
      assert free_vars == expected_free_vars
      sol = row_echelon.back_substitution_rational(m, t, free_vars, [3, 11])
      assert sol == expected_sol
      if (sol is not None):
        assert list(m_orig * sol) == t_orig
    check(a, b, p*a+q*b, [], [p,q])
    check(a, b, a.cross(b), [], None)
    check(a, 5*a, -7*a, [1], [-62,11])
    check(a, 5*a, b, [1], None)
    check([0,0,0], [0,0,0], [0,0,0], [0,1], [3,11])
Example #14
  def find_basis_vector_combinations_cluster_analysis(self):
    # hijack the xray.structure class to facilitate calculation of distances
    xs = xray.structure(crystal_symmetry=self.crystal_symmetry)
    for i, site in enumerate(self.sites):
      xs.add_scatterer(xray.scatterer("C%i" %i, site=site))

    xs = xs.sites_mod_short()
    xs = xs.select(xs.sites_frac().norms() < 0.45)
    cell_multiplier = 10
    xs1 = xs.customized_copy(
      unit_cell=uctbx.unit_cell([xs.unit_cell().parameters()[0]*cell_multiplier]*3))
    xs1.set_sites_cart(xs.sites_cart())
    xs = xs1
    sites_cart = xs.sites_cart()
    lengths = flex.double([matrix.col(sc).length() for sc in sites_cart])
    xs = xs.select(flex.sort_permutation(lengths))
    if self.params.debug:
      with open('peaks.pdb', 'w') as f:
        print(xs.as_pdb_file(), file=f)

    vector_heights = flex.double()

    sites_frac = xs.sites_frac()
    pair_asu_table = xs.pair_asu_table(distance_cutoff=self.params.max_cell)
    asu_mappings = pair_asu_table.asu_mappings()
    distances = crystal.calculate_distances(pair_asu_table, sites_frac)
    vectors = []
    difference_vectors = []
    pairs = []
    for di in distances:
      if di.distance < self.params.min_cell: continue
      i_seq, j_seq = di.i_seq, di.j_seq
      if i_seq > j_seq: continue
      pairs.append((i_seq, j_seq))
      rt_mx_ji = di.rt_mx_ji
      site_frac_ji = rt_mx_ji * sites_frac[j_seq]
      site_cart_ji = xs.unit_cell().orthogonalize(site_frac_ji)
      site_cart_i = xs.unit_cell().orthogonalize(sites_frac[i_seq])
      vectors.append(matrix.col(site_cart_ji))
      diff_vec = matrix.col(site_cart_i) - matrix.col(site_cart_ji)
      if diff_vec[0] < 0:
        # only one hemisphere of difference vector space
        diff_vec = -diff_vec
      difference_vectors.append(diff_vec)

    params = self.params.multiple_lattice_search.cluster_analysis
    if params.method == 'dbscan':
      i_cluster = self.cluster_analysis_dbscan(difference_vectors)
      min_cluster_size = 1
    elif params.method == 'hcluster':
      i_cluster = self.cluster_analysis_hcluster(difference_vectors)
      i_cluster -= 1 # hcluster starts counting at 1
      min_cluster_size = params.min_cluster_size

    if self.params.debug_plots:
      self.debug_plot_clusters(
        difference_vectors, i_cluster, min_cluster_size=min_cluster_size)


    clusters = []
    min_cluster_size = params.min_cluster_size
    for i in range(max(i_cluster)+1):
      isel = (i_cluster == i).iselection()
      if len(isel) < min_cluster_size:
        continue
      clusters.append(isel)

    cluster_point_sets = []
    centroids = []
    cluster_sizes = flex.int()

    difference_vectors = flex.vec3_double(difference_vectors)

    from libtbx.utils import flat_list
    for cluster in clusters:
      points = flat_list([pairs[i] for i in cluster])
      cluster_point_sets.append(set(points))
      d_vectors = difference_vectors.select(cluster)
      cluster_sizes.append(len(d_vectors))
      centroids.append(d_vectors.mean())

    # build a graph where each node is a centroid from the difference vector
    # cluster analysis above, and an edge is defined when there is a
    # significant overlap between the sets of peaks in the FFT map that
    # contributed to the difference vectors in two clusters
    import networkx as nx
    G = nx.Graph()
    G.add_nodes_from(range(len(cluster_point_sets)))

    cutoff_frac = 0.25
    for i in range(len(cluster_point_sets)):
      for j in range(i+1, len(cluster_point_sets)):
        intersection_ij = cluster_point_sets[i].intersection(
            cluster_point_sets[j])
        union_ij = cluster_point_sets[i].union(cluster_point_sets[j])
        frac_connected = len(intersection_ij)/len(union_ij)
        if frac_connected > cutoff_frac:
          G.add_edge(i, j)

    # iteratively find the maximum cliques in the graph
    # break from the loop if there are no cliques remaining or there are
    # fewer than 3 vectors in the remaining maximum clique
    # Allow 1 basis vector to be shared between two cliques, to allow for
    # cases where two lattices share one basis vectors (e.g. two plate
    # crystals exactly aligned in one direction, but not in the other two)
    distinct_cliques = []
    cliques = list(nx.find_cliques(G))
    cliques = sorted(cliques, key=len, reverse=True)
    for i, clique in enumerate(cliques):
      clique = set(clique)
      if len(clique) < 3:
        break
      is_distinct = True
      for c in distinct_cliques:
        if len(c.intersection(clique)) > 1:
          is_distinct = False
          break
      if is_distinct:
        distinct_cliques.append(clique)
        this_set = set()
        for i_cluster in clique:
          this_set = this_set.union(cluster_point_sets[i_cluster])
        logger.info("Clique %i: %i lattice points" %(i+1, len(this_set)))

    assert len(distinct_cliques) > 0

    logger.info("Estimated number of lattices: %i" %len(distinct_cliques))

    self.candidate_basis_vectors = []
    self.candidate_crystal_models = []

    for clique in distinct_cliques:
      sel = flex.size_t(list(clique))
      vectors = flex.vec3_double(centroids).select(sel)
      perm = flex.sort_permutation(vectors.norms())
      vectors = [matrix.col(vectors[p]) for p in perm]

      # exclude vectors that are (approximately) integer multiples of a shorter
      # vector
      unique_vectors = []
      for v in vectors:
        is_unique = True
        for v_u in unique_vectors:
          if is_approximate_integer_multiple(v_u, v,
                                             relative_tolerance=0.01,
                                             angular_tolerance=0.5):
            is_unique = False
            break
        if is_unique:
          unique_vectors.append(v)
      vectors = unique_vectors

      self.candidate_basis_vectors.extend(vectors)
      candidate_orientation_matrices \
        = self.find_candidate_orientation_matrices(
          vectors,
          max_combinations=self.params.basis_vector_combinations.max_try)
      if len(candidate_orientation_matrices) == 0:
        continue
      crystal_model, n_indexed = self.choose_best_orientation_matrix(
        candidate_orientation_matrices)
      if crystal_model is None: continue
      # map to minimum reduced cell
      crystal_symmetry = crystal.symmetry(
        unit_cell=crystal_model.get_unit_cell(),
        space_group=crystal_model.get_space_group())
      cb_op = crystal_symmetry.change_of_basis_op_to_minimum_cell()
      crystal_model = crystal_model.change_basis(cb_op)
      self.candidate_crystal_models.append(crystal_model)

    if self.params.debug:
      file_name = "vectors.pdb"
      a = self.params.max_cell
      cs = crystal.symmetry(unit_cell=(a,a,a,90,90,90), space_group="P1")
      xs = xray.structure(crystal_symmetry=cs)
      for v in difference_vectors:
        v = matrix.col(v)
        xs.add_scatterer(xray.scatterer("C", site=v/(a/10)))
      xs.sites_mod_short()
      with open(file_name, 'w') as f:
        print(xs.as_pdb_file(), file=f)

    for crystal_model in self.candidate_crystal_models:
      logger.debug(crystal_model)
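
The clique search above is easier to see in isolation: nodes are difference-vector clusters, an edge is added when two clusters share more than cutoff_frac of their contributing peaks, and nx.find_cliques then groups clusters that plausibly belong to the same lattice. A minimal sketch on toy point sets (hypothetical data; assumes networkx is installed):

import networkx as nx

# Toy point sets: clusters 0 and 1 share half of their peaks, cluster 2 is unrelated.
cluster_point_sets = [{1, 2, 3, 4}, {3, 4, 5, 6}, {7, 8}]
cutoff_frac = 0.25

G = nx.Graph()
G.add_nodes_from(range(len(cluster_point_sets)))
for i in range(len(cluster_point_sets)):
  for j in range(i + 1, len(cluster_point_sets)):
    intersection_ij = cluster_point_sets[i] & cluster_point_sets[j]
    union_ij = cluster_point_sets[i] | cluster_point_sets[j]
    if len(intersection_ij) / len(union_ij) > cutoff_frac:
      G.add_edge(i, j)

cliques = sorted(nx.find_cliques(G), key=len, reverse=True)
print(cliques)  # e.g. [[0, 1], [2]]: clusters 0 and 1 are treated as one lattice
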
Example #15
def exercise_rational():
  from scitbx.matrix import row_echelon
  from scitbx import matrix
  from libtbx.utils import flat_list
  from boost import rational
  import random
  rng = random.Random(0)
  #
  m = [[0]]
  t = [0]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[0]]
  assert t == [0]
  assert free_vars == [0]
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol == [1]
  sol = row_echelon.back_substitution_rational(m, None, free_vars, [2])
  assert sol == [2]
  #
  m = [[0]]
  t = [1]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[0]]
  assert t == [1]
  assert free_vars == [0]
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol is None
  #
  m = [[1]]
  t = [2]
  free_vars = row_echelon.form_rational(m, t)
  assert m == [[1]]
  assert t == [2]
  assert free_vars == []
  sol = row_echelon.back_substitution_rational(m, t, free_vars, [1])
  assert sol == [2]
  #
  def rr():
    return rational.int(rng.randrange(-5,6), rng.randrange(1,10))
  #
  for i_trial in range(10):
    for nr in [1,2,3]:
      for nc in [1,2,3]:
        m = []
        for ir in range(nr):
          m.append([rr() for ic in range(nc)])
        m_orig = matrix.rec(flat_list(m), (nr,nc))
        sol_orig = [rr() for ic in range(nc)]
        t_orig = list(m_orig * matrix.col(sol_orig))
        t = list(t_orig)
        free_vars = row_echelon.form_rational(m, t)
        sol = [None] * nc
        for ic in free_vars:
          sol[ic] = sol_orig[ic]
        sol = row_echelon.back_substitution_rational(m, t, free_vars, sol)
        assert sol is not None
        assert sol.count(None) == 0
        assert sol == sol_orig
        sol = [1] * nc
        sol = row_echelon.back_substitution_rational(m, None, free_vars, sol)
        assert sol is not None
        assert (m_orig * matrix.col(sol)).dot() == 0
  #
  for i_trial in range(10):
    from itertools import count
    for i in count(10):
      a = matrix.col([rr(), rr(), rr()])
      b = matrix.col([rr(), rr(), rr()])
      if (a.cross(b).dot() != 0):
        break
    else:
      raise RuntimeError
    p = rng.randrange(-5,6)
    q = rng.randrange(-5,6)
    def check(a, b, c, expected_free_vars, expected_sol):
      m = []
      t = []
      for i in range(3):
        m.append([a[i], b[i]])
        t.append(c[i])
      m_orig = matrix.rec(flat_list(m), (3,2))
      t_orig = list(t)
      free_vars = row_echelon.form_rational(m, t)
      assert free_vars == expected_free_vars
      sol = row_echelon.back_substitution_rational(m, t, free_vars, [3, 11])
      assert sol == expected_sol
      if (sol is not None):
        assert list(m_orig * sol) == t_orig
    check(a, b, p*a+q*b, [], [p,q])
    check(a, b, a.cross(b), [], None)
    check(a, 5*a, -7*a, [1], [-62,11])
    check(a, 5*a, b, [1], None)
    check([0,0,0], [0,0,0], [0,0,0], [0,1], [3,11])