Example #1
    def constrain_jacobian(self, jacobian):
        '''sparse matrix version of constrain_jacobian'''

        # select unconstrained columns only
        unconstr_block = jacobian.select_columns(self._unconstrained_idx)

        # create constrained columns
        constr_block = sparse.matrix(jacobian.n_rows, len(self._constraints))

        mask = flex.bool(jacobian.n_rows, True)
        for i, (gp, c) in enumerate(
                zip(self._constrained_gps, constr_block.cols())):
            # this copies, so c is no longer the matrix column but a new vector
            for j in gp:
                c += jacobian.col(j)
            # so assign back into the matrix directly
            constr_block[:, i] = c

        # construct the constrained Jacobian
        constrained_jacobian = sparse.matrix(
            jacobian.n_rows, unconstr_block.n_cols + constr_block.n_cols)
        constrained_jacobian.assign_block(unconstr_block, 0, 0)
        constrained_jacobian.assign_block(constr_block, 0,
                                          unconstr_block.n_cols)

        return constrained_jacobian
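
The assign-back noted in the comments above matters: iterating cols() and
accumulating with += produces a new vector rather than mutating the matrix in
place. A minimal sketch of that behaviour, assuming the scitbx.sparse copy
semantics described in the comments (toy values are hypothetical):

from scitbx import sparse

j = sparse.matrix(2, 2)
j[0, 0] = 1.0
j[1, 1] = 2.0
summed = sparse.matrix(2, 1)
for i, c in enumerate(summed.cols()):
    c += j.col(0)
    c += j.col(1)
    # without this assignment the accumulated column would be lost
    summed[:, i] = c
assert list(summed.as_dense_matrix()) == [1.0, 2.0]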
 def calculate_scales_and_derivatives(self, block_id=0):
     if self._n_refl[block_id] > 1:
         value, weight, sumweight = self._smoother.multi_value_weight(
             self._normalised_x_values[block_id],
             self._normalised_y_values[block_id],
             self._normalised_z_values[block_id],
             self.value,
         )
         inv_sw = 1.0 / sumweight
         dv_dp = row_multiply(weight, inv_sw)
     elif self._n_refl[block_id] == 1:
         value, weight, sumweight = self._smoother.value_weight(
             self._normalised_x_values[block_id][0],
             self._normalised_y_values[block_id][0],
             self._normalised_z_values[block_id][0],
             self.value,
         )
         dv_dp = sparse.matrix(1, weight.size)
         b = flex.double(weight.as_dense_vector() / sumweight)
         b.reshape(flex.grid(1, b.size()))
         dv_dp.assign_block(b, 0, 0)
         value = flex.double(1, value)
     else:
         return flex.double([]), sparse.matrix(0, 0)
     return value, dv_dp
def exercise_a_tr_diag_a():
  a = sparse.matrix(9, 7)
  for i in range(a.n_rows):
    for j in range(a.n_cols):
      if (2*i + j) % 3 == 1: a[i,j] = 1
  w = flex.double([ (-1)**i*i for i in range(a.n_rows) ])
  b = a.self_transpose_times_diagonal_times_self(w)
  b0 = sparse.matrix(7, 7)
  b0[0, 0] = 5.
  b0[0, 3] = 5.
  b0[0, 6] = 5.
  b0[1, 1] = 3.
  b0[1, 4] = 3.
  b0[2, 2] = -4.
  b0[2, 5] = -4.
  b0[3, 0] = 5.
  b0[3, 3] = 5.
  b0[3, 6] = 5.
  b0[4, 1] = 3.
  b0[4, 4] = 3.
  b0[5, 2] = -4.
  b0[5, 5] = -4.
  b0[6, 0] = 5.
  b0[6, 3] = 5.
  b0[6, 6] = 5.
  assert sparse.approx_equal(tolerance=1e-12)(b, b0)
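
As a cross-check, the expected b0 above agrees with dense algebra. A hedged
sketch (mirroring the dense comparison in exercise_a_tr_a further down, and
assuming only the flex matrix operations used elsewhere on this page; a, w
and b are the objects from the exercise above):

def dense_a_tr_diag_a(a, w):
  # dense equivalent of a.self_transpose_times_diagonal_times_self(w)
  aa = a.as_dense_matrix()
  n = a.n_rows
  dw = flex.double(flex.grid(n, n), 0.0)
  for i in range(n):
    dw[i, i] = w[i]
  return aa.matrix_transpose().matrix_multiply(dw).matrix_multiply(aa)

assert dense_a_tr_diag_a(a, w).all_eq(b.as_dense_matrix())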
Example #4
def test_multiscaler_update_for_minimisation():
    """Test the multiscaler update_for_minimisation method."""

    p, e = (generated_param(), generated_exp(2))
    p.reflection_selection.method = "use_all"
    r1 = generated_refl(id_=0)
    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    p.scaling_options.nproc = 2
    p.model = "physical"
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = create_scaler(p, [exp[0]], [r1])
    singlescaler2 = create_scaler(p, [exp[1]], [r2])

    multiscaler = MultiScaler([singlescaler1, singlescaler2])
    pmg = ScalingParameterManagerGenerator(
        multiscaler.active_scalers,
        ScalingTarget,
        multiscaler.params.scaling_refinery.refinement_order,
    )
    multiscaler.single_scalers[0].components["scale"].parameters /= 2.0
    multiscaler.single_scalers[1].components["scale"].parameters *= 1.5
    apm = pmg.parameter_managers()[0]
    multiscaler.update_for_minimisation(apm, 0)
    multiscaler.update_for_minimisation(apm, 1)
    # bf[0], bf[1] should be list of scales and derivatives
    s1, d1 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[0], 0)
    s2, d2 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[1], 0)
    s3, d3 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[0], 1)
    s4, d4 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[1], 1)
    expected_scales_for_block_1 = s1
    expected_scales_for_block_1.extend(s2)
    expected_scales_for_block_2 = s3
    expected_scales_for_block_2.extend(s4)

    expected_derivatives_for_block_1 = sparse.matrix(
        expected_scales_for_block_1.size(), apm.n_active_params)
    expected_derivatives_for_block_2 = sparse.matrix(
        expected_scales_for_block_2.size(), apm.n_active_params)

    expected_derivatives_for_block_1.assign_block(d1, 0, 0)
    expected_derivatives_for_block_1.assign_block(d2, d1.n_rows,
                                                  apm.apm_data[1]["start_idx"])
    expected_derivatives_for_block_2.assign_block(d3, 0, 0)
    expected_derivatives_for_block_2.assign_block(d4, d3.n_rows,
                                                  apm.apm_data[1]["start_idx"])

    block_list = multiscaler.Ih_table.blocked_data_list

    assert block_list[0].inverse_scale_factors == expected_scales_for_block_1
    assert block_list[1].inverse_scale_factors == expected_scales_for_block_2
    assert block_list[1].derivatives == expected_derivatives_for_block_2
    assert block_list[0].derivatives == expected_derivatives_for_block_1
def exercise_column_selection():
    columns = [{0: 1, 3: 3}, {1: -1, 5: -2}, {2: 3, 4: 1}, {3: 4, 5: 1}]
    a = sparse.matrix(6, 4, columns)
    p = flex.size_t((1, 3))
    b = a.select_columns(p)
    b1 = sparse.matrix(6, len(p), [columns[k] for k in p])
    assert b == b1
    q = flex.size_t((3, 0, 2, 1))
    c = a.select_columns(q)
    c1 = sparse.matrix(6, len(q), [columns[k] for k in q])
    assert c == c1
Example #6
def exercise_matrix():
    a = sparse.matrix(10, 7)
    assert a.n_rows == 10 and a.n_cols == 7
    for c in a.cols():
        assert c.is_structurally_zero()
    a[0, 1] = 1.
    a[9, 5] = 2.
    assert a.non_zeroes == 2
    for i in range(10):
        for j in range(7):
            if (i, j) == (0, 1): assert a[i, j] == 1.
            elif (i, j) == (9, 5): assert a[i, j] == 2.
            else: assert a[i, j] == 0, (i, j, a[i, j])

    a = sparse.matrix(6, 3)
    assert a.n_rows == 6
    a[1, 1] = 1.
    a[3, 2] = 2.
    a[5, 1] = 2.
    a[4, 0] = 1.
    assert a.non_zeroes == 4
    assert a.n_rows == 6
    a[7, 0] = 1.
    assert a[7, 0] == 0
    assert a.n_rows == 6

    a = sparse.matrix(4, 3)
    a[0, 1] = 1.01
    b = sparse.matrix(4, 3)
    b[0, 1] = 1.02
    b[3, 2] = 0.001
    approx_equal = sparse.approx_equal(tolerance=0.1)
    assert approx_equal(a, b)

    m = 10
    a = sparse.matrix(m, 2)
    columns = (sparse.matrix_column(m, {
        1: 0.1,
        2: 0.2
    }), sparse.matrix_column(m, {
        4: 0.4,
        8: 0.8
    }))
    a[:, 0] = columns[0]
    a[:, 1] = columns[1]
    assert (a[:, 0], a[:, 1]) == columns

    try:
        a[1, :] = sparse.vector(2, {1: 1})
        raise Exception_expected
    except RuntimeError as e:
        assert str(e)
Example #7
def mock_single_Ih_table():
    """Mock Ih table to use for testing the target function."""
    Ih_table = Mock()
    Ih_table.inverse_scale_factors = flex.double([1.0, 1.0 / 1.1, 1.0])
    Ih_table.intensities = flex.double([10.0, 10.0, 12.0])
    Ih_table.Ih_values = flex.double([11.0, 11.0, 11.0])
    # These values should give residuals of [-1.0, 0.0, 1.0]
    Ih_table.weights = flex.double([1.0, 1.0, 1.0])
    Ih_table.size = 3
    Ih_table.derivatives = sparse.matrix(3, 1, [{0: 1.0, 1: 2.0, 2: 3.0}])
    Ih_table.h_index_matrix = sparse.matrix(3, 2, [{0: 1, 1: 1}, {2: 1}])
    Ih_table.h_expand_matrix = Ih_table.h_index_matrix.transpose()
    return Ih_table
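
A quick arithmetic check of the residuals comment above, assuming
(hypothetically here) a residual of the form I - g * Ih with unit weights:

g = [1.0, 1.0 / 1.1, 1.0]
I = [10.0, 10.0, 12.0]
Ih = [11.0, 11.0, 11.0]
# prints [-1.0, 0.0, 1.0] (the middle term is zero up to rounding)
print([i - gi * ihi for i, gi, ihi in zip(I, g, Ih)])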
Example #8
def exercise_column_selection():
  columns = [ { 0:1, 3:3 },
              { 1:-1, 5:-2 },
              { 2:3, 4:1 },
              { 3:4, 5:1 } ]
  a = sparse.matrix(6, 4, columns)
  p = flex.size_t((1, 3))
  b = a.select_columns(p)
  b1 = sparse.matrix(6, len(p), [ columns[k] for k in p ])
  assert b == b1
  q = flex.size_t((3, 0, 2, 1))
  c = a.select_columns(q)
  c1 = sparse.matrix(6, len(q), [ columns[k] for k in q ])
  assert c == c1
Example #9
def test_SHScalefactor():
    """Test the spherical harmonic absorption component."""
    initial_param = 0.1
    initial_val = 0.2

    SF = SHScaleComponent(flex.double([initial_param] * 3))
    assert SF.n_params == 3
    assert list(SF.parameters) == [initial_param] * 3

    # Test functionality just by setting sph_harm_table directly and calling
    # update_reflection_data to initialise the harmonic values.
    harmonic_values = sparse.matrix(3, 1)
    harmonic_values[0, 0] = initial_val
    harmonic_values[1, 0] = initial_val
    harmonic_values[2, 0] = initial_val
    SF.data = {"sph_harm_table": harmonic_values}
    SF.update_reflection_data()
    print(SF.harmonic_values)
    assert SF.harmonic_values[0][0, 0] == initial_val
    assert SF.harmonic_values[0][0, 1] == initial_val
    assert SF.harmonic_values[0][0, 2] == initial_val
    s, d = SF.calculate_scales_and_derivatives()
    assert list(s) == [1.0 + (3.0 * initial_val * initial_param)]
    assert d[0, 0] == initial_val
    assert d[0, 1] == initial_val
    assert d[0, 2] == initial_val
    s, d = SF.calculate_scales_and_derivatives()

    # Test functionality of passing in a selection
    harmonic_values = sparse.matrix(3, 2)
    harmonic_values[0, 0] = initial_val
    harmonic_values[0, 1] = initial_val
    harmonic_values[2, 0] = initial_val
    SF.data = {"sph_harm_table": harmonic_values}
    SF.update_reflection_data(flex.bool([False, True]))
    assert SF.harmonic_values[0].n_rows == 1
    assert SF.harmonic_values[0].n_cols == 3
    assert SF.n_refl[0] == 1

    # Test setting of restraints and that restraints are calculated.
    # Not testing actual calculation as may want to change the form.
    SF.parameter_restraints = flex.double([0.1, 0.2, 0.3])
    assert SF.parameter_restraints == flex.double([0.1, 0.2, 0.3])
    restraints = SF.calculate_restraints()
    assert restraints[0] is not None
    assert restraints[1] is not None
    jacobian_restraints = SF.calculate_jacobian_restraints()
    assert jacobian_restraints[0] is not None
    assert jacobian_restraints[1] is not None
Example #10
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
    correlation matrix calculated between columns of 2D matrix m"""

        try:  # convert a flex.double matrix to sparse
            nr, nc = m.all()
            from scitbx import sparse
            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                tmp[i] = flex.linear_correlation(
                    m.col(col1).as_dense_vector(),
                    m.col(col2).as_dense_vector()).coefficient()
                i += 1

        return tmp
Example #11
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
        correlation matrix calculated between columns of 2D matrix m"""

        try:  # convert a flex.double matrix to sparse
            nr, nc = m.all()
            from scitbx import sparse

            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                if col1 == col2:
                    tmp[i] = 1.0
                else:
                    # Avoid spuriously high correlation between a column that should be
                    # zero (such as the gradient of X residuals wrt the Shift2 parameter)
                    # and another column (such as the gradient of X residuals wrt the
                    # Dist parameter) by rounding values to 15 places. It seems that such
                    # spurious correlations may occur in cases where gradients are
                    # calculated to be zero by matrix operations, rather than set to zero.
                    v1 = m.col(col1).as_dense_vector().round(15)
                    v2 = m.col(col2).as_dense_vector().round(15)
                    tmp[i] = flex.linear_correlation(v1, v2).coefficient()
                i += 1

        return tmp
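
The packed layout produced by _packed_corr_mat follows the loop order
col1 <= col2, so the flat position of a pair can be recovered in closed form.
A small helper to that effect (packed_index is hypothetical, not part of the
code above):

def packed_index(col1, col2, n):
    """Flat position of (col1, col2), col1 <= col2, in the packed array."""
    return col1 * n - col1 * (col1 - 1) // 2 + (col2 - col1)

# sanity check against the loop order used above
n = 5
i = 0
for col1 in range(n):
    for col2 in range(col1, n):
        assert packed_index(col1, col2, n) == i
        i += 1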
Example #12
 def calculate_jacobian_restraints(cls, single_parameter_manager):
     """Calculate jacobian restraints for a single dataset from the scaling model
     components, using a single active_parameter_manager. Return None if no
     restraints, else return a residuals vector and a matrix of size
     n_restrained_parameters x n_parameters, and a weights vector."""
     residual_restraints = cls.calculate_restraints(single_parameter_manager)
     if residual_restraints:
         n_restraints = residual_restraints[0].size()
         weights = flex.double([])
         restraints_vector = flex.double([])
         jacobian = sparse.matrix(
             n_restraints, single_parameter_manager.n_active_params
         )
         cumul_restr_pos = 0
         for comp in single_parameter_manager.components.values():
             restraints = comp["object"].calculate_jacobian_restraints()
             if restraints:
                 jacobian.assign_block(
                     restraints[1], cumul_restr_pos, comp["start_idx"]
                 )
                 cumul_restr_pos += comp["n_params"]
                 restraints_vector.extend(restraints[0])
                 weights.extend(restraints[2])
         # Return the restraints vector, jacobian and weights (the weights
         # are taken from the individual component restraint calculations).
         return [restraints_vector, jacobian, weights]
     return None
Example #13
def exercise_linear_normal_equations():
    py_eqs = [
        (1, (-1, 0, 0), 1),
        (2, (2, -1, 0), 3),
        (-1, (0, 2, 1), 2),
        (-2, (0, 1, 0), -2),
    ]

    eqs_0 = normal_eqns.linear_ls(3)
    for b, a, w in py_eqs:
        eqs_0.add_equation(right_hand_side=b,
                           design_matrix_row=flex.double(a),
                           weight=w)

    eqs_1 = normal_eqns.linear_ls(3)
    b = flex.double()
    w = flex.double()
    a = sparse.matrix(len(py_eqs), 3)
    for i, (b_, a_, w_) in enumerate(py_eqs):
        b.append(b_)
        w.append(w_)
        for j in range(3):
            if a_[j]: a[i, j] = a_[j]
    eqs_1.add_equations(right_hand_side=b, design_matrix=a, weights=w)

    assert approx_equal(eqs_0.normal_matrix_packed_u(),
                        eqs_1.normal_matrix_packed_u(),
                        eps=1e-15)
    assert approx_equal(eqs_0.right_hand_side(),
                        eqs_1.right_hand_side(),
                        eps=1e-15)
    assert approx_equal(list(eqs_0.normal_matrix_packed_u()),
                        [13, -6, 0, 9, 4, 2],
                        eps=1e-15)
    assert approx_equal(list(eqs_0.right_hand_side()), [11, -6, -2], eps=1e-15)
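
The expected packed values can be verified by hand: the normal matrix is
sum_k w_k a_k a_k^T and the right-hand side is sum_k w_k b_k a_k. A
plain-Python check over the same py_eqs data (run inside the exercise):

n = [[0.0] * 3 for _ in range(3)]
rhs = [0.0] * 3
for b_, a_, w_ in py_eqs:
    for i in range(3):
        rhs[i] += w_ * b_ * a_[i]
        for j in range(3):
            n[i][j] += w_ * a_[i] * a_[j]
# upper triangle packed by rows, matching normal_matrix_packed_u()
assert [n[0][0], n[0][1], n[0][2], n[1][1], n[1][2], n[2][2]] == [13, -6, 0, 9, 4, 2]
assert rhs == [11, -6, -2]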
Example #14
 def calculate_jacobian_restraints(cls, multi_parameter_manager):
     """Calculate jacobian restraints for multi-dataset scaling, using a
     multi active_parameter_manager. First, the restraints are calculated - if
     not None is returned, then one restraints vector, jacobian matrix and
     weights vector is composed for the multiple datasets, else None is returned.
     The jacobian restraints matrix is of size n_restrained_parameters x
     n_parameters (across all datasets), while the residuals and weights vector
     are of length n_restrainted_parameters."""
     residual_restraints = cls.calculate_restraints(multi_parameter_manager)
     if residual_restraints:
         n_restraints = residual_restraints[0].size()
         weights = flex.double([])
         restraints_vector = flex.double([])
         jacobian = sparse.matrix(
             n_restraints, multi_parameter_manager.n_active_params
         )
         cumul_restr_pos = 0
         for i, single_apm in enumerate(multi_parameter_manager.apm_list):
             restraints = (
                 SingleScalingRestraintsCalculator.calculate_jacobian_restraints(
                     single_apm
                 )
             )
             if restraints:
                 jacobian.assign_block(
                     restraints[1],
                     cumul_restr_pos,
                     multi_parameter_manager.apm_data[i]["start_idx"],
                 )
                 cumul_restr_pos += restraints[1].n_rows
                 restraints_vector.extend(restraints[0])
                 weights.extend(restraints[2])
         return [restraints_vector, jacobian, weights]
     return None
Example #15
    def scales_and_derivatives(self, phi):
        """Calculate the overall scale factor at each position in 'phi' and the
    derivatives of that scale factor wrt all parameters of the model"""

        # obtain data from all scale factor components
        data = [f.get_factors_and_derivatives(phi) for f in self._factors]

        # FIXME only using phi at the moment. In future will need other information
        # such as s1 directions or central impacts, and will have to pass the right
        # bits of information to the right ScaleFactor components contained here
        scale_components, grad_components = zip(*data)

        # the overall scale is the product of the separate scale components
        overall_scale = reduce(lambda fac1, fac2: fac1 * fac2,
                               scale_components)

        # to convert derivatives of each scale component to derivatives of the
        # overall scale we multiply by the product of the other scale components,
        # omitting the factor that has been differentiated
        if len(scale_components) > 1:
            omit_one_prods = products_omitting_one_item(scale_components)

            grad_components = [row_multiply(g, coeff) for g, coeff in \
                               zip(grad_components, omit_one_prods)]

        # Now combine the gradient components by columns to produce a single
        # gradient matrix for the overall scale
        each_ncol = [g.n_cols for g in grad_components]
        tot_ncol = sum(each_ncol)
        grad = sparse.matrix(len(overall_scale), tot_ncol)
        # cumulative column offsets at which each gradient block starts
        col_start = [0]
        for n in each_ncol[:-1]:
            col_start.append(col_start[-1] + n)
        for icol, g in zip(col_start, grad_components):
            grad.assign_block(g, 0, icol)

        return overall_scale, grad
 def calculate_scales_and_derivatives(self, block_id=0):
     """Calculate and return inverse scales and derivatives for a given block."""
     scales = flex.double(self.n_refl[block_id], self._parameters[0])
     derivatives = sparse.matrix(self.n_refl[block_id], 1)
     for i in range(self.n_refl[block_id]):
         derivatives[i, 0] = 1.0
     return scales, derivatives
Example #17
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
    correlation matrix calculated between columns of 2D matrix m"""

        try:  # convert a flex.double matrix to sparse
            nr, nc = m.all()
            from scitbx import sparse

            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                tmp[i] = flex.linear_correlation(
                    m.col(col1).as_dense_vector(), m.col(col2).as_dense_vector()
                ).coefficient()
                i += 1

        return tmp
Example #18
  def scales_and_derivatives(self, phi):
    """Calculate the overall scale factor at each position in 'phi' and the
    derivatives of that scale factor wrt all parameters of the model"""

    # obtain data from all scale factor components
    data = [f.get_factors_and_derivatives(phi) for f in self._factors]

    # FIXME only using phi at the moment. In future will need other information
    # such as s1 directions or central impacts, and will have to pass the right
    # bits of information to the right ScaleFactor components contained here
    scale_components, grad_components = zip(*data)

    # the overall scale is the product of the separate scale components
    overall_scale = reduce(lambda fac1, fac2: fac1 * fac2, scale_components)

    # to convert derivatives of each scale component to derivatives of the
    # overall scale we multiply by the product of the other scale components,
    # omitting the factor that has been differentiated
    if len(scale_components) > 1:
      omit_one_prods = products_omitting_one_item(scale_components)

      grad_components = [row_multiply(g, coeff) for g, coeff in \
                         zip(grad_components, omit_one_prods)]

    # Now combine the gradient components by columns to produce a single
    # gradient matrix for the overall scale
    each_ncol = [g.n_cols for g in grad_components]
    tot_ncol = sum(each_ncol)
    grad = sparse.matrix(len(overall_scale), tot_ncol)
    # cumulative column offsets at which each gradient block starts
    col_start = [0]
    for n in each_ncol[:-1]:
      col_start.append(col_start[-1] + n)
    for icol, g in zip(col_start, grad_components):
      grad.assign_block(g, 0, icol)

    return overall_scale, grad
Example #19
def _perform_quasi_random_selection(Ih_table, n_datasets, min_per_class,
                                    min_total, max_total):

    class_matrix = sparse.matrix(n_datasets, Ih_table.size)
    Ih_table.Ih_table["class_index"] = Ih_table.Ih_table["dataset_id"]

    class_matrix = _build_class_matrix(Ih_table.Ih_table, class_matrix)
    segments_in_groups = class_matrix * Ih_table.h_index_matrix
    total = flex.double(segments_in_groups.n_cols, 0)
    for i, col in enumerate(segments_in_groups.cols()):
        total[i] = col.non_zeroes
    perm = flex.sort_permutation(total, reverse=True)
    sorted_class_matrix = segments_in_groups.select_columns(perm)
    # matrix of segment index vs asu groups

    # now want to fill up until good coverage across board
    total_in_classes, cols_not_used = _loop_over_class_matrix(
        sorted_class_matrix, min_per_class, min_total, max_total)

    cols_used = flex.bool(sorted_class_matrix.n_cols, True)
    cols_used.set_selected(cols_not_used, False)
    actual_cols_used = perm.select(cols_used)

    # now need to get reflection selection
    reduced_Ih = Ih_table.select_on_groups_isel(actual_cols_used)
    indices_this_res = reduced_Ih.Ih_table["loc_indices"]
    dataset_ids_this_res = reduced_Ih.Ih_table["dataset_id"]

    n_groups_used = len(actual_cols_used)

    return indices_this_res, dataset_ids_this_res, n_groups_used, total_in_classes
Example #20
def calculate_jacobian_fd(target, scaler, apm, block_id=0):
    """Calculate jacobian matrix with finite difference approach."""
    delta = 1.0e-7
    jacobian = sparse.matrix(scaler.Ih_table.blocked_data_list[block_id].size,
                             apm.n_active_params)
    Ih_table = scaler.Ih_table.blocked_data_list[block_id]
    # iterate over parameters, varying one at a time and calculating the residuals
    for i in range(apm.n_active_params):
        new_x = copy.copy(apm.x)
        new_x[i] -= 0.5 * delta
        apm.set_param_vals(new_x)
        scaler.update_for_minimisation(apm, block_id)
        R_low = target.calculate_residuals(
            Ih_table)  # unweighted unsquared residual
        new_x[i] += delta
        apm.set_param_vals(new_x)
        scaler.update_for_minimisation(apm, block_id)
        R_upper = target.calculate_residuals(
            Ih_table)  # unweighted unsquared residual
        new_x[i] -= 0.5 * delta
        apm.set_param_vals(new_x)
        scaler.update_for_minimisation(apm, block_id)
        fin_difference = (R_upper - R_low) / delta
        for j in range(fin_difference.size()):
            jacobian[j, i] = fin_difference[j]
    return jacobian
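
calculate_jacobian_fd above is a standard central difference: column i of the
Jacobian approximates (R(x + delta/2 e_i) - R(x - delta/2 e_i)) / delta. A
self-contained sketch of the same scheme on a plain function, independent of
the scaling machinery (jacobian_fd_plain is hypothetical):

def jacobian_fd_plain(f, x, delta=1.0e-7):
    """f maps a list of floats to a list of floats; returns a dense Jacobian."""
    jac = [[0.0] * len(x) for _ in f(x)]
    for i in range(len(x)):
        xp = list(x)
        xp[i] += 0.5 * delta
        xm = list(x)
        xm[i] -= 0.5 * delta
        fp, fm = f(xp), f(xm)
        for j in range(len(fp)):
            jac[j][i] = (fp[j] - fm[j]) / delta
    return jac

# d(x*y)/dx = y and d(x*y)/dy = x
row = jacobian_fd_plain(lambda v: [v[0] * v[1]], [2.0, 3.0])[0]
assert abs(row[0] - 3.0) < 1e-6 and abs(row[1] - 2.0) < 1e-6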
def exercise_linear_normal_equations():
  py_eqs = [ ( 1, (-1,  0,  0),  1),
             ( 2, ( 2, -1,  0),  3),
             (-1, ( 0,  2,  1),  2),
             (-2, ( 0,  1,  0), -2),
             ]

  eqs_0 = normal_eqns.linear_ls(3)
  for b, a, w in py_eqs:
    eqs_0.add_equation(right_hand_side=b,
                       design_matrix_row=flex.double(a),
                       weight=w)

  eqs_1 = normal_eqns.linear_ls(3)
  b = flex.double()
  w = flex.double()
  a = sparse.matrix(len(py_eqs), 3)
  for i, (b_, a_, w_) in enumerate(py_eqs):
    b.append(b_)
    w.append(w_)
    for j in range(3):
      if a_[j]: a[i, j] = a_[j]
  eqs_1.add_equations(right_hand_side=b, design_matrix=a, weights=w)

  assert approx_equal(
    eqs_0.normal_matrix_packed_u(), eqs_1.normal_matrix_packed_u(), eps=1e-15)
  assert approx_equal(
    eqs_0.right_hand_side(), eqs_1.right_hand_side(), eps=1e-15)
  assert approx_equal(
    list(eqs_0.normal_matrix_packed_u()), [ 13, -6, 0, 9, 4, 2 ], eps=1e-15)
  assert approx_equal(
    list(eqs_0.right_hand_side()), [ 11, -6, -2 ], eps=1e-15)
Example #22
def exercise_matrix():
  a = sparse.matrix(10,7)
  assert a.n_rows == 10 and a.n_cols == 7
  for c in a.cols():
    assert c.is_structurally_zero()
  a[0,1] = 1.
  a[9,5] = 2.
  assert a.non_zeroes == 2
  for i in range(10):
    for j in range(7):
      if (i,j) == (0,1): assert a[i,j] == 1.
      elif (i,j) == (9,5): assert a[i,j] == 2.
      else: assert a[i,j] == 0, (i, j, a[i,j])

  a = sparse.matrix(6, 3)
  assert a.n_rows == 6
  a[1,1] = 1.
  a[3,2] = 2.
  a[5,1] = 2.
  a[4,0] = 1.
  assert a.non_zeroes == 4
  assert a.n_rows == 6
  a[7,0] = 1.
  assert a[7,0] == 0
  assert a.n_rows == 6

  a = sparse.matrix(4,3)
  a[0,1] = 1.01
  b = sparse.matrix(4,3)
  b[0,1] = 1.02
  b[3,2] = 0.001
  approx_equal = sparse.approx_equal(tolerance=0.1)
  assert approx_equal(a,b)

  m = 10
  a = sparse.matrix(m, 2)
  columns = ( sparse.matrix_column(m, {1:0.1, 2:0.2}),
              sparse.matrix_column(m, {4:0.4, 8:0.8}) )
  a[:,0] = columns[0]
  a[:,1] = columns[1]
  assert (a[:,0], a[:,1]) == columns

  try:
    a[1,:] = sparse.vector(2, {1:1})
    raise Exception_expected
  except RuntimeError as e:
    assert str(e)
Example #23
def mock_component():
    """Return a mock component of a general model."""
    component = Mock()
    component.free_parameters = flex.double([1.0])
    component.free_parameter_esds = None
    component.n_params = 1
    component.var_cov_matrix = sparse.matrix(1, 1)
    return component
Example #24
def exercise_gilbert_peierls_lu_factorization():
  check = test(sparse.gilbert_peierls_lu_factorization)

  """ mathematica
        a := SparseArray[ {
              {3, j_}  -> 1.5 - j/5,
              {7, j_}  -> -0.8 + j/5,
              {_, 5}   -> 2.1,
              {i_, i_} -> i
              }, {8, 8} ]
  """
  a = sparse.matrix(8,8)
  for j,c in enumerate(a.cols()):
    j += 1
    for i in range(a.n_rows):
      i += 1
      if i == 3:
          c[i-1] = 1.5 - j/5.
      elif i == 7:
          c[i-1] = -0.8 + j/5.
      else:
          if    j == 5: c[i-1] = 2.1
          elif  i == j: c[i-1] = i
  check(a)

  """ rectangular matrix m x n with m < n """
  b = sparse.matrix(5,8)
  b[4,0] = 1.
  b[1,1] = -1.
  b[1,2] = 0.5
  b[2,1] = 1.8
  b[2,2] = -2.
  b[0,3] = 1.
  b[2,4] = -1.
  b[2,5] = 1.
  b[2,6] = 0.5
  b[3,4] = 1.
  b[3,5] = 0.5
  b[3,6] = 1.
  b[0,7] = 0.1
  b[1,7] = 0.2
  check(b)

  """ rectangular matrix m x n with m > n """
  c = b.transpose()
  check(c)
Example #26
def test_SmoothScaleFactor3D():
    """Test the 2D smooth scale factor class."""
    with pytest.raises(AssertionError):  # Test incorrect shape initialisation
        SF = SmoothScaleComponent3D(flex.double(150, 1.1), shape=(5, 5, 5))
    SF = SmoothScaleComponent3D(flex.double(150, 1.1), shape=(6, 5, 5))
    assert SF.n_x_params == 6
    assert SF.n_y_params == 5
    assert SF.n_z_params == 5
    assert SF.n_params == 150

    assert list(SF.parameters) == [1.1] * 150
    norm_rot = flex.double(150, 0.5)
    norm_time = flex.double(150, 0.5)
    norm_z = flex.double(150, 0.5)
    norm_rot[0] = 0.0
    norm_time[0] = 0.0
    norm_z[0] = 0.0
    norm_rot[149] = 3.99
    norm_time[149] = 2.99
    norm_z[149] = 2.99
    SF.data = {"x": norm_rot, "y": norm_time, "z": norm_z}
    SF.update_reflection_data()
    SF.smoother.set_smoothing(3, 1.0)  # will average 3 in x,y,z dims for test.
    assert list(SF.smoother.x_positions()) == [-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]
    assert list(SF.smoother.y_positions()) == [-0.5, 0.5, 1.5, 2.5, 3.5]
    assert list(SF.smoother.z_positions()) == [-0.5, 0.5, 1.5, 2.5, 3.5]
    s, d = SF.calculate_scales_and_derivatives()
    s2 = SF.calculate_scales()
    assert list(s) == list(s2)
    assert list(s) == pytest.approx([1.1] * 150)
    sumexp = (exp(-0.0) + (6.0 * exp(-1.0 / 1.0)) + (8.0 * exp(-3.0 / 1.0)) +
              (12.0 * exp(-2.0 / 1.0)))
    assert d[1, 7] == pytest.approx(exp(-1.0) / sumexp)  # Just check one

    # Test that if one or none in block, then doesn't fail but returns sensible value
    SF._normalised_x_values = [flex.double([0.5])]
    SF._normalised_y_values = [flex.double([0.5])]
    SF._normalised_z_values = [flex.double([0.5])]
    SF._n_refl = [1]
    assert list(SF.normalised_x_values[0]) == [0.5]
    assert list(SF.normalised_y_values[0]) == [0.5]
    assert list(SF.normalised_z_values[0]) == [0.5]
    s, d = SF.calculate_scales_and_derivatives()
    s2 = SF.calculate_scales()
    assert list(s) == list(s2)
    assert list(s) == pytest.approx([1.1])
    SF._normalised_x_values = [flex.double()]
    SF._normalised_y_values = [flex.double()]
    SF._normalised_z_values = [flex.double()]
    SF._n_refl = [0]
    assert list(SF.normalised_x_values[0]) == []
    assert list(SF.normalised_y_values[0]) == []
    assert list(SF.normalised_z_values[0]) == []
    s, d = SF.calculate_scales_and_derivatives()
    assert list(s) == []
    s2 = SF.calculate_scales()
    assert list(s) == list(s2)
    assert d == sparse.matrix(0, 0)
    def run(self):
        self.reparam.linearise()
        self.reparam.store()
        uc = self.cs.unit_cell()
        _ = mat.col
        xh, xo, x1, x2 = [
            uc.orthogonalize(sc.site)
            for sc in (self.h, self.o, self.c1, self.c2)
        ]
        u_12 = _(x2) - _(x1)
        u_o1 = _(x1) - _(xo)
        u_oh = _(xh) - _(xo)
        assert approx_equal(u_12.cross(u_o1).dot(u_oh), 0, self.eps)
        assert approx_equal(
            u_12.cross(u_o1).angle(u_oh, deg=True), 90, self.eps)
        assert approx_equal(abs(u_oh), self.bond_length, self.eps)
        assert approx_equal(u_o1.angle(u_oh, deg=True), 109.47, 0.01)

        jt0 = sparse.matrix(3, 14)
        for i in range(3):
            jt0[self.xo + i, self.xo + i] = 1.
            jt0[self.xo + i, self.xh + i] = 1.
        jt = self.reparam.jacobian_transpose
        assert sparse.approx_equal(self.eps)(jt, jt0)

        if self.verbose:
            # finite difference derivatives to compare with
            # the crude riding approximation used for analytical ones
            def differentiate(sc):
                eta = 1.e-4
                jac = []
                for i in range(3):
                    x0 = tuple(sc.site)
                    x = list(x0)
                    x[i] += eta
                    sc.site = tuple(x)
                    self.reparam.linearise()
                    self.reparam.store()
                    xp = _(self.h.site)
                    x[i] -= 2 * eta
                    sc.site = tuple(x)
                    self.reparam.linearise()
                    self.reparam.store()
                    xm = _(self.h.site)
                    sc.site = tuple(x0)
                    jac.extend((xp - xm) / (2 * eta))
                return mat.sqr(jac)

            jac_o = differentiate(self.o)
            jac_1 = differentiate(self.c1)
            jac_2 = differentiate(self.c2)
            print("staggered: %s" % self.staggered)
            print("J_o:")
            print(jac_o.mathematica_form())
            print("J_1:")
            print(jac_1.mathematica_form())
            print("J_2:")
            print(jac_2.mathematica_form())
Example #28
def test_loop_over_class_matrix():
    """Test a few different limits of the method.

    { 3, 1, 3, 2, 1, 1, 0 },
    { 2, 2, 0, 0, 3, 1, 0 },
    { 1, 4, 2, 1, 0, 0, 5 },
    """
    sorted_class_matrix = sparse.matrix(
        3,
        7,
        elements_by_columns=[
            {
                0: 3,
                1: 2,
                2: 1
            },
            {
                0: 1,
                1: 2,
                2: 4
            },
            {
                0: 3,
                2: 2
            },
            {
                0: 2,
                2: 1
            },
            {
                0: 1,
                1: 3
            },
            {
                0: 1
            },
            {
                2: 5
            },
        ],
    )

    # first test if don't meet the minimum number
    total_in_classes, cols_not_used = _loop_over_class_matrix(
        sorted_class_matrix, 1, 8, 20)
    assert list(cols_not_used) == [2, 3, 4, 5, 6]
    assert list(total_in_classes) == [4.0, 4.0, 5.0]
    # now test if max per bin reached
    total_in_classes, cols_not_used = _loop_over_class_matrix(
        sorted_class_matrix, 2, 3, 8)
    assert list(cols_not_used) == [2, 3, 4, 5, 6]
    assert list(total_in_classes) == [4.0, 4.0, 5.0]

    # now test if request more than available - should find all
    total_in_classes, cols_not_used = _loop_over_class_matrix(
        sorted_class_matrix, 9, 50, 100)
    assert not cols_not_used
    assert list(total_in_classes) == [11.0, 7.0, 13.0]
def exercise_a_tr_a():
  a = sparse.matrix(6, 3,
                    elements_by_columns = [ { 0: 1, 3:2, 5:3 },
                                            { 1:-1, 3:3, 4:-2 },
                                            { 2:1, } ])
  aa = a.as_dense_matrix()
  b = a.self_transpose_times_self()
  bb = b.as_dense_matrix()
  assert bb.all_eq(aa.matrix_transpose().matrix_multiply(aa))
Example #30
    def _build_jacobian(grads_each_dim, nelem=None, nparam=None):
        """construct Jacobian from lists of sparse gradient vectors."""

        nref = nelem // len(grads_each_dim)

        blocks = [sparse.matrix(nref, nparam) for _ in grads_each_dim]
        jacobian = sparse.matrix(nelem, nparam)

        # loop over parameters, building full width blocks of the full Jacobian
        for i in range(nparam):
            for block, grad in zip(blocks, grads_each_dim):
                block[:, i] = grad[i]

        # set the blocks in the Jacobian
        for i, block in enumerate(blocks):
            jacobian.assign_block(block, (i * nref), 0)

        return jacobian
Example #31
def mock_scaling_component():
    """Mock scaling component to allow creation of a scaling model."""
    component = MagicMock()
    component.n_params = 2
    component.inverse_scales = [flex.double([0.9, 1.1])]
    component.derivatives = [sparse.matrix(2, 2)]
    component.derivatives[0][0, 0] = 0.5
    component.derivatives[0][1, 0] = 0.4
    return component
Example #32
 def calculate_scales_and_derivatives(self, block_id=0):
     """Calculate and return inverse scales and derivatives for a given block."""
     scales = flex.exp(self._parameters[0] * self._x[block_id] /
                       (self._d_values[block_id]**2))
     derivatives = sparse.matrix(self._n_refl[block_id], 1)
     for i in range(self._n_refl[block_id]):
         derivatives[i, 0] = scales[i] * (self._x[block_id][i] /
                                          (self._d_values[block_id][i]**2))
     return scales, derivatives
    def run(self):
        self.reparam.linearise()
        self.reparam.store()
        x_c0, x_c1, x_h = [
            mat.col(sc.site) for sc in (self.c0, self.c1, self.h)
        ]
        if self.with_special_position_pivot:
            assert approx_equal(x_c0, self.site_symm.exact_site(), self.eps)
        assert approx_equal(self.uc.angle(x_c1, x_c0, x_h), 180, self.eps)
        assert approx_equal(self.uc.distance(x_c0, x_h), self.bond_length,
                            self.eps)

        if self.with_special_position_pivot:
            jt0 = sparse.matrix(
                1 + 3,  # y0, x1
                1 + 3 + 3 + 1 + 3)  # y0, x0, x1, l, x_h
        else:
            jt0 = sparse.matrix(
                3 + 3,  # x0, x1
                3 + 3 + 1 + 3)  # x0, x1, l, x_h

        # Identity for independent parameters
        if self.with_special_position_pivot:
            jt0[self.y0, self.y0] = 1.
        for i in range(3):
            jt0[self.x0 + i, self.x0 + i] = 1.
        for i in range(3):
            jt0[self.x1 + i, self.x1 + i] = 1.

        # special position x0
        if self.with_special_position_pivot:
            jt0[self.y0, self.x0] = 0
            jt0[self.y0, self.x0 + 1] = 0
            jt0[self.y0, self.x0 + 2] = 1.

        # riding
        if self.with_special_position_pivot:
            jt0[self.y0, self.x_h + 2] = 1.
        else:
            for i in range(3):
                jt0[self.x0 + i, self.x_h + i] = 1.

        jt = self.reparam.jacobian_transpose
        assert sparse.approx_equal(self.eps)(jt, jt0)
 def calculate_scales_and_derivatives(self, block_id=0):
     """Calculate and return inverse scales and derivatives for a given block."""
     d_squared = self._d_values[block_id] * self._d_values[block_id]
     scales = flex.exp(
         flex.double(self._n_refl[block_id], self._parameters[0]) / (2.0 * d_squared)
     )
     derivatives = sparse.matrix(self._n_refl[block_id], 1)
     for i in range(self._n_refl[block_id]):
         derivatives[i, 0] = scales[i] / (2.0 * d_squared[i])
     return scales, derivatives
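
For the component above, s(B) = exp(B / (2 d^2)), so ds/dB = s / (2 d^2),
which is exactly what the loop fills in. A quick finite-difference check of
that derivative on plain numbers (the values are arbitrary):

from math import exp

B, d, h = 1.3, 1.7, 1.0e-6
fd = (exp((B + h) / (2 * d * d)) - exp((B - h) / (2 * d * d))) / (2 * h)
assert abs(fd - exp(B / (2 * d * d)) / (2 * d * d)) < 1e-9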
 def calculate_scales_and_derivatives(self, block_id=0):
     scales, derivatives = super(
         SmoothBScaleComponent1D, self
     ).calculate_scales_and_derivatives(block_id)
     if self._n_refl[block_id] == 0:
         return flex.double([]), sparse.matrix(0, 0)
     prefac = 1.0 / (2.0 * (self._d_values[block_id] * self._d_values[block_id]))
     s = flex.exp(scales * prefac)
     d = row_multiply(derivatives, s * prefac)
     return s, d
def exercise_row_vector_x_matrix():
    u = flex.double((1, 2, 3))
    a = sparse.matrix(3, 5)
    a[1, 0] = 1
    a[2, 1] = 1
    a[0, 2] = 1
    a[-1, 3] = 1
    a[-2, 4] = 1
    v = u * a
    assert list(v) == [2, 3, 1, -2, -6]
Example #37
def exercise_row_vector_x_matrix():
  u = flex.double((1,2,3))
  a = sparse.matrix(3,5)
  a[1,0] = 1
  a[2,1] = 1
  a[0,2] = 1
  a[-1,3] = 1
  a[-2,4] = 1
  v = u*a
  assert list(v) == [ 2, 3, 1, -2, -6 ]
  def run(self):
    self.reparam.linearise()
    self.reparam.store()
    uc = self.cs.unit_cell()
    _ = mat.col
    xh, xo, x1, x2 = [ uc.orthogonalize(sc.site)
                       for sc in (self.h, self.o, self.c1, self.c2) ]
    u_12 = _(x2) - _(x1)
    u_o1 = _(x1) - _(xo)
    u_oh = _(xh) - _(xo)
    assert approx_equal(u_12.cross(u_o1).dot(u_oh), 0, self.eps)
    assert approx_equal(u_12.cross(u_o1).angle(u_oh, deg=True), 90, self.eps)
    assert approx_equal(abs(u_oh), self.bond_length, self.eps)
    assert approx_equal(u_o1.angle(u_oh, deg=True), 109.47, 0.01)

    jt0 = sparse.matrix(3, 14)
    for i in range(3):
      jt0[self.xo + i, self.xo + i] = 1.
      jt0[self.xo + i, self.xh + i] = 1.
    jt = self.reparam.jacobian_transpose
    assert sparse.approx_equal(self.eps)(jt, jt0)

    if self.verbose:
      # finite difference derivatives to compare with
      # the crude riding approximation used for analytical ones
      def differentiate(sc):
        eta = 1.e-4
        jac = []
        for i in range(3):
          x0 = tuple(sc.site)
          x = list(x0)
          x[i] += eta
          sc.site = tuple(x)
          self.reparam.linearise()
          self.reparam.store()
          xp = _(self.h.site)
          x[i] -= 2*eta
          sc.site = tuple(x)
          self.reparam.linearise()
          self.reparam.store()
          xm = _(self.h.site)
          sc.site = tuple(x0)
          jac.extend( (xp - xm)/(2*eta) )
        return mat.sqr(jac)

      jac_o = differentiate(self.o)
      jac_1 = differentiate(self.c1)
      jac_2 = differentiate(self.c2)
      print "staggered: %s" % self.staggered
      print "J_o:"
      print jac_o.mathematica_form()
      print "J_1:"
      print jac_1.mathematica_form()
      print "J_2:"
      print jac_2.mathematica_form()
Example #39
 def _construct_grad_block(self, param_grads, i):
   '''helper function to construct a block of gradients. The length of
   param_grads is the number of columns of the block. i selects a row of
   interest from the block corresponding to the residual for a particular
   unit cell'''
   param_grads *= self._gradfac
   block = sparse.matrix(self._nxls, len(param_grads))
   for j, g in enumerate(param_grads):
     if abs(g) > 1e-20: # skip gradient close to zero
       block[i, j] = g
   return block
Example #40
def exercise_block_assignment():
  a = sparse.matrix(4, 6)
  a[1, 2] = 3
  a[3, 3] = 6
  a[0, 5] = 5
  a[2, 0] = 2
  b = sparse.matrix(2, 3)
  b[0, 0] = 1
  b[0, 1] = 2
  b[1, 2] = 3
  a.assign_block(b, 1, 2)
  assert list(a.as_dense_matrix()) == [ 0, 0, 0, 0, 0, 5,
                                        0, 0, 1, 2, 0, 0,
                                        2, 0, 0, 0, 3, 0,
                                        0, 0, 0, 6, 0, 0 ]
  assert a.is_structural_zero(1, 4)
  assert a.is_structural_zero(2, 2)
  assert a.is_structural_zero(2, 3)

  try:
    a.assign_block(b, 3, 3)
    raise Exception_expected
  except RuntimeError:
    pass
  try:
    a.assign_block(b, 1, 4)
    raise Exception_expected
  except RuntimeError:
    pass

  c = flex.double(( 1,  2,
                    0, -1,
                    1,  0 ))
  c.reshape(flex.grid(3, 2))
  a.assign_block(c, 1, 1)
  assert list(a.as_dense_matrix()) == [ 0, 0,  0, 0, 0, 5,
                                        0, 1,  2, 2, 0, 0,
                                        2, 0, -1, 0, 3, 0,
                                        0, 1,  0, 6, 0, 0 ]
  assert a.is_structural_zero(2, 1)
  assert a.is_structural_zero(3, 2)
Example #41
  def _build_jacobian(dX_dp, dY_dp, dZ_dp, nelem, nparam):
    """construct Jacobian from lists of sparse gradient vectors."""

    nref = nelem // 3
    X_mat = sparse.matrix(nref, nparam)
    Y_mat = sparse.matrix(nref, nparam)
    Z_mat = sparse.matrix(nref, nparam)
    jacobian = sparse.matrix(nelem, nparam)

    # loop over parameters, building full width blocks of the full Jacobian
    for i in range(nparam):
      X_mat[:,i] = dX_dp[i]
      Y_mat[:,i] = dY_dp[i]
      Z_mat[:,i] = dZ_dp[i]

    # set the blocks in the Jacobian
    jacobian.assign_block(X_mat, 0, 0)
    jacobian.assign_block(Y_mat, nref, 0)
    jacobian.assign_block(Z_mat, 2*nref, 0)

    return jacobian
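
The assignments above stack the per-dimension gradient blocks vertically with
assign_block. A tiny illustration with hypothetical values:

from scitbx import sparse

x_blk = sparse.matrix(2, 1)
x_blk[0, 0] = 1.0
y_blk = sparse.matrix(2, 1)
y_blk[1, 0] = 2.0
jac = sparse.matrix(4, 1)
jac.assign_block(x_blk, 0, 0)  # occupies rows 0-1
jac.assign_block(y_blk, 2, 0)  # occupies rows 2-3
assert list(jac.as_dense_matrix()) == [1.0, 0.0, 0.0, 2.0]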
Example #42
 def _construct_grad_block(self, param_grads, i):
   '''helper function to construct a block of gradients. The length of
   param_grads is the number of columns of the block. i selects a row of
   interest from the block corresponding to the residual for a particular
   unit cell'''
   # this override removes the product with self._gradfac, which is only
   # relevant for the 'mean' versions of this class.
   # param_grads *= self._gradfac
   block = sparse.matrix(self._nxls, len(param_grads))
   for j, g in enumerate(param_grads):
     if abs(g) > 1e-20: # skip gradient close to zero
       block[i, j] = g
   return block
  def run(self):
    self.reparam.linearise()
    self.reparam.store()
    x_c0, x_c1, x_h = [ mat.col(sc.site)
                        for sc in (self.c0, self.c1, self.h) ]
    if self.with_special_position_pivot:
      assert approx_equal(x_c0, self.site_symm.exact_site(), self.eps)
    assert approx_equal(self.uc.angle(x_c1, x_c0, x_h), 180, self.eps)
    assert approx_equal(
      self.uc.distance(x_c0, x_h), self.bond_length, self.eps)

    if self.with_special_position_pivot:
      jt0 = sparse.matrix(1 + 3, # y0, x1
                          1 + 3 + 3 + 1 + 3) # y0, x0, x1, l, x_h
    else:
      jt0 = sparse.matrix(3 + 3, # x0, x1
                          3 + 3 + 1 + 3) # x0, x1, l, x_h

    # Identity for independent parameters
    if self.with_special_position_pivot:
      jt0[self.y0, self.y0] = 1.
    for i in range(3): jt0[self.x0 + i, self.x0 + i] = 1.
    for i in range(3): jt0[self.x1 + i, self.x1 + i] = 1.

    # special position x0
    if self.with_special_position_pivot:
      jt0[self.y0, self.x0    ] = 0
      jt0[self.y0, self.x0 + 1] = 0
      jt0[self.y0, self.x0 + 2] = 1.

    # riding
    if self.with_special_position_pivot:
      jt0[self.y0, self.x_h + 2] = 1.
    else:
      for i in range(3): jt0[self.x0 + i, self.x_h + i] = 1.

    jt = self.reparam.jacobian_transpose
    assert sparse.approx_equal(self.eps)(jt, jt0)
Example #44
 def _construct_grad_block(self, param_grads, i):
   '''helper function to construct a block of gradients. The length of
   param_grads is the number of columns of the block. i selects a row of
   interest from the block corresponding to the residual for a particular
   unit cell'''
   mean_grads = param_grads * self._meangradfac
   param_grads *= self._gradfac
   block = sparse.matrix(self._nxls, len(param_grads))
   for j, (g, mg) in enumerate(zip(param_grads, mean_grads)):
     if abs(mg) > 1e-20: # skip gradients close to zero
       col = flex.double(flex.grid(self._nxls, 1), -1. * mg)
       block.assign_block(col, 0, j)
     if abs(g) > 1e-20: # skip gradient close to zero
       block[i, j] = g
   return block
def test_row_multiply():

  m = sparse.matrix(3, 2)
  m[0,0] = 1.
  m[0,1] = 2.
  m[1,1] = 3.
  m[2,0] = 4.

  fac = flex.double((3, 2, 1))

  m2 = row_multiply(m, fac)

  assert (m2.as_dense_matrix().as_1d() == flex.double(
    [3.0, 6.0, 0.0, 6.0, 4.0, 0.0])).all_eq(True)
  print("OK")
 def __init__(self, I, w, hkl, frames, G, Ih):
   self.I = flex.double(I)
   self.w = flex.double(w)
   self.hkl = flex.size_t(hkl)
   self.frames = flex.size_t(frames)
   self.G = G
   self.Ih = flex.double(Ih)
   self.S = flex.double(Ih)
   self.n_obs = len(I)
   self.n_frames = len(G)
   self.n_prm = self.n_frames + len(self.S)
   self.count = 0
   self.weight = flex.double(self.n_obs, 1)
   self.jacobian_sparse = sparse.matrix(self.n_obs, self.n_prm)
   super(xscale, self).__init__(n_parameters=self.n_prm)
   self.restart()
  def get_residuals_gradients_and_weights(self):

    residuals = flex.double()
    weights = flex.double()
    row_start = []
    irow = 0

    # process restraints residuals and weights for single models
    for r in self._single_model_restraints:
      res = r.restraint.residuals()
      wgt = r.restraint.weights()
      residuals.extend(flex.double(res))
      weights.extend(flex.double(wgt))
      row_start.append(irow)
      irow += len(res)

    # keep track of the row at the start of group models
    group_model_irow = irow

    # process restraints residuals and weights for groups of models
    for r in self._group_model_restraints:
      residuals.extend(flex.double(r.restraint.residuals()))
      weights.extend(flex.double(r.restraint.weights()))

    # now it is clear how many residuals there are we can set up a sparse
    # matrix for the restraints jacobian
    nrows = len(residuals)
    gradients = sparse.matrix(nrows, self._nparam)

    # assign gradients in blocks for the single model restraints
    for irow, r in zip(row_start, self._single_model_restraints):
      icol = r.istart
      # convert square list-of-lists into a 2D array for block assignment
      grads = flex.double(r.restraint.gradients())
      gradients.assign_block(grads, irow, icol)

    # assign gradients in blocks for the group model restraints
    for r in self._group_model_restraints:
      # loop over the included unit cell models, k
      for k, (icol, grads) in enumerate(zip(r.istart, r.restraint.gradients())):
        irow = group_model_irow
        for grad in grads:
          gradients.assign_block(grad, irow, icol)
          irow += grad.n_rows
      group_model_irow = irow

    return residuals, gradients, weights
 def run(self):
   self.reparam.linearise()
   self.reparam.store()
   assert approx_equal(self.sc.u_star, (19/6, 19/6, 17/2,
                                        11/6, 9/2, 9/2), self.eps)
   jt0 = sparse.matrix(2, 8)
   jt0[0, 0] = 1
   jt0[1, 1] = 1
   jac_u_star_trans = self.site_symm.adp_constraints().gradient_sum_matrix()
   jac_u_star_trans.reshape(flex.grid(
     self.site_symm.adp_constraints().n_independent_params(), 6))
   (m,n) = jac_u_star_trans.focus()
    for i in range(m):
      for j in range(n):
       jt0[i, j + 2] = jac_u_star_trans[i, j]
   jt = self.reparam.jacobian_transpose
   assert sparse.approx_equal(self.eps)(jt, jt0)
def exercise_u_iso_proportional_to_pivot_u_eq():
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(
      unit_cell=(),
      space_group_symbol='hall: P 2x 2y'),
    scatterers=flex.xray_scatterer((
      xray.scatterer('C0', u=(1, 1, 1, 0, 0, 0)),
      xray.scatterer('C1'),
      xray.scatterer('C2', site=(0.1, 0.2, 0.3), u=(1, 2, 3, 0, 0, 0)),
      xray.scatterer('C3'),
      )))
  r = constraints.ext.reparametrisation(xs.unit_cell())
  sc = xs.scatterers()
  sc[0].flags.set_grad_u_aniso(True)
  sc[2].flags.set_grad_u_aniso(True)

  u_0 = r.add(constraints.special_position_u_star_parameter,
              site_symmetry=xs.site_symmetry_table().get(0),
              scatterer=sc[0])
  u_iso_1 = r.add(constraints.u_iso_proportional_to_pivot_u_eq,
                pivot_u=u_0,
                multiplier=3,
                scatterer=sc[1])
  u_2 = r.add(constraints.independent_u_star_parameter, sc[2])
  u_iso_3 = r.add(constraints.u_iso_proportional_to_pivot_u_eq,
                pivot_u=u_2,
                multiplier=2,
                scatterer=sc[3])
  r.finalise()
  m = 3 + 6
  n = m + 6 + 1 + 1
  r.linearise()
  assert approx_equal(u_iso_1.value, 3, eps=1e-15)
  assert approx_equal(u_iso_3.value, 4, eps=1e-15)
  jt0 = sparse.matrix(m, n)
  for i in range(m): jt0[i, i] = 1
  p, q = u_0.argument(0).index, u_0.index
  jt0[p, q] = jt0[p+1, q+1] = jt0[p+2, q+2] = 1
  q = u_iso_1.index
  jt0[p, q] = jt0[p+1, q] = jt0[p+2, q] = 1
  p, q = u_2.index, u_iso_3.index
  jt0[p, q] = jt0[p+1, q] = jt0[p+2, q] = 2/3
  assert sparse.approx_equal(tolerance=1e-15)(r.jacobian_transpose, jt0)
def exercise_add_equation():
  linearised_eqns = restraints.linearised_eqns_of_restraint(10, 10)
  delta = 0.5
  grads = flex.double((0,0,1,0,0,2,0,0,-1, 0))
  w = 10
  linearised_eqns.add_equation(delta, grads, w)
  assert linearised_eqns.n_restraints() == 1
  linearised_eqns.add_equation(delta, grads, w)
  linearised_eqns.add_equation(delta, grads, w)
  assert linearised_eqns.n_restraints() == 3
  from scitbx import sparse
  assert approx_equal(
    linearised_eqns.design_matrix.as_dense_matrix(),
    sparse.matrix(rows=10, columns=10,
                  elements_by_columns=[ { 0: 0, 1: 0, 2: 0 },
                                        { 0: 0, 1: 0, 2: 0 },
                                        { 0: 1, 1: 1, 2: 1 },
                                        { 0: 0, 1: 0, 2: 0 },
                                        { 0: 0, 1: 0, 2: 0 },
                                        { 0: 2, 1: 2, 2: 2 },
                                        { 0: 0, 1: 0, 2: 0 },
                                        { 0: 0, 1: 0, 2: 0 },
                                        { 0: -1, 1: -1, 2: -1 },
                                        { 0: 0, 1: 0, 2: 0 }, ]).as_dense_matrix())
Example #51
  m = 10
  a = sparse.matrix(m, 2)
  columns = ( sparse.matrix_column(m, {1:0.1, 2:0.2}),
              sparse.matrix_column(m, {4:0.4, 8:0.8}) )
  a[:,0] = columns[0]
  a[:,1] = columns[1]
  assert (a[:,0], a[:,1]) == columns

  try:
    a[1,:] = sparse.vector(2, {1:1})
    raise Exception_expected
  except RuntimeError as e:
    assert str(e)

  a = sparse.matrix(10, 3,
                    elements_by_columns=[ { 1: 1, 4: 4, },
                                          { 0: -1, 8:8, },
                                          { 6: 6, 9: 9, } ])
  assert "\n%s" % a == """
{
{ 0, -1, 0 },
{ 1, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 0 },
{ 4, 0, 0 },
{ 0, 0, 0 },
{ 0, 0, 6 },
{ 0, 0, 0 },
{ 0, 8, 0 },
{ 0, 0, 9 }
}
"""
def exercise_affine_occupancy_parameter():
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(unit_cell=(), space_group_symbol='hall: P 1'),
    scatterers=flex.xray_scatterer((
      xray.scatterer('C0', occupancy=1),
      xray.scatterer('C1', occupancy=1),
      xray.scatterer('C2', occupancy=1),
      xray.scatterer('C3', occupancy=1),
    )))
  sc = xs.scatterers()
  sc.flags_set_grad_occupancy(flex.size_t_range(4))

  # Two occupancies adding up to 1 (most common case of disorder)
  r = constraints.ext.reparametrisation(xs.unit_cell())
  occ_1 = r.add(constraints.independent_occupancy_parameter, sc[1])
  occ_3 = r.add(constraints.affine_asu_occupancy_parameter,
                dependee=occ_1, a=-1, b=1, scatterer=sc[3])
  r.finalise()
  r.linearise()
  assert approx_equal(occ_1.value, 1)
  assert approx_equal(occ_3.value, 0)
  jt0 = sparse.matrix(1, 2,
                     [ {0:1},   # 1st col = derivatives of occ_1
                       {0:-1},   # 2nd col = derivatives of occ_3
                     ])
  assert sparse.approx_equal(tolerance=1e-15)(r.jacobian_transpose, jt0)

  # Example illustrating the instruction SUMP in SHELX 97 manual (p. 7-26)
  # We disregard the issue of the special position which is orthogonal to the
  # point we want to test here.
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(unit_cell=(), space_group_symbol='hall: P 1'),
    scatterers=flex.xray_scatterer((
      xray.scatterer('Na+', occupancy=1),
      xray.scatterer('Ca2+', occupancy=1),
      xray.scatterer('Al3+', occupancy=0.35),
      xray.scatterer('K+', occupancy=0.15),
    )))
  sc = xs.scatterers()
  sc.flags_set_grad_occupancy(flex.size_t_range(4))

  # The constraints are:
  # fully occupied: occ(Na+) + occ(Ca2+) + occ(Al3+) + occ(K+) = 1
  # average charge +2: occ(Na+) + 2 occ(Ca2+) + 3 occ(Al3+) + occ(K+) = +2
  # This can be solved as:
  # occ(Na+)  = occ(Al3+) - occ(K+)
  # occ(Ca2+) = 1 - 2 occ(Al3+)
  r = constraints.ext.reparametrisation(xs.unit_cell())
  occ_Al = r.add(constraints.independent_occupancy_parameter, sc[2])
  occ_K  = r.add(constraints.independent_occupancy_parameter, sc[3])
  occ_Na = r.add(constraints.affine_asu_occupancy_parameter,
                 occ_Al, 1, occ_K, -1, 0, scatterer=sc[0])
  occ_Ca = r.add(constraints.affine_asu_occupancy_parameter,
                 occ_Al, -2, 1, scatterer=sc[1])
  r.finalise()
  r.linearise()
  assert approx_equal(occ_Na.value, 0.2)
  assert approx_equal(occ_Ca.value, 0.3)
  assert approx_equal(occ_Al.value, 0.35)
  assert approx_equal(occ_K.value, 0.15)
  jt0 = sparse.matrix(2, 4,
                     [
                       {0:1},         # diff occ(Al3+)
                       {1:1} ,        # diff occ(K+)
                       {0:1, 1:-1},   # diff occ(Na+)
                       {0:-2},        # diff occ(Ca2+)
                     ])
  assert sparse.approx_equal(tolerance=1e-15)(r.jacobian_transpose, jt0)