def torsion_search_nested (
      clusters,
      sites_cart,
      last_chi_symmetric=None,
      increment_degrees=10) :
  """
  Iterate over all possible sidechain Chi angle combinations.
  """
  from scitbx.array_family import flex
  from scitbx.matrix import rotate_point_around_axis
  n_angles = len(clusters)
  assert (n_angles >= 1)
  angle_range = 180
  r1 = [ifloor(-angle_range/increment_degrees)] * n_angles
  r2 = [iceil(angle_range/increment_degrees)] * n_angles
  if (last_chi_symmetric) :
    r1[-1] = ifloor(-90/increment_degrees)
    r2[-1] = iceil(90/increment_degrees)
  nested_loop = flex.nested_loop(begin=r1, end=r2, open_range=False)
  selection = clusters[0].atoms_to_rotate
  for angles in nested_loop:
    xyz_moved = sites_cart.deep_copy()
    for i, angle_fraction in enumerate(angles):
      cl = clusters[i]
      for atom in cl.atoms_to_rotate:
        new_xyz = rotate_point_around_axis(
          axis_point_1 = xyz_moved[cl.axis[0]],
          axis_point_2 = xyz_moved[cl.axis[1]],
          point        = xyz_moved[atom],
          angle        = angle_fraction*increment_degrees,
          deg=True)
        xyz_moved[atom] = new_xyz
    yield xyz_moved
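
Note: torsion_search_nested is a generator, so a caller typically scores each trial coordinate set and keeps the best one. A minimal, hypothetical usage sketch follows; `clusters`, `sites_cart` and the `score` callable are assumed to be supplied by the surrounding refinement code and are not defined here.

# Hypothetical usage sketch -- only the call pattern is real; the inputs and
# the scoring function are placeholders.
best_score = None
best_sites = None
for sites_trial in torsion_search_nested(clusters, sites_cart,
                                         last_chi_symmetric=True,
                                         increment_degrees=20):
    s = score(sites_trial)  # assumed map- or geometry-based scoring callable
    if (best_score is None) or (s > best_score):
        best_score, best_sites = s, sites_trial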
Example 2
def torsion_search_nested(clusters,
                          sites_cart,
                          last_chi_symmetric=None,
                          increment_degrees=10):
    """
  Iterate over all possible sidechain Chi angle combinations.
  """
    from scitbx.array_family import flex
    from scitbx.matrix import rotate_point_around_axis
    n_angles = len(clusters)
    assert (n_angles >= 1)
    angle_range = 180
    r1 = [ifloor(-angle_range / increment_degrees)] * n_angles
    r2 = [iceil(angle_range / increment_degrees)] * n_angles
    if (last_chi_symmetric):
        r1[-1] = ifloor(-90 / increment_degrees)
        r2[-1] = iceil(90 / increment_degrees)
    nested_loop = flex.nested_loop(begin=r1, end=r2, open_range=False)
    selection = clusters[0].atoms_to_rotate
    for angles in nested_loop:
        xyz_moved = sites_cart.deep_copy()
        for i, angle_fraction in enumerate(angles):
            cl = clusters[i]
            for atom in cl.atoms_to_rotate:
                new_xyz = rotate_point_around_axis(
                    axis_point_1=xyz_moved[cl.axis[0]],
                    axis_point_2=xyz_moved[cl.axis[1]],
                    point=xyz_moved[atom],
                    angle=angle_fraction * increment_degrees,
                    deg=True)
                xyz_moved[atom] = new_xyz
        yield xyz_moved
Example 3
def exercise_real_space_refinement(verbose):
    if (verbose):
        out = sys.stdout
    else:
        out = StringIO()
    out_of_bounds_clamp = maptbx.out_of_bounds_clamp(0)
    out_of_bounds_raise = maptbx.out_of_bounds_raise()
    crystal_symmetry = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90),
                                        space_group_symbol="P 1")
    xray_structure = xray.structure(crystal_symmetry=crystal_symmetry,
                                    scatterers=flex.xray_scatterer([
                                        xray.scatterer(label="C",
                                                       site=(0, 0, 0))
                                    ]))
    miller_set = miller.build_set(crystal_symmetry=crystal_symmetry,
                                  anomalous_flag=False,
                                  d_min=1)
    f_calc = miller_set.structure_factors_from_scatterers(
        xray_structure=xray_structure).f_calc()
    fft_map = f_calc.fft_map()
    fft_map.apply_sigma_scaling()
    real_map = fft_map.real_map_unpadded()
    #### unit_cell test
    delta_h = .005
    basic_map = maptbx.basic_map(
        maptbx.basic_map_unit_cell_flag(), real_map, real_map.focus(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell())
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
    ### non_symmetric test
    #
    minfrac = crystal_symmetry.unit_cell().fractionalize((-5, -5, -5))
    maxfrac = crystal_symmetry.unit_cell().fractionalize((5, 5, 5))
    gridding_first = [ifloor(n * b) for n, b in zip(fft_map.n_real(), minfrac)]
    gridding_last = [iceil(n * b) for n, b in zip(fft_map.n_real(), maxfrac)]
    data = maptbx.copy(real_map, gridding_first, gridding_last)
    #
    basic_map = maptbx.basic_map(
        maptbx.basic_map_non_symmetric_flag(), data, fft_map.n_real(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell())
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
    ### asu test
    #
    minfrac = crystal_symmetry.unit_cell().fractionalize((0, 0, 0))
    maxfrac = crystal_symmetry.unit_cell().fractionalize((10, 10, 10))
    gridding_first = [ifloor(n * b) for n, b in zip(fft_map.n_real(), minfrac)]
    gridding_last = [iceil(n * b) for n, b in zip(fft_map.n_real(), maxfrac)]
    data = maptbx.copy(real_map, gridding_first, gridding_last)
    #
    basic_map = maptbx.basic_map(
        maptbx.basic_map_asu_flag(), data, crystal_symmetry.space_group(),
        crystal_symmetry.direct_space_asu().as_float_asu(), real_map.focus(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell(), 0.5,
        True)
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
Example 4
 def translate_image (self, delta_x, delta_y) :
   """
   Translate the viewport to a different area of the image.  Arguments are
   in pixels.
   """
   scale = self.get_scale()
   x_new = max(0, ifloor(self.img_x_offset - (delta_x / scale)))
   y_new = max(0, ifloor(self.img_y_offset - (delta_y / scale)))
   max_x = ifloor(self.img_w - (self.screen_w / scale))
   max_y = ifloor(self.img_h - (self.screen_h / scale))
   self.img_x_offset = min(x_new, max_x)
   self.img_y_offset = min(y_new, max_y)
Example 5
 def screen_coords_as_image_coords (self, x, y) :
   """
   Convert pixel coordinates in the viewport to pixel coordinates in the
   raw image.
   """
   scale = self.get_scale()
   xi, yi, w, h = self.get_bitmap_params()
   x1 = x - max(0, (self.screen_w - (w*scale)) / 2)
   y1 = y - max(0, (self.screen_h - (h*scale)) / 2)
   x2 = self.img_x_offset + (x1 / scale)
   y2 = self.img_y_offset + (y1 / scale)
   return (ifloor(x2) + 1, ifloor(y2) + 1)
Example 6
 def screen_coords_as_image_coords(self, x, y):
     """
 Convert pixel coordinates in the viewport to pixel coordinates in the
 raw image.
 """
     scale = self.get_scale()
     xi, yi, w, h = self.get_bitmap_params()
     x1 = x - max(0, (self.screen_w - (w * scale)) / 2)
     y1 = y - max(0, (self.screen_h - (h * scale)) / 2)
     x2 = self.img_x_offset + (x1 / scale)
     y2 = self.img_y_offset + (y1 / scale)
     return (ifloor(x2) + 1, ifloor(y2) + 1)
Example 7
 def translate_image(self, delta_x, delta_y):
     """
 Translate the viewport to a different area of the image.  Arguments are
 in pixels.
 """
     scale = self.get_scale()
     x_new = max(0, ifloor(self.img_x_offset - (delta_x / scale)))
     y_new = max(0, ifloor(self.img_y_offset - (delta_y / scale)))
     max_x = ifloor(self.img_w - (self.screen_w / scale))
     max_y = ifloor(self.img_h - (self.screen_h / scale))
     self.img_x_offset = min(x_new, max_x)
     self.img_y_offset = min(y_new, max_y)
Example 8
def get_bounds_around_model(
    map_manager=None,
    model=None,
    box_cushion=None,
):
    '''
      Calculate the lower and upper bounds to box around a model
      Allow bounds to go outside the available box (this has to be
        dealt with at the boxing stage)
    '''

    # get items needed to do the shift
    cs = map_manager.crystal_symmetry()
    uc = cs.unit_cell()
    sites_cart = model.get_sites_cart()
    sites_frac = uc.fractionalize(sites_cart)
    map_data = map_manager.map_data()
    # convert box_cushion into fractional vector
    cushion_frac = flex.double(uc.fractionalize((box_cushion, ) * 3))
    # find fractional corners
    frac_min = sites_frac.min()
    frac_max = sites_frac.max()
    frac_max = list(flex.double(frac_max) + cushion_frac)
    frac_min = list(flex.double(frac_min) - cushion_frac)
    # find corner grid nodes
    all_orig = map_data.all()

    lower_bounds = [ifloor(f * n) for f, n in zip(frac_min, all_orig)]
    upper_bounds = [iceil(f * n) for f, n in zip(frac_max, all_orig)]
    return group_args(
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
    )
Example 9
def write_xplor_map(sites_cart,
                    unit_cell,
                    map_data,
                    n_real,
                    file_name,
                    buffer=10):
    import iotbx.xplor.map
    if sites_cart is not None:
        frac_min, frac_max = unit_cell.box_frac_around_sites(
            sites_cart=sites_cart, buffer=buffer)
    else:
        frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    gridding = iotbx.xplor.map.gridding(n=map_data.focus(),
                                        first=gridding_first,
                                        last=gridding_last)
    iotbx.xplor.map.writer(file_name=file_name,
                           is_p1_cell=True,
                           title_lines=[
                               ' None',
                           ],
                           unit_cell=unit_cell,
                           gridding=gridding,
                           data=map_data,
                           average=-1,
                           standard_deviation=-1)
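
The gridding arithmetic in these map writers is the same throughout the listing: fractional bounds are scaled by the number of grid points along each axis and rounded outward with ifloor/iceil. A small self-contained sketch of just that step, with made-up numbers for illustration:

from libtbx.math_utils import ifloor, iceil

frac_min = (0.12, -0.05, 0.30)  # fractional lower corner of the box
frac_max = (0.48, 0.55, 0.71)   # fractional upper corner of the box
n_real = (90, 96, 120)          # grid points along a, b, c

gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
print(gridding_first)  # [10, -5, 36]
print(gridding_last)   # [44, 53, 86]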
Example 10
def write_ccp4_map(sites_cart,
                   unit_cell,
                   map_data,
                   n_real,
                   file_name,
                   buffer=10):
    import iotbx.ccp4_map
    from cctbx import sgtbx
    from scitbx.array_family import flex
    if sites_cart is not None:
        frac_min, frac_max = unit_cell.box_frac_around_sites(
            sites_cart=sites_cart, buffer=buffer)
    else:
        frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
    gridding_first = tuple([ifloor(f * n) for f, n in zip(frac_min, n_real)])
    gridding_last = tuple([iceil(f * n) for f, n in zip(frac_max, n_real)])
    space_group = sgtbx.space_group_info("P1").group()
    iotbx.ccp4_map.write_ccp4_map(
        file_name=file_name,
        unit_cell=unit_cell,
        space_group=space_group,
        gridding_first=gridding_first,
        gridding_last=gridding_last,
        map_data=map_data,
        labels=flex.std_string(["iotbx.map_conversion.write_ccp4_map_box"]))
Example 11
def rt_mx_as_rational(rot_mat):
    # make sure one provides an integer matrix!
    tmp_mat = rot_mat.r().as_double()
    rational_list = []
    for ij in tmp_mat:
        rational_list.append(rational.int(ifloor(ij)))
    return matrix.sqr(rational_list)
Example 12
def rt_mx_as_rational(rot_mat):
    # make sure one provides an integer matrix!
    tmp_mat = rot_mat.num()
    rational_list = []
    for ij in tmp_mat:
        rational_list.append(rational.int(ifloor(ij)))
    return matrix.sqr(rational_list)
Example 13
def run(args):
    if (len(args) == 0): args = ["--help"]
    command_line = (libtbx.option_parser.option_parser(
        usage="iotbx.python pdb_to_map_simple.py [options] pdb_file..."
    ).option(None,
             "--d_min",
             type="float",
             default=3,
             help="high-resolution limit for structure-factor calculation",
             metavar="FLOAT")).process(args=args)
    d_min = command_line.options.d_min
    assert d_min > 0
    for file_name in command_line.args:
        pdb_inp = iotbx.pdb.input(file_name=file_name)
        xray_structure = pdb_inp.xray_structure_simple()
        xray_structure.show_summary()
        print()
        print("d_min:", d_min)
        f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
        f_calc.show_summary()
        print()
        fft_map = f_calc.fft_map()
        n = fft_map.n_real()
        print("unit cell gridding:", n)
        fft_map.as_xplor_map(file_name="unit_cell.map")
        print()
        block_first = tuple([ifloor(i * 0.2) for i in n])
        block_last = tuple(
            [max(f + 10, iceil(i * 0.7)) for f, i in zip(block_first, n)])
        print("block first:", block_first)
        print("block last: ", block_last)
        fft_map.as_xplor_map(file_name="block.map",
                             gridding_first=block_first,
                             gridding_last=block_last)
        print()
Example 14
def resample_ordered_list_of_values(vals, redundancy=8):
    """resample a list of values with interpolation"""
    # Number of vals given
    num_inp_vals = len(vals)
    # Number of vals to be returned
    num_samp_vals = int(1 + (num_inp_vals - 1) / redundancy)
    # Sort in descending order
    ordered_vals = sorted(vals, reverse=True)
    sampled_vals = []

    if num_samp_vals == 1:
        return [ordered_vals[0]]
    else:
        sample_dist = (num_inp_vals - 1) / (num_samp_vals - 1)
        # Make sure it doesn't overrun the end of the array
        # while sample_dist*(num_samp_vals-1) > num_inp_vals-1:
        #     sample_dist = 0.99999999*sample_dist

    # Resample points with interpolation
    for i_point in range(num_samp_vals - 1):
        sample_index = sample_dist * i_point
        p1 = ifloor(sample_index)
        v1 = ordered_vals[p1]
        p2 = iceil(sample_index)
        v2 = ordered_vals[p2]
        sample_val = interpolate(x=sample_index, p1=p1, v1=v1, p2=p2, v2=v2)
        sampled_vals.append(sample_val)
    # Add the last point
    sampled_vals.append(ordered_vals[-1])

    assert len(sampled_vals) == num_samp_vals

    return sampled_vals
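
resample_ordered_list_of_values calls an interpolate helper that is not shown in this excerpt. A plausible linear-interpolation sketch of what such a helper could look like (an assumption for readability, not the original implementation):

def interpolate(x, p1, v1, p2, v2):
    # Linear interpolation between (p1, v1) and (p2, v2); when ifloor(x) and
    # iceil(x) coincide (x is an integer), both points carry the same value.
    if p2 == p1:
        return v1
    return v1 + (v2 - v1) * (x - p1) / float(p2 - p1)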
Example 15
def run(args):
  if (len(args) == 0): args = ["--help"]
  command_line = (libtbx.option_parser.option_parser(
    usage="iotbx.python pdb_to_map_simple.py [options] pdb_file...")
    .option(None, "--d_min",
      type="float",
      default=3,
      help="high-resolution limit for structure-factor calculation",
      metavar="FLOAT")
  ).process(args=args)
  d_min = command_line.options.d_min
  assert d_min > 0
  for file_name in command_line.args:
    pdb_inp = iotbx.pdb.input(file_name=file_name)
    xray_structure = pdb_inp.xray_structure_simple()
    xray_structure.show_summary()
    print
    print "d_min:", d_min
    f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
    f_calc.show_summary()
    print
    fft_map = f_calc.fft_map()
    n = fft_map.n_real()
    print "unit cell gridding:", n
    fft_map.as_xplor_map(file_name="unit_cell.map")
    print
    block_first = tuple([ifloor(i*0.2) for i in n])
    block_last = tuple([max(f+10, iceil(i*0.7)) for f,i in zip(block_first, n)])
    print "block first:", block_first
    print "block last: ", block_last
    fft_map.as_xplor_map(
      file_name="block.map",
      gridding_first=block_first,
      gridding_last=block_last)
    print
Example 16
    def __init__(self,
                 map_manager,
                 model,
                 box_cushion,
                 wrapping=None,
                 log=sys.stdout):

        self._map_manager = map_manager
        self._model = model

        self._force_wrapping = wrapping
        if wrapping is None:
            wrapping = self.map_manager().wrapping()
        self.basis_for_boxing_string = 'using_model, wrapping = %s' % (
            wrapping)

        # safeguards
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self._map_manager.map_data().accessor().origin() == (0, 0, 0)

        # Make sure working model and map_manager crystal_symmetry match

        assert map_manager.is_compatible_model(model)

        assert box_cushion >= 0

        if self.map_manager().wrapping():  # map must be entire unit cell
            assert map_manager.unit_cell_grid == map_manager.map_data().all()

        # NOTE: We are going to use crystal_symmetry and sites_frac based on
        #   the map_manager (the model could still have different crystal_symmetry)

        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_cart = model.get_sites_cart()
        sites_frac = uc.fractionalize(sites_cart)
        map_data = map_manager.map_data()
        # convert box_cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((box_cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)
        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()

        # Apply boxing to model, ncs, and map (if available)
        self.apply_to_model_ncs_and_map()
Example 17
 def center_view_from_thumbnail (self, x, y) :
   """
   Translate the viewport to center on the X,Y coordinates equivalent to the
   point clicked in the thumbnail view.  Arguments are in screen coordinates
   relative to the upper left corner of the thumbnail (which is assumed to be
   displayed in its entirety).
   """
   if (self.zoom == 0) : return
   self.last_thumb_x = x
   self.last_thumb_y = y
   x0, y0, w, h = self.get_bitmap_params()
   img_x = max(0, ifloor((x * self.thumb_ratio) - (w / 2)))
   img_y = max(0, ifloor((y * self.thumb_ratio) - (h / 2)))
   scale = self.get_scale()
   max_x = ifloor(self.img_w - (self.screen_w / scale))
   max_y = ifloor(self.img_h - (self.screen_h / scale))
   self.img_x_offset = min(img_x, max_x)
   self.img_y_offset = min(img_y, max_y)
Example 18
 def center_view_from_thumbnail(self, x, y):
     """
 Translate the viewport to center on the X,Y coordinates equivalent to the
 point clicked in the thumbnail view.  Arguments are in screen coordinates
 relative to the upper left corner of the thumbnail (which is assumed to be
 displayed in its entirety).
 """
     if (self.zoom == 0): return
     self.last_thumb_x = x
     self.last_thumb_y = y
     x0, y0, w, h = self.get_bitmap_params()
     img_x = max(0, ifloor((x * self.thumb_ratio) - (w / 2)))
     img_y = max(0, ifloor((y * self.thumb_ratio) - (h / 2)))
     scale = self.get_scale()
     max_x = ifloor(self.img_w - (self.screen_w / scale))
     max_y = ifloor(self.img_h - (self.screen_h / scale))
     self.img_x_offset = min(img_x, max_x)
     self.img_y_offset = min(img_y, max_y)
Example 19
 def get_padding(text, margin=2, center=self.center):
     from libtbx.math_utils import ifloor, iceil
     fill = max(0, width - len(text) - (margin * 2))
     if (center):
         rfill = ifloor(fill / 2)
         lfill = iceil(fill / 2)
     else:
         rfill = 0
         lfill = fill
     return (rfill, lfill)
Example 20
 def get_padding (text, margin=2, center=self.center) :
   from libtbx.math_utils import ifloor, iceil
   fill = max(0, width - len(text) - (margin * 2))
   if (center) :
     rfill = ifloor(fill / 2)
     lfill = iceil(fill / 2)
   else :
     rfill = 0
     lfill = fill
   return (rfill, lfill)
Example 21
def exercise_integer():
  from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
  assert iround(0) == 0
  assert iround(1.4) == 1
  assert iround(-1.4) == -1
  assert iround(1.6) == 2
  assert iround(-1.6) == -2
  assert iceil(0) == 0
  assert iceil(1.1) == 2
  assert iceil(-1.1) == -1
  assert iceil(1.9) == 2
  assert iceil(-1.9) == -1
  assert ifloor(0) == 0
  assert ifloor(1.1) == 1
  assert ifloor(-1.1) == -2
  assert ifloor(1.9) == 1
  assert ifloor(-1.9) == -2
  for i in xrange(-3,3+1):
    assert nearest_integer(i+0.3) == i
    assert nearest_integer(i+0.7) == i+1
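
These assertions also illustrate why the gridding code elsewhere in this listing uses ifloor/iceil rather than int(): int() truncates toward zero, whereas ifloor/iceil always round down/up and return plain Python ints. A short standalone check (assumes only that libtbx is installed):

from libtbx.math_utils import ifloor, iceil

frac, n_grid = -0.37, 48
print(ifloor(frac * n_grid), int(frac * n_grid))  # -18 vs -17
print(iceil(0.82 * n_grid))                       # 40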
Example 22
def exercise_integer():
    from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
    assert iround(0) == 0
    assert iround(1.4) == 1
    assert iround(-1.4) == -1
    assert iround(1.6) == 2
    assert iround(-1.6) == -2
    assert iceil(0) == 0
    assert iceil(1.1) == 2
    assert iceil(-1.1) == -1
    assert iceil(1.9) == 2
    assert iceil(-1.9) == -1
    assert ifloor(0) == 0
    assert ifloor(1.1) == 1
    assert ifloor(-1.1) == -2
    assert ifloor(1.9) == 1
    assert ifloor(-1.9) == -2
    for i in xrange(-3, 3 + 1):
        assert nearest_integer(i + 0.3) == i
        assert nearest_integer(i + 0.7) == i + 1
Example 23
def find_max_x_multi(null_fit,
                     existing_gaussian,
                     target_powers,
                     minimize_using_sigmas,
                     shift_sqrt_b_mod_n,
                     b_min,
                     max_max_error,
                     n_start_fractions,
                     n_repeats_minimization,
                     factor_y_x_begin=0.9,
                     factor_y_x_end=0.1,
                     factor_x_step=2.):
    i_x_begin = None
    i_x_end = None
    y0 = null_fit.table_y()[0]
    for i, target_value in enumerate(null_fit.table_y()):
        if (i_x_begin is None and target_value < y0 * factor_y_x_begin):
            i_x_begin = i
        if (i_x_end is None and target_value < y0 * factor_y_x_end):
            i_x_end = i + 1
            break
    if (i_x_end is None):
        i_x_end = null_fit.table_y().size()
    if (i_x_begin is None):
        i_x_begin = min(existing_gaussian.n_parameters(), i_x_end - 1)
    assert i_x_end > 0
    assert i_x_begin < i_x_end
    n_terms = existing_gaussian.n_terms() + 1
    i_x_step = max(1, ifloor(
        (i_x_end - i_x_begin) / (factor_x_step * n_terms)))
    if (n_terms == 1): n_start_fractions = 2
    best_min = None
    for i_x in range(i_x_begin, i_x_end, i_x_step):
        for i_split in range(-1, existing_gaussian.n_terms()):
            for i_start_fraction in range(0, n_start_fractions):
                gaussian_fit = make_start_gaussian(
                    null_fit=null_fit,
                    existing_gaussian=existing_gaussian,
                    i_split=i_split,
                    i_x=i_x,
                    start_fraction=i_start_fraction / float(n_start_fractions))
                for target_power in target_powers:
                    good_min = find_max_x(
                        gaussian_fit=gaussian_fit,
                        target_powers=[target_power],
                        minimize_using_sigmas=minimize_using_sigmas,
                        n_repeats_minimization=n_repeats_minimization,
                        shift_sqrt_b_mod_n=shift_sqrt_b_mod_n,
                        b_min=b_min,
                        max_max_error=max_max_error)
                    if (good_min is not None
                            and good_min.is_better_than(best_min)):
                        best_min = good_min
    return best_min
Example 24
 def __init__(self,
              params,
              coeffs,
              atom_selection_manager=None,
              xray_structure=None):
     adopt_init_args(self, locals())
     fft_map = coeffs.fft_map(
         resolution_factor=self.params.grid_resolution_factor)
     if (self.params.scale == "volume"): fft_map.apply_volume_scaling()
     elif (self.params.scale == "sigma"): fft_map.apply_sigma_scaling()
     else: raise RuntimeError
     title_lines = [
         "REMARK file: %s" %
         show_string(os.path.basename(self.params.file_name))
     ]
     title_lines.append("REMARK directory: %s" %
                        show_string(os.path.dirname(self.params.file_name)))
     title_lines.append("REMARK %s" % date_and_time())
     assert self.params.region in ["selection", "cell"]
     if (self.params.region == "selection" and xray_structure is not None):
         map_iselection = None
         if atom_selection_manager is not None:
             map_iselection = self.atom_iselection()
         frac_min, frac_max = self.box_around_selection(
             iselection=map_iselection,
             buffer=self.params.atom_selection_buffer)
         n_real = fft_map.n_real()
         gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
         gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
         title_lines.append('REMARK map around selection')
         title_lines.append('REMARK   atom_selection=%s' %
                            show_string(self.params.atom_selection))
         title_lines.append('REMARK   atom_selection_buffer=%.6g' %
                            self.params.atom_selection_buffer)
         if (map_iselection is None):
             sel_size = self.xray_structure.scatterers().size()
         else:
             sel_size = map_iselection.size()
         title_lines.append('REMARK   number of atoms selected: %d' %
                            sel_size)
     else:
         gridding_first = None
         gridding_last = None
         title_lines.append("REMARK map covering the unit cell")
     if params.format == "xplor":
         fft_map.as_xplor_map(file_name=self.params.file_name,
                              title_lines=title_lines,
                              gridding_first=gridding_first,
                              gridding_last=gridding_last)
     else:
         fft_map.as_ccp4_map(file_name=self.params.file_name,
                             gridding_first=gridding_first,
                             gridding_last=gridding_last,
                             labels=title_lines)
Example 25
def find_max_x_multi(null_fit,
                     existing_gaussian,
                     target_powers,
                     minimize_using_sigmas,
                     shift_sqrt_b_mod_n,
                     b_min,
                     max_max_error,
                     n_start_fractions,
                     n_repeats_minimization,
                     factor_y_x_begin=0.9,
                     factor_y_x_end=0.1,
                     factor_x_step=2.):
  i_x_begin = None
  i_x_end = None
  y0 = null_fit.table_y()[0]
  for i,target_value in enumerate(null_fit.table_y()):
    if (i_x_begin is None and target_value < y0 * factor_y_x_begin):
      i_x_begin = i
    if (i_x_end is None and target_value < y0 * factor_y_x_end):
      i_x_end = i+1
      break
  if (i_x_end is None):
    i_x_end = null_fit.table_y().size()
  if (i_x_begin is None):
    i_x_begin = min(existing_gaussian.n_parameters(), i_x_end-1)
  assert i_x_end > 0
  assert i_x_begin < i_x_end
  n_terms = existing_gaussian.n_terms() + 1
  i_x_step = max(1, ifloor((i_x_end-i_x_begin) / (factor_x_step*n_terms)))
  if (n_terms == 1): n_start_fractions = 2
  best_min = None
  for i_x in xrange(i_x_begin, i_x_end, i_x_step):
    for i_split in xrange(-1, existing_gaussian.n_terms()):
      for i_start_fraction in xrange(0,n_start_fractions):
        gaussian_fit = make_start_gaussian(
          null_fit=null_fit,
          existing_gaussian=existing_gaussian,
          i_split=i_split,
          i_x=i_x,
          start_fraction=i_start_fraction/float(n_start_fractions))
        for target_power in target_powers:
          good_min = find_max_x(
            gaussian_fit=gaussian_fit,
            target_powers=[target_power],
            minimize_using_sigmas=minimize_using_sigmas,
            n_repeats_minimization=n_repeats_minimization,
            shift_sqrt_b_mod_n=shift_sqrt_b_mod_n,
            b_min=b_min,
            max_max_error=max_max_error)
          if (good_min is not None and good_min.is_better_than(best_min)):
            best_min = good_min
  return best_min
Example 26
def write_xplor_map_file(coeffs, frac_min, frac_max, file_base):
    fft_map = coeffs.fft_map(resolution_factor=1 / 3.0)
    fft_map.apply_sigma_scaling()
    n_real = fft_map.n_real()
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    title_lines = ["REMARK map covering model + 3.0A buffer"]
    file_name = "%s.map" % file_base
    fft_map.as_xplor_map(file_name=file_name,
                         title_lines=title_lines,
                         gridding_first=gridding_first,
                         gridding_last=gridding_last)
    return file_name
Example 27
def mask_grid(xrs, buffer, map_data, n_real):
    # XXX move to C++
    frac_min, frac_max = xrs.unit_cell().box_frac_around_sites(
        sites_cart=xrs.sites_cart(), buffer=buffer - 1.5)
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    new_map = flex.double(flex.grid(n_real), 0)
    for i in range(gridding_first[0], gridding_last[0]):
        for j in range(gridding_first[1], gridding_last[1]):
            for k in range(gridding_first[2], gridding_last[2]):
                if (i > 0 and i < n_real[0] and j > 0 and j < n_real[1]
                        and k > 0 and k < n_real[2]):
                    new_map[(i, j, k)] = map_data[(i, j, k)]
    return new_map
Example 28
def write_xplor_map_file (coeffs, frac_min, frac_max, file_base) :
  fft_map = coeffs.fft_map(resolution_factor=1/3.0)
  fft_map.apply_sigma_scaling()
  n_real = fft_map.n_real()
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  title_lines=["REMARK map covering model + 3.0A buffer"]
  file_name = "%s.map" % file_base
  fft_map.as_xplor_map(
    file_name=file_name,
    title_lines=title_lines,
    gridding_first=gridding_first,
    gridding_last=gridding_last)
  return file_name
Example 29
 def __init__(self, params, coeffs, atom_selection_manager=None,
              xray_structure=None):
   adopt_init_args(self, locals())
   fft_map = coeffs.fft_map(resolution_factor =
     self.params.grid_resolution_factor)
   if(self.params.scale == "volume"): fft_map.apply_volume_scaling()
   elif(self.params.scale == "sigma"): fft_map.apply_sigma_scaling()
   else: raise RuntimeError
   title_lines=["REMARK file: %s" %
     show_string(os.path.basename(self.params.file_name))]
   title_lines.append("REMARK directory: %s" %
     show_string(os.path.dirname(self.params.file_name)))
   title_lines.append("REMARK %s" % date_and_time())
   assert self.params.region in ["selection", "cell"]
   if(self.params.region == "selection" and xray_structure is not None) :
     map_iselection = None
     if atom_selection_manager is not None :
       map_iselection = self.atom_iselection()
     frac_min, frac_max = self.box_around_selection(
       iselection = map_iselection,
       buffer     = self.params.atom_selection_buffer)
     n_real = fft_map.n_real()
     gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
     gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
     title_lines.append('REMARK map around selection')
     title_lines.append('REMARK   atom_selection=%s' %
       show_string(self.params.atom_selection))
     title_lines.append('REMARK   atom_selection_buffer=%.6g' %
       self.params.atom_selection_buffer)
     if(map_iselection is None):
       sel_size = self.xray_structure.scatterers().size()
     else:
       sel_size = map_iselection.size()
     title_lines.append('REMARK   number of atoms selected: %d' % sel_size)
   else:
     gridding_first = None
     gridding_last = None
     title_lines.append("REMARK map covering the unit cell")
   if params.format == "xplor" :
     fft_map.as_xplor_map(
       file_name      = self.params.file_name,
       title_lines    = title_lines,
       gridding_first = gridding_first,
       gridding_last  = gridding_last)
   else :
     fft_map.as_ccp4_map(
       file_name      = self.params.file_name,
       gridding_first = gridding_first,
       gridding_last  = gridding_last,
       labels=title_lines)
Example 30
def mask_grid(xrs, buffer, map_data, n_real):
  # XXX move to C++
  frac_min, frac_max = xrs.unit_cell().box_frac_around_sites(
    sites_cart = xrs.sites_cart(), buffer = buffer-1.5)
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  new_map = flex.double(flex.grid(n_real),0)
  for i in range(gridding_first[0], gridding_last[0]):
    for j in range(gridding_first[1], gridding_last[1]):
      for k in range(gridding_first[2], gridding_last[2]):
        if(i> 0 and i<n_real[0] and
           j> 0 and j<n_real[1] and
           k> 0 and k<n_real[2]):
          new_map[(i,j,k)] = map_data[(i,j,k)]
  return new_map
Example 31
def get_processes (processes) :
  """
  Determine number of processes dynamically: number of CPUs minus the current
  load average (with a minimum of 1).
  """
  if (processes in [None, Auto]) :
    if (os.name == "nt") or (sys.version_info < (2,6)) :
      return 1
    from libtbx import introspection
    auto_adjust = (processes is Auto)
    processes = introspection.number_of_processors()
    if (auto_adjust) :
      processes = max(ifloor(processes - os.getloadavg()[0]), 1)
  else :
    assert (processes > 0)
  return processes
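
The auto-adjustment above amounts to "number of CPUs minus the one-minute load average, floored, but never less than 1". The same arithmetic in isolation (the CPU count is a placeholder standing in for introspection.number_of_processors()):

import os
from libtbx.math_utils import ifloor

n_cpu = 8                   # placeholder; normally taken from libtbx introspection
load1 = os.getloadavg()[0]  # one-minute load average (POSIX only)
print(max(ifloor(n_cpu - load1), 1))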
Example 32
    def modify_mask_box(self, mask_data, sites_frac):
        box_buffer = self.params.box_buffer
        # Number of selected atoms
        n_selected = self.selection_bool.count(True)
        na = mask_data.all()
        n_selected_p1 = sites_frac.size()
        n_boxes = int(n_selected_p1 / n_selected)
        box_list = [[] for i in range(n_boxes)]
        for n_box in range(n_boxes):
            for i in range(n_selected):
                box_list[n_box].append(sites_frac[n_box + n_boxes * i])
        na = self.mask_data_all.all()
        k = 0
        for box in box_list:
            k += 1
            x_min = min(frac[0] for frac in box)
            y_min = min(frac[1] for frac in box)
            z_min = min(frac[2] for frac in box)
            x_max = max(frac[0] for frac in box)
            y_max = max(frac[1] for frac in box)
            z_max = max(frac[2] for frac in box)
            frac_min = [x_min, y_min, z_min]
            frac_max = [x_max, y_max, z_max]

            cs = self.xray_structure.crystal_symmetry()

            # Add buffer to box if indicated.
            if (box_buffer is not None):
                cushion = flex.double(cs.unit_cell().fractionalize(
                    (box_buffer, ) * 3))
                frac_min = list(flex.double(frac_min) - cushion)
                frac_max = list(flex.double(frac_max) + cushion)

            gridding_first = [ifloor(f * n) for f, n in zip(frac_min, na)]
            gridding_last = [iceil(f * n) for f, n in zip(frac_max, na)]

            for j in range(3):
                if (gridding_last[j] - gridding_first[j] >= na[j]):
                    raise Sorry(
                        "The box is too big. Decrease box_buffer or use a " +
                        "different selection")

            maptbx.set_box(value=0,
                           map_data_to=mask_data,
                           start=gridding_first,
                           end=gridding_last)
        return mask_data
Example 33
    def __init__(self, map_manager, model, cushion, wrapping, log=sys.stdout):
        adopt_init_args(self, locals())
        self.basis_for_boxing_string = 'using model, wrapping=%s' % (wrapping)
        # safeguards
        assert isinstance(wrapping, bool)
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self.map_manager.map_data().accessor().origin() == (0, 0, 0)

        # Make sure original map_manager symmetry matches model or original model
        original_uc_symmetry = map_manager.original_unit_cell_crystal_symmetry
        assert (original_uc_symmetry.is_similar_symmetry(
            model.crystal_symmetry())
                or (model.get_shift_manager()
                    and original_uc_symmetry.is_similar_symmetry(
                        model.get_shift_manager().get_original_cs())))

        assert cushion >= 0
        if wrapping:
            assert map_manager.unit_cell_grid == map_manager.map_data().all()
        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_frac = model.get_sites_frac()
        map_data = map_manager.map_data()
        # convert cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)
        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()

        # Apply to model and to map_manager so that self.model()
        #  and self.map_manager are boxed versions

        self.map_manager = self.apply_to_map(self.map_manager)
        self.model = self.apply_to_model(self.model)
Example 34
def elliptical_truncation(array,
                          b_cart,
                          scale_factor=1.0,
                          target_completeness=None):
    from cctbx import adptbx
    from scitbx.array_family import flex
    indices = array.indices()
    axis_index = -1
    min_b_directional = sys.maxsize
    for n, b_index in enumerate(b_cart[0:3]):
        if (b_index < min_b_directional):
            min_b_directional = b_index
            axis_index = n
    assert (0 <= axis_index <= 3)
    max_index_along_axis = [0, 0, 0]
    for hkl in indices:
        if (hkl[axis_index] > max_index_along_axis[axis_index]):
            max_index_along_axis[axis_index] = hkl[axis_index]
    assert (max_index_along_axis != [0, 0, 0])
    scale_cutoff = array.unit_cell().debye_waller_factor(
        miller_index=max_index_along_axis, b_cart=b_cart) * scale_factor
    scale = array.debye_waller_factors(b_cart=b_cart).data()
    if (target_completeness is not None):
        assert (target_completeness > 0) and (target_completeness <= 100)
        completeness_start = array.completeness() * 100.0
        assert (completeness_start > target_completeness)
        scale_srt = sorted(scale)
        i_max = ifloor(
            len(scale_srt) * (completeness_start - target_completeness) / 100)
        scale_cutoff = scale_srt[i_max]
    data = array.data()
    sigmas = array.sigmas()
    n_hkl = indices.size()
    i = 0
    while (i < len(indices)):
        if (scale[i] < scale_cutoff):
            del indices[i]
            del data[i]
            del scale[i]
            if (sigmas is not None):
                del sigmas[i]
        else:
            i += 1
    delta_n_hkl = n_hkl - indices.size()
    return (n_hkl, delta_n_hkl)
Example 35
    def __init__(self,
                 map_manager=None,
                 model=None,
                 cushion=None,
                 wrapping=None):
        self.map_manager = map_manager
        self.model = model
        self.wrapping = wrapping

        self.basis_for_boxing_string = 'using model, wrapping=%s' % (wrapping)

        # safeguards
        assert wrapping is not None
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self.map_manager.map_data().accessor().origin() == (0, 0, 0)
        assert map_manager.crystal_symmetry().is_similar_symmetry(
            model.crystal_symmetry())
        assert cushion >= 0

        if wrapping:
            assert map_manager.unit_cell_grid == map_manager.map_data().all()

        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_frac = model.get_sites_frac()
        map_data = map_manager.map_data()
        # convert cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)

        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()
Example 36
def elliptical_truncation (array,
                           b_cart,
                           scale_factor=1.0,
                           target_completeness=None) :
  from cctbx import adptbx
  from scitbx.array_family import flex
  indices = array.indices()
  axis_index = -1
  min_b_directional = sys.maxint
  for n, b_index in enumerate(b_cart[0:3]) :
    if (b_index < min_b_directional) :
      min_b_directional = b_index
      axis_index = n
  assert (0 <= axis_index <= 3)
  max_index_along_axis = [0,0,0]
  for hkl in indices :
    if (hkl[axis_index] > max_index_along_axis[axis_index]) :
      max_index_along_axis[axis_index] = hkl[axis_index]
  assert (max_index_along_axis != [0,0,0])
  scale_cutoff = array.unit_cell().debye_waller_factor(
    miller_index=max_index_along_axis, b_cart=b_cart) * scale_factor
  scale = array.debye_waller_factors(b_cart=b_cart).data()
  if (target_completeness is not None) :
    assert (target_completeness > 0) and (target_completeness <= 100)
    completeness_start = array.completeness() * 100.0
    assert (completeness_start > target_completeness)
    scale_srt = sorted(scale)
    i_max = ifloor(len(scale_srt)*(completeness_start-target_completeness)/100)
    scale_cutoff = scale_srt[i_max]
  data = array.data()
  sigmas = array.sigmas()
  n_hkl = indices.size()
  i = 0
  while (i < len(indices)) :
    if (scale[i] < scale_cutoff) :
      del indices[i]
      del data[i]
      del scale[i]
      if (sigmas is not None) :
        del sigmas[i]
    else :
      i += 1
  delta_n_hkl = n_hkl - indices.size()
  return (n_hkl, delta_n_hkl)
Example 37
def get_processes(processes):
    """
  Determine number of processes dynamically: number of CPUs minus the current
  load average (with a minimum of 1).

  :param processes: default number of processes (may be None or Auto)
  :returns: actual number of processes to use
  """
    if (processes in [None, Auto]):
        if os.name == "nt":
            return 1
        from libtbx import introspection
        auto_adjust = (processes is Auto)
        processes = introspection.number_of_processors()
        if (auto_adjust):
            processes = max(ifloor(processes - os.getloadavg()[0]), 1)
    else:
        assert (processes > 0)
    return processes
Example 38
    def __init__(self, nb, bf=1, skew=0, taper=1):
        """
% autoTree  Create System Models of Kinematic Trees
% autoTree(nb,bf,skew,taper) creates system models of kinematic trees
% having revolute joints.  nb and bf specify the number of bodies in the
% tree, and the branching factor, respectively.  The latter is the average
% number of children of a nonterminal node, and must be >=1.  bf=1 produces
% an unbranched tree; bf=2 produces a binary tree; and non-integer values
% produce trees in which the number of children alternates between
% floor(bf) and ceil(bf) in such a way that the average is bf.  Trees are
% constructed (and numbered) breadth-first.  Link i is a thin-walled
% cylindrical tube of length l(i), radius l(i)/20, and mass m(i), lying
% between 0 and l(i) on the x axis of its local coordinate system.  The
% values of l(i) and m(i) are determined by the tapering coefficient:
% l(i)=taper^(i-1) and m(i)=taper^(3*(i-1)).  Thus, if taper=1 then
% m(i)=l(i)=1 for all i.  The inboard joint axis of link i lies on the
% local z axis, and its outboard axis passes through the point (l(i),0,0)
% and is rotated about the x axis by an angle of skew radians relative to
% the inboard axis.  If the link has more than one outboard joint then they
% all have the same axis.  If skew=0 then the mechanism is planar.  The
% final one, two or three arguments can be omitted, in which case they
% assume default values of taper=1, skew=0 and bf=1.
    """
        self.NB = nb
        self.pitch = [0] * nb
        self.parent = [None] * nb
        self.Xtree = []
        self.I = []
        len_ = []
        for i in xrange(nb):
            self.parent[i] = ifloor((i - 1 + math.ceil(bf)) / bf) - 1
            if (self.parent[i] == -1):
                self.Xtree.append(Xtrans([0, 0, 0]))
            else:
                self.Xtree.append(
                    Xrotx(skew) * Xtrans([len_[self.parent[i]], 0, 0]))
            len_.append(taper**i)
            mass = taper**(3 * i)
            CoM = len_[i] * matrix.col([0.5, 0, 0])
            Icm = mass * len_[i]**2 * matrix.diag(
                [0.0025, 1.015 / 12, 1.015 / 12])
            self.I.append(mcI(mass, CoM, Icm))
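
The parent-index formula above, ifloor((i - 1 + ceil(bf)) / bf) - 1, numbers the bodies breadth-first. A short check of that arithmetic for bf=2 (a binary tree), independent of the rest of the class:

import math
from libtbx.math_utils import ifloor

bf = 2
parents = [ifloor((i - 1 + math.ceil(bf)) / bf) - 1 for i in range(7)]
print(parents)  # [-1, 0, 0, 1, 1, 2, 2]: a root followed by a breadth-first binary tree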
Example 39
  def __init__(self, nb, bf=1, skew=0, taper=1):
    """
% autoTree  Create System Models of Kinematic Trees
% autoTree(nb,bf,skew,taper) creates system models of kinematic trees
% having revolute joints.  nb and bf specify the number of bodies in the
% tree, and the branching factor, respectively.  The latter is the average
% number of children of a nonterminal node, and must be >=1.  bf=1 produces
% an unbranched tree; bf=2 produces a binary tree; and non-integer values
% produce trees in which the number of children alternates between
% floor(bf) and ceil(bf) in such a way that the average is bf.  Trees are
% constructed (and numbered) breadth-first.  Link i is a thin-walled
% cylindrical tube of length l(i), radius l(i)/20, and mass m(i), lying
% between 0 and l(i) on the x axis of its local coordinate system.  The
% values of l(i) and m(i) are determined by the tapering coefficient:
% l(i)=taper^(i-1) and m(i)=taper^(3*(i-1)).  Thus, if taper=1 then
% m(i)=l(i)=1 for all i.  The inboard joint axis of link i lies on the
% local z axis, and its outboard axis passes through the point (l(i),0,0)
% and is rotated about the x axis by an angle of skew radians relative to
% the inboard axis.  If the link has more than one outboard joint then they
% all have the same axis.  If skew=0 then the mechanism is planar.  The
% final one, two or three arguments can be omitted, in which case they
% assume default values of taper=1, skew=0 and bf=1.
    """
    self.NB = nb
    self.pitch = [0] * nb
    self.parent = [None] * nb
    self.Xtree = []
    self.I = []
    len_ = []
    for i in xrange(nb):
      self.parent[i] = ifloor((i-1+math.ceil(bf))/bf)-1
      if (self.parent[i] == -1):
        self.Xtree.append(Xtrans([0,0,0]))
      else:
        self.Xtree.append(Xrotx(skew) * Xtrans([len_[self.parent[i]],0,0]))
      len_.append(taper**i)
      mass = taper**(3*i)
      CoM = len_[i] * matrix.col([0.5,0,0])
      Icm = mass * len_[i]**2 * matrix.diag([0.0025,1.015/12,1.015/12])
      self.I.append(mcI(mass, CoM, Icm))
Example 40
def write_dsn6_map (sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.dsn6
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  print "n_real:", n_real
  print "gridding start:", gridding_first
  print "gridding end:", gridding_last
  iotbx.dsn6.write_dsn6_map(
    file_name=file_name,
    unit_cell=unit_cell,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data)
Example 41
def write_ccp4_map (sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.ccp4_map
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  space_group = sgtbx.space_group_info("P1").group()
  iotbx.ccp4_map.write_ccp4_map(
    file_name=file_name,
    unit_cell=unit_cell,
    space_group=space_group,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data,
    labels=flex.std_string(["iotbx.map_conversion.write_ccp4_map_box"]))
Example 42
def write_dsn6_map(sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10):
  import iotbx.dsn6
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  print "n_real:", n_real
  print "gridding start:", gridding_first
  print "gridding end:", gridding_last
  iotbx.dsn6.write_dsn6_map(
    file_name=file_name,
    unit_cell=unit_cell,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data)
Example 43
def write_xplor_map(sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.xplor.map
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  gridding = iotbx.xplor.map.gridding(n     = map_data.focus(),
                                      first = gridding_first,
                                      last  = gridding_last)
  iotbx.xplor.map.writer(
    file_name          = file_name,
    is_p1_cell         = True,
    title_lines        = [' None',],
    unit_cell          = unit_cell,
    gridding           = gridding,
    data               = map_data,
    average            = -1,
    standard_deviation = -1)
Example 44
def pdbMap(fileName):
    d_min = 0.4
    pdb_inp = iotbx.pdb.input(fileName)
    xray_structure = pdb_inp.xray_structure_simple()
    print("d_min:", d_min)
    f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
    x = f_calc.amplitudes()
    file2 = open(r"computedValues.txt", 'w')
    for i in x:
        for m in i:
            file2.write(str(m) + " ")
        file2.write('\n')
    fft_map = f_calc.fft_map()
    n = fft_map.n_real()
    print("unit cell gridding:", n)
    fft_map.as_xplor_map(file_name="unit_cell.map")
    block_first = tuple([ifloor(i * 0.2) for i in n])
    block_last = tuple(
        [max(f + 10, iceil(i * 0.7)) for f, i in zip(block_first, n)])
    print("block first:", block_first)
    print "block last: ", block_last
    fft_map.as_xplor_map(file_name="block.map",
                         gridding_first=block_first,
                         gridding_last=block_last)
Example 45
 def format_x_axis(self):
     if self.tables[0].x_is_inverse_d_min:
         xdata = self.tables[0].get_x_as_resolution()
         self.p.set_xlabel("Resolution",
                           fontproperties=self.get_font("axis_label"))
         if (getattr(self.tables[0], "force_exact_x_labels", False)):
             xticks_ = self.tables[0].get_x_values()
         else:
             xticks_ = self.p.get_xticks()
         n_skip = max(1, ifloor(len(xticks_) / 10))
         xticks = []
         xticklabels = []
         k = 0
         while (k < len(xticks_)):
             x = xticks_[k]
             xticks.append(x)
             if (x != 0):
                 if x > 0.0:
                     x = math.sqrt(1 / x)
                 else:
                     x = -math.sqrt(1 / abs(x))
                 xticklabels.append("%.2f" % x)
             else:  # FIXME?
                 xticklabels.append("")
             k += n_skip
         self.p.set_xticks(xticks)
         self.p.set_xticklabels(xticklabels)
     else:
         if self.graph.x_axis_label is not None:
             self.p.set_xlabel(self.graph.x_axis_label,
                               fontproperties=self.get_font("axis_label"))
         else:
             self.p.set_xlabel(self.graph.x_label,
                               fontproperties=self.get_font("axis_label"))
     for ticklabel in self.p.get_xticklabels():
         ticklabel.set_fontproperties(self.get_font("value_label"))
Example 46
def exercise_real_space_refinement(verbose):
  if (verbose):
    out = sys.stdout
  else:
    out = StringIO()
  out_of_bounds_clamp = maptbx.out_of_bounds_clamp(0)
  out_of_bounds_raise = maptbx.out_of_bounds_raise()
  crystal_symmetry = crystal.symmetry(
    unit_cell=(10,10,10,90,90,90),
    space_group_symbol="P 1")
  xray_structure = xray.structure(
    crystal_symmetry=crystal_symmetry,
    scatterers=flex.xray_scatterer([
      xray.scatterer(label="C", site=(0,0,0))]))
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=False,
    d_min=1)
  f_calc = miller_set.structure_factors_from_scatterers(
    xray_structure=xray_structure).f_calc()
  fft_map = f_calc.fft_map()
  fft_map.apply_sigma_scaling()
  real_map = fft_map.real_map_unpadded()
  #### unit_cell test
  delta_h = .005
  basic_map = maptbx.basic_map(
    maptbx.basic_map_unit_cell_flag(),
    real_map,
    real_map.focus(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell())
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
  ### non_symmetric test
  #
  minfrac = crystal_symmetry.unit_cell().fractionalize((-5,-5,-5))
  maxfrac = crystal_symmetry.unit_cell().fractionalize((5,5,5))
  gridding_first = [ifloor(n*b) for n,b in zip(fft_map.n_real(), minfrac)]
  gridding_last = [iceil(n*b) for n,b in zip(fft_map.n_real(), maxfrac)]
  data=maptbx.copy(real_map, gridding_first, gridding_last)
  #
  basic_map = maptbx.basic_map(
    maptbx.basic_map_non_symmetric_flag(),
    data,
    fft_map.n_real(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell())
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
  ### asu test
  #
  minfrac = crystal_symmetry.unit_cell().fractionalize((0,0,0))
  maxfrac = crystal_symmetry.unit_cell().fractionalize((10,10,10))
  gridding_first = [ifloor(n*b) for n,b in zip(fft_map.n_real(), minfrac)]
  gridding_last = [iceil(n*b) for n,b in zip(fft_map.n_real(), maxfrac)]
  data=maptbx.copy(real_map, gridding_first, gridding_last)
  #
  basic_map = maptbx.basic_map(
    maptbx.basic_map_asu_flag(),
    data,
    crystal_symmetry.space_group(),
    crystal_symmetry.direct_space_asu().as_float_asu(),
    real_map.focus(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell(),
    0.5,
    True)
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
Example 47
 def line_between_points (self, x1, y1, x2, y2, n_values=100) :
   """
   Given two points on the image, sample intensities along a line connecting
   them (using linear interpolation).  This also calculates the coordinates
   of each sample point, which is used for lattice dimension calculations
   once peaks have been identified.  Arguments are in image pixel coordinates
   (starting at 1,1).
   """
   x1_, y1_ = self.image_coords_as_array_coords(x1, y1)
   x2_, y2_ = self.image_coords_as_array_coords(x2, y2)
   # note: the requested n_values is discarded; the line is resampled at one
   # point per pixel of its length
   n_values = ifloor(math.sqrt((x2_-x1_)**2 + (y2_-y1_)**2))
   delta_x = (x2_ - x1_) / (n_values - 1)
   delta_y = (y2_ - y1_) / (n_values - 1)
   vals = []
   img_coords = []
   d = self._raw.linearintdata
   # TODO remarkably, this is reasonably fast in Python, but it would
   # probably be more at home in scitbx.math
   for n in range(n_values) :
     x = x1_ + (n * delta_x)
     y = y1_ + (n * delta_y)
     xd, yd = self.array_coords_as_detector_coords(x, y)
     img_coords.append((xd,yd))
     x_1 = ifloor(x)
     x_2 = iceil(x)
     y_1 = ifloor(y)
     y_2 = iceil(y)
     v11 = d[(x_1, y_1)]
     v12 = d[(x_1, y_2)]
     v21 = d[(x_2, y_1)]
     v22 = d[(x_2, y_2)]
     if (x_2 == x_1) :
       if (y_2 == y_1) :
         vxy = v11
       else :
         vxy = ((v12 * (y - y_1)) + (v11 * (y_2 - y))) / (y_2 - y_1)
     elif (y_2 == y_1) :
       vxy =  ((v21 * (x - x_1)) + (v11 * (x_2 - x))) / (x_2 - x_1)
     else :
       dxdy = (y_2 - y_1) * (x_2 - x_1)
       vxy = ((v11 / dxdy) * (x_2 - x) * (y_2 - y)) + \
             ((v21 / dxdy) * (x - x_1) * (y_2 - y)) + \
             ((v12 / dxdy) * (x_2 - x) * (y - y_1)) + \
             ((v22 / dxdy) * (x - x_1) * (y - y_1))
     vals.append(vxy)
   lattice_length = None
   if (len(vals) > 5) :
     # first find peaks in the profile
     peaks = []
     avg = sum(vals) / len(vals)
     filtered_vals = []
     for x in vals :
       if (x <= avg*3) :
         filtered_vals.append(x)
     background = sum(filtered_vals) / len(filtered_vals)
     i = 2
     while (i < len(vals) - 2) :
       x = vals[i]
       if (x <= background) :
         pass
       elif ((x > vals[i-1]) and (x > vals[i-2]) and
             (x > vals[i+1]) and (x > vals[i+2])) :
         peaks.append(i)
       i += 1
     if (len(peaks) > 0) :
       # calculate the average lattice length
       center_x, center_y = self.get_beam_center_mm()
       distances = []
       i = 1
       while (i < len(peaks)) :
         # use distinct names so the x1..y2 arguments are not clobbered
         px1, py1 = img_coords[peaks[i-1]]
         px2, py2 = img_coords[peaks[i]]
         rs_distance = rstbx.utils.reciprocal_space_distance(px1, py1, px2, py2,
           wavelength=self.get_wavelength(),
           center_x=center_x,
           center_y=center_y,
           distance=self.get_detector_distance(),
           detector_two_theta=self.get_detector_2theta(),
           distance_is_corrected=True)
         assert (rs_distance > 0)
         distances.append(1 / rs_distance)
         i += 1
       lattice_length = sum(distances) / len(distances)
   distance = self.distance_between_points(x1, y1, x2, y2)
   return line_profile(vals, distance, lattice_length)
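
The body of the sampling loop above is ordinary bilinear interpolation between the four pixels surrounding each sample point. A self-contained sketch of just that step (the toy grid and sample position are invented for illustration):

import math

def bilinear(grid, x, y):
  """Interpolate grid[(ix, iy)] at the fractional position (x, y)."""
  x1, x2 = int(math.floor(x)), int(math.ceil(x))
  y1, y2 = int(math.floor(y)), int(math.ceil(y))
  if (x1 == x2) and (y1 == y2):
    return grid[(x1, y1)]
  if (x1 == x2):   # degenerate in x: interpolate along y only
    return ((grid[(x1, y2)]*(y - y1)) + (grid[(x1, y1)]*(y2 - y))) / (y2 - y1)
  if (y1 == y2):   # degenerate in y: interpolate along x only
    return ((grid[(x2, y1)]*(x - x1)) + (grid[(x1, y1)]*(x2 - x))) / (x2 - x1)
  dxdy = (y2 - y1) * (x2 - x1)
  return ((grid[(x1, y1)] * (x2 - x) * (y2 - y)) +
          (grid[(x2, y1)] * (x - x1) * (y2 - y)) +
          (grid[(x1, y2)] * (x2 - x) * (y - y1)) +
          (grid[(x2, y2)] * (x - x1) * (y - y1))) / dxdy

# Toy 2x2 neighbourhood: value rises with x and y.
grid = {(0, 0): 1.0, (1, 0): 2.0, (0, 1): 3.0, (1, 1): 4.0}
print(bilinear(grid, 0.5, 0.5))  # 2.5
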
Example 48
def region_density_correlation(
      large_unit_cell,
      large_d_min,
      large_density_map,
      sites_cart,
      site_radii,
      work_scatterers):
  sites_frac_large = large_unit_cell.fractionalize(sites_cart)
  large_frac_min = sites_frac_large.min()
  large_frac_max = sites_frac_large.max()
  large_n_real = large_density_map.focus()
  from scitbx import fftpack
  from libtbx.math_utils import ifloor, iceil
  large_ucp = large_unit_cell.parameters()
  small_n_real = [0,0,0]
  small_origin_in_large_grid = [0,0,0]
  small_abc = [0,0,0]
  sites_frac_shift = [0,0,0]
  for i in xrange(3):
    grid_step = large_ucp[i] / large_n_real[i]
    buffer = large_d_min / grid_step
    grid_min = ifloor(large_frac_min[i] * large_n_real[i] - buffer)
    grid_max = iceil(large_frac_max[i] * large_n_real[i] + buffer)
    min_grid = grid_max - grid_min + 1
    small_n_real[i] = fftpack.adjust_gridding(min_grid=min_grid, max_prime=5)
    if (small_n_real[i] < large_n_real[i]):
      shift_min = (small_n_real[i] - min_grid) // 2
      small_origin_in_large_grid[i] = grid_min - shift_min
      small_abc[i] = small_n_real[i] * grid_step
      sites_frac_shift[i] = small_origin_in_large_grid[i] / large_n_real[i]
    else:
      small_n_real[i] = large_n_real[i]
      small_origin_in_large_grid[i] = 0
      small_abc[i] = large_ucp[i]
      sites_frac_shift[i] = 0
  sites_cart_shift = large_unit_cell.orthogonalize(sites_frac_shift)
  sites_cart_small = sites_cart - sites_cart_shift
  from cctbx import xray
  small_xray_structure = xray.structure(
    crystal_symmetry=crystal.symmetry(
      unit_cell=tuple(small_abc)+large_ucp[3:],
      space_group_symbol="P1"),
    scatterers=work_scatterers)
  small_xray_structure.set_sites_cart(sites_cart=sites_cart_small)
  small_f_calc = small_xray_structure.structure_factors(
    d_min=large_d_min).f_calc()
  small_gridding = crystal_gridding(
    unit_cell=small_f_calc.unit_cell(),
    space_group_info=small_f_calc.space_group_info(),
    pre_determined_n_real=small_n_real)
  from cctbx import miller
  small_fft_map = miller.fft_map(
    crystal_gridding=small_gridding,
    fourier_coefficients=small_f_calc)
  small_fft_map.apply_sigma_scaling()
  small_map = small_fft_map.real_map_unpadded()
  grid_indices = grid_indices_around_sites(
    unit_cell=small_xray_structure.unit_cell(),
    fft_n_real=small_n_real,
    fft_m_real=small_n_real,
    sites_cart=sites_cart_small,
    site_radii=site_radii)
  small_copy_from_large_map = copy(
    map_unit_cell=large_density_map,
    first=small_origin_in_large_grid,
    last=matrix.col(small_origin_in_large_grid)
       + matrix.col(small_n_real)
       - matrix.col((1,1,1)))
  assert small_copy_from_large_map.all() == small_map.all()
  corr = flex.linear_correlation(
    x=small_map.select(grid_indices),
    y=small_copy_from_large_map.select(grid_indices))
  if (not corr.is_well_defined()):
    return None
  return corr.coefficient()
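
The final step compares the small synthetic map against the corresponding block of the large map with flex.linear_correlation. Assuming that is an ordinary Pearson correlation coefficient, a plain-Python sketch of the same quantity:

import math

def linear_correlation(x, y):
  """Plain Pearson correlation; a stand-in for flex.linear_correlation."""
  assert len(x) == len(y) and len(x) > 0
  n = len(x)
  mx = sum(x) / n
  my = sum(y) / n
  num = sum((a - mx) * (b - my) for a, b in zip(x, y))
  den = math.sqrt(sum((a - mx)**2 for a in x) * sum((b - my)**2 for b in y))
  if (den == 0):
    return None   # analogous to corr.is_well_defined() returning False
  return num / den

print(linear_correlation([1, 2, 3, 4], [2, 4, 6, 8]))  # 1.0
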
def rho_stats(
      xray_structure,
      d_min,
      resolution_factor,
      electron_sum_radius,
      zero_out_f000):
  n_real = []
  n_half_plus = []
  n_half_minus = []
  s2 = d_min * resolution_factor * 2
  for l in xray_structure.unit_cell().parameters()[:3]:
    nh = ifloor(l / s2)
    n_real.append(2*nh+1)
    n_half_plus.append(nh)
    n_half_minus.append(-nh)
  n_real = tuple(n_real)
  n_real_product = matrix.col(n_real).product()
  crystal_gridding = maptbx.crystal_gridding(
    unit_cell=xray_structure.unit_cell(),
    space_group_info=xray_structure.space_group_info(),
    pre_determined_n_real=n_real)
  miller_indices = flex.miller_index()
  miller_indices.reserve(n_real_product)
  for h in flex.nested_loop(n_half_minus, n_half_plus, open_range=False):
    miller_indices.append(h)
  assert miller_indices.size() == n_real_product
  #
  miller_set = miller.set(
    crystal_symmetry=xray_structure,
    anomalous_flag=True,
    indices=miller_indices).sort(by_value="resolution")
  assert miller_set.indices()[0] == (0,0,0)
  f_calc = miller_set.structure_factors_from_scatterers(
    xray_structure=xray_structure,
    algorithm="direct",
    cos_sin_table=False).f_calc()
  if (zero_out_f000):
    f_calc.data()[0] = 0j
  #
  unit_cell_volume = xray_structure.unit_cell().volume()
  voxel_volume = unit_cell_volume / n_real_product
  number_of_miller_indices = []
  rho_max = []
  electron_sums_around_atoms = []
  densities_along_x = []
  for f in [f_calc, f_calc.resolution_filter(d_min=d_min)]:
    assert f.indices()[0] == (0,0,0)
    number_of_miller_indices.append(f.indices().size())
    fft_map = miller.fft_map(
      crystal_gridding=crystal_gridding,
      fourier_coefficients=f)
    assert fft_map.n_real() == n_real
    rho = fft_map.real_map_unpadded() / unit_cell_volume
    assert approx_equal(voxel_volume*flex.sum(rho), f_calc.data()[0])
    if (xray_structure.scatterers().size() == 1):
      assert flex.max_index(rho) == 0
      rho_max.append(rho[0])
    else:
      rho_max.append(flex.max(rho))
    site_cart = xray_structure.sites_cart()[0]
    gias = maptbx.grid_indices_around_sites(
      unit_cell=xray_structure.unit_cell(),
      fft_n_real=n_real,
      fft_m_real=n_real,
      sites_cart=flex.vec3_double([site_cart]),
      site_radii=flex.double([electron_sum_radius]))
    electron_sums_around_atoms.append(
      flex.sum(rho.as_1d().select(gias))*voxel_volume)
    #
    a = xray_structure.unit_cell().parameters()[0]
    nx = n_real[0]
    nxh = nx//2
    x = []
    y = []
    for ix in xrange(-nxh,nxh+1):
      x.append(a*ix/nx)
      y.append(rho[(ix%nx,0,0)])
    densities_along_x.append((x,y))
  #
  print \
    "%3.1f %4.2f %-12s %5d %5d | %6.3f %6.3f | %6.3f %6.3f | %4.2f %5.1f" % (
      d_min,
      resolution_factor,
      n_real,
      number_of_miller_indices[0],
      number_of_miller_indices[1],
      electron_sums_around_atoms[0],
      electron_sums_around_atoms[1],
      rho_max[0],
      rho_max[1],
      f_calc.data()[0].real,
      u_as_b(xray_structure.scatterers()[0].u_iso))
  #
  return densities_along_x
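
rho_stats above chooses an odd, symmetric gridding (2*nh+1 points per axis) and then enumerates every Miller index in the closed box [-nh, +nh]^3. A stand-alone sketch of that gridding and index count, with itertools.product standing in for flex.nested_loop(open_range=False) and invented cell/resolution values:

import itertools
import math

# Hypothetical inputs: a 20 A cubic cell, d_min = 2 A, resolution_factor = 1/4.
cell_edges = (20.0, 20.0, 20.0)
d_min, resolution_factor = 2.0, 0.25
s2 = d_min * resolution_factor * 2           # same quantity as s2 above

n_half = [int(math.floor(edge / s2)) for edge in cell_edges]
n_real = tuple(2*nh + 1 for nh in n_half)    # odd, symmetric gridding
print(n_real)                                # (41, 41, 41)

# Closed index box [-nh, +nh]^3; itertools.product stands in for
# flex.nested_loop(..., open_range=False).
miller_indices = list(itertools.product(*[range(-nh, nh + 1) for nh in n_half]))
assert len(miller_indices) == n_real[0] * n_real[1] * n_real[2]
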
Example 50
 def get_zoom_box(self, x, y, boxsize=400, mag=16):
     #assert ((boxsize % mag) == 0)
     n_pixels = iceil(boxsize / mag)
     x0 = min(self.img_w - n_pixels, ifloor(x - (n_pixels / 2)))
     y0 = min(self.img_h - n_pixels, ifloor(y - (n_pixels / 2)))
     return (x0, y0, n_pixels, n_pixels)
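
A hedged stand-alone rendering of get_zoom_box for a hypothetical detector image, mainly to make the clamping explicit: the box is kept inside the right and bottom edges, but (as in the original) x0/y0 are not clamped at zero near the top-left corner.

import math

def get_zoom_box(img_w, img_h, x, y, boxsize=400, mag=16):
  """Stand-alone version of the method above for a hypothetical image size."""
  n_pixels = int(math.ceil(boxsize / mag))
  # Keep the box inside the right/bottom edges; the original method does not
  # clamp at zero, so x0/y0 can go negative near the top-left corner.
  x0 = min(img_w - n_pixels, int(math.floor(x - (n_pixels / 2))))
  y0 = min(img_h - n_pixels, int(math.floor(y - (n_pixels / 2))))
  return (x0, y0, n_pixels, n_pixels)

print(get_zoom_box(2048, 2048, 2040, 1000))  # (2023, 987, 25, 25)
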
def hcp_fill_box(cb_op_original_to_sampling, float_asu, continuous_shift_flags,
                 point_distance,
                 buffer_thickness=-1, all_twelve_neighbors=False,
                 exercise_cpp=True):
  if (exercise_cpp):
    cpp = close_packing.hexagonal_sampling_generator(
      cb_op_original_to_sampling=cb_op_original_to_sampling,
      float_asu=float_asu,
      continuous_shift_flags=continuous_shift_flags,
      point_distance=point_distance,
      buffer_thickness=buffer_thickness,
      all_twelve_neighbors=all_twelve_neighbors)
  assert point_distance > 0
  if (buffer_thickness < 0):
    buffer_thickness = point_distance * (2/3. * (.5 * math.sqrt(3)))
  if (exercise_cpp):
    assert cpp.cb_op_original_to_sampling().c()==cb_op_original_to_sampling.c()
    assert cpp.float_asu().unit_cell().is_similar_to(float_asu.unit_cell())
    assert cpp.continuous_shift_flags() == continuous_shift_flags
    assert approx_equal(cpp.point_distance(), point_distance)
    assert approx_equal(cpp.buffer_thickness(), buffer_thickness)
    assert cpp.all_twelve_neighbors() == all_twelve_neighbors
  float_asu_buffer = float_asu.add_buffer(thickness=buffer_thickness)
  hex_cell = hexagonal_sampling_cell(point_distance=point_distance)
  hex_box = hexagonal_box(
    hex_cell=hex_cell,
    vertices_cart=float_asu.shape_vertices(cartesian=True))
  hex_box_buffer = hexagonal_box(
    hex_cell=hex_cell,
    vertices_cart=float_asu_buffer.shape_vertices(cartesian=True))
  box_lower = []
  box_upper = []
  for i in xrange(3):
    if (continuous_shift_flags[i]):
      box_lower.append(0)
      box_upper.append(0)
    else:
      n = iceil(abs(hex_box.max[i]-hex_box.pivot[i]))
      box_lower.append(min(-2,ifloor(hex_box_buffer.min[i]-hex_box.pivot[i])))
      box_upper.append(n+max(2,iceil(hex_box_buffer.max[i]-hex_box.max[i])))
  if (exercise_cpp):
    assert list(cpp.box_lower()) == box_lower
    assert list(cpp.box_upper()) == box_upper
  hex_to_frac_matrix = (
      matrix.sqr(float_asu.unit_cell().fractionalization_matrix())
    * matrix.sqr(hex_cell.orthogonalization_matrix()))
  sites_frac = flex.vec3_double()
  for point in flex.nested_loop(begin=box_lower,
                                end=box_upper,
                                open_range=False):
    site_hex = matrix.col(hex_box.pivot) \
             + matrix.col(hex_indices_as_site(point))
    site_frac = hex_to_frac_matrix * site_hex
    if (float_asu_buffer.is_inside(site_frac)):
      sites_frac.append(site_frac)
    elif (all_twelve_neighbors):
      for offset in [(1,0,0),(1,1,0),(0,1,0),(-1,0,0),(-1,-1,0),(0,-1,0),
                     (0,0,1),(-1,-1,1),(0,-1,1),
                     (0,0,-1),(-1,-1,-1),(0,-1,-1)]:
        offset_hex = hex_indices_as_site(offset, layer=point[2])
        offset_frac = hex_to_frac_matrix * matrix.col(offset_hex)
        other_site_frac = site_frac + offset_frac
        if (float_asu.is_inside(other_site_frac)):
          sites_frac.append(site_frac)
          break
  assert sites_frac.size() > 0
  rt = cb_op_original_to_sampling.c_inv().as_double_array()
  sites_frac = rt[:9] * sites_frac
  sites_frac += rt[9:]
  if (exercise_cpp):
    assert not cpp.at_end()
    cpp_sites_frac = cpp.all_sites_frac()
    assert cpp.at_end()
    assert cpp_sites_frac.size() == sites_frac.size()
    assert approx_equal(cpp_sites_frac, sites_frac)
    cpp.restart()
    assert not cpp.at_end()
    assert approx_equal(cpp.next_site_frac(), sites_frac[0])
    assert cpp.count_sites() == sites_frac.size()-1
    assert cpp.at_end()
    cpp.restart()
    n = 0
    for site in cpp: n += 1
    assert n == sites_frac.size()
  return sites_frac
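
The last few lines apply the change-of-basis operator to every site as a 3x3 matrix plus a translation (rt[:9] and rt[9:]). Assuming the usual row-major 9+3 layout, a plain-Python sketch of that transform on invented sites:

def apply_rt(rt, sites):
  """Apply r = rt[:9] (row-major 3x3) then t = rt[9:] to each 3-vector."""
  r, t = rt[:9], rt[9:]
  result = []
  for x, y, z in sites:
    result.append((
      r[0]*x + r[1]*y + r[2]*z + t[0],
      r[3]*x + r[4]*y + r[5]*z + t[1],
      r[6]*x + r[7]*y + r[8]*z + t[2]))
  return result

# Identity rotation with a (0.5, 0, 0) shift, applied to two invented sites.
rt = (1, 0, 0, 0, 1, 0, 0, 0, 1, 0.5, 0.0, 0.0)
print(apply_rt(rt, [(0.1, 0.2, 0.3), (0.0, 0.0, 0.0)]))
# [(0.6, 0.2, 0.3), (0.5, 0.0, 0.0)]
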
 def update_solvent_and_scale_2(self, fast, params, apply_back_trace,
                                refine_hd_scattering, log):
   if(params is None): params = bss.master_params.extract()
   if(self.xray_structure is not None):
     # Figure out Fcalc and Fmask based on presence of H
     hd_selection = self.xray_structure.hd_selection()
     xrs_no_h = self.xray_structure.select(~hd_selection)
     xrs_h    = self.xray_structure.select(hd_selection)
   # Create data container for scalers. If H scattering is refined then it is
   # assumed that self.f_calc() does not contain H contribution at all.
   fmodel_kbu = mmtbx.f_model.manager_kbu(
     f_obs   = self.f_obs(),
     f_calc  = self.f_calc(),
     f_masks = self.f_masks(),
     ss      = self.ss)
    # Compute k_total and k_mask using one of two methods (analytical or minimization).
   # Note: this intentionally ignores previously existing f_part1 and f_part2.
   #
   k_sol, b_sol, b_cart, b_adj = [None,]*4
   if(fast): # analytical
     assert len(fmodel_kbu.f_masks)==1
     result = mmtbx.bulk_solvent.scaler.run_simple(
       fmodel_kbu     = fmodel_kbu,
       r_free_flags   = self.r_free_flags(),
       bulk_solvent   = params.bulk_solvent,
       bin_selections = self.bin_selections)
     r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
   else: # using minimization: exp solvent and scale model (k_sol,b_sol,b_cart)
     result = bss.bulk_solvent_and_scales(
       fmodel_kbu = fmodel_kbu,
       params     = params)
     k_sol, b_sol, b_cart = result.k_sols(), result.b_sols(), result.b_cart()
     r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
   if(apply_back_trace and len(fmodel_kbu.f_masks)==1 and
      self.xray_structure is not None):
     o = result.apply_back_trace_of_overall_exp_scale_matrix(
       xray_structure = self.xray_structure)
     b_adj = o.b_adj
     if(not fast): b_sol, b_cart = [o.b_sol], o.b_cart
     self.update_xray_structure(
       xray_structure = o.xray_structure,
       update_f_calc  = True)
     fmodel_kbu = fmodel_kbu.update(f_calc = self.f_calc())
     self.show(prefix = "overall B=%s to atoms"%str("%7.2f"%o.b_adj).strip(),
       log = log)
   # Update self with new arrays so that H correction knows current R factor.
   # If no H to account for, then this is the final result.
   k_masks       = result.k_masks()
   k_anisotropic = result.k_anisotropic()
   k_isotropic   = result.k_isotropic()
   self.update_core(
     k_mask        = k_masks,
     k_anisotropic = k_anisotropic,
     k_isotropic   = k_isotropic)
   self.show(prefix = "bulk-solvent and scaling", log = log)
   # Consistency check
   assert approx_equal(self.r_all(), r_all_from_scaler)
   # Add contribution from H (if present and riding). This goes to f_part2.
   kh, bh = 0, 0
   if(refine_hd_scattering and
      self.need_to_refine_hd_scattering_contribution()):
      # Invalidate (zero out) the previous f_part2 contribution
     f_part2 = fmodel_kbu.f_calc.array(data=fmodel_kbu.f_calc.data()*0)
     self.update_core(f_part2 = f_part2)
     xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
     f_h = self.compute_f_calc(xray_structure = xrs_h)
     # Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
     data = fmodel_kbu.f_calc.data()
     for k_mask_, f_mask_ in zip(k_masks, fmodel_kbu.f_masks):
       data = data + k_mask_*f_mask_.data()
     f_calc_plus_f_bulk_no_scales = fmodel_kbu.f_calc.array(data = data)
     # Consistency check
     assert approx_equal(self.f_model().data(),
       f_calc_plus_f_bulk_no_scales.data()*k_isotropic*k_anisotropic)
     assert approx_equal(self.f_model_no_scales().data(),
       f_calc_plus_f_bulk_no_scales.data())
     #
     # Compute contribution from H (F_H)
     #
     # Coarse sampling
     b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
     b_min = int(max(0,b_mean)*0.5)
     b_max = int(b_mean*1.5)
     sc = 1000.
     kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
     br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
     o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
       f_obs       = fmodel_kbu.f_obs.data(),
       f_calc      = f_calc_plus_f_bulk_no_scales.data(),
       f_mask      = f_h.data(),
       k_total     = k_isotropic*k_anisotropic,
       ss          = fmodel_kbu.ss,
       k_sol_range = flex.double(kr),
       b_sol_range = flex.double(br),
       r_ref       = self.r_work())
     if(o.updated()):
       f_part2 = f_h.array(data = o.k_mask()*f_h.data())
       kh, bh = o.k_sol(), o.b_sol()
       self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
     # Fine sampling
     k_min = max(0,o.k_sol()-0.1)
     k_max = o.k_sol()+0.1
     b_min = max(0,o.b_sol()-5.)
     b_max = o.b_sol()+5.
     kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
     br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(1.*sc))]
     o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
       f_obs       = fmodel_kbu.f_obs.data(),
       f_calc      = f_calc_plus_f_bulk_no_scales.data(),
       f_mask      = f_h.data(),
       k_total     = k_isotropic*k_anisotropic,
       ss          = fmodel_kbu.ss,
       k_sol_range = flex.double(kr),
       b_sol_range = flex.double(br),
       r_ref       = o.r())
     if(o.updated()):
       f_part2 = f_h.array(data = o.k_mask()*f_h.data())
       kh, bh = o.k_sol(), o.b_sol()
       self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
      # This helps when fast=True is used; verify how it behaves in practice.
     #
     if(fast):
       fmodel_kbu_ = mmtbx.f_model.manager_kbu(
         f_obs   = self.f_obs(),
         f_calc  = f_calc_plus_f_bulk_no_scales,
         f_masks = [f_part2],
         ss      = self.ss)
       result = mmtbx.bulk_solvent.scaler.run_simple(
         fmodel_kbu     = fmodel_kbu_,
         r_free_flags   = self.r_free_flags(),
         bulk_solvent   = params.bulk_solvent,
         bin_selections = self.bin_selections)
       f_part2 = f_part2.array(data = result.core.k_mask()*f_part2.data())
       k_isotropic   = result.core.k_isotropic*result.core.k_isotropic_exp
       k_anisotropic = result.core.k_anisotropic
     # Update self with final scales
     self.update_core(
       k_mask        = k_masks,
       k_anisotropic = k_anisotropic,
       k_isotropic   = k_isotropic,
       f_part2       = f_part2)
     # Make sure what came out of scaling matches what self thinks it really is
     # It must match at least up to 1.e-6.
     self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
     if(fast):
       assert approx_equal(result.r_factor(), self.r_work())
     else:
       assert approx_equal(self.r_all(), o.r()), [self.r_all(), o.r()]
   return group_args(
     k_sol  = k_sol,
     b_sol  = b_sol,
     b_cart = b_cart,
     k_h    = kh,
     b_h    = bh,
     b_adj  = b_adj)
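
Both the coarse and fine passes above build their k_sol/b_sol trial grids by scaling the bounds to integers so that range() can step in fractional increments. A small sketch of that helper pattern with illustrative bounds (the helper name is not from mmtbx):

import math

def sampling_grid(lo, hi, step, sc=1000.):
  """Inclusive trial grid from lo to hi in the given step, via integer scaling."""
  return [i/sc for i in range(int(math.floor(lo*sc)),
                              int(math.ceil(hi*sc)) + 1,
                              int(step*sc))]

# Coarse k_sol trials 0.0 .. 1.5 in steps of 0.1, as in the coarse pass above.
print(sampling_grid(0.0, 1.5, 0.1))
# [0.0, 0.1, 0.2, ..., 1.4, 1.5]
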
Example 53
 def get_zoom_box (self, x, y, boxsize=400, mag=16) :
   #assert ((boxsize % mag) == 0)
   n_pixels = iceil(boxsize / mag)
   x0 = min(self.img_w - n_pixels, ifloor(x - (n_pixels / 2)))
   y0 = min(self.img_h - n_pixels, ifloor(y - (n_pixels / 2)))
   return (x0, y0, n_pixels, n_pixels)
Example 54
 def line_between_points(self, x1, y1, x2, y2, n_values=100):
     """
 Given two points on the image, sample intensities along a line connecting
 them (using linear interpolation).  This also calculates the coordinates
 of each sample point, which is used for lattice dimension calculations
 once peaks have been identified.  Arguments are in image pixel coordinates
 (starting at 1,1).
 """
     x1_, y1_ = self.image_coords_as_array_coords(x1, y1)
     x2_, y2_ = self.image_coords_as_array_coords(x2, y2)
      # note: the requested n_values is discarded; the line is resampled at one
      # point per pixel of its length
      n_values = ifloor(math.sqrt((x2_ - x1_)**2 + (y2_ - y1_)**2))
     delta_x = (x2_ - x1_) / (n_values - 1)
     delta_y = (y2_ - y1_) / (n_values - 1)
     vals = []
     img_coords = []
     d = self._raw.linearintdata
     # TODO remarkably, this is reasonably fast in Python, but it would
     # probably be more at home in scitbx.math
     for n in range(n_values):
         x = x1_ + (n * delta_x)
         y = y1_ + (n * delta_y)
         xd, yd = self.array_coords_as_detector_coords(x, y)
         img_coords.append((xd, yd))
         x_1 = ifloor(x)
         x_2 = iceil(x)
         y_1 = ifloor(y)
         y_2 = iceil(y)
         v11 = d[(x_1, y_1)]
         v12 = d[(x_1, y_2)]
         v21 = d[(x_2, y_1)]
         v22 = d[(x_2, y_2)]
         if (x_2 == x_1):
             if (y_2 == y_1):
                 vxy = v11
             else:
                 vxy = ((v12 * (y - y_1)) + (v11 * (y_2 - y))) / (y_2 - y_1)
         elif (y_2 == y_1):
             vxy = ((v21 * (x - x_1)) + (v11 * (x_2 - x))) / (x_2 - x_1)
         else:
             dxdy = (y_2 - y_1) * (x_2 - x_1)
             vxy = ((v11 / dxdy) * (x_2 - x) * (y_2 - y)) + \
                   ((v21 / dxdy) * (x - x_1) * (y_2 - y)) + \
                   ((v12 / dxdy) * (x_2 - x) * (y - y_1)) + \
                   ((v22 / dxdy) * (x - x_1) * (y - y_1))
         vals.append(vxy)
     lattice_length = None
     if (len(vals) > 5):
         # first find peaks in the profile
         peaks = []
         avg = sum(vals) / len(vals)
         filtered_vals = []
         for x in vals:
             if (x <= avg * 3):
                 filtered_vals.append(x)
         background = sum(filtered_vals) / len(filtered_vals)
         i = 2
         while (i < len(vals) - 2):
             x = vals[i]
             if (x <= background):
                 pass
             elif ((x > vals[i - 1]) and (x > vals[i - 2])
                   and (x > vals[i + 1]) and (x > vals[i + 2])):
                 peaks.append(i)
             i += 1
         if (len(peaks) > 0):
             # calculate the average lattice length
             center_x, center_y = self.get_beam_center_mm()
             distances = []
             i = 1
             while (i < len(peaks)):
                  # use distinct names so the x1..y2 arguments are not clobbered
                  px1, py1 = img_coords[peaks[i - 1]]
                  px2, py2 = img_coords[peaks[i]]
                  rs_distance = rstbx.utils.reciprocal_space_distance(
                      px1,
                      py1,
                      px2,
                      py2,
                     wavelength=self.get_wavelength(),
                     center_x=center_x,
                     center_y=center_y,
                     distance=self.get_detector_distance(),
                     detector_two_theta=self.get_detector_2theta(),
                     distance_is_corrected=True)
                 assert (rs_distance > 0)
                 distances.append(1 / rs_distance)
                 i += 1
             lattice_length = sum(distances) / len(distances)
     distance = self.distance_between_points(x1, y1, x2, y2)
     return line_profile(vals, distance, lattice_length)
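
The peak search in the profile above is a crude background estimate (the mean of values no greater than three times the raw mean) followed by a local-maximum test against the two neighbours on each side. A stand-alone sketch of that logic on an invented profile:

def find_peaks(vals):
  """Local maxima above a crude background, as in the profile code above."""
  if (len(vals) <= 5):
    return []
  avg = sum(vals) / len(vals)
  # Background = mean of the values that are not more than 3x the raw mean.
  filtered = [v for v in vals if v <= avg*3]
  background = sum(filtered) / len(filtered)
  peaks = []
  for i in range(2, len(vals) - 2):
    v = vals[i]
    if ((v > background) and
        (v > vals[i-1]) and (v > vals[i-2]) and
        (v > vals[i+1]) and (v > vals[i+2])):
      peaks.append(i)
  return peaks

print(find_peaks([1, 1, 1, 9, 1, 1, 1, 8, 1, 1, 1]))  # [3, 7]
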
  def update_solvent_and_scale_twin(self, refine_hd_scattering, log):
    if(not self.twinned()): return
    assert len(self.f_masks()) == 1
    # Re-set all scales to unit or zero
    self.show(prefix = "update scales twin start", log = log)
    self.reset_all_scales()
    self.show(prefix = "reset f_part, k_(total,mask)", log = log)
    f_calc_data      = self.f_calc().data()
    f_calc_data_twin = self.f_calc_twin().data()
    # Initial trial set
    sc = 1000.
    ksr = [i/sc for i in range(ifloor(0*sc), iceil(0.6*sc)+1,  int(0.05*sc))]
    bsr = [i/sc for i in range(ifloor(0*sc), iceil(150.*sc)+1, int(10.*sc))]
    o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
      f_obs          = self.f_obs().data(),
      f_calc_1       = f_calc_data,
      f_calc_2       = f_calc_data_twin,
      f_mask_1       = self.arrays.core.f_masks[0].data(),
      f_mask_2       = self.arrays.core_twin.f_masks[0].data(),
      ss             = self.ss,
      twin_fraction  = self.twin_fraction,
      k_sol_range    = flex.double(ksr),
      b_sol_range    = flex.double(bsr),
      miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
      unit_cell      = self.f_obs().unit_cell(),
      r_ref          = self.r_all())
    if(o_kbu_sol.updated()):
      self.update(
        k_mask        = o_kbu_sol.k_mask(),
        k_anisotropic = o_kbu_sol.k_anisotropic())
    # Second (finer) trial set
    k_min = max(o_kbu_sol.k_sol()-0.05, 0)
    k_max = min(o_kbu_sol.k_sol()+0.05, 0.6)
    ksr = [i/sc for i in range(ifloor(k_min*sc), iceil(k_max*sc)+1, int(0.01*sc))]
    b_min = max(o_kbu_sol.b_sol()-10, 0)
    b_max = min(o_kbu_sol.b_sol()+10, 150)
    bsr = [i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(1.*sc))]
    o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
      f_obs          = self.f_obs().data(),
      f_calc_1       = f_calc_data,
      f_calc_2       = f_calc_data_twin,
      f_mask_1       = self.arrays.core.f_masks[0].data(),
      f_mask_2       = self.arrays.core_twin.f_masks[0].data(),
      ss             = self.ss,
      twin_fraction  = self.twin_fraction,
      k_sol_range    = flex.double(ksr),
      b_sol_range    = flex.double(bsr),
      miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
      unit_cell      = self.f_obs().unit_cell(),
      r_ref          = o_kbu_sol.r())
    if(o_kbu_sol.updated()):
      self.update(
        k_mask        = o_kbu_sol.k_mask(),
        k_anisotropic = o_kbu_sol.k_anisotropic())
      assert approx_equal(self.r_all(), o_kbu_sol.r())
      ##############
      # use apply_back_trace in if below
      if(self.xray_structure is not None):
        o = mmtbx.bulk_solvent.scaler.tmp(
          xray_structure = self.xray_structure,
          k_anisotropic  = o_kbu_sol.k_anisotropic(),
          k_masks        = [o_kbu_sol.k_mask()],
          ss             = self.ss)
        self.update_xray_structure(
          xray_structure = o.xray_structure,
          update_f_calc  = True)
      #############
        self.update(
          k_mask        = o.k_masks,
          k_anisotropic = o.k_anisotropic)

    self.show(prefix = "bulk-solvent and scaling", log = log)
    #
    # Add contribution from H (if present and riding). This goes to f_part2.
    #
    kh, bh = 0, 0
    if(refine_hd_scattering and
       self.need_to_refine_hd_scattering_contribution()):
      hd_selection = self.xray_structure.hd_selection()
      xrs_no_h = self.xray_structure.select(~hd_selection)
      xrs_h    = self.xray_structure.select(hd_selection)
      # Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
      data = self.f_calc().data()+self.f_masks()[0].data()*self.k_masks()[0]
      f_calc_plus_f_bulk_no_scales = self.f_calc().array(data = data)
      data = self.f_calc_twin().data()+\
        self.f_masks_twin()[0].data()*self.k_masks_twin()[0]
      f_calc_plus_f_bulk_no_scales_twin = self.f_calc_twin().array(data = data)
      # Initial FH contribution
      xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
      f_h = self.compute_f_calc(xray_structure = xrs_h)
      f_h_twin = self.compute_f_calc(xray_structure = xrs_h,
        miller_array = self.f_calc_twin())
      # Coarse sampling
      b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
      b_min = int(max(0,b_mean)*0.5)
      b_max = int(b_mean*1.5)
      sc = 1000.
      kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
      br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
      obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
        f_obs          = self.f_obs().data(),
        f_calc_1       = f_calc_plus_f_bulk_no_scales.data(),
        f_calc_2       = f_calc_plus_f_bulk_no_scales_twin.data(),
        f_mask_1       = f_h.data(),
        f_mask_2       = f_h_twin.data(),
        ss             = self.ss,
        twin_fraction  = self.twin_fraction,
        k_sol_range    = flex.double(kr),
        b_sol_range    = flex.double(br),
        miller_indices = self.f_obs().indices(), # XXX What about twin-related?
        unit_cell      = self.f_obs().unit_cell(),
        r_ref          = self.r_work())
      if(obj.updated()):
        f_part2      = f_h.array(     data = obj.k_mask()*f_h.data())
        f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
        kh, bh = obj.k_sol(), obj.b_sol()
      # Fine sampling
      k_min = max(0,obj.k_sol()-0.1)
      k_max = obj.k_sol()+0.1
      b_min = max(0,obj.b_sol()-5.)
      b_max = obj.b_sol()+5.
      kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
      br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(5.*sc))]
      obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
        f_obs          = self.f_obs().data(),
        f_calc_1       = f_calc_plus_f_bulk_no_scales.data(),
        f_calc_2       = f_calc_plus_f_bulk_no_scales_twin.data(),
        f_mask_1       = f_h.data(),
        f_mask_2       = f_h_twin.data(),
        ss             = self.ss,
        twin_fraction  = self.twin_fraction,
        k_sol_range    = flex.double(kr),
        b_sol_range    = flex.double(br),
        miller_indices = self.f_obs().indices(), # XXX What about twin-related?
        unit_cell      = self.f_obs().unit_cell(),
        r_ref          = obj.r())
      if(obj.updated()):
        f_part2      = f_h.array(     data = obj.k_mask()*f_h.data())
        f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
        kh, bh = obj.k_sol(), obj.b_sol()
      self.update_core(
        f_part2       = f_part2,
        f_part2_twin  = f_part2_twin,
        k_anisotropic = obj.k_anisotropic())
      self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
    b_cart = adptbx.u_as_b(adptbx.u_star_as_u_cart(
                             self.f_obs().unit_cell(), o_kbu_sol.u_star()))
    return group_args(
      k_sol  = o_kbu_sol.k_sol(),
      b_sol  = o_kbu_sol.b_sol(),
      b_cart = b_cart,
      k_h    = kh,
      b_h    = bh)
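
Both update_solvent_and_scale_2 and the twin variant finish the solvent/H search the same way: a coarse grid over k_sol/b_sol, then a finer grid bracketing the best coarse value. A toy one-dimensional illustration of that coarse-then-fine pattern (the objective and helper below are invented):

def grid_minimize(f, values):
  """Return the trial value minimizing f."""
  return min(values, key=f)

def target(k):   # invented one-dimensional objective
  return (k - 0.37)**2

coarse = [i/100. for i in range(0, 151, 10)]             # 0.00 .. 1.50, step 0.10
k1 = grid_minimize(target, coarse)
fine = [i/100. for i in range(max(0, int(round(k1*100)) - 10),
                              int(round(k1*100)) + 11)]  # k1 +/- 0.10, step 0.01
k2 = grid_minimize(target, fine)
print(k1, k2)  # 0.4 0.37
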