Example #1
def plot_positions(values, positions, file_name, cmap=pyplot.cm.Reds,
                   vmin=None, vmax=None, invalid='white'):
    values = values.as_double()
    assert positions.size() >= values.size()
    positions = positions[:values.size()]

    if vmin is None:
      vmin = flex.min(values)
    if vmax is None:
      vmax = flex.max(values)

    x, y = positions.parts()
    dx = flex.abs(x[1:] - x[:-1])
    dy = flex.abs(y[1:] - y[:-1])
    dx = dx.select(dx > 0)
    dy = dy.select(dy > 0)

    scale = 1/flex.min(dx)
    #print scale
    x = (x * scale).iround()
    y = (y * scale).iround()

    from libtbx.math_utils import iceil
    z = flex.double(flex.grid(iceil(flex.max(y))+1, iceil(flex.max(x))+1), -2)
    #print z.all()
    for x_, y_, z_ in zip(x, y, values):
      z[y_, x_] = z_

    plot_grid(z.as_1d(), z.all(), file_name, cmap=cmap, vmin=vmin, vmax=vmax,
              invalid=invalid)
    return
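A minimal numpy sketch of the same rasterisation idea (a hypothetical stand-in for readers without cctbx; it assumes non-negative coordinates, as the flex version above does implicitly):

import numpy as np

def rasterise(values, x, y):
    # Scale by the smallest positive x-spacing, as plot_positions does.
    dx = np.abs(np.diff(x))
    scale = 1.0 / dx[dx > 0].min()
    xi = np.rint(x * scale).astype(int)
    yi = np.rint(y * scale).astype(int)
    # Pre-fill with -2, the value plot_grid presumably treats as invalid.
    grid = np.full((yi.max() + 1, xi.max() + 1), -2.0)
    grid[yi, xi] = values
    return grid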
Example #2
def blank_integrated_analysis(reflections, scan, phi_step, fractional_loss):
    prf_sel = reflections.get_flags(reflections.flags.integrated_prf)
    if prf_sel.count(True) > 0:
        reflections = reflections.select(prf_sel)
        intensities = reflections["intensity.prf.value"]
        variances = reflections["intensity.prf.variance"]
    else:
        sum_sel = reflections.get_flags(reflections.flags.integrated_sum)
        reflections = reflections.select(sum_sel)
        intensities = reflections["intensity.sum.value"]
        variances = reflections["intensity.sum.variance"]

    i_sigi = intensities / flex.sqrt(variances)

    xyz_px = reflections["xyzobs.px.value"]
    x_px, y_px, z_px = xyz_px.parts()
    phi = scan.get_angle_from_array_index(z_px)

    osc = scan.get_oscillation()[1]
    n_images_per_step = iceil(phi_step / osc)
    phi_step = n_images_per_step * osc

    phi_min = flex.min(phi)
    phi_max = flex.max(phi)
    n_steps = iceil((phi_max - phi_min) / phi_step)

    hist = flex.histogram(z_px, n_slots=n_steps)

    mean_i_sigi = flex.double()
    for i, slot_info in enumerate(hist.slot_infos()):
        sel = (z_px >= slot_info.low_cutoff) & (z_px < slot_info.high_cutoff)
        if sel.count(True) == 0:
            mean_i_sigi.append(0)
        else:
            mean_i_sigi.append(flex.mean(i_sigi.select(sel)))
    fractional_mean_i_sigi = mean_i_sigi / flex.max(mean_i_sigi)

    potential_blank_sel = mean_i_sigi <= (fractional_loss * flex.max(mean_i_sigi))

    xmin, xmax = zip(*[(slot_info.low_cutoff, slot_info.high_cutoff) for slot_info in hist.slot_infos()])

    d = {
        "data": [
            {
                "x": list(hist.slot_centers()),
                "y": list(mean_i_sigi),
                "xlow": xmin,
                "xhigh": xmax,
                "blank": list(potential_blank_sel),
                "type": "bar",
                "name": "blank_counts_analysis",
            }
        ],
        "layout": {"xaxis": {"title": "z observed (images)"}, "yaxis": {"title": "Number of reflections"}, "bargap": 0},
    }

    blank_regions = blank_regions_from_sel(d["data"][0])
    d["blank_regions"] = blank_regions

    return d
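The per-slot averaging above without the flex types, as a small numpy sketch (a hypothetical helper, not the dials API): bin the reflections by image number and take the mean I/sigI of each non-empty bin, with empty bins set to 0 as in the loop above.

import numpy as np

def mean_i_sigi_per_bin(z_px, i_sigi, n_bins):
    # z_px and i_sigi are numpy arrays of equal length.
    edges = np.linspace(z_px.min(), z_px.max(), n_bins + 1)
    idx = np.clip(np.digitize(z_px, edges) - 1, 0, n_bins - 1)
    return np.array([i_sigi[idx == b].mean() if (idx == b).any() else 0.0
                     for b in range(n_bins)])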
Example #4
def torsion_search_nested (
      clusters,
      sites_cart,
      last_chi_symmetric=None,
      increment_degrees=10) :
  """
  Iterate over all possible sidechain Chi angle combinations.
  """
  from scitbx.array_family import flex
  from scitbx.matrix import rotate_point_around_axis
  n_angles = len(clusters)
  assert (n_angles >= 1)
  angle_range = 180
  r1 = [ifloor(-angle_range/increment_degrees)] * n_angles
  r2 = [iceil(angle_range/increment_degrees)] * n_angles
  if (last_chi_symmetric) :
    r1[-1] = ifloor(-90/increment_degrees)
    r2[-1] = iceil(90/increment_degrees)
  nested_loop = flex.nested_loop(begin=r1, end=r2, open_range=False)
  selection = clusters[0].atoms_to_rotate
  for angles in nested_loop:
    xyz_moved = sites_cart.deep_copy()
    for i, angle_fraction in enumerate(angles):
      cl = clusters[i]
      for atom in cl.atoms_to_rotate:
        new_xyz = rotate_point_around_axis(
          axis_point_1 = xyz_moved[cl.axis[0]],
          axis_point_2 = xyz_moved[cl.axis[1]],
          point        = xyz_moved[atom],
          angle        = angle_fraction*increment_degrees,
          deg=True)
        xyz_moved[atom] = new_xyz
    yield xyz_moved
Exemple #5
0
def torsion_search_nested(clusters,
                          sites_cart,
                          last_chi_symmetric=None,
                          increment_degrees=10):
    """
  Iterate over all possible sidechain Chi angle combinations.
  """
    from scitbx.array_family import flex
    from scitbx.matrix import rotate_point_around_axis
    n_angles = len(clusters)
    assert (n_angles >= 1)
    angle_range = 180
    r1 = [ifloor(-angle_range / increment_degrees)] * n_angles
    r2 = [iceil(angle_range / increment_degrees)] * n_angles
    if (last_chi_symmetric):
        r1[-1] = ifloor(-90 / increment_degrees)
        r2[-1] = iceil(90 / increment_degrees)
    nested_loop = flex.nested_loop(begin=r1, end=r2, open_range=False)
    selection = clusters[0].atoms_to_rotate
    for angles in nested_loop:
        xyz_moved = sites_cart.deep_copy()
        for i, angle_fraction in enumerate(angles):
            cl = clusters[i]
            for atom in cl.atoms_to_rotate:
                new_xyz = rotate_point_around_axis(
                    axis_point_1=xyz_moved[cl.axis[0]],
                    axis_point_2=xyz_moved[cl.axis[1]],
                    point=xyz_moved[atom],
                    angle=angle_fraction * increment_degrees,
                    deg=True)
                xyz_moved[atom] = new_xyz
        yield xyz_moved
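With the default 10-degree increment each unrestricted chi angle takes the 37 grid values -180, -170, ..., 180 (a symmetric last chi is restricted to -90..90). A pure-Python sketch of the same enumeration, with itertools.product standing in for flex.nested_loop (assumed here to include its end points when open_range=False):

import itertools

def chi_angle_grid(n_angles, increment_degrees=10, angle_range=180):
    lo = -angle_range // increment_degrees       # ifloor(-range/increment)
    hi = -(-angle_range // increment_degrees)    # iceil(range/increment)
    for steps in itertools.product(range(lo, hi + 1), repeat=n_angles):
        yield tuple(s * increment_degrees for s in steps)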
Example #6
def exercise_real_space_refinement(verbose):
    if (verbose):
        out = sys.stdout
    else:
        out = StringIO()
    out_of_bounds_clamp = maptbx.out_of_bounds_clamp(0)
    out_of_bounds_raise = maptbx.out_of_bounds_raise()
    crystal_symmetry = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90),
                                        space_group_symbol="P 1")
    xray_structure = xray.structure(crystal_symmetry=crystal_symmetry,
                                    scatterers=flex.xray_scatterer([
                                        xray.scatterer(label="C",
                                                       site=(0, 0, 0))
                                    ]))
    miller_set = miller.build_set(crystal_symmetry=crystal_symmetry,
                                  anomalous_flag=False,
                                  d_min=1)
    f_calc = miller_set.structure_factors_from_scatterers(
        xray_structure=xray_structure).f_calc()
    fft_map = f_calc.fft_map()
    fft_map.apply_sigma_scaling()
    real_map = fft_map.real_map_unpadded()
    #### unit_cell test
    delta_h = .005
    basic_map = maptbx.basic_map(
        maptbx.basic_map_unit_cell_flag(), real_map, real_map.focus(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell())
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
    ### non_symmetric test
    #
    minfrac = crystal_symmetry.unit_cell().fractionalize((-5, -5, -5))
    maxfrac = crystal_symmetry.unit_cell().fractionalize((5, 5, 5))
    gridding_first = [ifloor(n * b) for n, b in zip(fft_map.n_real(), minfrac)]
    gridding_last = [iceil(n * b) for n, b in zip(fft_map.n_real(), maxfrac)]
    data = maptbx.copy(real_map, gridding_first, gridding_last)
    #
    basic_map = maptbx.basic_map(
        maptbx.basic_map_non_symmetric_flag(), data, fft_map.n_real(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell())
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
    ### asu test
    #
    minfrac = crystal_symmetry.unit_cell().fractionalize((0, 0, 0))
    maxfrac = crystal_symmetry.unit_cell().fractionalize((10, 10, 10))
    gridding_first = [ifloor(n * b) for n, b in zip(fft_map.n_real(), minfrac)]
    gridding_last = [iceil(n * b) for n, b in zip(fft_map.n_real(), maxfrac)]
    data = maptbx.copy(real_map, gridding_first, gridding_last)
    #
    basic_map = maptbx.basic_map(
        maptbx.basic_map_asu_flag(), data, crystal_symmetry.space_group(),
        crystal_symmetry.direct_space_asu().as_float_asu(), real_map.focus(),
        crystal_symmetry.unit_cell().orthogonalization_matrix(),
        out_of_bounds_clamp.as_handle(), crystal_symmetry.unit_cell(), 0.5,
        True)
    testing_function_for_rsfit(basic_map, delta_h, xray_structure, out)
Example #7
def blank_counts_analysis(reflections, scan, phi_step, fractional_loss):
    if not len(reflections):
        raise Sorry('Input contains no reflections')

    xyz_px = reflections['xyzobs.px.value']
    x_px, y_px, z_px = xyz_px.parts()
    phi = scan.get_angle_from_array_index(z_px)

    osc = scan.get_oscillation()[1]
    n_images_per_step = iceil(phi_step / osc)
    phi_step = n_images_per_step * osc

    array_range = scan.get_array_range()
    phi_min = scan.get_angle_from_array_index(array_range[0])
    phi_max = scan.get_angle_from_array_index(array_range[1])
    assert phi_min <= flex.min(phi)
    assert phi_max >= flex.max(phi)
    n_steps = iceil((phi_max - phi_min) / phi_step)

    hist = flex.histogram(z_px, n_slots=n_steps)

    counts = hist.slots()
    fractional_counts = counts.as_double() / flex.max(counts)

    potential_blank_sel = fractional_counts <= fractional_loss

    xmin, xmax = zip(*[(slot_info.low_cutoff, slot_info.high_cutoff)
                       for slot_info in hist.slot_infos()])

    d = {
        'data': [{
            'x': list(hist.slot_centers()),
            'y': list(hist.slots()),
            'xlow': xmin,
            'xhigh': xmax,
            'blank': list(potential_blank_sel),
            'type': 'bar',
            'name': 'blank_counts_analysis'
        }],
        'layout': {
            'xaxis': {
                'title': 'z observed (images)'
            },
            'yaxis': {
                'title': 'Number of reflections'
            },
            'bargap': 0,
        },
    }

    blank_regions = blank_regions_from_sel(d['data'][0])
    d['blank_regions'] = blank_regions

    return d
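blank_regions_from_sel() is not shown on this page; a minimal sketch of what it plausibly does, assuming it merges runs of consecutive bins flagged as blank into (start, end) intervals on the x axis:

def blank_regions_from_sel(data):
    regions = []
    start = end = None
    for low, high, blank in zip(data['xlow'], data['xhigh'], data['blank']):
        if blank:
            if start is None:
                start = low
            end = high
        elif start is not None:
            regions.append((start, end))
            start = end = None
    if start is not None:
        regions.append((start, end))
    return regions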
Example #8
 def __init__(self, sweep, params):
   self.params = params
   flex.set_random_seed(params.random_seed)
   unit_cell = params.unit_cell
   assert unit_cell is not None
   sgi = params.space_group
   if sgi is None:
     sgi = sgtbx.space_group_info(symbol="P 1")
   B = scitbx.matrix.sqr(unit_cell.fractionalization_matrix()).transpose()
   U = scitbx.matrix.sqr(flex.random_double_r3_rotation_matrix())
   direct_matrix = (U * B).inverse()
   crystal_model = Crystal(direct_matrix[0:3],
                           direct_matrix[3:6],
                           direct_matrix[6:9],
                           space_group=sgi.group())
   scan = sweep.get_scan()
   angle = self.params.rotation_angle
   scan.set_image_range((1, iceil(angle/scan.get_oscillation()[1])))
   predicted = predict_reflections(sweep, crystal_model)
   beam_vectors = predicted.beam_vector()
   S = beam_vectors - sweep.get_beam().get_s0()
   centroids = S.rotate_around_origin(sweep.get_goniometer().get_rotation_axis(),
                                      -predicted.rotation_angle())
   self.d_min = self.params.reciprocal_space_grid.d_min
   self.gridding = tuple([self.params.reciprocal_space_grid.n_points]*3)
   centroids = centroids.select((1/centroids.norms())>=self.d_min)
   assert len(centroids) > 0
   self.map_to_grid(sweep, centroids)
   self.fft()
   debug_write_reciprocal_lattice_points_as_pdb(centroids)
   self.debug_write_ccp4_map(self.grid_real, "fft.map")
Example #9
 def __init__(self, sweep, params):
     self.params = params
     flex.set_random_seed(params.random_seed)
     unit_cell = params.unit_cell
     assert unit_cell is not None
     sgi = params.space_group
     if sgi is None:
         sgi = sgtbx.space_group_info(symbol="P 1")
     B = scitbx.matrix.sqr(unit_cell.fractionalization_matrix()).transpose()
     U = scitbx.matrix.sqr(flex.random_double_r3_rotation_matrix())
     direct_matrix = (U * B).inverse()
     crystal_model = Crystal(
         direct_matrix[0:3],
         direct_matrix[3:6],
         direct_matrix[6:9],
         space_group=sgi.group(),
     )
     scan = sweep.get_scan()
     angle = self.params.rotation_angle
     scan.set_image_range((1, iceil(angle / scan.get_oscillation()[1])))
     predicted = predict_reflections(sweep, crystal_model)
     beam_vectors = predicted.beam_vector()
     S = beam_vectors - sweep.get_beam().get_s0()
     centroids = S.rotate_around_origin(
         sweep.get_goniometer().get_rotation_axis(), -predicted.rotation_angle()
     )
     self.d_min = self.params.reciprocal_space_grid.d_min
     self.gridding = tuple([self.params.reciprocal_space_grid.n_points] * 3)
     centroids = centroids.select((1 / centroids.norms()) >= self.d_min)
     assert len(centroids) > 0
     self.map_to_grid(sweep, centroids)
     self.fft()
     debug_write_reciprocal_lattice_points_as_pdb(centroids)
     self.debug_write_ccp4_map(self.grid_real, "fft.map")
Example #10
def run(args):
    if (len(args) == 0): args = ["--help"]
    command_line = (libtbx.option_parser.option_parser(
        usage="iotbx.python pdb_to_map_simple.py [options] pdb_file..."
    ).option(None,
             "--d_min",
             type="float",
             default=3,
             help="high-resolution limit for structure-factor calculation",
             metavar="FLOAT")).process(args=args)
    d_min = command_line.options.d_min
    assert d_min > 0
    for file_name in command_line.args:
        pdb_inp = iotbx.pdb.input(file_name=file_name)
        xray_structure = pdb_inp.xray_structure_simple()
        xray_structure.show_summary()
        print()
        print("d_min:", d_min)
        f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
        f_calc.show_summary()
        print()
        fft_map = f_calc.fft_map()
        n = fft_map.n_real()
        print("unit cell gridding:", n)
        fft_map.as_xplor_map(file_name="unit_cell.map")
        print()
        block_first = tuple([ifloor(i * 0.2) for i in n])
        block_last = tuple(
            [max(f + 10, iceil(i * 0.7)) for f, i in zip(block_first, n)])
        print("block first:", block_first)
        print("block last: ", block_last)
        fft_map.as_xplor_map(file_name="block.map",
                             gridding_first=block_first,
                             gridding_last=block_last)
        print()
Example #11
def write_xplor_map(sites_cart,
                    unit_cell,
                    map_data,
                    n_real,
                    file_name,
                    buffer=10):
    import iotbx.xplor.map
    if sites_cart is not None:
        frac_min, frac_max = unit_cell.box_frac_around_sites(
            sites_cart=sites_cart, buffer=buffer)
    else:
        frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    gridding = iotbx.xplor.map.gridding(n=map_data.focus(),
                                        first=gridding_first,
                                        last=gridding_last)
    iotbx.xplor.map.writer(file_name=file_name,
                           is_p1_cell=True,
                           title_lines=[
                               ' None',
                           ],
                           unit_cell=unit_cell,
                           gridding=gridding,
                           data=map_data,
                           average=-1,
                           standard_deviation=-1)
Example #12
def calculate_grid_size(min_carts, max_carts, grid_spacing):
    """Calculate the number of points to be sampled for a box size and sampling distance. Returns the number of points to be sampled along each axis. Box may be larger than max_carts."""
    cart_size = tuple(
        [float(max_c - min_c) for min_c, max_c in zip(min_carts, max_carts)])
    grid_size = tuple(
        [iceil(c_size / (1.0 * grid_spacing)) + 1 for c_size in cart_size])
    return grid_size
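A quick worked example, assuming calculate_grid_size() and iceil are in scope as above: a 10 x 10 x 10 A box sampled every 0.5 A needs iceil(10 / 0.5) + 1 = 21 points along each axis.

assert calculate_grid_size((0, 0, 0), (10, 10, 10), 0.5) == (21, 21, 21)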
Example #13
def run(args):
  if (len(args) == 0): args = ["--help"]
  command_line = (libtbx.option_parser.option_parser(
    usage="iotbx.python pdb_to_map_simple.py [options] pdb_file...")
    .option(None, "--d_min",
      type="float",
      default=3,
      help="high-resolution limit for structure-factor calculation",
      metavar="FLOAT")
  ).process(args=args)
  d_min = command_line.options.d_min
  assert d_min > 0
  for file_name in command_line.args:
    pdb_inp = iotbx.pdb.input(file_name=file_name)
    xray_structure = pdb_inp.xray_structure_simple()
    xray_structure.show_summary()
    print
    print "d_min:", d_min
    f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
    f_calc.show_summary()
    print
    fft_map = f_calc.fft_map()
    n = fft_map.n_real()
    print "unit cell gridding:", n
    fft_map.as_xplor_map(file_name="unit_cell.map")
    print
    block_first = tuple([ifloor(i*0.2) for i in n])
    block_last = tuple([max(f+10, iceil(i*0.7)) for f,i in zip(block_first, n)])
    print "block first:", block_first
    print "block last: ", block_last
    fft_map.as_xplor_map(
      file_name="block.map",
      gridding_first=block_first,
      gridding_last=block_last)
    print
Example #14
def write_ccp4_map(sites_cart,
                   unit_cell,
                   map_data,
                   n_real,
                   file_name,
                   buffer=10):
    import iotbx.ccp4_map
    from cctbx import sgtbx
    from scitbx.array_family import flex
    if sites_cart is not None:
        frac_min, frac_max = unit_cell.box_frac_around_sites(
            sites_cart=sites_cart, buffer=buffer)
    else:
        frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
    gridding_first = tuple([ifloor(f * n) for f, n in zip(frac_min, n_real)])
    gridding_last = tuple([iceil(f * n) for f, n in zip(frac_max, n_real)])
    space_group = sgtbx.space_group_info("P1").group()
    iotbx.ccp4_map.write_ccp4_map(
        file_name=file_name,
        unit_cell=unit_cell,
        space_group=space_group,
        gridding_first=gridding_first,
        gridding_last=gridding_last,
        map_data=map_data,
        labels=flex.std_string(["iotbx.map_conversion.write_ccp4_map_box"]))
Example #15
def resample_ordered_list_of_values(vals, redundancy=8):
    """resample a list of values with interpolation"""
    # Number of vals given
    num_inp_vals = len(vals)
    # Number of vals to be returned
    num_samp_vals = int(1 + (num_inp_vals - 1) / redundancy)
    # Sort in descending order
    ordered_vals = sorted(vals, reverse=True)
    sampled_vals = []

    if num_samp_vals == 1:
        return [ordered_vals[0]]
    else:
        sample_dist = (num_inp_vals - 1) / (num_samp_vals - 1)
        # Make sure it doesn't overrun the end of the array
        # while sample_dist*(num_samp_vals-1) > num_inp_vals-1:
        #     sample_dist = 0.99999999*sample_dist

    # Resample points with interpolation
    for i_point in range(num_samp_vals - 1):
        sample_index = sample_dist * i_point
        p1 = ifloor(sample_index)
        v1 = ordered_vals[p1]
        p2 = iceil(sample_index)
        v2 = ordered_vals[p2]
        sample_val = interpolate(x=sample_index, p1=p1, v1=v1, p2=p2, v2=v2)
        sampled_vals.append(sample_val)
    # Add the last point
    sampled_vals.append(ordered_vals[-1])

    assert len(sampled_vals) == num_samp_vals

    return sampled_vals
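interpolate() is not defined in this snippet; a minimal linear-interpolation sketch consistent with how it is called above (sample position x lies between grid indices p1 and p2, which carry values v1 and v2):

def interpolate(x, p1, v1, p2, v2):
    if p1 == p2:  # ifloor(x) == iceil(x) when x lands exactly on a point
        return v1
    return v1 + (v2 - v1) * (x - p1) / float(p2 - p1)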
Example #16
def get_bounds_around_model(
    map_manager=None,
    model=None,
    box_cushion=None,
):
    '''
      Calculate the lower and upper bounds to box around a model
      Allow bounds to go outside the available box (this has to be
        dealt with at the boxing stage)
    '''

    # get items needed to do the shift
    cs = map_manager.crystal_symmetry()
    uc = cs.unit_cell()
    sites_cart = model.get_sites_cart()
    sites_frac = uc.fractionalize(sites_cart)
    map_data = map_manager.map_data()
    # convert box_cushion into fractional vector
    cushion_frac = flex.double(uc.fractionalize((box_cushion, ) * 3))
    # find fractional corners
    frac_min = sites_frac.min()
    frac_max = sites_frac.max()
    frac_max = list(flex.double(frac_max) + cushion_frac)
    frac_min = list(flex.double(frac_min) - cushion_frac)
    # find corner grid nodes
    all_orig = map_data.all()

    lower_bounds = [ifloor(f * n) for f, n in zip(frac_min, all_orig)]
    upper_bounds = [iceil(f * n) for f, n in zip(frac_max, all_orig)]
    return group_args(
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
    )
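The ifloor/iceil pair deliberately rounds the box outwards, so the bounds never clip the cushion. Worked numbers (hypothetical: fractional bounds 0.15 and 0.85 on a 90-point grid):

from libtbx.math_utils import ifloor, iceil

assert ifloor(0.15 * 90) == 13  # 13.5 rounds down
assert iceil(0.85 * 90) == 77   # 76.5 rounds up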
Example #17
def blank_counts_analysis(reflections, scan, phi_step, fractional_loss):
    if not len(reflections):
        raise Sorry("Input contains no reflections")

    xyz_px = reflections["xyzobs.px.value"]
    x_px, y_px, z_px = xyz_px.parts()
    phi = scan.get_angle_from_array_index(z_px)

    osc = scan.get_oscillation()[1]
    n_images_per_step = iceil(phi_step / osc)
    phi_step = n_images_per_step * osc

    array_range = scan.get_array_range()
    phi_min = scan.get_angle_from_array_index(array_range[0])
    phi_max = scan.get_angle_from_array_index(array_range[1])
    assert phi_min <= flex.min(phi)
    assert phi_max >= flex.max(phi)
    n_steps = iceil((phi_max - phi_min) / phi_step)

    hist = flex.histogram(z_px, n_slots=n_steps)

    counts = hist.slots()
    fractional_counts = counts.as_double() / flex.max(counts)

    potential_blank_sel = fractional_counts <= fractional_loss

    xmin, xmax = zip(*[(slot_info.low_cutoff, slot_info.high_cutoff) for slot_info in hist.slot_infos()])

    d = {
        "data": [
            {
                "x": list(hist.slot_centers()),
                "y": list(hist.slots()),
                "xlow": xmin,
                "xhigh": xmax,
                "blank": list(potential_blank_sel),
                "type": "bar",
                "name": "blank_counts_analysis",
            }
        ],
        "layout": {"xaxis": {"title": "z observed (images)"}, "yaxis": {"title": "Number of reflections"}, "bargap": 0},
    }

    blank_regions = blank_regions_from_sel(d["data"][0])
    d["blank_regions"] = blank_regions

    return d
Example #18
def blank_counts_analysis(reflections, scan, phi_step, fractional_loss):
    if not len(reflections):
        raise ValueError("Input contains no reflections")

    xyz_px = reflections["xyzobs.px.value"]
    x_px, y_px, z_px = xyz_px.parts()
    phi = scan.get_angle_from_array_index(z_px)

    osc = scan.get_oscillation()[1]
    n_images_per_step = iceil(phi_step / osc)
    phi_step = n_images_per_step * osc

    array_range = scan.get_array_range()
    phi_min = scan.get_angle_from_array_index(array_range[0])
    phi_max = scan.get_angle_from_array_index(array_range[1])
    assert phi_min <= flex.min(phi)
    assert phi_max >= flex.max(phi)
    n_steps = max(int(round((phi_max - phi_min) / phi_step)), 1)
    hist = flex.histogram(
        z_px, data_min=array_range[0], data_max=array_range[1], n_slots=n_steps
    )
    logger.debug("Histogram:")
    logger.debug(hist.as_str())

    counts = hist.slots()
    fractional_counts = counts.as_double() / flex.max(counts)

    potential_blank_sel = fractional_counts <= fractional_loss

    xmin, xmax = zip(
        *[
            (slot_info.low_cutoff, slot_info.high_cutoff)
            for slot_info in hist.slot_infos()
        ]
    )

    d = {
        "data": [
            {
                "x": list(hist.slot_centers()),
                "y": list(hist.slots()),
                "xlow": xmin,
                "xhigh": xmax,
                "blank": list(potential_blank_sel),
                "type": "bar",
                "name": "blank_counts_analysis",
            }
        ],
        "layout": {
            "xaxis": {"title": "z observed (images)"},
            "yaxis": {"title": "Number of reflections"},
            "bargap": 0,
        },
    }

    blank_regions = blank_regions_from_sel(d["data"][0])
    d["blank_regions"] = blank_regions

    return d
Example #19
    def __init__(self,
                 map_manager,
                 model,
                 box_cushion,
                 wrapping=None,
                 log=sys.stdout):

        self._map_manager = map_manager
        self._model = model

        self._force_wrapping = wrapping
        if wrapping is None:
            wrapping = self.map_manager().wrapping()
        self.basis_for_boxing_string = 'using_model, wrapping = %s' % (
            wrapping)

        # safeguards
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self._map_manager.map_data().accessor().origin() == (0, 0, 0)

        # Make sure working model and map_manager crystal_symmetry match

        assert map_manager.is_compatible_model(model)

        assert box_cushion >= 0

        if self.map_manager().wrapping():  # map must be entire unit cell
            assert map_manager.unit_cell_grid == map_manager.map_data().all()

        # NOTE: We are going to use crystal_symmetry and sites_frac based on
        #   the map_manager (the model could still have different crystal_symmetry)

        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_cart = model.get_sites_cart()
        sites_frac = uc.fractionalize(sites_cart)
        map_data = map_manager.map_data()
        # convert box_cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((box_cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)
        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()

        # Apply boxing to model, ncs, and map (if available)
        self.apply_to_model_ncs_and_map()
Example #20
 def get_padding (text, margin=2, center=self.center) :
   from libtbx.math_utils import ifloor, iceil
   fill = max(0, width - len(text) - (margin * 2))
   if (center) :
     rfill = ifloor(fill / 2)
     lfill = iceil(fill / 2)
   else :
     rfill = 0
     lfill = fill
   return (rfill, lfill)
Example #21
 def get_padding(text, margin=2, center=self.center):
     from libtbx.math_utils import ifloor, iceil
     fill = max(0, width - len(text) - (margin * 2))
     if (center):
         rfill = ifloor(fill / 2)
         lfill = iceil(fill / 2)
     else:
         rfill = 0
         lfill = fill
     return (rfill, lfill)
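Worked example for the centred case (width comes from the enclosing scope in the original; Python 3 division assumed, so fill / 2 is a float and an odd fill splits unevenly):

from libtbx.math_utils import ifloor, iceil

width, text, margin = 20, 'abc', 2
fill = max(0, width - len(text) - margin * 2)         # 13
assert (ifloor(fill / 2), iceil(fill / 2)) == (6, 7)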
Example #22
def exercise_integer():
    from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
    assert iround(0) == 0
    assert iround(1.4) == 1
    assert iround(-1.4) == -1
    assert iround(1.6) == 2
    assert iround(-1.6) == -2
    assert iceil(0) == 0
    assert iceil(1.1) == 2
    assert iceil(-1.1) == -1
    assert iceil(1.9) == 2
    assert iceil(-1.9) == -1
    assert ifloor(0) == 0
    assert ifloor(1.1) == 1
    assert ifloor(-1.1) == -2
    assert ifloor(1.9) == 1
    assert ifloor(-1.9) == -2
    for i in range(-3, 3 + 1):
        assert nearest_integer(i + 0.3) == i
        assert nearest_integer(i + 0.7) == i + 1
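The assertions pin down the semantics; a pure-Python equivalent consistent with them (an assumption for illustration, not the actual libtbx source; tie-breaking at .5 is not exercised above):

import math

def iround(x):
    return int(round(x))

def iceil(x):
    return int(math.ceil(x))

def ifloor(x):
    return int(math.floor(x))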
Example #23
def exercise_integer():
  from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
  assert iround(0) == 0
  assert iround(1.4) == 1
  assert iround(-1.4) == -1
  assert iround(1.6) == 2
  assert iround(-1.6) == -2
  assert iceil(0) == 0
  assert iceil(1.1) == 2
  assert iceil(-1.1) == -1
  assert iceil(1.9) == 2
  assert iceil(-1.9) == -1
  assert ifloor(0) == 0
  assert ifloor(1.1) == 1
  assert ifloor(-1.1) == -2
  assert ifloor(1.9) == 1
  assert ifloor(-1.9) == -2
  for i in xrange(-3,3+1):
    assert nearest_integer(i+0.3) == i
    assert nearest_integer(i+0.7) == i+1
Example #24
 def __init__(O, mt, n_refl, target_type, obs_type):
   O.target_type = target_type
   O.obs_type = obs_type
   O.obs = mt.random_double(size=n_refl)
   O.weights = mt.random_double(size=n_refl)
   rff = flex.bool(max(1,iceil(n_refl*0.6)), False)
   rff.resize(n_refl, True)
   O.r_free_flags = rff.select(mt.random_permutation(size=n_refl))
   O.scale_factor = 1 + mt.random_double()
   O.a = mt.random_double(size=n_refl)
   O.b = mt.random_double(size=n_refl)
Example #25
 def __init__(self,
              params,
              coeffs,
              atom_selection_manager=None,
              xray_structure=None):
     adopt_init_args(self, locals())
     fft_map = coeffs.fft_map(
         resolution_factor=self.params.grid_resolution_factor)
     if (self.params.scale == "volume"): fft_map.apply_volume_scaling()
     elif (self.params.scale == "sigma"): fft_map.apply_sigma_scaling()
     else: raise RuntimeError
     title_lines = [
         "REMARK file: %s" %
         show_string(os.path.basename(self.params.file_name))
     ]
     title_lines.append("REMARK directory: %s" %
                        show_string(os.path.dirname(self.params.file_name)))
     title_lines.append("REMARK %s" % date_and_time())
     assert self.params.region in ["selection", "cell"]
     if (self.params.region == "selection" and xray_structure is not None):
         map_iselection = None
         if atom_selection_manager is not None:
             map_iselection = self.atom_iselection()
         frac_min, frac_max = self.box_around_selection(
             iselection=map_iselection,
             buffer=self.params.atom_selection_buffer)
         n_real = fft_map.n_real()
         gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
         gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
         title_lines.append('REMARK map around selection')
         title_lines.append('REMARK   atom_selection=%s' %
                            show_string(self.params.atom_selection))
         title_lines.append('REMARK   atom_selection_buffer=%.6g' %
                            self.params.atom_selection_buffer)
         if (map_iselection is None):
             sel_size = self.xray_structure.scatterers().size()
         else:
             sel_size = map_iselection.size()
         title_lines.append('REMARK   number of atoms selected: %d' %
                            sel_size)
     else:
         gridding_first = None
         gridding_last = None
         title_lines.append("REMARK map covering the unit cell")
     if params.format == "xplor":
         fft_map.as_xplor_map(file_name=self.params.file_name,
                              title_lines=title_lines,
                              gridding_first=gridding_first,
                              gridding_last=gridding_last)
     else:
         fft_map.as_ccp4_map(file_name=self.params.file_name,
                             gridding_first=gridding_first,
                             gridding_last=gridding_last,
                             labels=title_lines)
Example #26
 def __init__(O, mt, n_refl, target_type, obs_type):
     O.target_type = target_type
     O.obs_type = obs_type
     O.obs = mt.random_double(size=n_refl)
     O.weights = mt.random_double(size=n_refl)
     rff = flex.bool(max(1, iceil(n_refl * 0.6)), False)
     rff.resize(n_refl, True)
     O.r_free_flags = rff.select(mt.random_permutation(size=n_refl))
     O.scale_factor = 1 + mt.random_double()
     O.a = mt.random_double(size=n_refl)
     O.b = mt.random_double(size=n_refl)
Example #27
def exercise_writer(space_group_info, n_sites=100, d_min=1.5):
    if op.isfile("tst_iotbx_dsn6.omap"):
        os.remove("tst_iotbx_dsn6.omap")
    xrs = random_structure.xray_structure(
        space_group_info=space_group_info,
        elements=(("O", "N", "C") * (n_sites // 3 + 1))[:n_sites],
        volume_per_atom=50,
        min_distance=1.5)
    fc = xrs.structure_factors(d_min=d_min).f_calc()
    fft_map = fc.fft_map(resolution_factor=1 / 3).apply_sigma_scaling()
    n_real = fft_map.n_real()
    n_blocks = iceil(n_real[0] / 8) * iceil(n_real[1] / 8) * iceil(
        n_real[2] / 8)
    fft_map.as_dsn6_map(file_name="tst_iotbx_dsn6.omap")
    assert op.isfile("tst_iotbx_dsn6.omap")
    size = op.getsize("tst_iotbx_dsn6.omap")
    assert (size == (n_blocks + 1) * 512)
    os.remove("tst_iotbx_dsn6.omap")
    fft_map.as_dsn6_map(file_name="tst_iotbx_dsn6.omap",
                        gridding_first=[-5, -5, -5],
                        gridding_last=n_real)
    n_blocks = iceil(1 + n_real[0] / 8) * iceil(1 + n_real[1] /
                                                8) * iceil(1 + n_real[2] / 8)
    size = op.getsize("tst_iotbx_dsn6.omap")
    assert (size == (n_blocks + 1) * 512)
Example #28
    def fit_mu(self, dataset_maps, map_data_size):
        """Calculate the average map from all of the different observations"""
        print("\t### Fitting mu!")

        # Extract the maps to be used for averaging
        if len(dataset_maps) == 1:

            # Extract the map from the list
            m = dataset_maps[0]
            # Mean and median are simply the map value -- copy directly to the statistical maps
            mean_map_vals = medn_map_vals = numpy.array(m.data)

        else:

            # Chunk the points into groups - Compromise between cpu time and memory usage - ~200 dataset -> chunksize of 5000
            chunk_size = 500 * iceil(1000.0 / len(dataset_maps))
            chunk_idxs = [i for i in range(0, map_data_size, chunk_size)]
            num_chunks = len(chunk_idxs)

            t1 = time.time()

            mean_map_vals = numpy.zeros(map_data_size)
            medn_map_vals = numpy.zeros(map_data_size)

            for i_chunk, chunk_start in enumerate(chunk_idxs):
                status_bar_2(n=i_chunk, n_max=num_chunks)

                tmp_map_vals = numpy.array([
                    m.data[chunk_start:chunk_start + chunk_size]
                    for m in dataset_maps
                ])

                # Check that the output values are the expected dimensions
                if i_chunk + 1 < num_chunks:
                    assert len(tmp_map_vals) == len(dataset_maps)
                    assert len(tmp_map_vals.T) == chunk_size

                tmp_map_means = numpy.mean(tmp_map_vals, axis=0)
                mean_map_vals[chunk_start:chunk_start +
                              chunk_size] = tmp_map_means
                tmp_map_medns = numpy.median(tmp_map_vals, axis=0)
                medn_map_vals[chunk_start:chunk_start +
                              chunk_size] = tmp_map_medns

            status_bar_2(n=num_chunks, n_max=num_chunks)

            t2 = time.time()

        # Use the first map as the template; ``m`` from the comprehension
        # above does not leak out of its scope under Python 3.
        mu = dataset_maps[0].new_from_template(
            map_data=flex.double(mean_map_vals.flatten()),
            sparse=dataset_maps[0].is_sparse())

        return mu
Example #29
def write_xplor_map_file(coeffs, frac_min, frac_max, file_base):
    fft_map = coeffs.fft_map(resolution_factor=1 / 3.0)
    fft_map.apply_sigma_scaling()
    n_real = fft_map.n_real()
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    title_lines = ["REMARK map covering model + 3.0A buffer"]
    file_name = "%s.map" % file_base
    fft_map.as_xplor_map(file_name=file_name,
                         title_lines=title_lines,
                         gridding_first=gridding_first,
                         gridding_last=gridding_last)
    return file_name
Example #30
def export_r_free_flags_for_ccp4(flags, test_flag_value):
    assert (test_flag_value == True) or isinstance(test_flag_value, int)
    from scitbx.array_family import flex
    if (isinstance(flags, flex.bool)):
        test_flag_value = True
    else:
        assert isinstance(flags, flex.int)
    unique_values = set(flags)
    if len(unique_values) > 2:  # XXX: is this safe?
        return flags
    new_flags = flex.int(flags.size())
    n_free = flags.count(test_flag_value)
    if (n_free > 0):
        n_bins = iceil(flags.size() / n_free)
    else:
        n_bins = 1  # XXX dangerous!  but necessary for tiny sets
    for i in range(flags.size()):
        if flags[i] == test_flag_value:
            new_flags[i] = 0
        else:
            new_flags[i] = iceil(random.random() * (n_bins - 1))
    return new_flags
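The conversion sends free reflections to flag 0 and scatters the rest over flags 1 .. n_bins-1, matching the CCP4 convention that the test set is the FreeR_flag == 0 bin. The same arithmetic with plain-Python stand-ins (hypothetical; not the flex API):

import math
import random

flags = [True, False, False, False, True, False]  # True = free reflection
n_free = flags.count(True)                        # 2
n_bins = math.ceil(len(flags) / n_free)           # iceil(6 / 2) = 3
new_flags = [0 if f else math.ceil(random.random() * (n_bins - 1))
             for f in flags]                      # free -> 0, rest -> 1..2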
Example #31
def export_r_free_flags_for_ccp4 (flags, test_flag_value) :
  assert (test_flag_value == True) or isinstance(test_flag_value, int)
  from scitbx.array_family import flex
  if (isinstance(flags, flex.bool)) :
    test_flag_value = True
  else :
    assert isinstance(flags, flex.int)
  unique_values = set(flags)
  if len(unique_values) > 2 : # XXX: is this safe?
    return flags
  new_flags = flex.int(flags.size())
  n_free = flags.count(test_flag_value)
  if (n_free > 0) :
    n_bins = iceil(flags.size() / n_free)
  else :
    n_bins = 1 # XXX dangerous!  but necessary for tiny sets
  for i in range(flags.size()) :
    if flags[i] == test_flag_value :
      new_flags[i] = 0
    else :
      new_flags[i] = iceil(random.random() * (n_bins - 1))
  return new_flags
Example #32
def write_xplor_map_file (coeffs, frac_min, frac_max, file_base) :
  fft_map = coeffs.fft_map(resolution_factor=1/3.0)
  fft_map.apply_sigma_scaling()
  n_real = fft_map.n_real()
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  title_lines=["REMARK map covering model + 3.0A buffer"]
  file_name = "%s.map" % file_base
  fft_map.as_xplor_map(
    file_name=file_name,
    title_lines=title_lines,
    gridding_first=gridding_first,
    gridding_last=gridding_last)
  return file_name
Example #33
 def __init__(self, params, coeffs, atom_selection_manager=None,
              xray_structure=None):
   adopt_init_args(self, locals())
   fft_map = coeffs.fft_map(resolution_factor =
     self.params.grid_resolution_factor)
   if(self.params.scale == "volume"): fft_map.apply_volume_scaling()
   elif(self.params.scale == "sigma"): fft_map.apply_sigma_scaling()
   else: raise RuntimeError
   title_lines=["REMARK file: %s" %
     show_string(os.path.basename(self.params.file_name))]
   title_lines.append("REMARK directory: %s" %
     show_string(os.path.dirname(self.params.file_name)))
   title_lines.append("REMARK %s" % date_and_time())
   assert self.params.region in ["selection", "cell"]
   if(self.params.region == "selection" and xray_structure is not None) :
     map_iselection = None
     if atom_selection_manager is not None :
       map_iselection = self.atom_iselection()
     frac_min, frac_max = self.box_around_selection(
       iselection = map_iselection,
       buffer     = self.params.atom_selection_buffer)
     n_real = fft_map.n_real()
     gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
     gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
     title_lines.append('REMARK map around selection')
     title_lines.append('REMARK   atom_selection=%s' %
       show_string(self.params.atom_selection))
     title_lines.append('REMARK   atom_selection_buffer=%.6g' %
       self.params.atom_selection_buffer)
     if(map_iselection is None):
       sel_size = self.xray_structure.scatterers().size()
     else:
       sel_size = map_iselection.size()
     title_lines.append('REMARK   number of atoms selected: %d' % sel_size)
   else:
     gridding_first = None
     gridding_last = None
     title_lines.append("REMARK map covering the unit cell")
   if params.format == "xplor" :
     fft_map.as_xplor_map(
       file_name      = self.params.file_name,
       title_lines    = title_lines,
       gridding_first = gridding_first,
       gridding_last  = gridding_last)
   else :
     fft_map.as_ccp4_map(
       file_name      = self.params.file_name,
       gridding_first = gridding_first,
       gridding_last  = gridding_last,
       labels=title_lines)
Example #34
def mask_grid(xrs, buffer, map_data, n_real):
    # XXX move to C++
    frac_min, frac_max = xrs.unit_cell().box_frac_around_sites(
        sites_cart=xrs.sites_cart(), buffer=buffer - 1.5)
    gridding_first = [ifloor(f * n) for f, n in zip(frac_min, n_real)]
    gridding_last = [iceil(f * n) for f, n in zip(frac_max, n_real)]
    new_map = flex.double(flex.grid(n_real), 0)
    for i in range(gridding_first[0], gridding_last[0]):
        for j in range(gridding_first[1], gridding_last[1]):
            for k in range(gridding_first[2], gridding_last[2]):
                if (i > 0 and i < n_real[0] and j > 0 and j < n_real[1]
                        and k > 0 and k < n_real[2]):
                    new_map[(i, j, k)] = map_data[(i, j, k)]
    return new_map
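The same interior box copy with numpy slicing (a sketch; like the loops above it keeps only grid points strictly inside 0 < index < n_real on each axis):

import numpy as np

def mask_grid_np(map_data, gridding_first, gridding_last, n_real):
    lo = [max(f, 1) for f in gridding_first]
    hi = [min(l, n) for l, n in zip(gridding_last, n_real)]
    new_map = np.zeros(n_real)
    new_map[lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]] = \
        map_data[lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]]
    return new_map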
Example #35
def mask_grid(xrs, buffer, map_data, n_real):
  # XXX move to C++
  frac_min, frac_max = xrs.unit_cell().box_frac_around_sites(
    sites_cart = xrs.sites_cart(), buffer = buffer-1.5)
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  new_map = flex.double(flex.grid(n_real),0)
  for i in range(gridding_first[0], gridding_last[0]):
    for j in xrange(gridding_first[1], gridding_last[1]):
      for k in xrange(gridding_first[2], gridding_last[2]):
        if(i> 0 and i<n_real[0] and
           j> 0 and j<n_real[1] and
           k> 0 and k<n_real[2]):
          new_map[(i,j,k)] = map_data[(i,j,k)]
  return new_map
Example #36
    def modify_mask_box(self, mask_data, sites_frac):
        box_buffer = self.params.box_buffer
        # Number of selected atoms
        n_selected = self.selection_bool.count(True)
        na = mask_data.all()
        n_selected_p1 = sites_frac.size()
        n_boxes = int(n_selected_p1 / n_selected)
        box_list = [[] for i in range(n_boxes)]
        for n_box in range(n_boxes):
            for i in range(n_selected):
                box_list[n_box].append(sites_frac[n_box + n_boxes * i])
        na = self.mask_data_all.all()
        k = 0
        for box in box_list:
            k += 1
            x_min = min(frac[0] for frac in box)
            y_min = min(frac[1] for frac in box)
            z_min = min(frac[2] for frac in box)
            x_max = max(frac[0] for frac in box)
            y_max = max(frac[1] for frac in box)
            z_max = max(frac[2] for frac in box)
            frac_min = [x_min, y_min, z_min]
            frac_max = [x_max, y_max, z_max]

            cs = self.xray_structure.crystal_symmetry()

            # Add buffer to box if indicated.
            if (box_buffer is not None):
                cushion = flex.double(cs.unit_cell().fractionalize(
                    (box_buffer, ) * 3))
                frac_min = list(flex.double(frac_min) - cushion)
                frac_max = list(flex.double(frac_max) + cushion)

            gridding_first = [ifloor(f * n) for f, n in zip(frac_min, na)]
            gridding_last = [iceil(f * n) for f, n in zip(frac_max, na)]

            for j in range(3):
                if (gridding_last[j] - gridding_first[j] >= na[j]):
                    raise Sorry(
                        "The box is too big. Decrease box_buffer or use a " +
                        "different selection")

            maptbx.set_box(value=0,
                           map_data_to=mask_data,
                           start=gridding_first,
                           end=gridding_last)
        return mask_data
Example #37
    def __init__(self, map_manager, model, cushion, wrapping, log=sys.stdout):
        adopt_init_args(self, locals())
        self.basis_for_boxing_string = 'using model, wrapping=%s' % (wrapping)
        # safeguards
        assert isinstance(wrapping, bool)
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self.map_manager.map_data().accessor().origin() == (0, 0, 0)

        # Make sure original map_manager symmetry matches model or original model
        original_uc_symmetry = map_manager.original_unit_cell_crystal_symmetry
        assert (original_uc_symmetry.is_similar_symmetry(
            model.crystal_symmetry())
                or (model.get_shift_manager()
                    and original_uc_symmetry.is_similar_symmetry(
                        model.get_shift_manager().get_original_cs())))

        assert cushion >= 0
        if wrapping:
            assert map_manager.unit_cell_grid == map_manager.map_data().all()
        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_frac = model.get_sites_frac()
        map_data = map_manager.map_data()
        # convert cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)
        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()

        # Apply to model and to map_manager so that self.model()
        #  and self.map_manager are boxed versions

        self.map_manager = self.apply_to_map(self.map_manager)
        self.model = self.apply_to_model(self.model)
Example #38
    def partition(self, mask=None, cpus=1):
        """Find the nearest neighbour for each grid point (or the subset defined by mask.outer_mask() if mask is not None)"""
        def find_sites(sites_tuple):
            ref_sites, query_sites = sites_tuple
            tree = spatial.KDTree(data=ref_sites)
            nn_dists, nn_groups = tree.query(query_sites)
            return nn_groups

        assert isinstance(cpus, int) and (cpus > 0)

        # Sites that we are partitioning
        if mask: query_sites = flex.vec3_double(mask.outer_mask())
        else: query_sites = flex.vec3_double(self.parent.grid_points())
        # Find the nearest grid_site for each query_site (returns index of the grid site)
        if cpus == 1:
            output = [find_sites((self.sites_grid, query_sites))]
        else:
            # Chunk the points into groups
            chunk_size = iceil(1.0 * len(query_sites) / cpus)
            chunked_points = [
                query_sites[i:i + chunk_size]
                for i in range(0, len(query_sites), chunk_size)
            ]
            assert sum(map(len, chunked_points)) == len(query_sites)
            assert len(chunked_points) == cpus
            # Map to cpus
            arg_list = [(self.sites_grid, chunk) for chunk in chunked_points]
            output = easy_mp.pool_map(fixed_func=find_sites,
                                      args=arg_list,
                                      processes=cpus)

        assert len(output) == cpus, '{!s} != {!s}'.format(len(output), cpus)
        # Extract the indices of the mapped points
        nn_groups = []
        [nn_groups.extend(o) for o in output]
        nn_groups = numpy.array(nn_groups)
        assert len(query_sites) == len(nn_groups)
        # Reformat into full grid size
        if mask:
            self.nn_groups = -1 * numpy.ones(self.parent.grid_size_1d(),
                                             dtype=int)
            self.nn_groups.put(mask.outer_mask_indices(), nn_groups)
        else:
            self.nn_groups = nn_groups

        return self
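The chunking arithmetic in isolation: chunks of iceil(n / cpus) sites cover the query set exactly, and for these (hypothetical) numbers the chunk count equals cpus, as the assert in partition() requires; that equality does not hold for every n/cpus combination.

import math

n_sites, cpus = 1003, 4
chunk_size = math.ceil(n_sites / cpus)               # iceil -> 251
starts = list(range(0, n_sites, chunk_size))         # 4 chunks
assert len(starts) == cpus
assert sum(min(chunk_size, n_sites - s) for s in starts) == n_sites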
Example #39
    def __init__(self,
                 map_manager=None,
                 model=None,
                 cushion=None,
                 wrapping=None):
        self.map_manager = map_manager
        self.model = model
        self.wrapping = wrapping

        self.basis_for_boxing_string = 'using model, wrapping=%s' % (wrapping)

        # safeguards
        assert wrapping is not None
        assert isinstance(map_manager, iotbx.map_manager.map_manager)
        assert isinstance(model, mmtbx.model.manager)
        assert self.map_manager.map_data().accessor().origin() == (0, 0, 0)
        assert map_manager.crystal_symmetry().is_similar_symmetry(
            model.crystal_symmetry())
        assert cushion >= 0

        if wrapping:
            assert map_manager.unit_cell_grid == map_manager.map_data().all()

        # get items needed to do the shift
        cs = map_manager.crystal_symmetry()
        uc = cs.unit_cell()
        sites_frac = model.get_sites_frac()
        map_data = map_manager.map_data()
        # convert cushion into fractional vector
        cushion_frac = flex.double(uc.fractionalize((cushion, ) * 3))
        # find fractional corners
        frac_min = sites_frac.min()
        frac_max = sites_frac.max()
        frac_max = list(flex.double(frac_max) + cushion_frac)
        frac_min = list(flex.double(frac_min) - cushion_frac)

        # find corner grid nodes
        all_orig = map_data.all()
        self.gridding_first = [
            ifloor(f * n) for f, n in zip(frac_min, all_orig)
        ]
        self.gridding_last = [iceil(f * n) for f, n in zip(frac_max, all_orig)]

        # Ready with gridding...set up shifts and box crystal_symmetry
        self.set_shifts_and_crystal_symmetry()
Example #40
def calc_ratio(count_list, sampling_angle=5):
  """
  Calculate the same statistics as the "statistic" call, but do it without
  first binning the peaks.
  """
  # Calculate the same statistics as the "statistic" call, but do it without first binning the peaks.
  total_angles=iceil(360.0/sampling_angle)
  binsize=int(total_angles/6)
  first_loc=60/sampling_angle
  binned_list=[0]*6
  for i in range(6):
    for j in range(binsize):
      binned_list[i] += count_list[int(first_loc+i*binsize-binsize/2+j)%total_angles]
  rotamer_count = sum(binned_list[0::2])
  total_count = sum(binned_list)
  stdev = math.sqrt((total_angles/2+3)*(total_angles/2-3)/(total_angles**2)*total_count)
  mean= total_count*(total_angles/2+3)/total_angles
  rotamer_ratio=rotamer_count/(total_count+0.000000000000000000001)
  zscore=(rotamer_count-mean)/(stdev+0.000000000000000000001)
  return rotamer_ratio, zscore
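With the default sampling_angle = 5 the constants come out to total_angles = iceil(360 / 5) = 72, binsize = 12 and first_loc = 12, i.e. six 60-degree bins alternating between rotameric and non-rotameric positions. A quick check with a flat histogram (assumes math and iceil are imported as in the enclosing module):

counts = [1] * 72                  # flat distribution over all angles
ratio, zscore = calc_ratio(counts)
assert abs(ratio - 0.5) < 1e-6     # alternating bins split the counts evenly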
Example #42
def write_dsn6_map(sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10):
  import iotbx.dsn6
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  print "n_real:", n_real
  print "gridding start:", gridding_first
  print "gridding end:", gridding_last
  iotbx.dsn6.write_dsn6_map(
    file_name=file_name,
    unit_cell=unit_cell,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data)
Example #43
def write_ccp4_map (sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.ccp4_map
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  space_group = sgtbx.space_group_info("P1").group()
  iotbx.ccp4_map.write_ccp4_map(
    file_name=file_name,
    unit_cell=unit_cell,
    space_group=space_group,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data,
    labels=flex.std_string(["iotbx.map_conversion.write_ccp4_map_box"]))
Example #44
def write_dsn6_map (sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.dsn6
  from cctbx import sgtbx
  from scitbx.array_family import flex
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first = tuple([ifloor(f*n) for f,n in zip(frac_min,n_real)])
  gridding_last = tuple([iceil(f*n) for f,n in zip(frac_max,n_real)])
  print "n_real:", n_real
  print "gridding start:", gridding_first
  print "gridding end:", gridding_last
  iotbx.dsn6.write_dsn6_map(
    file_name=file_name,
    unit_cell=unit_cell,
    gridding_first=gridding_first,
    gridding_last=gridding_last,
    map_data=map_data)
Example #45
def write_xplor_map(sites_cart, unit_cell, map_data, n_real, file_name,
    buffer=10) :
  import iotbx.xplor.map
  if sites_cart is not None :
    frac_min, frac_max = unit_cell.box_frac_around_sites(
      sites_cart=sites_cart,
      buffer=buffer)
  else :
    frac_min, frac_max = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)
  gridding_first=[ifloor(f*n) for f,n in zip(frac_min,n_real)]
  gridding_last=[iceil(f*n) for f,n in zip(frac_max,n_real)]
  gridding = iotbx.xplor.map.gridding(n     = map_data.focus(),
                                      first = gridding_first,
                                      last  = gridding_last)
  iotbx.xplor.map.writer(
    file_name          = file_name,
    is_p1_cell         = True,
    title_lines        = [' None',],
    unit_cell          = unit_cell,
    gridding           = gridding,
    data               = map_data,
    average            = -1,
    standard_deviation = -1)
Example #46
def pdbMap(fileName):
    d_min = 0.4
    pdb_inp = iotbx.pdb.input(fileName)
    xray_structure = pdb_inp.xray_structure_simple()
    print("d_min:", d_min)
    f_calc = xray_structure.structure_factors(d_min=d_min).f_calc()
    x = f_calc.amplitudes()
    file2 = open(r"computedValues.txt", 'w')
    for i in x:
        for m in i:
            file2.write(str(m) + " ")
        file2.write('\n')
    fft_map = f_calc.fft_map()
    n = fft_map.n_real()
    print("unit cell gridding:", n)
    fft_map.as_xplor_map(file_name="unit_cell.map")
    block_first = tuple([ifloor(i * 0.2) for i in n])
    block_last = tuple(
        [max(f + 10, iceil(i * 0.7)) for f, i in zip(block_first, n)])
    print("block first:", block_first)
    print "block last: ", block_last
    fft_map.as_xplor_map(file_name="block.map",
                         gridding_first=block_first,
                         gridding_last=block_last)
Example #47
def exercise_writer (space_group_info, n_sites=100, d_min=1.5) :
  if op.isfile("tst_iotbx_dsn6.omap") :
    os.remove("tst_iotbx_dsn6.omap")
  xrs = random_structure.xray_structure(
    space_group_info=space_group_info,
    elements=(("O","N","C")*(n_sites//3+1))[:n_sites],
    volume_per_atom=50,
    min_distance=1.5)
  fc = xrs.structure_factors(d_min=d_min).f_calc()
  fft_map = fc.fft_map(resolution_factor=1/3.).apply_sigma_scaling()
  n_real = fft_map.n_real()
  n_blocks = iceil(n_real[0]/8.) * iceil(n_real[1]/8.) * iceil(n_real[2]/8.)
  fft_map.as_dsn6_map(file_name="tst_iotbx_dsn6.omap")
  assert op.isfile("tst_iotbx_dsn6.omap")
  size = op.getsize("tst_iotbx_dsn6.omap")
  assert (size == (n_blocks+1)*512)
  os.remove("tst_iotbx_dsn6.omap")
  fft_map.as_dsn6_map(
    file_name="tst_iotbx_dsn6.omap",
    gridding_first=[-5,-5,-5],
    gridding_last=n_real)
  n_blocks = iceil(1+n_real[0]/8.) * iceil(1+n_real[1]/8.) * iceil(1+n_real[2]/8.)
  size = op.getsize("tst_iotbx_dsn6.omap")
  assert (size == (n_blocks+1)*512)
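
# The size assertions above reflect the DSN6/BRIX layout: densities are packed
# into 8x8x8 bricks of 512 bytes each, preceded by one 512-byte header record,
# hence size == (n_blocks + 1) * 512. A sketch of the whole-cell case
# (hypothetical helper, reusing iceil from libtbx.math_utils; writers may
# round a requested sub-box up differently):
def expected_dsn6_size(n_real):
    n_blocks = 1
    for n in n_real:
        n_blocks *= iceil(n / 8.)  # bricks needed to cover n grid points
    return (n_blocks + 1) * 512    # +1 for the 512-byte header record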
Example #48
 def line_between_points(self, x1, y1, x2, y2, n_values=100):
     """
     Given two points on the image, sample intensities along a line connecting
     them (using linear interpolation).  This also calculates the coordinates
     of each sample point, which is used for lattice dimension calculations
     once peaks have been identified.  Arguments are in image pixel coordinates
     (starting at 1,1).
     """
     x1_, y1_ = self.image_coords_as_array_coords(x1, y1)
     x2_, y2_ = self.image_coords_as_array_coords(x2, y2)
     # re-derive n_values so there is roughly one sample per pixel along the
     # line (this overrides the n_values argument)
     n_values = ifloor(math.sqrt((x2_ - x1_)**2 + (y2_ - y1_)**2))
     delta_x = (x2_ - x1_) / (n_values - 1)
     delta_y = (y2_ - y1_) / (n_values - 1)
     vals = []
     img_coords = []
     d = self._raw.linearintdata
     # TODO remarkably, this is reasonably fast in Python, but it would
     # probably be more at home in scitbx.math
     for n in range(n_values):
         x = x1_ + (n * delta_x)
         y = y1_ + (n * delta_y)
         xd, yd = self.array_coords_as_detector_coords(x, y)
         img_coords.append((xd, yd))
         x_1 = ifloor(x)
         x_2 = iceil(x)
         y_1 = ifloor(y)
         y_2 = iceil(y)
         v11 = d[(x_1, y_1)]
         v12 = d[(x_1, y_2)]
         v21 = d[(x_2, y_1)]
         v22 = d[(x_2, y_2)]
         if (x_2 == x_1):
             if (y_2 == y_1):
                 vxy = v11
             else:
                 vxy = ((v12 * (y - y_1)) + (v11 * (y_2 - y))) / (y_2 - y_1)
         elif (y_2 == y_1):
             vxy = ((v21 * (x - x_1)) + (v11 * (x_2 - x))) / (x_2 - x_1)
         else:
             dxdy = (y_2 - y_1) * (x_2 - x_1)
             vxy = ((v11 / dxdy) * (x_2 - x) * (y_2 - y)) + \
                   ((v21 / dxdy) * (x - x_1) * (y_2 - y)) + \
                   ((v12 / dxdy) * (x_2 - x) * (y - y_1)) + \
                   ((v22 / dxdy) * (x - x_1) * (y - y_1))
         vals.append(vxy)
     lattice_length = None
     if (len(vals) > 5):
         # first find peaks in the profile
         peaks = []
         avg = sum(vals) / len(vals)
         filtered_vals = []
         for x in vals:
             if (x <= avg * 3):
                 filtered_vals.append(x)
         background = sum(filtered_vals) / len(filtered_vals)
         i = 2
         while (i < len(vals) - 2):
             x = vals[i]
             if (x <= background):
                 pass
             elif ((x > vals[i - 1]) and (x > vals[i - 2])
                   and (x > vals[i + 1]) and (x > vals[i + 2])):
                 peaks.append(i)
             i += 1
         if (len(peaks) > 0):
             # calculate the average lattice length
             center_x, center_y = self.get_beam_center_mm()
             distances = []
             i = 1
             while (i < len(peaks)):
                 x1, y1 = img_coords[peaks[i - 1]]
                 x2, y2 = img_coords[peaks[i]]
                 rs_distance = rstbx.utils.reciprocal_space_distance(
                     x1,
                     y1,
                     x2,
                     y2,
                     wavelength=self.get_wavelength(),
                     center_x=center_x,
                     center_y=center_y,
                     distance=self.get_detector_distance(),
                     detector_two_theta=self.get_detector_2theta(),
                     distance_is_corrected=True)
                 assert (rs_distance > 0)
                 distances.append(1 / rs_distance)
                 i += 1
             lattice_length = sum(distances) / len(distances)
     # note: x1/y1/x2/y2 may have been reassigned in the peak loop above, in
     # which case this distance is computed from the last pair of peaks
     distance = self.distance_between_points(x1, y1, x2, y2)
     return line_profile(vals, distance, lattice_length)
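
# The branch ladder above is bilinear interpolation over the four surrounding
# pixels, with guards for the degenerate cases where x or y falls exactly on a
# grid line. A self-contained sketch of the same computation (d indexable as
# d[(ix, iy)], x/y in fractional array coordinates):
import math

def bilinear(d, x, y):
    x1, x2 = int(math.floor(x)), int(math.ceil(x))
    y1, y2 = int(math.floor(y)), int(math.ceil(y))
    if (x1 == x2) and (y1 == y2):
        return d[(x1, y1)]
    if (x1 == x2):  # on a vertical grid line: interpolate along y only
        return (d[(x1, y2)] * (y - y1) + d[(x1, y1)] * (y2 - y)) / (y2 - y1)
    if (y1 == y2):  # on a horizontal grid line: interpolate along x only
        return (d[(x2, y1)] * (x - x1) + d[(x1, y1)] * (x2 - x)) / (x2 - x1)
    area = (x2 - x1) * (y2 - y1)
    return (d[(x1, y1)] * (x2 - x) * (y2 - y)
            + d[(x2, y1)] * (x - x1) * (y2 - y)
            + d[(x1, y2)] * (x2 - x) * (y - y1)
            + d[(x2, y2)] * (x - x1) * (y - y1)) / area
Example #49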
def exercise_real_space_refinement(verbose):
  if (verbose):
    out = sys.stdout
  else:
    out = StringIO()
  out_of_bounds_clamp = maptbx.out_of_bounds_clamp(0)
  out_of_bounds_raise = maptbx.out_of_bounds_raise()
  crystal_symmetry = crystal.symmetry(
    unit_cell=(10,10,10,90,90,90),
    space_group_symbol="P 1")
  xray_structure = xray.structure(
    crystal_symmetry=crystal_symmetry,
    scatterers=flex.xray_scatterer([
      xray.scatterer(label="C", site=(0,0,0))]))
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=False,
    d_min=1)
  f_calc = miller_set.structure_factors_from_scatterers(
    xray_structure=xray_structure).f_calc()
  fft_map = f_calc.fft_map()
  fft_map.apply_sigma_scaling()
  real_map = fft_map.real_map_unpadded()
  #### unit_cell test
  delta_h = .005
  basic_map = maptbx.basic_map(
    maptbx.basic_map_unit_cell_flag(),
    real_map,
    real_map.focus(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell())
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
  ### non_symmetric test
  #
  minfrac = crystal_symmetry.unit_cell().fractionalize((-5,-5,-5))
  maxfrac = crystal_symmetry.unit_cell().fractionalize((5,5,5))
  gridding_first = [ifloor(n*b) for n,b in zip(fft_map.n_real(), minfrac)]
  gridding_last = [iceil(n*b) for n,b in zip(fft_map.n_real(), maxfrac)]
  data=maptbx.copy(real_map, gridding_first, gridding_last)
  #
  basic_map = maptbx.basic_map(
    maptbx.basic_map_non_symmetric_flag(),
    data,
    fft_map.n_real(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell())
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
  ### asu test
  #
  minfrac = crystal_symmetry.unit_cell().fractionalize((0,0,0))
  maxfrac = crystal_symmetry.unit_cell().fractionalize((10,10,10))
  gridding_first = [ifloor(n*b) for n,b in zip(fft_map.n_real(), minfrac)]
  gridding_last = [iceil(n*b) for n,b in zip(fft_map.n_real(), maxfrac)]
  data=maptbx.copy(real_map, gridding_first, gridding_last)
  #
  basic_map = maptbx.basic_map(
    maptbx.basic_map_asu_flag(),
    data,
    crystal_symmetry.space_group(),
    crystal_symmetry.direct_space_asu().as_float_asu(),
    real_map.focus(),
    crystal_symmetry.unit_cell().orthogonalization_matrix(),
    out_of_bounds_clamp.as_handle(),
    crystal_symmetry.unit_cell(),
    0.5,
    True)
  testing_function_for_rsfit(basic_map,delta_h,xray_structure,out)
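Example #50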
 def update_solvent_and_scale_2(self, fast, params, apply_back_trace,
                                refine_hd_scattering, log):
   if(params is None): params = bss.master_params.extract()
   if(self.xray_structure is not None):
     # Figure out Fcalc and Fmask based on presence of H
     hd_selection = self.xray_structure.hd_selection()
     xrs_no_h = self.xray_structure.select(~hd_selection)
     xrs_h    = self.xray_structure.select(hd_selection)
   # Create data container for scalers. If H scattering is refined then it is
   # assumed that self.f_calc() does not contain H contribution at all.
   fmodel_kbu = mmtbx.f_model.manager_kbu(
     f_obs   = self.f_obs(),
     f_calc  = self.f_calc(),
     f_masks = self.f_masks(),
     ss      = self.ss)
   # Compute k_total and k_mask using one of the two methods (anal or min).
   # Note: this intentionally ignores previously existing f_part1 and f_part2.
   #
   k_sol, b_sol, b_cart, b_adj = [None,]*4
   if(fast): # analytical
     assert len(fmodel_kbu.f_masks)==1
     result = mmtbx.bulk_solvent.scaler.run_simple(
       fmodel_kbu     = fmodel_kbu,
       r_free_flags   = self.r_free_flags(),
       bulk_solvent   = params.bulk_solvent,
       bin_selections = self.bin_selections)
     r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
   else: # using minimization: exp solvent and scale model (k_sol,b_sol,b_cart)
     result = bss.bulk_solvent_and_scales(
       fmodel_kbu = fmodel_kbu,
       params     = params)
     k_sol, b_sol, b_cart = result.k_sols(), result.b_sols(), result.b_cart()
     r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
   if(apply_back_trace and len(fmodel_kbu.f_masks)==1 and
      self.xray_structure is not None):
     o = result.apply_back_trace_of_overall_exp_scale_matrix(
       xray_structure = self.xray_structure)
     b_adj = o.b_adj
     if(not fast): b_sol, b_cart = [o.b_sol], o.b_cart
     self.update_xray_structure(
       xray_structure = o.xray_structure,
       update_f_calc  = True)
     fmodel_kbu = fmodel_kbu.update(f_calc = self.f_calc())
     self.show(prefix = "overall B=%s to atoms"%str("%7.2f"%o.b_adj).strip(),
       log = log)
   # Update self with new arrays so that H correction knows current R factor.
   # If no H to account for, then this is the final result.
   k_masks       = result.k_masks()
   k_anisotropic = result.k_anisotropic()
   k_isotropic   = result.k_isotropic()
   self.update_core(
     k_mask        = k_masks,
     k_anisotropic = k_anisotropic,
     k_isotropic   = k_isotropic)
   self.show(prefix = "bulk-solvent and scaling", log = log)
   # Consistency check
   assert approx_equal(self.r_all(), r_all_from_scaler)
   # Add contribution from H (if present and riding). This goes to f_part2.
   kh, bh = 0, 0
   if(refine_hd_scattering and
      self.need_to_refine_hd_scattering_contribution()):
     # Obsolete previous contribution f_part2
     f_part2 = fmodel_kbu.f_calc.array(data=fmodel_kbu.f_calc.data()*0)
     self.update_core(f_part2 = f_part2)
     xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
     f_h = self.compute_f_calc(xray_structure = xrs_h)
     # Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
     data = fmodel_kbu.f_calc.data()
     for k_mask_, f_mask_ in zip(k_masks, fmodel_kbu.f_masks):
       data = data + k_mask_*f_mask_.data()
     f_calc_plus_f_bulk_no_scales = fmodel_kbu.f_calc.array(data = data)
     # Consistency check
     assert approx_equal(self.f_model().data(),
       f_calc_plus_f_bulk_no_scales.data()*k_isotropic*k_anisotropic)
     assert approx_equal(self.f_model_no_scales().data(),
       f_calc_plus_f_bulk_no_scales.data())
     #
     # Compute contribution from H (F_H)
     #
     # Coarse sampling
     b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
     b_min = int(max(0,b_mean)*0.5)
     b_max = int(b_mean*1.5)
     sc = 1000.
     kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
     br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
     o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
       f_obs       = fmodel_kbu.f_obs.data(),
       f_calc      = f_calc_plus_f_bulk_no_scales.data(),
       f_mask      = f_h.data(),
       k_total     = k_isotropic*k_anisotropic,
       ss          = fmodel_kbu.ss,
       k_sol_range = flex.double(kr),
       b_sol_range = flex.double(br),
       r_ref       = self.r_work())
     if(o.updated()):
       f_part2 = f_h.array(data = o.k_mask()*f_h.data())
       kh, bh = o.k_sol(), o.b_sol()
       self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
     # Fine sampling
     k_min = max(0,o.k_sol()-0.1)
     k_max = o.k_sol()+0.1
     b_min = max(0,o.b_sol()-5.)
     b_max = o.b_sol()+5.
     kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
     br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(1.*sc))]
     o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
       f_obs       = fmodel_kbu.f_obs.data(),
       f_calc      = f_calc_plus_f_bulk_no_scales.data(),
       f_mask      = f_h.data(),
       k_total     = k_isotropic*k_anisotropic,
       ss          = fmodel_kbu.ss,
       k_sol_range = flex.double(kr),
       b_sol_range = flex.double(br),
       r_ref       = o.r())
     if(o.updated()):
       f_part2 = f_h.array(data = o.k_mask()*f_h.data())
       kh, bh = o.k_sol(), o.b_sol()
       self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
      # This helps when fast=True is used; see how it behaves in practice.
     #
     if(fast):
       fmodel_kbu_ = mmtbx.f_model.manager_kbu(
         f_obs   = self.f_obs(),
         f_calc  = f_calc_plus_f_bulk_no_scales,
         f_masks = [f_part2],
         ss      = self.ss)
       result = mmtbx.bulk_solvent.scaler.run_simple(
         fmodel_kbu     = fmodel_kbu_,
         r_free_flags   = self.r_free_flags(),
         bulk_solvent   = params.bulk_solvent,
         bin_selections = self.bin_selections)
       f_part2 = f_part2.array(data = result.core.k_mask()*f_part2.data())
       k_isotropic   = result.core.k_isotropic*result.core.k_isotropic_exp
       k_anisotropic = result.core.k_anisotropic
     # Update self with final scales
     self.update_core(
       k_mask        = k_masks,
       k_anisotropic = k_anisotropic,
       k_isotropic   = k_isotropic,
       f_part2       = f_part2)
     # Make sure what came out of scaling matches what self thinks it really is
     # It must match at least up to 1.e-6.
     self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
     if(fast):
       assert approx_equal(result.r_factor(), self.r_work())
     else:
       assert approx_equal(self.r_all(), o.r()), [self.r_all(), o.r()]
   return group_args(
     k_sol  = k_sol,
     b_sol  = b_sol,
     b_cart = b_cart,
     k_h    = kh,
     b_h    = bh,
     b_adj  = b_adj)
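
# The H-scattering fit above is a coarse-then-fine grid search: scan wide
# (k_sol, b_sol) ranges with coarse steps, then re-scan a window around the
# best point with fine steps. A generic sketch of the pattern (score is a
# hypothetical stand-in for the C++ scaler objects used above; the real code
# also clamps k and b at zero):
def coarse_to_fine_search(score, k_coarse, b_coarse, k_fine=0.01, b_fine=1.0):
    # score(k, b) -> residual; lower is better (hypothetical signature)
    _, k0, b0 = min((score(k, b), k, b) for k in k_coarse for b in b_coarse)
    k_range = [k0 - 0.1 + i * k_fine for i in range(int(0.2 / k_fine) + 1)]
    b_range = [b0 - 5. + i * b_fine for i in range(int(10. / b_fine) + 1)]
    _, k_best, b_best = min((score(k, b), k, b)
                            for k in k_range for b in b_range)
    return k_best, b_best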
Example #51
 def line_between_points (self, x1, y1, x2, y2, n_values=100) :
   """
   Given two points on the image, sample intensities along a line connecting
   them (using linear interpolation).  This also calculates the coordinates
   of each sample point, which is used for lattice dimension calculations
   once peaks have been identified.  Arguments are in image pixel coordinates
   (starting at 1,1).
   """
   x1_, y1_ = self.image_coords_as_array_coords(x1, y1)
   x2_, y2_ = self.image_coords_as_array_coords(x2, y2)
    # re-derive n_values so there is roughly one sample per pixel along the
    # line (this overrides the n_values argument)
    n_values = ifloor(math.sqrt((x2_-x1_)**2 + (y2_-y1_)**2))
   delta_x = (x2_ - x1_) / (n_values - 1)
   delta_y = (y2_ - y1_) / (n_values - 1)
   vals = []
   img_coords = []
   d = self._raw.linearintdata
   # TODO remarkably, this is reasonably fast in Python, but it would
   # probably be more at home in scitbx.math
   for n in range(n_values) :
     x = x1_ + (n * delta_x)
     y = y1_ + (n * delta_y)
     xd, yd = self.array_coords_as_detector_coords(x, y)
     img_coords.append((xd,yd))
     x_1 = ifloor(x)
     x_2 = iceil(x)
     y_1 = ifloor(y)
     y_2 = iceil(y)
     v11 = d[(x_1, y_1)]
     v12 = d[(x_1, y_2)]
     v21 = d[(x_2, y_1)]
     v22 = d[(x_2, y_2)]
     if (x_2 == x_1) :
       if (y_2 == y_1) :
         vxy = v11
       else :
         vxy = ((v12 * (y - y_1)) + (v11 * (y_2 - y))) / (y_2 - y_1)
     elif (y_2 == y_1) :
       vxy =  ((v21 * (x - x_1)) + (v11 * (x_2 - x))) / (x_2 - x_1)
     else :
       dxdy = (y_2 - y_1) * (x_2 - x_1)
       vxy = ((v11 / dxdy) * (x_2 - x) * (y_2 - y)) + \
             ((v21 / dxdy) * (x - x_1) * (y_2 - y)) + \
             ((v12 / dxdy) * (x_2 - x) * (y - y_1)) + \
             ((v22 / dxdy) * (x - x_1) * (y - y_1))
     vals.append(vxy)
   lattice_length = None
   if (len(vals) > 5) :
     # first find peaks in the profile
     peaks = []
     avg = sum(vals) / len(vals)
     filtered_vals = []
     for x in vals :
       if (x <= avg*3) :
         filtered_vals.append(x)
     background = sum(filtered_vals) / len(filtered_vals)
     i = 2
     while (i < len(vals) - 2) :
       x = vals[i]
       if (x <= background) :
         pass
       elif ((x > vals[i-1]) and (x > vals[i-2]) and
             (x > vals[i+1]) and (x > vals[i+2])) :
         peaks.append(i)
       i += 1
     if (len(peaks) > 0) :
       # calculate the average lattice length
       center_x, center_y = self.get_beam_center_mm()
       distances = []
       i = 1
       while (i < len(peaks)) :
         x1,y1 = img_coords[peaks[i-1]]
         x2,y2 = img_coords[peaks[i]]
         rs_distance = rstbx.utils.reciprocal_space_distance(x1, y1, x2, y2,
           wavelength=self.get_wavelength(),
           center_x=center_x,
           center_y=center_y,
           distance=self.get_detector_distance(),
           detector_two_theta=self.get_detector_2theta(),
           distance_is_corrected=True)
         assert (rs_distance > 0)
         distances.append(1 / rs_distance)
         i += 1
       lattice_length = sum(distances) / len(distances)
    # note: x1/y1/x2/y2 may have been reassigned in the peak loop above, in
    # which case this distance is computed from the last pair of peaks
    distance = self.distance_between_points(x1, y1, x2, y2)
   return line_profile(vals, distance, lattice_length)
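
# Peak picking above: estimate a background level from values at most 3x the
# mean (to exclude extreme outliers), then keep points that exceed that
# background and are strict local maxima over a +/-2 neighborhood. The same
# idea as a standalone sketch:
def find_peaks(vals):
    avg = sum(vals) / len(vals)
    filtered = [v for v in vals if v <= avg * 3]
    background = sum(filtered) / len(filtered)
    peaks = []
    for i in range(2, len(vals) - 2):
        v = vals[i]
        if (v > background and
            all(v > vals[i + k] for k in (-2, -1, 1, 2))):
            peaks.append(i)
    return peaks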
Example #52
 def get_zoom_box (self, x, y, boxsize=400, mag=16) :
   #assert ((boxsize % mag) == 0)
   n_pixels = iceil(boxsize / mag)
   x0 = min(self.img_w - n_pixels, ifloor(x - (n_pixels / 2)))
   y0 = min(self.img_h - n_pixels, ifloor(y - (n_pixels / 2)))
   return (x0, y0, n_pixels, n_pixels)
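
# Worked example of the zoom-box arithmetic above: with the defaults
# boxsize=400 and mag=16 the box covers iceil(400/16.) = 25 detector pixels
# per side; the min() calls keep the box from running past the right/bottom
# image edges (x0/y0 can still go negative near the left/top edges).
Example #53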
def rmsd_start_final_plots_annealing(
      pdb_file,
      random_displacements_parameterization,
      tst_tardy_pdb_params,
      parameter_trial_table,
      cp_n_trials,
      rmsds,
      write_separate_pages):
  ttd = dict(parameter_trial_table)
  plot_data = {}
  for h in ttd["structure_factors_high_resolution"]:
    plot_data[h] = {}
    for e in ttd["emulate_cartesian"]:
      plot_data[h][e] = {}
      for d in ttd["real_space_gradients_delta_resolution_factor"]:
        plot_data[h][e][d] = {}
        for w in ttd["real_space_target_weight"]:
          plot_data[h][e][d][w] = {}
          for t in ttd["start_temperature_kelvin"]:
            plot_data[h][e][d][w][t] = {}
            for c in ttd["number_of_cooling_steps"]:
              plot_data[h][e][d][w][t][c] = group_args(
                rmsd_start=flex.double(),
                rmsd_final=flex.double())
  #
  p = tst_tardy_pdb_params
  for cp_i_trial in xrange(cp_n_trials):
    tst_tardy_comprehensive.set_parameters(
      params=p,
      trial_table=parameter_trial_table,
      cp_i_trial=cp_i_trial)
    plot = plot_data[
      p.structure_factors_high_resolution][
      p.emulate_cartesian][
      p.real_space_gradients_delta_resolution_factor][
      p.real_space_target_weight][
      p.start_temperature_kelvin][
      p.number_of_cooling_steps]
    for rmsd in rmsds[cp_i_trial]:
      plot.rmsd_start.append(rmsd[0])
      plot.rmsd_final.append(rmsd[-1])
  #
  extra_type = None
  if (pdb_file == "1yjp_box.pdb"):
    if (random_displacements_parameterization == "constrained"):
      extra_type = 0
  elif (pdb_file == "1yjp_no_water.pdb"):
    if (random_displacements_parameterization == "cartesian"):
      extra_type = 1
  if (extra_type is not None):
    extra_page = plot_grid(
      grid=(4,3), top_labels=[], more_narrow_shift=30)
  else:
    extra_page = None
  mpp = multi_page_plots(file_name="plots_h_e.pdf")
  for i_h,h in enumerate(ttd["structure_factors_high_resolution"]):
    for e in ttd["emulate_cartesian"]:
      short_label = "h%.2f_e%d" % (h, int(e))
      print short_label
      top_labels = [compose_top_label(
        pdb_file, random_displacements_parameterization, e)]
      top_labels.append("high resol: %.2f, algorithm: annealing" % h)
      page = plot_grid(grid=(4,3), top_labels=top_labels)
      plot_xy_max = iceil(h * 1.3)
      for d in ttd["real_space_gradients_delta_resolution_factor"]:
        for i_w,w in enumerate(ttd["real_space_target_weight"]):
          for i_t,t in enumerate(ttd["start_temperature_kelvin"]):
            for i_c,c in enumerate(ttd["number_of_cooling_steps"]):
              pd = plot_data[h][e][d][w][t][c]
              rf = flex.mean(pd.rmsd_final)
              label = "w%04.0f_t%.0f_c%d: %.2f" % (w, t, c, rf)
              print "  ", label
              page.process(
                grid_ij=(i_t*2+i_c, i_w),
                xy_max=plot_xy_max,
                label=label,
                data=zip(pd.rmsd_start, pd.rmsd_final))
              if (extra_type == 0
                  and h == 3.75
                  and t == 5000
                  and c == 500):
                extra_label = "w_rs = %.0f" % w
                extra_page.process(
                  grid_ij=(i_w+1, int(e)),
                  xy_max=plot_xy_max,
                  label=extra_label,
                  data=zip(pd.rmsd_start, pd.rmsd_final),
                  label_font_size=14)
              elif (extra_type == 1
                    and w == 100
                    and (h == 3.75 or h == 5.00)
                    and c == 500):
                extra_label = u"resol. = %.2f \u00C5" % h
                extra_page.process(
                  grid_ij=(i_h, int(e)),
                  xy_max=plot_xy_max,
                  label=extra_label,
                  data=zip(pd.rmsd_start, pd.rmsd_final),
                  label_font_size=14)
      if (write_separate_pages):
        page.write_to_file(file_name="plot_%s.pdf" % short_label)
      mpp.add_page(page=page)
  mpp.write_to_file()
  if (extra_page is not None):
    from reportlab.graphics.shapes import String
    if (extra_type == 0): ty = 540
    else:                 ty = 372
    extra_page.top_group.add(String(
      120, ty,
      "Torsion-Angle SA",
      fontSize=16,
      textAnchor="middle"))
    extra_page.top_group.add(String(
      280+2/3, ty,
      "Cartesian SA",
      fontSize=16,
      textAnchor="middle"))
    extra_page.write_to_file(file_name="plot_extra.pdf")
  #
  mms = min_mean_stats(
    algorithm="annealing",
    random_displacements_parameterization=random_displacements_parameterization,
    pdb_file=pdb_file)
  assert ttd["emulate_cartesian"] == (False, True)
  assert len(ttd["real_space_gradients_delta_resolution_factor"]) == 1
  for h in ttd["structure_factors_high_resolution"]:
    for d in ttd["real_space_gradients_delta_resolution_factor"]:
      for w in ttd["real_space_target_weight"]:
        for t in ttd["start_temperature_kelvin"]:
          for c in ttd["number_of_cooling_steps"]:
            mms.collect(
              rmsd_t_c=[plot_data[h][e][d][w][t][c].rmsd_final
                for e in (False, True)],
              param_values=(h,w,t,c))
  mms.finalize().show().pickle()
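
# The six nested loops above enumerate the Cartesian product of the trial
# parameter values; a more compact equivalent sketch with itertools.product
# (assuming the same ttd, group_args and flex names as above):
import itertools
param_keys = ["structure_factors_high_resolution", "emulate_cartesian",
              "real_space_gradients_delta_resolution_factor",
              "real_space_target_weight", "start_temperature_kelvin",
              "number_of_cooling_steps"]
plot_data = {}
for combo in itertools.product(*[ttd[k] for k in param_keys]):
    node = plot_data
    for v in combo[:-1]:
        node = node.setdefault(v, {})
    node[combo[-1]] = group_args(rmsd_start=flex.double(),
                                 rmsd_final=flex.double())
Example #54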
  def update_solvent_and_scale_twin(self, refine_hd_scattering, log):
    if(not self.twinned()): return
    assert len(self.f_masks()) == 1
    # Re-set all scales to unit or zero
    self.show(prefix = "update scales twin start", log = log)
    self.reset_all_scales()
    self.show(prefix = "reset f_part, k_(total,mask)", log = log)
    f_calc_data      = self.f_calc().data()
    f_calc_data_twin = self.f_calc_twin().data()
    # Initial trial set
    sc = 1000.
    ksr = [i/sc for i in range(ifloor(0*sc), iceil(0.6*sc)+1,  int(0.05*sc))]
    bsr = [i/sc for i in range(ifloor(0*sc), iceil(150.*sc)+1, int(10.*sc))]
    o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
      f_obs          = self.f_obs().data(),
      f_calc_1       = f_calc_data,
      f_calc_2       = f_calc_data_twin,
      f_mask_1       = self.arrays.core.f_masks[0].data(),
      f_mask_2       = self.arrays.core_twin.f_masks[0].data(),
      ss             = self.ss,
      twin_fraction  = self.twin_fraction,
      k_sol_range    = flex.double(ksr),
      b_sol_range    = flex.double(bsr),
      miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
      unit_cell      = self.f_obs().unit_cell(),
      r_ref          = self.r_all())
    if(o_kbu_sol.updated()):
      self.update(
        k_mask        = o_kbu_sol.k_mask(),
        k_anisotropic = o_kbu_sol.k_anisotropic())
    # Second (finer) trial set
    k_min = max(o_kbu_sol.k_sol()-0.05, 0)
    k_max = min(o_kbu_sol.k_sol()+0.05, 0.6)
    ksr = [i/sc for i in range(ifloor(k_min*sc), iceil(k_max*sc)+1, int(0.01*sc))]
    b_min = max(o_kbu_sol.b_sol()-10, 0)
    b_max = min(o_kbu_sol.b_sol()+10, 150)
    bsr = [i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(1.*sc))]
    o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
      f_obs          = self.f_obs().data(),
      f_calc_1       = f_calc_data,
      f_calc_2       = f_calc_data_twin,
      f_mask_1       = self.arrays.core.f_masks[0].data(),
      f_mask_2       = self.arrays.core_twin.f_masks[0].data(),
      ss             = self.ss,
      twin_fraction  = self.twin_fraction,
      k_sol_range    = flex.double(ksr),
      b_sol_range    = flex.double(bsr),
      miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
      unit_cell      = self.f_obs().unit_cell(),
      r_ref          = o_kbu_sol.r())
    if(o_kbu_sol.updated()):
      self.update(
        k_mask        = o_kbu_sol.k_mask(),
        k_anisotropic = o_kbu_sol.k_anisotropic())
      assert approx_equal(self.r_all(), o_kbu_sol.r())
      ##############
      # use apply_back_trace in if below
      if(self.xray_structure is not None):
        o = mmtbx.bulk_solvent.scaler.tmp(
          xray_structure = self.xray_structure,
          k_anisotropic  = o_kbu_sol.k_anisotropic(),
          k_masks        = [o_kbu_sol.k_mask()],
          ss             = self.ss)
        self.update_xray_structure(
          xray_structure = o.xray_structure,
          update_f_calc  = True)
        #############
        self.update(
          k_mask        = o.k_masks,
          k_anisotropic = o.k_anisotropic)

    self.show(prefix = "bulk-solvent and scaling", log = log)
    #
    # Add contribution from H (if present and riding). This goes to f_part2.
    #
    kh, bh = 0, 0
    if(refine_hd_scattering and
       self.need_to_refine_hd_scattering_contribution()):
      hd_selection = self.xray_structure.hd_selection()
      xrs_no_h = self.xray_structure.select(~hd_selection)
      xrs_h    = self.xray_structure.select(hd_selection)
      # Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
      data = self.f_calc().data()+self.f_masks()[0].data()*self.k_masks()[0]
      f_calc_plus_f_bulk_no_scales = self.f_calc().array(data = data)
      data = self.f_calc_twin().data()+\
        self.f_masks_twin()[0].data()*self.k_masks_twin()[0]
      f_calc_plus_f_bulk_no_scales_twin = self.f_calc_twin().array(data = data)
      # Initial FH contribution
      xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
      f_h = self.compute_f_calc(xray_structure = xrs_h)
      f_h_twin = self.compute_f_calc(xray_structure = xrs_h,
        miller_array = self.f_calc_twin())
      # Coarse sampling
      b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
      b_min = int(max(0,b_mean)*0.5)
      b_max = int(b_mean*1.5)
      sc = 1000.
      kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
      br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
      obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
        f_obs          = self.f_obs().data(),
        f_calc_1       = f_calc_plus_f_bulk_no_scales.data(),
        f_calc_2       = f_calc_plus_f_bulk_no_scales_twin.data(),
        f_mask_1       = f_h.data(),
        f_mask_2       = f_h_twin.data(),
        ss             = self.ss,
        twin_fraction  = self.twin_fraction,
        k_sol_range    = flex.double(kr),
        b_sol_range    = flex.double(br),
        miller_indices = self.f_obs().indices(), # XXX What about twin-related?
        unit_cell      = self.f_obs().unit_cell(),
        r_ref          = self.r_work())
      if(obj.updated()):
        f_part2      = f_h.array(     data = obj.k_mask()*f_h.data())
        f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
        kh, bh = obj.k_sol(), obj.b_sol()
      # Fine sampling
      k_min = max(0,obj.k_sol()-0.1)
      k_max = obj.k_sol()+0.1
      b_min = max(0,obj.b_sol()-5.)
      b_max = obj.b_sol()+5.
      kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
      br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(5.*sc))]
      obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
        f_obs          = self.f_obs().data(),
        f_calc_1       = f_calc_plus_f_bulk_no_scales.data(),
        f_calc_2       = f_calc_plus_f_bulk_no_scales_twin.data(),
        f_mask_1       = f_h.data(),
        f_mask_2       = f_h_twin.data(),
        ss             = self.ss,
        twin_fraction  = self.twin_fraction,
        k_sol_range    = flex.double(kr),
        b_sol_range    = flex.double(br),
        miller_indices = self.f_obs().indices(), # XXX What about twin-related?
        unit_cell      = self.f_obs().unit_cell(),
        r_ref          = obj.r())
      if(obj.updated()):
        f_part2      = f_h.array(     data = obj.k_mask()*f_h.data())
        f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
        kh, bh = obj.k_sol(), obj.b_sol()
      self.update_core(
        f_part2       = f_part2,
        f_part2_twin  = f_part2_twin,
        k_anisotropic = obj.k_anisotropic())
      self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
    b_cart = adptbx.u_as_b(adptbx.u_star_as_u_cart(
                             self.f_obs().unit_cell(), o_kbu_sol.u_star()))
    return group_args(
      k_sol  = o_kbu_sol.k_sol(),
      b_sol  = o_kbu_sol.b_sol(),
      b_cart = b_cart,
      k_h    = kh,
      b_h    = bh)
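
# The b_cart conversion above uses the standard relation between atomic
# displacement parameters and B-factors, B = 8 * pi**2 * <u**2>, applied after
# transforming u_star (fractional basis) to u_cart (Cartesian basis).
Example #55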
def hcp_fill_box(cb_op_original_to_sampling, float_asu, continuous_shift_flags,
                 point_distance,
                 buffer_thickness=-1, all_twelve_neighbors=False,
                 exercise_cpp=True):
  if (exercise_cpp):
    cpp = close_packing.hexagonal_sampling_generator(
      cb_op_original_to_sampling=cb_op_original_to_sampling,
      float_asu=float_asu,
      continuous_shift_flags=continuous_shift_flags,
      point_distance=point_distance,
      buffer_thickness=buffer_thickness,
      all_twelve_neighbors=all_twelve_neighbors)
  assert point_distance > 0
  if (buffer_thickness < 0):
    buffer_thickness = point_distance * (2/3. * (.5 * math.sqrt(3)))
  if (exercise_cpp):
    assert cpp.cb_op_original_to_sampling().c()==cb_op_original_to_sampling.c()
    assert cpp.float_asu().unit_cell().is_similar_to(float_asu.unit_cell())
    assert cpp.continuous_shift_flags() == continuous_shift_flags
    assert approx_equal(cpp.point_distance(), point_distance)
    assert approx_equal(cpp.buffer_thickness(), buffer_thickness)
    assert cpp.all_twelve_neighbors() == all_twelve_neighbors
  float_asu_buffer = float_asu.add_buffer(thickness=buffer_thickness)
  hex_cell = hexagonal_sampling_cell(point_distance=point_distance)
  hex_box = hexagonal_box(
    hex_cell=hex_cell,
    vertices_cart=float_asu.shape_vertices(cartesian=True))
  hex_box_buffer = hexagonal_box(
    hex_cell=hex_cell,
    vertices_cart=float_asu_buffer.shape_vertices(cartesian=True))
  box_lower = []
  box_upper = []
  for i in xrange(3):
    if (continuous_shift_flags[i]):
      box_lower.append(0)
      box_upper.append(0)
    else:
      n = iceil(abs(hex_box.max[i]-hex_box.pivot[i]))
      box_lower.append(min(-2,ifloor(hex_box_buffer.min[i]-hex_box.pivot[i])))
      box_upper.append(n+max(2,iceil(hex_box_buffer.max[i]-hex_box.max[i])))
  if (exercise_cpp):
    assert list(cpp.box_lower()) == box_lower
    assert list(cpp.box_upper()) == box_upper
  hex_to_frac_matrix = (
      matrix.sqr(float_asu.unit_cell().fractionalization_matrix())
    * matrix.sqr(hex_cell.orthogonalization_matrix()))
  sites_frac = flex.vec3_double()
  for point in flex.nested_loop(begin=box_lower,
                                end=box_upper,
                                open_range=False):
    site_hex = matrix.col(hex_box.pivot) \
             + matrix.col(hex_indices_as_site(point))
    site_frac = hex_to_frac_matrix * site_hex
    if (float_asu_buffer.is_inside(site_frac)):
      sites_frac.append(site_frac)
    elif (all_twelve_neighbors):
      for offset in [(1,0,0),(1,1,0),(0,1,0),(-1,0,0),(-1,-1,0),(0,-1,0),
                     (0,0,1),(-1,-1,1),(0,-1,1),
                     (0,0,-1),(-1,-1,-1),(0,-1,-1)]:
        offset_hex = hex_indices_as_site(offset, layer=point[2])
        offset_frac = hex_to_frac_matrix * matrix.col(offset_hex)
        other_site_frac = site_frac + offset_frac
        if (float_asu.is_inside(other_site_frac)):
          sites_frac.append(site_frac)
          break
  assert sites_frac.size() > 0
  rt = cb_op_original_to_sampling.c_inv().as_double_array()
  sites_frac = rt[:9] * sites_frac
  sites_frac += rt[9:]
  if (exercise_cpp):
    assert not cpp.at_end()
    cpp_sites_frac = cpp.all_sites_frac()
    assert cpp.at_end()
    assert cpp_sites_frac.size() == sites_frac.size()
    assert approx_equal(cpp_sites_frac, sites_frac)
    cpp.restart()
    assert not cpp.at_end()
    assert approx_equal(cpp.next_site_frac(), sites_frac[0])
    assert cpp.count_sites() == sites_frac.size()-1
    assert cpp.at_end()
    cpp.restart()
    n = 0
    for site in cpp: n += 1
    assert n == sites_frac.size()
  return sites_frac
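
# On the default buffer above: point_distance * (2/3. * (.5 * math.sqrt(3)))
# simplifies to point_distance / sqrt(3), the circumradius of an equilateral
# triangle of side point_distance; plausibly the largest in-plane distance
# from any point to its nearest site in a triangular layer. Quick check:
import math
assert abs(2/3. * (.5 * math.sqrt(3)) - 1 / math.sqrt(3)) < 1e-12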
Example #56
def region_density_correlation(
      large_unit_cell,
      large_d_min,
      large_density_map,
      sites_cart,
      site_radii,
      work_scatterers):
  sites_frac_large = large_unit_cell.fractionalize(sites_cart)
  large_frac_min = sites_frac_large.min()
  large_frac_max = sites_frac_large.max()
  large_n_real = large_density_map.focus()
  from scitbx import fftpack
  from libtbx.math_utils import ifloor, iceil
  large_ucp = large_unit_cell.parameters()
  small_n_real = [0,0,0]
  small_origin_in_large_grid = [0,0,0]
  small_abc = [0,0,0]
  sites_frac_shift = [0,0,0]
  for i in xrange(3):
    grid_step = large_ucp[i] / large_n_real[i]
    buffer = large_d_min / grid_step
    grid_min = ifloor(large_frac_min[i] * large_n_real[i] - buffer)
    grid_max = iceil(large_frac_max[i] * large_n_real[i] + buffer)
    min_grid = grid_max - grid_min + 1
    small_n_real[i] = fftpack.adjust_gridding(min_grid=min_grid, max_prime=5)
    if (small_n_real[i] < large_n_real[i]):
      shift_min = (small_n_real[i] - min_grid) // 2
      small_origin_in_large_grid[i] = grid_min - shift_min
      small_abc[i] = small_n_real[i] * grid_step
      sites_frac_shift[i] = small_origin_in_large_grid[i] / float(large_n_real[i])
    else:
      small_n_real[i] = large_n_real[i]
      small_origin_in_large_grid[i] = 0
      small_abc[i] = large_ucp[i]
      sites_frac_shift[i] = 0
  sites_cart_shift = large_unit_cell.orthogonalize(sites_frac_shift)
  sites_cart_small = sites_cart - sites_cart_shift
  from cctbx import xray
  small_xray_structure = xray.structure(
    crystal_symmetry=crystal.symmetry(
      unit_cell=tuple(small_abc)+large_ucp[3:],
      space_group_symbol="P1"),
    scatterers=work_scatterers)
  small_xray_structure.set_sites_cart(sites_cart=sites_cart_small)
  small_f_calc = small_xray_structure.structure_factors(
    d_min=large_d_min).f_calc()
  small_gridding = crystal_gridding(
    unit_cell=small_f_calc.unit_cell(),
    space_group_info=small_f_calc.space_group_info(),
    pre_determined_n_real=small_n_real)
  from cctbx import miller
  small_fft_map = miller.fft_map(
    crystal_gridding=small_gridding,
    fourier_coefficients=small_f_calc)
  small_fft_map.apply_sigma_scaling()
  small_map = small_fft_map.real_map_unpadded()
  grid_indices = grid_indices_around_sites(
    unit_cell=small_xray_structure.unit_cell(),
    fft_n_real=small_n_real,
    fft_m_real=small_n_real,
    sites_cart=sites_cart_small,
    site_radii=site_radii)
  small_copy_from_large_map = copy(
    map_unit_cell=large_density_map,
    first=small_origin_in_large_grid,
    last=matrix.col(small_origin_in_large_grid)
       + matrix.col(small_n_real)
       - matrix.col((1,1,1)))
  assert small_copy_from_large_map.all() == small_map.all()
  corr = flex.linear_correlation(
    x=small_map.select(grid_indices),
    y=small_copy_from_large_map.select(grid_indices))
  if (not corr.is_well_defined()):
    return None
  return corr.coefficient()
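
# flex.linear_correlation above computes the Pearson correlation coefficient
# of the two sets of selected map values; a plain-Python sketch of the same
# quantity:
import math

def pearson(x, y):
    n = len(x)
    mx = sum(x) / n
    my = sum(y) / n
    cov = sum((a - mx) * (b - my) for a, b in zip(x, y))
    vx = sum((a - mx) ** 2 for a in x)
    vy = sum((b - my) ** 2 for b in y)
    if vx == 0 or vy == 0:
        return None  # analogous to corr.is_well_defined() returning False
    return cov / math.sqrt(vx * vy)
Example #57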
def rmsd_start_final_plots_minimization(
      pdb_file,
      random_displacements_parameterization,
      tst_tardy_pdb_params,
      parameter_trial_table,
      cp_n_trials,
      rmsds,
      rmsd_n_n,
      write_separate_pages):
  ttd = dict(parameter_trial_table)
  plot_data = {}
  for h in ttd["structure_factors_high_resolution"]:
    plot_data[h] = {}
    for e in ttd["emulate_cartesian"]:
      plot_data[h][e] = {}
      for d in ttd["real_space_gradients_delta_resolution_factor"]:
        plot_data[h][e][d] = {}
        for w in ttd["real_space_target_weight"]:
          plot_data[h][e][d][w] = group_args(
            rmsd_start=flex.double(),
            rmsd_final=flex.double(),
            rmsd_n=flex.double())
  #
  p = tst_tardy_pdb_params
  for cp_i_trial in xrange(cp_n_trials):
    tst_tardy_comprehensive.set_parameters(
      params=p,
      trial_table=parameter_trial_table,
      cp_i_trial=cp_i_trial)
    plot = plot_data[
      p.structure_factors_high_resolution][
      p.emulate_cartesian][
      p.real_space_gradients_delta_resolution_factor][
      p.real_space_target_weight]
    for rmsd in rmsds[cp_i_trial]:
      plot.rmsd_start.append(rmsd[0])
      plot.rmsd_final.append(rmsd[-1])
      plot.rmsd_n.append(rmsd[min(len(rmsd)-1, rmsd_n_n)])
  #
  mpp = multi_page_plots(file_name="plots_h_e.pdf")
  for e in ttd["emulate_cartesian"]:
    short_label = "e%d" % int(e)
    print short_label
    top_labels = [
      compose_top_label(
        pdb_file, random_displacements_parameterization, e),
      "algorithm: minimization"]
    page = plot_grid(grid=(4,3), top_labels=top_labels)
    w_d_ranks_rn = {}
    w_d_ranks_rf = {}
    for d in ttd["real_space_gradients_delta_resolution_factor"]:
      for w in ttd["real_space_target_weight"]:
        w_d_ranks_rn[(w,d)] = []
        w_d_ranks_rf[(w,d)] = []
    for i_h,h in enumerate(ttd["structure_factors_high_resolution"]):
      plot_xy_max = iceil(h * 1.3)
      page_rn = []
      page_rf = []
      for d in ttd["real_space_gradients_delta_resolution_factor"]:
        for i_w,w in enumerate(ttd["real_space_target_weight"]):
          pd = plot_data[h][e][d][w]
          rn = flex.mean(pd.rmsd_n)
          rf = flex.mean(pd.rmsd_final)
          page_rn.append((rn, (w,d)))
          page_rf.append((rf, (w,d)))
          label = "h%.2f_w%04.0f: %d=%.2f, %.2f" % (h, w, rmsd_n_n, rn, rf)
          print "  ", label
          page.process(
            grid_ij=(i_h,i_w),
            xy_max=plot_xy_max,
            label=label,
            data=zip(pd.rmsd_start, pd.rmsd_final))
      def cmp_rx(a, b):
        result = cmp(a[0], b[0])
        if (result == 0):
          result = cmp(a[1], b[1])
        return result
      page_rn.sort(cmp_rx)
      page_rf.sort(cmp_rx)
      for i,(r,w_d) in enumerate(page_rn):
        w_d_ranks_rn[w_d].append(i)
      for i,(r,w_d) in enumerate(page_rf):
        w_d_ranks_rf[w_d].append(i)
    if (write_separate_pages):
      page.write_to_file(file_name="plot_%s.pdf" % short_label)
    mpp.add_page(page=page)
    w_d_ranks_rn = w_d_ranks_rn.items()
    w_d_ranks_rf = w_d_ranks_rf.items()
    def cmp_w_d_ranks(a, b):
      result = cmp(sum(a[1]), sum(b[1]))
      if (result == 0):
        result = cmp(sorted(a[1]), sorted(b[1]))
        if (result == 0):
          result = cmp(a[1], b[1])
          if (result == 0):
            result = cmp(a[0], b[0])
      return result
    w_d_ranks_rn.sort(cmp_w_d_ranks)
    w_d_ranks_rf.sort(cmp_w_d_ranks)
    print "emulate_cartesian = %s" % str(e)
    for prefix,w_d_ranks in [("rn:", w_d_ranks_rn),
                             ("rf:", w_d_ranks_rf)]:
      for w_d,ranks in w_d_ranks:
        print prefix, "%4.0f %4.2f" % w_d, "%2d" % sum(ranks), \
          "[" + ", ".join(["%2d" % r for r in ranks]) + "]"
        prefix = "   "
      print
  mpp.write_to_file()
  #
  mms = min_mean_stats(
    algorithm="minimization",
    random_displacements_parameterization=random_displacements_parameterization,
    pdb_file=pdb_file)
  assert ttd["emulate_cartesian"] == (False, True)
  assert len(ttd["real_space_gradients_delta_resolution_factor"]) == 1
  for h in ttd["structure_factors_high_resolution"]:
    for d in ttd["real_space_gradients_delta_resolution_factor"]:
      for w in ttd["real_space_target_weight"]:
        mms.collect(
          rmsd_t_c=[plot_data[h][e][d][w].rmsd_final for e in (False, True)],
          param_values=(h,w))
  mms.finalize().show().pickle()
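
# The cmp-style comparators above are Python 2 idioms; under Python 3 the same
# orderings can be expressed as sort keys (sketch with identical tie-breaking):
page_rn.sort(key=lambda item: (item[0], item[1]))
w_d_ranks_rn.sort(key=lambda item: (sum(item[1]), sorted(item[1]),
                                    item[1], item[0]))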
Example #58
 def __init__ (self,
     file_name,
     ringer_result=None,
     sampling_angle=5,
     out_dir=None,
     out=sys.stdout,
     quiet=False) :
   self.thresholds = waves = None
   if (ringer_result is not None) :
     waves, self.thresholds = process_raw_results(ringer_result, out=out)
   else :
     assert (file_name is not None)
     waves, self.thresholds = parse_pickle(file_name, out=out)
   assert (out_dir is None) or os.path.isdir(out_dir)
   if (out_dir is None) and (not quiet) :
     out_dir = file_name + ".output"
     if (not os.path.isdir(out_dir)) :
       os.makedirs(out_dir)
   Weird_residues=OrderedDict()
   self.peak_count={}
   residue_peak_count={}
   rotamer_ratios_residues={}
   zscores_residues={}
   for i in Residue_codes:
     residue_peak_count[i]={}
     rotamer_ratios_residues[i]=[]
     zscores_residues[i]=[]
   binned_peaks={}
   n_angles = iceil(360.0 / sampling_angle)
   self.zscores=[]
   self.rotamer_ratios=[]
   self.non_zero_thresholds=[]
   self.length = len(waves)
   self.peaks=OrderedDict()
   # calculate peaks and histogram
   for threshold in self.thresholds:
     if (not quiet) :
       print >>out, ""
       print >>out, "===== Calculating Statistics for Threshold %.3f =====" %\
         threshold
     self.peaks[threshold]=Peaklist()
     Weird_residues[threshold]=Peaklist()
     self.peak_count[threshold] = [0]*n_angles
     for i in Residue_codes:
       residue_peak_count[i][threshold]=[0]*n_angles
     for i in waves:
       self.peaks[threshold].append_lists(calculate_peaks(i, threshold))
     for peak in self.peaks[threshold].get_peaks():
       self.peak_count[threshold][peak.chi_value] += 1
       residue_peak_count[peak.resname][threshold][peak.chi_value]+=1
       if ((peak.chi_value < 6) or
           (peak.chi_value > 18 and peak.chi_value < 30) or
           (peak.chi_value > 42 and peak.chi_value < 54) or
           (peak.chi_value > 66)):
         Weird_residues[threshold].peaks.append(peak)
     # Calculate the binned peaks and ratios
     binned_peaks[threshold] = calculate_binned_counts(self.peak_count[threshold], 60)
     # print "For threshold %.3f" % threshold
     # print "Sample size = %d" % sum(binned_peaks[threshold])
     zscore_n, rotamer_ratio_n = statistic(binned_peaks[threshold], n_angles)
     if rotamer_ratio_n==0:
       break
     for i in Residue_codes:
       rotamer_ratios_residues_n, zscores_n = calc_ratio(residue_peak_count[i][threshold], sampling_angle)
       rotamer_ratios_residues[i].append(rotamer_ratios_residues_n)
       zscores_residues[i].append(zscores_n)
     self.non_zero_thresholds.append(threshold)
     self.zscores.append(zscore_n)
     self.rotamer_ratios.append(rotamer_ratio_n)
     if (not quiet) :
       print >> out, "===== Plotting Histogram for Threshold %.3f =====" % \
         threshold
       out_file = os.path.join(out_dir, "%.3f.histogram.png" % threshold)
       plot_peaks(
         peak_count=self.peak_count[threshold],
         file_name=out_file,
         threshold=threshold,
         first=60,
         title=RMSD_statistic(self.peaks[threshold].peaks),
         n_angles=n_angles)
       print >> out, "Saved plot to %s" % out_file
     # plot_rotamers(binned_peaks[threshold], file, threshold, args.first_rotamer)
   #   print "Outliers at threshold %.2f: %s" % (threshold, str(Weird_residues[threshold]))
   if (not quiet) :
     print >> out, ""
     print >> out, "===== Plotting Statistics Across Thresholds ====="
     out_file = os.path.join(out_dir, "Total.threshold_scan.png")
     plot_progression(
       non_zero_thresholds=self.non_zero_thresholds,
       rotamer_ratios=self.rotamer_ratios,
       file_name=out_file,
       zscores=self.all_scores)
     print >> out, "Saved plot to %s" % out_file
   # for i in Residue_codes:
   #   plot_progression(non_zero_thresholds, rotamer_ratios_residues[i], file, zscores_residues[i], i)
     print >> out, ""
     print >> out, "===== Writing Pickles Out ====="
     easy_pickle.dump(out_dir + '/Outliers.pkl',Weird_residues)
     print >> out, 'Wrote ' + out_dir + '/Outliers.pkl'
     easy_pickle.dump(out_dir + '/rotamer_ratios.pkl', self.rotamer_ratios)
     print >> out, 'Wrote ' + out_dir + '/rotamer_ratios.pkl'
     easy_pickle.dump(out_dir + '/zscores.pkl', self.zscores)
     print >> out, 'Wrote ' + out_dir + '/zscores.pkl'
     easy_pickle.dump(out_dir + '/emringer_scores.pkl', self.all_scores)
     print >> out, 'Wrote ' + out_dir + '/emringer_scores.pkl'
     easy_pickle.dump(out_dir + '/thresholds.pkl', self.thresholds)
     print >> out, 'Wrote ' + out_dir + '/thresholds.pkl'
     easy_pickle.dump(out_dir + '/peak_counts.pkl', self.peak_count)
     print >> out, 'Wrote ' + out_dir + '/peak_counts.pkl'
   self.zscore_max = max(self.zscores)
   self._zscore_max_index = self.zscores.index(self.zscore_max)