def get_map_stats_for_atoms (self, atoms) :
  from cctbx import maptbx
  from scitbx.array_family import flex
  sites_cart = flex.vec3_double()
  sites_cart_nonH = flex.vec3_double()
  values_2fofc = flex.double()
  values_fofc = flex.double()
  for atom in atoms :
    sites_cart.append(atom.xyz)
    if (not atom.element.strip() in ["H","D"]) : #XXX trap: neutrons?
      sites_cart_nonH.append(atom.xyz)
      site_frac = self.unit_cell.fractionalize(atom.xyz)
      values_2fofc.append(self.f_map.eight_point_interpolation(site_frac))
      values_fofc.append(self.diff_map.eight_point_interpolation(site_frac))
  if (len(sites_cart_nonH) == 0) :
    return None
  sel = maptbx.grid_indices_around_sites(
    unit_cell=self.unit_cell,
    fft_n_real=self.f_map.focus(),
    fft_m_real=self.f_map.all(),
    sites_cart=sites_cart,
    site_radii=get_atom_radii(atoms, self.atom_radius))
  f_map_sel = self.f_map.select(sel)
  model_map_sel = self.model_map.select(sel)
  diff_map_sel = self.diff_map.select(sel)
  cc = flex.linear_correlation(x=f_map_sel, y=model_map_sel).coefficient()
  return group_args(cc=cc,
    mean_2fofc=flex.mean(values_2fofc),
    mean_fofc=flex.mean(values_fofc))
def run(args):
  assert len(args) == 2
  import iotbx.pdb
  lists_of_atoms = []
  for file_name in args:
    pdb_inp = iotbx.pdb.input(file_name=file_name)
    pdb_inp.construct_hierarchy().only_residue() # raises if more than one
    lists_of_atoms.append(pdb_inp.atoms())
  lookup_dict = {}
  for atom_i in lists_of_atoms[0]:
    lookup_dict[atom_i.name] = atom_i
  atom_pairs = []
  for atom_j in lists_of_atoms[1]:
    atom_i = lookup_dict.get(atom_j.name)
    if (atom_i is not None):
      atom_pairs.append((atom_i, atom_j))
  assert len(atom_pairs) > 2
  from scitbx.array_family import flex
  reference_sites = flex.vec3_double()
  other_sites = flex.vec3_double()
  for pair in atom_pairs:
    reference_sites.append(pair[0].xyz)
    other_sites.append(pair[1].xyz)
  import scitbx.math.superpose
  fit = scitbx.math.superpose.least_squares_fit(
    reference_sites=reference_sites,
    other_sites=other_sites)
  rmsd = fit.other_sites_best_fit().rms_difference(reference_sites)
  print "Number of atoms first pdb: ", lists_of_atoms[0].size()
  print "Number of atoms second pdb:", lists_of_atoms[1].size()
  print "Number of superposed atoms:", reference_sites.size()
  print "RMSD: %.3f" % rmsd
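# A minimal sketch (not part of the original) of the superposition call used
# above; the coordinates are made up. A pure rigid shift should be recovered
# exactly, giving an RMSD of ~0.
def exercise_least_squares_fit_sketch():
  from scitbx.array_family import flex
  from scitbx.math import superpose
  reference_sites = flex.vec3_double([(0,0,0), (1,0,0), (0,1,0), (0,0,1)])
  other_sites = reference_sites + (5.0, -2.0, 3.0)  # same points, shifted
  fit = superpose.least_squares_fit(
    reference_sites=reference_sites, other_sites=other_sites)
  assert fit.other_sites_best_fit().rms_difference(reference_sites) < 1.e-6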
def tst_nsd():
  moving1 = flex.vec3_double()
  moving2 = flex.vec3_double()
  fixed = flex.vec3_double()
  max_noise = 0
  for ii in range(10):
    noise = flex.random_double(3)*2-1.0
    if noise.norm() > max_noise:
      max_noise = noise.norm()
    xyz = flex.random_double(3)*5
    fixed.append( list(xyz) )
    moving1.append( list(xyz + noise/10) )
    moving2.append( list(xyz + noise/2) )
  ne = nsd_engine(fixed)
  a = ne.nsd(fixed)
  b = ne.nsd(moving1)
  c = ne.nsd(moving2)
  assert abs(a)<1e-6
  assert(b<=c)
  matrix = euler.zyz_matrix(0.7,1.3,2.1)
  fixed_r = matrix*moving1+(8,18,28)
  fitter = nsd_rigid_body_fitter( fixed,fixed_r)
  nxyz = fitter.best_shifted()
  dd = nxyz[0:fixed.size()]-fixed
  dd = dd.norms()
  dd = flex.max(dd)
  assert (dd<2.00*max_noise/10)
def additional_check_Gendron_2001(r_i, r_j):
  # distance between rings < 5.5A
  ring_i = []
  ring_j = []
  center_i = flex.vec3_double([[0,0,0]])
  center_j = flex.vec3_double([[0,0,0]])
  for ring, res, center in [(ring_i, r_i, center_i),
                            (ring_j, r_j, center_j)]:
    for atom_name in [" N1 ", " C2 ", " N3 ", " C4 ", " C5 ", " C6 ",
                      " N9 ", " C8 ", " N7 "]:
      atom = res.find_atom_by(name=atom_name)
      if atom is not None:
        center += flex.vec3_double([atom.xyz])
        ring.append(flex.vec3_double([atom.xyz]))
    if len(ring) > 2:
      center *= 1./float(len(ring))
    else:
      return False
  d = center_j-center_i
  dn = d.norm()
  # angle between 2 normals is < 30 degrees
  r_i1 = ring_i[0]-center_i
  r_i2 = ring_i[1]-center_i
  n_i = r_i1.cross(r_i2)
  r_j1 = ring_j[0]-center_j
  r_j2 = ring_j[1]-center_j
  n_j = r_j1.cross(r_j2)
  cos_ni_nj = n_i.dot(n_j)/n_i.norm()/n_j.norm()
  angle_ni_nj_degrees = math.degrees(math.acos(abs(cos_ni_nj[0])))
  # angle between center line and normal
  cos_d_ni = d.dot(n_i)/dn/n_i.norm()
  angle_d_ni_degrees = math.degrees(math.acos(abs(cos_d_ni[0])))
  result = (dn < 5.5 and
            angle_ni_nj_degrees < 30 and
            angle_d_ni_degrees < 40)
  return result
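# A minimal sketch (not part of the original) of the size-1 vec3_double
# arithmetic behind the normal-angle test above; the two "normals" are
# made up.
def exercise_normal_angle_sketch():
  import math
  from scitbx.array_family import flex
  r1 = flex.vec3_double([(1, 0, 0)])
  r2 = flex.vec3_double([(0, 1, 0)])
  n_i = r1.cross(r2)                      # ring normal, here (0, 0, 1)
  n_j = flex.vec3_double([(0, 0.1, 1)])   # second, slightly tilted normal
  cos_ni_nj = n_i.dot(n_j)/n_i.norm()/n_j.norm()
  angle = math.degrees(math.acos(abs(cos_ni_nj[0])))
  assert angle < 30  # would satisfy the Gendron normal-angle criterion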
def compute_functional_and_gradients(self):
  if(self.sites):
    self.fmodel.xray_structure.set_sites_cart(
      sites_cart = flex.vec3_double(self.x))
  if(self.u_iso):
    self.fmodel.xray_structure.set_u_iso(values = self.x)
  self.fmodel.update_xray_structure(
    xray_structure = self.fmodel.xray_structure,
    update_f_calc = True)
  tgx = self.x_target_functor(compute_gradients=True)
  if(self.sites):
    tx = tgx.target_work()
    gx = flex.vec3_double(tgx.\
      gradients_wrt_atomic_parameters(site=True).packed())
    f = tx
    g = gx
  if(self.u_iso):
    tx = tgx.target_work()
    gx = tgx.gradients_wrt_atomic_parameters(u_iso=True)
    f = tx
    g = gx
  # When we have MTRIX records, use only the
  # gradients of the first NCS copy
  ncs_end = len(g)//(self.n_ncs_mtrix+1)
  assert ncs_end*(self.n_ncs_mtrix+1)==len(g)
  g = g[:ncs_end]
  return f, g.as_double()
def set_points_and_lines(self):
  self.sim_as = simulation()
  self.sim_ac = simulation()
  self.points = flex.vec3_double(self.sim_as.sites_cart_moved_F01)
  self.points.extend(flex.vec3_double(self.sim_ac.sites_cart_moved_F01))
  self.points.extend(flex.vec3_double(self.sim_as.sites_cart_wells_F01))
  def add_line(i, j, color):
    line = (i,j)
    self.line_i_seqs.append(line)
    self.line_colors[line] = color
  self.labels = []
  n = len(self.sim_as.sites_cart_F1)
  offs = 0
  for prefix,color in [("S",(1,0,0)),("C",(0,0,1)),("W",(0,1,0))]:
    for i in xrange(n):
      add_line(offs+i, offs+(i+1)%n, color)
      self.labels.append(prefix+str(i))
    offs += n
  mcs = minimum_covering_sphere(self.points, epsilon=1.e-2)
  self.minimum_covering_sphere = sphere_3d(
    center=mcs.center(),
    radius=mcs.radius()*1.3)
  self.flag_show_minimum_covering_sphere = False
  self.flag_show_rotation_center = False
  self.steps_per_tab = 8
  print "Press and hold Tab key to run the simulation."
  print "Press Shift-Tab to increase speed."
  print "Press Ctrl-Tab to decrease speed."
def remove_far_atoms(list_a, list_b,
                     res_list_a, res_list_b,
                     ref_sites, other_sites,
                     residue_match_radius=4.0):
  """
  When comparing lists of matching atoms, remove residues where some atoms
  are locally misaligned, for example when matching residues are
  perpendicular to each other rather than being close to parallel.

  The criterion used: for each pair of matching residues, the difference
  between the distance of the farthest matching atom pair and the distance
  of the closest pair must be < residue_match_radius.

  Args:
    list_a, list_b (list of list): list of residues atoms
    res_list_a, res_list_b (list): list of residues in chains
    ref_sites, other_sites (flex.vec3): atoms coordinates
    residue_match_radius (float): maximum allowed distance difference

  Returns:
    Updated arguments:
      sel_a, sel_b,
      res_list_a_new, res_list_b_new,
      ref_sites_new, other_sites_new
  """
  # check every residue for consecutive distance
  res_list_a_new = []
  res_list_b_new = []
  ref_sites_new = flex.vec3_double([])
  other_sites_new = flex.vec3_double([])
  sel_a = flex.size_t([])
  sel_b = flex.size_t([])
  current_pos = 0
  for i in xrange(len(res_list_a)):
    # find the matching atoms from each residue (work on small sections)
    res_len = list_a[i].size()
    res_ref_sites = ref_sites[current_pos:current_pos+res_len]
    res_other_sites = other_sites[current_pos:current_pos+res_len]
    current_pos += res_len
    xyz_diff = abs(res_ref_sites.as_double() - res_other_sites.as_double())
    (min_d,max_d,_) = xyz_diff.min_max_mean().as_tuple()
    if (max_d - min_d) <= residue_match_radius:
      ref_sites_new.extend(res_ref_sites)
      other_sites_new.extend(res_other_sites)
      sel_a.extend(list_a[i])
      sel_b.extend(list_b[i])
      res_list_a_new.append(res_list_a[i])
      res_list_b_new.append(res_list_b[i])
  return sel_a,sel_b,res_list_a_new,res_list_b_new,ref_sites_new,other_sites_new
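# A toy usage sketch (not part of the original), assuming this module's
# remove_far_atoms and flex imports: two residues of three atoms each, where
# the second pair is locally distorted and gets filtered out.
def exercise_remove_far_atoms_sketch():
  list_a = [flex.size_t([0,1,2]), flex.size_t([3,4,5])]
  list_b = [flex.size_t([0,1,2]), flex.size_t([3,4,5])]
  res_list_a = ["A1", "A2"]   # opaque residue labels, for illustration only
  res_list_b = ["B1", "B2"]
  ref_sites = flex.vec3_double(
    [(0,0,0),(1,0,0),(2,0,0), (10,0,0),(11,0,0),(12,0,0)])
  # residue 1 matches well (uniform shift); residue 2 is locally distorted
  other_sites = flex.vec3_double(
    [(0,0,1),(1,0,1),(2,0,1), (10,0,0),(11,5,0),(12,0,9)])
  sel_a, sel_b, res_a, res_b, ref_new, other_new = remove_far_atoms(
    list_a, list_b, res_list_a, res_list_b, ref_sites, other_sites,
    residue_match_radius=4.0)
  assert res_a == ["A1"] and ref_new.size() == 3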
def exercise_lbfgs_simple (mon_lib_srv, ener_lib, verbose=False) :
  # three peptides:
  #  1 = poly-ALA, favored
  #  2 = poly-ALA, outlier
  #  3 = poly-TRP, outlier
  #
  # Note that the ramalyze score for the first actually gets slightly worse,
  # but it's still good and we're starting from an excellent score anyway.
  #
  #residuals = [0.00024512, 307.616444, 294.913714]
  residuals = [0.00168766995882, 186.24718562, 177.259069807]
  for i, peptide in enumerate([pdb1, pdb2, pdb3]) :
    pdb_in = iotbx.pdb.input(source_info="peptide",
      lines=flex.split_lines(peptide))
    params = pdb_interpretation.master_params.extract()
    processed_pdb_file = pdb_interpretation.process(
      mon_lib_srv=mon_lib_srv,
      ener_lib=ener_lib,
      params=params,
      pdb_inp=pdb_in,
      log=StringIO())
    log = StringIO()
    pdb_hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy
    atoms = pdb_hierarchy.atoms()
    sites_cart_1 = atoms.extract_xyz().deep_copy()
    gradients_fd = flex.vec3_double(sites_cart_1.size(), (0,0,0))
    gradients_an = flex.vec3_double(sites_cart_1.size(), (0,0,0))
    params = ramachandran.master_phil.fetch().extract()
    rama_manager = ramachandran.ramachandran_manager(
      pdb_hierarchy, None, params, log)
    assert rama_manager.get_n_proxies() == 1
    residual_an = rama_manager.target_and_gradients(
      unit_cell=None,
      sites_cart=sites_cart_1,
      gradient_array=gradients_an)
    assert approx_equal(residual_an, residuals[i], eps=0.00001)
  if verbose :
    print ""
  for i, peptide in enumerate([pdb1, pdb2, pdb3]) :
    pdb_in = iotbx.pdb.input(source_info="peptide",
      lines=flex.split_lines(peptide))
    o = benchmark_structure(pdb_in, mon_lib_srv, ener_lib, verbose)
    phi0, psi0 = o.r0.results[0].phi, o.r0.results[0].psi
    phi1, psi1 = o.r1.results[0].phi, o.r1.results[0].psi
    phi2, psi2 = o.r2.results[0].phi, o.r2.results[0].psi
    r0 = o.r0.results[0].score
    r1 = o.r1.results[0].score
    r2 = o.r2.results[0].score
    if verbose :
      print "peptide %d" % (i+1)
      print " before: rmsd_bonds=%-6.4f rmsd_angles=%-6.3f" % (o.b0,o.a0)
      print "         phi=%-6.1f psi=%-6.1f score=%-.2f" % (phi0, psi0, r0)
      print " simple: rmsd_bonds=%-6.4f rmsd_angles=%-6.3f" % (o.b1,o.a1)
      print "         phi=%-6.1f psi=%-6.1f score=%-.2f" % (phi1, psi1, r1)
      print " + Rama: rmsd_bonds=%-6.4f rmsd_angles=%-6.3f" % (o.b2,o.a2)
      print "         phi=%-6.1f psi=%-6.1f score=%-.2f" % (phi2, psi2, r2)
      print ""
def _predict_core(self, reflections):
  """perform prediction for the specified reflections"""

  # update the reflection_predictor with the scan-independent part of the
  # current geometry
  self._reflection_predictor.update()

  # duck-typing for VaryingCrystalPredictionParameterisation. Only this
  # class has a compose(reflections) method. Sets ub_matrix (and caches
  # derivatives).
  try:
    self._prediction_parameterisation.compose(reflections)
  except AttributeError:
    pass

  # do prediction (updates reflection table in situ). Scan-varying prediction
  # is done automatically if the crystal has scan-points (assuming reflections
  # have ub_matrix set)
  self._reflection_predictor.predict(reflections)

  x_obs, y_obs, phi_obs = reflections['xyzobs.mm.value'].parts()
  x_calc, y_calc, phi_calc = reflections['xyzcal.mm'].parts()

  # do not wrap around multiples of 2*pi; keep the full rotation
  # from zero to differentiate repeat observations.
  resid = phi_calc - (flex.fmod_positive(phi_obs, TWO_PI))
  # ensure this is the smaller of two possibilities
  resid = flex.fmod_positive((resid + pi), TWO_PI) - pi
  phi_calc = phi_obs + resid
  # put back in the reflections
  reflections['xyzcal.mm'] = flex.vec3_double(x_calc, y_calc, phi_calc)

  # update xyzcal.px with the correct z_px values in keeping with above
  experiments = self._reflection_predictor._experiments
  for i, expt in enumerate(experiments):
    scan = expt.scan
    sel = (reflections['id'] == i)
    x_px, y_px, z_px = reflections['xyzcal.px'].select(sel).parts()
    if scan is not None:
      z_px = scan.get_array_index_from_angle(phi_calc.select(sel), deg=False)
    else:
      # must be a still image, z centroid not meaningful
      z_px = phi_calc.select(sel)
    xyzcal_px = flex.vec3_double(x_px, y_px, z_px)
    reflections['xyzcal.px'].set_selected(sel, xyzcal_px)

  # calculate residuals and assign columns
  reflections['x_resid'] = x_calc - x_obs
  reflections['x_resid2'] = reflections['x_resid']**2
  reflections['y_resid'] = y_calc - y_obs
  reflections['y_resid2'] = reflections['y_resid']**2
  reflections['phi_resid'] = phi_calc - phi_obs
  reflections['phi_resid2'] = reflections['phi_resid']**2

  return reflections
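# A minimal sketch (not part of the original) of just the angle wrapping used
# above: map a raw phi difference into (-pi, pi] so that residuals take the
# shorter way around the circle. Values are made up.
def exercise_angle_residual_wrapping():
  from math import pi
  from scitbx.array_family import flex
  TWO_PI = 2.0 * pi
  phi_obs = flex.double([0.1, 6.2])   # one angle near 0, one near 2*pi
  phi_calc = flex.double([6.2, 0.1])
  resid = phi_calc - flex.fmod_positive(phi_obs, TWO_PI)
  resid = flex.fmod_positive(resid + pi, TWO_PI) - pi
  # raw differences are +/-6.1 rad; wrapped residuals are ~ -/+0.18 rad
  assert flex.max(flex.abs(resid)) < 0.2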
def compute_per_atom(h1, h2, log):
  as1 = list(h1.atoms())
  as2 = list(h2.atoms())
  if(len(as1)==1): return
  print >> log, "Per atom:"
  for a1, a2 in zip(as1, as2):
    r1 = flex.vec3_double([a1.xyz])
    r2 = flex.vec3_double([a2.xyz])
    d = flex.sqrt((r1 - r2).dot())
    print >> log, a1.format_atom_record()[:30], ": %-8.3f"%d[0]
def centroid_px_to_mm_panel(panel, scan, position, variance, sd_error):
  '''Convenience function to calculate centroid in mm/rad from px'''
  from operator import mul

  # Get the pixel to millimeter function
  pixel_size = panel.get_pixel_size()
  if scan is None:
    oscillation = (0,0)
  else:
    oscillation = scan.get_oscillation(deg=False)
  scale = pixel_size + (oscillation[1],)
  scale2 = map(mul, scale, scale)

  if isinstance(position, tuple):
    # Convert pixel coordinate into mm/rad
    x, y, z = position
    xy_mm = panel.pixel_to_millimeter((x, y))
    if scan is None:
      z_rad = 0
    else:
      z_rad = scan.get_angle_from_array_index(z, deg=False)

    # Set the position, variance and squared width in mm/rad
    # N.B. assuming locally flat pixel to millimeter transform
    # for variance calculation.
    position_mm = xy_mm + (z_rad,)
    variance_mm = map(mul, variance, scale2)
    sd_error_mm = map(mul, sd_error, scale2)

  else:
    from scitbx.array_family import flex
    # Convert pixel coordinate into mm/rad
    x, y, z = position.parts()
    xy_mm = panel.pixel_to_millimeter(flex.vec2_double(x, y))
    if scan is None:
      z_rad = flex.double(z.size(), 0)
    else:
      z_rad = scan.get_angle_from_array_index(z, deg=False)

    # Set the position, variance and squared width in mm/rad
    # N.B. assuming locally flat pixel to millimeter transform
    # for variance calculation.
    x_mm, y_mm = xy_mm.parts()
    position_mm = flex.vec3_double(x_mm, y_mm, z_rad)
    v0, v1, v2 = variance.parts()
    variance_mm = flex.vec3_double(v0*scale2[0], v1*scale2[1], v2*scale2[2])
    s0, s1, s2 = sd_error.parts()
    sd_error_mm = flex.vec3_double(s0*scale2[0], s1*scale2[1], s2*scale2[2])

  # Return the stuff in mm/rad
  return position_mm, variance_mm, sd_error_mm
def compute_transform_grad(grad_wrt_xyz,
                           xyz_asu,
                           x,
                           ncs_restraints_group_list=None,
                           transforms_obj=None):
  """
  Compute gradient with respect to the rotation angles and the translation
  vectors: R = Rx(the)Ry(psi)Rz(phi)

  Args:
    grad_wrt_xyz (flex.double): gradients with respect to xyz.
    ncs_restraints_group_list: list containing ncs_restraint_group objects
    transforms_obj (ncs_group_object): containing information on rotation
      matrices and to which chains they apply
    xyz_asu (flex.vec3): the coordinates sites cart of the complete ASU
    x (flex.double): the angles, in the form
      (theta_1,psi_1,phi_1,tx_1,ty_1,tz_1,..
       theta_n,psi_n,phi_n,tx_n/s,ty_n/s,tz_n/s)

  Returns:
    g (flex.double): the gradient
  """
  assert bool(transforms_obj) == (not bool(ncs_restraints_group_list))
  if transforms_obj:
    ncs_restraints_group_list = transforms_obj.get_ncs_restraints_group_list()
  g = []
  grad_wrt_xyz = flex.vec3_double(grad_wrt_xyz)
  i = 0
  for nrg in ncs_restraints_group_list:
    xyz_ncs_transform = xyz_asu.select(nrg.master_iselection)
    xyz_len = xyz_ncs_transform.size()
    # calc the coordinates of the master NCS at its coordinates center system
    mu_c = flex.vec3_double([xyz_ncs_transform.sum()]) * (1/xyz_len)
    xyz_cm = xyz_ncs_transform - flex.vec3_double(list(mu_c) * xyz_len)
    for nrg_copy in nrg.copies:
      grad_ncs_wrt_xyz = grad_wrt_xyz.select(nrg_copy.iselection)
      assert xyz_len == grad_ncs_wrt_xyz.size()
      grad_wrt_t = list(grad_ncs_wrt_xyz.sum())
      # Sum angles gradient over the coordinates
      # Use the coordinate center for rotation
      m = grad_ncs_wrt_xyz.transpose_multiply(xyz_cm)
      m = matrix.sqr(m)
      # Calculate gradient with respect to the rotation angles
      the,psi,phi = x[i*6:i*6+3]
      rot = scitbx.rigid_body.rb_mat_xyz(
        the=the, psi=psi, phi=phi, deg=False)
      g_the = (m * rot.r_the().transpose()).trace()
      g_psi = (m * rot.r_psi().transpose()).trace()
      g_phi = (m * rot.r_phi().transpose()).trace()
      g.extend([g_the, g_psi, g_phi])
      g.extend(grad_wrt_t)
      i += 1
  return flex.double(g)
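# A minimal numeric check (not part of the original) of the trace identity
# behind the angle gradients above: for any 3x3 matrix A,
#   sum_i g_i . (A x_i) == trace(M A^T),  M = sum_i g_i x_i^T,
# where M is what grad.transpose_multiply(xyz) computes. With A = dR/dangle
# this is exactly how g_the, g_psi and g_phi are formed. Inputs are made up.
def exercise_trace_gradient_identity():
  from scitbx import matrix
  from scitbx.array_family import flex
  xyz = flex.vec3_double([(1,0,0), (0,2,0), (0,0,3)])
  grad = flex.vec3_double([(0.1,-0.2,0.3), (0.4,0.5,-0.6), (0.7,-0.8,0.9)])
  a = matrix.sqr((1,2,3, 4,5,6, 7,8,9))  # stand-in for dR/dangle
  lhs = flex.sum(grad.dot(a.elems * xyz))
  m = matrix.sqr(grad.transpose_multiply(xyz))
  rhs = (m * a.transpose()).trace()
  assert abs(lhs - rhs) < 1.e-10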
def run(args):
  assert len(args) == 0
  from cctbx.crystal.distance_based_connectivity import \
    build_simple_two_way_bond_sets
  from scitbx.array_family import flex
  sites_cart = flex.vec3_double([
    (25.655, 43.266, 42.630),
    (24.038, 43.853, 43.337),
    (23.048, 44.525, 43.290),
    (21.223, 44.207, 41.475),
    (24.951, 47.170, 37.585),
    (19.298, 46.942, 51.808)])
  elements = flex.std_string(["S", "C", "N", "CU", "ZN", "CA"])
  bond_list = build_simple_two_way_bond_sets(
    sites_cart=sites_cart, elements=elements)
  assert [sorted(b) for b in bond_list] == [[1], [0,2], [1,3], [2], [], []]
  #
  # caffeine
  sites_cart = flex.vec3_double([
    (-2.986, 0.015, 1.643),
    (-1.545, 0.015, 1.643),
    (-0.733, 0.015, 2.801),
    (0.592, 0.015, 2.395),
    (0.618, 0.015, 1.034),
    (1.758, 0.015, 0.102),
    (3.092, -0.06, 0.694),
    (1.525, 0.015, -1.360),
    (2.489, -0.024, -2.139),
    (0.158, 0.015, -1.888),
    (-0.025, 0.024, -3.330),
    (-0.986, 0.015, -0.959),
    (-2.155, 0.008, -1.408),
    (-0.733, 0.015, 0.565),
    (-3.346, 0.016, 2.662),
    (-3.347, 0.896, 1.133),
    (-3.347, -0.868, 1.136),
    (-1.083, 0.02, 3.822),
    (3.184, -0.975, 1.26),
    (3.245, 0.785, 1.348),
    (3.835, -0.047, -0.09),
    (0.508, 0.861, -3.756),
    (-1.076, 0.113, -3.560),
    (0.358, -0.896, -3.748)])
  elements = flex.std_string([
    ' C', ' N', ' C', ' N', ' C', ' N', ' C', ' C', ' O', ' N', ' C', ' C',
    ' O', ' C', ' H', ' H', ' H', ' H', ' H', ' H', ' H', ' H', ' H', ' H'])
  bonds = build_simple_two_way_bond_sets(
    sites_cart=sites_cart, elements=elements)
  assert bonds.size() == sites_cart.size()
  assert list(bonds[0]) == [1, 14, 15, 16]
  #
  print "OK"
def __call__(self, ipanel=0):
  panel_a = self.det1[ipanel]
  panel_b = self.det2[ipanel]
  size_fast, size_slow = panel_a.get_image_size_mm()
  assert (size_fast, size_slow) == panel_b.get_image_size_mm()

  # num of sample intervals
  n_fast = int((size_fast) / SAMPLE_FREQ)
  n_slow = int((size_slow) / SAMPLE_FREQ)

  # interval width
  step_fast = size_fast / n_fast
  step_slow = size_slow / n_slow

  # samples
  samp_fast = [step_fast * i for i in range(n_fast + 1)]
  samp_slow = [step_slow * i for i in range(n_slow + 1)]

  lab1 = flex.vec3_double()
  lab2 = flex.vec3_double()
  sample_pts = flex.vec2_double()

  # loop
  for s in samp_slow:
    for f in samp_fast:
      lab1.append(panel_a.get_lab_coord((f, s)))
      lab2.append(panel_b.get_lab_coord((f, s)))
      sample_pts.append((f,s))

  offset = lab2 - lab1

  # store offset in the lab frame
  x_off, y_off, z_off = offset.parts()

  # reexpress offset in the basis fast, slow, normal of panel_a
  f_off = offset.dot(panel_a.get_fast_axis())
  s_off = offset.dot(panel_a.get_slow_axis())
  n_off = offset.dot(panel_a.get_normal())

  f, s = sample_pts.parts()

  return {'lab_coord':lab1, 'fast':f, 'slow':s,
          'x_offset':x_off, 'y_offset':y_off, 'z_offset':z_off,
          'fast_offset':f_off, 'slow_offset':s_off, 'normal_offset':n_off,
          'size_fast':size_fast, 'size_slow':size_slow}
def _get_f_r_s(axis_point_1, axis_point_2, moving_coor, fixed_coor):
  fc_proj = project_point_on_axis(axis_point_1, axis_point_2, fixed_coor)
  mc_proj = project_point_on_axis(axis_point_1, axis_point_2, moving_coor)
  f = (fixed_coor[0]-fc_proj[0],
       fixed_coor[1]-fc_proj[1],
       fixed_coor[2]-fc_proj[2])
  r = (moving_coor[0]-mc_proj[0],
       moving_coor[1]-mc_proj[1],
       moving_coor[2]-mc_proj[2])
  ap_21 = (axis_point_2[0]-axis_point_1[0],
           axis_point_2[1]-axis_point_1[1],
           axis_point_2[2]-axis_point_1[2])
  r_norm = math.sqrt(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])
  r_home = flex.vec3_double([(r[0]/r_norm, r[1]/r_norm, r[2]/r_norm)])
  ap_21_norm = math.sqrt(ap_21[0]*ap_21[0]+ap_21[1]*ap_21[1]+ap_21[2]*ap_21[2])
  theta_home = flex.vec3_double(
    [(ap_21[0]/ap_21_norm, ap_21[1]/ap_21_norm, ap_21[2]/ap_21_norm)])
  tt = theta_home.cross(r_home)
  s_home = tt*(1/tt.norm())
  return flex.vec3_double([f]), s_home, r_norm, r_home
def fit(self, fragment, reference_sites, control_point_indices=None):
  """
  Fits the given fragment to the given sites. If control_point_indices is
  not given, all points are fit; otherwise only the control points are fit
  and the result is propagated to the rest of the fragment coordinates.
  Returns coordinates of the transformed fragment.
  """
  if not control_point_indices:
    control_point_indices = range(0, len(fragment))
  to_fit = [(fragment[i].x, fragment[i].y, 0)
    for i in control_point_indices]
  lsf = superpose.least_squares_fit(
    flex.vec3_double(reference_sites),
    flex.vec3_double(to_fit))
  to_fit = flex.vec3_double([(i.x, i.y, 0) for i in fragment])
  return lsf.r.elems * to_fit + lsf.t.elems
def __init__(self, goniometer):
  from scitbx.array_family import flex
  import math

  self.goniometer = goniometer

  coords = flex.vec3_double()
  axis = flex.size_t()

  # FACE A: Sample holder
  #   Defined as semi-circle of radius r(A) = 10 mm (centred on PHI axis)
  #   with rectangle of size a(A) = 12.8 mm (x 20 mm)
  offsetA = 33.0
  radiusA = 10.0
  sqdA = 12.8 # square depth
  phi = flex.double_range(-90, 100, step=10) * math.pi/180
  x = flex.double(phi.size(), -offsetA)
  y = radiusA * flex.cos(phi)
  z = radiusA * flex.sin(phi)

  x.extend(flex.double(5, -offsetA))
  y.extend(flex.double((-sqdA/2, -sqdA, -sqdA, -sqdA, -sqdA/2)))
  z.extend(flex.double((radiusA, radiusA, 0, -radiusA, -radiusA)))

  self.faceA = flex.vec3_double(x, y, z)

  # FACE B: Lower arm
  sx = -28.50
  sy = -4.90
  sz = 8.50
  mx = -13.80
  my = -26.00
  nx = -27.50
  ny = -29.50
  px = -65.50
  py = -29.50
  self.faceB = flex.vec3_double(((sx,sy,sz),(mx,my,0),(nx,ny,0),(px,py,0)))

  # FACE E: Rim of sample holder
  #   Defined as circle of radius r(E) = 6 mm (centred on PHI axis) at an
  #   offset o(E) = 19 mm
  offsetE = 19.0
  radiusE = 6.0
  phi = flex.double_range(0, 360, step=15) * math.pi/180
  x = flex.double(phi.size(), -offsetE)
  y = radiusE * flex.cos(phi)
  z = radiusE * flex.sin(phi)

  self.faceE = flex.vec3_double(x, y, z)
def generate_reflections(self, num):
  from random import randint, seed
  from scitbx import matrix
  from dials.array_family import flex
  from dials.algorithms.shoebox import MaskCode
  seed(0)
  assert(len(self.detector) == 1)
  beam_vector = flex.vec3_double(num)
  xyzcal_px = flex.vec3_double(num)
  xyzcal_mm = flex.vec3_double(num)
  panel = flex.size_t(num)
  s0_length = matrix.col(self.beam.get_s0()).length()
  for i in range(num):
    x = randint(0, 2000)
    y = randint(0, 2000)
    z = randint(0, 8)
    s1 = self.detector[0].get_pixel_lab_coord((x, y))
    s1 = matrix.col(s1).normalize() * s0_length
    phi = self.scan.get_angle_from_array_index(z, deg=False)
    beam_vector[i] = s1
    xyzcal_px[i] = (x, y, z)
    (x, y) = self.detector[0].pixel_to_millimeter((x, y))
    xyzcal_mm[i] = (x, y, phi)
    panel[i] = 0

  sigma_b = self.experiment[0].beam.get_sigma_divergence(deg=False)
  sigma_m = self.experiment[0].crystal.get_mosaicity(deg=False)

  rlist = flex.reflection_table()
  rlist['id'] = flex.int(len(beam_vector), 0)
  rlist['s1'] = beam_vector
  rlist['panel'] = panel
  rlist['xyzcal.px'] = xyzcal_px
  rlist['xyzcal.mm'] = xyzcal_mm
  rlist['bbox'] = rlist.compute_bbox(self.experiment)
  index = []
  image_size = self.experiment[0].detector[0].get_image_size()
  array_range = self.experiment[0].scan.get_array_range()
  bbox = rlist['bbox']
  for i in range(len(rlist)):
    x0, x1, y0, y1, z0, z1 = bbox[i]
    if (x0 < 0 or x1 > image_size[0] or
        y0 < 0 or y1 > image_size[1] or
        z0 < array_range[0] or z1 > array_range[1]):
      index.append(i)
  rlist.del_selected(flex.size_t(index))
  rlist['shoebox'] = flex.shoebox(rlist['panel'], rlist['bbox'])
  rlist['shoebox'].allocate_with_value(MaskCode.Valid)
  return rlist
def exercise_00():
  #
  of = open("ncs_1.pdb","w")
  print >> of, ncs_1_copy
  of.close()
  of = open("full_asu.pdb","w")
  print >> of, full_asu
  of.close()
  #
  xrs_1_copy = iotbx.pdb.input(source_info=None,
    lines=ncs_1_copy).xray_structure_simple()
  #
  pdb_inp = iotbx.pdb.input(source_info=None, lines=full_asu)
  xrs_full_asu = pdb_inp.xray_structure_simple()
  cs = pdb_inp.crystal_symmetry_from_cryst1()
  ph = pdb_inp.construct_hierarchy()
  #
  o = mmtbx.ncs.asu_ncs_converter(pdb_hierarchy=ph)
  xrs = o.ph_first_chain.extract_xray_structure(crystal_symmetry=cs)
  mmtbx.utils.assert_xray_structures_equal(x1 = xrs, x2 = xrs_1_copy)
  ###
  o.update_sites_cart(sites_cart_master_ncs_copy = xrs_1_copy.sites_cart())
  x1 = o.pdb_hierarchy.extract_xray_structure(crystal_symmetry=cs)
  mmtbx.utils.assert_xray_structures_equal(
    x1 = xrs_full_asu,
    x2 = o.pdb_hierarchy.extract_xray_structure(crystal_symmetry=cs),
    eps = 1.e-4)
  o.write_pdb_file(file_name="one.pdb", crystal_symmetry=cs, mode="ncs")
  o.write_pdb_file(file_name="all.pdb", crystal_symmetry=cs, mode="asu")
  mmtbx.utils.assert_xray_structures_equal(
    x1 = xrs_full_asu,
    x2 = iotbx.pdb.input(file_name="all.pdb").xray_structure_simple(),
    eps = 1.e-4)
  mmtbx.utils.assert_xray_structures_equal(
    x1 = xrs_1_copy,
    x2 = iotbx.pdb.input(file_name="one.pdb").xray_structure_simple(),
    eps = 1.e-4)
  ###
  sh1 = flex.vec3_double(xrs_1_copy.sites_cart().size(), [1,2,3])
  shf = flex.vec3_double(xrs_full_asu.sites_cart().size(), [1,2,3])
  o.update_sites_cart(
    sites_cart_master_ncs_copy = xrs_1_copy.sites_cart()+sh1)
  tmp = xrs_full_asu.deep_copy_scatterers()
  tmp.set_sites_cart(sites_cart = xrs_full_asu.sites_cart()+shf)
  mmtbx.utils.assert_xray_structures_equal(
    x1 = tmp,
    x2 = o.pdb_hierarchy.extract_xray_structure(crystal_symmetry=cs),
    eps = 1.e-4)
  o.write_pdb_file(file_name="one.pdb", crystal_symmetry=cs, mode="ncs")
  o.write_pdb_file(file_name="all.pdb", crystal_symmetry=cs, mode="asu")
def run(args):
  from scitbx.array_family import flex
  from scitbx import matrix
  from dials.util.command_line import Importer
  from dials.algorithms.reflection_basis import zeta_factor

  importer = Importer(args, check_format=False)
  assert importer.datablocks is not None
  assert len(importer.datablocks) == 1
  datablock = importer.datablocks[0]
  imagesets = datablock.extract_imagesets()
  assert len(imagesets) == 1
  imageset = imagesets[0]
  detector = imageset.get_detector()
  beam = imageset.get_beam()
  goniometer = imageset.get_goniometer()
  assert goniometer is not None
  assert len(detector) == 1
  panel = detector[0]

  lab_coords = flex.vec3_double(flex.grid(panel.get_image_size()))
  for i in range(panel.get_image_size()[0]):
    for j in range(panel.get_image_size()[1]):
      lab_coords[i,j] = panel.get_lab_coord(panel.pixel_to_millimeter((i,j)))

  axis = matrix.col(goniometer.get_rotation_axis())
  s0 = matrix.col(beam.get_s0())
  s1 = (lab_coords.as_1d()/lab_coords.as_1d().norms()) * s0.length()
  s1_cross_s0 = s1.cross(flex.vec3_double(s1.size(), s0.elems))
  p_volume = flex.abs(s1_cross_s0.dot(axis.elems))
  p_volume.reshape(flex.grid(panel.get_image_size()))
  zeta = flex.abs(zeta_factor(axis.elems, s0.elems, s1.as_1d()))
  zeta.reshape(flex.grid(panel.get_image_size()))

  from matplotlib import pyplot
  pyplot.figure()
  pyplot.title('parallelepiped volume')
  CS = pyplot.contour(p_volume.matrix_transpose().as_numpy_array(), 10)
  pyplot.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
  pyplot.axes().set_aspect('equal')
  pyplot.show()

  pyplot.title('zeta factor')
  CS = pyplot.contour(zeta.matrix_transpose().as_numpy_array(), 10)
  pyplot.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
  pyplot.axes().set_aspect('equal')
  pyplot.show()
def __init__(self, decompose_tls_object, pdb_hierarchy, xray_structure,
             n_models, sigma, log=None):
  if(log is None): log = sys.stdout
  xray_structure.convert_to_isotropic()
  xray_structure = xray_structure.set_b_iso(value=0)
  sites_cart = xray_structure.sites_cart()
  self.states = mmtbx.utils.states(
    xray_structure = xray_structure,
    pdb_hierarchy  = pdb_hierarchy)
  r = decompose_tls_object
  print >> log
  print >> log, "Generating ensemble of %d models:"%n_models
  for trial in xrange(n_models):
    print >> log, "model #%d"%trial
    # All that needs to be updated from the original tls_to_xyz is how we
    # get dx0,dy0,dz0 and d_r_M_V
    dx0,dy0,dz0 = step_i__get_dxdydz(
      L_L=r.b_o.L_L, R_PL=r.b_o.R_PL, step=trial, sigma=sigma,
      sampling=n_models, log=log)
    d_r_M_V = step_j(
      h_o=r.h_o, step=trial, sampling=n_models, sigma=sigma, log=log)
    sites_cart_new = flex.vec3_double()
    for site_cart in sites_cart:
      r_L = r.b_o.R_PL.transpose() * site_cart
      d_r_M_L = step_i__compute_delta_L_r_dp(
        r_L=r_L, c_o=r.c_o, e_o=r.e_o, dx0=dx0, dy0=dy0, dz0=dz0,
        R_PL=r.b_o.R_PL)
      d_r_M = step_k(d_r_M_L=d_r_M_L, d_r_M_V=d_r_M_V)
      sites_cart_new.append(matrix.col(site_cart) + d_r_M)
    self.states.add(sites_cart = sites_cart_new)
def etm_as_ftm(etm):
  ftm = scitbx.rigid_body.tardy_model(
    labels=etm.labels,
    sites=flex.vec3_double(etm.sites),
    masses=flex.double(etm.masses),
    tardy_tree=etm.tardy_tree,
    potential_obj=etm.potential_obj,
    near_singular_hinges_angular_tolerance_deg=
      etm.near_singular_hinges_angular_tolerance_deg)
  assert ftm.bodies_size() == len(etm.bodies)
  assert ftm.number_of_trees == etm.number_of_trees
  assert ftm.degrees_of_freedom == etm.degrees_of_freedom
  assert ftm.q_packed_size == etm.q_packed_size
  assert list(ftm.degrees_of_freedom_each_joint()) \
      == [body.joint.degrees_of_freedom for body in etm.bodies]
  assert list(ftm.q_size_each_joint()) \
      == [body.joint.q_size for body in etm.bodies]
  ftm.flag_positions_as_changed()
  ftm.flag_velocities_as_changed()
  assert ftm.labels is etm.labels
  assert ftm.sites.size() == len(etm.sites)
  assert ftm.masses.size() == len(etm.masses)
  assert ftm.tardy_tree is etm.tardy_tree
  assert ftm.potential_obj is etm.potential_obj
  assert approx_equal(
    ftm.near_singular_hinges_angular_tolerance_deg,
    etm.near_singular_hinges_angular_tolerance_deg)
  return ftm
def exercise_joint_lib_six_dof_aja_simplified():
  tc = test_cases_tardy_pdb.test_cases[9]
  tt = tc.tardy_tree_construct()
  masses = [1.0]*len(tc.sites)
  # arbitrary transformation so that the center of mass is not at the origin
  rt_arbitrary = matrix.col((-0.21,-0.51,0.64)) \
    .rt_for_rotation_around_axis_through(
      point=matrix.col((-0.80, 0.28, -0.89)), angle=-37, deg=True)
  sites = [rt_arbitrary * site for site in tc.sites]
  tm = scitbx.rigid_body.essence.tardy.model(
    labels=tc.labels,
    sites=sites,
    masses=masses,
    tardy_tree=tt,
    potential_obj=None)
  assert len(tm.bodies) == 1
  assert tm.q_packed_size == 7
  mt = flex.mersenne_twister(seed=0)
  for i_trial in xrange(3):
    q = mt.random_double(size=tm.q_packed_size)*2-1
    tm.unpack_q(q_packed=q)
    sm = tm.sites_moved()
    aja = matrix.rt(scitbx.rigid_body.joint_lib_six_dof_aja_simplified(
      center_of_mass=tuple(flex.vec3_double(sites).mean()),
      q=q))
    sm2 = [aja * site for site in sites]
    assert approx_equal(sm2, sm)
def _goniometer(self):
  '''Return a model for a simple single-axis goniometer. This should
  probably be checked against the image header, though for miniCBF
  there are limited options for this.'''

  if 'Phi' in self._cif_header_dictionary:
    phi_value = float(self._cif_header_dictionary['Phi'].split()[0])
  else:
    phi_value = 0.0

  if 'Kappa' in self._cif_header_dictionary:
    kappa_value = float(self._cif_header_dictionary['Kappa'].split()[0])
  else:
    kappa_value = 0.0

  if 'Omega' in self._cif_header_dictionary:
    omega_value = float(self._cif_header_dictionary['Omega'].split()[0])
  else:
    omega_value = 0.0

  from scitbx import matrix
  from scitbx.array_family import flex
  phi = (1.0, 0.0, 0.0)
  kappa = (0.914, 0.279, -0.297)
  omega = (1.0, 0.0, 0.0)
  axes = flex.vec3_double((phi, kappa, omega))
  angles = flex.double((phi_value, kappa_value, omega_value))
  names = flex.std_string(("GON_PHI", "GON_KAPPA", "GON_OMEGA"))
  return self._goniometer_factory.make_multi_axis_goniometer(
    axes, angles, names, scan_axis=2)
def coord_stats_for_atom_groups (residue1, residue2) :
  from scitbx.array_family import flex
  sites1 = flex.vec3_double()
  sites2 = flex.vec3_double()
  atoms = []
  for atom1 in residue1.atoms() :
    if (atom1.element.strip() in ["H","D"]) : continue
    found = False
    for atom2 in residue2.atoms() :
      if (atom2.name == atom1.name) :
        assert (not found)
        found = True
        atoms.append(atom1)
        sites1.append(atom1.xyz)
        sites2.append(atom2.xyz)
  return coord_stats_with_flips(sites1, sites2, atoms)
def equally_spaced_points_on_vector(start, end, n=None, step=None):
  assert [n, step].count(None) == 1
  vec = [end[0]-start[0], end[1]-start[1], end[2]-start[2]]
  r = flex.vec3_double([vec])
  if(n is not None):
    assert n > 0
  else:
    assert step > 0
    vec_length = math.sqrt(vec[0]**2+vec[1]**2+vec[2]**2)
    n = int(vec_length/step)-1
  dr = r*(1/float(n+1))
  points = flex.vec3_double()
  for i in xrange(n+1):
    points.extend(dr * i + start)
  points.append(end)
  return points
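# A quick usage sketch (not part of the original), assuming this module's
# equally_spaced_points_on_vector: four interior points plus the start and
# end points on a 10 A segment along z.
def exercise_equally_spaced_points():
  from libtbx.test_utils import approx_equal
  pts = equally_spaced_points_on_vector(start=(0,0,0), end=(0,0,10), n=4)
  assert pts.size() == 6  # z = 0, 2, 4, 6, 8 and 10
  assert approx_equal(pts[1], (0, 0, 2))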
def check_f_calc_derivs():
  eps = 1e-6
  g_fin = flex.complex_double()
  c_fin = flex.vec3_double()
  for ih in xrange(f_calc.size()):
    c_orig = f_calc[ih]
    g_fin_ab = []
    c_fin_ab = []
    for iab in [0, 1]:
      fs = []
      gs = []
      for signed_eps in [eps, -eps]:
        if iab == 0:
          f_calc[ih] = complex(c_orig.real + signed_eps, c_orig.imag)
        else:
          f_calc[ih] = complex(c_orig.real, c_orig.imag + signed_eps)
        trg_eps = r1.target(f_obs=f_obs, f_calc=f_calc)
        fs.append(trg_eps.t)
        gs.append(trg_eps.f_calc_gradients[ih])
      g_fin_ab.append((fs[0] - fs[1]) / (2 * eps))
      c_fin_ab.append((gs[0] - gs[1]) / (2 * eps))
    g_fin.append(complex(*g_fin_ab))
    assert approx_equal(c_fin_ab[0].imag, c_fin_ab[1].real)
    c_fin.append((c_fin_ab[0].real, c_fin_ab[1].imag, c_fin_ab[0].imag))
    f_calc[ih] = c_orig
  for pn, f, a in zip(g_fin.part_names(), g_fin.parts(),
                      trg.f_calc_gradients.parts()):
    print >> log, "g fin %s:" % pn, numstr(f)
    print >> log, "  ana %s:" % pn, numstr(a)
  assert approx_equal(trg.f_calc_gradients, g_fin)
  for pn, f, a in zip(["aa", "bb", "ab"], c_fin.parts(),
                      trg.f_calc_hessians.parts()):
    print >> log, "c fin %s:" % pn, numstr(f)
    print >> log, "  ana %s:" % pn, numstr(a)
  assert approx_equal(trg.f_calc_hessians, c_fin)
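# The function above is a central-difference check of analytical gradients
# and curvatures. A minimal self-contained sketch (not part of the original)
# of the same pattern on a scalar function:
def exercise_central_difference_sketch():
  def f(x): return x**3
  x, eps = 1.7, 1e-6
  g_fd = (f(x + eps) - f(x - eps)) / (2 * eps)  # finite-difference gradient
  g_an = 3 * x**2                               # analytical gradient
  assert abs(g_fd - g_an) < 1e-7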
def generate_image(n, l, N=100):
  nmax = max(20, n)
  lfg = math.log_factorial_generator(nmax)
  #rzfa = math.zernike_2d_radial(n,l,lfg)
  #rap = math.zernike_2d_polynome(n,l,rzfa)
  rap = math.zernike_2d_polynome(n,l)#,rzfa)

  image = flex.vec3_double()
  original = open('original.dat','w')
  count = 0
  for x in range(-N, N+1):
    for y in range(-N, N+1):
      rr = smath.sqrt(x*x+y*y)/N
      if rr > 1.0:
        value = 0.0
      else:
        tt = smath.atan2(y,x)
        value = rap.f(rr,tt)
        value = value.real
        count = count + 1
      image.append([x+N, y+N, value])
      print>>original, x+N, y+N, value
  original.close()
  return image
def parameter_based_model(self, params):
  PIXEL_SZ = 0.11 # mm/pixel
  all_model = mark3_collect_data(self.frame_id, self.HKL)
  self.FRAMES["refined_detector_origin"] = flex.vec3_double(
    len(self.FRAMES["frame_id"]))

  for iframe in xrange(len(self.FRAMES["frame_id"])):
    frame_id = self.FRAMES["frame_id"][iframe]
    self.frame_id_to_param_no[frame_id] = iframe

    detector_origin = self.parameter_based_model_one_frame_detail(
      frame_id, iframe, all_model)
    try:
      self.bandpass_models[frame_id].gaussian_fast_slow()
    except Exception,e:
      print "Exception from picture", e
      raise e
    try:
      all_model.collect_mean_position(
        self.bandpass_models[frame_id].mean_position,
        self.bandpass_models[frame_id].observed_flag,
        frame_id)
      all_model.collect_distance(
        self.bandpass_models[frame_id].part_distance, frame_id)
      self.FRAMES["refined_detector_origin"][iframe] = \
        detector_origin/(-PIXEL_SZ)
    except Exception,e:
      print "Exception from collect", e
      raise e
def make_kappa_goniometer(alpha, omega, kappa, phi, direction, scan_axis):
  import math

  omega_axis = (1, 0, 0)
  phi_axis = (1, 0, 0)

  c = math.cos(alpha * math.pi / 180)
  s = math.sin(alpha * math.pi / 180)
  if direction == "+y":
    kappa_axis = (c, s, 0.0)
  elif direction == "+z":
    kappa_axis = (c, 0.0, s)
  elif direction == "-y":
    kappa_axis = (c, -s, 0.0)
  elif direction == "-z":
    kappa_axis = (c, 0.0, -s)
  else:
    raise RuntimeError("Invalid direction")

  if scan_axis == "phi":
    scan_axis = 0
  else:
    scan_axis = 2

  from scitbx.array_family import flex
  axes = flex.vec3_double((phi_axis, kappa_axis, omega_axis))
  angles = flex.double((phi, kappa, omega))
  names = flex.std_string(("PHI", "KAPPA", "OMEGA"))
  return goniometer_factory.make_multi_axis_goniometer(
    axes, angles, names, scan_axis)
def from_map_map_atom(map_1, map_2, site_cart, unit_cell, radius):
  assert_same_gridding(map_1, map_2)
  sel = maptbx.grid_indices_around_sites(
    unit_cell  = unit_cell,
    fft_n_real = map_1.focus(),
    fft_m_real = map_1.all(),
    sites_cart = flex.vec3_double([site_cart]),
    site_radii = flex.double(1, radius))
  return flex.linear_correlation(
    x=map_1.select(sel).as_1d(),
    y=map_2.select(sel).as_1d()).coefficient()
def preprocess_image(file):
  image = flex.vec3_double()
  size = file.shape[0]
  for x in range(0, size):
    for y in range(0, size):
      value = file[x][y]
      image.append([int(x), int(y), float(value)])
  return image
def draw_points(self):
  if self.points_display_list is None:
    self.points_display_list = gltbx.gl_managed.display_list()
    self.points_display_list.compile()
    gl.glLineWidth(1)
    if self.colors is None:
      self.colors = flex.vec3_double(len(self.points), (1, 1, 1))
    for point, color in zip(self.points, self.colors):
      self.draw_cross_at(point, color=color)
    self.points_display_list.end()
  self.points_display_list.call()
def __init__(self, file_name, scale):
  self.CA_indx = flex.int()
  self.label = []
  self.xyz = flex.vec3_double()
  self.natm = 0
  self.readPDB(file_name)
  self.crd = flex.double()
  self.eigens = []
  for xyz in self.xyz:
    for value in xyz:
      self.crd.append(value)
def _construct_goniometer(phi, kappa, omega):
  phi_axis = (1.0, 0.0, 0.0)
  kappa_axis = (0.914, 0.279, -0.297)
  omega_axis = (1.0, 0.0, 0.0)
  axes = flex.vec3_double((phi_axis, kappa_axis, omega_axis))
  angles = flex.double((phi, kappa, omega))
  names = flex.std_string(("GON_PHI", "GON_KAPPA", "GON_OMEGA"))
  return GoniometerFactory.make_multi_axis_goniometer(
    axes, angles, names, scan_axis=2)
def _construct_goniometer(phi, chi, omega):
  phi_axis = (1.0, 0.0, 0.0)
  chi_axis = (0, 0, -1)
  omega_axis = (1.0, 0.0, 0.0)
  axes = flex.vec3_double((phi_axis, chi_axis, omega_axis))
  angles = flex.double((phi, chi, omega))
  names = flex.std_string(("GON_PHI", "GON_CHI", "GON_OMEGA"))
  return GoniometerFactory.make_multi_axis_goniometer(
    axes, angles, names, scan_axis=2)
def run(prefix):
  """
  Exercise gradients match:
    - small vs large box:
      -- using clustering vs not using clustering.
        --- fast_interaction True / False
  Non-P1 case (P212121)
  """
  for fast_interaction in [True, False]:
    data_file_prefix = "2olx"
    common_args = ["restraints=cctbx", "mode=opt", "parallel.nproc=1"]
    r = run_tests.run_cmd(
      prefix,
      args=common_args + [
        "clustering=true",
        "fast_interaction=%s" % str(fast_interaction),
        "dump_gradients=cluster_true.pkl"],
      pdb_name=os.path.join(qr_unit_tests_data, "%s.pdb" % data_file_prefix),
      mtz_name=os.path.join(qr_unit_tests_data, "%s.mtz" % data_file_prefix))
    r = run_tests.run_cmd(
      prefix,
      args=common_args + [
        "clustering=false",
        "dump_gradients=cluster_false.pkl"],
      pdb_name=os.path.join(qr_unit_tests_data, "%s.pdb" % data_file_prefix),
      mtz_name=os.path.join(qr_unit_tests_data, "%s.mtz" % data_file_prefix))
    #
    g1 = flex.vec3_double(easy_pickle.load("cluster_false.pkl"))
    g2 = flex.vec3_double(easy_pickle.load("cluster_true.pkl"))
    assert g1.size() == g2.size()
    diff = g1 - g2
    if (0):
      for i, diff_i in enumerate(diff):
        if (abs(max(diff_i)) > 1.e-6):
          print(i, diff_i, g1[i], g2[i])
      print()
    assert approx_equal(diff.max(), [0, 0, 0])
def partition(self, mask=None, cpus=1):
  """Find the nearest neighbour for each grid point (or the subset defined
  by mask.outer_mask() if mask is not None)"""

  def find_sites(sites_tuple):
    ref_sites, query_sites = sites_tuple
    tree = spatial.KDTree(data=ref_sites)
    nn_dists, nn_groups = tree.query(query_sites)
    return nn_groups

  assert isinstance(cpus, int) and (cpus > 0)
  # Sites that we are partitioning
  if mask:
    query_sites = flex.vec3_double(mask.outer_mask())
  else:
    query_sites = flex.vec3_double(self.parent.grid_points())
  # Find the nearest grid_site for each query_site (returns index of the
  # grid site)
  if cpus == 1:
    output = [find_sites((self.sites_grid, query_sites))]
  else:
    # Chunk the points into groups
    chunk_size = iceil(1.0*len(query_sites)/cpus)
    chunked_points = [query_sites[i:i + chunk_size]
      for i in range(0, len(query_sites), chunk_size)]
    assert sum(map(len, chunked_points)) == len(query_sites)
    assert len(chunked_points) == cpus
    # Map to cpus
    arg_list = [(self.sites_grid, chunk) for chunk in chunked_points]
    output = easy_mp.pool_map(
      fixed_func=find_sites, args=arg_list, processes=cpus)
    assert len(output) == cpus, '{!s} != {!s}'.format(len(output), cpus)
  # Extract the indices of the mapped points
  nn_groups = []
  for o in output:
    nn_groups.extend(o)
  nn_groups = numpy.array(nn_groups)
  assert len(query_sites) == len(nn_groups)
  # Reformat into full grid size
  if mask:
    self.nn_groups = -1*numpy.ones(self.parent.grid_size_1d(), dtype=int)
    self.nn_groups.put(mask.outer_mask_indices(), nn_groups)
  else:
    self.nn_groups = nn_groups

  return self
def apply_transforms(ncs_coordinates,
                     ncs_restraints_group_list,
                     total_asu_length,
                     extended_ncs_selection,
                     round_coordinates = True,
                     center_of_coordinates = None):
  """
  Apply transformation to ncs_coordinates, and round the results if
  round_coordinates is True

  Args:
    ncs_coordinates (flex.vec3): master ncs coordinates
    ncs_restraints_group_list: list of ncs_restraint_group objects
    total_asu_length (int): complete ASU length
    extended_ncs_selection (flex.size_t): master ncs and non-ncs related parts
    center_of_coordinates: when not None, contains the center of coordinates
      of the master for each ncs copy

  Returns:
    (flex.vec3_double): Asymmetric or biological unit parts that are related
    via ncs operations
  """
  asu_xyz = flex.vec3_double([(0,0,0)]*total_asu_length)
  asu_xyz.set_selected(extended_ncs_selection, ncs_coordinates)

  # get the rotation and translation for the native coordinate system
  if bool(center_of_coordinates):
    ncs_restraints_group_list = shift_translation_back_to_place(
      shifts = center_of_coordinates,
      ncs_restraints_group_list = ncs_restraints_group_list)
  for nrg in ncs_restraints_group_list:
    master_ncs_selection = flex.bool(total_asu_length, nrg.master_iselection)
    for ncs_copy in nrg.copies:
      copy_selection = flex.bool(total_asu_length, ncs_copy.iselection)
      ncs_xyz = asu_xyz.select(master_ncs_selection)
      new_sites = ncs_copy.r.elems * ncs_xyz + ncs_copy.t
      asu_xyz.set_selected(copy_selection, new_sites)
  if round_coordinates:
    return flex.vec3_double(asu_xyz).round(3)
  else:
    return flex.vec3_double(asu_xyz)
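# A minimal sketch (not part of the original) of the broadcast pattern used
# above, r.elems * sites + t: a 90-degree rotation about z plus a shift,
# applied to a whole vec3_double. Values are made up.
def exercise_apply_rt_sketch():
  from libtbx.test_utils import approx_equal
  from scitbx import matrix
  from scitbx.array_family import flex
  r = matrix.sqr((0,-1,0, 1,0,0, 0,0,1))  # +90 deg about z (row-major)
  t = (10, 0, 0)
  sites = flex.vec3_double([(1,0,0), (0,1,0)])
  new_sites = r.elems * sites + t
  assert approx_equal(list(new_sites), [(10,1,0), (9,0,0)])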
def read_data(filename):
  file = open(filename, 'r')
  data = flex.vec3_double()
  for line in file:
    keys = line.split()
    if(len(keys)==3):
      x = int(keys[0])
      y = int(keys[1])
      z = float(keys[2])
      data.append([x,y,z])
  file.close()
  return data
def compute_functional_and_gradients(self):
  if (self.sites):
    self.fmodel.xray_structure.set_sites_cart(
      sites_cart=flex.vec3_double(self.x))
  if (self.u_iso):
    self.fmodel.xray_structure.set_u_iso(values=self.x)
  self.fmodel.update_xray_structure(
    xray_structure=self.fmodel.xray_structure,
    update_f_calc=True)
  tgx = self.x_target_functor(compute_gradients=True)
  if (self.sites):
    tx = tgx.target_work()
    gx = flex.vec3_double(tgx.\
      gradients_wrt_atomic_parameters(site=True).packed())
    f = tx
    g = gx
  if (self.u_iso):
    tx = tgx.target_work()
    gx = tgx.gradients_wrt_atomic_parameters(u_iso=True)
    f = tx
    g = gx
  return f, g.as_double()
def d2_target_d_params_diag_cpp(self, f_obs, target_type):
  da_db = flex.complex_double()
  daa_dbb_dab = flex.vec3_double()
  for hkl, obs in zip(self.miller_indices, f_obs.data()):
    sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
    target = target_type(obs=obs, calc=sf.f())
    da_db.append(complex(target.da(), target.db()))
    daa_dbb_dab.append((target.daa(), target.dbb(), target.dab()))
  return self.xray_structure.grads_and_curvs_target_simple(
    miller_indices=f_obs.indices(), da_db=da_db, daa_dbb_dab=daa_dbb_dab)
def compose(self):
  # extract parameters from the internal list
  dist, shift1, shift2, tau1, tau2, tau3 = self._param

  # convert angles to radians
  tau1rad = tau1.value / 1000.
  tau2rad = tau2.value / 1000.
  tau3rad = tau3.value / 1000.

  # compose rotation matrices and their first order derivatives
  Tau1 = (tau1.axis).axis_and_angle_as_r3_rotation_matrix(tau1rad, deg=False)
  dTau1_dtau1 = dR_from_axis_and_angle(tau1.axis, tau1rad, deg=False)

  Tau2 = (tau2.axis).axis_and_angle_as_r3_rotation_matrix(tau2rad, deg=False)
  dTau2_dtau2 = dR_from_axis_and_angle(tau2.axis, tau2rad, deg=False)

  Tau3 = (tau3.axis).axis_and_angle_as_r3_rotation_matrix(tau3rad, deg=False)
  dTau3_dtau3 = dR_from_axis_and_angle(tau3.axis, tau3rad, deg=False)

  # Compose the new state
  from dials_refinement_helpers_ext import multi_panel_compose
  from scitbx.array_family import flex
  ret = multi_panel_compose(
    flex.vec3_double(
      [self._initial_state[tag] for tag in ('d1', 'd2', 'dn')]),
    flex.double([p.value for p in self._param]),
    flex.vec3_double([p.axis for p in self._param]),
    self._model,
    flex.vec3_double(self._offsets),
    flex.vec3_double(self._dir1s),
    flex.vec3_double(self._dir2s),
    Tau1, dTau1_dtau1, Tau2, dTau2_dtau2, Tau3, dTau3_dtau3)

  # Store the results. The results come back as a single array; convert it
  # to a 2D array.
  self._multi_state_derivatives = [
    [matrix.sqr(ret[j*len(self._offsets)+i])
      for j in xrange(len(self._param))]
    for i in xrange(len(self._offsets))]

  return
def evaluate_backrub_pair_impl (
    calphas_A,
    calphas_B,
    labels=(),
    max_calpha_sep=5.0,
    rmsd_limit=0.1,
    backrub_angle_limit=10.0) : # FIXME is this an appropriate cutoff?
  assert (len(calphas_A) == len(calphas_B) == 5)
  if (None in calphas_A) or (None in calphas_B) :
    return None
  for k_res in range(0, 4) :
    dist = calphas_A[k_res].distance(calphas_A[k_res + 1])
    if (dist > max_calpha_sep) :
      return None
  from scitbx.array_family import flex
  from scitbx.math import superpose
  from scitbx.matrix import col
  import scitbx.math
  sites_A = flex.vec3_double([calphas_A[k].xyz for k in [0, 1, 3, 4]])
  sites_B = flex.vec3_double([calphas_B[k].xyz for k in [0, 1, 3, 4]])
  lsq_fit = superpose.least_squares_fit(
    reference_sites=sites_A,
    other_sites=sites_B)
  sites_B_new = lsq_fit.other_sites_best_fit()
  rmsd = sites_B_new.rms_difference(sites_A)
  ca2 = (col(sites_A[1]) + col(sites_B_new[1])) / 2
  ca3r = col(calphas_A[2].xyz)
  ca3m = lsq_fit.rt() * calphas_B[2].xyz
  ca4 = (col(sites_A[2]) + col(sites_B_new[2])) / 2
  backrub_angle = scitbx.math.dihedral_angle(
    sites=[ca3r.elems, ca2.elems, ca4.elems, ca3m.elems],
    deg=True)
  if ((rmsd <= rmsd_limit) and
      (abs(backrub_angle) >= backrub_angle_limit)) :
    if (len(labels) == 0) :
      labels = (calphas_A[2].fetch_labels().altloc,
                calphas_B[2].fetch_labels().altloc)
    return backrub_residue(
      calpha=calphas_A[2],
      i_mod=labels[0],
      j_mod=labels[1],
      rmsd=rmsd,
      backrub_angle=backrub_angle)
  return None
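# A minimal sketch (not part of the original) of the four-point dihedral
# used for the backrub angle above, on made-up sites with a 90-degree twist.
def exercise_dihedral_angle_sketch():
  import scitbx.math
  sites = [(1,0,0), (0,0,0), (0,0,1), (0,1,1)]
  angle = scitbx.math.dihedral_angle(sites=sites, deg=True)
  assert abs(abs(angle) - 90.0) < 1.e-6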
def compose(self):
  # reset the list that holds derivatives
  for i in range(len(self._model)):
    self._multi_state_derivatives[i] = [None] * len(self._dstate_dp)

  # loop over groups of panels collecting derivatives of the state wrt
  # parameters
  param = iter(self._param)
  for igp, pnl_ids in enumerate(self._panel_ids_by_group):

    # extract parameters from the internal list
    dist = next(param)
    shift1 = next(param)
    shift2 = next(param)
    tau1 = next(param)
    tau2 = next(param)
    tau3 = next(param)

    param_vals = flex.double((dist.value, shift1.value, shift2.value,
                              tau1.value, tau2.value, tau3.value))
    param_axes = flex.vec3_double((dist.axis, shift1.axis, shift2.axis,
                                   tau1.axis, tau2.axis, tau3.axis))
    offsets = self._offsets[igp]
    dir1s = self._dir1s[igp]
    dir2s = self._dir2s[igp]

    # Get items from the initial state for the group of interest
    initial_state = self._initial_state[igp]
    id1 = initial_state['d1']
    id2 = initial_state['d2']
    idn = initial_state['dn']
    igp_offset = initial_state['gp_offset']

    # Compose the new state using the helper class for calculations
    pgc = PanelGroupCompose(id1, id2, idn, igp_offset, param_vals, param_axes)

    # assign back to the group frame
    self._groups[igp].set_frame(pgc.d1(), pgc.d2(), pgc.origin())

    # Loop over attached Panel matrices, using the helper class to calculate
    # derivatives of the d matrix in each case and store them.
    i = igp * 6
    for (panel_id, offset, dir1_new_basis, dir2_new_basis) in \
        zip(pnl_ids, offsets, dir1s, dir2s):
      self._multi_state_derivatives[panel_id][i:(i+6)] = \
        pgc.derivatives_for_panel(offset, dir1_new_basis, dir2_new_basis)

  return
def generate_reflections(self):
    from cctbx.sgtbx import space_group, space_group_symbols
    from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection

    sequence_range = self.scan.get_oscillation_range(deg=False)
    resolution = 2.0
    index_generator = IndexGenerator(
        self.crystal.get_unit_cell(),
        space_group(space_group_symbols(1).hall()).type(),
        resolution,
    )
    indices = index_generator.to_array()

    # Predict rays within the sequence range
    ray_predictor = ScansRayPredictor(self.experiments, sequence_range)
    obs_refs = ray_predictor(indices)

    # Take only those rays that intersect the detector
    intersects = ray_intersection(self.detector, obs_refs)
    obs_refs = obs_refs.select(intersects)

    # Re-predict using the Experiments predictor for all these reflections.
    # The result is the same, but we gain also the flags and xyzcal.px columns
    obs_refs["id"] = flex.int(len(obs_refs), 0)
    obs_refs = self.ref_predictor(obs_refs)

    # Set 'observed' centroids from the predicted ones
    obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]

    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.0
    px_size = self.detector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
    obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)

    # set the flex random seed to an 'uninteresting' number
    flex.set_random_seed(12407)

    # take 10 random reflections for speed
    reflections = obs_refs.select(flex.random_selection(len(obs_refs), 10))

    # use a BlockCalculator to calculate the blocks per image
    from dials.algorithms.refinement.reflection_manager import BlockCalculator

    block_calculator = BlockCalculator(self.experiments, reflections)
    reflections = block_calculator.per_image()

    return reflections
def get_model_map_stats(
    selection,
    target_map,
    model_map,
    unit_cell,
    sites_cart,
    pdb_atoms,
    local_sampling=False):
  """
  Collect basic statistics for a model map and some target map (usually an
  mFo-DFc map), including CC, mean, and minimum density at the atomic
  positions.
  """
  assert (len(target_map) == len(model_map))
  iselection = selection
  if (type(selection).__name__ == 'bool'):
    iselection = selection.iselection()
  from scitbx.array_family import flex
  sites_cart_refined = sites_cart.select(selection)
  sites_selected = flex.vec3_double()
  map1 = flex.double()
  map2 = flex.double()
  min_density = sys.maxsize
  sum_density = n_sites = 0
  worst_atom = None
  # XXX I'm not sure the strict density cutoff is a good idea here
  for i_seq, xyz in zip(iselection, sites_cart_refined):
    if (pdb_atoms[i_seq].element.strip() != "H"):
      sites_selected.append(xyz)
      site_frac = unit_cell.fractionalize(site_cart=xyz)
      target_value = target_map.tricubic_interpolation(site_frac)
      if (target_value < min_density):
        min_density = target_value
        worst_atom = pdb_atoms[i_seq]
      sum_density += target_value
      n_sites += 1
      if (not local_sampling):
        map1.append(target_value)
        map2.append(model_map.tricubic_interpolation(site_frac))
  assert (n_sites > 0)
  if (local_sampling):
    from cctbx import maptbx
    map_sel = maptbx.grid_indices_around_sites(
      unit_cell=unit_cell,
      fft_n_real=target_map.focus(),
      fft_m_real=target_map.all(),
      sites_cart=sites_selected,
      site_radii=flex.double(sites_selected.size(), 1.0))
    map1 = target_map.select(map_sel)
    map2 = model_map.select(map_sel)
  assert (len(map1) > 0) and (len(map1) == len(map2))
  cc = flex.linear_correlation(x=map1, y=map2).coefficient()
  return group_args(
    cc=cc,
    min=min_density,
    mean=sum_density / n_sites)
def iterate(self):
  if (self.Niter > 0):
    # need to build PDB object from the last PDB file
    iter_name = self.root + str(self.Niter) + ".pdb"
    t1 = time.time()
    self.pdb = PDB(iter_name, method=self.method)
    self.nmode = self.pdb.Hessian(
      self.cutoff, self.nmode_init, self.scale_factor) - 1
    self.time_nm += (time.time() - t1)
  self.n1 = self.nmodes + 1
  score = []
  candidates = []
  for kk in range(self.nmode_init - self.nmodes):
    self.modes = flex.int(range(self.nmodes)) + 7
    self.modes.append(kk + 7 + self.nmodes)
    self.starting_simplex = []
    cand = flex.double(self.n1, 0)
    for ii in range(self.n1):
      self.starting_simplex.append(
        flex.double(self.orth(ii, self.n1)) * self.step_size + cand)
    self.starting_simplex.append(cand)
    self.optimizer = simplex.simplex_opt(
      dimension=self.n1,
      matrix=self.starting_simplex,
      evaluator=self,
      monitor_cycle=4,
      tolerance=1e-1)
    self.x = self.optimizer.get_solution()
    candidates.append(self.x.deep_copy())
    score.append(self.optimizer.get_score())
  minscore = min(score[0:self.topn])
  print self.Niter, minscore, self.counter
  if ((self.Niter % self.optNum) > 1):
    self.stopCheck(minscore)
  self.updateScore(minscore)
  minvec = candidates[score.index(minscore)]
  new_coord = flex.vec3_double(self.pdb.NMPerturb(self.modes, minvec))
  self.Niter = self.Niter + 1
  iter_name = self.root + str(self.Niter) + ".pdb"
  self.pdb.writePDB(new_coord, iter_name)
  if (self.Niter % self.optNum == 0):
    processed_pdb, pdb_inp = self.pdb_processor.process_pdb_files(
      pdb_file_names=[iter_name])
    new_coord = geo_opt(processed_pdb, self.log)
    self.pdb.writePDB(new_coord, iter_name)
  if (not self.stop): # if(self.Niter < 50):
    self.iterate()
def ImageToDat(filename):
  im = Image.open(filename)
  im = im.convert("L")
  data = im.getdata()
  width, height = im.size
  new_data = np.reshape(data, (width, height))
  image = flex.vec3_double()
  for x in range(0, width):
    for y in range(0, height):
      value = new_data[x][y]
      image.append([x, y, value])
  return image
def _calculate_centroids(self, coords, intensity, spots):
  """Calculate the spot centroids.

  Params:
      coords The spot coords
      intensity The spot intensities
      spots The pixel-spot mapping

  Returns:
      (centroid position, centroid variance)

  """
  from scitbx.array_family import flex

  # Loop through all the spots
  centroid_pos = flex.vec3_double()
  centroid_var = flex.vec3_double()
  for s in spots:

    # Get pixel coords and values
    pixel_coords = [map(lambda x: x + 0.5, coords[i]) for i in s]
    pixel_values = flex.double([intensity[i] for i in s])
    pixel_x, pixel_y, pixel_z = zip(*pixel_coords)

    # Calculate the centroid and variance
    xc = flex.mean_and_variance(flex.double(pixel_x), pixel_values)
    yc = flex.mean_and_variance(flex.double(pixel_y), pixel_values)
    zc = flex.mean_and_variance(flex.double(pixel_z), pixel_values)

    # Add the centroid and variance
    centroid_pos.append((xc.mean(), yc.mean(), zc.mean()))
    centroid_var.append((
      xc.gsl_stats_wvariance(),
      yc.gsl_stats_wvariance(),
      zc.gsl_stats_wvariance(),
    ))

  # Return the centroid and variance
  return centroid_pos, centroid_var
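# A minimal sketch (not part of the original) of the weighted statistics used
# above: flex.mean_and_variance with a weights argument gives the
# intensity-weighted mean and, via gsl_stats_wvariance, the weighted variance.
def exercise_weighted_centroid_stats():
  from scitbx.array_family import flex
  x = flex.double([1.0, 2.0, 3.0])   # pixel coordinates
  w = flex.double([1.0, 2.0, 1.0])   # pixel intensities as weights
  mv = flex.mean_and_variance(x, w)
  assert abs(mv.mean() - 2.0) < 1.e-12   # (1*1 + 2*2 + 3*1) / 4
  assert mv.gsl_stats_wvariance() > 0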
def zoom_selections(self):
  from scitbx.array_family import flex
  points = flex.vec3_double()
  for object_id, scene in six.iteritems(self.scene_objects):
    if self.show_object[object_id]:
      for point in scene.get_selected_xyz():
        points.append(point)
  if points.size() == 0:
    for object_id, scene in six.iteritems(self.scene_objects):
      if self.show_object[object_id]:
        points.extend(scene.points)
  if points.size() != 0:
    self.update_mcs(points, buffer=self.buffer_selection_sphere)
def vec3_double():
    from scitbx.array_family import flex

    num = 100
    x0, x1, y0, y1, z0, z1 = 0, 100, 0, 100, 0, 100
    result = flex.vec3_double(num)
    for i in range(num):
        result[i] = (
            random.uniform(x0, x1),
            random.uniform(y0, y1),
            random.uniform(z0, z1),
        )
    return result
def color_by_element (self) :
  cached = self._color_cache.get("element")
  if cached is not None :
    self.atom_colors = cached
  else :
    from scitbx.array_family import flex
    atom_colors = flex.vec3_double()
    for atom in self.pdb_hierarchy.atoms_with_labels() :
      element = atom.element.strip()
      color = element_shades.get(element, self.base_color)
      atom_colors.append(color)
    self.atom_colors = atom_colors
    self._color_cache["element"] = atom_colors
def refine_window(O, window):
  processed_pdb_file = O.get_processed_pdb_file()
  assert (processed_pdb_file is not None)
  hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy
  pdb_atoms = hierarchy.atoms()
  sites_cart = pdb_atoms.extract_xyz()
  dxyz = [None] * 3
  dxyz[0] = flex.vec3_double(pdb_atoms.size(), (1.0, 0.2, 0.3))
  dxyz[1] = flex.vec3_double(pdb_atoms.size(), (-1.0, -0.1, 0.0))
  dxyz[2] = dxyz[1]
  trials = []
  if (window.residue_id_str in [" A PHE 113 ", " A SER 110 "]):
    for i in range(3):
      trials.append(
        alt_confs.trial_result(
          sites_cart=(sites_cart + dxyz[i]).select(window.selection),
          min_fofc=4.0,
          mean_fofc=4.5,
          rmsd=1.2,
          max_dev=1.3,
          cc=0.99))
  elif (window.residue_id_str in [" A ASN 108 "]):
    trials.append(
      alt_confs.trial_result(
        sites_cart=(sites_cart + dxyz[0]).select(window.selection),
        min_fofc=4.0,
        mean_fofc=4.5,
        rmsd=1.2,
        max_dev=1.3,
        cc=0.99))
  e = sliding_window.ensemble(window=window, sites_trials=trials)
  n_keep = e.filter_trials(
    sites_cart=O.sites_cart,
    min_rmsd=O.params.min_rmsd,
    min_dev=O.min_required_deviation)
  if (n_keep > 0):
    return e
  return None
def read_xyz_output(self):
  filename = self.get_coordinate_filename()
  if not os.path.exists(filename):
    raise Sorry('QM output filename not found: %s' % filename)
  f = open(filename, 'r')
  lines = f.read()
  f.close()
  rc = flex.vec3_double()
  # skip the two header lines of the XYZ file
  for i, line in enumerate(lines.splitlines()):
    if i >= 2:
      tmp = line.split()
      rc.append((float(tmp[1]), float(tmp[2]), float(tmp[3])))
  return rc
def run(prefix="tst_18"): """ Exercise gradients match: -- pdbs with altlocs -- using clustering with less clusters vs not using clustering. -- using clustering with more clusters vs not using clustering. """ import multiprocessing nproc = str(multiprocessing.cpu_count()) for data_file_prefix in ["h_altconf_complete", "h_altconf_2_complete"]: for maxnum in ["15", "2"]: common_args = ["restraints=cctbx", "mode=opt", "parallel.nproc="+nproc] +\ ["altloc_method=subtract","maxnum_residues_in_cluster="+maxnum] r = run_tests.run_cmd( prefix, args=common_args + ["clustering=true", "dump_gradients=cluster_true.pkl"], pdb_name=os.path.join(qr_unit_tests_data, "%s.pdb" % data_file_prefix), mtz_name=os.path.join(qr_unit_tests_data, "%s.mtz" % data_file_prefix)) r = run_tests.run_cmd( prefix, args=common_args + ["clustering=false", "dump_gradients=cluster_false.pkl"], pdb_name=os.path.join(qr_unit_tests_data, "%s.pdb" % data_file_prefix), mtz_name=os.path.join(qr_unit_tests_data, "%s.mtz" % data_file_prefix)) # g1 = flex.vec3_double(easy_pickle.load("cluster_false.pkl")) g2 = flex.vec3_double(easy_pickle.load("cluster_true.pkl")) assert g1.size() == g2.size() diff = g1 - g2 if (0): for i, diff_i in enumerate(diff): print i, diff_i #, g1[i], g2[i] assert approx_equal(diff.max(), [0, 0, 0], [1.0E-3, 1.0E-3, 1.0E-3])
def exercise_clash_detector_simple():
  from scitbx.array_family import flex
  sites_cart = flex.vec3_double([
    (1,2.0,3),
    (1,3.3,3)])
  from scitbx.r3_utils import clash_detector_simple
  cd = clash_detector_simple(n_sites=2, threshold=1.2)
  assert approx_equal(cd.threshold_sq, 1.2**2)
  assert not cd.has_clash(sites_cart=sites_cart)
  sites_cart[1] = (1,3.1,3)
  assert cd.has_clash(sites_cart=sites_cart)
  cd.add_exclusion(i=0, j=1)
  assert not cd.has_clash(sites_cart=sites_cart)
def color_rainbow (self) :
  cached = self._color_cache.get("rainbow")
  if cached is not None :
    self.atom_colors = cached
  else :
    from scitbx import graphics_utils
    if (self.visibility.atoms_visible.count(True) == 1) :
      from scitbx.array_family import flex
      self.atom_colors = flex.vec3_double([(0,0,1)])
      return
    self.atom_colors = graphics_utils.color_rainbow(
      selection=self.visibility.atoms_visible)
    self._color_cache["rainbow"] = self.atom_colors
def generate_reflections(experiments):
    from dials.algorithms.spot_prediction import IndexGenerator
    from dials.algorithms.refinement.prediction.managed_predictors import (
        ScansRayPredictor,
        ScansExperimentsPredictor,
    )
    from dials.algorithms.spot_prediction import ray_intersection
    from cctbx.sgtbx import space_group, space_group_symbols
    from scitbx.array_family import flex

    detector = experiments[0].detector
    crystal = experiments[0].crystal

    # All indices in a 2.0 Angstrom sphere
    resolution = 2.0
    index_generator = IndexGenerator(
        crystal.get_unit_cell(),
        space_group(space_group_symbols(1).hall()).type(),
        resolution,
    )
    indices = index_generator.to_array()

    # Predict rays within the sequence range
    scan = experiments[0].scan
    sequence_range = scan.get_oscillation_range(deg=False)
    ray_predictor = ScansRayPredictor(experiments, sequence_range)
    obs_refs = ray_predictor(indices)

    # Take only those rays that intersect the detector
    intersects = ray_intersection(detector, obs_refs)
    obs_refs = obs_refs.select(intersects)

    # Make a reflection predictor and re-predict for all these reflections.
    # The result is the same, but we gain also the flags and xyzcal.px columns
    ref_predictor = ScansExperimentsPredictor(experiments)
    obs_refs["id"] = flex.int(len(obs_refs), 0)
    obs_refs = ref_predictor(obs_refs)

    # Set 'observed' centroids from the predicted ones
    obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]

    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.0
    px_size = detector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
    obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)

    return obs_refs, ref_predictor
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
    sigma_cutoff, outliers_only=True,
    use_segids_in_place_of_chainids=False):
  from scitbx.array_family import flex
  site_labels = flex.bool(sites_cart.size(), True).iselection()
  sorted_table, not_shown = proxies.get_sorted(
    by_value="residual",
    sites_cart=sites_cart,
    site_labels=site_labels)
  # this can happen for C-alpha-only models, etc.
  if (sorted_table is None):
    return []
  outliers = []
  for restraint_info in sorted_table:
    (i_seq, j_seq, i_seqs, ideal, model, slack, delta, sigma, weight,
     residual, sym_op_j, rt_mx) = restraint_info
    bond_atoms = get_atoms_info(
      pdb_atoms,
      iselection=i_seqs,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    if sym_op_j:
      import scitbx
      m3 = rt_mx.r().as_double()
      m3 = scitbx.matrix.sqr(m3)
      t = rt_mx.t().as_double()
      t = scitbx.matrix.col((t[0], t[1], t[2]))
      xyz = unit_cell.fractionalize(flex.vec3_double([bond_atoms[1].xyz]))
      new_xyz = unit_cell.orthogonalize(m3.elems * xyz + t)
      bond_atoms[1].xyz = new_xyz[0]
    outlier = bond(
      atoms_info=bond_atoms,
      target=ideal,
      model=model,
      sigma=sigma,
      slack=slack,
      delta=delta,
      residual=residual,
      symop=sym_op_j,
      outlier=True,
      xyz=get_mean_xyz(bond_atoms))
    if (outlier.score > sigma_cutoff):
      outliers.append(outlier)
    elif (not outliers_only):
      outlier.outlier = False
      outliers.append(outlier)
  return outliers