def _local_setup(self, reflections): """Setup additional attributes used in gradients calculation. These are specific to scans-type prediction parameterisations""" # Spindle rotation matrices for every reflection # R = self._axis.axis_and_angle_as_r3_rotation_matrix(phi) # R = flex.mat3_double(len(reflections)) # NB for now use flex.vec3_double.rotate_around_origin each time I need the # rotation matrix R. # r is the reciprocal lattice vector, in the lab frame self._phi_calc = reflections["xyzcal.mm"].parts()[2] q = self._fixed_rotation * (self._UB * self._h) self._r = self._setting_rotation * q.rotate_around_origin( self._axis, self._phi_calc ) # All of the derivatives of phi have a common denominator, given by # (e X r).s0, where e is the rotation axis. Calculate this once, here. self._e_X_r = (self._setting_rotation * self._axis).cross(self._r) self._e_r_s0 = (self._e_X_r).dot(self._s0) # Note that e_r_s0 -> 0 when the rotation axis, beam vector and # relp are coplanar. This occurs when a reflection just touches # the Ewald sphere. # # There is a relationship between e_r_s0 and zeta_factor. 
# Uncommenting the code below shows that # s0.(e X r) = zeta * |s X s0| # from dials.algorithms.profile_model.gaussian_rs import zeta_factor # from libtbx.test_utils import approx_equal # s = matrix.col(reflections['s1'][0]) # z = zeta_factor(axis[0], s0[0], s) # ss0 = (s.cross(matrix.col(s0[0]))).length() # assert approx_equal(e_r_s0[0], z * ss0) # catch small values of e_r_s0 e_r_s0_mag = flex.abs(self._e_r_s0) try: assert flex.min(e_r_s0_mag) > 1.0e-6 except AssertionError as e: imin = flex.min_index(e_r_s0_mag) print("(e X r).s0 too small:") print("for", (e_r_s0_mag <= 1.0e-6).count(True), "reflections") print("out of", len(e_r_s0_mag), "total") print("such as", reflections["miller_index"][imin]) print("with scattering vector", reflections["s1"][imin]) print("where r =", self._r[imin]) print("e =", self._axis[imin]) print("s0 =", self._s0[imin]) print("this reflection forms angle with the equatorial plane " "normal:") vecn = ( matrix.col(self._s0[imin]) .cross(matrix.col(self._axis[imin])) .normalize() ) print(matrix.col(reflections["s1"][imin]).accute_angle(vecn)) raise e
def _local_setup(self, reflections): """Setup additional attributes used in gradients calculation. These are specific to scans-type prediction parameterisations""" # Spindle rotation matrices for every reflection #R = self._axis.axis_and_angle_as_r3_rotation_matrix(phi) #R = flex.mat3_double(len(reflections)) # NB for now use flex.vec3_double.rotate_around_origin each time I need the # rotation matrix R. # r is the reciprocal lattice vector, in the lab frame self._phi_calc = reflections['xyzcal.mm'].parts()[2] q = self._fixed_rotation * (self._UB * self._h) self._r = self._setting_rotation * q.rotate_around_origin(self._axis, self._phi_calc) # All of the derivatives of phi have a common denominator, given by # (e X r).s0, where e is the rotation axis. Calculate this once, here. self._e_X_r = (self._setting_rotation * self._axis).cross(self._r) self._e_r_s0 = (self._e_X_r).dot(self._s0) # Note that e_r_s0 -> 0 when the rotation axis, beam vector and # relp are coplanar. This occurs when a reflection just touches # the Ewald sphere. # # There is a relationship between e_r_s0 and zeta_factor. 
# Uncommenting the code below shows that # s0.(e X r) = zeta * |s X s0| #from dials.algorithms.profile_model.gaussian_rs import zeta_factor #from libtbx.test_utils import approx_equal #s = matrix.col(reflections['s1'][0]) #z = zeta_factor(axis[0], s0[0], s) #ss0 = (s.cross(matrix.col(s0[0]))).length() #assert approx_equal(e_r_s0[0], z * ss0) # catch small values of e_r_s0 e_r_s0_mag = flex.abs(self._e_r_s0) try: assert flex.min(e_r_s0_mag) > 1.e-6 except AssertionError as e: imin = flex.min_index(e_r_s0_mag) print "(e X r).s0 too small:" print "for", (e_r_s0_mag <= 1.e-6).count(True), "reflections" print "out of", len(e_r_s0_mag), "total" print "such as", reflections['miller_index'][imin] print "with scattering vector", reflections['s1'][imin] print "where r =", self._r[imin] print "e =", self._axis[imin] print "s0 =", self._s0[imin] print ("this reflection forms angle with the equatorial plane " "normal:") vecn = matrix.col(self._s0[imin]).cross(matrix.col(self._axis[imin])).normalize() print matrix.col(reflections['s1'][imin]).accute_angle(vecn) raise e return
def choose_best_orientation_matrix(self, candidate_orientation_matrices):
    """Select the best crystal model from the candidate orientation matrices.

    Each candidate is used to index the still's unindexed reflections in P1;
    if a known target symmetry is supplied it is applied (with the matching
    change of basis applied to the indexed Miller indices). Candidates are
    optionally refined with rounds of outlier rejection, then scored by the
    2D positional RMSD of their predictions. Returns a tuple of
    (best crystal model, number of reflections it indexed).

    Raises DialsIndexError if no candidate yields a solution, or (when
    refining all candidates) if the best RMSD exceeds the configured cutoff.
    """
    logger.info("*" * 80)
    logger.info("Selecting the best orientation matrix")
    logger.info("*" * 80)

    # Lightweight record for per-candidate scoring data
    class CandidateInfo(libtbx.group_args):
        pass

    candidates = []

    params = copy.deepcopy(self.all_params)

    for icm, cm in enumerate(candidate_orientation_matrices):
        # Only consider up to the configured number of basis vector combinations
        if icm >= self.params.basis_vector_combinations.max_refine:
            break
        # Index reflections in P1
        sel = self.reflections["id"] == -1
        refl = self.reflections.select(sel)
        experiments = self.experiment_list_for_crystal(cm)
        self.index_reflections(experiments, refl)
        indexed = refl.select(refl["id"] >= 0)
        indexed = indexed.select(indexed.get_flags(indexed.flags.indexed))

        # If target symmetry supplied, try to apply it. Then, apply the change of basis to the reflections
        # indexed in P1 to the target setting
        if (
            self.params.stills.refine_candidates_with_known_symmetry
            and self.params.known_symmetry.space_group is not None
        ):
            new_crystal, cb_op_to_primitive = self._symmetry_handler.apply_symmetry(
                cm
            )
            if new_crystal is None:
                # Candidate incompatible with the target symmetry: discard it
                logger.info("Cannot convert to target symmetry, candidate %d", icm)
                continue
            new_crystal = new_crystal.change_basis(
                self._symmetry_handler.cb_op_primitive_inp
            )
            cm = new_crystal
            experiments = self.experiment_list_for_crystal(cm)

            # Keep the Miller indices consistent with the re-based crystal
            if not cb_op_to_primitive.is_identity_op():
                indexed["miller_index"] = cb_op_to_primitive.apply(
                    indexed["miller_index"]
                )
            if self._symmetry_handler.cb_op_primitive_inp is not None:
                indexed[
                    "miller_index"
                ] = self._symmetry_handler.cb_op_primitive_inp.apply(
                    indexed["miller_index"]
                )

        if params.indexing.stills.refine_all_candidates:
            try:
                logger.info(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d initial outlier identification",
                    icm,
                )
                acceptance_flags = self.identify_outliers(
                    params, experiments, indexed
                )
                # create a new "indexed" list with outliers thrown out:
                indexed = indexed.select(acceptance_flags)

                logger.info(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d refinement before outlier rejection",
                    icm,
                )
                R = e_refine(
                    params=params,
                    experiments=experiments,
                    reflections=indexed,
                    graph_verbose=False,
                )
                ref_experiments = R.get_experiments()

                # try to improve the outcome with a second round of outlier rejection post-initial refinement:
                acceptance_flags = self.identify_outliers(
                    params, ref_experiments, indexed
                )

                # insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
                nv0 = NaveParameters(
                    params=params,
                    experiments=ref_experiments,
                    reflections=indexed,
                    refinery=R,
                    graph_verbose=False,
                )
                nv0()
                acceptance_flags_nv0 = nv0.nv_acceptance_flags
                # Keep only reflections surviving both rejection schemes
                indexed = indexed.select(acceptance_flags & acceptance_flags_nv0)

                logger.info(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d after positional and delta-psi outlier rejection",
                    icm,
                )
                R = e_refine(
                    params=params,
                    experiments=ref_experiments,
                    reflections=indexed,
                    graph_verbose=False,
                )
                ref_experiments = R.get_experiments()

                nv = NaveParameters(
                    params=params,
                    experiments=ref_experiments,
                    reflections=indexed,
                    refinery=R,
                    graph_verbose=False,
                )
                crystal_model = nv()
                assert (
                    len(crystal_model) == 1
                ), "$$$ stills_indexer::choose_best_orientation_matrix, Only one crystal at this stage"
                crystal_model = crystal_model[0]

                # Drop candidates that after refinement can no longer be converted to the known target space group
                if (
                    not self.params.stills.refine_candidates_with_known_symmetry
                    and self.params.known_symmetry.space_group is not None
                ):
                    (
                        new_crystal,
                        cb_op_to_primitive,
                    ) = self._symmetry_handler.apply_symmetry(crystal_model)
                    if new_crystal is None:
                        logger.info(
                            "P1 refinement yielded model diverged from target, candidate %d",
                            icm,
                        )
                        continue

                # Score the refined candidate by 2D positional RMSD
                rmsd, _ = calc_2D_rmsd_and_displacements(
                    R.predict_for_reflection_table(indexed)
                )
            except Exception as e:
                # Refinement failed for this candidate: log and move on
                logger.info(
                    "Couldn't refine candidate %d, %s: %s",
                    icm,
                    e.__class__.__name__,
                    str(e),
                )
            else:
                logger.info(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d done",
                    icm,
                )
                candidates.append(
                    CandidateInfo(
                        crystal=crystal_model,
                        green_curve_area=nv.green_curve_area,
                        ewald_proximal_volume=nv.ewald_proximal_volume(),
                        n_indexed=len(indexed),
                        rmsd=rmsd,
                        indexed=indexed,
                        experiments=ref_experiments,
                    )
                )
        else:
            # No per-candidate refinement: score predictions from the
            # unrefined model directly
            from dials.algorithms.refinement.prediction.managed_predictors import (
                ExperimentsPredictorFactory,
            )

            ref_predictor = ExperimentsPredictorFactory.from_experiments(
                experiments,
                force_stills=True,
                spherical_relp=params.refinement.parameterisation.spherical_relp_model,
            )
            rmsd, _ = calc_2D_rmsd_and_displacements(ref_predictor(indexed))
            candidates.append(
                CandidateInfo(
                    crystal=cm,
                    n_indexed=len(indexed),
                    rmsd=rmsd,
                    indexed=indexed,
                    experiments=experiments,
                )
            )
    if len(candidates) == 0:
        raise DialsIndexError("No suitable indexing solution found")

    logger.info("**** ALL CANDIDATES:")
    for i, XX in enumerate(candidates):
        logger.info("\n****Candidate %d %s", i, XX)
        cc = XX.crystal
        if hasattr(cc, "get_half_mosaicity_deg"):
            logger.info(
                " half mosaicity %5.2f deg.", (cc.get_half_mosaicity_deg())
            )
            logger.info(" domain size %.0f Ang.", (cc.get_domain_size_ang()))
    logger.info("\n**** BEST CANDIDATE:")

    # Best candidate is the one with the lowest RMSD
    results = flex.double([c.rmsd for c in candidates])
    best = candidates[flex.min_index(results)]
    logger.info(best)

    if params.indexing.stills.refine_all_candidates:
        if best.rmsd > params.indexing.stills.rmsd_min_px:
            raise DialsIndexError("RMSD too high, %f" % best.rmsd)

    if len(candidates) > 1:
        # NOTE(review): this only logs a warning when another candidate has a
        # smaller Ewald proximal volume than the RMSD-best one; the RMSD-best
        # candidate is still the one returned.
        for i in range(len(candidates)):
            if i == flex.min_index(results):
                continue
            if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume:
                logger.info(
                    "Couldn't figure out which candidate is best; picked the one with the best RMSD."
                )

    best.indexed["entering"] = flex.bool(best.n_indexed, False)

    return best.crystal, best.n_indexed
def index_reflections_detail(debug, experiments, reflections, detector,
                             reciprocal_lattice_points1,
                             reciprocal_lattice_points2,
                             d_min=None, tolerance=0.3, verbosity=0):
    '''Overwrites the base class index_reflections function and assigns spots
    to their corresponding experiment (wavelength).

    Expects exactly three experiments: 0 = low energy, 1 = high energy,
    2 = average energy (used for overlapped spots). Reflection 'id' values
    of -1 mean unindexed; -2 marks spots rejected as duplicates below.
    The 'miller_index', 'id' and 'rlp' columns of `reflections` are
    modified in place.

    NOTE(review): `debug` and `verbosity` are unused in this body.
    '''
    print("\n\n special indexing \n\n")
    # initialize each reflections miller index to 0,0,0
    reflections['miller_index'] = flex.miller_index(len(reflections), (0,0,0))
    # for two wavelengths
    assert len(experiments) == 3
    low_energy = 0  # 0th experiment is low-energy
    high_energy = 1  # 1st experiment is high-energ
    avg_energy = 2  # 2nd experiment is average energy (for spot overlaps)

    # code to check input orientation matrix
    # get predicted reflections based on basis vectors
    pred = False  # debug switch: dead branch below unless flipped by hand
    if pred ==True:
        experiments[0].crystal._ML_half_mosaicity_deg = .2
        experiments[0].crystal._ML_domain_size_ang = 1000
        predicted = flex.reflection_table.from_predictions_multi(experiments[0:2])
        predicted.as_pickle('test')

    inside_resolution_limit = flex.bool(len(reflections), True)
    if d_min is not None:
        d_spacings = 1/reflections['rlp'].norms()
        inside_resolution_limit &= (d_spacings > d_min)
    # boolean array, all yet-to-be spots that are bound by the resolution
    sel = inside_resolution_limit & (reflections['id'] == -1)
    # array of indices of the reflections
    isel = sel.iselection()
    # I believe .select( isel) is same as .select( sel)
    rlps0 = reciprocal_lattice_points1.select(isel)  # low-energy beam lp vectors calculated in two_color_grid_search
    rlps1 = reciprocal_lattice_points2.select(isel)  # high-energy beam lps!
    refs = reflections.select(isel)
    rlps = (rlps0, rlps1)  # put em in a tuple ?

    rlp_norms = []
    hkl_ints = []
    norms = []
    diffs = []

    c1 = experiments.crystals()[0]
    assert( len(experiments.crystals()) == 1 )  # 3 beams but only 1 crystal!
    A = matrix.sqr( experiments.crystals()[0].get_A())
    A_inv = A.inverse()

    # confusing variable names, but for each set of R.L.P.s.
    # (one for the high and one for the low energy beam)
    # find the distance to the nearest integer hkl
    for rlp in range(len(rlps)):
        hkl_float = tuple(A_inv) * rlps[rlp]
        hkl_int = hkl_float.iround()
        differences = hkl_float - hkl_int.as_vec3_double()
        diffs.append(differences)
        norms.append(differences.norms())
        hkl_ints.append(hkl_int)

    n_rejects = 0

    # NOTE(review): relies on `hkl_int` left over from the last loop
    # iteration; both rlp sets have the same length so the size matches.
    for i_hkl in range(hkl_int.size()):
        n = flex.double([norms[j][i_hkl] for j in range(len(rlps))])
        potential_hkls = [hkl_ints[j][i_hkl] for j in range(len(rlps))]
        potential_rlps = [rlps[j][i_hkl] for j in range(len(rlps))]
        # assign the spot to whichever beam gives the smaller distance to an
        # integer hkl (ties fall through to min_index)
        if norms[0][i_hkl]>norms[1][i_hkl]:
            i_best_lattice = high_energy
            i_best_rlp = high_energy
        elif norms[0][i_hkl]<norms[1][i_hkl]:
            i_best_lattice = low_energy
            i_best_rlp = low_energy
        else:
            i_best_lattice = flex.min_index(n)
            i_best_rlp = flex.min_index(n)
        if n[i_best_lattice] > tolerance:
            # too far from any integer hkl: leave unassigned
            n_rejects += 1
            continue
        miller_index = potential_hkls[i_best_lattice]
        reciprocal_lattice_points = potential_rlps[i_best_rlp]
        i_ref = isel[i_hkl]
        reflections['miller_index'][i_ref] = miller_index
        reflections['id'][i_ref] = i_best_lattice
        reflections['rlp'][i_ref] = reciprocal_lattice_points

    # if more than one spot can be assigned the same miller index then choose
    # the closest one
    miller_indices = reflections['miller_index'].select(isel)
    rlp_norms = reflections['rlp'].select(isel).norms()
    same=0

    for i_hkl, hkl in enumerate(miller_indices):
        if hkl == (0,0,0): continue
        iselection = (miller_indices == hkl).iselection()
        if len(iselection) > 1:
            for i in iselection:
                for j in iselection:
                    if j <= i: continue
                    crystal_i = reflections['id'][isel[i]]
                    crystal_j = reflections['id'][isel[j]]
                    if crystal_i != crystal_j:
                        continue
                    elif (crystal_i == -1 or crystal_j ==-1) or (crystal_i == -2 or crystal_j == -2):
                        continue
                    elif crystal_i ==2 or crystal_j ==2:
                        continue
                    #print hkl_ints[crystal_i][i], hkl_ints[crystal_j][j], crystal_i
                    assert hkl_ints[crystal_j][j] == hkl_ints[crystal_i][i]
                    same +=1
                    # re-split the duplicate pair: the shorter rlp goes to the
                    # high-energy experiment, the longer to low-energy
                    if rlp_norms[i] < rlp_norms[j]:
                        reflections['id'][isel[i]] = high_energy
                        reflections['id'][isel[j]] = low_energy
                    elif rlp_norms[j] < rlp_norms[i]:
                        reflections['id'][isel[j]] = high_energy
                        reflections['id'][isel[i]] = low_energy

    #calculate Bragg angles
    s0 = col(experiments[2].beam.get_s0())
    lambda_0 = experiments[0].beam.get_wavelength()
    lambda_1 = experiments[1].beam.get_wavelength()
    det_dist = experiments[0].detector[0].get_distance()
    px_size_mm = experiments[0].detector[0].get_pixel_size()[0]
    spot_px_coords=reflections['xyzobs.px.value'].select(isel)
    px_x,px_y,px_z = spot_px_coords.parts()
    res = []
    for i in range(len(spot_px_coords)):
        res.append(detector[0].get_resolution_at_pixel(s0, (px_x[i], px_y[i])))
    # predicted spot distance based on the resultion of the observed spot at either wavelength 1 or 2
    theta_1a = [math.asin(lambda_0/(2*res[i])) for i in range(len(res))]
    theta_2a = [math.asin(lambda_1/(2*res[i])) for i in range(len(res))]
    # separation (in pixels) between the two wavelengths' predicted positions
    px_dist = [(math.tan(2*theta_1a[i])*det_dist-math.tan(2*theta_2a[i])*det_dist)/px_size_mm for i in range(len(spot_px_coords))]

    # first calculate distance from stop centroid to farthest valid pixel (determine max spot radius)
    # coords of farthest valid pixel
    # if the predicted spot distance at either wavelength is less than 2x distance described above than the spot is considered "overlapped" and assigned to experiment 2 at average wavelength
    valid = MaskCode.Valid | MaskCode.Foreground

    for i in range(len(refs)):
        if reflections['miller_index'][isel[i]]==(0,0,0): continue
        sb = reflections['shoebox'][isel[i]]
        bbox = sb.bbox
        mask = sb.mask
        centroid = col(reflections['xyzobs.px.value'][isel[i]][0:2])
        x1, x2, y1, y2, z1, z2 = bbox
        longest = 0
        # scan the shoebox mask for the valid pixel farthest from the centroid
        for y in range(y1, y2):
            for x in range(x1, x2):
                if mask[z1,y-y1,x-x1] != valid: continue
                v = col([x,y])
                dist = (centroid -v).length()
                if dist > longest:
                    longest = dist
        #print "Miller Index", reflections['miller_index'][i], "longest", longest,"predicted distance", px_dist_1[i]
        if 2*longest > px_dist[i]:
            # overlapped spot: rescale its rlp to the average wavelength and
            # reassign it to the average-energy experiment
            avg_rlp0 = reflections['rlp'][isel[i]][0]*experiments[reflections['id'][isel[i]]].beam.get_wavelength()/experiments[2].beam.get_wavelength()
            avg_rlp1 = reflections['rlp'][isel[i]][1]*experiments[reflections['id'][isel[i]]].beam.get_wavelength()/experiments[2].beam.get_wavelength()
            avg_rlp2 = reflections['rlp'][isel[i]][2]*experiments[reflections['id'][isel[i]]].beam.get_wavelength()/experiments[2].beam.get_wavelength()
            reflections['id'][isel[i]] = avg_energy
            reflections['rlp'][isel[i]] = (avg_rlp0, avg_rlp1, avg_rlp2)

    # check for repeated hkl in experiment 2, and if experiment 2 has same hkl as experiment 0 or 1 the spot with the largest variance is assigned to experiment -2 and the remaining spot is assigned to experiment 2
    # NOTE(review): `miller_indices` was captured before the overlap pass
    # above; presumably the indices themselves have not changed since.
    for i_hkl, hkl in enumerate(miller_indices):
        if hkl == (0,0,0): continue
        iselection = (miller_indices == hkl).iselection()
        if len(iselection) > 1:
            for i in iselection:
                for j in iselection:
                    if j <= i: continue
                    crystal_i = reflections['id'][isel[i]]
                    crystal_j = reflections['id'][isel[j]]
                    if (crystal_i == -1 or crystal_j ==-1) or (crystal_i == -2 or crystal_j == -2):
                        continue
                    # control to only filter for experient 2; duplicate miller indices in 0 and 1 are resolved above
                    if (crystal_i == 1 and crystal_j == 0) or (crystal_i == 0 and crystal_j ==1):
                        continue
                    if (crystal_i ==2 or crystal_j ==2) and (reflections['xyzobs.px.variance'][isel[i]]<reflections['xyzobs.px.variance'][isel[j]]):
                        reflections['id'][isel[j]] = -2
                        reflections['id'][isel[i]] = avg_energy
                    elif (crystal_i ==2 or crystal_j ==2) and (reflections['xyzobs.px.variance'][isel[i]]>reflections['xyzobs.px.variance'][isel[j]]):
                        reflections['id'][isel[i]] = -2
                        reflections['id'][isel[j]] = avg_energy
                    if (crystal_i ==2 and crystal_j ==2) and (reflections['xyzobs.px.variance'][isel[i]]<reflections['xyzobs.px.variance'][isel[j]]):
                        reflections['id'][isel[j]] = -2
                        reflections['id'][isel[i]] = avg_energy
                    elif (crystal_i ==2 and crystal_j ==2) and (reflections['xyzobs.px.variance'][isel[i]]>reflections['xyzobs.px.variance'][isel[j]]):
                        reflections['id'][isel[i]] = -2
                        reflections['id'][isel[j]] = avg_energy

    # check that each experiment list does not contain duplicate miller indices
    # NOTE(review): these selections are computed but not used in the visible
    # body — possibly leftover debugging.
    exp_0 = reflections.select(reflections['id']==0)
    exp_1 = reflections.select(reflections['id']==1)
    exp_2 = reflections.select(reflections['id']==2)
def choose_best_orientation_matrix(self, candidate_orientation_matrices):
    """Select the best crystal model from the candidate orientation matrices.

    Each candidate indexes the unindexed reflections in P1; if a known
    target symmetry is supplied it is applied (re-basing the indexed Miller
    indices to match). Candidates are optionally refined with outlier
    rejection and scored by 2D positional RMSD. Returns a tuple of
    (best crystal model, number of reflections it indexed).

    Raises Sorry if no candidate yields a solution, or (when refining all
    candidates) if the best RMSD or Ewald proximal volume exceeds the
    configured cutoffs.
    """
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    import copy
    logger.info('*' * 80)
    logger.info('Selecting the best orientation matrix')
    logger.info('*' * 80)

    from libtbx import group_args

    # Lightweight record for per-candidate scoring data
    class candidate_info(group_args):
        pass
    candidates = []

    params = copy.deepcopy(self.all_params)

    n_cand = len(candidate_orientation_matrices)

    for icm, cm in enumerate(candidate_orientation_matrices):
        # Index reflections in P1
        sel = ((self.reflections['id'] == -1))
        refl = self.reflections.select(sel)
        experiments = self.experiment_list_for_crystal(cm)
        self.index_reflections(experiments, refl)
        indexed = refl.select(refl['id'] >= 0)
        indexed = indexed.select(indexed.get_flags(indexed.flags.indexed))

        # If target symmetry supplied, try to apply it. Then, apply the change of basis to the reflections
        # indexed in P1 to the target setting
        if self.params.stills.refine_candidates_with_known_symmetry and self.params.known_symmetry.space_group is not None:
            target_space_group = self.target_symmetry_primitive.space_group()
            new_crystal, cb_op_to_primitive = self.apply_symmetry(
                cm, target_space_group)
            if new_crystal is None:
                # Candidate incompatible with the target symmetry: discard it
                print("Cannot convert to target symmetry, candidate %d/%d" %
                      (icm, n_cand))
                continue
            new_crystal = new_crystal.change_basis(self.cb_op_primitive_inp)
            cm = candidate_orientation_matrices[icm] = new_crystal
            experiments = self.experiment_list_for_crystal(cm)

            # Keep the Miller indices consistent with the re-based crystal
            if not cb_op_to_primitive.is_identity_op():
                indexed['miller_index'] = cb_op_to_primitive.apply(
                    indexed['miller_index'])
            if self.cb_op_primitive_inp is not None:
                indexed['miller_index'] = self.cb_op_primitive_inp.apply(
                    indexed['miller_index'])

        if params.indexing.stills.refine_all_candidates:
            try:
                print(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d/%d initial outlier identification"
                    % (icm, n_cand))
                acceptance_flags = self.identify_outliers(
                    params, experiments, indexed)
                # create a new "indexed" list with outliers thrown out:
                indexed = indexed.select(acceptance_flags)

                print(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d/%d refinement before outlier rejection"
                    % (icm, n_cand))
                R = e_refine(params=params, experiments=experiments,
                             reflections=indexed, graph_verbose=False)
                ref_experiments = R.get_experiments()

                # try to improve the outcome with a second round of outlier rejection post-initial refinement:
                acceptance_flags = self.identify_outliers(
                    params, ref_experiments, indexed)

                # insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
                nv0 = nave_parameters(params=params,
                                      experiments=ref_experiments,
                                      reflections=indexed, refinery=R,
                                      graph_verbose=False)
                crystal_model_nv0 = nv0()
                acceptance_flags_nv0 = nv0.nv_acceptance_flags
                # Keep only reflections surviving both rejection schemes
                indexed = indexed.select(acceptance_flags & acceptance_flags_nv0)

                print(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d/%d after positional and delta-psi outlier rejection"
                    % (icm, n_cand))
                R = e_refine(params=params, experiments=ref_experiments,
                             reflections=indexed, graph_verbose=False)
                ref_experiments = R.get_experiments()

                nv = nave_parameters(params=params,
                                     experiments=ref_experiments,
                                     reflections=indexed, refinery=R,
                                     graph_verbose=False)
                crystal_model = nv()

                # Drop candidates that after refinement can no longer be converted to the known target space group
                if not self.params.stills.refine_candidates_with_known_symmetry and self.params.known_symmetry.space_group is not None:
                    target_space_group = self.target_symmetry_primitive.space_group()
                    new_crystal, cb_op_to_primitive = self.apply_symmetry(
                        crystal_model, target_space_group)
                    if new_crystal is None:
                        print(
                            "P1 refinement yielded model diverged from target, candidate %d/%d"
                            % (icm, n_cand))
                        continue

                # Score the refined candidate by 2D positional RMSD
                rmsd, _ = calc_2D_rmsd_and_displacements(
                    R.predict_for_reflection_table(indexed))
            except Exception as e:
                # Refinement failed for this candidate: report and move on.
                # (typo "candiate" fixed in the message below)
                print("Couldn't refine candidate %d/%d, %s" %
                      (icm, n_cand, str(e)))
            else:
                print(
                    "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d/%d done"
                    % (icm, n_cand))
                candidates.append(
                    candidate_info(
                        crystal=crystal_model,
                        green_curve_area=nv.green_curve_area,
                        ewald_proximal_volume=nv.ewald_proximal_volume(),
                        n_indexed=len(indexed),
                        rmsd=rmsd,
                        indexed=indexed,
                        experiments=ref_experiments))
        else:
            # No per-candidate refinement: score predictions from the
            # unrefined model directly
            from dials.algorithms.refinement.prediction import ExperimentsPredictor
            ref_predictor = ExperimentsPredictor(
                experiments,
                force_stills=True,
                spherical_relp=params.refinement.parameterisation.
                spherical_relp_model)
            rmsd, _ = calc_2D_rmsd_and_displacements(ref_predictor(indexed))
            candidates.append(
                candidate_info(crystal=cm,
                               n_indexed=len(indexed),
                               rmsd=rmsd,
                               indexed=indexed,
                               experiments=experiments))
    if len(candidates) == 0:
        raise Sorry("No suitable indexing solution found")

    print("**** ALL CANDIDATES:")
    for i, XX in enumerate(candidates):
        print("\n****Candidate %d" % i, XX)
        cc = XX.crystal
        if hasattr(cc, 'get_half_mosaicity_deg'):
            print("  half mosaicity %5.2f deg." % (cc.get_half_mosaicity_deg()))
            print("  domain size %.0f Ang." % (cc.get_domain_size_ang()))
    print("\n**** BEST CANDIDATE:")

    # Best candidate is the one with the lowest RMSD
    results = flex.double([c.rmsd for c in candidates])
    best = candidates[flex.min_index(results)]
    print(best)

    if params.indexing.stills.refine_all_candidates:
        if best.rmsd > params.indexing.stills.rmsd_min_px:
            raise Sorry("RMSD too high, %f" % best.rmsd)
        if best.ewald_proximal_volume > params.indexing.stills.ewald_proximal_volume_max:
            raise Sorry("Ewald proximity volume too high, %f" %
                        best.ewald_proximal_volume)

    if len(candidates) > 1:
        # `range` replaces Python-2-only `xrange`, which is a NameError on
        # Python 3 (this block already uses print() functions throughout)
        for i in range(len(candidates)):
            if i == flex.min_index(results):
                continue
            if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume:
                print(
                    "Couldn't figure out which candidate is best; picked the one with the best RMSD."
                )

    best.indexed['entering'] = flex.bool(best.n_indexed, False)

    return best.crystal, best.n_indexed
def _get_gradients_core(self, reflections, D, s0, U, B, axis, fixed_rotation, callback=None):
  """Calculate gradients of the prediction formula with respect to each of the
  parameters of the contained models, for reflection h that reflects at
  rotation angle phi with scattering vector s that intersects panel panel_id.
  That is, calculate dX/dp, dY/dp and dphi/dp

  NOTE(review): this block uses Python 2 print statements in its diagnostic
  branch and so requires a Python 2 interpreter as written.
  """

  # Spindle rotation matrices for every reflection
  #R = self._axis.axis_and_angle_as_r3_rotation_matrix(phi)
  #R = flex.mat3_double(len(reflections))
  # NB for now use flex.vec3_double.rotate_around_origin each time I need the
  # rotation matrix R.

  # Cache the per-reflection model state on the instance for use by the
  # derivative helper methods called below
  self._axis = axis
  self._fixed_rotation = fixed_rotation
  self._s0 = s0

  # pv is the 'projection vector' for the ray along s1.
  self._D = D
  self._s1 = reflections['s1']
  self._pv = D * self._s1

  # also need quantities derived from pv, precalculated for efficiency
  u, v, w = self._pv.parts()
  self._w_inv = 1/w
  self._u_w_inv = u * self._w_inv
  self._v_w_inv = v * self._w_inv

  self._UB = U * B
  self._U = U
  self._B = B

  # r is the reciprocal lattice vector, in the lab frame
  self._h = reflections['miller_index'].as_vec3_double()
  self._phi_calc = reflections['xyzcal.mm'].parts()[2]
  self._r = (self._fixed_rotation * (self._UB * self._h)).rotate_around_origin(self._axis, self._phi_calc)

  # All of the derivatives of phi have a common denominator, given by
  # (e X r).s0, where e is the rotation axis. Calculate this once, here.
  self._e_X_r = self._axis.cross(self._r)
  self._e_r_s0 = (self._e_X_r).dot(self._s0)

  # Note that e_r_s0 -> 0 when the rotation axis, beam vector and
  # relp are coplanar. This occurs when a reflection just touches
  # the Ewald sphere.
  #
  # There is a relationship between e_r_s0 and zeta_factor.
  # Uncommenting the code below shows that
  # s0.(e X r) = zeta * |s X s0|

  #from dials.algorithms.profile_model.gaussian_rs import zeta_factor
  #from libtbx.test_utils import approx_equal
  #s = matrix.col(reflections['s1'][0])
  #z = zeta_factor(axis[0], s0[0], s)
  #ss0 = (s.cross(matrix.col(s0[0]))).length()
  #assert approx_equal(e_r_s0[0], z * ss0)

  # catch small values of e_r_s0
  e_r_s0_mag = flex.abs(self._e_r_s0)
  try:
    assert flex.min(e_r_s0_mag) > 1.e-6
  except AssertionError as e:
    # Print diagnostics for the worst offender, then re-raise
    imin = flex.min_index(e_r_s0_mag)
    print "(e X r).s0 too small:"
    print "for", (e_r_s0_mag <= 1.e-6).count(True), "reflections"
    print "out of", len(e_r_s0_mag), "total"
    print "such as", reflections['miller_index'][imin]
    print "with scattering vector", reflections['s1'][imin]
    print "where r =", self._r[imin]
    print "e =", self._axis[imin]
    print "s0 =", self._s0[imin]
    print ("this reflection forms angle with the equatorial plane "
           "normal:")
    vecn = matrix.col(self._s0[imin]).cross(matrix.col(self._axis[imin])).normalize()
    # NB 'accute_angle' is the (misspelled) scitbx matrix.col method name
    print matrix.col(reflections['s1'][imin]).accute_angle(vecn)
    raise e

  # Set up empty list in which to store gradients
  m = len(reflections)
  results = []

  # determine experiment to indices mappings once, here
  experiment_to_idx = []
  for iexp, exp in enumerate(self._experiments):
    sel = reflections['id'] == iexp
    isel = sel.iselection()
    experiment_to_idx.append(isel)

  # reset a pointer to the parameter number
  self._iparam = 0

  ### Work through the parameterisations, calculating their contributions
  ### to derivatives d[pv]/dp and d[phi]/dp

  # loop over the detector parameterisations
  for dp in self._detector_parameterisations:

    # Determine (sub)set of reflections affected by this parameterisation
    isel = flex.size_t()
    for exp_id in dp.get_experiment_ids():
      isel.extend(experiment_to_idx[exp_id])

    # Access the detector model being parameterised
    detector = dp.get_model()

    # Get panel numbers of the affected reflections
    panel = reflections['panel'].select(isel)

    # Extend derivative vectors for this detector parameterisation
    results = self._extend_gradient_vectors(results, m, dp.num_free(),
      keys=self._grad_names)

    # loop through the panels in this detector
    # NOTE(review): 'exp' here is the loop variable leaked from the
    # experiment_to_idx loop above — looks like this should probably
    # enumerate 'detector' (the model being parameterised); confirm.
    for panel_id, _ in enumerate(exp.detector):

      # get the right subset of array indices to set for this panel
      sub_isel = isel.select(panel == panel_id)
      if len(sub_isel) == 0:
        # if no reflections intersect this panel, skip calculation
        continue
      sub_pv = self._pv.select(sub_isel)
      sub_D = self._D.select(sub_isel)
      dpv_ddet_p = self._detector_derivatives(dp, sub_pv, sub_D, panel_id)

      # convert to dX/dp, dY/dp and assign the elements of the vectors
      # corresponding to this experiment and panel
      sub_w_inv = self._w_inv.select(sub_isel)
      sub_u_w_inv = self._u_w_inv.select(sub_isel)
      sub_v_w_inv = self._v_w_inv.select(sub_isel)
      dX_ddet_p, dY_ddet_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp(
        sub_w_inv, sub_u_w_inv, sub_v_w_inv, dpv_ddet_p)

      # use a local parameter index pointer because we set all derivatives
      # for this panel before moving on to the next
      iparam = self._iparam
      for dX, dY in zip(dX_ddet_p, dY_ddet_p):
        if dX is not None:
          results[iparam]['dX_dp'].set_selected(sub_isel, dX)
        if dY is not None:
          results[iparam]['dY_dp'].set_selected(sub_isel, dY)
        # increment the local parameter index pointer
        iparam += 1

    if callback is not None:
      iparam = self._iparam
      for i in range(dp.num_free()):
        results[iparam] = callback(results[iparam])
        iparam += 1

    # increment the parameter index pointer to the last detector parameter
    self._iparam += dp.num_free()

  # loop over the beam parameterisations
  for bp in self._beam_parameterisations:

    # Determine (sub)set of reflections affected by this parameterisation
    isel = flex.size_t()
    for exp_id in bp.get_experiment_ids():
      isel.extend(experiment_to_idx[exp_id])

    # Extend derivative vectors for this beam parameterisation
    results = self._extend_gradient_vectors(results, m, bp.num_free(),
      keys=self._grad_names)

    if len(isel) == 0:
      # if no reflections are in this experiment, skip calculation
      self._iparam += bp.num_free()
      continue

    # Get required data from those reflections
    r = self._r.select(isel)
    e_X_r = self._e_X_r.select(isel)
    e_r_s0 = self._e_r_s0.select(isel)
    D = self._D.select(isel)
    w_inv = self._w_inv.select(isel)
    u_w_inv = self._u_w_inv.select(isel)
    v_w_inv = self._v_w_inv.select(isel)

    dpv_dbeam_p, dphi_dbeam_p = self._beam_derivatives(bp, r, e_X_r, e_r_s0, D)

    # convert to dX/dp, dY/dp and assign the elements of the vectors
    # corresponding to this experiment
    dX_dbeam_p, dY_dbeam_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp(
      w_inv, u_w_inv, v_w_inv, dpv_dbeam_p)
    for dX, dY, dphi in zip(dX_dbeam_p, dY_dbeam_p, dphi_dbeam_p):
      results[self._iparam][self._grad_names[0]].set_selected(isel, dX)
      results[self._iparam][self._grad_names[1]].set_selected(isel, dY)
      results[self._iparam][self._grad_names[2]].set_selected(isel, dphi)
      if callback is not None:
        results[self._iparam] = callback(results[self._iparam])
      # increment the parameter index pointer
      self._iparam += 1

  # loop over the crystal orientation parameterisations
  for xlop in self._xl_orientation_parameterisations:

    # Determine (sub)set of reflections affected by this parameterisation
    isel = flex.size_t()
    for exp_id in xlop.get_experiment_ids():
      isel.extend(experiment_to_idx[exp_id])

    # Extend derivative vectors for this crystal orientation parameterisation
    results = self._extend_gradient_vectors(results, m, xlop.num_free(),
      keys=self._grad_names)

    if len(isel) == 0:
      # if no reflections are in this experiment, skip calculation
      self._iparam += xlop.num_free()
      continue

    # Get required data from those reflections
    axis = self._axis.select(isel)
    fixed_rotation = self._fixed_rotation.select(isel)
    phi_calc = self._phi_calc.select(isel)
    h = self._h.select(isel)
    s1 = self._s1.select(isel)
    e_X_r = self._e_X_r.select(isel)
    e_r_s0 = self._e_r_s0.select(isel)
    B = self._B.select(isel)
    D = self._D.select(isel)
    w_inv = self._w_inv.select(isel)
    u_w_inv = self._u_w_inv.select(isel)
    v_w_inv = self._v_w_inv.select(isel)

    # get derivatives of the U matrix wrt the parameters
    # (precomputed per-reflection columns "dU_dp0", "dU_dp1", ...)
    dU_dxlo_p = [reflections["dU_dp{0}".format(i)].select(isel) \
                 for i in range(xlop.num_free())]

    dpv_dxlo_p, dphi_dxlo_p = self._xl_orientation_derivatives(
      dU_dxlo_p, axis, fixed_rotation, phi_calc, h, s1, e_X_r, e_r_s0, B, D)

    # convert to dX/dp, dY/dp and assign the elements of the vectors
    # corresponding to this experiment
    dX_dxlo_p, dY_dxlo_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp(
      w_inv, u_w_inv, v_w_inv, dpv_dxlo_p)
    for dX, dY, dphi in zip(dX_dxlo_p, dY_dxlo_p, dphi_dxlo_p):
      results[self._iparam][self._grad_names[0]].set_selected(isel, dX)
      results[self._iparam][self._grad_names[1]].set_selected(isel, dY)
      results[self._iparam][self._grad_names[2]].set_selected(isel, dphi)
      if callback is not None:
        results[self._iparam] = callback(results[self._iparam])
      # increment the parameter index pointer
      self._iparam += 1

  # loop over the crystal unit cell parameterisations
  for xlucp in self._xl_unit_cell_parameterisations:

    # Determine (sub)set of reflections affected by this parameterisation
    isel = flex.size_t()
    for exp_id in xlucp.get_experiment_ids():
      isel.extend(experiment_to_idx[exp_id])

    # Extend derivative vectors for this crystal unit cell parameterisation
    results = self._extend_gradient_vectors(results, m, xlucp.num_free(),
      keys=self._grad_names)

    if len(isel) == 0:
      # if no reflections are in this experiment, skip calculation
      self._iparam += xlucp.num_free()
      continue

    # Get required data from those reflections
    axis = self._axis.select(isel)
    fixed_rotation = self._fixed_rotation.select(isel)
    phi_calc = self._phi_calc.select(isel)
    h = self._h.select(isel)
    s1 = self._s1.select(isel)
    e_X_r = self._e_X_r.select(isel)
    e_r_s0 = self._e_r_s0.select(isel)
    U = self._U.select(isel)
    D = self._D.select(isel)
    w_inv = self._w_inv.select(isel)
    u_w_inv = self._u_w_inv.select(isel)
    v_w_inv = self._v_w_inv.select(isel)

    # per-reflection derivatives of the B matrix wrt the parameters
    dB_dxluc_p = [reflections["dB_dp{0}".format(i)].select(isel) \
                  for i in range(xlucp.num_free())]

    dpv_dxluc_p, dphi_dxluc_p = self._xl_unit_cell_derivatives(
      dB_dxluc_p, axis, fixed_rotation, phi_calc, h, s1, e_X_r, e_r_s0, U, D)

    # convert to dX/dp, dY/dp and assign the elements of the vectors
    # corresponding to this experiment
    dX_dxluc_p, dY_dxluc_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp(
      w_inv, u_w_inv, v_w_inv, dpv_dxluc_p)
    for dX, dY, dphi in zip(dX_dxluc_p, dY_dxluc_p, dphi_dxluc_p):
      results[self._iparam][self._grad_names[0]].set_selected(isel, dX)
      results[self._iparam][self._grad_names[1]].set_selected(isel, dY)
      results[self._iparam][self._grad_names[2]].set_selected(isel, dphi)
      if callback is not None:
        results[self._iparam] = callback(results[self._iparam])
      # increment the parameter index pointer
      self._iparam += 1

  return results
def choose_best_orientation_matrix(self, candidate_orientation_matrices): from dxtbx.model.experiment.experiment_list import Experiment, ExperimentList from logging import info import copy info('*' * 80) info('Selecting the best orientation matrix') info('*' * 80) from libtbx import group_args class candidate_info(group_args): pass candidates = [] params = copy.deepcopy(self.all_params) for icm,cm in enumerate(candidate_orientation_matrices): sel = ((self.reflections['id'] == -1)) #(1/self.reflections['rlp'].norms() > self.d_min)) refl = self.reflections.select(sel) experiments = self.experiment_list_for_crystal(cm) self.index_reflections(experiments, refl) indexed = refl.select(refl['id'] >= 0) indexed = indexed.select(indexed.get_flags(indexed.flags.indexed)) print "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d initial outlier identification"%icm acceptance_flags = self.identify_outliers(params, experiments, indexed) #create a new "indexed" list with outliers thrown out: indexed = indexed.select(acceptance_flags) print "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d refinement before outlier rejection"%icm R = e_refine(params = params, experiments=experiments, reflections=indexed, graph_verbose=False) ref_experiments = R.get_experiments() # try to improve the outcome with a second round of outlier rejection post-initial refinement: acceptance_flags = self.identify_outliers(params, ref_experiments, indexed) # insert a round of Nave-outlier rejection on top of the r.m.s.d. 
rejection nv0 = nave_parameters(params = params, experiments=ref_experiments, reflections=indexed, refinery=R, graph_verbose=False) crystal_model_nv0 = nv0() acceptance_flags_nv0 = nv0.nv_acceptance_flags indexed = indexed.select(acceptance_flags & acceptance_flags_nv0) print "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d after positional and delta-psi outlier rejection"%icm R = e_refine(params = params, experiments=ref_experiments, reflections=indexed, graph_verbose=False) ref_experiments = R.get_experiments() nv = nave_parameters(params = params, experiments=ref_experiments, reflections=indexed, refinery=R, graph_verbose=False) crystal_model = nv() rmsd, _ = calc_2D_rmsd_and_displacements(R.predict_for_reflection_table(indexed)) print "$$$ stills_indexer::choose_best_orientation_matrix, candidate %d done"%icm candidates.append(candidate_info(crystal = crystal_model, green_curve_area = nv.green_curve_area, ewald_proximal_volume = nv.ewald_proximal_volume(), n_indexed = len(indexed), rmsd = rmsd, indexed = indexed, experiments = ref_experiments)) if len(candidates) == 0: raise Sorry("No suitable indexing solution found") print "**** ALL CANDIDATES:" for i,XX in enumerate(candidates): print "\n****Candidate %d"%i,XX cc = XX.crystal print " half mosaicity %5.2f deg."%(cc._ML_half_mosaicity_deg) print " domain size %.0f Ang."%(cc._ML_domain_size_ang) print "\n**** BEST CANDIDATE:" results = flex.double([c.rmsd for c in candidates]) best = candidates[flex.min_index(results)] print best if best.rmsd > 1.5: raise Sorry ("RMSD too high, %f" %rmsd) if best.ewald_proximal_volume > 0.0015: raise Sorry ("Ewald proximity volume too high, %f"%best.ewald_proximal_volume) if len(candidates) > 1: for i in xrange(len(candidates)): if i == flex.min_index(results): continue if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume: print "Couldn't figure out which candidate is best; picked the one with the best RMSD." 
best.indexed['entering'] = flex.bool(best.n_indexed, False) self._best_indexed = best.indexed return best.crystal, best.n_indexed
def test_for_reference(self):
    """Exploratory check of reciprocal-space profile fitting on reference data.

    Integrates the simulated reference reflections, dumps the learned
    profile correlation matrix, verifies the simulated backgrounds are zero,
    fits the strongest reflection's intensity against its reference profile
    by Newton iteration, and finally reports the Z-score distribution of
    calculated vs simulated intensities.

    NOTE(review): this routine prints diagnostics and opens matplotlib
    windows, so it is interactive rather than a pure automated assertion.
    """
    from dials.algorithms.integration import ProfileFittingReciprocalSpace
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    from dials.algorithms.statistics import \
        kolmogorov_smirnov_test_standard_normal
    from math import erf, sqrt, pi
    from copy import deepcopy
    from dials.algorithms.simulation.reciprocal_space import Simulator
    from os.path import basename

    # Integrate
    integration = ProfileFittingReciprocalSpace(
        grid_size=4,
        threshold=0.00,
        frame_interval=100,
        n_sigma=5,
        mask_n_sigma=3,
        sigma_b=0.024 * pi / 180.0,
        sigma_m=0.044 * pi / 180.0
    )

    # Integrate the reference profiles
    integration(self.experiment, self.reference)
    p = integration.learner.locate().profile(0)
    m = integration.learner.locate().mask(0)

    locator = integration.learner.locate()

    # Dump the reference-profile correlation matrix, one row per line.
    cor = locator.correlations()
    for j in range(cor.all()[0]):
        print ' '.join([str(cor[j,i]) for i in range(cor.all()[1])])
    #exit(0)
    #from matplotlib import pylab
    #pylab.imshow(cor.as_numpy_array(), interpolation='none', vmin=-1, vmax=1)
    #pylab.show()
    #n = locator.size()
    #for i in range(n):
        #c = locator.coord(i)
        #p = locator.profile(i)
        #vmax = flex.max(p)
        #from matplotlib import pylab
        #for j in range(9):
            #pylab.subplot(3, 3, j+1)
            #pylab.imshow(p.as_numpy_array()[j], vmin=0, vmax=vmax,
            #interpolation='none')
        #pylab.show()
    #print "NRef: ", n
    #x = []
    #y = []
    #for i in range(n):
        #c = locator.coord(i)
        #x.append(c[0])
        #y.append(c[1])
    #from matplotlib import pylab
    #pylab.scatter(x,y)
    #pylab.show()
    #exit(0)
    import numpy
    #pmax = flex.max(p)
    #scale = 100 / pmax
    #print "Scale: ", 100 / pmax
    #p = p.as_numpy_array() *100 / pmax
    #p = p.astype(numpy.int)
    #print p
    #print m.as_numpy_array()

    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference['rs_shoebox']
    eps = 1e-7
    for p in profiles:
        assert(abs(flex.sum(p.background) - 0) < eps)
    print 'OK'

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated)
    I_cal = self.reference['intensity.sum.value']
    I_var = self.reference['intensity.sum.variance']
    B_sim = self.reference['background.sim'].as_double()
    I_sim = self.reference['intensity.sim'].as_double()
    I_exp = self.reference['intensity.exp']
    P_cor = self.reference['profile.correlation']
    X_pos, Y_pos, Z_pos = self.reference['xyzcal.px'].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    # Strongest reflection by summed intensity ...
    max_ind = flex.max_index(I_cal)
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]['rs_shoebox'].data
    max_C = self.reference[max_ind]['xyzcal.px']
    max_S = self.reference[max_ind]['shoebox'].data

    # ... and the reflection with the worst profile correlation.
    min_ind = flex.min_index(P_cor)
    min_I = I_cal[min_ind]
    min_P = self.reference[min_ind]['rs_shoebox'].data
    min_C = self.reference[min_ind]['xyzcal.px']
    min_S = self.reference[min_ind]['shoebox'].data

    ##for k in range(max_S.all()[0]):
    #if False:
        #for j in range(max_S.all()[1]):
            #for i in range(max_S.all()[2]):
                #max_S[k,j,i] = 0
                #if (abs(i - max_S.all()[2] // 2) < 2 and
                #abs(j - max_S.all()[1] // 2) < 2 and
                #abs(k - max_S.all()[0] // 2) < 2):
                    #max_S[k,j,i] = 100

    #p = max_P.as_numpy_array() * 100 / flex.max(max_P)
    #p = p.astype(numpy.int)
    #print p

    #from dials.scratch.jmp.misc.test_transform import test_transform
    #grid_size = 4
    #ndiv = 5
    #sigma_b = 0.024 * pi / 180.0
    #sigma_m = 0.044 * pi / 180.0
    #n_sigma = 4.0
    #max_P2 = test_transform(
    #self.experiment,
    #self.reference[max_ind]['shoebox'],
    #self.reference[max_ind]['s1'],
    #self.reference[max_ind]['xyzcal.mm'][2],
    #grid_size,
    #sigma_m,
    #sigma_b,
    #n_sigma,
    #ndiv)
    #max_P = max_P2

    # Locate the learned reference profile nearest the strongest reflection.
    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    print "Max Index: ", max_ind, max_I, flex.sum(max_P), flex.sum(max_S)
    print "Coord: ", max_C, "Ref Coord: ", ref_C

    print "Min Index: ", min_ind, min_I, flex.sum(min_P), flex.sum(min_S)
    print "Coord: ", min_C, "Ref Coord: ", ref_C

    #vmax = flex.max(max_P)
    #print sum(max_S)
    #print sum(max_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
        #pylab.subplot(3, 3, j+1)
        #pylab.imshow(max_P.as_numpy_array()[j], vmin=0, vmax=vmax,
        #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #vmax = flex.max(min_P)
    #print sum(min_S)
    #print sum(min_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
        #pylab.subplot(3, 3, j+1)
        #pylab.imshow(min_P.as_numpy_array()[j], vmin=0, vmax=vmax,
        #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #for k in range(max_S.all()[0]):
        #print ''
        #print 'Slice %d' % k
        #for j in range(max_S.all()[1]):
            #print ' '.join(["%-4d" % int(max_S[k,j,i]) for i in range(max_S.all()[2])])

    print "Testing"

    def f(I):
        # Chi-square-like residual between the observed transformed profile
        # (max_P) and the reference profile (ref_P) scaled by intensity I,
        # restricted to a radius-3 sphere in the 9x9x9 grid where ref_P > 0.
        mask = flex.bool(flex.grid(9,9,9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k,j,i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        return flex.sum((c - I * p)**2 / (I * p))

    def df(I):
        # First derivative of f with respect to I (same masking as f).
        mask = flex.bool(flex.grid(9,9,9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k,j,i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        b = 0
        return flex.sum(p) - flex.sum(c*c / (I*I*p))
        #return flex.sum(p - p*c*c / ((b + I*p)**2))
        #return flex.sum(3*p*p + (c*c*p*p - 4*b*p*p) / ((b + I*p)**2))
        #return flex.sum(p - c*c / (I*I*p))
        #return flex.sum(p * (-c+p*I)*(c+p*I)/((p*I)**2))

    def d2f(I):
        # Second derivative of f with respect to I (same masking as f).
        mask = flex.bool(flex.grid(9,9,9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k,j,i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        return flex.sum(2*c*c*p*p / (p*I)**3)

    # Newton iteration from a hand-picked starting intensity.
    I = 10703#flex.sum(max_P)
    mask = ref_P.as_1d() > 0
    p = ref_P.as_1d().select(mask)
    c = max_P.as_1d().select(mask)
    for i in range(10):
        I = I - df(I) / d2f(I)
        #v = I*p
        #I = flex.sum(c * p / v) / flex.sum(p*p / v)

    print I

    # Brute-force scan of f over a window, to cross-check the Newton result.
    from math import log
    ff = []
    for I in range(9500, 11500):
        ff.append(f(I))
    print sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500

    from matplotlib import pylab
    pylab.plot(range(9500,11500), ff)
    pylab.show()

    #exit(0)

    #I = 10000
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    #I = 10100
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    #exit(0)

    print flex.sum(self.reference[0]['rs_shoebox'].data)
    print I_cal[0]

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var))
    print len(I_cal)

    from matplotlib import pylab
    from mpl_toolkits.mplot3d import Axes3D
    #fig = pylab.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.scatter(X_pos, Y_pos, P_cor)
    #pylab.scatter(X_pos, P_cor)
    #pylab.scatter(Y_pos, P_cor)
    #pylab.scatter(Z_pos, P_cor)
    #pylab.hist(P_cor,100)
    #pylab.scatter(P_cor, (I_cal - I_exp) / I_exp)
    pylab.hist(Z, 100)
    #pylab.hist(I_cal,100)
    #pylab.hist(I_cal - I_sim, 100)
    pylab.show()
indexed = indexed, experiments = experiments)) if len(candidates) == 0: raise Sorry("No suitable indexing solution found") print "**** ALL CANDIDATES:" for i,XX in enumerate(candidates): print "\n****Candidate %d"%i,XX cc = XX.crystal if hasattr(cc, '_ML_half_mosaicity_deg'): print " half mosaicity %5.2f deg."%(cc._ML_half_mosaicity_deg) print " domain size %.0f Ang."%(cc._ML_domain_size_ang) print "\n**** BEST CANDIDATE:" results = flex.double([c.rmsd for c in candidates]) best = candidates[flex.min_index(results)] print best if params.indexing.stills.refine_all_candidates: if best.rmsd > params.indexing.stills.rmsd_min_px: raise Sorry ("RMSD too high, %f" %rmsd) if best.ewald_proximal_volume > params.indexing.stills.ewald_proximal_volume_max: raise Sorry ("Ewald proximity volume too high, %f"%best.ewald_proximal_volume) if len(candidates) > 1: for i in xrange(len(candidates)): if i == flex.min_index(results): continue if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume: print "Couldn't figure out which candidate is best; picked the one with the best RMSD."
def test_for_reference(self):
    """Exploratory check of reciprocal-space profile fitting on reference data.

    Integrates the simulated reference reflections, dumps the learned
    profile correlation matrix, verifies the simulated backgrounds are zero,
    fits the strongest reflection's intensity against its reference profile
    by Newton iteration, and finally reports the Z-score distribution of
    calculated vs simulated intensities.

    NOTE(review): this routine prints diagnostics and opens matplotlib
    windows, so it is interactive rather than a pure automated assertion.
    """
    from dials.algorithms.integration import ProfileFittingReciprocalSpace
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    from dials.algorithms.statistics import kolmogorov_smirnov_test_standard_normal
    from math import erf, sqrt, pi
    from copy import deepcopy
    from dials.algorithms.simulation.reciprocal_space import Simulator
    from os.path import basename

    # Integrate
    integration = ProfileFittingReciprocalSpace(
        grid_size=4,
        threshold=0.00,
        frame_interval=100,
        n_sigma=5,
        mask_n_sigma=3,
        sigma_b=0.024 * pi / 180.0,
        sigma_m=0.044 * pi / 180.0,
    )

    # Integrate the reference profiles
    integration(self.experiment, self.reference)
    p = integration.learner.locate().profile(0)
    m = integration.learner.locate().mask(0)

    locator = integration.learner.locate()

    # Dump the reference-profile correlation matrix, one row per line.
    cor = locator.correlations()
    for j in range(cor.all()[0]):
        print(" ".join([str(cor[j, i]) for i in range(cor.all()[1])]))
    # exit(0)
    # from matplotlib import pylab
    # pylab.imshow(cor.as_numpy_array(), interpolation='none', vmin=-1, vmax=1)
    # pylab.show()
    # n = locator.size()
    # for i in range(n):
    # c = locator.coord(i)
    # p = locator.profile(i)
    # vmax = flex.max(p)
    # from matplotlib import pylab
    # for j in range(9):
    # pylab.subplot(3, 3, j+1)
    # pylab.imshow(p.as_numpy_array()[j], vmin=0, vmax=vmax,
    # interpolation='none')
    # pylab.show()
    # print "NRef: ", n
    # x = []
    # y = []
    # for i in range(n):
    # c = locator.coord(i)
    # x.append(c[0])
    # y.append(c[1])
    # from matplotlib import pylab
    # pylab.scatter(x,y)
    # pylab.show()
    # exit(0)
    import numpy
    # pmax = flex.max(p)
    # scale = 100 / pmax
    # print "Scale: ", 100 / pmax
    # p = p.as_numpy_array() *100 / pmax
    # p = p.astype(numpy.int)
    # print p
    # print m.as_numpy_array()

    # Check the reference profiles and spots are ok
    # self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference["rs_shoebox"]
    eps = 1e-7
    for p in profiles:
        assert abs(flex.sum(p.background) - 0) < eps
    print("OK")

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated)
    I_cal = self.reference["intensity.sum.value"]
    I_var = self.reference["intensity.sum.variance"]
    B_sim = self.reference["background.sim"].as_double()
    I_sim = self.reference["intensity.sim"].as_double()
    I_exp = self.reference["intensity.exp"]
    P_cor = self.reference["profile.correlation"]
    X_pos, Y_pos, Z_pos = self.reference["xyzcal.px"].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    # Strongest reflection by summed intensity ...
    max_ind = flex.max_index(I_cal)
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]["rs_shoebox"].data
    max_C = self.reference[max_ind]["xyzcal.px"]
    max_S = self.reference[max_ind]["shoebox"].data

    # ... and the reflection with the worst profile correlation.
    min_ind = flex.min_index(P_cor)
    min_I = I_cal[min_ind]
    min_P = self.reference[min_ind]["rs_shoebox"].data
    min_C = self.reference[min_ind]["xyzcal.px"]
    min_S = self.reference[min_ind]["shoebox"].data

    ##for k in range(max_S.all()[0]):
    # if False:
    # for j in range(max_S.all()[1]):
    # for i in range(max_S.all()[2]):
    # max_S[k,j,i] = 0
    # if (abs(i - max_S.all()[2] // 2) < 2 and
    # abs(j - max_S.all()[1] // 2) < 2 and
    # abs(k - max_S.all()[0] // 2) < 2):
    # max_S[k,j,i] = 100

    # p = max_P.as_numpy_array() * 100 / flex.max(max_P)
    # p = p.astype(numpy.int)
    # print p

    # from dials.scratch.jmp.misc.test_transform import test_transform
    # grid_size = 4
    # ndiv = 5
    # sigma_b = 0.024 * pi / 180.0
    # sigma_m = 0.044 * pi / 180.0
    # n_sigma = 4.0
    # max_P2 = test_transform(
    # self.experiment,
    # self.reference[max_ind]['shoebox'],
    # self.reference[max_ind]['s1'],
    # self.reference[max_ind]['xyzcal.mm'][2],
    # grid_size,
    # sigma_m,
    # sigma_b,
    # n_sigma,
    # ndiv)
    # max_P = max_P2

    # Locate the learned reference profile nearest the strongest reflection.
    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    print("Max Index: ", max_ind, max_I, flex.sum(max_P), flex.sum(max_S))
    print("Coord: ", max_C, "Ref Coord: ", ref_C)

    print("Min Index: ", min_ind, min_I, flex.sum(min_P), flex.sum(min_S))
    print("Coord: ", min_C, "Ref Coord: ", ref_C)

    # vmax = flex.max(max_P)
    # print sum(max_S)
    # print sum(max_P)
    # from matplotlib import pylab, cm
    # for j in range(9):
    # pylab.subplot(3, 3, j+1)
    # pylab.imshow(max_P.as_numpy_array()[j], vmin=0, vmax=vmax,
    # interpolation='none', cmap=cm.Greys_r)
    # pylab.show()

    # vmax = flex.max(min_P)
    # print sum(min_S)
    # print sum(min_P)
    # from matplotlib import pylab, cm
    # for j in range(9):
    # pylab.subplot(3, 3, j+1)
    # pylab.imshow(min_P.as_numpy_array()[j], vmin=0, vmax=vmax,
    # interpolation='none', cmap=cm.Greys_r)
    # pylab.show()

    # for k in range(max_S.all()[0]):
    # print ''
    # print 'Slice %d' % k
    # for j in range(max_S.all()[1]):
    # print ' '.join(["%-4d" % int(max_S[k,j,i]) for i in range(max_S.all()[2])])

    print("Testing")

    def f(I):
        # Chi-square-like residual between the observed transformed profile
        # (max_P) and the reference profile (ref_P) scaled by intensity I,
        # restricted to a radius-3 sphere in the 9x9x9 grid where ref_P > 0.
        mask = flex.bool(flex.grid(9, 9, 9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k, j, i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        return flex.sum((c - I * p)**2 / (I * p))

    def df(I):
        # First derivative of f with respect to I (same masking as f).
        mask = flex.bool(flex.grid(9, 9, 9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k, j, i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        b = 0
        return flex.sum(p) - flex.sum(c * c / (I * I * p))
        # return flex.sum(p - p*c*c / ((b + I*p)**2))
        # return flex.sum(3*p*p + (c*c*p*p - 4*b*p*p) / ((b + I*p)**2))
        # return flex.sum(p - c*c / (I*I*p))
        # return flex.sum(p * (-c+p*I)*(c+p*I)/((p*I)**2))

    def d2f(I):
        # Second derivative of f with respect to I (same masking as f).
        mask = flex.bool(flex.grid(9, 9, 9), False)
        for k in range(9):
            for j in range(9):
                for i in range(9):
                    dx = 5 * (i - 4.5) / 4.5
                    dy = 5 * (j - 4.5) / 4.5
                    dz = 5 * (k - 4.5) / 4.5
                    dd = sqrt(dx**2 + dy**2 + dz**2)
                    if dd <= 3:
                        mask[k, j, i] = True
        mask = mask.as_1d() & (ref_P.as_1d() > 0)
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        return flex.sum(2 * c * c * p * p / (p * I)**3)

    # Newton iteration from a hand-picked starting intensity.
    I = 10703  # flex.sum(max_P)
    mask = ref_P.as_1d() > 0
    p = ref_P.as_1d().select(mask)
    c = max_P.as_1d().select(mask)
    for i in range(10):
        I = I - df(I) / d2f(I)
        # v = I*p
        # I = flex.sum(c * p / v) / flex.sum(p*p / v)

    print(I)

    # Brute-force scan of f over a window, to cross-check the Newton result.
    from math import log

    ff = []
    for I in range(9500, 11500):
        ff.append(f(I))
    print(sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500)

    from matplotlib import pylab

    pylab.plot(range(9500, 11500), ff)
    pylab.show()

    # exit(0)

    # I = 10000
    # print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    # I = 10100
    # print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    # exit(0)

    print(flex.sum(self.reference[0]["rs_shoebox"].data))
    print(I_cal[0])

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3 * 3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print("Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var)))
    print(len(I_cal))

    from matplotlib import pylab
    from mpl_toolkits.mplot3d import Axes3D

    # fig = pylab.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(X_pos, Y_pos, P_cor)
    # pylab.scatter(X_pos, P_cor)
    # pylab.scatter(Y_pos, P_cor)
    # pylab.scatter(Z_pos, P_cor)
    # pylab.hist(P_cor,100)
    # pylab.scatter(P_cor, (I_cal - I_exp) / I_exp)
    pylab.hist(Z, 100)
    # pylab.hist(I_cal,100)
    # pylab.hist(I_cal - I_sim, 100)
    pylab.show()