def optimise_peaks(self, peaks, reciprocal_lattice_points):
    # optimise the peak position using a grid search around the starting peak position
    optimised_peaks = flex.vec3_double()
    n_points = 4
    grid_step = 0.25
    for peak in peaks:
        max_value = 1e-8
        max_index = None
        for i in range(-n_points, n_points):
            i_coord = peak[0] + i * grid_step
            for j in range(-n_points, n_points):
                j_coord = peak[1] + j * grid_step
                for k in range(-n_points, n_points):
                    k_coord = peak[2] + k * grid_step
                    v = self.fft_cell.orthogonalize(
                        (i_coord / self.gridding[0],
                         j_coord / self.gridding[1],
                         k_coord / self.gridding[2]))
                    two_pi_S_dot_v = 2 * math.pi * reciprocal_lattice_points.dot(v)
                    f = flex.sum(flex.cos(two_pi_S_dot_v))
                    if f > max_value:
                        max_value = f
                        max_index = (i_coord, j_coord, k_coord)
        optimised_peaks.append(max_index)
    return optimised_peaks
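
# A minimal, dependency-free sketch of the same grid-search idea used above:
# evaluate f(v) = sum_S cos(2*pi*S.v) on a grid of candidate positions around
# a starting peak and keep the best one.  The helper name and the plain-list
# representation are illustrative only, not part of the code above.
import math

def refine_peak(peak, s_vectors, n_points=4, grid_step=0.25):
    best_value, best_coord = None, peak
    for i in range(-n_points, n_points + 1):
        for j in range(-n_points, n_points + 1):
            for k in range(-n_points, n_points + 1):
                v = (peak[0] + i * grid_step,
                     peak[1] + j * grid_step,
                     peak[2] + k * grid_step)
                f = sum(math.cos(2 * math.pi * (s[0]*v[0] + s[1]*v[1] + s[2]*v[2]))
                        for s in s_vectors)
                if best_value is None or f > best_value:
                    best_value, best_coord = f, v
    return best_coord
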
def calc_partiality_anisotropy_set(self, my_uc, rotx, roty, miller_indices,
                                   ry, rz, r0, re, nu,
                                   bragg_angle_set, alpha_angle_set, wavelength,
                                   crystal_init_orientation,
                                   spot_pred_x_mm_set, spot_pred_y_mm_set,
                                   detector_distance_mm, partiality_model,
                                   flag_beam_divergence):
    #use III.4 in Winkler et al 1979 (A35; P901) for set of miller indices
    O = sqr(my_uc.orthogonalization_matrix()).transpose()
    R = sqr(crystal_init_orientation.crystal_rotation_matrix()).transpose()
    CO = crystal_orientation(O*R, basis_type.direct)
    CO_rotate = CO.rotate_thru((1,0,0), rotx).rotate_thru((0,1,0), roty)
    A_star = sqr(CO_rotate.reciprocal_matrix())
    S0 = -1*col((0,0,1./wavelength))
    #calculate rs
    rs_set = r0 + (re * flex.tan(bragg_angle_set))
    if flag_beam_divergence:
        rs_set += ((ry * flex.cos(alpha_angle_set))**2 +
                   (rz * flex.sin(alpha_angle_set))**2)**0.5
    #calculate rh
    x = A_star.elems * miller_indices.as_vec3_double()
    sd_array = x + S0.elems
    rh_set = sd_array.norms() - (1/wavelength)
    #calculate partiality
    if partiality_model == "Lorentzian":
        partiality_set = ((rs_set**2)/((2*(rh_set**2))+(rs_set**2)))
    elif partiality_model == "Voigt":
        partiality_set = self.voigt(rh_set, rs_set, nu)
    elif partiality_model == "Lognormal":
        partiality_set = self.lognpdf(rh_set, rs_set, nu)
    #calculate delta_xy
    d_ratio = -detector_distance_mm/sd_array.parts()[2]
    calc_xy_array = flex.vec3_double(sd_array.parts()[0]*d_ratio,
                                     sd_array.parts()[1]*d_ratio,
                                     flex.double([0]*len(d_ratio)))
    pred_xy_array = flex.vec3_double(spot_pred_x_mm_set, spot_pred_y_mm_set,
                                     flex.double([0]*len(d_ratio)))
    delta_xy_set = (pred_xy_array - calc_xy_array).norms()
    return partiality_set, delta_xy_set, rs_set, rh_set
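
# For a single reflection the Lorentzian partiality used above reduces to
# p = rs**2 / (2*rh**2 + rs**2).  A minimal scalar sketch; the numeric values
# are illustrative, not taken from the code above.
def lorentzian_partiality(rh, rs):
    return rs**2 / (2 * rh**2 + rs**2)

print(lorentzian_partiality(rh=0.0, rs=0.002))    # 1.0 exactly on the Ewald sphere
print(lorentzian_partiality(rh=0.002, rs=0.002))  # ~0.33 one rs away from it
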
def polarization_correction(self):
    """Perform basic polarization correction in place, and change the
    is_polarization_corrected flag to True.

    I_corrected = 2*I_uncorrected/(1 + cos(two_theta)**2)
    """
    two_theta = self.miller_array.two_theta(wavelength=self.wavelength).data()
    one_over_P = 2/(1 + (flex.cos(two_theta) ** 2))
    self.miller_array = self.miller_array.customized_copy(
        data=self.miller_array.data() * one_over_P)
    self.is_polarization_corrected = True
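
# Scalar check of the correction factor 2/(1 + cos(2*theta)**2) applied above:
# it is 1 at two_theta = 0 and grows to 2 at two_theta = 90 degrees.  The
# angles below are illustrative only.
import math
for two_theta_deg in (0, 30, 60, 90):
    two_theta = math.radians(two_theta_deg)
    print(two_theta_deg, 2 / (1 + math.cos(two_theta)**2))
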
def get_hl(f_obs_cmpl, k_blur, b_blur):
    f_model_phases = f_obs_cmpl.phases().data()
    sin_f_model_phases = flex.sin(f_model_phases)
    cos_f_model_phases = flex.cos(f_model_phases)
    ss = 1. / flex.pow2(f_obs_cmpl.d_spacings().data()) / 4.
    t = 2 * k_blur * flex.exp(-b_blur * ss)
    hl_a_model = t * cos_f_model_phases
    hl_b_model = t * sin_f_model_phases
    hl_data = flex.hendrickson_lattman(a=hl_a_model, b=hl_b_model)
    hl = f_obs_cmpl.customized_copy(data=hl_data)
    return hl
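
# Scalar form of the Hendrickson-Lattman coefficients built above:
# ss = 1/(4*d**2), t = 2*k_blur*exp(-b_blur*ss), then A = t*cos(phi) and
# B = t*sin(phi).  The helper name and numbers are illustrative only.
import math

def hl_ab(phi, d, k_blur=1.0, b_blur=20.0):
    ss = 1.0 / (4.0 * d * d)
    t = 2.0 * k_blur * math.exp(-b_blur * ss)
    return t * math.cos(phi), t * math.sin(phi)

print(hl_ab(phi=math.radians(45.0), d=2.5))
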
def run(args): from dials.util.options import OptionParser from dials.util.options import flatten_datablocks from dials.util.masking import GoniometerShadowMaskGenerator from libtbx.utils import Sorry import libtbx.load_env usage = "%s [options] experiments.json" %libtbx.env.dispatcher_name parser = OptionParser( usage=usage, phil=phil_scope, read_datablocks=True, check_format=False, epilog=help_message) params, options = parser.parse_args(show_diff_phil=True) datablocks = flatten_datablocks(params.input.datablock) if len(datablocks) == 0: parser.print_help() return imagesets = [] for datablock in datablocks: imagesets.extend(datablock.extract_imagesets()) for imageset in imagesets: import math height = params.height # mm radius = params.radius # mm steps_per_degree = 10 steps_per_degree = 1 theta = flex.double([range(360*steps_per_degree)]) * math.pi/180 * 1/steps_per_degree y = radius * flex.cos(theta) # x z = radius * flex.sin(theta) # y x = flex.double(theta.size(), height) # z coords = flex.vec3_double(zip(x, y, z)) coords.insert(0, (0,0,0)) gonio = imageset.get_goniometer() scan = imageset.get_scan() beam = imageset.get_beam() detector = imageset.get_detector() if params.angle is not None: angle = params.angle else: angle = scan.get_oscillation()[0] gonio_masker = GoniometerShadowMaskGenerator( gonio, coords, flex.size_t(len(coords), 0)) from matplotlib import pyplot as plt if params.output.animation is not None: import matplotlib.animation as manimation import os.path ext = os.path.splitext(params.output.animation) metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!') if ext[1] == '.mp4': FFMpegWriter = manimation.writers['ffmpeg'] writer = FFMpegWriter(fps=15, metadata=metadata) elif ext[1] == '.gif': ImagemagickWriter = manimation.writers['imagemagick_file'] writer = ImagemagickWriter(fps=15, metadata=metadata) fig = plt.figure() l, = plt.plot([], [], c='r', marker=None) plt.axes().set_aspect('equal') plt.xlim(0, detector[0].get_image_size()[0]) plt.ylim(0, detector[0].get_image_size()[0]) plt.gca().invert_yaxis() title = plt.axes().set_title('') with writer.saving(fig, params.output.animation, 100): start, end = scan.get_array_range() step_size = 5 for i in range(start, end, step_size): angle = scan.get_angle_from_array_index(i) shadow_boundary = gonio_masker.project_extrema(detector, angle) x, y = shadow_boundary[0].parts() l.set_data(x.as_numpy_array(), y.as_numpy_array()) title.set_text('scan_angle = %.1f degrees' %angle) writer.grab_frame() plt.close() shadow_boundary = gonio_masker.project_extrema(detector, angle) with open('shadow.phil', 'wb') as f: print >> f, 'untrusted {' print >> f, ' polygon = \\' for c in shadow_boundary[0]: print >> f, ' %0.f %.0f \\' %(max(c[0], 0), max(c[1], 0)) print >> f, '}' import matplotlib.pyplot as plt fig = plt.figure() x, y, z = coords.parts() plt.scatter(x.as_numpy_array(), y.as_numpy_array()) plt.axes().set_aspect('equal') plt.xlabel('x (gonio axis)') plt.ylabel('y (perpendicular to beam)') plt.savefig('gonio_xy.png') plt.scatter(y.as_numpy_array(), z.as_numpy_array()) plt.axes().set_aspect('equal') plt.xlabel('y (perpendicular to beam)') plt.ylabel('z (towards beam))') plt.savefig('gonio_yz.png') plt.scatter(z.as_numpy_array(), x.as_numpy_array()) plt.axes().set_aspect('equal') plt.xlabel('z (towards beam)') plt.ylabel('x (gonio axis)') plt.savefig('gonio_zx.png') for p_id in range(len(detector)): x, y = shadow_boundary[p_id].parts() fig = plt.figure() plt.scatter(x.as_numpy_array(), y.as_numpy_array(), 
c='r', s=1, marker='x') plt.axes().set_aspect('equal') plt.xlim(0, detector[p_id].get_image_size()[0]) plt.ylim(0, detector[p_id].get_image_size()[0]) plt.gca().invert_yaxis() plt.savefig('shadow.png')
def organize_input(self, observations_pickle, iparams, avg_mode, pickle_filename=None): """Given the pickle file, extract and prepare observations object and the alpha angle (meridional to equatorial). """ #get general parameters if iparams.isoform_name is not None: if "identified_isoform" not in observations_pickle: return None, "No identified isoform" if observations_pickle[ "identified_isoform"] != iparams.isoform_name: return None, "Identified isoform(%s) is not the requested isoform (%s)" % ( observations_pickle["identified_isoform"], iparams.isoform_name) if iparams.flag_weak_anomalous: if avg_mode == 'final': target_anomalous_flag = iparams.target_anomalous_flag else: target_anomalous_flag = False else: target_anomalous_flag = iparams.target_anomalous_flag img_filename_only = '' if pickle_filename: img_filename_only = os.path.basename(pickle_filename) txt_exception = ' {0:40} ==> '.format(img_filename_only) #for dials integration pickles - also look for experimentxxx.json if "miller_index" in observations_pickle: from dxtbx.model.experiment_list import ExperimentListFactory exp_json_file = os.path.join( os.path.dirname(pickle_filename), img_filename_only.split('_')[0] + '_refined_experiments.json') if os.path.isfile(exp_json_file): experiments = ExperimentListFactory.from_json_file( exp_json_file) dials_crystal = experiments[0].crystal detector = experiments[0].detector beam = experiments[0].beam crystal_symmetry = crystal.symmetry( unit_cell=dials_crystal.get_unit_cell().parameters(), space_group_symbol=str(iparams.target_space_group)) miller_set_all = miller.set( crystal_symmetry=crystal_symmetry, indices=observations_pickle['miller_index'], anomalous_flag=target_anomalous_flag) observations = miller_set_all.array( data=observations_pickle['intensity.sum.value'], sigmas=flex.sqrt( observations_pickle['intensity.sum.variance']) ).set_observation_type_xray_intensity() detector_distance_mm = detector[0].get_distance() alpha_angle_obs = flex.double([0] * len(observations.data())) wavelength = beam.get_wavelength() spot_pred_x_mm = observations_pickle['s1'] #a disguise of s1 spot_pred_y_mm = flex.double([0] * len(observations.data())) #calculate the crystal orientation O = sqr(dials_crystal.get_unit_cell().orthogonalization_matrix( )).transpose() R = sqr(dials_crystal.get_U()).transpose() from cctbx.crystal_orientation import crystal_orientation, basis_type crystal_init_orientation = crystal_orientation( O * R, basis_type.direct) else: txt_exception += exp_json_file + ' not found' print(txt_exception) return None, txt_exception else: #for cctbx.xfel proceed as usual observations = observations_pickle["observations"][0] detector_distance_mm = observations_pickle['distance'] mm_predictions = iparams.pixel_size_mm * ( observations_pickle['mapped_predictions'][0]) xbeam = observations_pickle["xbeam"] ybeam = observations_pickle["ybeam"] alpha_angle_obs = flex.double([math.atan(abs(pred[0]-xbeam)/abs(pred[1]-ybeam)) \ for pred in mm_predictions]) spot_pred_x_mm = flex.double( [pred[0] - xbeam for pred in mm_predictions]) spot_pred_y_mm = flex.double( [pred[1] - ybeam for pred in mm_predictions]) #Polarization correction wavelength = observations_pickle["wavelength"] crystal_init_orientation = observations_pickle[ "current_orientation"][0] #continue reading... if iparams.flag_LP_correction and "observations" in observations_pickle: fx = 1 - iparams.polarization_horizontal_fraction fy = 1 - fx if fx > 1.0 or fx < 0: print( 'Horizontal polarization fraction is not correct. 
The value must be >= 0 and <= 1' ) print( 'No polarization correction. Continue with post-refinement' ) else: phi_angle_obs = flex.double([math.atan2(pred[1]-ybeam, pred[0]-xbeam) \ for pred in mm_predictions]) bragg_angle_obs = observations.two_theta(wavelength).data() P = ((fx*((flex.sin(phi_angle_obs)**2)+((flex.cos(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2)))+\ (fy*((flex.cos(phi_angle_obs)**2)+((flex.sin(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2)))) I_prime = observations.data() / P sigI_prime = observations.sigmas() / P observations = observations.customized_copy( data=flex.double(I_prime), sigmas=flex.double(sigI_prime)) #set observations with target space group - !!! required for correct #merging due to map_to_asu command. if iparams.target_crystal_system is not None: target_crystal_system = iparams.target_crystal_system else: target_crystal_system = observations.crystal_symmetry( ).space_group().crystal_system() lph = lbfgs_partiality_handler() if iparams.flag_override_unit_cell: uc_constrained_inp = lph.prep_input( iparams.target_unit_cell.parameters(), target_crystal_system) else: uc_constrained_inp = lph.prep_input( observations.unit_cell().parameters(), target_crystal_system) uc_constrained = list( lph.prep_output(uc_constrained_inp, target_crystal_system)) try: #apply constrain using the crystal system miller_set = symmetry( unit_cell=uc_constrained, space_group_symbol=str( iparams.target_space_group)).build_miller_set( anomalous_flag=target_anomalous_flag, d_min=iparams.merge.d_min) observations = observations.customized_copy( anomalous_flag=target_anomalous_flag, crystal_symmetry=miller_set.crystal_symmetry()) except Exception: a, b, c, alpha, beta, gamma = uc_constrained txt_exception += 'Mismatch spacegroup (%6.2f,%6.2f,%6.2f,%6.2f,%6.2f,%6.2f)' % ( a, b, c, alpha, beta, gamma) print(txt_exception) return None, txt_exception #reset systematic absence sys_absent_negate_flags = flex.bool([ sys_absent_flag[1] == False for sys_absent_flag in observations.sys_absent_flags() ]) observations = observations.select(sys_absent_negate_flags) alpha_angle_obs = alpha_angle_obs.select(sys_absent_negate_flags) spot_pred_x_mm = spot_pred_x_mm.select(sys_absent_negate_flags) spot_pred_y_mm = spot_pred_y_mm.select(sys_absent_negate_flags) #remove observations from rejection list if iparams.rejections: if pickle_filename in iparams.rejections: miller_indices_ori_rejected = iparams.rejections[ pickle_filename] i_sel_flag = flex.bool([True] * len(observations.data())) cnrej = 0 for miller_index_ori_rejected in miller_indices_ori_rejected: for i_index_ori, miller_index_ori in enumerate( observations.indices()): if miller_index_ori_rejected == miller_index_ori: i_sel_flag[i_index_ori] = False cnrej += 1 observations = observations.customized_copy( indices=observations.indices().select(i_sel_flag), data=observations.data().select(i_sel_flag), sigmas=observations.sigmas().select(i_sel_flag)) alpha_angle_obs = alpha_angle_obs.select(i_sel_flag) spot_pred_x_mm = spot_pred_x_mm.select(i_sel_flag) spot_pred_y_mm = spot_pred_y_mm.select(i_sel_flag) #filter resolution i_sel_res = observations.resolution_filter_selection( d_max=iparams.merge.d_max, d_min=iparams.merge.d_min) observations = observations.select(i_sel_res) alpha_angle_obs = alpha_angle_obs.select(i_sel_res) spot_pred_x_mm = spot_pred_x_mm.select(i_sel_res) spot_pred_y_mm = spot_pred_y_mm.select(i_sel_res) #Filter weak i_sel = (observations.data() / observations.sigmas()) > iparams.merge.sigma_min observations = 
observations.select(i_sel) alpha_angle_obs = alpha_angle_obs.select(i_sel) spot_pred_x_mm = spot_pred_x_mm.select(i_sel) spot_pred_y_mm = spot_pred_y_mm.select(i_sel) #filter icering (if on) if iparams.icering.flag_on: miller_indices = flex.miller_index() I_set = flex.double() sigI_set = flex.double() alpha_angle_obs_set = flex.double() spot_pred_x_mm_set = flex.double() spot_pred_y_mm_set = flex.double() for miller_index, d, I, sigI, alpha, spot_x, spot_y in zip( observations.indices(), observations.d_spacings().data(), observations.data(), observations.sigmas(), alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm): if d > iparams.icering.d_upper or d < iparams.icering.d_lower: miller_indices.append(miller_index) I_set.append(I) sigI_set.append(sigI) alpha_angle_obs_set.append(alpha) spot_pred_x_mm_set.append(spot_x) spot_pred_y_mm_set.append(spot_y) observations = observations.customized_copy(indices=miller_indices, data=I_set, sigmas=sigI_set) alpha_angle_obs = alpha_angle_obs_set[:] spot_pred_x_mm = spot_pred_x_mm_set[:] spot_pred_y_mm = spot_pred_y_mm_set[:] #replacing sigI (if set) if iparams.flag_replace_sigI: observations = observations.customized_copy( sigmas=flex.sqrt(observations.data())) inputs = observations, alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm, detector_distance_mm, wavelength, crystal_init_orientation return inputs, 'OK'
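
# Scalar form of the alpha angle computed above (meridional to equatorial),
# alpha = atan(|x - xbeam| / |y - ybeam|), for one hypothetical predicted spot
# position in mm; the numbers are illustrative only.
import math
xbeam, ybeam = 94.0, 94.0
pred = (100.0, 88.0)
alpha = math.atan(abs(pred[0] - xbeam) / abs(pred[1] - ybeam))
print(math.degrees(alpha))  # 45 degrees for this symmetric example
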
def list_6_as_miller_arrays(file_name):
    """Read the file of given name and return a pair of miller arrays
    (F_obs^2, F_calc)"""
    # potentially iotbx.cif could be used here
    fcf = iter(open(file_name))
    space_group = sgtbx.space_group()
    unit_cell_params = {}
    indices = flex.miller_index()
    f_obs_squares = flex.double()
    sigma_f_obs_squares = flex.double()
    f_calc_amplitudes = flex.double()
    f_calc_phases = flex.double()
    for li in fcf:
        if li.startswith('loop_'):
            for li in fcf:
                li = li.strip()
                if li == '_symmetry_equiv_pos_as_xyz':
                    for li in fcf:
                        li = li.strip()
                        if not li:
                            break
                        space_group.expand_smx(li[1:-1])
                else:
                    for i in range(6):
                        next(fcf)
                    for li in fcf:
                        items = li.split()
                        if not items:
                            break
                        h, k, l, fo, sig_fo, fc, phase = items
                        indices.append((int(h), int(k), int(l)))
                        f_obs_squares.append(float(fo))
                        sigma_f_obs_squares.append(float(sig_fo))
                        f_calc_amplitudes.append(float(fc))
                        f_calc_phases.append(float(phase))
                if not li:
                    break
        elif li.startswith('_cell'):
            lbl, value = li.split()
            unit_cell_params[lbl] = float(value)
    unit_cell = uctbx.unit_cell([
        unit_cell_params[p]
        for p in ("_cell_length_a", "_cell_length_b", "_cell_length_c",
                  "_cell_angle_alpha", "_cell_angle_beta", "_cell_angle_gamma")
    ])
    crystal_symmetry = crystal.symmetry(unit_cell=unit_cell,
                                        space_group=space_group)
    f_calc_phases *= pi / 180
    f_calc = flex.complex_double(
        reals=f_calc_amplitudes * flex.cos(f_calc_phases),
        imags=f_calc_amplitudes * flex.sin(f_calc_phases))
    miller_set = miller.set(crystal_symmetry=crystal_symmetry,
                            indices=indices).auto_anomalous()
    f_obs_squares = miller.array(miller_set=miller_set,
                                 data=f_obs_squares,
                                 sigmas=sigma_f_obs_squares)
    f_obs_squares.set_observation_type_xray_intensity()
    f_obs_squares.set_info(
        miller.array_info(source=file_name,
                          labels=["F_squared_meas", "F_squared_sigma"]))
    f_calc = miller.array(miller_set=miller_set, data=f_calc)
    f_calc.set_info(miller.array_info(source=file_name, labels=["F_calc"]))
    return f_obs_squares, f_calc
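
# Hypothetical usage of list_6_as_miller_arrays (the file name is a
# placeholder): read a list-6 style .fcf file and inspect the two arrays.
f_obs_sq, f_calc = list_6_as_miller_arrays("model.fcf")
print(f_obs_sq.size(), f_obs_sq.is_xray_intensity_array())
print(f_calc.is_complex_array())
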
def compute_map_coefficients(self): f_obs = self.f_obs_complete.select( self.f_obs_complete.d_spacings().data() >= self.d_min) f_calc = f_obs.structure_factors_from_map(self.map, use_sg=True) f_obs_active = f_obs.select_indices(self.active_indices) minimized = relative_scaling.ls_rel_scale_driver( f_obs_active, f_calc.as_amplitude_array().select_indices(self.active_indices), use_intensities=False, use_weights=False) #minimized.show() f_calc = f_calc.customized_copy(data=f_calc.data()\ * math.exp(-minimized.p_scale)\ * adptbx.debye_waller_factor_u_star( f_calc.indices(), minimized.u_star)) f_calc_active = f_calc.common_set(f_obs_active) matched_indices = f_obs.match_indices(self.f_obs_active) lone_indices_selection = matched_indices.single_selection(0) from mmtbx.max_lik import maxlik alpha_beta_est = maxlik.alpha_beta_est_manager( f_obs=f_obs_active, f_calc=f_calc_active, free_reflections_per_bin=140, flags=flex.bool(f_obs_active.size()), interpolation=True, epsilons=f_obs_active.epsilons().data().as_double()) alpha, beta = alpha_beta_est.alpha_beta_for_each_reflection( f_obs=self.f_obs_complete.select( self.f_obs_complete.d_spacings().data() >= self.d_min)) f_obs.data().copy_selected(lone_indices_selection.iselection(), flex.abs(f_calc.data())) t = maxlik.fo_fc_alpha_over_eps_beta(f_obs=f_obs, f_model=f_calc, alpha=alpha, beta=beta) hl_coeff = flex.hendrickson_lattman( t * flex.cos(f_calc.phases().data()), t * flex.sin(f_calc.phases().data())) dd = alpha.data() # hl_array = f_calc.array( data=self.hl_coeffs_start.common_set(f_calc).data() + hl_coeff) self.compute_phase_source(hl_array) fom = flex.abs(self.phase_source.data()) mFo = hl_array.array(data=f_obs.data() * self.phase_source.data()) DFc = hl_array.array(data=dd * f_calc.as_amplitude_array().phase_transfer( self.phase_source).data()) centric_flags = f_obs.centric_flags().data() acentric_flags = ~centric_flags fo_scale = flex.double(centric_flags.size()) fc_scale = flex.double(centric_flags.size()) fo_scale.set_selected(acentric_flags, 2) fo_scale.set_selected(centric_flags, 1) fc_scale.set_selected(acentric_flags, 1) fc_scale.set_selected(centric_flags, 0) fo_scale.set_selected(lone_indices_selection, 0) fc_scale.set_selected(lone_indices_selection, -1) self.map_coeffs = hl_array.array(data=mFo.data() * fo_scale - DFc.data() * fc_scale) self.fom = hl_array.array(data=fom) self.hl_coeffs = hl_array # statistics self.r1_factor = f_obs_active.r1_factor(f_calc_active) fom = fom.select(matched_indices.pair_selection(0)) self.r1_factor_fom = flex.sum( fom * flex.abs(f_obs_active.data() - f_calc_active.as_amplitude_array().data())) \ / flex.sum(fom * f_obs_active.data()) phase_source, phase_source_previous = self.phase_source.common_sets( self.phase_source_previous) self.mean_delta_phi = phase_error( flex.arg(phase_source.data()), flex.arg(phase_source_previous.data())) phase_source, phase_source_initial = self.phase_source.common_sets( self.phase_source_initial) self.mean_delta_phi_initial = phase_error( flex.arg(phase_source.data()), flex.arg(phase_source_initial.data())) self.mean_fom = flex.mean(fom) fom = f_obs_active.array(data=fom) if fom.data().size() < 1000: # 2013-12-14 was hard-wired at 1000 tt reflections_per_bin = fom.data().size() else: reflections_per_bin = 1000 fom.setup_binner(reflections_per_bin=reflections_per_bin) self.mean_fom_binned = fom.mean(use_binning=True)
def compute_map_coefficients(self): f_obs = self.f_obs_complete.select(self.f_obs_complete.d_spacings().data() >= self.d_min) f_calc = f_obs.structure_factors_from_map(self.map, use_sg=True) f_obs_active = f_obs.select_indices(self.active_indices) minimized = relative_scaling.ls_rel_scale_driver( f_obs_active, f_calc.as_amplitude_array().select_indices(self.active_indices), use_intensities=False, use_weights=False) #minimized.show() f_calc = f_calc.customized_copy(data=f_calc.data()\ * math.exp(-minimized.p_scale)\ * adptbx.debye_waller_factor_u_star( f_calc.indices(), minimized.u_star)) f_calc_active = f_calc.common_set(f_obs_active) matched_indices = f_obs.match_indices(self.f_obs_active) lone_indices_selection = matched_indices.single_selection(0) from mmtbx.max_lik import maxlik alpha_beta_est = maxlik.alpha_beta_est_manager( f_obs=f_obs_active, f_calc=f_calc_active, free_reflections_per_bin=140, flags=flex.bool(f_obs_active.size()), interpolation=True, epsilons=f_obs_active.epsilons().data().as_double()) alpha, beta = alpha_beta_est.alpha_beta_for_each_reflection( f_obs=self.f_obs_complete.select(self.f_obs_complete.d_spacings().data() >= self.d_min)) f_obs.data().copy_selected( lone_indices_selection.iselection(), flex.abs(f_calc.data())) t = maxlik.fo_fc_alpha_over_eps_beta( f_obs=f_obs, f_model=f_calc, alpha=alpha, beta=beta) hl_coeff = flex.hendrickson_lattman( t * flex.cos(f_calc.phases().data()), t * flex.sin(f_calc.phases().data())) dd = alpha.data() # hl_array = f_calc.array( data=self.hl_coeffs_start.common_set(f_calc).data()+hl_coeff) self.compute_phase_source(hl_array) fom = flex.abs(self.phase_source.data()) mFo = hl_array.array(data=f_obs.data()*self.phase_source.data()) DFc = hl_array.array(data=dd*f_calc.as_amplitude_array().phase_transfer( self.phase_source).data()) centric_flags = f_obs.centric_flags().data() acentric_flags = ~centric_flags fo_scale = flex.double(centric_flags.size()) fc_scale = flex.double(centric_flags.size()) fo_scale.set_selected(acentric_flags, 2) fo_scale.set_selected(centric_flags, 1) fc_scale.set_selected(acentric_flags, 1) fc_scale.set_selected(centric_flags, 0) fo_scale.set_selected(lone_indices_selection, 0) fc_scale.set_selected(lone_indices_selection, -1) self.map_coeffs = hl_array.array( data=mFo.data()*fo_scale - DFc.data()*fc_scale) self.fom = hl_array.array(data=fom) self.hl_coeffs = hl_array # statistics self.r1_factor = f_obs_active.r1_factor(f_calc_active) fom = fom.select(matched_indices.pair_selection(0)) self.r1_factor_fom = flex.sum( fom * flex.abs(f_obs_active.data() - f_calc_active.as_amplitude_array().data())) \ / flex.sum(fom * f_obs_active.data()) phase_source, phase_source_previous = self.phase_source.common_sets( self.phase_source_previous) self.mean_delta_phi = phase_error( flex.arg(phase_source.data()), flex.arg(phase_source_previous.data())) phase_source, phase_source_initial = self.phase_source.common_sets( self.phase_source_initial) self.mean_delta_phi_initial = phase_error( flex.arg(phase_source.data()), flex.arg(phase_source_initial.data())) self.mean_fom = flex.mean(fom) fom = f_obs_active.array(data=fom) if fom.data().size()<1000: # 2013-12-14 was hard-wired at 1000 tt reflections_per_bin=fom.data().size() else: reflections_per_bin=1000 fom.setup_binner(reflections_per_bin=reflections_per_bin) self.mean_fom_binned = fom.mean(use_binning=True)
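
# Scalar sketch of the map-coefficient combination used above: acentric
# reflections get 2*m*Fo - D*Fc (fo_scale=2, fc_scale=1), centric ones m*Fo
# (1, 0), and lone reflections with no observed amplitude get D*Fc (0, -1).
# The helper and numbers are illustrative only.
def map_coeff(m_fo, d_fc, centric=False, lone=False):
    if lone:
        fo_scale, fc_scale = 0.0, -1.0
    elif centric:
        fo_scale, fc_scale = 1.0, 0.0
    else:
        fo_scale, fc_scale = 2.0, 1.0
    return m_fo * fo_scale - d_fc * fc_scale

print(map_coeff(100.0, 80.0))                # acentric: 120
print(map_coeff(100.0, 80.0, centric=True))  # centric: 100
print(map_coeff(100.0, 80.0, lone=True))     # lone: 80
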
def organize_input(self, observations_pickle, iparams, avg_mode, pickle_filename=None): """Given the pickle file, extract and prepare observations object and the alpha angle (meridional to equatorial). """ if iparams.isoform_name is not None: if "identified_isoform" not in observations_pickle: return None, "No identified isoform" if observations_pickle["identified_isoform"] != iparams.isoform_name: return ( None, "Identified isoform(%s) is not the requested isoform (%s)" % (observations_pickle["identified_isoform"], iparams.isoform_name), ) if iparams.flag_weak_anomalous: if avg_mode == "final": target_anomalous_flag = iparams.target_anomalous_flag else: target_anomalous_flag = False else: target_anomalous_flag = iparams.target_anomalous_flag img_filename_only = "" if pickle_filename is not None: pickle_filepaths = pickle_filename.split("/") img_filename_only = pickle_filepaths[len(pickle_filepaths) - 1] txt_exception = " {0:40} ==> ".format(img_filename_only) observations = observations_pickle["observations"][0] detector_distance_mm = observations_pickle["distance"] mm_predictions = iparams.pixel_size_mm * (observations_pickle["mapped_predictions"][0]) xbeam = observations_pickle["xbeam"] ybeam = observations_pickle["ybeam"] alpha_angle_obs = flex.double( [math.atan(abs(pred[0] - xbeam) / abs(pred[1] - ybeam)) for pred in mm_predictions] ) spot_pred_x_mm = flex.double([pred[0] - xbeam for pred in mm_predictions]) spot_pred_y_mm = flex.double([pred[1] - ybeam for pred in mm_predictions]) # Polarization correction wavelength = observations_pickle["wavelength"] if iparams.flag_LP_correction: fx = 1 - iparams.polarization_horizontal_fraction fy = 1 - fx if fx > 1.0 or fx < 0: print "Horizontal polarization fraction is not correct. The value must be >= 0 and <= 1" print "No polarization correction. Continue with post-refinement" else: phi_angle_obs = flex.double([math.atan2(pred[1] - ybeam, pred[0] - xbeam) for pred in mm_predictions]) bragg_angle_obs = observations.two_theta(wavelength).data() P = ( fx * ( (flex.sin(phi_angle_obs) ** 2) + ((flex.cos(phi_angle_obs) ** 2) * flex.cos(bragg_angle_obs) ** 2) ) ) + ( fy * ( (flex.cos(phi_angle_obs) ** 2) + ((flex.sin(phi_angle_obs) ** 2) * flex.cos(bragg_angle_obs) ** 2) ) ) I_prime = observations.data() / P sigI_prime = observations.sigmas() / P observations = observations.customized_copy(data=flex.double(I_prime), sigmas=flex.double(sigI_prime)) # set observations with target space group - !!! required for correct # merging due to map_to_asu command. 
if iparams.target_crystal_system is not None: target_crystal_system = iparams.target_crystal_system else: target_crystal_system = observations.crystal_symmetry().space_group().crystal_system() from mod_leastsqr import prep_input, prep_output if iparams.flag_override_unit_cell: uc_constrained_inp = prep_input(iparams.target_unit_cell.parameters(), target_crystal_system) else: uc_constrained_inp = prep_input(observations.unit_cell().parameters(), target_crystal_system) uc_constrained = list(prep_output(uc_constrained_inp, target_crystal_system)) try: # apply constrain using the crystal system miller_set = symmetry( unit_cell=uc_constrained, space_group_symbol=iparams.target_space_group ).build_miller_set(anomalous_flag=target_anomalous_flag, d_min=iparams.merge.d_min) observations = observations.customized_copy( anomalous_flag=target_anomalous_flag, crystal_symmetry=miller_set.crystal_symmetry() ) except Exception: a, b, c, alpha, beta, gamma = uc_constrained txt_exception += "Mismatch spacegroup (%6.2f,%6.2f,%6.2f,%6.2f,%6.2f,%6.2f)" % (a, b, c, alpha, beta, gamma) print txt_exception return None, txt_exception # reset systematic absence sys_absent_negate_flags = flex.bool( [sys_absent_flag[1] == False for sys_absent_flag in observations.sys_absent_flags()] ) observations = observations.select(sys_absent_negate_flags) alpha_angle_obs = alpha_angle_obs.select(sys_absent_negate_flags) spot_pred_x_mm = spot_pred_x_mm.select(sys_absent_negate_flags) spot_pred_y_mm = spot_pred_y_mm.select(sys_absent_negate_flags) import os.path # remove observations from rejection list if os.path.isfile(iparams.run_no + "/rejections.txt"): txt_out = pickle_filename + " \nN_before_rejection: " + str(len(observations.data())) + "\n" file_reject = open(iparams.run_no + "/rejections.txt", "r") data_reject = file_reject.read().split("\n") miller_indices_ori_rejected = flex.miller_index() for row_reject in data_reject: col_reject = row_reject.split() if len(col_reject) > 0: if col_reject[0].strip() == pickle_filename: miller_indices_ori_rejected.append( (int(col_reject[1].strip()), int(col_reject[2].strip()), int(col_reject[3].strip())) ) if len(miller_indices_ori_rejected) > 0: i_sel_flag = flex.bool([True] * len(observations.data())) for miller_index_ori_rejected in miller_indices_ori_rejected: i_index_ori = 0 for miller_index_ori in observations.indices(): if miller_index_ori_rejected == miller_index_ori: i_sel_flag[i_index_ori] = False txt_out += ( " -Discard:" + str(miller_index_ori[0]) + "," + str(miller_index_ori[1]) + "," + str(miller_index_ori[2]) + "\n" ) i_index_ori += 1 observations = observations.customized_copy( indices=observations.indices().select(i_sel_flag), data=observations.data().select(i_sel_flag), sigmas=observations.sigmas().select(i_sel_flag), ) alpha_angle_obs = alpha_angle_obs.select(i_sel_flag) spot_pred_x_mm = spot_pred_x_mm.select(i_sel_flag) spot_pred_y_mm = spot_pred_y_mm.select(i_sel_flag) txt_out += "N_after_rejection: " + str(len(observations.data())) + "\n" # filter resolution i_sel_res = observations.resolution_filter_selection(d_max=iparams.merge.d_max, d_min=iparams.merge.d_min) observations = observations.select(i_sel_res) alpha_angle_obs = alpha_angle_obs.select(i_sel_res) spot_pred_x_mm = spot_pred_x_mm.select(i_sel_res) spot_pred_y_mm = spot_pred_y_mm.select(i_sel_res) # Filter weak if iparams.flag_include_negatives: if iparams.merge.sigma_min > 0: iparams.merge.sigma_min = -99.0 i_sel = (observations.data() / observations.sigmas()) > iparams.merge.sigma_min observations = 
observations.select(i_sel) alpha_angle_obs = alpha_angle_obs.select(i_sel) spot_pred_x_mm = spot_pred_x_mm.select(i_sel) spot_pred_y_mm = spot_pred_y_mm.select(i_sel) # filter icering (if on) if iparams.icering.flag_on: miller_indices = flex.miller_index() I_set = flex.double() sigI_set = flex.double() alpha_angle_obs_set = flex.double() spot_pred_x_mm_set = flex.double() spot_pred_y_mm_set = flex.double() for miller_index, d, I, sigI, alpha, spot_x, spot_y in zip( observations.indices(), observations.d_spacings().data(), observations.data(), observations.sigmas(), alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm, ): if d > iparams.icering.d_upper or d < iparams.icering.d_lower: miller_indices.append(miller_index) I_set.append(I) sigI_set.append(sigI) alpha_angle_obs_set.append(alpha) spot_pred_x_mm_set.append(spot_x) spot_pred_y_mm_set.append(spot_y) observations = observations.customized_copy(indices=miller_indices, data=I_set, sigmas=sigI_set) alpha_angle_obs = alpha_angle_obs_set[:] spot_pred_x_mm = spot_pred_x_mm_set[:] spot_pred_y_mm = spot_pred_y_mm_set[:] # replacing sigI (if set) if iparams.flag_replace_sigI: observations = observations.customized_copy(sigmas=flex.sqrt(observations.data())) inputs = observations, alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm, detector_distance_mm return inputs, "OK"
def run(args): from dials.util.options import OptionParser from dials.util.options import flatten_datablocks from dials.util.masking import GoniometerShadowMaskGenerator from libtbx.utils import Sorry import libtbx.load_env usage = "%s [options] experiments.json" % libtbx.env.dispatcher_name parser = OptionParser( usage=usage, phil=phil_scope, read_datablocks=True, check_format=False, epilog=help_message, ) params, options = parser.parse_args(show_diff_phil=True) datablocks = flatten_datablocks(params.input.datablock) if len(datablocks) == 0: parser.print_help() return imagesets = [] for datablock in datablocks: imagesets.extend(datablock.extract_imagesets()) for imageset in imagesets: import math height = params.height # mm radius = params.radius # mm steps_per_degree = 10 steps_per_degree = 1 theta = ( flex.double([range(360 * steps_per_degree)]) * math.pi / 180 * 1 / steps_per_degree ) y = radius * flex.cos(theta) # x z = radius * flex.sin(theta) # y x = flex.double(theta.size(), height) # z coords = flex.vec3_double(zip(x, y, z)) coords.insert(0, (0, 0, 0)) gonio = imageset.get_goniometer() scan = imageset.get_scan() beam = imageset.get_beam() detector = imageset.get_detector() if params.angle is not None: angle = params.angle else: angle = scan.get_oscillation()[0] gonio_masker = GoniometerShadowMaskGenerator( gonio, coords, flex.size_t(len(coords), 0) ) from matplotlib import pyplot as plt if params.output.animation is not None: import matplotlib.animation as manimation import os.path ext = os.path.splitext(params.output.animation) metadata = dict( title="Movie Test", artist="Matplotlib", comment="Movie support!" ) if ext[1] == ".mp4": FFMpegWriter = manimation.writers["ffmpeg"] writer = FFMpegWriter(fps=15, metadata=metadata) elif ext[1] == ".gif": ImagemagickWriter = manimation.writers["imagemagick_file"] writer = ImagemagickWriter(fps=15, metadata=metadata) fig = plt.figure() (l,) = plt.plot([], [], c="r", marker=None) plt.axes().set_aspect("equal") plt.xlim(0, detector[0].get_image_size()[0]) plt.ylim(0, detector[0].get_image_size()[0]) plt.gca().invert_yaxis() title = plt.axes().set_title("") with writer.saving(fig, params.output.animation, 100): start, end = scan.get_array_range() step_size = 5 for i in range(start, end, step_size): angle = scan.get_angle_from_array_index(i) shadow_boundary = gonio_masker.project_extrema(detector, angle) x, y = shadow_boundary[0].parts() l.set_data(x.as_numpy_array(), y.as_numpy_array()) title.set_text("scan_angle = %.1f degrees" % angle) writer.grab_frame() plt.close() shadow_boundary = gonio_masker.project_extrema(detector, angle) with open("shadow.phil", "wb") as f: print("untrusted {", file=f) print(" polygon = \\", file=f) for c in shadow_boundary[0]: print(" %0.f %.0f \\" % (max(c[0], 0), max(c[1], 0)), file=f) print("}", file=f) import matplotlib.pyplot as plt fig = plt.figure() x, y, z = coords.parts() plt.scatter(x.as_numpy_array(), y.as_numpy_array()) plt.axes().set_aspect("equal") plt.xlabel("x (gonio axis)") plt.ylabel("y (perpendicular to beam)") plt.savefig("gonio_xy.png") plt.scatter(y.as_numpy_array(), z.as_numpy_array()) plt.axes().set_aspect("equal") plt.xlabel("y (perpendicular to beam)") plt.ylabel("z (towards beam))") plt.savefig("gonio_yz.png") plt.scatter(z.as_numpy_array(), x.as_numpy_array()) plt.axes().set_aspect("equal") plt.xlabel("z (towards beam)") plt.ylabel("x (gonio axis)") plt.savefig("gonio_zx.png") for p_id in range(len(detector)): x, y = shadow_boundary[p_id].parts() fig = plt.figure() 
plt.scatter(x.as_numpy_array(), y.as_numpy_array(), c="r", s=1, marker="x") plt.axes().set_aspect("equal") plt.xlim(0, detector[p_id].get_image_size()[0]) plt.ylim(0, detector[p_id].get_image_size()[0]) plt.gca().invert_yaxis() plt.savefig("shadow.png")
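
# Plain-Python sketch of the cylinder of shadow coordinates built above:
# points on a circle of the given radius at a fixed height along the
# goniometer axis, with the origin prepended.  Values are illustrative only.
import math
height, radius, steps_per_degree = 10.0, 5.0, 1
coords = [(0.0, 0.0, 0.0)]
for i in range(360 * steps_per_degree):
    theta = math.radians(i / float(steps_per_degree))
    coords.append((height, radius * math.cos(theta), radius * math.sin(theta)))
print(len(coords))  # 361 points including the origin
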
def scale_frame_detail(self, result, file_name, db_mgr, out): # If the pickled integration file does not contain a wavelength, # fall back on the value given on the command line. XXX The # wavelength parameter should probably be removed from master_phil # once all pickled integration files contain it. if (result.has_key("wavelength")): wavelength = result["wavelength"] elif (self.params.wavelength is not None): wavelength = self.params.wavelength else: # XXX Give error, or raise exception? return None assert (wavelength > 0) observations = result["observations"][0] cos_two_polar_angle = result["cos_two_polar_angle"] assert observations.size() == cos_two_polar_angle.size() tt_vec = observations.two_theta(wavelength) #print "mean tt degrees",180.*flex.mean(tt_vec.data())/math.pi cos_tt_vec = flex.cos( tt_vec.data() ) sin_tt_vec = flex.sin( tt_vec.data() ) cos_sq_tt_vec = cos_tt_vec * cos_tt_vec sin_sq_tt_vec = sin_tt_vec * sin_tt_vec P_nought_vec = 0.5 * (1. + cos_sq_tt_vec) F_prime = -1.0 # Hard-coded value defines the incident polarization axis P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq_tt_vec # XXX added as a diagnostic prange=P_nought_vec - P_prime other_F_prime = 1.0 otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec otherprange=P_nought_vec - otherP_prime diff2 = flex.abs(prange - otherprange) print "mean diff is",flex.mean(diff2), "range",flex.min(diff2), flex.max(diff2) # XXX done observations = observations / ( P_nought_vec - P_prime ) # This corrects observations for polarization assuming 100% polarization on # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0) # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15. print "Step 3. Correct for polarization." indexed_cell = observations.unit_cell() observations_original_index = observations.deep_copy() if result.get("model_partialities",None) is not None and result["model_partialities"][0] is not None: # some recordkeeping useful for simulations partialities_original_index = observations.customized_copy( crystal_symmetry=self.miller_set.crystal_symmetry(), data = result["model_partialities"][0]["data"], sigmas = flex.double(result["model_partialities"][0]["data"].size()), #dummy value for sigmas indices = result["model_partialities"][0]["indices"], ).resolution_filter(d_min=self.params.d_min) assert len(observations_original_index.indices()) == len(observations.indices()) # Now manipulate the data to conform to unit cell, asu, and space group # of reference. The resolution will be cut later. # Only works if there is NOT an indexing ambiguity! observations = observations.customized_copy( anomalous_flag=not self.params.merge_anomalous, crystal_symmetry=self.miller_set.crystal_symmetry() ).map_to_asu() observations_original_index = observations_original_index.customized_copy( anomalous_flag=not self.params.merge_anomalous, crystal_symmetry=self.miller_set.crystal_symmetry() ) print "Step 4. Filter on global resolution and map to asu" print >> out, "Data in reference setting:" #observations.show_summary(f=out, prefix=" ") show_observations(observations, out=out) #if self.params.significance_filter.apply is True: # raise Exception("significance filter not implemented in samosa") if self.params.significance_filter.apply is True: #------------------------------------ # Apply an I/sigma filter ... 
accept resolution bins only if they # have significant signal; tends to screen out higher resolution observations # if the integration model doesn't quite fit N_obs_pre_filter = observations.size() N_bins_small_set = N_obs_pre_filter // self.params.significance_filter.min_ct N_bins_large_set = N_obs_pre_filter // self.params.significance_filter.max_ct # Ensure there is at least one bin. N_bins = max( [min([self.params.significance_filter.n_bins,N_bins_small_set]), N_bins_large_set, 1] ) print "Total obs %d Choose n bins = %d"%(N_obs_pre_filter,N_bins) bin_results = show_observations(observations, out=out, n_bins=N_bins) #show_observations(observations, out=sys.stdout, n_bins=N_bins) acceptable_resolution_bins = [ bin.mean_I_sigI > self.params.significance_filter.sigma for bin in bin_results] acceptable_nested_bin_sequences = [i for i in xrange(len(acceptable_resolution_bins)) if False not in acceptable_resolution_bins[:i+1]] if len(acceptable_nested_bin_sequences)==0: return null_data( file_name=file_name, log_out=out.getvalue(), low_signal=True) else: N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1 imposed_res_filter = float(bin_results[N_acceptable_bins-1].d_range.split()[2]) imposed_res_sel = observations.resolution_filter_selection( d_min=imposed_res_filter) observations = observations.select( imposed_res_sel) observations_original_index = observations_original_index.select( imposed_res_sel) print "New resolution filter at %7.2f"%imposed_res_filter,file_name print "N acceptable bins",N_acceptable_bins print "Old n_obs: %d, new n_obs: %d"%(N_obs_pre_filter,observations.size()) print "Step 5. Frame by frame resolution filter" # Finished applying the binwise I/sigma filter--------------------------------------- if self.params.raw_data.sdfac_auto is True: raise Exception("sdfac auto not implemented in samosa.") print "Step 6. Match to reference intensities, filter by correlation, filter out negative intensities." assert len(observations_original_index.indices()) \ == len(observations.indices()) data = frame_data(self.n_refl, file_name) data.set_indexed_cell(indexed_cell) data.d_min = observations.d_min() # Ensure that match_multi_indices() will return identical results # when a frame's observations are matched against the # pre-generated Miller set, self.miller_set, and the reference # data set, self.i_model. The implication is that the same match # can be used to map Miller indices to array indices for intensity # accumulation, and for determination of the correlation # coefficient in the presence of a scaling reference. if self.i_model is not None: assert len(self.i_model.indices()) == len(self.miller_set.indices()) \ and (self.i_model.indices() == self.miller_set.indices()).count(False) == 0 matches = miller.match_multi_indices( miller_indices_unique=self.miller_set.indices(), miller_indices=observations.indices()) use_weights = False # New facility for getting variance-weighted correlation if self.params.scaling.algorithm in ['mark1','levmar']: # Because no correlation is computed, the correlation # coefficient is fixed at zero. Setting slope = 1 means # intensities are added without applying a scale factor. sum_x = 0 sum_y = 0 for pair in matches.pairs(): data.n_obs += 1 if not self.params.include_negatives and observations.data()[pair[1]] <= 0: data.n_rejected += 1 else: sum_y += observations.data()[pair[1]] N = data.n_obs - data.n_rejected # Early return if there are no positive reflections on the frame. 
if data.n_obs <= data.n_rejected: return null_data( file_name=file_name, log_out=out.getvalue(), low_signal=True) # Update the count for each matched reflection. This counts # reflections with non-positive intensities, too. data.completeness += matches.number_of_matches(0).as_int() data.wavelength = wavelength if not self.params.scaling.enable: # Do not scale anything print "Scale factor to an isomorphous reference PDB will NOT be applied." slope = 1.0 offset = 0.0 observations_original_index_indices = observations_original_index.indices() if db_mgr is None: return unpack(MINI.x) # special exit for two-color indexing kwargs = {'wavelength': wavelength, 'beam_x': result['xbeam'], 'beam_y': result['ybeam'], 'distance': result['distance'], 'unique_file_name': data.file_name} ORI = result["current_orientation"][0] Astar = matrix.sqr(ORI.reciprocal_matrix()) kwargs['res_ori_1'] = Astar[0] kwargs['res_ori_2'] = Astar[1] kwargs['res_ori_3'] = Astar[2] kwargs['res_ori_4'] = Astar[3] kwargs['res_ori_5'] = Astar[4] kwargs['res_ori_6'] = Astar[5] kwargs['res_ori_7'] = Astar[6] kwargs['res_ori_8'] = Astar[7] kwargs['res_ori_9'] = Astar[8] assert self.params.scaling.report_ML is True kwargs['half_mosaicity_deg'] = result["ML_half_mosaicity_deg"][0] kwargs['domain_size_ang'] = result["ML_domain_size_ang"][0] frame_id_0_base = db_mgr.insert_frame(**kwargs) xypred = result["mapped_predictions"][0] indices = flex.size_t([pair[1] for pair in matches.pairs()]) sel_observations = flex.intersection( size=observations.data().size(), iselections=[indices]) set_original_hkl = observations_original_index_indices.select( flex.intersection( size=observations_original_index_indices.size(), iselections=[indices])) set_xypred = xypred.select( flex.intersection( size=xypred.size(), iselections=[indices])) kwargs = {'hkl_id_0_base': [pair[0] for pair in matches.pairs()], 'i': observations.data().select(sel_observations), 'sigi': observations.sigmas().select(sel_observations), 'detector_x': [xy[0] for xy in set_xypred], 'detector_y': [xy[1] for xy in set_xypred], 'frame_id_0_base': [frame_id_0_base] * len(matches.pairs()), 'overload_flag': [0] * len(matches.pairs()), 'original_h': [hkl[0] for hkl in set_original_hkl], 'original_k': [hkl[1] for hkl in set_original_hkl], 'original_l': [hkl[2] for hkl in set_original_hkl]} db_mgr.insert_observation(**kwargs) print >> out, "Lattice: %d reflections" % (data.n_obs - data.n_rejected) print >> out, "average obs", sum_y / (data.n_obs - data.n_rejected), \ "average calc", sum_x / (data.n_obs - data.n_rejected) print >> out, "Rejected %d reflections with negative intensities" % \ data.n_rejected data.accept = True for pair in matches.pairs(): if not self.params.include_negatives and (observations.data()[pair[1]] <= 0) : continue Intensity = observations.data()[pair[1]] # Super-rare exception. If saved sigmas instead of I/sigmas in the ISIGI dict, this wouldn't be needed. if Intensity == 0: continue # Add the reflection as a two-tuple of intensity and I/sig(I) # to the dictionary of observations. index = self.miller_set.indices()[pair[0]] isigi = (Intensity, observations.data()[pair[1]] / observations.sigmas()[pair[1]], 1.0) if index in data.ISIGI: data.ISIGI[index].append(isigi) else: data.ISIGI[index] = [isigi] sigma = observations.sigmas()[pair[1]] variance = sigma * sigma data.summed_N[pair[0]] += 1 data.summed_wt_I[pair[0]] += Intensity / variance data.summed_weight[pair[0]] += 1 / variance data.set_log_out(out.getvalue()) return data
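
# Scalar form of the Kahn et al. (1982) polarization factor applied above:
# P = P0 - P' with P0 = 0.5*(1 + cos(2*theta)**2) and
# P' = 0.5*F'*cos(2*psi)*sin(2*theta)**2, where F' = -1 as hard-coded above
# and 2*psi is twice the polar angle.  The helper name and angles are
# illustrative only.
import math

def polarization_factor(two_theta, two_psi, f_prime=-1.0):
    p_nought = 0.5 * (1.0 + math.cos(two_theta)**2)
    p_prime = 0.5 * f_prime * math.cos(two_psi) * math.sin(two_theta)**2
    return p_nought - p_prime

print(polarization_factor(math.radians(30.0), math.radians(0.0)))  # 1.0
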
def organize_input(self, observations_pickle, iparams, avg_mode, pickle_filename=None): """Given the pickle file, extract and prepare observations object and the alpha angle (meridional to equatorial). """ identified_isoform = None if iparams.isoform_name is not None: identified_isoform = iparams.isoform_name if "identified_isoform" not in observations_pickle: return None, "No identified isoform" else: identified_isoform = observations_pickle["identified_isoform"] if observations_pickle["identified_isoform"] != iparams.isoform_name: return None, "Identified isoform(%s) is not the requested isoform (%s)"%(observations_pickle["identified_isoform"], iparams.isoform_name) if iparams.flag_weak_anomalous: if avg_mode == 'final': target_anomalous_flag = iparams.target_anomalous_flag else: target_anomalous_flag = False else: target_anomalous_flag = iparams.target_anomalous_flag img_filename_only = '' if pickle_filename is not None: pickle_filepaths = pickle_filename.split('/') img_filename_only = pickle_filepaths[len(pickle_filepaths)-1] txt_exception = ' {0:40} ==> '.format(img_filename_only) observations = observations_pickle["observations"][0] detector_distance_mm = observations_pickle['distance'] mapped_predictions = observations_pickle['mapped_predictions'][0] #set observations with target space group - !!! required for correct #merging due to map_to_asu command. if iparams.target_crystal_system is not None: target_crystal_system = iparams.target_crystal_system else: target_crystal_system = observations.crystal_symmetry().space_group().crystal_system() lph = lbfgs_partiality_handler() if iparams.flag_override_unit_cell: uc_constrained_inp = lph.prep_input(iparams.target_unit_cell.parameters(), target_crystal_system) else: uc_constrained_inp = lph.prep_input(observations.unit_cell().parameters(), target_crystal_system) uc_constrained = list(lph.prep_output(uc_constrained_inp, target_crystal_system)) try: #apply constrain using the crystal system miller_set = symmetry( unit_cell=uc_constrained, space_group_symbol=iparams.target_space_group ).build_miller_set( anomalous_flag=target_anomalous_flag, d_min=iparams.merge.d_min) observations = observations.customized_copy(anomalous_flag=target_anomalous_flag, crystal_symmetry=miller_set.crystal_symmetry()) except Exception: a,b,c,alpha,beta,gamma = uc_constrained txt_exception += 'Mismatch spacegroup (%6.2f,%6.2f,%6.2f,%6.2f,%6.2f,%6.2f)'%(a,b,c,alpha,beta,gamma) return None, txt_exception #reset systematic absence sys_absent_negate_flags = flex.bool([sys_absent_flag[1]==False for sys_absent_flag in observations.sys_absent_flags()]) observations = observations.select(sys_absent_negate_flags) mapped_predictions = mapped_predictions.select(sys_absent_negate_flags) import os.path #remove observations from rejection list if os.path.isfile(iparams.run_no+'/rejections.txt'): txt_out = pickle_filename + ' \nN_before_rejection: ' + str(len(observations.data())) + '\n' file_reject = open(iparams.run_no+'/rejections.txt', 'r') data_reject=file_reject.read().split("\n") miller_indices_ori_rejected = flex.miller_index() for row_reject in data_reject: col_reject = row_reject.split() if len(col_reject) > 0: if col_reject[0].strip() == pickle_filename: miller_indices_ori_rejected.append((int(col_reject[1].strip()), int(col_reject[2].strip()), int(col_reject[3].strip()))) if len(miller_indices_ori_rejected) > 0: i_sel_flag = flex.bool([True]*len(observations.data())) for miller_index_ori_rejected in miller_indices_ori_rejected: i_index_ori = 0 for miller_index_ori 
in observations.indices(): if miller_index_ori_rejected == miller_index_ori: i_sel_flag[i_index_ori] = False txt_out += ' -Discard:' + str(miller_index_ori[0]) + \ ','+str(miller_index_ori[1])+','+str(miller_index_ori[2]) + '\n' i_index_ori += 1 observations = observations.customized_copy(indices=observations.indices().select(i_sel_flag), data=observations.data().select(i_sel_flag), sigmas=observations.sigmas().select(i_sel_flag)) mapped_predictions = mapped_predictions.select(i_sel_flag) txt_out += 'N_after_rejection: ' + str(len(observations.data())) + '\n' #filter resolution i_sel_res = observations.resolution_filter_selection(d_max=iparams.merge.d_max, d_min=iparams.merge.d_min) observations = observations.select(i_sel_res) mapped_predictions = mapped_predictions.select(i_sel_res) #Filter weak i_sel = (observations.data()/observations.sigmas()) > iparams.merge.sigma_min observations = observations.select(i_sel) mapped_predictions = mapped_predictions.select(i_sel) #filter icering (if on) #replacing sigI (if set) if iparams.flag_replace_sigI: observations = observations.customized_copy(sigmas=flex.sqrt(observations.data())) #setup spot predicton mm_predictions = iparams.pixel_size_mm*mapped_predictions xbeam = observations_pickle["xbeam"] ybeam = observations_pickle["ybeam"] alpha_angle_obs = flex.double([math.atan(abs(pred[0]-xbeam)/abs(pred[1]-ybeam)) \ for pred in mm_predictions]) spot_pred_x_mm = flex.double([pred[0]-xbeam for pred in mm_predictions]) spot_pred_y_mm = flex.double([pred[1]-ybeam for pred in mm_predictions]) #Polarization correction wavelength = observations_pickle["wavelength"] if iparams.flag_LP_correction: fx = 1 - iparams.polarization_horizontal_fraction fy = 1 - fx if fx > 1.0 or fx < 0: print 'Horizontal polarization fraction is not correct. The value must be >= 0 and <= 1' print 'No polarization correction. Continue with post-refinement' else: phi_angle_obs = flex.double([math.atan2(pred[1]-ybeam, pred[0]-xbeam) \ for pred in mm_predictions]) bragg_angle_obs = observations.two_theta(wavelength).data() P = ((fx*((flex.sin(phi_angle_obs)**2)+((flex.cos(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2)))+\ (fy*((flex.cos(phi_angle_obs)**2)+((flex.sin(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2)))) I_prime = observations.data()/P sigI_prime =observations.sigmas()/P observations = observations.customized_copy(data=flex.double(I_prime), sigmas=flex.double(sigI_prime)) inputs = observations, alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm, \ detector_distance_mm, identified_isoform, \ mapped_predictions, xbeam, ybeam return inputs, 'OK'
def organize_input(self, observations_pickle, iph):
    """Given the pickle file, extract and prepare observations object and
    the alpha angle (meridional to equatorial).
    """
    observations = observations_pickle["observations"][0]
    mm_predictions = iph.pixel_size_mm*(observations_pickle['mapped_predictions'][0])
    xbeam = observations_pickle["xbeam"]
    ybeam = observations_pickle["ybeam"]
    alpha_angle_obs = flex.double([math.atan(abs(pred[0]-xbeam)/abs(pred[1]-ybeam))
                                   for pred in mm_predictions])
    assert len(alpha_angle_obs) == len(observations.indices()), \
        'Size of alpha angles and observations are not equal %6.0f, %6.0f' % (
            len(alpha_angle_obs), len(observations.indices()))
    #Lorentz-polarization correction
    wavelength = observations_pickle["wavelength"]
    two_theta = observations.two_theta(wavelength=wavelength).data()
    one_over_LP = (2 * flex.sin(two_theta))/(1 + (flex.cos(two_theta)**2))
    one_over_P = 2/(1 + (flex.cos(two_theta)**2))
    observations = observations.customized_copy(data=observations.data()*one_over_P)
    #set observations with target space group - !!! required for correct
    #merging due to map_to_asu command.
    miller_set = symmetry(
        unit_cell=iph.target_unit_cell,
        space_group_symbol=iph.target_space_group
    ).build_miller_set(
        anomalous_flag=iph.target_anomalous_flag,
        d_min=iph.d_min)
    #Filter negative intensities
    i_I_positive = (observations.data() > 0)
    miller_indices_positive = observations.indices().select(i_I_positive)
    I_positive = observations.data().select(i_I_positive)
    sigI_positive = observations.sigmas().select(i_I_positive)
    alpha_angle_obs = alpha_angle_obs.select(i_I_positive)
    observations = observations.customized_copy(
        indices=miller_indices_positive,
        data=I_positive,
        sigmas=sigI_positive,
        anomalous_flag=iph.target_anomalous_flag,
        crystal_symmetry=miller_set.crystal_symmetry())
    if observations.crystal_symmetry().is_compatible_unit_cell() == False:
        return None
    #filter out weak data
    I_over_sigi = observations.data() / observations.sigmas()
    i_I_obs_sel = (I_over_sigi > iph.sigma_max)
    observations = observations.customized_copy(
        indices=observations.indices().select(i_I_obs_sel),
        data=observations.data().select(i_I_obs_sel),
        sigmas=observations.sigmas().select(i_I_obs_sel))
    alpha_angle_obs = alpha_angle_obs.select(i_I_obs_sel)
    #filter resolution
    i_sel_res = observations.resolution_filter_selection(d_max=iph.d_max,
                                                         d_min=iph.d_min)
    observations = observations.customized_copy(
        indices=observations.indices().select(i_sel_res),
        data=observations.data().select(i_sel_res),
        sigmas=observations.sigmas().select(i_sel_res))
    alpha_angle_obs = alpha_angle_obs.select(i_sel_res)
    assert len(alpha_angle_obs) == len(observations.indices()), \
        'Size of alpha angles and observations are not equal %6.0f, %6.0f' % (
            len(alpha_angle_obs), len(observations.indices()))
    return observations, alpha_angle_obs
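
# Scalar check of the factors computed above for one illustrative angle:
# 1/LP = 2*sin(2*theta)/(1 + cos(2*theta)**2) and 1/P = 2/(1 + cos(2*theta)**2).
import math
two_theta = math.radians(40.0)
print(2 * math.sin(two_theta) / (1 + math.cos(two_theta)**2),
      2 / (1 + math.cos(two_theta)**2))
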
def list_6_as_miller_arrays(file_name): """ Read the file of given name and return a pair of miller arrays (F_obs^2, F_cal) """ # potentially iotbx.cif could be used here fcf = iter(open(file_name)) space_group = sgtbx.space_group() unit_cell_params = {} indices = flex.miller_index() f_obs_squares = flex.double() sigma_f_obs_squares = flex.double() f_calc_amplitudes = flex.double() f_calc_phases = flex.double() for li in fcf: if li.startswith('loop_'): for li in fcf: li = li.strip() if li == '_symmetry_equiv_pos_as_xyz': for li in fcf: li = li.strip() if not li: break space_group.expand_smx(li[1:-1]) else: for i in xrange(6): fcf.next() for li in fcf: items = li.split() if not items: break h,k,l, fo, sig_fo, fc, phase = items indices.append((int(h), int(k), int(l))) f_obs_squares.append(float(fo)) sigma_f_obs_squares.append(float(sig_fo)) f_calc_amplitudes.append(float(fc)) f_calc_phases.append(float(phase)) if not li: break elif li.startswith('_cell'): lbl, value = li.split() unit_cell_params[lbl] = float(value) unit_cell = uctbx.unit_cell( [ unit_cell_params[p] for p in ( "_cell_length_a","_cell_length_b","_cell_length_c", "_cell_angle_alpha","_cell_angle_beta","_cell_angle_gamma" ) ]) crystal_symmetry = crystal.symmetry( unit_cell=unit_cell, space_group=space_group) f_calc_phases *= pi/180 f_calc = flex.complex_double( reals=f_calc_amplitudes * flex.cos(f_calc_phases), imags=f_calc_amplitudes * flex.sin(f_calc_phases) ) miller_set = miller.set( crystal_symmetry=crystal_symmetry, indices=indices).auto_anomalous() f_obs_squares = miller.array( miller_set=miller_set, data=f_obs_squares, sigmas=sigma_f_obs_squares) f_obs_squares.set_observation_type_xray_intensity() f_obs_squares.set_info(miller.array_info( source=file_name, labels=["F_squared_meas", "F_squared_sigma"])) f_calc = miller.array( miller_set=miller_set, data=f_calc) f_obs_squares.set_info(miller.array_info( source=file_name, labels=["F_calc"])) return f_obs_squares, f_calc
def scale_frame_detail(self, result, file_name, db_mgr, out):
  # If the pickled integration file does not contain a wavelength,
  # fall back on the value given on the command line.  XXX The
  # wavelength parameter should probably be removed from master_phil
  # once all pickled integration files contain it.
  if ("wavelength" in result):
    wavelength = result["wavelength"]
  elif (self.params.wavelength is not None):
    wavelength = self.params.wavelength
  else:
    # XXX Give error, or raise exception?
    return None
  assert (wavelength > 0)

  observations = result["observations"][0]
  cos_two_polar_angle = result["cos_two_polar_angle"]

  assert observations.size() == cos_two_polar_angle.size()
  tt_vec = observations.two_theta(wavelength)
  #print "mean tt degrees",180.*flex.mean(tt_vec.data())/math.pi
  cos_tt_vec = flex.cos(tt_vec.data())
  sin_tt_vec = flex.sin(tt_vec.data())
  cos_sq_tt_vec = cos_tt_vec * cos_tt_vec
  sin_sq_tt_vec = sin_tt_vec * sin_tt_vec
  P_nought_vec = 0.5 * (1. + cos_sq_tt_vec)

  F_prime = -1.0  # Hard-coded value defines the incident polarization axis
  P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq_tt_vec

  # XXX added as a diagnostic
  prange = P_nought_vec - P_prime
  other_F_prime = 1.0
  otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec
  otherprange = P_nought_vec - otherP_prime
  diff2 = flex.abs(prange - otherprange)
  print "mean diff is", flex.mean(diff2), "range", flex.min(diff2), flex.max(diff2)
  # XXX done

  observations = observations / (P_nought_vec - P_prime)
  # This corrects observations for polarization assuming 100% polarization on
  # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0)
  # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre
  # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15.

  print "Step 3. Correct for polarization."

  indexed_cell = observations.unit_cell()

  observations_original_index = observations.deep_copy()

  if result.get("model_partialities", None) is not None \
      and result["model_partialities"][0] is not None:
    # some recordkeeping useful for simulations
    partialities_original_index = observations.customized_copy(
      crystal_symmetry=self.miller_set.crystal_symmetry(),
      data=result["model_partialities"][0]["data"],
      sigmas=flex.double(result["model_partialities"][0]["data"].size()),  #dummy value for sigmas
      indices=result["model_partialities"][0]["indices"],
      ).resolution_filter(d_min=self.params.d_min)

  assert len(observations_original_index.indices()) == len(observations.indices())

  # Now manipulate the data to conform to unit cell, asu, and space group
  # of reference.  The resolution will be cut later.
  # Only works if there is NOT an indexing ambiguity!
  observations = observations.customized_copy(
    anomalous_flag=not self.params.merge_anomalous,
    crystal_symmetry=self.miller_set.crystal_symmetry()
    ).map_to_asu()

  observations_original_index = observations_original_index.customized_copy(
    anomalous_flag=not self.params.merge_anomalous,
    crystal_symmetry=self.miller_set.crystal_symmetry())
  print "Step 4. Filter on global resolution and map to asu"
  print >> out, "Data in reference setting:"
  #observations.show_summary(f=out, prefix="  ")
  show_observations(observations, out=out)

  #if self.params.significance_filter.apply is True:
  #  raise Exception("significance filter not implemented in samosa")
  if self.params.significance_filter.apply is True:  #------------------------------------
    # Apply an I/sigma filter ... accept resolution bins only if they
    # have significant signal; tends to screen out higher resolution observations
    # if the integration model doesn't quite fit
    N_obs_pre_filter = observations.size()
    N_bins_small_set = N_obs_pre_filter // self.params.significance_filter.min_ct
    N_bins_large_set = N_obs_pre_filter // self.params.significance_filter.max_ct

    # Ensure there is at least one bin.
    N_bins = max([
      min([self.params.significance_filter.n_bins, N_bins_small_set]),
      N_bins_large_set, 1])
    print "Total obs %d Choose n bins = %d" % (N_obs_pre_filter, N_bins)
    bin_results = show_observations(observations, out=out, n_bins=N_bins)
    #show_observations(observations, out=sys.stdout, n_bins=N_bins)
    acceptable_resolution_bins = [
      bin.mean_I_sigI > self.params.significance_filter.sigma
      for bin in bin_results]
    acceptable_nested_bin_sequences = [
      i for i in xrange(len(acceptable_resolution_bins))
      if False not in acceptable_resolution_bins[:i + 1]]
    if len(acceptable_nested_bin_sequences) == 0:
      return null_data(file_name=file_name, log_out=out.getvalue(), low_signal=True)
    else:
      N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1
      imposed_res_filter = float(bin_results[N_acceptable_bins - 1].d_range.split()[2])
      imposed_res_sel = observations.resolution_filter_selection(
        d_min=imposed_res_filter)
      observations = observations.select(imposed_res_sel)
      observations_original_index = observations_original_index.select(imposed_res_sel)
      print "New resolution filter at %7.2f" % imposed_res_filter, file_name
    print "N acceptable bins", N_acceptable_bins
    print "Old n_obs: %d, new n_obs: %d" % (N_obs_pre_filter, observations.size())
    print "Step 5. Frame by frame resolution filter"
    # Finished applying the binwise I/sigma filter---------------------------------------

  if self.params.raw_data.sdfac_auto is True:
    raise Exception("sdfac auto not implemented in samosa.")

  print "Step 6. Match to reference intensities, filter by correlation, filter out negative intensities."
  assert len(observations_original_index.indices()) \
    == len(observations.indices())

  data = frame_data(self.n_refl, file_name)
  data.set_indexed_cell(indexed_cell)
  data.d_min = observations.d_min()

  # Ensure that match_multi_indices() will return identical results
  # when a frame's observations are matched against the
  # pre-generated Miller set, self.miller_set, and the reference
  # data set, self.i_model.  The implication is that the same match
  # can be used to map Miller indices to array indices for intensity
  # accumulation, and for determination of the correlation
  # coefficient in the presence of a scaling reference.
  if self.i_model is not None:
    assert len(self.i_model.indices()) == len(self.miller_set.indices()) \
      and (self.i_model.indices() == self.miller_set.indices()).count(False) == 0

  matches = miller.match_multi_indices(
    miller_indices_unique=self.miller_set.indices(),
    miller_indices=observations.indices())

  use_weights = False  # New facility for getting variance-weighted correlation

  if self.params.scaling.algorithm in ['mark1', 'levmar']:
    # Because no correlation is computed, the correlation
    # coefficient is fixed at zero.  Setting slope = 1 means
    # intensities are added without applying a scale factor.
    sum_x = 0
    sum_y = 0
    for pair in matches.pairs():
      data.n_obs += 1
      if not self.params.include_negatives and observations.data()[pair[1]] <= 0:
        data.n_rejected += 1
      else:
        sum_y += observations.data()[pair[1]]
    N = data.n_obs - data.n_rejected

  # Early return if there are no positive reflections on the frame.
  if data.n_obs <= data.n_rejected:
    return null_data(file_name=file_name, log_out=out.getvalue(), low_signal=True)

  # Update the count for each matched reflection.  This counts
  # reflections with non-positive intensities, too.
  data.completeness += matches.number_of_matches(0).as_int()
  data.wavelength = wavelength

  if not self.params.scaling.enable:  # Do not scale anything
    print "Scale factor to an isomorphous reference PDB will NOT be applied."
    slope = 1.0
    offset = 0.0

  observations_original_index_indices = observations_original_index.indices()
  if db_mgr is None:
    return unpack(MINI.x)  # special exit for two-color indexing

  kwargs = {'wavelength': wavelength,
            'beam_x': result['xbeam'],
            'beam_y': result['ybeam'],
            'distance': result['distance'],
            'unique_file_name': data.file_name}

  ORI = result["current_orientation"][0]
  Astar = matrix.sqr(ORI.reciprocal_matrix())

  kwargs['res_ori_1'] = Astar[0]
  kwargs['res_ori_2'] = Astar[1]
  kwargs['res_ori_3'] = Astar[2]
  kwargs['res_ori_4'] = Astar[3]
  kwargs['res_ori_5'] = Astar[4]
  kwargs['res_ori_6'] = Astar[5]
  kwargs['res_ori_7'] = Astar[6]
  kwargs['res_ori_8'] = Astar[7]
  kwargs['res_ori_9'] = Astar[8]

  assert self.params.scaling.report_ML is True
  kwargs['half_mosaicity_deg'] = result["ML_half_mosaicity_deg"][0]
  kwargs['domain_size_ang'] = result["ML_domain_size_ang"][0]

  frame_id_0_base = db_mgr.insert_frame(**kwargs)

  xypred = result["mapped_predictions"][0]
  indices = flex.size_t([pair[1] for pair in matches.pairs()])

  sel_observations = flex.intersection(
    size=observations.data().size(),
    iselections=[indices])
  set_original_hkl = observations_original_index_indices.select(
    flex.intersection(
      size=observations_original_index_indices.size(),
      iselections=[indices]))
  set_xypred = xypred.select(
    flex.intersection(
      size=xypred.size(),
      iselections=[indices]))

  kwargs = {'hkl_id_0_base': [pair[0] for pair in matches.pairs()],
            'i': observations.data().select(sel_observations),
            'sigi': observations.sigmas().select(sel_observations),
            'detector_x': [xy[0] for xy in set_xypred],
            'detector_y': [xy[1] for xy in set_xypred],
            'frame_id_0_base': [frame_id_0_base] * len(matches.pairs()),
            'overload_flag': [0] * len(matches.pairs()),
            'original_h': [hkl[0] for hkl in set_original_hkl],
            'original_k': [hkl[1] for hkl in set_original_hkl],
            'original_l': [hkl[2] for hkl in set_original_hkl]}

  db_mgr.insert_observation(**kwargs)

  print >> out, "Lattice: %d reflections" % (data.n_obs - data.n_rejected)
  print >> out, "average obs", sum_y / (data.n_obs - data.n_rejected), \
    "average calc", sum_x / (data.n_obs - data.n_rejected)
  print >> out, "Rejected %d reflections with negative intensities" % \
      data.n_rejected

  data.accept = True
  for pair in matches.pairs():
    if not self.params.include_negatives and (observations.data()[pair[1]] <= 0):
      continue
    Intensity = observations.data()[pair[1]]
    # Super-rare exception. If saved sigmas instead of I/sigmas in the ISIGI dict, this wouldn't be needed.
    if Intensity == 0:
      continue

    # Add the reflection as a two-tuple of intensity and I/sig(I)
    # to the dictionary of observations.
    index = self.miller_set.indices()[pair[0]]
    isigi = (Intensity,
             observations.data()[pair[1]] / observations.sigmas()[pair[1]],
             1.0)
    if index in data.ISIGI:
      data.ISIGI[index].append(isigi)
    else:
      data.ISIGI[index] = [isigi]

    sigma = observations.sigmas()[pair[1]]
    variance = sigma * sigma
    data.summed_N[pair[0]] += 1
    data.summed_wt_I[pair[0]] += Intensity / variance
    data.summed_weight[pair[0]] += 1 / variance

  data.set_log_out(out.getvalue())
  return data
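# A minimal numeric sketch (not part of scale_frame_detail above) of the Kahn et al.
# (1982) polarization factor P0 - P' that the routine divides by; the two-theta and
# cos(2*psi) values below are made-up test input, and "flex" is assumed to come from
# cctbx's scitbx.array_family.
from scitbx.array_family import flex

two_theta = flex.double([0.05, 0.20, 0.35])          # scattering angles 2-theta, radians
cos_two_polar_angle = flex.double([1.0, 0.0, -1.0])  # cos(2*psi) about the beam axis
F_prime = -1.0                                       # incident polarization axis, as above
cos_sq = flex.cos(two_theta) * flex.cos(two_theta)
sin_sq = flex.sin(two_theta) * flex.sin(two_theta)
P_nought = 0.5 * (1. + cos_sq)
P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq
print(list(P_nought - P_prime))                      # divide raw intensities by this factor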
def organize_input(self, observations_pickle, iparams, avg_mode, pickle_filename=None):
  """Given the pickle file, extract and prepare observations object and
  the alpha angle (meridional to equatorial).
  """
  identified_isoform = None
  if iparams.isoform_name is not None:
    identified_isoform = iparams.isoform_name
    if "identified_isoform" not in observations_pickle:
      return None, "No identified isoform"
    else:
      identified_isoform = observations_pickle["identified_isoform"]
      if observations_pickle["identified_isoform"] != iparams.isoform_name:
        return None, "Identified isoform(%s) is not the requested isoform (%s)" % (
            observations_pickle["identified_isoform"], iparams.isoform_name)
  if iparams.flag_weak_anomalous:
    if avg_mode == 'final':
      target_anomalous_flag = iparams.target_anomalous_flag
    else:
      target_anomalous_flag = False
  else:
    target_anomalous_flag = iparams.target_anomalous_flag
  img_filename_only = ''
  if pickle_filename is not None:
    pickle_filepaths = pickle_filename.split('/')
    img_filename_only = pickle_filepaths[len(pickle_filepaths) - 1]
  txt_exception = ' {0:40} ==> '.format(img_filename_only)
  observations = observations_pickle["observations"][0]
  detector_distance_mm = observations_pickle['distance']
  mapped_predictions = observations_pickle['mapped_predictions'][0]
  #set observations with target space group - !!! required for correct
  #merging due to map_to_asu command.
  if iparams.target_crystal_system is not None:
    target_crystal_system = iparams.target_crystal_system
  else:
    target_crystal_system = observations.crystal_symmetry().space_group().crystal_system()
  lph = lbfgs_partiality_handler()
  if iparams.flag_override_unit_cell:
    uc_constrained_inp = lph.prep_input(
        iparams.target_unit_cell.parameters(), target_crystal_system)
  else:
    uc_constrained_inp = lph.prep_input(
        observations.unit_cell().parameters(), target_crystal_system)
  uc_constrained = list(lph.prep_output(uc_constrained_inp, target_crystal_system))
  try:
    #apply constrain using the crystal system
    miller_set = symmetry(
        unit_cell=uc_constrained,
        space_group_symbol=iparams.target_space_group
      ).build_miller_set(
        anomalous_flag=target_anomalous_flag,
        d_min=iparams.merge.d_min)
    observations = observations.customized_copy(
        anomalous_flag=target_anomalous_flag,
        crystal_symmetry=miller_set.crystal_symmetry())
  except Exception:
    a, b, c, alpha, beta, gamma = uc_constrained
    txt_exception += 'Mismatch spacegroup (%6.2f,%6.2f,%6.2f,%6.2f,%6.2f,%6.2f)' % (
        a, b, c, alpha, beta, gamma)
    return None, txt_exception
  #reset systematic absence
  sys_absent_negate_flags = flex.bool([
      sys_absent_flag[1] == False
      for sys_absent_flag in observations.sys_absent_flags()])
  observations = observations.select(sys_absent_negate_flags)
  mapped_predictions = mapped_predictions.select(sys_absent_negate_flags)
  import os.path
  #remove observations from rejection list
  if os.path.isfile(iparams.run_no + '/rejections.txt'):
    txt_out = pickle_filename + ' \nN_before_rejection: ' + \
        str(len(observations.data())) + '\n'
    file_reject = open(iparams.run_no + '/rejections.txt', 'r')
    data_reject = file_reject.read().split("\n")
    miller_indices_ori_rejected = flex.miller_index()
    for row_reject in data_reject:
      col_reject = row_reject.split()
      if len(col_reject) > 0:
        if col_reject[0].strip() == pickle_filename:
          miller_indices_ori_rejected.append(
              (int(col_reject[1].strip()),
               int(col_reject[2].strip()),
               int(col_reject[3].strip())))
    if len(miller_indices_ori_rejected) > 0:
      i_sel_flag = flex.bool([True] * len(observations.data()))
      for miller_index_ori_rejected in miller_indices_ori_rejected:
        i_index_ori = 0
        for miller_index_ori in observations.indices():
          if miller_index_ori_rejected == miller_index_ori:
            i_sel_flag[i_index_ori] = False
            txt_out += ' -Discard:' + str(miller_index_ori[0]) + \
                ',' + str(miller_index_ori[1]) + ',' + str(miller_index_ori[2]) + '\n'
          i_index_ori += 1
      observations = observations.customized_copy(
          indices=observations.indices().select(i_sel_flag),
          data=observations.data().select(i_sel_flag),
          sigmas=observations.sigmas().select(i_sel_flag))
      mapped_predictions = mapped_predictions.select(i_sel_flag)
      txt_out += 'N_after_rejection: ' + str(len(observations.data())) + '\n'
  #filter resolution
  i_sel_res = observations.resolution_filter_selection(
      d_max=iparams.merge.d_max, d_min=iparams.merge.d_min)
  observations = observations.select(i_sel_res)
  mapped_predictions = mapped_predictions.select(i_sel_res)
  #Filter weak
  i_sel = (observations.data()/observations.sigmas()) > iparams.merge.sigma_min
  observations = observations.select(i_sel)
  mapped_predictions = mapped_predictions.select(i_sel)
  #filter icering (if on)
  #replacing sigI (if set)
  if iparams.flag_replace_sigI:
    observations = observations.customized_copy(sigmas=flex.sqrt(observations.data()))
  #setup spot prediction
  mm_predictions = iparams.pixel_size_mm * mapped_predictions
  xbeam = observations_pickle["xbeam"]
  ybeam = observations_pickle["ybeam"]
  alpha_angle_obs = flex.double([math.atan(abs(pred[0]-xbeam)/abs(pred[1]-ybeam)) \
                                 for pred in mm_predictions])
  spot_pred_x_mm = flex.double([pred[0] - xbeam for pred in mm_predictions])
  spot_pred_y_mm = flex.double([pred[1] - ybeam for pred in mm_predictions])
  #Polarization correction
  wavelength = observations_pickle["wavelength"]
  if iparams.flag_LP_correction:
    fx = 1 - iparams.polarization_horizontal_fraction
    fy = 1 - fx
    if fx > 1.0 or fx < 0:
      print 'Horizontal polarization fraction is not correct. The value must be >= 0 and <= 1'
      print 'No polarization correction. Continue with post-refinement'
    else:
      phi_angle_obs = flex.double([math.atan2(pred[1]-ybeam, pred[0]-xbeam) \
                                   for pred in mm_predictions])
      bragg_angle_obs = observations.two_theta(wavelength).data()
      P = ((fx*((flex.sin(phi_angle_obs)**2)+((flex.cos(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2)))+\
           (fy*((flex.cos(phi_angle_obs)**2)+((flex.sin(phi_angle_obs)**2)*flex.cos(bragg_angle_obs)**2))))
      I_prime = observations.data()/P
      sigI_prime = observations.sigmas()/P
      observations = observations.customized_copy(
          data=flex.double(I_prime), sigmas=flex.double(sigI_prime))
  inputs = observations, alpha_angle_obs, spot_pred_x_mm, spot_pred_y_mm, \
      detector_distance_mm, identified_isoform, \
      mapped_predictions, xbeam, ybeam
  return inputs, 'OK'
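# A small self-contained sketch (assumptions: flex from scitbx.array_family; the phi
# and 2-theta values are invented) of the anisotropic polarization factor used above,
# P = fx*(sin^2(phi) + cos^2(phi)*cos^2(2theta)) + fy*(cos^2(phi) + sin^2(phi)*cos^2(2theta)):
import math
from scitbx.array_family import flex

fx = 1 - 0.99                                    # e.g. polarization_horizontal_fraction = 0.99
fy = 1 - fx
phi = flex.double([0.0, math.pi/4, math.pi/2])   # azimuth of each spot about the beam
two_theta = flex.double([0.10, 0.20, 0.30])      # scattering angles, radians
P = (fx*((flex.sin(phi)**2) + (flex.cos(phi)**2)*flex.cos(two_theta)**2) +
     fy*((flex.cos(phi)**2) + (flex.sin(phi)**2)*flex.cos(two_theta)**2))
print(list(P))                                   # corrected I = I / P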