def _export_experiment(filename, integrated_data, experiment, params, var_model=(1, 0)):
    # type: (str, flex.reflection_table, dxtbx.model.Experiment, libtbx.phil.scope_extract, Tuple) -> None
    """Export a single experiment to an XDS_ASCII.HKL format file.

    Args:
        filename: The file to write to
        integrated_data: The reflection table, pre-selected to one experiment
        experiment: The experiment list entry to export
        params: The PHIL configuration object
        var_model: The variance model (a, b); sigmas are sqrt(a * (V + b * I^2))
    """
    # export for xds_ascii should only be for non-scaled reflections
    assert any(
        i in integrated_data for i in ["intensity.sum.value", "intensity.prf.value"]
    )
    # Handle requesting profile intensities (default via auto) but no column
    if "profile" in params.intensity and "intensity.prf.value" not in integrated_data:
        raise Sorry(
            "Requested profile intensity data but only summed present. Use intensity=sum."
        )

    integrated_data = filter_reflection_table(
        integrated_data,
        intensity_choice=params.intensity,
        partiality_threshold=params.mtz.partiality_threshold,
        combine_partials=params.mtz.combine_partials,
        min_isigi=params.mtz.min_isigi,
        filter_ice_rings=params.mtz.filter_ice_rings,
        d_min=params.mtz.d_min,
    )

    # calculate the scl = lp/dqe correction for outputting but don't apply it as
    # it has already been applied in filter_reflection_table
    (
        integrated_data,
        scl,
    ) = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(integrated_data)

    # sort data before output
    nref = len(integrated_data["miller_index"])
    indices = flex.size_t_range(nref)
    unique = copy.deepcopy(integrated_data["miller_index"])

    map_to_asu(experiment.crystal.get_space_group().type(), False, unique)

    perm = sorted(indices, key=lambda k: unique[k])
    integrated_data = integrated_data.select(flex.size_t(perm))

    if experiment.goniometer is None:
        print("Warning: No goniometer. Experimentally exporting with (1 0 0) axis")

    unit_cell = experiment.crystal.get_unit_cell()

    if experiment.scan is None:
        print("Warning: No Scan. Experimentally exporting no-oscillation values")
        image_range = (1, 1)
        phi_start, phi_range = 0.0, 0.0
    else:
        image_range = experiment.scan.get_image_range()
        phi_start, phi_range = experiment.scan.get_image_oscillation(image_range[0])

    # gather the required information for the reflection file
    nref = len(integrated_data["miller_index"])

    miller_index = integrated_data["miller_index"]

    # profile correlation
    if "profile.correlation" in integrated_data:
        prof_corr = 100.0 * integrated_data["profile.correlation"]
    else:
        prof_corr = flex.double(nref, 100.0)

    # partiality
    if "partiality" in integrated_data:
        partiality = 100 * integrated_data["partiality"]
    else:
        partiality = flex.double(nref, 100.0)

    if "intensity.sum.value" in integrated_data:
        I = integrated_data["intensity.sum.value"]
        V = integrated_data["intensity.sum.variance"]
        assert V.all_gt(0)
        V = var_model[0] * (V + var_model[1] * I * I)
        sigI = flex.sqrt(V)
    else:
        I = integrated_data["intensity.prf.value"]
        V = integrated_data["intensity.prf.variance"]
        assert V.all_gt(0)
        V = var_model[0] * (V + var_model[1] * I * I)
        sigI = flex.sqrt(V)

    fout = open(filename, "w")

    # first write the header - in the "standard" coordinate frame...
    panel = experiment.detector[0]

    fast = panel.get_fast_axis()
    slow = panel.get_slow_axis()

    Rd = align_reference_frame(fast, (1, 0, 0), slow, (0, 1, 0))

    print("Coordinate change:")
    print("%5.2f %5.2f %5.2f\n%5.2f %5.2f %5.2f\n%5.2f %5.2f %5.2f\n" % Rd.elems)

    fast = Rd * fast
    slow = Rd * slow

    qx, qy = panel.get_pixel_size()
    nx, ny = panel.get_image_size()
    distance = matrix.col(Rd * panel.get_origin()).dot(
        matrix.col(Rd * panel.get_normal())
    )
    org = Rd * (
        matrix.col(panel.get_origin()) - distance * matrix.col(panel.get_normal())
    )
    orgx = -org.dot(fast) / qx
    orgy = -org.dot(slow) / qy

    UB = Rd * matrix.sqr(experiment.crystal.get_A())
    real_space_ABC = UB.inverse().elems

    if experiment.goniometer is not None:
        axis = Rd * experiment.goniometer.get_rotation_axis()
    else:
        axis = Rd * (1, 0, 0)

    beam = Rd * experiment.beam.get_s0()
    cell_fmt = "%9.3f %9.3f %9.3f %7.3f %7.3f %7.3f"
    axis_fmt = "%9.3f %9.3f %9.3f"

    fout.write(
        "\n".join(
            [
                "!FORMAT=XDS_ASCII MERGE=FALSE FRIEDEL'S_LAW=TRUE",
                "!Generated by dials.export",
                "!DATA_RANGE= %d %d" % image_range,
                "!ROTATION_AXIS= %9.6f %9.6f %9.6f" % axis.elems,
                "!OSCILLATION_RANGE= %f" % phi_range,
                "!STARTING_ANGLE= %f" % phi_start,
                "!STARTING_FRAME= %d" % image_range[0],
                "!SPACE_GROUP_NUMBER= %d"
                % experiment.crystal.get_space_group().type().number(),
                "!UNIT_CELL_CONSTANTS= %s" % (cell_fmt % unit_cell.parameters()),
                "!UNIT_CELL_A-AXIS= %s" % (axis_fmt % real_space_ABC[0:3]),
                "!UNIT_CELL_B-AXIS= %s" % (axis_fmt % real_space_ABC[3:6]),
                "!UNIT_CELL_C-AXIS= %s" % (axis_fmt % real_space_ABC[6:9]),
                "!X-RAY_WAVELENGTH= %f" % experiment.beam.get_wavelength(),
                "!INCIDENT_BEAM_DIRECTION= %f %f %f" % beam.elems,
                "!NX= %d NY= %d QX= %f QY= %f" % (nx, ny, qx, qy),
                "!ORGX= %9.2f ORGY= %9.2f" % (orgx, orgy),
                "!DETECTOR_DISTANCE= %8.3f" % distance,
                "!DIRECTION_OF_DETECTOR_X-AXIS= %9.5f %9.5f %9.5f" % fast.elems,
                "!DIRECTION_OF_DETECTOR_Y-AXIS= %9.5f %9.5f %9.5f" % slow.elems,
                "!VARIANCE_MODEL= %7.3e %7.3e" % var_model,
                "!NUMBER_OF_ITEMS_IN_EACH_DATA_RECORD=12",
                "!ITEM_H=1",
                "!ITEM_K=2",
                "!ITEM_L=3",
                "!ITEM_IOBS=4",
                "!ITEM_SIGMA(IOBS)=5",
                "!ITEM_XD=6",
                "!ITEM_YD=7",
                "!ITEM_ZD=8",
                "!ITEM_RLP=9",
                "!ITEM_PEAK=10",
                "!ITEM_CORR=11",
                "!ITEM_PSI=12",
                "!END_OF_HEADER",
                "",
            ]
        )
    )

    # then write the data records
    s0 = Rd * matrix.col(experiment.beam.get_s0())

    for j in range(nref):
        x, y, z = integrated_data["xyzcal.px"][j]
        phi = phi_start + z * phi_range
        h, k, l = miller_index[j]
        X = (UB * (h, k, l)).rotate(axis, phi, deg=True)
        s = s0 + X
        g = s.cross(s0).normalize()

        # find component of beam perpendicular to f, e
        e = -(s + s0).normalize()
        if h == k and k == l:
            u = (h, -h, 0)
        else:
            u = (k - l, l - h, h - k)
        q = (
            (matrix.col(u).transpose() * UB.inverse())
            .normalize()
            .transpose()
            .rotate(axis, phi, deg=True)
        )

        psi = q.angle(g, deg=True)
        if q.dot(e) < 0:
            psi *= -1

        fout.write(
            "%d %d %d %f %f %f %f %f %f %.1f %.1f %f\n"
            % (
                h,
                k,
                l,
                I[j],
                sigI[j],
                x,
                y,
                z,
                scl[j],
                partiality[j],
                prof_corr[j],
                psi,
            )
        )

    fout.write("!END_OF_DATA\n")
    fout.close()

    logger.info("Output %d reflections to %s" % (nref, filename))
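
# --- Hedged usage sketch (illustration only, not part of the original module) ---
# _export_experiment handles exactly one experiment, so a multi-experiment caller
# would be expected to split the reflection table on its "id" column first. The
# driver name and the filename pattern below are assumptions for illustration.
def _export_all_experiments_sketch(experiments, reflections, params):
    """Hypothetical driver: write one XDS_ASCII file per experiment."""
    for i, experiment in enumerate(experiments):
        # select only the reflections assigned to this experiment
        selected = reflections.select(reflections["id"] == i)
        _export_experiment(
            "XDS_ASCII_%d.HKL" % (i + 1), selected, experiment, params
        )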
def test_FilteringReductionMethods():
    """Test the FilteringReductionMethods class."""
    # Test ice ring filtering
    reflections = generate_simple_table()
    reflections = FilteringReductionMethods.filter_ice_rings(reflections)
    assert list(reflections["intensity.prf.value"]) == [2.0, 3.0]

    # Test filtering on I/sigI
    reflections = generate_simple_table()
    reflections = FilteringReductionMethods._filter_on_min_isigi(
        reflections, "prf", 2.5
    )
    assert list(reflections["intensity.prf.value"]) == [3.0]

    # Test bad variance filtering
    reflections = generate_simple_table()
    reflections["intensity.prf.variance"][0] = 0.0
    reflections = FilteringReductionMethods._filter_bad_variances(reflections, "prf")
    assert list(reflections["intensity.prf.value"]) == [2.0, 3.0]

    # Test filtering on dmin
    reflections = generate_simple_table()
    reflections["d"] = flex.double([1.0, 2.0, 3.0])
    reflections = FilteringReductionMethods.filter_on_d_min(reflections, 1.5)
    assert list(reflections["d"]) == [2.0, 3.0]

    # test calculate_lp_qe_correction_and_filter - the correction should be lp/qe
    # cases: no lp/qe columns, lp only, lp with qe (including qe of zero), and the
    # legacy dqe column
    r = flex.reflection_table()
    r["data"] = flex.double([1.0, 2.0, 3.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([1.0, 1.0, 1.0])
    r["lp"] = flex.double([1.0, 0.5, 1.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([1.0, 0.5, 1.0])
    r["qe"] = flex.double([0.25, 1.0, 0.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([4.0, 0.5])
    del r["qe"]
    r["dqe"] = flex.double([0.25, 0.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([4.0])

    # test filter unassigned
    r = flex.reflection_table()
    r["id"] = flex.int([-1, 0])
    r["i"] = flex.double([1.0, 2.0])
    r = FilteringReductionMethods.filter_unassigned_reflections(r)
    assert list(r["i"]) == [2.0]

    with mock.patch(
        fpath + ".sum_partial_reflections",
        side_effect=return_reflections_side_effect,
    ) as sum_partials:
        reflections = generate_simple_table()
        reflections = FilteringReductionMethods.combine_and_filter_partials(
            reflections, partiality_threshold=0.7
        )
        assert sum_partials.call_count == 1
        assert list(reflections["intensity.prf.value"]) == [1.0, 2.0]

        reflections = generate_simple_table()
        FilteringReductionMethods.combine_and_filter_partials(
            reflections, partiality_threshold=0.4
        )
        assert sum_partials.call_count == 2
        assert list(reflections["intensity.prf.value"]) == [1.0, 2.0, 3.0]
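
# --- Hedged sketch (assumption; the real helper is defined elsewhere in the test
# module) --- The side_effect patched in above presumably just returns its input,
# so sum_partial_reflections becomes a pass-through while the mock still records
# how many times it was called.
def return_reflections_side_effect_sketch(reflections, *args, **kwargs):
    """Hypothetical stand-in mirroring the patched helper's expected behaviour."""
    return reflections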
def export_xds_ascii(integrated_data, experiment_list, params, var_model=(1, 0)):
    """Export data from integrated_data corresponding to experiment_list to
    an XDS_ASCII.HKL formatted text file."""

    from dials.array_family import flex

    # for the moment assume (and assert) that we will convert data from exactly
    # one lattice...
    assert len(experiment_list) == 1

    # select reflections that are assigned to an experiment (i.e. non-negative id)
    integrated_data = integrated_data.select(integrated_data["id"] >= 0)
    assert max(integrated_data["id"]) == 0

    # export for xds_ascii should only be for non-scaled reflections
    assert any(
        [i in integrated_data for i in ["intensity.sum.value", "intensity.prf.value"]]
    )

    integrated_data = filter_reflection_table(
        integrated_data,
        intensity_choice=params.intensity,
        partiality_threshold=params.mtz.partiality_threshold,
        combine_partials=params.mtz.combine_partials,
        min_isigi=params.mtz.min_isigi,
        filter_ice_rings=params.mtz.filter_ice_rings,
        d_min=params.mtz.d_min,
    )

    # calculate the scl = lp/dqe correction for outputting but don't apply it as
    # it has already been applied in filter_reflection_table
    integrated_data, scl = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(
        integrated_data
    )

    experiment = experiment_list[0]

    # sort data before output
    nref = len(integrated_data["miller_index"])
    indices = flex.size_t_range(nref)

    import copy

    unique = copy.deepcopy(integrated_data["miller_index"])

    from cctbx.miller import map_to_asu

    map_to_asu(experiment.crystal.get_space_group().type(), False, unique)

    perm = sorted(indices, key=lambda k: unique[k])
    integrated_data = integrated_data.select(flex.size_t(perm))

    from scitbx import matrix
    from rstbx.cftbx.coordinate_frame_helpers import align_reference_frame

    assert experiment.goniometer is not None

    unit_cell = experiment.crystal.get_unit_cell()

    from scitbx.array_family import flex

    assert experiment.scan is not None
    image_range = experiment.scan.get_image_range()
    phi_start, phi_range = experiment.scan.get_image_oscillation(image_range[0])

    # gather the required information for the reflection file
    nref = len(integrated_data["miller_index"])
    zdet = flex.double(integrated_data["xyzcal.px"].parts()[2])

    miller_index = integrated_data["miller_index"]

    # profile correlation
    if "profile.correlation" in integrated_data:
        prof_corr = 100.0 * integrated_data["profile.correlation"]
    else:
        prof_corr = flex.double(nref, 100.0)

    # partiality
    if "partiality" in integrated_data:
        partiality = 100 * integrated_data["partiality"]
    else:
        partiality = flex.double(nref, 100.0)

    if "intensity.sum.value" in integrated_data:
        I = integrated_data["intensity.sum.value"]
        V = integrated_data["intensity.sum.variance"]
        assert V.all_gt(0)
        V = var_model[0] * (V + var_model[1] * I * I)
        sigI = flex.sqrt(V)
    else:
        I = integrated_data["intensity.prf.value"]
        V = integrated_data["intensity.prf.variance"]
        assert V.all_gt(0)
        V = var_model[0] * (V + var_model[1] * I * I)
        sigI = flex.sqrt(V)

    fout = open(params.xds_ascii.hklout, "w")

    # first write the header - in the "standard" coordinate frame...
    panel = experiment.detector[0]

    fast = panel.get_fast_axis()
    slow = panel.get_slow_axis()

    Rd = align_reference_frame(fast, (1, 0, 0), slow, (0, 1, 0))

    print("Coordinate change:")
    print("%5.2f %5.2f %5.2f\n%5.2f %5.2f %5.2f\n%5.2f %5.2f %5.2f\n" % Rd.elems)

    fast = Rd * fast
    slow = Rd * slow

    qx, qy = panel.get_pixel_size()
    nx, ny = panel.get_image_size()
    distance = matrix.col(Rd * panel.get_origin()).dot(
        matrix.col(Rd * panel.get_normal())
    )
    org = Rd * (
        matrix.col(panel.get_origin()) - distance * matrix.col(panel.get_normal())
    )
    orgx = -org.dot(fast) / qx
    orgy = -org.dot(slow) / qy

    UB = Rd * matrix.sqr(experiment.crystal.get_A())
    real_space_ABC = UB.inverse().elems
    axis = Rd * experiment.goniometer.get_rotation_axis()
    beam = Rd * experiment.beam.get_s0()
    cell_fmt = "%9.3f %9.3f %9.3f %7.3f %7.3f %7.3f"
    axis_fmt = "%9.3f %9.3f %9.3f"

    fout.write(
        "\n".join(
            [
                "!FORMAT=XDS_ASCII MERGE=FALSE FRIEDEL'S_LAW=TRUE",
                "!Generated by dials.export",
                "!DATA_RANGE= %d %d" % image_range,
                "!ROTATION_AXIS= %9.6f %9.6f %9.6f" % axis.elems,
                "!OSCILLATION_RANGE= %f" % phi_range,
                "!STARTING_ANGLE= %f" % phi_start,
                "!STARTING_FRAME= %d" % image_range[0],
                "!SPACE_GROUP_NUMBER= %d"
                % experiment.crystal.get_space_group().type().number(),
                "!UNIT_CELL_CONSTANTS= %s" % (cell_fmt % unit_cell.parameters()),
                "!UNIT_CELL_A-AXIS= %s" % (axis_fmt % real_space_ABC[0:3]),
                "!UNIT_CELL_B-AXIS= %s" % (axis_fmt % real_space_ABC[3:6]),
                "!UNIT_CELL_C-AXIS= %s" % (axis_fmt % real_space_ABC[6:9]),
                "!X-RAY_WAVELENGTH= %f" % experiment.beam.get_wavelength(),
                "!INCIDENT_BEAM_DIRECTION= %f %f %f" % beam.elems,
                "!NX= %d NY= %d QX= %f QY= %f" % (nx, ny, qx, qy),
                "!ORGX= %9.2f ORGY= %9.2f" % (orgx, orgy),
                "!DETECTOR_DISTANCE= %8.3f" % distance,
                "!DIRECTION_OF_DETECTOR_X-AXIS= %9.5f %9.5f %9.5f" % fast.elems,
                "!DIRECTION_OF_DETECTOR_Y-AXIS= %9.5f %9.5f %9.5f" % slow.elems,
                "!VARIANCE_MODEL= %7.3e %7.3e" % var_model,
                "!NUMBER_OF_ITEMS_IN_EACH_DATA_RECORD=12",
                "!ITEM_H=1",
                "!ITEM_K=2",
                "!ITEM_L=3",
                "!ITEM_IOBS=4",
                "!ITEM_SIGMA(IOBS)=5",
                "!ITEM_XD=6",
                "!ITEM_YD=7",
                "!ITEM_ZD=8",
                "!ITEM_RLP=9",
                "!ITEM_PEAK=10",
                "!ITEM_CORR=11",
                "!ITEM_PSI=12",
                "!END_OF_HEADER",
                "",
            ]
        )
    )

    # then write the data records
    s0 = Rd * matrix.col(experiment.beam.get_s0())

    for j in range(nref):
        x, y, z = integrated_data["xyzcal.px"][j]
        phi = phi_start + z * phi_range
        h, k, l = miller_index[j]
        X = (UB * (h, k, l)).rotate(axis, phi, deg=True)
        s = s0 + X
        g = s.cross(s0).normalize()
        f = (s - s0).normalize()

        # find component of beam perpendicular to f, e
        e = -(s + s0).normalize()
        if h == k and k == l:
            u = (h, -h, 0)
        else:
            u = (k - l, l - h, h - k)
        q = (
            (matrix.col(u).transpose() * UB.inverse())
            .normalize()
            .transpose()
            .rotate(axis, phi, deg=True)
        )

        psi = q.angle(g, deg=True)
        if q.dot(e) < 0:
            psi *= -1

        fout.write(
            "%d %d %d %f %f %f %f %f %f %.1f %.1f %f\n"
            % (
                h,
                k,
                l,
                I[j],
                sigI[j],
                x,
                y,
                z,
                scl[j],
                partiality[j],
                prof_corr[j],
                psi,
            )
        )

    fout.write("!END_OF_DATA\n")
    fout.close()

    logger.info("Output %d reflections to %s" % (nref, params.xds_ascii.hklout))
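
# --- Hedged sketch (illustration only; the helper name is an assumption) ---
# Both export paths above inflate variances with the XDS-style model that is also
# written to the !VARIANCE_MODEL header record: sigma(I) = sqrt(a * (V + b * I^2)).
def _apply_variance_model_sketch(I, V, var_model=(1, 0)):
    """Return sigma(I) after applying the (a, b) variance model to variances V."""
    from dials.array_family import flex

    a, b = var_model
    return flex.sqrt(a * (V + b * I * I))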