def test_load_and_dump():
    """Round-trip two serialized gaussian_rs profile models through Model.from_dict."""
    from dials.algorithms.profile_model.gaussian_rs import Model

    params_a = {"__id__": "gaussian_rs", "n_sigma": 3, "sigma_b": 1, "sigma_m": 2}
    params_b = {"__id__": "gaussian_rs", "n_sigma": 2, "sigma_b": 4, "sigma_m": 5}

    model_a = Model.from_dict(params_a)
    model_b = Model.from_dict(params_b)

    # Each deserialized model must report exactly the values it was built from.
    assert model_a.n_sigma() == 3
    assert model_a.sigma_b() == 1
    assert model_a.sigma_m() == 2
    assert model_b.n_sigma() == 2
    assert model_b.sigma_b() == 4
    assert model_b.sigma_m() == 5
def test_integrator_3d(dials_data, nproc):
    """Run the 3D integrator over the centroid test data with a fixed profile model."""
    from math import pi

    from dxtbx.model.experiment_list import ExperimentListFactory

    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    experiments_path = dials_data("centroid_test_data").join("experiments.json").strpath
    experiments = ExperimentListFactory.from_json_file(experiments_path)

    # Attach a gaussian_rs profile model (angles supplied in radians).
    experiments[0].profile = Model(
        None, n_sigma=3, sigma_b=0.024 * pi / 180.0, sigma_m=0.044 * pi / 180.0
    )

    reflections = flex.reflection_table.from_predictions(experiments[0])
    reflections["id"] = flex.int(len(reflections), 0)
    reflections.compute_bbox(experiments)
    reflections.compute_zeta_multi(experiments)
    reflections.compute_d(experiments)

    from libtbx.phil import parse

    from dials.algorithms.integration.integrator import Integrator3D, phil_scope

    # Disable profile fitting; block size and nproc come from the test fixture.
    params = phil_scope.fetch(
        parse(
            """
      integration.block.size=%d
      integration.mp.nproc=%d
      integration.profile_fitting=False
    """
            % (5, nproc)
        )
    ).extract()

    integrator = Integrator3D(experiments, reflections, params)
    integrator.integrate()
def predict_reflections(self):
    """Predict reflections for the sweep and find overlapping bounding boxes.

    Returns a (reflection_table, overlaps) pair, where overlaps is the
    structure produced by shoebox.find_overlapping over the 'bbox' column.
    """
    from dxtbx.model.experiment_list import Experiment, ExperimentList

    from dials.algorithms import shoebox
    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    # Pull the component models out of the sweep.
    self.beam = self.sweep.get_beam()
    self.detector = self.sweep.get_detector()
    self.gonio = self.sweep.get_goniometer()
    self.scan = self.sweep.get_scan()

    sigma_b = self.beam.get_sigma_divergence(deg=True)
    sigma_m = self.crystal.get_mosaicity(deg=True)

    # Build a single-experiment list carrying a gaussian_rs profile model
    # (sigma values passed in degrees, hence deg=True).
    experiments = ExperimentList()
    experiments.append(
        Experiment(
            imageset=self.sweep,
            beam=self.beam,
            detector=self.detector,
            goniometer=self.gonio,
            scan=self.scan,
            crystal=self.crystal,
            profile=Model(None, 3, sigma_b, sigma_m, deg=True),
        )
    )

    predicted = flex.reflection_table.from_predictions(experiments[0])
    predicted['id'] = flex.int(len(predicted), 0)
    predicted.compute_bbox(experiments)

    # Find overlapping reflections
    overlaps = shoebox.find_overlapping(predicted['bbox'])

    # Return the reflections and overlaps
    return predicted, overlaps
class Test(object):
    """Fixture loading a fake long-scan experiment list with a gaussian_rs profile.

    Exposes: experiment, experiments, n_sigma, sigma_b, sigma_m, profile_model.
    """

    def __init__(self):
        import os
        from math import pi

        import libtbx.load_env

        try:
            dials_regression = libtbx.env.dist_path('dials_regression')
        except KeyError:
            # Fixed: Python-2-only "except KeyError, e:" and print-statement
            # syntax are errors under Python 3.
            print('FAIL: dials_regression not configured')
            exit(0)

        filename = os.path.join(dials_regression, 'centroid_test_data',
                                'fake_long_experiments.json')

        # NOTE(review): older dxtbx import path; other blocks in this file use
        # dxtbx.model.experiment_list — confirm which one this environment has.
        from dxtbx.model.experiment.experiment_list import ExperimentListFactory
        from dxtbx.model.experiment.experiment_list import ExperimentList

        exlist = ExperimentListFactory.from_json_file(filename)
        assert len(exlist) == 1
        self.experiment = exlist[0]

        # Set the delta_divergence/mosaicity (angles in radians)
        self.n_sigma = 5
        self.sigma_b = 0.060 * pi / 180
        self.sigma_m = 0.154 * pi / 180

        from dials.algorithms.profile_model.gaussian_rs import Model
        self.profile_model = Model(None, self.n_sigma, self.sigma_b, self.sigma_m)
        self.experiment.profile = self.profile_model
        self.experiments = ExperimentList()
        self.experiments.append(self.experiment)
class TestSummation(object):
    """Fixture predicting reflections on centroid test data for summation tests.

    Exposes: rlist (predicted reflection table), exlist (experiment list).
    """

    def __init__(self):
        from math import pi
        from os.path import join

        import libtbx.load_env
        from dxtbx.model.experiment_list import ExperimentListFactory

        from dials.algorithms.profile_model.gaussian_rs import Model
        from dials.array_family import flex

        try:
            dials_regression = libtbx.env.dist_path('dials_regression')
        except KeyError:
            # Fixed: Python-2-only "except KeyError, e:" and print-statement
            # syntax are errors under Python 3.
            print('FAIL: dials_regression not configured')
            exit(0)

        path = join(dials_regression, "centroid_test_data", "experiments.json")

        exlist = ExperimentListFactory.from_json_file(path)
        # Attach a gaussian_rs profile model (angles in radians).
        exlist[0].profile = Model(None,
                                  n_sigma=3,
                                  sigma_b=0.024 * pi / 180.0,
                                  sigma_m=0.044 * pi / 180.0)

        rlist = flex.reflection_table.from_predictions(exlist[0])
        rlist['id'] = flex.int(len(rlist), 0)
        self.rlist = rlist
        self.exlist = exlist
def __init__(self):
    """Load the centroid test sweep/crystal and build a foreground mask calculator.

    Sets up sweep, crystal, beam/detector/goniometer/scan models, the
    gaussian_rs profile model, and self.mask_foreground (MaskCalculator3D).
    """
    import os

    import libtbx.load_env
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    from dxtbx.serialize import load

    from dials.algorithms.profile_model.gaussian_rs import MaskCalculator3D, Model

    try:
        dials_regression = libtbx.env.dist_path('dials_regression')
    except KeyError:
        # Fixed: the original used the Python-2-only print-statement syntax,
        # which is an error under Python 3.
        print('FAIL: dials_regression not configured')
        exit(0)

    # Set the sweep filename and load the sweep
    sweep_filename = os.path.join(dials_regression, 'centroid_test_data',
                                  'sweep.json')
    crystal_filename = os.path.join(dials_regression, 'centroid_test_data',
                                    'crystal.json')

    # Load the sweep
    self.sweep = load.imageset(sweep_filename)
    self.crystal = load.crystal(crystal_filename)
    self.beam = self.sweep.get_beam()
    self.detector = self.sweep.get_detector()
    self.goniometer = self.sweep.get_goniometer()
    self.scan = self.sweep.get_scan()

    # Mask widths: n-sigma multiples of divergence/mosaicity (radians).
    self.delta_d = 3 * self.beam.get_sigma_divergence(deg=False)
    try:
        mosaicity = self.crystal.get_mosaicity(deg=False)
    except AttributeError:
        # Crystal model without a mosaicity attribute: treat as zero.
        mosaicity = 0
    self.delta_m = 3 * mosaicity
    self.nsigma = 3
    self.profile_model = Model(None, self.nsigma,
                               self.beam.get_sigma_divergence(deg=False),
                               mosaicity)
    self.experiment = ExperimentList()
    self.experiment.append(
        Experiment(imageset=self.sweep,
                   beam=self.beam,
                   detector=self.detector,
                   goniometer=self.goniometer,
                   scan=self.scan,
                   crystal=self.crystal,
                   profile=self.profile_model))
    assert len(self.detector) == 1

    # Get the function object to mask the foreground
    self.mask_foreground = MaskCalculator3D(self.beam, self.detector,
                                            self.goniometer, self.scan,
                                            self.delta_d, self.delta_m)
def test_load_and_dump():
    """Check Model.from_dict reproduces the serialized gaussian_rs parameters."""
    from dials.algorithms.profile_model.gaussian_rs import Model

    # Deserialize two models and verify each accessor against its source dict.
    cases = (
        ({'__id__': 'gaussian_rs', 'n_sigma': 3, 'sigma_b': 1, 'sigma_m': 2},
         (3, 1, 2)),
        ({'__id__': 'gaussian_rs', 'n_sigma': 2, 'sigma_b': 4, 'sigma_m': 5},
         (2, 4, 5)),
    )
    for source, (n_sigma, sigma_b, sigma_m) in cases:
        model = Model.from_dict(source)
        assert model.n_sigma() == n_sigma
        assert model.sigma_b() == sigma_b
        assert model.sigma_m() == sigma_m
def test_shoebox_memory_is_a_reasonable_guesstimate(dials_data):
    """Sanity-check JobList.shoebox_memory against a known-size prediction set."""
    path = dials_data("centroid_test_data").join("experiments.json").strpath
    experiment = ExperimentListFactory.from_json_file(path)[0]
    experiment.profile = Model(
        None,
        n_sigma=3,
        sigma_b=0.024 * math.pi / 180.0,
        sigma_m=0.044 * math.pi / 180.0,
    )

    reflections = flex.reflection_table.from_predictions(experiment)
    reflections["id"] = flex.int(len(reflections), 0)
    # Give every reflection an identical unit-sized bounding box.
    reflections["bbox"] = flex.int6(reflections.size(), (0, 1, 0, 1, 0, 1))

    jobs = JobList()
    jobs.add((0, 1), (0, 9), 9)

    # The estimate should be in the same ballpark whether or not the
    # shoeboxes are flattened.
    for flatten in (True, False):
        estimates = list(jobs.shoebox_memory(reflections, flatten))
        assert len(estimates) == 1
        assert estimates[0] == pytest.approx(23952, abs=3000)
def test_summation(dials_data):
    """Integrate by every summation integrator and cross-check the results.

    3d must agree with flat3d, 2d must agree with single2d, and the 2d
    partials must sum back to the 3d totals.
    """
    from math import pi

    from dxtbx.model.experiment_list import ExperimentListFactory

    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    path = dials_data("centroid_test_data").join("experiments.json").strpath

    exlist = ExperimentListFactory.from_json_file(path)
    exlist[0].profile = Model(
        None, n_sigma=3, sigma_b=0.024 * pi / 180.0, sigma_m=0.044 * pi / 180.0
    )

    rlist = flex.reflection_table.from_predictions(exlist[0])
    rlist["id"] = flex.int(len(rlist), 0)

    def integrate(integrator_type, rlist):
        # Build an integrator of the requested type and run it on a copy.
        from libtbx.phil import parse

        from dials.algorithms.integration.integrator import create_integrator
        from dials.algorithms.integration.integrator import (
            phil_scope as master_phil_scope,
        )

        rlist = rlist.copy()
        phil_scope = parse(f"""
      integration.background.algorithm=null
      integration.intensity.algorithm=sum
      integration.intensity.sum.integrator={integrator_type}
      integration.block.size=0.5
      integration.profile_fitting=False
    """)
        params = master_phil_scope.fetch(source=phil_scope).extract()
        integrator = create_integrator(params, exlist, rlist)
        return integrator.integrate()

    from libtbx.test_utils import approx_equal

    def approx_equal_dict(a, b, k):
        return approx_equal(a[k], b[k])

    # Do summation by all different methods
    result1 = integrate("3d", rlist)
    result2 = integrate("flat3d", rlist)
    result3 = integrate("2d", rlist)
    result4 = integrate("single2d", rlist)
    for result in (result1, result2, result3, result4):
        assert len(result) >= len(rlist)

    exact_keys = ("partial_id", "bbox", "entering", "flags", "id",
                  "miller_index", "panel")
    approx_keys = ("d", "intensity.sum.value", "intensity.sum.variance", "lp",
                   "partiality", "s1", "xyzcal.mm", "xyzcal.px", "zeta")

    # result1 and result2 should be the same
    assert len(result1) == len(result2)
    for r1, r2 in zip(result1.rows(), result2.rows()):
        for key in exact_keys:
            assert r1[key] == r2[key]
        for key in approx_keys:
            assert approx_equal_dict(r1, r2, key)

    # result3 and result4 should be the same (observed centroids included)
    assert len(result3) == len(result4)
    for r3, r4 in zip(result3.rows(), result4.rows()):
        for key in exact_keys:
            assert r3[key] == r4[key]
        for key in approx_keys + ("xyzobs.px.value", "xyzobs.px.variance"):
            assert approx_equal_dict(r3, r4, key)

    # result3 should add up to result1
    assert len(result3) >= len(result1)

    def accumulate(result):
        # Sum the partial intensities back onto the original prediction table,
        # checking each partial matches its parent prediction on the way.
        expected = rlist.copy()
        expected["intensity.sum.value"] = flex.double(len(rlist), 0)
        expected["intensity.sum.variance"] = flex.double(len(rlist), 0)
        for partial in result.rows():
            pid = partial["partial_id"]
            whole = expected[pid]
            assert partial["entering"] == whole["entering"]
            assert partial["id"] == whole["id"]
            assert partial["miller_index"] == whole["miller_index"]
            assert partial["panel"] == whole["panel"]
            assert approx_equal_dict(partial, whole, "s1")
            assert approx_equal_dict(partial, whole, "xyzcal.mm")
            assert approx_equal_dict(partial, whole, "xyzcal.px")
            expected["intensity.sum.value"][pid] += partial["intensity.sum.value"]
            expected["intensity.sum.variance"][pid] += partial[
                "intensity.sum.variance"]
        return expected

    expected1 = accumulate(result1)
    expected3 = accumulate(result3)
    for r1, r3 in zip(expected1.rows(), expected3.rows()):
        assert approx_equal_dict(r1, r3, "intensity.sum.value")
        assert approx_equal_dict(r1, r3, "intensity.sum.variance")
def test(dials_data):
    """Check MaskCalculator3D foreground masks against a slow per-pixel recomputation.

    Fix: ``diff.astype(np.int)`` — ``np.int`` was deprecated in NumPy 1.20 and
    removed in 1.24, so the failure-reporting path raised AttributeError; the
    builtin ``int`` is the documented replacement.
    """
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    from dxtbx.serialize import load

    from dials.algorithms.profile_model.gaussian_rs import MaskCalculator3D, Model

    sequence = load.imageset(
        dials_data("centroid_test_data").join("sweep.json").strpath)
    crystal = load.crystal(
        dials_data("centroid_test_data").join("crystal.json").strpath)
    beam = sequence.get_beam()
    detector = sequence.get_detector()
    goniometer = sequence.get_goniometer()
    scan = sequence.get_scan()

    # Mask widths: 3-sigma multiples of divergence/mosaicity (radians).
    delta_d = 3 * beam.get_sigma_divergence(deg=False)
    try:
        mosaicity = crystal.get_mosaicity(deg=False)
    except AttributeError:
        mosaicity = 0
    delta_m = 3 * mosaicity
    nsigma = 3
    profile_model = Model(None, nsigma, beam.get_sigma_divergence(deg=False),
                          mosaicity)
    experiment = ExperimentList()
    experiment.append(
        Experiment(
            imageset=sequence,
            beam=beam,
            detector=detector,
            goniometer=goniometer,
            scan=scan,
            crystal=crystal,
            profile=profile_model,
        ))
    assert len(detector) == 1

    # Get the function object to mask the foreground
    mask_foreground = MaskCalculator3D(beam, detector, goniometer, scan,
                                       delta_d, delta_m)

    from scitbx.array_family import flex

    from dials.algorithms.profile_model.gaussian_rs import CoordinateSystem
    from dials.algorithms.shoebox import MaskCode

    s0 = beam.get_s0()
    m2 = goniometer.get_rotation_axis()
    s0_length = matrix.col(beam.get_s0()).length()
    width, height = detector[0].get_image_size()
    zrange = scan.get_array_range()
    phi0, dphi = scan.get_oscillation(deg=False)

    # Generate some reflections
    reflections = generate_reflections(detector, beam, scan, experiment, 10)

    # Mask the foreground in each
    mask_foreground(
        reflections["shoebox"],
        reflections["s1"],
        reflections["xyzcal.px"].parts()[2],
        reflections["panel"],
    )

    # Loop through all the reflections and check the mask values
    shoebox = reflections["shoebox"]
    beam_vector = reflections["s1"]
    rotation_angle = reflections["xyzcal.mm"].parts()[2]
    for refl in range(len(reflections)):
        mask = shoebox[refl].mask
        x0, x1, y0, y1, z0, z1 = shoebox[refl].bbox
        s1 = beam_vector[refl]
        phi = rotation_angle[refl]
        cs = CoordinateSystem(m2, s0, s1, phi)

        def rs_coord(i, j, k):
            # Map a (pixel, pixel, frame) coordinate to reciprocal space (e1,e2,e3).
            s1d = detector[0].get_pixel_lab_coord((i, j))
            s1d = matrix.col(s1d).normalize() * s0_length
            e1, e2 = cs.from_beam_vector(s1d)
            e3 = cs.from_rotation_angle_fast(phi0 + (k - zrange[0]) * dphi)
            return e1, e2, e3

        new_mask = flex.int(mask.accessor(), 0)
        for k in range(z1 - z0):
            for j in range(y1 - y0):
                for i in range(x1 - x0):
                    # Evaluate all 8 voxel corners and take the smallest
                    # reciprocal-space distance.  The e3/delta_m (mosaicity)
                    # term is deliberately left out, as in the original.
                    corners = (
                        rs_coord(x0 + i, y0 + j, z0 + k),
                        rs_coord(x0 + i + 1, y0 + j, z0 + k),
                        rs_coord(x0 + i, y0 + j + 1, z0 + k),
                        rs_coord(x0 + i, y0 + j, z0 + k + 1),
                        rs_coord(x0 + i + 1, y0 + j + 1, z0 + k),
                        rs_coord(x0 + i + 1, y0 + j, z0 + k + 1),
                        rs_coord(x0 + i, y0 + j + 1, z0 + k + 1),
                        rs_coord(x0 + i + 1, y0 + j + 1, z0 + k + 1),
                    )
                    de = math.sqrt(
                        min((e1 / delta_d)**2 + (e2 / delta_d)**2
                            for e1, e2, _e3 in corners))
                    if (x0 + i < 0 or y0 + j < 0 or x0 + i >= width
                            or y0 + j >= height or z0 + k < zrange[0]
                            or z0 + k >= zrange[1]):
                        # Outside the detector/scan: valid but unclassified.
                        value2 = MaskCode.Valid
                    elif de <= 1.0:
                        value2 = MaskCode.Valid | MaskCode.Foreground
                    else:
                        value2 = MaskCode.Valid | MaskCode.Background
                    new_mask[k, j, i] = value2

        if not all(got == want for got, want in zip(mask, new_mask)):
            np.set_printoptions(threshold=10000)
            diff = (mask == new_mask).as_numpy_array()
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            print(diff.astype(int))
            assert False
def test(dials_data):
    """Exercise PartialityCalculator3D on full and trimmed reflection bboxes."""
    from dxtbx.model.experiment_list import ExperimentList, ExperimentListFactory

    loaded = ExperimentListFactory.from_json_file(
        dials_data("centroid_test_data").join(
            "fake_long_experiments.json").strpath)
    assert len(loaded) == 1
    experiment = loaded[0]

    # Set the delta_divergence/mosaicity (radians)
    n_sigma = 5
    sigma_b = 0.060 * math.pi / 180
    sigma_m = 0.154 * math.pi / 180

    from dials.algorithms.profile_model.gaussian_rs import Model

    profile_model = Model(None, n_sigma, sigma_b, sigma_m)
    experiment.profile = profile_model
    experiments = ExperimentList()
    experiments.append(experiment)

    from dials.algorithms.profile_model.gaussian_rs import PartialityCalculator3D
    from dials.array_family import flex

    calculator = PartialityCalculator3D(experiment.beam, experiment.goniometer,
                                        experiment.scan, sigma_m)

    predicted = flex.reflection_table.from_predictions_multi(experiments)
    predicted["bbox"] = predicted.compute_bbox(experiments)

    # Keep only reflections fully recorded within the scan (bbox not touching
    # either scan edge).
    x0, x1, y0, y1, z0, z1 = predicted["bbox"].parts()
    predicted = predicted.select((z0 > 0) & (z1 < 100))
    assert len(predicted) > 0

    partiality = calculator(predicted["s1"],
                            predicted["xyzcal.px"].parts()[2],
                            predicted["bbox"])

    # Fully recorded reflections must exceed the 3-sigma Gaussian fraction.
    assert len(partiality) == len(predicted)
    three_sigma = 0.5 * (math.erf(3.0 / math.sqrt(2.0)) -
                         math.erf(-3.0 / math.sqrt(2.0)))
    assert partiality.all_gt(three_sigma)

    # Trim one frame off each end of every bounding box to force partials.
    x0, x1, y0, y1, z0, z1 = predicted["bbox"].parts()
    z0 = z0 + 1
    z1 = z1 - 1
    predicted["bbox"] = flex.int6(x0, x1, y0, y1, z0, z1)
    predicted = predicted.select(z1 > z0)
    assert len(predicted) > 0

    partiality = calculator(predicted["s1"],
                            predicted["xyzcal.px"].parts()[2],
                            predicted["bbox"])

    # All partialities must now lie strictly inside (0, 1].
    assert len(partiality) == len(predicted)
    assert partiality.all_le(1.0) and partiality.all_gt(0)
def test_summation(dials_regression):
    """Run every summation integrator over the regression data and cross-check.

    3d must agree with flat3d, 2d with single2d, and the 2d partials must sum
    back to the 3d totals.
    """
    from math import pi

    from dxtbx.model.experiment_list import ExperimentListFactory

    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    path = os.path.join(dials_regression, "centroid_test_data",
                        "experiments.json")

    exlist = ExperimentListFactory.from_json_file(path)
    exlist[0].profile = Model(None,
                              n_sigma=3,
                              sigma_b=0.024 * pi / 180.0,
                              sigma_m=0.044 * pi / 180.0)

    rlist = flex.reflection_table.from_predictions(exlist[0])
    rlist['id'] = flex.int(len(rlist), 0)

    def integrate(integrator_type, rlist):
        # Build an integrator of the requested type and run it on a copy.
        from libtbx.phil import parse

        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.algorithms.integration.integrator import (
            phil_scope as master_phil_scope,
        )

        rlist = rlist.copy()
        phil_scope = parse('''
      integration.background.algorithm=null
      integration.intensity.algorithm=sum
      integration.intensity.sum.integrator=%s
      integration.block.size=0.5
      integration.profile_fitting=False
    ''' % integrator_type)
        params = master_phil_scope.fetch(source=phil_scope).extract()
        integrator = IntegratorFactory.create(params, exlist, rlist)
        return integrator.integrate()

    from libtbx.test_utils import approx_equal

    def approx_equal_dict(a, b, k):
        return approx_equal(a[k], b[k])

    # Do summation by all different methods
    result1 = integrate("3d", rlist)
    result2 = integrate("flat3d", rlist)
    result3 = integrate("2d", rlist)
    result4 = integrate("single2d", rlist)
    for result in (result1, result2, result3, result4):
        assert len(result) >= len(rlist)

    exact_keys = ('partial_id', 'bbox', 'entering', 'flags', 'id',
                  'miller_index', 'panel')
    approx_keys = ('d', 'intensity.sum.value', 'intensity.sum.variance', 'lp',
                   'partiality', 's1', 'xyzcal.mm', 'xyzcal.px', 'zeta')

    # result1 and result2 should be the same
    assert len(result1) == len(result2)
    for r1, r2 in zip(result1, result2):
        for key in exact_keys:
            assert r1[key] == r2[key]
        for key in approx_keys:
            assert approx_equal_dict(r1, r2, key)

    # result3 and result4 should be the same (observed centroids included)
    assert len(result3) == len(result4)
    for r3, r4 in zip(result3, result4):
        for key in exact_keys:
            assert r3[key] == r4[key]
        for key in approx_keys + ('xyzobs.px.value', 'xyzobs.px.variance'):
            assert approx_equal_dict(r3, r4, key)

    # result3 should add up to result1
    assert len(result3) >= len(result1)

    def accumulate(result):
        # Sum the partial intensities back onto the original prediction table,
        # checking each partial matches its parent prediction on the way.
        expected = rlist.copy()
        expected['intensity.sum.value'] = flex.double(len(rlist), 0)
        expected['intensity.sum.variance'] = flex.double(len(rlist), 0)
        for partial in result:
            pid = partial['partial_id']
            whole = expected[pid]
            assert partial['entering'] == whole['entering']
            assert partial['id'] == whole['id']
            assert partial['miller_index'] == whole['miller_index']
            assert partial['panel'] == whole['panel']
            assert approx_equal_dict(partial, whole, 's1')
            assert approx_equal_dict(partial, whole, 'xyzcal.mm')
            assert approx_equal_dict(partial, whole, 'xyzcal.px')
            expected['intensity.sum.value'][pid] += partial['intensity.sum.value']
            expected['intensity.sum.variance'][pid] += partial[
                'intensity.sum.variance']
        return expected

    expected1 = accumulate(result1)
    expected3 = accumulate(result3)
    for r1, r3 in zip(expected1, expected3):
        assert approx_equal_dict(r1, r3, 'intensity.sum.value')
        assert approx_equal_dict(r1, r3, 'intensity.sum.variance')