def _detector(self):
    '''Return a working detector instance.

    If per-panel geometry is available (``self._panel_origin`` is set), a
    hierarchical multi-panel Detector is assembled from the stored panel
    offsets, sizes, origins and axes.  Otherwise a single-panel detector is
    built via the detector factory with a very wide trusted range.
    '''
    if self._panel_origin is not None:
        from dxtbx.model import Detector
        detector = Detector()
        root = detector.hierarchy()
        # The hierarchy root carries the overall detector frame; panels are
        # positioned relative to it via their own frames below.
        root.set_frame(self._fast_axis, self._slow_axis, self._detector_origin)
        i_panel = 0
        for p_offset, p_size, origin, fast, slow in zip(
            self._panel_offset, self._panel_size, self._panel_origin,
            self._panel_fast, self._panel_slow):
            # ensure mutual orthogonality in presence of numerical rounding errors
            # (recompute slow as normal x fast so fast/slow are exactly orthogonal)
            normal = fast.cross(slow)
            slow = normal.cross(fast)
            p = root.add_panel()
            p.set_type('unknown')
            p.set_raw_image_offset(p_offset)
            p.set_image_size(p_size)
            p.set_name('Panel%d' % i_panel)
            p.set_pixel_size(self._pixel_size)
            p.set_frame(fast.elems, slow.elems, origin.elems)
            i_panel += 1
        return detector
    # No per-panel data: fall back to a simple single-panel detector.
    # (0, 1.e9) is an effectively unbounded trusted range.
    return self._detector_factory.complex(
        self._detector_factory.sensor('unknown'), self._detector_origin,
        self._fast_axis, self._slow_axis, self._pixel_size,
        self._image_size, (0, 1.e9))
def __init__(self):
    """Build a fixed two-quadrant hierarchical detector.

    Layout: root group "D1" holds quadrants "Q1" (panels P1, P2) and
    "Q2" (panels P3, P4).  The assembled detector is stored on
    ``self.detector``.
    """
    detector = Detector()
    root = detector.hierarchy()
    root.set_name("D1")
    root.set_type("D")
    layout = (("Q1", ("P1", "P2")), ("Q2", ("P3", "P4")))
    for quad_name, panel_names in layout:
        quad = root.add_group()
        quad.set_name(quad_name)
        quad.set_type("Q")
        for panel_name in panel_names:
            panel = quad.add_panel()
            panel.set_name(panel_name)
            panel.set_type("P")
    self.detector = detector
def tst_hierarchical_detector():
    """Check that a hierarchical detector survives a pickle round trip."""
    panel = Panel()
    panel.set_local_frame((1, 0, 0), (0, 1, 0), (0, 0, 1))
    original = Detector()
    hierarchy = original.hierarchy()
    hierarchy.add_panel(panel)
    hierarchy.add_group()
    restored = pickle_then_unpickle(original)
    # The restored hierarchy must reference the same panel objects as the
    # restored flat detector, and preserve the panel/group distinction.
    assert(restored.hierarchy()[0] == restored[0])
    assert(restored.hierarchy()[0] in restored)
    assert(restored.hierarchy()[1].is_group())
    assert(original == restored)
    print("OK")
def _detector_from_dict(obj):
    ''' Get the detector from a dictionary. '''
    from dxtbx.model import Detector, HierarchicalDetector
    # A 'hierarchy' key marks the serialized form of the hierarchical model.
    factory = HierarchicalDetector if 'hierarchy' in obj else Detector
    return factory.from_dict(obj)
def from_dict(d, t=None):
    '''Convert the dictionary to a detector model

    Params:
        d The dictionary of parameters
        t The template dictionary to use

    Returns:
        The detector model

    '''
    from dxtbx.model import Detector, HierarchicalDetector

    # If d is None fall back to the template (which itself may be None).
    if d is None:
        if t is None:
            return None
        return from_dict(t, None)

    # A bare list of panel dictionaries is shorthand for {'panels': [...]}.
    if isinstance(d, list):
        d = {'panels': d}

    # Overlay the supplied parameters on top of the template, with d's
    # entries taking precedence.  Previously the merged dictionary was
    # computed but never used, so the template was silently ignored.
    if t is not None:
        merged = dict(t)
        merged.update(d)
        d = merged

    # Create the model from the dictionary
    if "hierarchy" in d:
        return HierarchicalDetector.from_dict(d)
    return Detector.from_dict(d)
def generated_exp(n=1, scan=True, image_range=(0, 10)):
    """Generate an experiment list with ``n`` experiments.

    The first experiment (identifier "0") uses one goniometer axis; any
    additional experiments share the same beam/scan/detector/crystal but
    use a second goniometer axis and identifiers "1", "2", ...

    Args:
        n: Number of experiments to create (default 1).
        scan: If True attach a Scan over ``image_range``, otherwise no scan.
        image_range: Image range for the scan.  The default was changed
            from a mutable list literal to a tuple to avoid the shared
            mutable-default-argument pitfall; callers may still pass a list.

    Returns:
        An ExperimentList of length ``n``.
    """
    experiments = ExperimentList()
    exp_dict = {
        "__id__": "crystal",
        "real_space_a": [1.0, 0.0, 0.0],
        "real_space_b": [0.0, 1.0, 0.0],
        "real_space_c": [0.0, 0.0, 2.0],
        "space_group_hall_symbol": " C 2y",
    }
    crystal = Crystal.from_dict(exp_dict)
    if scan:
        scan = Scan(image_range=image_range, oscillation=[0.0, 1.0])
    else:
        scan = None
    beam = Beam(s0=(0.0, 0.0, 1.01))
    goniometer = Goniometer((1.0, 0.0, 0.0))
    goniometer_2 = Goniometer((1.0, 1.0, 0.0))
    detector = Detector()
    experiments.append(
        Experiment(
            beam=beam,
            scan=scan,
            goniometer=goniometer,
            detector=detector,
            crystal=crystal,
        )
    )
    experiments[0].identifier = "0"
    if n > 1:
        for i in range(0, n - 1):
            experiments.append(
                Experiment(
                    beam=beam,
                    scan=scan,
                    goniometer=goniometer_2,
                    detector=detector,
                    crystal=crystal,
                )
            )
            experiments[i + 1].identifier = str(i + 1)
    return experiments
def do_work(item):
    """Import one (tag, filename) item, validate it, and process it."""
    tag, filename = item
    datablock = do_import(filename)
    imagesets = datablock.extract_imagesets()
    first = imagesets[0] if len(imagesets) > 0 else None
    # Reject empty, multi-imageset and multi-image inputs up front.
    if first is None or len(first) == 0:
        logger.info("Zero length imageset in file: %s"%filename)
        return
    if len(imagesets) > 1:
        raise Abort("Found more than one imageset in file: %s"%filename)
    if len(first) > 1:
        raise Abort("Found a multi-image file. Run again with pre_import=True")
    if self.reference_detector is not None:
        from dxtbx.model import Detector
        first.set_detector(Detector.from_dict(self.reference_detector.to_dict()))
    update_geometry(first)
    Processor(copy.deepcopy(params)).process_datablock(tag, datablock)
def load_models(obj):
    """Best-effort reconstruction of (beam, detector, goniometer, scan).

    Each model is looked up and deserialized independently; any failure
    (missing key, bad index, bad dictionary) yields None for that model.
    """
    def attempt(build):
        try:
            return build()
        except Exception:
            return None

    def build_gonio():
        from dxtbx.serialize import goniometer
        return goniometer.from_dict(glist[obj['goniometer']])

    beam = attempt(lambda: Beam.from_dict(blist[obj['beam']]))
    detector = attempt(lambda: Detector.from_dict(dlist[obj['detector']]))
    gonio = attempt(build_gonio)
    scan = attempt(lambda: Scan.from_dict(slist[obj['scan']]))
    return beam, detector, gonio, scan
def test_experimentlist_factory_from_imageset():
    """An imageset plus a crystal yields a single, fully-populated experiment."""
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.model import Crystal
    from dxtbx.format.Format import Format

    imageset = Format.get_imageset(["filename.cbf"], as_imageset=True)
    imageset.set_beam(Beam(), 0)
    imageset.set_detector(Detector(), 0)
    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

    experiments = ExperimentListFactory.from_imageset_and_crystal(
        imageset, crystal)

    assert len(experiments) == 1
    experiment = experiments[0]
    for model in (experiment.imageset, experiment.beam,
                  experiment.detector, experiment.crystal):
        assert model is not None
def test_from_null_sweep():
    """A datablock built from a null sweep must preserve the sweep's models."""
    filenames = ["template_%2d.cbf" % (i + 1) for i in range(0, 10)]
    sweep = Format.get_imageset(
        filenames,
        beam=Beam((0, 0, 1)),
        detector=Detector(),
        goniometer=Goniometer((1, 0, 0)),
        scan=Scan((1, 10), (0, 0.1)),
    )

    # Wrap the sweep in a datablock and pull it back out again.
    datablocks = DataBlockFactory.from_imageset(sweep)
    assert len(datablocks) == 1
    sweeps = datablocks[0].extract_sweeps()
    assert len(sweeps) == 1

    recovered = sweeps[0]
    assert recovered.get_beam() == sweep.get_beam()
    assert recovered.get_detector() == sweep.get_detector()
    assert recovered.get_goniometer() == sweep.get_goniometer()
    assert recovered.get_scan() == sweep.get_scan()
def test_detector():
    """Round-trip a two-panel hierarchical detector through to_dict/from_dict."""
    d1 = Detector()

    # (name, pixel size, image size, trusted range, local frame)
    panel_specs = (
        ("p1", (0.1, 0.1), (100, 100), (0, 1000),
         ((1, 0, 0), (0, 1, 0), (0, 0, 1))),
        ("p2", (0.2, 0.2), (200, 200), (0, 2000),
         ((0, 1, 0), (1, 0, 0), (0, 0, 1))),
    )
    for name, pixel, size, trusted, frame in panel_specs:
        p = d1.add_panel()
        p.set_name(name)
        p.set_type("panel")
        p.set_pixel_size(pixel)
        p.set_image_size(size)
        p.set_trusted_range(trusted)
        p.set_local_frame(*frame)

    # Attach each panel to its own group under the hierarchy root.
    root = d1.hierarchy()
    for name, origin, panel_index in (("g1", (0, 0, 2), 0),
                                      ("g2", (0, 0, 4), 1)):
        g = root.add_group()
        g.set_name(name)
        g.set_type("group")
        g.set_local_frame((0, 1, 0), (1, 0, 0), origin)
        g.add_panel(d1[panel_index])

    d2 = Detector.from_dict(d1.to_dict())
    assert len(d1) == len(d2)
    for p1, p2 in zip(d1, d2):
        assert p1 == p2
    assert d1.hierarchy() == d2.hierarchy()
    assert d1 == d2
def __call__(self):
    """Exercise to_dict/from_dict round-tripping of a hierarchical detector."""
    from dxtbx.model import Detector, Panel # import dependency
    d1 = Detector()
    # (name, pixel size, image size, trusted range, local frame)
    for name, pixel, size, trusted, frame in (
        ("p1", (0.1, 0.1), (100, 100), (0, 1000),
         ((1, 0, 0), (0, 1, 0), (0, 0, 1))),
        ("p2", (0.2, 0.2), (200, 200), (0, 2000),
         ((0, 1, 0), (1, 0, 0), (0, 0, 1))),
    ):
        p = d1.add_panel()
        p.set_name(name)
        p.set_type("panel")
        p.set_pixel_size(pixel)
        p.set_image_size(size)
        p.set_trusted_range(trusted)
        p.set_local_frame(*frame)
    # One group per panel under the hierarchy root.
    root = d1.hierarchy()
    for name, origin, index in (("g1", (0, 0, 2), 0), ("g2", (0, 0, 4), 1)):
        g = root.add_group()
        g.set_name(name)
        g.set_type("group")
        g.set_local_frame((0, 1, 0), (1, 0, 0), origin)
        g.add_panel(d1[index])
    d2 = Detector.from_dict(d1.to_dict())
    assert(len(d1) == len(d2))
    for p1, p2 in zip(d1, d2):
        assert(p1 == p2)
    assert(d1.hierarchy() == d2.hierarchy())
    assert(d1 == d2)
    print('OK')
def test_experimentlist_contains(experiment_list):
    """Membership: attached models are found; fresh models are not."""
    from dxtbx.model import Beam, Detector, Goniometer, Scan

    # Every model attached to an experiment must be reported as contained.
    for experiment in experiment_list:
        for model in (experiment.beam, experiment.detector,
                      experiment.goniometer, experiment.scan):
            assert model in experiment_list

    # Freshly constructed models were never attached, so none may be found.
    for model in (Beam(), Detector(), Goniometer(), Scan()):
        assert model not in experiment_list
def test_from_null_sequence():
    """Experiments built from a null sequence must keep the sequence models."""
    filenames = ["template_%2d.cbf" % (i + 1) for i in range(0, 10)]
    sequence = Format.get_imageset(
        filenames,
        beam=Beam((0, 0, 1)),
        detector=Detector(),
        goniometer=Goniometer((1, 0, 0)),
        scan=Scan((1, 10), (0, 0.1)),
    )

    # Build experiments without a crystal model.
    experiments = ExperimentListFactory.from_sequence_and_crystal(sequence, crystal=None)
    assert len(experiments) == 1

    imagesets = experiments.imagesets()
    assert len(imagesets) == 1
    recovered = imagesets[0]
    assert recovered.get_format_class()
    assert recovered.get_beam() == sequence.get_beam()
    assert recovered.get_detector() == sequence.get_detector()
    assert recovered.get_goniometer() == sequence.get_goniometer()
    assert recovered.get_scan() == sequence.get_scan()
def test_experimentlist_indices(experiment_list):
    """indices() must report every position at which a model is shared."""
    models_by_kind = (
        [e.beam for e in experiment_list],
        [e.detector for e in experiment_list],
        [e.goniometer for e in experiment_list],
        [e.scan for e in experiment_list],
    )

    # The fixture shares models between experiments 0/4 and 1/3, while
    # experiment 2 has unique models; the same pattern holds for every kind.
    expected = ([0, 4], [1, 3], [2], [1, 3], [0, 4])
    for models in models_by_kind:
        for model, indices in zip(models, expected):
            assert list(experiment_list.indices(model)) == indices

    # Models never attached to the list must yield no indices at all.
    for model in (Beam(), Detector(), Goniometer(), Scan()):
        assert len(experiment_list.indices(model)) == 0
def test_experimentlist_dumper_dump_empty_sequence(tmp_path):
    """Dump experiments from a filename-only sequence and reload them."""
    filenames = [tmp_path / f"filename_{i}.cbf" for i in range(1, 3)]
    imageset = Format.get_imageset(
        filenames,
        beam=Beam((1, 0, 0)),
        detector=Detector(),
        goniometer=Goniometer(),
        scan=Scan((1, 2), (0.0, 1.0)),
        as_sequence=True,
    )
    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
    experiments = ExperimentListFactory.from_imageset_and_crystal(
        imageset, crystal)

    # Round-trip through JSON without touching the (nonexistent) image data.
    json_path = tmp_path / "temp.json"
    experiments.as_json(json_path)
    reloaded = ExperimentListFactory.from_json_file(json_path, check_format=False)
    check(experiments, reloaded)
def load_models(obj):
    """Deserialize (beam, detector, goniometer, scan); None on any failure."""
    def attempt(build):
        try:
            return build()
        except Exception:
            return None

    def build_detector():
        # Hierarchical detectors are distinguished by a 'hierarchy' key.
        dobj = dlist[obj['detector']]
        if 'hierarchy' in dobj:
            return HierarchicalDetector.from_dict(dobj)
        return Detector.from_dict(dobj)

    beam = attempt(lambda: Beam.from_dict(blist[obj['beam']]))
    detector = attempt(build_detector)
    gonio = attempt(lambda: Goniometer.from_dict(glist[obj['goniometer']]))
    scan = attempt(lambda: Scan.from_dict(slist[obj['scan']]))
    return beam, detector, gonio, scan
def tst_from_imageset(self):
    """Building experiments from an imageset + crystal yields one experiment."""
    from dxtbx.imageset import ImageSet, NullReader
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.model import Crystal

    imageset = ImageSet(NullReader(["filename.cbf"]))
    imageset.set_beam(Beam(), 0)
    imageset.set_detector(Detector(), 0)
    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

    experiments = ExperimentListFactory.from_imageset_and_crystal(
        imageset, crystal)

    assert(len(experiments) == 1)
    experiment = experiments[0]
    assert(experiment.imageset is not None)
    assert(experiment.beam is not None)
    assert(experiment.detector is not None)
    assert(experiment.crystal is not None)
    print('OK')
def tst_from_null_sweep(self):
    """A datablock built from a null sweep must preserve the sweep's models."""
    from dxtbx.imageset import NullReader, ImageSweep, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan

    sweep = ImageSweep(NullReader(SweepFileList("template_%2d.cbf", (0, 10))))
    sweep.set_beam(Beam((0, 0, 1)))
    sweep.set_detector(Detector())
    sweep.set_goniometer(Goniometer((1, 0, 0)))
    sweep.set_scan(Scan((1, 10), (0, 0.1)))

    # Create the datablock and extract the sweep back out of it.
    datablocks = DataBlockFactory.from_imageset(sweep)
    assert(len(datablocks) == 1)
    sweeps = datablocks[0].extract_sweeps()
    assert(len(sweeps) == 1)
    recovered = sweeps[0]
    assert(recovered.get_beam() == sweep.get_beam())
    assert(recovered.get_detector() == sweep.get_detector())
    assert(recovered.get_goniometer() == sweep.get_goniometer())
    assert(recovered.get_scan() == sweep.get_scan())
    print('OK')
def do_work(i, item_list):
    """Process a list of (tag, datablock) items in one worker.

    Args:
        i: Worker index, used only to build the composite tag.
        item_list: Sequence of (tag, datablock) pairs to process.
    """
    processor = Processor(copy.deepcopy(params), composite_tag="%04d" % i)
    for item in item_list:
        try:
            for imageset in item[1].extract_imagesets():
                update_geometry(imageset)
        except RuntimeError as e:
            logger.warning(
                "Error updating geometry on item %s, %s" % (str(item[0]), str(e)))
            continue
        if self.reference_detector is not None:
            from dxtbx.model import Detector
            # NOTE(review): `imageset` here is whichever imageset the loop
            # above left bound last -- if a datablock holds several imagesets,
            # only the final one gets the reference detector; confirm intent.
            # The frame index was renamed so it no longer shadows the worker
            # index parameter `i`.
            for frame in range(len(imageset)):
                imageset.set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()),
                    index=frame)
        processor.process_datablock(item[0], item[1])
    processor.finalize()
def do_work(i, item_list):
    """Import, validate, and process each (tag, filename) item in one worker."""
    processor = Processor(copy.deepcopy(params), composite_tag="%04d" % i)
    for tag, filename in item_list:
        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        # Accept exactly one single-image imageset per file.
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
            logger.info("Zero length imageset in file: %s" % filename)
            return
        if len(imagesets) > 1:
            raise Abort(
                "Found more than one imageset in file: %s" % filename)
        if len(imagesets[0]) > 1:
            raise Abort(
                "Found a multi-image file. Run again with pre_import=True"
            )
        try:
            update_geometry(imagesets[0])
        except RuntimeError as e:
            logger.warning(
                "Error updating geometry on item %s, %s" % (tag, str(e)))
            continue
        if self.reference_detector is not None:
            from dxtbx.model import Detector
            reference = Detector.from_dict(self.reference_detector.to_dict())
            imagesets[0].set_detector(reference)
        processor.process_datablock(tag, datablock)
    processor.finalize()
def test_experimentlist_dumper_dump_empty_sweep(tmpdir):
    """Dump experiments from a filename-only sweep and reload them."""
    tmpdir.chdir()
    filenames = ["filename_%01d.cbf" % (i + 1) for i in range(0, 2)]
    imageset = Format.get_imageset(
        filenames,
        beam=Beam((1, 0, 0)),
        detector=Detector(),
        goniometer=Goniometer(),
        scan=Scan((1, 2), (0.0, 1.0)),
        as_sweep=True,
    )
    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
    experiments = ExperimentListFactory.from_imageset_and_crystal(
        imageset, crystal)

    # Round-trip through JSON without touching the (nonexistent) image data.
    json_name = "temp.json"
    experiments.as_json(json_name)
    reloaded = ExperimentListFactory.from_json_file(json_name, check_format=False)
    check(experiments, reloaded)
def tst_set_models(imageset):
    """Overridden imageset models must stick; per-index models stay original."""
    from dxtbx.model import Beam, Detector, Panel

    # Replacement models distinct from whatever the imageset started with.
    beam = Beam((1, 0, 0), 0.5)
    detector = Detector(
        Panel(
            "UNKNOWN",
            "Panel",
            (1, 0, 0),
            (0, 1, 0),
            (0, 0, 1),
            (0.1, 0.1),
            (1000, 1000),
            (0, 1),
        ))

    # Install the overrides.
    imageset.set_beam(beam)
    imageset.set_detector(detector)

    # Ensure this doesn't interfere with reading.
    for _ in imageset:
        pass

    # The overrides must be returned unchanged...
    assert imageset.get_beam() == beam
    assert imageset.get_detector() == detector

    # ...while per-index lookups still reflect the original models.
    assert imageset.get_beam(0) != beam
    assert imageset.get_detector(0) != detector
def generated_exp(n=1):
    """Generate an experiment list of n experiments sharing the same models."""
    crystal = Crystal.from_dict(
        {
            "__id__": "crystal",
            "real_space_a": [1.0, 0.0, 0.0],
            "real_space_b": [0.0, 1.0, 0.0],
            "real_space_c": [0.0, 0.0, 2.0],
            "space_group_hall_symbol": " C 2y",
        }
    )
    scan = Scan(image_range=[0, 90], oscillation=[0.0, 1.0])
    beam = Beam(s0=(0.0, 0.0, 1.01))
    goniometer = Goniometer((1.0, 0.0, 0.0))
    detector = Detector()

    # At least one experiment is always created, matching the original
    # "append one, then n-1 more" construction.
    experiments = ExperimentList()
    for _ in range(max(n, 1)):
        experiments.append(
            Experiment(
                beam=beam,
                scan=scan,
                goniometer=goniometer,
                detector=detector,
                crystal=crystal,
            )
        )
    return experiments
def test_experimentlist_factory_from_sequence():
    """A sequence plus crystal should produce one fully-populated experiment."""
    filenames = ["filename_%01d.cbf" % (i + 1) for i in range(0, 2)]
    imageset = Format.get_imageset(
        filenames,
        beam=Beam(),
        detector=Detector(),
        goniometer=Goniometer(),
        scan=Scan((1, 2), (0, 1)),
        as_sequence=True,
    )
    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

    experiments = ExperimentListFactory.from_imageset_and_crystal(
        imageset, crystal)

    assert len(experiments) == 1
    experiment = experiments[0]
    assert experiment.imageset
    assert experiment.beam
    assert experiment.detector is not None
    assert experiment.goniometer
    assert experiment.scan
    assert experiment.crystal
def _detector(self):
    """Return a model for a simple detector, presuming no one has one
    of these on a two-theta stage. Assert that the beam centre is
    provided in the Mosflm coordinate frame.

    Two paths: the default single-panel detector from FormatCBFMini with
    the Pilatus inter-module gaps masked out, or (when self._multi_panel
    is set) a 60-panel model with one dxtbx panel per physical module.
    """
    if not self._multi_panel:
        detector = FormatCBFMini._detector(self)
        # Mask the inter-module gap regions (mask values are 1-based,
        # add_mask takes 0-based start coordinates).
        for f0, f1, s0, s1 in determine_pilatus_mask(detector):
            detector[0].add_mask(f0 - 1, s0 - 1, f1, s1)
        return detector

    # got to here means 60-panel version
    d = Detector()

    # Header values parsed from the miniCBF header dictionary.
    distance = float(
        self._cif_header_dictionary["Detector_distance"].split()[0])

    beam_xy = (self._cif_header_dictionary["Beam_xy"].replace(
        "(", "").replace(")", "").replace(",", "").split()[:2])
    beam_x, beam_y = map(float, beam_xy)

    wavelength = float(
        self._cif_header_dictionary["Wavelength"].split()[0])

    pixel_xy = (self._cif_header_dictionary["Pixel_size"].replace(
        "m", "").replace("x", "").split())
    pixel_x, pixel_y = map(float, pixel_xy)

    # Sensor thickness: header value scaled by 1000 (presumably m -> mm;
    # TODO confirm against the header convention).
    thickness = float(
        self._cif_header_dictionary["Silicon"].split()[2]) * 1000.0

    nx = int(
        self._cif_header_dictionary["X-Binary-Size-Fastest-Dimension"])
    ny = int(self._cif_header_dictionary["X-Binary-Size-Second-Dimension"])

    overload = int(self._cif_header_dictionary["Count_cutoff"].split()[0])
    underload = -1

    # take into consideration here the thickness of the sensor also the
    # wavelength of the radiation (which we have in the same file...)
    table = attenuation_coefficient.get_table("Si")
    # mu_at_angstrom returns cm^-1; /10 converts to mm^-1
    mu = table.mu_at_angstrom(wavelength) / 10.0
    t0 = thickness

    # FIXME would also be very nice to be able to take into account the
    # misalignment of the individual modules given the calibration...

    # single detector or multi-module detector
    # Convert to mm (header values presumably in m -- TODO confirm).
    pixel_x *= 1000.0
    pixel_y *= 1000.0
    distance *= 1000.0

    beam_centre = matrix.col((beam_x * pixel_x, beam_y * pixel_y, 0))

    # Detector frame: fast along +x, slow along -y, beam along -z.
    fast = matrix.col((1.0, 0.0, 0.0))
    slow = matrix.col((0.0, -1.0, 0.0))
    s0 = matrix.col((0, 0, -1))
    origin = (distance * s0) - (fast * beam_centre[0]) - (slow * beam_centre[1])

    root = d.hierarchy()
    root.set_local_frame(fast.elems, slow.elems, origin.elems)

    det = _DetectorDatabase["Pilatus"]

    # Edge dead areas not included, only gaps between modules matter
    n_fast, remainder = divmod(nx, det.module_size_fast)
    assert (n_fast - 1) * det.gap_fast == remainder
    n_slow, remainder = divmod(ny, det.module_size_slow)
    assert (n_slow - 1) * det.gap_slow == remainder

    mx = det.module_size_fast
    my = det.module_size_slow
    dx = det.gap_fast
    dy = det.gap_slow

    # Pixel extents of each module in the full image, skipping the gaps.
    xmins = [(mx + dx) * i for i in range(n_fast)]
    xmaxes = [mx + (mx + dx) * i for i in range(n_fast)]
    ymins = [(my + dy) * i for i in range(n_slow)]
    ymaxes = [my + (my + dy) * i for i in range(n_slow)]

    # Map of panel name -> (xmin, ymin, xmax, ymax) raw-image extents.
    self.coords = {}

    # Panel-local frame is relative to the root set above.
    fast = matrix.col((1.0, 0.0, 0.0))
    slow = matrix.col((0.0, 1.0, 0.0))
    panel_idx = 0
    for ymin, ymax in zip(ymins, ymaxes):
        for xmin, xmax in zip(xmins, xmaxes):
            xmin_mm = xmin * pixel_x
            ymin_mm = ymin * pixel_y

            origin_panel = fast * xmin_mm + slow * ymin_mm

            panel_name = "Panel%d" % panel_idx
            panel_idx += 1

            p = d.add_panel()
            p.set_type("SENSOR_PAD")
            p.set_name(panel_name)
            p.set_raw_image_offset((xmin, ymin))
            p.set_image_size((xmax - xmin, ymax - ymin))
            p.set_trusted_range((underload, overload))
            p.set_pixel_size((pixel_x, pixel_y))
            p.set_thickness(thickness)
            p.set_material("Si")
            p.set_mu(mu)
            p.set_px_mm_strategy(ParallaxCorrectedPxMmStrategy(mu, t0))
            p.set_local_frame(fast.elems, slow.elems, origin_panel.elems)
            p.set_raw_image_offset((xmin, ymin))
            self.coords[panel_name] = (xmin, ymin, xmax, ymax)

    return d
def load_detector(entry):
    """Reconstruct a dxtbx Detector from a NeXus-style entry group.

    Reads sensor material/type/thickness and the trusted range from the
    detector group, then adds one panel per "module%d" child until no more
    modules are found.  Each module must be a pure translation directly
    dependent on the laboratory frame (the assertions below enforce this).

    Returns:
        The populated Detector instance.
    """
    from dxtbx.model import Detector
    from scitbx import matrix

    # Get the detector module object
    nx_instrument = get_nx_instrument(entry, "instrument")
    nx_detector = get_nx_detector(nx_instrument, "detector")
    # The detector itself must sit directly in the lab frame.
    assert(nx_detector['depends_on'].value == '.')
    material = nx_detector['sensor_material'].value
    det_type = nx_detector['type'].value
    thickness = nx_detector['sensor_thickness'].value
    trusted_range = (nx_detector['underload'].value,
                     nx_detector['saturation_value'].value)

    # The detector model
    detector = Detector()

    # Iterate modules "module0", "module1", ... until lookup fails.
    i = 0
    while True:
        try:
            module = get_nx_detector_module(nx_detector, "module%d" % i)
        except Exception:
            break

        # Set the data size
        image_size = module['data_size']

        # Set the module offset: must be a plain translation in the lab
        # frame with no additional offset; origin = direction * length.
        offset_length = module['module_offset'].value
        assert(module['module_offset'].attrs['depends_on'] == '.')
        assert(module['module_offset'].attrs['transformation_type'] ==
               'translation')
        assert(tuple(module['module_offset'].attrs['offset']) == (0, 0, 0))
        offset_vector = matrix.col(module['module_offset'].attrs['vector'])
        origin = offset_vector * offset_length

        # Write the fast pixel direction: a translation hanging off the
        # module offset; its vector gives the fast axis, its value the
        # pixel size along fast.
        module_offset_path = str(module['module_offset'].name)
        pixel_size_x = module['fast_pixel_direction'].value
        assert(module['fast_pixel_direction'].attrs['depends_on'] ==
               module_offset_path)
        assert(module['fast_pixel_direction'].attrs['transformation_type'] ==
               'translation')
        assert(tuple(module['fast_pixel_direction'].attrs['offset']) ==
               (0, 0, 0))
        fast_axis = tuple(module['fast_pixel_direction'].attrs['vector'])

        # Write the slow pixel direction (same constraints as fast).
        pixel_size_y = module['slow_pixel_direction'].value
        assert(module['slow_pixel_direction'].attrs['depends_on'] ==
               module_offset_path)
        assert(module['slow_pixel_direction'].attrs['transformation_type'] ==
               'translation')
        assert(tuple(module['slow_pixel_direction'].attrs['offset']) ==
               (0, 0, 0))
        slow_axis = tuple(module['slow_pixel_direction'].attrs['vector'])

        # Get the pixel size and axis vectors
        pixel_size = (pixel_size_x,
                      pixel_size_y)

        # Create the panel
        panel = detector.add_panel()
        panel.set_frame(fast_axis, slow_axis, origin)
        panel.set_pixel_size(pixel_size)
        panel.set_image_size(image_size)
        panel.set_type(det_type)
        panel.set_thickness(thickness)
        panel.set_material(material)
        panel.set_trusted_range(trusted_range)
        i += 1

    # Return the detector and panel
    return detector
def _detector(self, index=None):
    """Build a hierarchical Detector from psana CSPAD metrology.

    The hierarchy mirrors the physical CSPAD layout: detector (D0) ->
    4 quadrants (Q0..Q3) -> 8 sensors each (S0..S7) -> 2 ASICs each.
    Each level's frame is derived from the metrology change-of-basis
    matrices in ``cob`` by transforming the origin and unit fast/slow
    points.  If a beam can be built, a parallax-corrected px<->mm
    strategy is attached to every panel.

    Args:
        index: Event/run index; defaults to 0.
    """
    if index is None:
        index = 0
    run = self.get_run_from_index(index)
    det = self._get_psana_detector(run)
    geom = det.pyda.geoaccess(run.run())
    cob = read_slac_metrology(geometry=geom, include_asic_offset=True)
    distance = env_distance(
        self.params.detector_address[0], run.env(), self.params.cspad.detz_offset
    )
    d = Detector()
    pg0 = d.hierarchy()
    # first deal with D0
    det_num = 0
    # Frame vectors from the change-of-basis matrix: transform the origin
    # and the unit points along x and y, then difference to get fast/slow.
    origin = col((cob[(0,)] * col((0, 0, 0, 1)))[0:3])
    fast = col((cob[(0,)] * col((1, 0, 0, 1)))[0:3]) - origin
    slow = col((cob[(0,)] * col((0, 1, 0, 1)))[0:3]) - origin
    # Push the whole detector back along z by the measured distance.
    origin += col((0.0, 0.0, -distance))
    pg0.set_local_frame(fast.elems, slow.elems, origin.elems)
    pg0.set_name("D%d" % (det_num))
    for quad_num in range(4):
        # Now deal with Qx
        pg1 = pg0.add_group()
        origin = col((cob[(0, quad_num)] * col((0, 0, 0, 1)))[0:3])
        fast = col((cob[(0, quad_num)] * col((1, 0, 0, 1)))[0:3]) - origin
        slow = col((cob[(0, quad_num)] * col((0, 1, 0, 1)))[0:3]) - origin
        pg1.set_local_frame(fast.elems, slow.elems, origin.elems)
        pg1.set_name("D%dQ%d" % (det_num, quad_num))
        for sensor_num in range(8):
            # Now deal with Sy
            pg2 = pg1.add_group()
            origin = col((cob[(0, quad_num, sensor_num)] * col((0, 0, 0, 1)))[0:3])
            fast = (
                col((cob[(0, quad_num, sensor_num)] * col((1, 0, 0, 1)))[0:3])
                - origin
            )
            slow = (
                col((cob[(0, quad_num, sensor_num)] * col((0, 1, 0, 1)))[0:3])
                - origin
            )
            pg2.set_local_frame(fast.elems, slow.elems, origin.elems)
            pg2.set_name("D%dQ%dS%d" % (det_num, quad_num, sensor_num))
            # Now deal with Az
            for asic_num in range(2):
                val = "ARRAY_D0Q%dS%dA%d" % (quad_num, sensor_num, asic_num)
                p = pg2.add_panel()
                origin = col(
                    (cob[(0, quad_num, sensor_num, asic_num)] * col((0, 0, 0, 1)))[
                        0:3
                    ]
                )
                fast = (
                    col(
                        (
                            cob[(0, quad_num, sensor_num, asic_num)]
                            * col((1, 0, 0, 1))
                        )[0:3]
                    )
                    - origin
                )
                slow = (
                    col(
                        (
                            cob[(0, quad_num, sensor_num, asic_num)]
                            * col((0, 1, 0, 1))
                        )[0:3]
                    )
                    - origin
                )
                p.set_local_frame(fast.elems, slow.elems, origin.elems)
                p.set_pixel_size(
                    (cspad_cbf_tbx.pixel_size,
                     cspad_cbf_tbx.pixel_size)
                )
                p.set_image_size(cspad_cbf_tbx.asic_dimension)
                p.set_trusted_range(
                    (
                        cspad_tbx.cspad_min_trusted_value,
                        cspad_tbx.cspad_saturated_value,
                    )
                )
                p.set_name(val)
    # Without a beam we cannot compute the attenuation coefficient, so
    # return the detector without parallax corrections.
    try:
        beam = self._beam(index)
    except Exception:
        print(
            "No beam object initialized. Returning CSPAD detector without parallax corrections"
        )
        return d

    # take into consideration here the thickness of the sensor also the
    # wavelength of the radiation (which we have in the same file...)
    wavelength = beam.get_wavelength()
    thickness = 0.5  # mm, see Hart et al. 2012
    table = attenuation_coefficient.get_table("Si")
    # mu_at_angstrom returns cm^-1
    mu = table.mu_at_angstrom(wavelength) / 10.0  # mu: mm^-1
    t0 = thickness
    for panel in d:
        panel.set_px_mm_strategy(ParallaxCorrectedPxMmStrategy(mu, t0))
    return d
def test_check_and_remove(): test = _Test() # Override the single panel model and parameterisation. This test function # exercises the code for non-hierarchical multi-panel detectors. The # hierarchical detector version is tested via test_cspad_refinement.py multi_panel_detector = Detector() for x in range(3): for y in range(3): new_panel = make_panel_in_array((x, y), test.detector[0]) multi_panel_detector.add_panel(new_panel) test.detector = multi_panel_detector test.stills_experiments[0].detector = multi_panel_detector test.det_param = DetectorParameterisationMultiPanel(multi_panel_detector, test.beam) # update the generated reflections test.generate_reflections() # Predict the reflections in place and put in a reflection manager ref_predictor = StillsExperimentsPredictor(test.stills_experiments) ref_predictor(test.reflections) test.refman = ReflectionManagerFactory.from_parameters_reflections_experiments( refman_phil_scope.extract(), test.reflections, test.stills_experiments, do_stills=True, ) test.refman.finalise() # Build a prediction parameterisation for the stills experiment test.pred_param = StillsPredictionParameterisation( test.stills_experiments, detector_parameterisations=[test.det_param], beam_parameterisations=[test.s0_param], xl_orientation_parameterisations=[test.xlo_param], xl_unit_cell_parameterisations=[test.xluc_param], ) # A non-hierarchical detector does not have panel groups, thus panels are # not treated independently wrt which reflections affect their parameters. 
# As before, setting 792 reflections as the minimum should leave all # parameters free, and should not remove any reflections options = ar_phil_scope.extract() options.min_nref_per_parameter = 792 ar = AutoReduce(options, pred_param=test.pred_param, reflection_manager=test.refman) ar.check_and_remove() det_params = test.pred_param.get_detector_parameterisations() beam_params = test.pred_param.get_beam_parameterisations() xl_ori_params = test.pred_param.get_crystal_orientation_parameterisations() xl_uc_params = test.pred_param.get_crystal_unit_cell_parameterisations() assert det_params[0].num_free() == 6 assert beam_params[0].num_free() == 3 assert xl_ori_params[0].num_free() == 3 assert xl_uc_params[0].num_free() == 6 assert len(test.refman.get_obs()) == 823 # Setting 793 reflections as the minimum fixes 3 unit cell parameters, # and removes all those reflections. There are then too few reflections # for any parameterisation and all will be fixed, leaving no free # parameters for refinement. This fails within PredictionParameterisation, # during update so the final 31 reflections are not removed. options = ar_phil_scope.extract() options.min_nref_per_parameter = 793 ar = AutoReduce(options, pred_param=test.pred_param, reflection_manager=test.refman) with pytest.raises( DialsRefineConfigError, match="There are no free parameters for refinement" ): ar.check_and_remove() det_params = test.pred_param.get_detector_parameterisations() beam_params = test.pred_param.get_beam_parameterisations() xl_ori_params = test.pred_param.get_crystal_orientation_parameterisations() xl_uc_params = test.pred_param.get_crystal_unit_cell_parameterisations() assert det_params[0].num_free() == 0 assert beam_params[0].num_free() == 0 assert xl_ori_params[0].num_free() == 0 assert xl_uc_params[0].num_free() == 0 assert len(test.refman.get_obs()) == 823 - 792
def test_detector():
    """Run the battery of single-panel detector checks."""
    from dxtbx.model import ParallaxCorrectedPxMmStrategy

    def build_detector(offset=0):
        # Single-panel detector; the offset shifts the origin so that
        # "moved detector" comparisons below see different geometry.
        panel = Panel(
            "",  # Type
            "Panel",  # Name
            (10, 0, 0),  # Fast axis
            (0, 10, 0),  # Slow axis
            (0 + offset, 0 + offset, 200 - offset),  # Origin
            (0.172, 0.172),  # Pixel size
            (512, 512),  # Image size
            (0, 1000),  # Trusted range
            0.1,  # Thickness
            "Si",  # Material
            identifier="123")  # Identifier
        return Detector(panel)

    detector = build_detector()

    # Perform some tests
    tst_get_identifier(detector)
    tst_get_gain(detector)
    tst_set_mosflm_beam_centre(detector)
    tst_get_pixel_lab_coord(detector)
    tst_get_image_size_mm(detector)
    tst_is_value_in_trusted_range(detector)
    tst_is_coord_valid(detector)
    tst_pixel_to_millimeter_to_pixel(detector)
    tst_get_names(detector)
    tst_get_thickness(detector)
    tst_get_material(detector)
    tst_resolution(detector)
    tst_panel_mask()

    # Attenuation length
    from cctbx.eltbx import attenuation_coefficient
    table = attenuation_coefficient.get_table("Si")
    mu = table.mu_at_angstrom(1) / 10.0
    t0 = 0.320

    # A moved detector compares different; identical builds compare equal.
    detector_moved = build_detector(offset=100)
    tst_detectors_are_different(detector, detector_moved)
    tst_detectors_are_same(detector_moved, build_detector(offset=100))

    # Detector using a parallax-corrected px<->mm strategy
    detector = Detector(
        Panel(
            "",  # Type
            "",  # Name
            (10, 0, 0),  # Fast axis
            (0, 10, 0),  # Slow axis
            (0, 0, 200),  # Origin
            (0.172, 0.172),  # Pixel size
            (512, 512),  # Image size
            (0, 1000),  # Trusted range
            0.0,  # Thickness
            "",  # Material
            ParallaxCorrectedPxMmStrategy(mu, t0)))
    tst_parallax_correction(detector)
def run(self):
    '''Execute the script.

    Parses the command line, configures logging, imports the image data
    (optionally pre-splitting multi-image sets into per-frame datablocks),
    applies the reference detector geometry if one was loaded, and then
    processes each (tag, datablock) item either via MPI round-robin or
    libtbx.easy_mp.
    '''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(
        show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
        self.parser.print_help()
        return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
        params.verbosity,
        info='dials.process.log',
        debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    # BUGFIX: the original tested "diff_phil is not ''", which compares
    # object identity rather than equality; use truthiness instead.
    if diff_phil:
        logger.info('The following parameters have been modified:\n')
        logger.info(diff_phil)

    self.load_reference_geometry()

    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
        # Handle still imagesets by breaking them apart into multiple datablocks
        # Further handle single file still imagesets (like HDF5) by tagging each
        # frame using its index
        datablocks = [do_import(path) for path in all_paths]

        if self.reference_detector is not None:
            # Replace each frame's detector with a copy of the reference one
            from dxtbx.model import Detector
            for datablock in datablocks:
                for imageset in datablock.extract_imagesets():
                    for i in range(len(imageset)):
                        imageset.set_detector(
                            Detector.from_dict(self.reference_detector.to_dict()),
                            index=i)

        for datablock in datablocks:
            for imageset in datablock.extract_imagesets():
                update_geometry(imageset)

        # Split every imageset into single-frame datablocks
        indices = []
        basenames = []
        split_datablocks = []
        for datablock in datablocks:
            for imageset in datablock.extract_imagesets():
                paths = imageset.paths()
                for i in xrange(len(imageset)):
                    subset = imageset[i:i+1]
                    split_datablocks.append(
                        DataBlockFactory.from_imageset(subset)[0])
                    indices.append(i)
                    basenames.append(
                        os.path.splitext(os.path.basename(paths[i]))[0])
        # Disambiguate repeated basenames with the frame index
        tags = []
        for i, basename in zip(indices, basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d"%(basename, i))
            else:
                tags.append(basename)

        # Wrapper function
        def do_work(item):
            Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

        iterable = zip(tags, split_datablocks)

    else:
        basenames = [os.path.splitext(os.path.basename(filename))[0]
                     for filename in all_paths]
        tags = []
        for i, basename in enumerate(basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d"%(basename, i))
            else:
                tags.append(basename)

        # Wrapper function: import happens lazily inside the worker
        def do_work(item):
            tag, filename = item

            datablock = do_import(filename)
            imagesets = datablock.extract_imagesets()
            if len(imagesets) == 0 or len(imagesets[0]) == 0:
                logger.info("Zero length imageset in file: %s"%filename)
                return
            if len(imagesets) > 1:
                raise Abort("Found more than one imageset in file: %s"%filename)
            if len(imagesets[0]) > 1:
                raise Abort("Found a multi-image file. Run again with pre_import=True")

            if self.reference_detector is not None:
                from dxtbx.model import Detector
                imagesets[0].set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()))

            update_geometry(imagesets[0])

            Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

        iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job
        # round-robin assignment of work items to ranks
        for i, item in enumerate(iterable):
            if (i+rank)%size == 0:
                do_work(item)
    else:
        easy_mp.parallel_map(
            func=do_work,
            iterable=iterable,
            processes=params.mp.nproc,
            method=params.mp.method,
            preserve_order=True,
            preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
def _detector(self):
    '''Return a hierarchical detector model.

    Module positions come from the detector blueprints - modelled at the
    moment as 24 rows, each consisting of 5 sensors (the sensor split is
    only applied when self._multi_panel is set).
    '''
    from dxtbx.model import Detector
    from scitbx import matrix
    import math

    # detector-frame basis vectors in the laboratory frame
    x = matrix.col((-1, 0, 0))
    y = matrix.col((0, 1, 0))
    z = matrix.col((0, 0, 1))

    # observed direct-beam position from the header, in pixels
    beam_xy = self._cif_header_dictionary['Beam_xy']
    beam_xy = beam_xy.replace('(', '').replace(')', '').replace(
        ',', '').split()[:2]
    obs_beam_x, obs_beam_y = [float(f) for f in beam_xy]

    # nominal beam centre, in pixels
    ideal_beam_x = 1075
    ideal_beam_y = 2594

    # offset of nominal from observed beam centre, in mm (0.172 mm pixels)
    beam_shift_x = 0.172 * (ideal_beam_x - obs_beam_x)
    beam_shift_y = 0.172 * (ideal_beam_y - obs_beam_y)

    # distance converted m -> mm; wavelength in Angstrom; thickness -> mm
    distance = float(self._cif_header_dictionary['Detector_distance'].
                     split()[0]) * 1000.0

    wavelength = float(
        self._cif_header_dictionary['Wavelength'].split()[0])

    thickness = float(
        self._cif_header_dictionary['Silicon'].split()[2]) * 1000.0

    off_x = 184.9

    detector = Detector()
    root = detector.hierarchy()
    root.set_frame(
        x.elems, y.elems,
        (-distance * z + (beam_shift_x * x) + (beam_shift_y * y)).elems)

    # parallax correction for Si at this wavelength;
    # mu_at_angstrom returns cm^-1 so divide by 10 for mm^-1
    from cctbx.eltbx import attenuation_coefficient
    table = attenuation_coefficient.get_table("Si")
    mu = table.mu_at_angstrom(wavelength) / 10.0
    t0 = thickness
    px_mm = ParallaxCorrectedPxMmStrategy(mu, t0)

    self.coords = {}

    for j in range(24):
        # each row is 195 px tall with a 17 px inter-module gap
        shift_y = 195 + 17
        ymin, ymax = j * shift_y, j * shift_y + 195

        # rows are tilted on a cylinder of radius 250 mm
        angle = math.pi * (-12.2 + 0.5 * 7.903 + j * (7.903 + 0.441)) / 180.0
        fast = matrix.col((1, 0, 0))
        slow = matrix.col((0, math.sin(angle), math.cos(angle)))
        normal = fast.cross(slow)

        row_origin = 250.0 * normal - off_x * fast - 16.8 * slow

        if not self._multi_panel:
            xmin, xmax = 0, 2463

            # OK two calls to add_panel here for detector like things => two
            # copies of the panel then? https://github.com/dials/dials/issues/189
            # ... this is also not the source of the leak

            # OBS! you need to set the panel to a root before set local frame...
            p = root.add_panel()
            p.set_type('SENSOR_PAD')
            p.set_name('row-%02d' % j)
            p.set_raw_image_offset((xmin, ymin))
            p.set_image_size((2463, 195))
            p.set_trusted_range((-1, 1000000))
            p.set_pixel_size((0.172, 0.172))
            p.set_local_frame(fast.elems, slow.elems, row_origin.elems)
            p.set_thickness(thickness)
            p.set_material('Si')
            p.set_mu(mu)
            p.set_px_mm_strategy(px_mm)
            # NOTE: a second, redundant set_raw_image_offset call was
            # removed here; the offset is already set above.
            self.coords[p.get_name()] = (xmin, ymin, xmax, ymax)
        else:
            # 5 sensors per row, 487 px wide with a 7 px gap
            shift_x = 487 + 7

            for i in range(5):
                xmin, xmax = i * shift_x, i * shift_x + 487
                origin = row_origin + i * (487 + 7) * 0.172 * fast

                # OBS! you need to set the panel to a root before set local frame...
                p = root.add_panel()
                p.set_type('SENSOR_PAD')
                p.set_name('row-%02d-col-%02d' % (j, i))
                p.set_raw_image_offset((xmin, ymin))
                p.set_image_size((487, 195))
                p.set_trusted_range((-1, 1000000))
                p.set_pixel_size((0.172, 0.172))
                p.set_local_frame(fast.elems, slow.elems, origin.elems)
                p.set_thickness(thickness)
                p.set_material('Si')
                p.set_mu(mu)
                p.set_px_mm_strategy(px_mm)
                # NOTE: redundant duplicate set_raw_image_offset removed.
                self.coords[p.get_name()] = (xmin, ymin, xmax, ymax)

    return detector
def _detector(self):
    '''Return a hierarchical detector model.

    Module positions come from the detector blueprints - modelled at the
    moment as 24 rows, each consisting of 5 sensors (the sensor split is
    only applied when self._multi_panel is set).
    '''
    from dxtbx.model import Detector
    from scitbx import matrix
    import math

    # detector-frame basis vectors in the laboratory frame
    x = matrix.col((-1, 0, 0))
    y = matrix.col((0, 1, 0))
    z = matrix.col((0, 0, 1))

    # fixed observed/nominal beam-centre rows; shift in mm (0.172 mm pixels)
    obs_beam_y = 2587
    ideal_beam_y = 2594
    # BUGFIX: previously recomputed from the literals 2594 and 2587,
    # leaving the two named variables above unused; same value.
    beam_shift_y = 0.172 * (ideal_beam_y - obs_beam_y)

    # distance converted m -> mm; wavelength in Angstrom; thickness -> mm
    distance = float(
        self._cif_header_dictionary['Detector_distance'].split()[0]) * 1000.0

    wavelength = float(
        self._cif_header_dictionary['Wavelength'].split()[0])

    thickness = float(
        self._cif_header_dictionary['Silicon'].split()[2]) * 1000.0

    # for longer wavelength data sets move 192.3 below to 184.9
    if wavelength < 1.128:
        off_x = 191.9
    else:
        off_x = 184.9

    # fold the beam-centre correction into the detector normal direction
    z += beam_shift_y * y

    detector = Detector()
    root = detector.hierarchy()
    root.set_frame(
        x.elems,
        y.elems,
        (-distance * z).elems)

    # parallax correction for Si at this wavelength;
    # mu_at_angstrom returns cm^-1 so divide by 10 for mm^-1
    from cctbx.eltbx import attenuation_coefficient
    table = attenuation_coefficient.get_table("Si")
    mu = table.mu_at_angstrom(wavelength) / 10.0
    t0 = thickness
    px_mm = ParallaxCorrectedPxMmStrategy(mu, t0)

    self.coords = {}

    for j in range(24):
        # each row is 195 px tall with a 17 px inter-module gap
        shift_y = 195 + 17
        ymin, ymax = j * shift_y, j * shift_y + 195

        # rows are tilted on a cylinder of radius 250 mm
        angle = math.pi * (-12.2 + 0.5 * 7.903 + j * (7.903 + 0.441)) / 180.0
        fast = matrix.col((1, 0, 0))
        slow = matrix.col((0, math.sin(angle), math.cos(angle)))
        normal = fast.cross(slow)

        row_origin = 250.0 * normal - off_x * fast - 16.8 * slow

        if not self._multi_panel:
            xmin, xmax = 0, 2463

            # OK two calls to add_panel here for detector like things => two
            # copies of the panel then? https://github.com/dials/dials/issues/189
            # ... this is also not the source of the leak

            # OBS! you need to set the panel to a root before set local frame...
            p = root.add_panel()
            p.set_type('SENSOR_PAD')
            p.set_name('row-%02d' % j)
            p.set_raw_image_offset((xmin, ymin))
            p.set_image_size((2463, 195))
            p.set_trusted_range((-1, 1000000))
            p.set_pixel_size((0.172, 0.172))
            p.set_local_frame(
                fast.elems,
                slow.elems,
                row_origin.elems)
            p.set_thickness(thickness)
            p.set_material('Si')
            p.set_mu(mu)
            p.set_px_mm_strategy(px_mm)
            # NOTE: a second, redundant set_raw_image_offset call was
            # removed here; the offset is already set above.
            self.coords[p.get_name()] = (xmin, ymin, xmax, ymax)
        else:
            # 5 sensors per row, 487 px wide with a 7 px gap
            shift_x = 487 + 7

            for i in range(5):
                xmin, xmax = i * shift_x, i * shift_x + 487
                origin = row_origin + i * (487+7) * 0.172 * fast

                # OBS! you need to set the panel to a root before set local frame...
                p = root.add_panel()
                p.set_type('SENSOR_PAD')
                p.set_name('row-%02d-col-%02d' % (j, i))
                p.set_raw_image_offset((xmin, ymin))
                p.set_image_size((487, 195))
                p.set_trusted_range((-1, 1000000))
                p.set_pixel_size((0.172, 0.172))
                p.set_local_frame(
                    fast.elems,
                    slow.elems,
                    origin.elems)
                p.set_thickness(thickness)
                p.set_material('Si')
                p.set_mu(mu)
                p.set_px_mm_strategy(px_mm)
                # NOTE: redundant duplicate set_raw_image_offset removed.
                self.coords[p.get_name()] = (xmin, ymin, xmax, ymax)

    return detector
def test():
    '''Test single- and multi-panel detector parameterisations, including
    parameter shifts, fixing/unfixing, and comparison of analytical with
    finite difference derivatives.'''
    # set the random seed to make the test reproducible
    random.seed(1337)

    # set up a simple detector frame with directions aligned with
    # principal axes and sensor origin located on the z-axis at -110
    d1 = matrix.col((1, 0, 0))
    d2 = matrix.col((0, -1, 0))
    # lim = (0,50)
    npx_fast = 1475
    npx_slow = 1679
    pix_size_f = pix_size_s = 0.172
    detector = DetectorFactory.make_detector(
        "PAD",
        d1,
        d2,
        matrix.col((0, 0, -110)),
        (pix_size_f, pix_size_s),
        (npx_fast, npx_slow),
        (0, 2e20),
    )

    dp = DetectorParameterisationSinglePanel(detector)
    beam = BeamFactory().make_beam(
        sample_to_source=-1 * (matrix.col((0, 0, -110)) + 10 * d1 + 10 * d2),
        wavelength=1.0,
    )

    # Test change of parameters
    # =========================

    # 1. shift detector plane so that the z-axis intercepts its centre
    # at a distance of 100 along the initial normal direction. As the
    # initial normal is along -z, we expect the frame to intercept the
    # z-axis at -100.
    p_vals = dp.get_param_vals()
    p_vals[0:3] = [100.0, 0.0, 0.0]
    dp.set_param_vals(p_vals)
    detector = dp._model
    assert len(detector) == 1
    panel = detector[0]
    v1 = matrix.col(panel.get_origin())
    v2 = matrix.col((0.0, 0.0, 1.0))
    assert approx_equal(v1.dot(v2), -100.0)

    # 2. rotate frame around its initial normal by +90 degrees. Only d1
    # and d2 should change. As we rotate clockwise around the initial
    # normal (-z direction) then d1 should rotate onto the original
    # direction d2, and d2 should rotate to negative of the original
    # direction d1
    p_vals[3] = 1000.0 * pi / 2  # set tau1 value
    dp.set_param_vals(p_vals)
    detector = dp._model
    assert len(detector) == 1
    panel = detector[0]
    assert approx_equal(
        matrix.col(panel.get_fast_axis()).dot(dp._initial_state["d1"]), 0.0)
    assert approx_equal(
        matrix.col(panel.get_slow_axis()).dot(dp._initial_state["d2"]), 0.0)
    assert approx_equal(
        matrix.col(panel.get_normal()).dot(dp._initial_state["dn"]), 1.0)

    # 3. no rotation around initial normal, +10 degrees around initial
    # d1 direction and +10 degrees around initial d2. Check d1 and d2
    # match paper calculation
    p_vals[3] = 0.0  # tau1
    p_vals[4] = 1000.0 * pi / 18  # tau2
    p_vals[5] = 1000.0 * pi / 18  # tau3
    dp.set_param_vals(p_vals)

    # paper calculation values
    v1 = matrix.col((cos(pi / 18), 0, sin(pi / 18)))
    v2 = matrix.col((
        sin(pi / 18)**2,
        -cos(pi / 18),
        sqrt((2 * sin(pi / 36) * sin(pi / 18))**2 - sin(pi / 18)**4) -
        sin(pi / 18),
    ))

    detector = dp._model
    assert len(detector) == 1
    panel = detector[0]
    assert approx_equal(matrix.col(panel.get_fast_axis()).dot(v1), 1.0)
    assert approx_equal(matrix.col(panel.get_slow_axis()).dot(v2), 1.0)

    # 4. Test fixing and unfixing of parameters
    p_vals = [
        100.0, 0.0, 0.0, 1000.0 * pi / 18, 1000.0 * pi / 18, 1000.0 * pi / 18
    ]
    dp.set_param_vals(p_vals)
    f = dp.get_fixed()
    f[0:3] = [True] * 3
    dp.set_fixed(f)
    p_vals2 = [0.0, 0.0, 0.0]
    dp.set_param_vals(p_vals2)
    assert dp.get_param_vals(only_free=False) == [
        100.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ]

    an_ds_dp = dp.get_ds_dp()
    assert len(an_ds_dp) == 3

    f[0:3] = [False] * 3
    dp.set_fixed(f)
    p_vals = dp.get_param_vals()
    p_vals2 = [a + b for a, b in zip(p_vals, [-10.0, 1.0, 1.0, 0.0, 0.0, 0.0])]
    dp.set_param_vals(p_vals2)
    assert dp.get_param_vals() == [90.0, 1.0, 1.0, 0.0, 0.0, 0.0]

    # 5. Tests of the calculation of derivatives
    # Now using parameterisation in mrad

    # random initial orientations with a random parameter shift at each
    attempts = 100
    for i in range(attempts):

        # create random initial position
        det = Detector(random_panel())
        dp = DetectorParameterisationSinglePanel(det)

        # apply a random parameter shift
        p_vals = dp.get_param_vals()
        p_vals = random_param_shift(
            p_vals,
            [10, 10, 10, 1000.0 * pi / 18, 1000.0 * pi / 18,
             1000.0 * pi / 18])
        dp.set_param_vals(p_vals)

        # compare analytical and finite difference derivatives.
        an_ds_dp = dp.get_ds_dp(multi_state_elt=0)
        fd_ds_dp = get_fd_gradients(dp, [1.0e-6] * 3 + [1.0e-4 * pi / 180] * 3)

        for j in range(6):
            assert approx_equal(
                (fd_ds_dp[j] - an_ds_dp[j]),
                matrix.sqr((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)),
                eps=1.0e-6,
            ), textwrap.dedent("""\
                Failure comparing analytical with finite difference derivatives.
                Failure in try {i}
                failure for parameter number {j}
                of the orientation parameterisation
                with fd_ds_dp =
                {fd}
                and an_ds_dp =
                {an}
                so that difference fd_ds_dp - an_ds_dp =
                {diff}
                """).format(i=i, j=j, fd=fd_ds_dp[j], an=an_ds_dp[j],
                            diff=fd_ds_dp[j] - an_ds_dp[j])

    # 6. Test a multi-panel detector with non-coplanar panels.
    # (renumbered; this was a second section "5." in the original)

    # place a beam at the centre of the single panel detector (need a
    # beam to initialise the multi-panel detector parameterisation)
    lim = det[0].get_image_size_mm()
    shift1 = lim[0] / 2.0
    shift2 = lim[1] / 2.0
    beam_centre = (matrix.col(det[0].get_origin()) +
                   shift1 * matrix.col(det[0].get_fast_axis()) +
                   shift2 * matrix.col(det[0].get_slow_axis()))
    beam = BeamFactory().make_beam(sample_to_source=-1.0 * beam_centre,
                                   wavelength=1.0)

    multi_panel_detector = make_multi_panel(det)

    # parameterise this detector
    dp = DetectorParameterisationMultiPanel(multi_panel_detector, beam)

    # ensure the beam still intersects the central panel
    intersection = multi_panel_detector.get_ray_intersection(beam.get_s0())
    assert intersection[0] == 4

    # record the offsets and dir1s, dir2s
    offsets_before_shift = dp._offsets
    dir1s_before_shift = dp._dir1s
    dir2s_before_shift = dp._dir2s

    # apply a random parameter shift (~10 mm distances, ~50 mrad angles)
    p_vals = dp.get_param_vals()
    p_vals = random_param_shift(p_vals, [10, 10, 10, 50, 50, 50])

    # reparameterise the detector
    dp = DetectorParameterisationMultiPanel(multi_panel_detector, beam)

    # record the offsets and dir1s, dir2s
    offsets_after_shift = dp._offsets
    dir1s_after_shift = dp._dir1s
    dir2s_after_shift = dp._dir2s

    # ensure the offsets, dir1s and dir2s are the same. This means that
    # each panel in the detector moved with the others as a rigid body
    for a, b in zip(offsets_before_shift, offsets_after_shift):
        assert approx_equal(a, b, eps=1.0e-10)

    for a, b in zip(dir1s_before_shift, dir1s_after_shift):
        assert approx_equal(a, b, eps=1.0e-10)

    for a, b in zip(dir2s_before_shift, dir2s_after_shift):
        assert approx_equal(a, b, eps=1.0e-10)

    attempts = 5
    for i in range(attempts):

        multi_panel_detector = make_multi_panel(det)

        # parameterise this detector
        dp = DetectorParameterisationMultiPanel(multi_panel_detector, beam)
        p_vals = dp.get_param_vals()

        # apply a random parameter shift
        p_vals = random_param_shift(
            p_vals,
            [10, 10, 10, 1000.0 * pi / 18, 1000.0 * pi / 18,
             1000.0 * pi / 18])
        dp.set_param_vals(p_vals)

        # compare analytical and finite difference derivatives
        # get_fd_gradients will implicitly only get gradients for the
        # 1st panel in the detector, so explicitly get the same for the
        # analytical gradients
        for j in range(9):
            an_ds_dp = dp.get_ds_dp(multi_state_elt=j)
            fd_ds_dp = get_fd_gradients(dp, [1.0e-7] * dp.num_free(),
                                        multi_state_elt=j)

            for k in range(6):
                # BUGFIX: the message template below previously contained
                # the malformed placeholder "{j]", which made .format()
                # raise ValueError exactly when the assertion failed,
                # masking the real failure message.
                assert approx_equal(
                    (fd_ds_dp[k] - matrix.sqr(an_ds_dp[k])),
                    matrix.sqr((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)),
                    eps=1.0e-5,
                    out=None,
                ), textwrap.dedent("""\
                    Failure comparing analytical with finite difference derivatives.
                    Failure in try {i}
                    for panel number {j}
                    failure for parameter number {k}
                    of the orientation parameterisation
                    with fd_ds_dp =
                    {fd}
                    and an_ds_dp =
                    {an}
                    so that difference fd_ds_dp - an_ds_dp =
                    {diff}
                    """).format(
                        i=i,
                        j=j,
                        k=k,
                        fd=fd_ds_dp[k],
                        an=an_ds_dp[k],
                        diff=fd_ds_dp[k] - matrix.sqr(an_ds_dp[k]),
                    )
def _detector(self):
    '''Detector model, allowing for small offsets in the positions of 60
    detector modules'''

    # Module positional offsets in x, y, in pixels - for the moment ignoring the
    # rotational offsets as these are not well defined. To be honest these
    # positional offsets are also not well defined as I do not know how they
    # should be applied...
    # NOTE(review): these two tables are currently unused below - the
    # per-module offsets are *not* applied to the panel origins; they are
    # kept here as calibration data only.

    # keyed by (row, column) module index; values in pixels
    x = {
        (0, 0): -0.477546, (0, 1): 0.130578, (0, 2): 0.045041,
        (0, 3): -0.439872, (0, 4): -0.382077,
        (1, 0): 0.087405, (1, 1): 0.249597, (1, 2): 0.184265,
        (1, 3): 0.158342, (1, 4): 0.025225,
        (2, 0): -0.179892, (2, 1): -0.010974, (2, 2): -0.139207,
        (2, 3): 0.282851, (2, 4): -0.442219,
        (3, 0): -0.185027, (3, 1): 0.218601, (3, 2): 0.092585,
        (3, 3): 0.35862, (3, 4): -0.29161,
        (4, 0): 0.145368, (4, 1): 0.609289, (4, 2): 0.396265,
        (4, 3): 0.41625, (4, 4): 0.07152,
        (5, 0): 0.247142, (5, 1): 0.046563, (5, 2): 0.248714,
        (5, 3): -0.044628, (5, 4): -0.391509,
        (6, 0): 0.516643, (6, 1): 0.358453, (6, 2): 0.069219,
        (6, 3): 0.095861, (6, 4): -0.167403,
        (7, 0): -0.381352, (7, 1): -0.35338, (7, 2): 0.348656,
        (7, 3): 0.024543, (7, 4): 0.328706,
        (8, 0): 0.150886, (8, 1): 0.244987, (8, 2): -0.102911,
        (8, 3): 0.16633, (8, 4): 0.386622,
        (9, 0): 0.037924, (9, 1): 0.314392, (9, 2): 0.238818,
        (9, 3): 0.815028, (9, 4): -0.048818,
        (10, 0): -0.670524, (10, 1): -0.304119, (10, 2): 0.252284,
        (10, 3): -0.05485, (10, 4): -0.355264,
        (11, 0): -0.404947, (11, 1): -0.020622, (11, 2): 0.648473,
        (11, 3): -0.277175, (11, 4): -0.711951
    }

    y = {
        (0, 0): -0.494797, (0, 1): -0.212976, (0, 2): 0.085351,
        (0, 3): 0.35494, (0, 4): 0.571189,
        (1, 0): -0.421708, (1, 1): 0.061914, (1, 2): 0.238996,
        (1, 3): 0.146692, (1, 4): 0.407145,
        (2, 0): -0.313212, (2, 1): -0.225025, (2, 2): 0.031613,
        (2, 3): -0.047839, (2, 4): 0.42716,
        (3, 0): -0.361193, (3, 1): 0.057663, (3, 2): 0.022357,
        (3, 3): 0.062717, (3, 4): 0.150611,
        (4, 0): 0.035511, (4, 1): -0.271567, (4, 2): 0.007761,
        (4, 3): -0.124021, (4, 4): 0.093017,
        (5, 0): -0.238897, (5, 1): -0.179724, (5, 2): -0.113608,
        (5, 3): 0.017841, (5, 4): -0.012933,
        (6, 0): -0.166337, (6, 1): -0.272922, (6, 2): -0.194665,
        (6, 3): -0.058535, (6, 4): -0.405404,
        (7, 0): -0.318824, (7, 1): -0.311276, (7, 2): -0.205223,
        (7, 3): -0.292664, (7, 4): -0.474762,
        (8, 0): -0.039504, (8, 1): -0.239887, (8, 2): -0.343485,
        (8, 3): -0.459429, (8, 4): -0.426901,
        (9, 0): -0.187805, (9, 1): 0.282727, (9, 2): -0.601164,
        (9, 3): -0.467605, (9, 4): -0.589271,
        (10, 0): 0.028311, (10, 1): -0.391571, (10, 2): -0.463112,
        (10, 3): -0.358092, (10, 4): -0.285396,
        (11, 0): 0.01863, (11, 1): -0.380099, (11, 2): -0.234953,
        (11, 3): -0.593992, (11, 4): -0.801247
    }

    # header values: distance in m (converted to mm below), beam centre in
    # pixels, wavelength in Angstrom, pixel size in m, thickness in mm
    distance = float(
        self._cif_header_dictionary['Detector_distance'].split()[0])

    beam_xy = self._cif_header_dictionary['Beam_xy'].replace(
        '(', '').replace(')', '').replace(',', '').split()[:2]

    beam_x, beam_y = map(float, beam_xy)

    wavelength = float(
        self._cif_header_dictionary['Wavelength'].split()[0])

    pixel_xy = self._cif_header_dictionary['Pixel_size'].replace(
        'm', '').replace('x', '').split()

    pixel_x, pixel_y = map(float, pixel_xy)

    thickness = float(
        self._cif_header_dictionary['Silicon'].split()[2]) * 1000.0

    # image dimensions in pixels
    nx = int(
        self._cif_header_dictionary['X-Binary-Size-Fastest-Dimension'])
    ny = int(
        self._cif_header_dictionary['X-Binary-Size-Second-Dimension'])

    overload = int(
        self._cif_header_dictionary['Count_cutoff'].split()[0])
    underload = -1

    # take into consideration here the thickness of the sensor also the
    # wavelength of the radiation (which we have in the same file...)
    # mu_at_angstrom returns cm^-1; divide by 10 for mm^-1

    from cctbx.eltbx import attenuation_coefficient
    table = attenuation_coefficient.get_table("Si")
    mu = table.mu_at_angstrom(wavelength) / 10.0
    t0 = thickness

    # FIXME would also be very nice to be able to take into account the
    # misalignment of the individual modules given the calibration...

    # single detector or multi-module detector

    # convert to mm
    pixel_x *= 1000.0
    pixel_y *= 1000.0
    distance *= 1000.0

    if not self._multi_panel:
        # simple single-panel model with the standard PILATUS gap mask
        detector = self._detector_factory.simple(
            'PAD', distance, (beam_x * pixel_x, beam_y * pixel_y), '+x', '-y',
            (pixel_x, pixel_y), (nx, ny), (underload, overload), [],
            ParallaxCorrectedPxMmStrategy(mu, t0))

        for f0, s0, f1, s1 in determine_pilatus_mask(detector):
            detector[0].add_mask(f0, s0, f1, s1)

        detector[0].set_thickness(thickness)
        detector[0].set_material('Si')
        detector[0].set_mu(mu)

        return detector

    # got to here means 60-panel version
    from dxtbx.model import Detector
    from scitbx import matrix

    d = Detector()

    # root frame located at the direct-beam position, beam along -z
    beam_centre = matrix.col((beam_x * pixel_x, beam_y * pixel_y, 0))

    fast = matrix.col((1.0, 0.0, 0.0))
    slow = matrix.col((0.0, -1.0, 0.0))
    s0 = matrix.col((0, 0, -1))
    origin = (distance * s0) - (fast * beam_centre[0]) - \
             (slow * beam_centre[1])

    root = d.hierarchy()
    root.set_local_frame(
        fast.elems,
        slow.elems,
        origin.elems)

    # pixel bounds of the 5 x 12 grid of modules (487 x 195 px modules
    # separated by 7 px / 17 px gaps)
    xmins = [0, 494, 988, 1482, 1976]
    xmaxes = [487, 981, 1475, 1969, 2463]
    ymins = [0, 212, 424, 636, 848, 1060, 1272, 1484, 1696, 1908, 2120,
             2332]
    ymaxes = [195, 407, 619, 831, 1043, 1255, 1467, 1679, 1891, 2103, 2315,
              2527]

    self.coords = {}

    # panel-local axes relative to the root frame defined above
    fast = matrix.col((1.0, 0.0, 0.0))
    slow = matrix.col((0.0, 1.0, 0.0))
    panel_idx = 0
    for ymin, ymax in zip(ymins, ymaxes):
        for xmin, xmax in zip(xmins, xmaxes):
            xmin_mm = xmin * pixel_x
            ymin_mm = ymin * pixel_y

            origin_panel = fast * xmin_mm + slow * ymin_mm

            panel_name = "Panel%d" % panel_idx
            panel_idx += 1

            p = root.add_panel()
            p.set_type("SENSOR_PAD")
            p.set_px_mm_strategy(ParallaxCorrectedPxMmStrategy(mu, t0))
            p.set_name(panel_name)
            p.set_image_size((xmax-xmin, ymax-ymin))
            p.set_trusted_range((underload, overload))
            p.set_pixel_size((pixel_x,pixel_y))
            p.set_thickness(thickness)
            p.set_material('Si')
            p.set_mu(mu)
            p.set_local_frame(
                fast.elems,
                slow.elems,
                origin_panel.elems)
            p.set_raw_image_offset((xmin, ymin))
            # remember the raw-image bounds of each panel
            self.coords[panel_name] = (xmin,ymin,xmax,ymax)

    return d
def _detector(self):
    '''The _detector() function returns a model for a CSPAD detector as
    used at LCLS's CXI and XPP endstations. It converts the metrology
    information in the pure Python object extracted from the image
    pickle to DXTBX-style transformation vectors. Only ASIC:s are
    considered, since DXTBX metrology is not concerned with
    hierarchies.

    Merged from xfel.cftbx.detector.cspad_detector.readHeader() and
    xfel.cftbx.detector.metrology.metrology_as_dxtbx_vectors().
    '''

    from dxtbx.model import SimplePxMmStrategy
    from dxtbx.model import Detector
    from scitbx.matrix import col

    # XXX Introduces dependency on cctbx.xfel! Should probably be
    # merged into the code here!
    from xfel.cftbx.detector.metrology import \
        _transform, get_projection_matrix

    # Apply the detector distance to the translation of the root
    # detector object. Tb_d is the homogeneous detector-to-lab
    # transform; distance is given in mm, translations in m (hence 1e-3).
    d = self._metrology_params.detector
    Tb_d = _transform(
        col(d.orientation).normalize(),
        col(d.translation) +
        col((0, 0, -self._metrology_params.distance * 1e-3)))[1]

    self._raw_data = []
    detector = Detector()

    # Walk the panel -> sensor -> ASIC hierarchy, accumulating the
    # chain of homogeneous transforms at each level.
    for p in d.panel:
        Tb_p = Tb_d * _transform(
            col(p.orientation).normalize(), col(p.translation))[1]
        for s in p.sensor:
            Tb_s = Tb_p * _transform(
                col(s.orientation).normalize(), col(s.translation))[1]
            for a in s.asic:
                Tb_a = Tb_s * _transform(
                    col(a.orientation).normalize(), col(a.translation))[1]

                # projection from pixel indices to metric coordinates
                Pb = get_projection_matrix(a.pixel_size, a.dimension)[1]

                # The DXTBX-style metrology description consists of three
                # vectors for each ASIC. The origin vector locates the
                # (0, 0)-pixel in the laboratory frame in units of mm.
                # The second and third vectors give the directions to the
                # pixels immediately next to (0, 0) in the fast and slow
                # directions, respectively, in arbitrary units.
                origin = Tb_a * Pb * col((0, 0, 1))
                fast = Tb_a * Pb * col((0, a.dimension[0], 1)) - origin
                slow = Tb_a * Pb * col((a.dimension[1], 0, 1)) - origin

                # Convert vector units from meter to millimeter. The
                # default, SimplePxMmStrategy applies here. XXX Due to
                # dark subtraction, a valid pixel intensity may be
                # negative, and this is currently not reflected by
                # trusted_range.
                key = (d.serial, p.serial, s.serial, a.serial)

                panel = detector.add_panel()
                panel.set_type("PAD")
                panel.set_name('%d:%d:%d:%d' % key)
                panel.set_local_frame(
                    [t * 1e3 for t in fast.elems[0:3]],
                    [t * 1e3 for t in slow.elems[0:3]],
                    [t * 1e3 for t in origin.elems[0:3]])
                panel.set_pixel_size([t * 1e3 for t in a.pixel_size])
                panel.set_image_size(a.dimension)
                panel.set_trusted_range((0, a.saturation))
                # keep the raw tile data in the same order as the panels
                self._raw_data.append(self._tiles[key])

    return detector
def __init__(self, obj, beam):
    """Build a dxtbx Detector model from an NXdetector / NXdetector_module
    pair, applying a parallax-corrected pixel-to-millimetre strategy."""
    from dxtbx.model import Detector, Panel
    from cctbx.eltbx import attenuation_coefficient
    from dxtbx.model import ParallaxCorrectedPxMmStrategy
    from scitbx import matrix

    # Handles into the NeXus file
    nx_file = obj.handle.file
    nx_detector = obj.handle
    nx_module = obj.modules[0].handle

    # Detector name and type
    detector_type = str(nx_detector['type'][()])
    detector_name = str(nx_detector.name)

    # Trusted range of pixel values
    trusted_range = (-1, float(nx_detector['saturation_value'][()]))

    # Sensor thickness, converted to mm
    thickness = nx_detector['sensor_thickness']
    thickness_value = float(convert_units(
        float(thickness[()]), thickness.attrs['units'], "mm"))

    # Sensor material
    material = str(nx_detector['sensor_material'][()])

    def _pixel_direction(name):
        # Read a {fast,slow}_pixel_direction dataset: the pixel pitch
        # (converted to mm) and the normalized direction vector.
        ds = nx_module[name]
        pitch = convert_units(float(ds[()]), ds.attrs['units'], "mm")
        axis = matrix.col(ds.attrs['vector']).normalize()
        return pitch, axis

    fast_pixel_direction_value, fast_axis = _pixel_direction(
        'fast_pixel_direction')
    slow_pixel_direction_value, slow_axis = _pixel_direction(
        'slow_pixel_direction')

    # Get the origin vector
    module_offset = nx_module['module_offset']
    origin = construct_vector(nx_file, module_offset.name)

    # Ensure that fast and slow axis are orthogonal
    normal = fast_axis.cross(slow_axis)
    slow_axis = -fast_axis.cross(normal)

    # Compute the attenuation coefficient.
    # This will fail for undefined composite materials
    # mu_at_angstrom returns cm^-1, but need mu in mm^-1
    # Normalize the common misspellings/aliases of silicon first.
    material = {'Silicon': 'Si', 'Sillicon': 'Si'}.get(material, material)
    if material not in ('Si', 'CdTe', 'GaAs'):
        raise RuntimeError('Unknown material: %s' % material)
    table = attenuation_coefficient.get_table(material)
    wavelength = beam.get_wavelength()
    mu = table.mu_at_angstrom(wavelength) / 10.0

    # Construct the detector model
    pixel_size = (fast_pixel_direction_value, slow_pixel_direction_value)
    image_size = tuple(map(int, nx_module['data_size']))

    self.model = Detector()
    self.model.add_panel(
        Panel(
            detector_type,
            detector_name,
            tuple(fast_axis),
            tuple(slow_axis),
            tuple(origin),
            pixel_size,
            image_size,
            trusted_range,
            thickness_value,
            material,
            mu))

    # Set the parallax correction
    for panel in self.model:
        panel.set_px_mm_strategy(
            ParallaxCorrectedPxMmStrategy(mu, thickness_value))
        panel.set_type('SENSOR_PAD')
from dxtbx.model import Detector d = Detector() p1 = d.add_panel() p2 = d.add_panel() p3 = d.add_panel() p4 = d.add_panel() root = d.hierarchy() g = root.add_group() g.add_panel(d[0]) g.add_panel(d[1]) root.add_panel(d[2]) root.add_panel(d[3]) print d.to_dict()
def detector_parallel_refiners(params, experiments, reflections):
    """Split a hierarchical detector into panel groups at the requested
    hierarchy level, refine each group independently (in parallel) as a
    rigid body, fold the refined frames back into the original detector,
    then run one final refinement of the recombined detector.

    Returns the refined experiments. Note: params is temporarily mutated
    (hierarchy_level set to 0) and restored before returning.
    """

    print("Refining detector at hierarchy_level=" + \
          str(params.refinement.parameterisation.detector.hierarchy_level),
          "\n")
    orig_detector = experiments.detectors()[0]
    try:
        h = orig_detector.hierarchy()
    except AttributeError:
        print("This detector does not have a hierarchy")
        raise

    # get the panel groups at the chosen level
    level = params.refinement.parameterisation.detector.hierarchy_level
    try:
        groups = get_panel_groups_at_depth(h, level)
    except AttributeError:
        print(
            "Cannot access the hierarchy at the depth level={0}".format(level))
        raise

    # collect the panel ids for each Panel within the groups
    panels = [p for p in orig_detector]
    panel_ids_by_group = [get_panel_ids_at_root(panels, g) for g in groups]

    print("The detector will be divided into", len(panel_ids_by_group), \
          "groups consisting of the following panels:")
    for i, g in enumerate(panel_ids_by_group):
        print("Group%02d:" % (i + 1), g)
    print()

    # now construct sub-detectors
    def recursive_add_child(d, parent, child):
        """ Creates either a panel group or a panel on the parent,
            and sets it up to match the child """
        if child.is_group():
            newchild = parent.add_group()
        else:
            # only panels carry image/pixel metadata
            newchild = parent.add_panel()
            newchild.set_image_size(child.get_image_size())
            newchild.set_trusted_range(child.get_trusted_range())
            newchild.set_pixel_size(child.get_pixel_size())
            newchild.set_px_mm_strategy(child.get_px_mm_strategy())

        # copy the local frame (d-matrix columns) and name for both kinds
        m = child.get_local_d_matrix()
        newchild.set_local_frame(m[0::3], m[1::3], m[2::3])
        newchild.set_name(child.get_name())
        if child.is_group():
            for c in child.children():
                recursive_add_child(d, newchild, c)

    from dxtbx.model import Detector
    # one fresh Detector per panel group
    sub_detectors = [Detector() for e in groups]
    for d, g in zip(sub_detectors, groups):
        d.hierarchy().set_name(g.get_name())
        # the sub-detector root takes the group's frame
        d.hierarchy().set_frame(g.get_fast_axis(),
                                g.get_slow_axis(),
                                g.get_origin())
        if g.is_group():
            for c in g.children():
                recursive_add_child(d, d.hierarchy(), c)
        else:
            # at the bottom of the hierarchy. Note the new panel's frame
            # will be the identity matrix.
            p = d.hierarchy().add_panel()
            p.set_image_size(g.get_image_size())
            p.set_trusted_range(g.get_trusted_range())
            p.set_pixel_size(g.get_pixel_size())
            p.set_px_mm_strategy(g.get_px_mm_strategy())
            p.set_name(g.get_name())

    # set experiment lists for each sub-detector
    sub_det_expts = [copy.deepcopy(experiments) for e in groups]
    for d, exp in zip(sub_detectors, sub_det_expts):
        exp.replace(exp.detectors()[0], d)

    # divide the reflections by sub-detector
    sub_reflections = []
    for pnls in panel_ids_by_group:
        isels = [(reflections['panel'] == pnl).iselection() for pnl in pnls]
        isel = flex.size_t()
        for s in isels:
            isel.extend(s)
        gp_refs = reflections.select(isel)
        # reset panel number to match the sub-detector
        for new_id, old_id in enumerate(pnls):
            sel = gp_refs['panel'] == old_id
            gp_refs['panel'].set_selected(sel, new_id)
        sub_reflections.append(gp_refs)

    # We wish to refine each whole sub-detector as a single group. Therefore
    # we must use hierarchy_level=0 for these jobs
    tmplevel = params.refinement.parameterisation.detector.hierarchy_level
    params.refinement.parameterisation.detector.hierarchy_level = 0

    # do refinements and collect the refined experiments
    def do_work(item):
        refs, exps = item

        # too few reflections cannot constrain a rigid-body refinement
        if len(refs) < 20:
            print("Cannot refine detector",
                  exps[0].detector.hierarchy().get_name(),
                  "due to too few reflections (", len(refs), ")")
            return exps  # do not refine this detector element

        # Here use the specialised faster refiner
        refiner = StillsDetectorRefinerFactory.from_parameters_data_experiments(
            params, refs, exps)
        refiner.run()
        return refiner.get_experiments()

    refined_exps = easy_mp.parallel_map(
        func=do_work,
        iterable=zip(sub_reflections, sub_det_expts),
        processes=params.mp.nproc,
        method=params.mp.method,
        asynchronous=True,
        preserve_exception_message=True)

    # update the full detector: copy each refined sub-detector frame back
    # onto the corresponding group of the original hierarchy
    for group, refined_exp in zip(groups, refined_exps):
        refined_det = refined_exp.detectors()[0]
        local_root = refined_det[0]
        f = local_root.get_fast_axis()
        s = local_root.get_slow_axis()
        o = local_root.get_origin()
        group.set_frame(f, s, o)  # propagates local frame changes

    # refine the full detector to get RMSDs per panel
    print()
    print("Refining full recombined detector")
    print("---------------------------------")
    experiments = detector_refiner(params, experiments, reflections)

    # reset hierarchy_level
    params.refinement.parameterisation.detector.hierarchy_level = tmplevel

    return experiments
def _detector(self):
    """Return a working detector instance.

    Builds a hierarchical dxtbx Detector from multi-tile CBF metadata:
    panel names are collected from the ``array_structure_list`` category
    (section ids when present, array ids otherwise), matched to the
    cbflib detector objects through their root surface axes, and each
    panel is positioned via its cumulative change-of-basis matrix.

    NOTE: cbflib's find_category/find_column/find_row calls move a shared
    cursor on the cbf handle — the call order below is load-bearing.
    """
    cbf = self._get_cbf_handle()
    d = Detector()

    # find the panel element names. Either array ids or section ids
    cbf.find_category(b"array_structure_list")
    try:
        cbf.find_column(b"array_section_id")
    except Exception as e:
        if "CBF_NOTFOUND" not in str(e):
            raise e
        # no sections in this file: fall back to plain array ids
        cbf.find_column(b"array_id")
    panel_names = []
    for i in range(cbf.count_rows()):
        cbf.select_row(i)
        if cbf.get_typeofvalue() == b"null":
            continue
        val = cbf.get_value()
        # preserve first-seen order while de-duplicating
        if val not in panel_names:
            panel_names.append(val)

    # the cbf detector objects are not guaranteed to be in the same order
    # as this array of panel names. re-iterate, associating root axes of
    # detector objects with panel names
    detector_axes = []
    has_sections = cbf.has_sections()

    for i in range(len(panel_names)):
        cbf_detector = cbf.construct_detector(i)
        axis0 = cbf_detector.get_detector_surface_axes(0)
        detector_axes.append(axis0)
        # cbflib detector objects are SWIG-owned; destroy explicitly
        cbf_detector.__swig_destroy__(cbf_detector)

    cbf.find_category(b"array_structure_list")
    array_ids_detectororder = []
    panel_names_detectororder = []
    for detector_axis in detector_axes:
        cbf.find_column(b"axis_set_id")
        cbf.find_row(detector_axis)
        if has_sections:
            try:
                cbf.find_column(
                    b"array_id"
                )  # mandatory, but not always actually there
            except Exception as e:
                if "CBF_NOTFOUND" not in str(e):
                    raise
                cbf.find_column(b"array_section")  # use as backup, non standard
            array_ids_detectororder.append(cbf.get_value())
            cbf.find_column(b"array_section_id")
        else:
            cbf.find_column(b"array_id")
        panel_names_detectororder.append(cbf.get_value())

    for panel_number, panel_name in enumerate(panel_names):
        # look the detector object up by name so panel order follows
        # panel_names, not cbflib's detector order
        cbf_detector = cbf.construct_detector(
            panel_names_detectororder.index(panel_name)
        )

        # code adapted below from dxtbx.model.detector.DetectorFactory.imgCIF_H
        pixel = (
            cbf_detector.get_inferred_pixel_size(1),
            cbf_detector.get_inferred_pixel_size(2),
        )

        axis0 = cbf_detector.get_detector_surface_axes(0)
        axis1 = cbf_detector.get_detector_surface_axes(1)
        assert cbf.get_axis_depends_on(axis0) == axis1

        try:
            # NOTE(review): `i` is the stale index left over from the
            # detector_axes loop above (always the last panel); presumably
            # all tiles share one image size — confirm, else this should
            # probably be panel_number.
            size = tuple(cbf.get_image_size_fs(i))
        except Exception as e:
            if "CBF_NOTFOUND" in str(e):
                # no array data in the file, it's probably just a cbf header. Get the image size elsewhere
                size = [0, 0]
                cbf.find_category(b"array_structure_list")
                for axis in [axis0, axis1]:
                    cbf.find_column(b"axis_set_id")
                    cbf.find_row(axis)
                    cbf.find_column(b"precedence")
                    # precedence is 1-based; convert to a 0-based index
                    idx = int(cbf.get_value()) - 1
                    cbf.find_column(b"dimension")
                    size[idx] = int(cbf.get_value())
                assert size[0] != 0 and size[1] != 0
            else:
                raise e

        # place the panel in the hierarchy under its parent group, with
        # the local frame derived from the cumulative change of basis
        parent, cob = self._get_cumulative_change_of_basis(axis0)
        pg = self._add_panel_group(parent, d)
        p = pg.add_panel()
        fast = cbf.get_axis_vector(axis0)
        slow = cbf.get_axis_vector(axis1)
        origin = (cob * col((0, 0, 0, 1)))[0:3]
        p.set_local_frame(fast, slow, origin)

        try:
            overload = cbf.get_overload(panel_number)
            cbf.find_category(b"array_intensities")
            cbf.find_column(b"array_id")
            if has_sections:
                cbf.find_row(array_ids_detectororder[panel_number])
            else:
                cbf.find_row(panel_name)
            cbf.find_column(b"undefined_value")
            underload = cbf.get_doublevalue()
            trusted_range = (underload, overload)
        except Exception as e:
            if "CBF_NOTFOUND" not in str(e):
                raise
            # intensities metadata absent: fall back to a null range
            trusted_range = (0.0, 0.0)

        try:
            # relies on the cursor still being on the row found above
            cbf.find_column(b"gain")
            gain = cbf.get_doublevalue()
        except Exception as e:
            if "CBF_NOTFOUND" not in str(e):
                raise
            gain = 1.0

        p.set_pixel_size(tuple(map(float, pixel)))
        p.set_image_size(size)
        p.set_trusted_range(tuple(map(float, trusted_range)))
        p.set_gain(gain)
        p.set_name(panel_name)
        # p.set_px_mm_strategy(px_mm) FIXME

        cbf_detector.__swig_destroy__(cbf_detector)
        del cbf_detector

    return d
def _detector(self):
    """The _detector() function returns a model for a CSPAD detector as
    used at LCLS's CXI and XPP endstations.  It converts the metrology
    information in the pure Python object extracted from the image
    pickle to DXTBX-style transformation vectors.  Only ASIC:s are
    considered, since DXTBX metrology is not concerned with hierarchies.

    Merged from xfel.cftbx.detector.cspad_detector.readHeader() and
    xfel.cftbx.detector.metrology.metrology_as_dxtbx_vectors().

    Side effect: populates self._raw_data with one tile per ASIC, in
    the same order as the panels appended to the returned detector.
    """

    from dxtbx.model import SimplePxMmStrategy
    from dxtbx.model import Detector
    from scitbx.matrix import col

    # XXX Introduces dependency on cctbx.xfel!  Should probably be
    # merged into the code here!
    from xfel.cftbx.detector.metrology import _transform, get_projection_matrix

    # Apply the detector distance to the translation of the root
    # detector object.  distance is in meters here (converted from mm
    # by the 1e-3 factor); the offset is applied along -z.
    d = self._metrology_params.detector
    Tb_d = _transform(
        col(d.orientation).normalize(),
        col(d.translation) + col((0, 0, -self._metrology_params.distance * 1e-3)),
    )[1]

    self._raw_data = []
    detector = Detector()

    # Walk the detector -> panel -> sensor -> asic hierarchy,
    # accumulating the body transforms at each level.
    for p in d.panel:
        Tb_p = (
            Tb_d * _transform(col(p.orientation).normalize(), col(p.translation))[1]
        )
        for s in p.sensor:
            Tb_s = (
                Tb_p
                * _transform(col(s.orientation).normalize(), col(s.translation))[1]
            )
            for a in s.asic:
                Tb_a = (
                    Tb_s
                    * _transform(
                        col(a.orientation).normalize(), col(a.translation)
                    )[1]
                )

                # projection matrix maps pixel indices into the body frame
                Pb = get_projection_matrix(a.pixel_size, a.dimension)[1]

                # The DXTBX-style metrology description consists of three
                # vectors for each ASIC.  The origin vector locates the
                # (0, 0)-pixel in the laboratory frame in units of mm.
                # The second and third vectors give the directions to the
                # pixels immediately next to (0, 0) in the fast and slow
                # directions, respectively, in arbitrary units.
                origin = Tb_a * Pb * col((0, 0, 1))
                fast = Tb_a * Pb * col((0, a.dimension[0], 1)) - origin
                slow = Tb_a * Pb * col((a.dimension[1], 0, 1)) - origin

                # Convert vector units from meter to millimeter.  The
                # default, SimplePxMmStrategy applies here.  XXX Due to
                # dark subtraction, a valid pixel intensity may be
                # negative, and this is currently not reflected by
                # trusted_range.
                key = (d.serial, p.serial, s.serial, a.serial)
                panel = detector.add_panel()
                panel.set_type("PAD")
                panel.set_name("%d:%d:%d:%d" % key)
                panel.set_local_frame(
                    [t * 1e3 for t in fast.elems[0:3]],
                    [t * 1e3 for t in slow.elems[0:3]],
                    [t * 1e3 for t in origin.elems[0:3]],
                )
                panel.set_pixel_size([t * 1e3 for t in a.pixel_size])
                panel.set_image_size(a.dimension)
                panel.set_trusted_range((0, a.saturation))
                # keep the raw tile alongside its panel, keyed by serials
                self._raw_data.append(self._tiles[key])

    return detector
def _detector(self, index=None):
    """Build (and cache, per run number) a hierarchical dxtbx Detector
    from the psana geometry of this detector.

    Hierarchy: root group D0 -> 2 quadrant groups -> 8 ASIC panels per
    quadrant (2x4 sensors, 4 ASICs each).  All psana pixel coordinates
    come back in micrometres and are converted to millimetres here.

    :param index: event index used to fetch run/geometry; defaults to 0
    :return: dxtbx.model.Detector
    """
    run = self.get_run_from_index(index)
    if run.run() in self._cached_detector:
        # detector geometry is constant within a run; reuse it
        return self._cached_detector[run.run()]
    import psana
    from dxtbx.model import Detector
    from scitbx.matrix import col

    if index is None:
        index = 0
    self._env = self._ds.env()
    assert len(self.params.detector_address) == 1
    self._det = psana.Detector(self.params.detector_address[0], self._env)
    geom = self._det.pyda.geoaccess(self._get_event(index).run())
    pixel_size = (
        self._det.pixel_size(self._get_event(index)) / 1000.0
    )  # convert to mm
    d = Detector()
    pg0 = d.hierarchy()
    # first deal with D0
    det_num = 0
    D0 = geom.get_top_geo().get_list_of_children()[0]
    xx, yy, zz = D0.get_pixel_coords()
    xx = xx / 1000.0  # to mm
    yy = yy / 1000.0  # to mm
    zz = zz / 1000.0  # to mm
    # origin of the whole detector = centroid of all pixels (z negated)
    oriD0 = col((np.mean(xx), np.mean(yy), -np.mean(zz)))
    # fast/slow directions from the pixels adjacent to pixel (0, 0)
    fp = col((xx[0][0][1], yy[0][0][1], zz[0][0][1]))
    sp = col((xx[0][1][0], yy[0][1][0], zz[0][1][0]))
    op = col((xx[0][0][0], yy[0][0][0], zz[0][0][0]))
    origin = oriD0
    fast = (fp - op).normalize()
    slow = (sp - op).normalize()
    pg0.set_local_frame(fast.elems, slow.elems, origin.elems)
    pg0.set_name("D%d" % (det_num))
    # Now deal with Qx
    for quad_num in range(2):
        pg1 = pg0.add_group()
        Qx = D0.get_list_of_children()[quad_num]
        xx, yy, zz = Qx.get_pixel_coords()
        xx = xx / 1000.0  # to mm
        yy = yy / 1000.0  # to mm
        zz = zz / 1000.0  # to mm
        # quadrant origin = centroid of the quadrant's pixels
        oriQx = col((np.mean(xx), np.mean(yy), np.mean(zz)))
        fp = col((xx[0][1], yy[0][1], zz[0][1]))
        sp = col((xx[1][0], yy[1][0], zz[1][0]))
        op = col((xx[0][0], yy[0][0], zz[0][0]))
        origin = oriQx
        fast = (fp - op).normalize()
        slow = (sp - op).normalize()
        pg1.set_local_frame(fast.elems, slow.elems, origin.elems)
        pg1.set_name("D%dQ%d" % (det_num, quad_num))
        # Now deal with Az
        for asic_num in range(8):
            val = "ARRAY_D0Q%dA%d" % (quad_num, asic_num)
            p = pg1.add_panel()
            dim_slow = xx.shape[0]
            dim_fast = xx.shape[1]
            sensor_id = asic_num // 4  # There are 2X4 asics per quadrant
            asic_in_sensor_id = asic_num % 4  # this number will be 0,1,2 or 3
            # index of this ASIC's (0, 0) pixel within the quadrant array
            id_slow = sensor_id * (dim_slow // 2)
            id_fast = asic_in_sensor_id * (dim_fast // 4)
            oriAy = col(
                (xx[id_slow][id_fast], yy[id_slow][id_fast], zz[id_slow][id_fast])
            )
            fp = col(
                (
                    xx[id_slow][id_fast + 1],
                    yy[id_slow][id_fast + 1],
                    zz[id_slow][id_fast + 1],
                )
            )
            sp = col(
                (
                    xx[id_slow + 1][id_fast],
                    yy[id_slow + 1][id_fast],
                    zz[id_slow + 1][id_fast],
                )
            )
            # panel origin is expressed relative to the quadrant frame
            origin = oriAy - oriQx
            fast = (fp - oriAy).normalize()
            slow = (sp - oriAy).normalize()
            p.set_local_frame(fast.elems, slow.elems, origin.elems)
            p.set_pixel_size((pixel_size, pixel_size))
            p.set_image_size((dim_fast // 4, dim_slow // 2))
            p.set_trusted_range((-1, 2e6))
            p.set_name(val)
    self._cached_detector[run.run()] = d
    return d
def process_event(self, run, timestamp):
  """
  Process a single event from a run
  @param run psana run object
  @param timestamp psana timestamp object

  Flow: timestamp/debug-log filtering -> fetch event -> build the image
  (cbf or pickle path) -> optional dump/gain-estimation short-circuits ->
  configure per-shot output paths and lookup masks -> spotfinding.
  Every early exit is recorded via self.debug_write.
  NOTE: legacy Python 2 code (print statements, `except Exception, e`).
  """
  ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
  if ts is None:
    print "No timestamp, skipping shot"
    return

  # if an explicit timestamp whitelist was given, only process those shots
  if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
    return

  # consult the debug log of previous attempts to decide whether to skip
  if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
    if ts in self.known_events:
      if self.known_events[ts] not in ["stop", "done", "fail"]:
        # previous attempt didn't record a clean terminal state
        if self.params_cache.debug.skip_bad_events:
          print "Skipping event %s: possibly caused an unknown exception previously"%ts
          return
      elif self.params_cache.debug.skip_processed_events:
        print "Skipping event %s: processed successfully previously"%ts
        return
    else:
      if self.params_cache.debug.skip_unprocessed_events:
        print "Skipping event %s: not processed previously"%ts
        return

  self.debug_start(ts)

  evt = run.event(timestamp)
  # skip events flagged upstream (value in the event or key presence)
  if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
    print "Skipping event",ts
    self.debug_write("psana_skip", "skip")
    return

  print "Accepted", ts
  # work on a private copy so per-shot mutations don't leak across events
  self.params = copy.deepcopy(self.params_cache)

  # the data needs to have already been processed and put into the event by psana
  if self.params.format.file_format == 'cbf':
    # get numpy array, 32x185x388
    data = cspad_cbf_tbx.get_psana_corrected_data(self.psana_det, evt, use_default=False, dark=True,
                                                  common_mode=self.common_mode,
                                                  apply_gain_mask=self.params.format.cbf.gain_mask_value is not None,
                                                  gain_mask_value=self.params.format.cbf.gain_mask_value,
                                                  per_pixel_gain=False)
    if data is None:
      print "No data"
      self.debug_write("no_data", "skip")
      return

    # detector distance: from the environment unless overridden
    if self.params.format.cbf.override_distance is None:
      distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
      if distance is None:
        print "No distance, skipping shot"
        self.debug_write("no_distance", "skip")
        return
    else:
      distance = self.params.format.cbf.override_distance

    # wavelength: from the event unless an energy override is given
    if self.params.format.cbf.override_energy is None:
      wavelength = cspad_tbx.evt_wavelength(evt)
      if wavelength is None:
        print "No wavelength, skipping shot"
        self.debug_write("no_wavelength", "skip")
        return
    else:
      # hc constant in eV*Angstrom: energy (eV) -> wavelength (Angstrom)
      wavelength = 12398.4187/self.params.format.cbf.override_energy

  if self.params.format.file_format == 'pickle':
    image_dict = evt.get(self.params.format.pickle.out_key)
    data = image_dict['DATA']

  # compact timestamp string used in all output filenames
  timestamp = t = ts
  s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
  print "Processing shot", s

  if self.params.format.file_format == 'cbf':
    # stitch together the header, data and metadata into the final dxtbx format object
    cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)

    if self.params.input.reference_geometry is not None:
      from dxtbx.model import Detector
      # copy.deep_copy(self.reference_detctor) seems unsafe based on tests. Use from_dict(to_dict()) instead.
      cspad_img._detector_instance = Detector.from_dict(self.reference_detector.to_dict())
      cspad_img.sync_detector_to_cbf()
  elif self.params.format.file_format == 'pickle':
    from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
    cspad_img = FormatPYunspecifiedStillInMemory(image_dict)
    cspad_img.timestamp = s

  if self.params.dispatch.dump_all:
    self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

  self.cache_ranges(cspad_img, self.params)

  imgset = MemImageSet([cspad_img])
  if self.params.dispatch.estimate_gain_only:
    from dials.command_line.estimate_gain import estimate_gain
    estimate_gain(imgset)
    return

  if not self.params.dispatch.find_spots:
    # data successfully loaded; nothing more requested for this shot
    self.debug_write("data_loaded", "done")
    return

  datablock = DataBlockFactory.from_imageset(imgset)[0]

  # before calling DIALS for processing, set output paths according to the templates
  if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
    self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
  if "%s" in self.refined_experiments_filename_template:
    self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
  if "%s" in self.integrated_filename_template:
    self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))
  if "%s" in self.reindexedstrong_filename_template:
    self.params.output.reindexedstrong_filename = os.path.join(self.params.output.output_dir, self.reindexedstrong_filename_template%("idx-" + s))

  # Load a dials mask from the trusted range and psana mask
  from dials.util.masking import MaskGenerator
  generator = MaskGenerator(self.params.border_mask)
  mask = generator.generate(imgset)
  if self.params.format.file_format == "cbf":
    # AND the generated mask with the psana-derived per-panel mask
    mask = tuple([a&b for a, b in zip(mask,self.dials_mask)])
  if self.spotfinder_mask is None:
    self.params.spotfinder.lookup.mask = mask
  else:
    self.params.spotfinder.lookup.mask = tuple([a&b for a, b in zip(mask,self.spotfinder_mask)])
  if self.integration_mask is None:
    self.params.integration.lookup.mask = mask
  else:
    self.params.integration.lookup.mask = tuple([a&b for a, b in zip(mask,self.integration_mask)])

  self.debug_write("spotfind_start")
  try:
    observed = self.find_spots(datablock)
  except Exception, e:
    import traceback; traceback.print_exc()
    print str(e), "event", timestamp
    self.debug_write("spotfinding_exception", "fail")
    return
def load_detector(entry):
    """Build a dxtbx Detector from a NeXus-style HDF5 entry group.

    Reads detector-level metadata (sensor material, type, thickness,
    trusted range) from instrument/detector, then appends one flat panel
    per ``module%d`` group, stopping at the first missing index.  The
    asserts enforce the expected NXdetector_module layout (identity
    depends_on chain, translation-type offsets with zero offset vectors).

    :param entry: open NeXus entry group (h5py-like mapping)
    :return: dxtbx.model.Detector
    """
    from dxtbx.model import Detector

    # Get the detector module object
    nx_instrument = get_nx_instrument(entry, "instrument")
    nx_detector = get_nx_detector(nx_instrument, "detector")
    # detector itself must be at the root of the dependency chain
    assert nx_detector["depends_on"][()] == "."
    material = nx_detector["sensor_material"][()]
    det_type = nx_detector["type"][()]
    thickness = nx_detector["sensor_thickness"][()]
    trusted_range = (nx_detector["underload"][()], nx_detector["saturation_value"][()])

    # The detector model
    detector = Detector()

    i = 0
    while True:
        # iterate module0, module1, ... until lookup fails
        try:
            module = get_nx_detector_module(nx_detector, "module%d" % i)
        except Exception:
            break

        # Set the data size
        image_size = module["data_size"]

        # Set the module offset: a translation of `offset_length` along
        # the stated vector gives the panel origin
        offset_length = module["module_offset"][()]
        assert module["module_offset"].attrs["depends_on"] == "."
        assert module["module_offset"].attrs[
            "transformation_type"] == "translation"
        assert tuple(module["module_offset"].attrs["offset"]) == (0, 0, 0)
        offset_vector = matrix.col(module["module_offset"].attrs["vector"])
        origin = offset_vector * offset_length

        # Write the fast pixel direction
        module_offset_path = str(module["module_offset"].name)
        pixel_size_x = module["fast_pixel_direction"][()]
        assert module["fast_pixel_direction"].attrs[
            "depends_on"] == module_offset_path
        assert (module["fast_pixel_direction"].attrs["transformation_type"]
                == "translation")
        assert tuple(module["fast_pixel_direction"].attrs["offset"]) == (0, 0, 0)
        fast_axis = tuple(module["fast_pixel_direction"].attrs["vector"])

        # Write the slow pixel direction
        pixel_size_y = module["slow_pixel_direction"][()]
        assert module["slow_pixel_direction"].attrs[
            "depends_on"] == module_offset_path
        assert (module["slow_pixel_direction"].attrs["transformation_type"]
                == "translation")
        assert tuple(module["slow_pixel_direction"].attrs["offset"]) == (0, 0, 0)
        slow_axis = tuple(module["slow_pixel_direction"].attrs["vector"])

        # Get the pixel size and axis vectors
        pixel_size = (pixel_size_x, pixel_size_y)

        # Create the panel
        panel = detector.add_panel()
        panel.set_frame(fast_axis, slow_axis, origin)
        panel.set_pixel_size(pixel_size)
        panel.set_image_size([int(x) for x in image_size])
        panel.set_type(det_type)
        panel.set_thickness(thickness)
        panel.set_material(material)
        panel.set_trusted_range([float(x) for x in trusted_range])
        i += 1

    # Return the detector
    return detector
def _detector_from_dict(obj):
    """Reconstruct a detector model from its dictionary representation."""
    from dxtbx.model import Detector
    detector = Detector.from_dict(obj)
    return detector