def test_split_single_image_datablock(dials_regression, tmpdir):
    tmpdir.chdir()
    pytest.importorskip("h5py")
    sacla_file = os.path.join(
        dials_regression,
        "image_examples",
        "SACLA_MPCCD_Cheetah",
        "run266702-0-subset.h5",
    )
    db = DataBlockFactory.from_filenames([sacla_file])[0]
    assert db.num_images() == 4
    imageset = db.extract_imagesets()[0]
    subset = imageset[2:3]
    subblock = DataBlockFactory.from_imageset(subset)[0]
    assert subblock.num_images() == 1
    assert get_indices(subblock) == [2]

    dumped_filename = "split_datablock.json"
    dump = DataBlockDumper(subblock)
    dump.as_json(dumped_filename)

    db = DataBlockFactory.from_json_file(dumped_filename, check_format=True)[0]
    assert db.num_images() == 1
    assert get_indices(db) == [2]

    db = DataBlockFactory.from_json_file(dumped_filename, check_format=False)[0]
    assert db.num_images() == 1
    assert get_indices(db) == [2]
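The helper get_indices() used by the test above is not part of this listing. A minimal sketch of what it presumably does, collecting the image indices of every imageset in a datablock via the imageset.indices() call seen in other snippets here, might look like this (the helper body is an assumption, not the original implementation):

def get_indices(datablock):
    # Hypothetical helper: gather the image indices of all imagesets
    # in the datablock into a single flat list.
    indices = []
    for imageset in datablock.extract_imagesets():
        indices.extend(imageset.indices())
    return indices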
def test_combination_of_multiple_datablocks_and_strong_spots_files(
    dials_regression, tmpdir
):
    tmpdir.chdir()
    path = os.path.join(dials_regression, "centroid_test_data/centroid_####.cbf")

    # example combines two different spot-finding settings for the same dataset,
    # e.g. for comparison with the reciprocal lattice viewer.
    cmd = "dials.import template={0}".format(path)
    result = easy_run.fully_buffered(cmd).raise_if_errors()
    cmd = "dials.find_spots datablock.json output.reflections=strong1.pickle"
    result = easy_run.fully_buffered(cmd).raise_if_errors()
    cmd = (
        "dials.find_spots datablock.json sigma_strong=5 "
        "output.reflections=strong2.pickle"
    )
    result = easy_run.fully_buffered(cmd).raise_if_errors()
    cmd = (
        "dev.dials.combine_datablocks datablock.json datablock.json "
        "strong1.pickle strong2.pickle"
    )
    result = easy_run.fully_buffered(cmd).raise_if_errors()

    # load results
    comb_db = DataBlockFactory.from_json_file('combined_datablocks.json')[0]
    comb_strong = flex.reflection_table.from_pickle("combined_strong.pickle")

    # load reference models and reflections
    db = DataBlockFactory.from_json_file('datablock.json')[0]
    ref_detector = db.unique_detectors()[0]
    ref_beam = db.unique_beams()[0]
    ref_scan = db.unique_scans()[0]
    ref_goniometer = db.unique_goniometers()[0]
    strong1 = flex.reflection_table.from_pickle("strong1.pickle")
    strong2 = flex.reflection_table.from_pickle("strong2.pickle")

    # check the models have not been modified
    for imset in comb_db.extract_imagesets():
        assert imset.get_detector() == ref_detector
        assert imset.get_beam() == ref_beam
        assert imset.get_scan() == ref_scan
        assert imset.get_goniometer() == ref_goniometer

    # check the reflections are unaffected, except for the change in id
    s1 = comb_strong.select(comb_strong['id'] == 0)
    s2 = comb_strong.select(comb_strong['id'] == 1)
    s2['id'] = flex.size_t(len(s2), 0)
    for r1, r2 in zip(s1, strong1):
        assert r1 == r2
    for r1, r2 in zip(s2, strong2):
        assert r1 == r2
def test_find_spots_with_user_defined_mask(dials_regression, tmpdir):
    tmpdir.chdir()

    # Now with a user defined mask
    result = procrunner.run_process(
        [
            "dials.find_spots",
            "output.reflections=spotfinder.pickle",
            "output.shoeboxes=True",
            "lookup.mask="
            + os.path.join(dials_regression, "centroid_test_data", "mask.pickle"),
        ]
        + glob(os.path.join(dials_regression, "centroid_test_data", "centroid*.cbf"))
    )
    assert result['exitcode'] == 0
    assert result['stderr'] == ''
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)

    from dxtbx.datablock import DataBlockFactory

    datablocks = DataBlockFactory.from_json_file(
        os.path.join(dials_regression, "centroid_test_data", "datablock.json")
    )
    assert len(datablocks) == 1
    imageset = datablocks[0].extract_imagesets()[0]
    detector = imageset.get_detector()
    beam = imageset.get_beam()
    for x, y, z in reflections['xyzobs.px.value']:
        d = detector[0].get_resolution_at_pixel(beam.get_s0(), (x, y))
        assert d >= 3
def run(self):
    from os.path import join, exists
    from libtbx import easy_run
    import os

    input_filename = join(self.path, "datablock.json")
    output_filename = "output_datablock.json"
    mask_filename = join(self.path, "lookup_mask.pickle")

    easy_run.fully_buffered(
        ['dials.apply_mask',
         'input.datablock=%s' % input_filename,
         'input.mask=%s' % mask_filename,
         'output.datablock=%s' % output_filename]).raise_if_errors()

    from dxtbx.datablock import DataBlockFactory
    datablocks = DataBlockFactory.from_json_file(output_filename)
    assert len(datablocks) == 1
    imagesets = datablocks[0].extract_imagesets()
    assert len(imagesets) == 1
    imageset = imagesets[0]
    assert imageset.external_lookup.mask.filename == mask_filename

    print 'OK'
def tst_with_bad_external_lookup(self):
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.imageset import ImageSweep
    from os.path import join

    filename = join(self.dials_regression, "centroid_test_data",
                    "datablock_with_bad_lookup.json")
    blocks = DataBlockFactory.from_json_file(filename, check_format=False)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data is None
    assert imageset.external_lookup.gain.data is None
    assert imageset.external_lookup.pedestal.data is None

    blocks = self.encode_json_then_decode(blocks, check_format=False)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data is None
    assert imageset.external_lookup.gain.data is None
    assert imageset.external_lookup.pedestal.data is None

    print 'OK'
def get_arr(json_file_path=None):
    if json_file_path is not None:
        print "json_file_path =", json_file_path
        datablocks = DataBlockFactory.from_json_file(json_file_path)
        if len(datablocks) > 0:
            assert len(datablocks) == 1
            imagesets = datablocks[0].extract_imagesets()
            crystals = None
            print "len(datablocks) > 0"
        else:
            raise RuntimeError("No imageset could be constructed, len(datablocks) <= 0")

        print "len(imagesets) =", len(imagesets)
        print "type(imagesets) =", type(imagesets)
        first_data = imagesets[0]
        print "type(first_data) =", type(first_data)
        my_array = first_data.to_array()
        print "type(my_array) =", type(my_array)
        my_array_double = my_array.as_double()
        print "my_array_double.all() =", my_array_double.all()
    else:
        # Without a path there is nothing to load; return None instead of
        # falling through to an undefined variable.
        print "No DataBlock PATH given"
        my_array_double = None

    return my_array_double
def run(args=None):
    args = args or sys.argv[1:]
    user_phil = []
    files = []
    for arg in args:
        if os.path.isfile(arg):
            files.append(arg)
        else:
            try:
                user_phil.append(parse(arg))
            except Exception:
                raise Sorry("Unrecognized argument %s" % arg)
    params = phil_scope.fetch(sources=user_phil).extract()

    fig = plt.figure()
    colormap = plt.cm.gist_ncar
    colors = [colormap(i) for i in np.linspace(0, 0.9, len(files))]
    for file_name, color in zip(files, colors):
        # read the data and get the detector models
        try:
            datablocks = DataBlockFactory.from_json_file(file_name, check_format=False)
            detectors = sum((db.unique_detectors() for db in datablocks), [])
        except Exception:
            try:
                experiments = ExperimentListFactory.from_json_file(
                    file_name, check_format=False
                )
            except ValueError:
                experiments = ExperimentListFactory.from_filenames([file_name])
            detectors = experiments.detectors()
        if not params.plot_all_detectors:
            detectors = detectors[0:1]
        for detector in detectors:
            # plot the hierarchy
            if params.orthographic:
                ax = fig.gca()
            else:
                ax = fig.gca(projection="3d")
            plot_group(
                detector.hierarchy(),
                color,
                ax,
                orthographic=params.orthographic,
                show_origin_vectors=params.show_origin_vectors,
                panel_numbers=params.panel_numbers,
            )

    plt.xlabel("x")
    plt.ylabel("y")
    if params.orthographic:
        plt.axes().set_aspect("equal", "datalim")

    if params.pdf_file:
        pp = PdfPages(params.pdf_file)
        for i in plt.get_fignums():
            pp.savefig(plt.figure(i))
        pp.close()
    else:
        plt.show()
def get_3d_flex_array():
    json_file_path = str("../../dummy_unversioned_data/datablock.json")
    print "json_file_path =", json_file_path
    datablocks = DataBlockFactory.from_json_file(json_file_path)
    if len(datablocks) > 0:
        assert len(datablocks) == 1
        imagesets = datablocks[0].extract_imagesets()
        crystals = None
        print "len(datablocks) > 0"
    else:
        raise RuntimeError("No imageset could be constructed")

    print "len(imagesets) =", len(imagesets)
    print "type(imagesets) =", type(imagesets)
    first_data = imagesets[0]
    print "type(first_data) =", type(first_data)
    my_array = first_data.to_array()
    print "type(my_array) =", type(my_array)
    my_array_double = my_array.as_double()
    print "my_array_double.all() =", my_array_double.all()
    return my_array_double
def __init__(self):
    super(BigWidget, self).__init__()
    my_box = QVBoxLayout()
    top_box = QHBoxLayout()
    left_top_box = QVBoxLayout()
    right_top_box = QVBoxLayout()
    self.my_painter = ImgPainter()

    #json_file_path = "/home/luiso/dui/dui_test/only_9_img/dui_idials_tst_04/dials-1/1_import/datablock.json"
    json_file_path = "/home/luiso/dui/dui_test/X4_wide/test_02/dials-1/1_import/datablock.json"
    #json_file_path = "/home/lui/dui/dui_test/X4_wide/tst01/datablock.json"

    datablocks = DataBlockFactory.from_json_file(json_file_path)
    db = datablocks[0]
    self.my_sweep = db.extract_sweeps()[0]
    print "self.my_sweep.get_array_range() =", self.my_sweep.get_array_range()
    print "self.my_sweep.get_image_size() =", self.my_sweep.get_image_size()

    n_of_imgs = self.my_sweep.get_array_range()[1]
    print "n_of_imgs =", n_of_imgs

    self.palette_lst = ["hot ascend", "hot descend", "black2white", "white2black"]
    self.palette = self.palette_lst[0]
    self.img_num = 0
    self.set_img()

    img_select = QComboBox()
    for num in xrange(n_of_imgs):
        labl = "image number:" + str(num)
        img_select.addItem(labl)
    img_select.setCurrentIndex(0)
    img_select.currentIndexChanged.connect(self.img_changed_by_user)

    palette_select = QComboBox()
    for plt in self.palette_lst:
        palette_select.addItem(plt)
    palette_select.currentIndexChanged.connect(self.palette_changed_by_user)

    left_top_box.addWidget(palette_select)
    top_box.addLayout(left_top_box)
    right_top_box.addWidget(img_select)
    top_box.addLayout(right_top_box)
    my_box.addLayout(top_box)

    my_scrollable = QScrollArea()
    my_scrollable.setWidget(self.my_painter)
    my_box.addWidget(my_scrollable)

    self.setLayout(my_box)
    self.show()
def ini_datablock(self, json_file_path):
    if json_file_path is not None:
        try:
            datablocks = DataBlockFactory.from_json_file(json_file_path)
            # TODO check length of datablock for safety
            datablock = datablocks[0]
            self.my_sweep = datablock.extract_sweeps()[0]
            self.img_select.clear()
            print "self.my_sweep.get_array_range() =", self.my_sweep.get_array_range()
            print "self.my_sweep.get_image_size() =", self.my_sweep.get_image_size()
            n_of_imgs = len(self.my_sweep.indices())
            print "n_of_imgs =", n_of_imgs
            self.img_select.setMaximum(n_of_imgs)
            self.img_select.setMinimum(1)
            self.img_step.setMaximum(n_of_imgs / 2)
            self.img_step.setMinimum(1)
            self.num_of_imgs_to_add.setMaximum(n_of_imgs)
            self.num_of_imgs_to_add.setMinimum(1)
        except:
            print "Failed to load images from datablock.json"

    self.btn_first_clicked()
    # TODO Find a better way to call this function only once
    self.ini_contrast()
    self.set_img()
def load_reference_geometry(self):
    if self.params.input.reference_geometry is None:
        return

    try:
        ref_datablocks = DataBlockFactory.from_json_file(
            self.params.input.reference_geometry, check_format=False)
    except Exception:
        ref_datablocks = None

    if ref_datablocks is None:
        from dxtbx.model.experiment_list import ExperimentListFactory
        try:
            ref_experiments = ExperimentListFactory.from_json_file(
                self.params.input.reference_geometry, check_format=False)
        except Exception:
            try:
                import dxtbx
                img = dxtbx.load(self.params.input.reference_geometry)
            except Exception:
                raise Sorry("Couldn't load geometry file %s"
                            % self.params.input.reference_geometry)
            else:
                self.reference_detector = img.get_detector()
        else:
            assert len(ref_experiments.detectors()) == 1
            self.reference_detector = ref_experiments.detectors()[0]
    else:
        assert len(ref_datablocks) == 1 and len(ref_datablocks[0].unique_detectors()) == 1
        self.reference_detector = ref_datablocks[0].unique_detectors()[0]
def tst_with_external_lookup(self):
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.imageset import ImageSweep
    from os.path import join

    filename = join(self.dials_regression, "centroid_test_data",
                    "datablock_with_lookup.json")
    blocks = DataBlockFactory.from_json_file(filename)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)

    blocks = self.encode_json_then_decode(blocks)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)

    print 'OK'
def test(dials_regression, tmpdir):
    tmpdir.chdir()

    input_filename = os.path.join(dials_regression, "centroid_test_data", "datablock.json")
    mask_filename = os.path.join(dials_regression, "centroid_test_data", "lookup_mask.pickle")
    output_filename = "output_datablock.json"

    result = procrunner.run_process([
        'dials.apply_mask',
        'input.datablock=%s' % input_filename,
        'input.mask=%s' % mask_filename,
        'output.datablock=%s' % output_filename,
    ])
    assert result['exitcode'] == 0
    assert result['stderr'] == ''

    from dials.array_family import flex  # import dependency
    from dxtbx.datablock import DataBlockFactory

    datablocks = DataBlockFactory.from_json_file(output_filename)
    assert len(datablocks) == 1
    imagesets = datablocks[0].extract_imagesets()
    assert len(imagesets) == 1
    imageset = imagesets[0]
    assert imageset.external_lookup.mask.filename == mask_filename
def run(self):
    from os.path import join
    from libtbx import easy_run

    input_filename = join(self.path, "datablock.json")
    output_filename = "output_datablock.json"
    mask_filename = join(self.path, "lookup_mask.pickle")

    easy_run.fully_buffered([
        'dials.apply_mask',
        'input.datablock=%s' % input_filename,
        'input.mask=%s' % mask_filename,
        'output.datablock=%s' % output_filename,
    ]).raise_if_errors()

    from dxtbx.datablock import DataBlockFactory
    datablocks = DataBlockFactory.from_json_file(output_filename)
    assert len(datablocks) == 1
    imagesets = datablocks[0].extract_imagesets()
    assert len(imagesets) == 1
    imageset = imagesets[0]
    assert imageset.external_lookup.mask.filename == mask_filename

    print 'OK'
def ini_datablock(self, json_file_path):
    if json_file_path is not None:
        try:
            datablocks = DataBlockFactory.from_json_file(json_file_path)
            ##TODO check length of datablock for safety
            datablock = datablocks[0]
            self.my_sweep = datablock.extract_sweeps()[0]
            self.img_select.clear()
        except:
            print "Failed to load images from datablock.json"

        try:
            print "self.my_sweep.get_array_range() =", self.my_sweep.get_array_range()
            n_of_imgs = len(self.my_sweep.indices())
            print "n_of_imgs =", n_of_imgs
            self.img_select.setMaximum(n_of_imgs)
            self.img_select.setMinimum(1)
            self.img_step.setMaximum(n_of_imgs / 2)
            self.img_step.setMinimum(1)
            self.num_of_imgs_to_add.setMaximum(n_of_imgs)
            self.num_of_imgs_to_add.setMinimum(1)
        except:
            print "Failed to set up IMG control dialog"

    self.btn_first_clicked()
    self.ini_contrast()
    self.set_img()
    QTimer.singleShot(1000, self.scale2border)
def tst_with_external_lookup(self):
    filename = join(self.dials_regression, "centroid_test_data",
                    "datablock_with_lookup.json")
    blocks = DataBlockFactory.from_json_file(filename)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)

    blocks = self.encode_json_then_decode(blocks)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)

    print 'OK'
def do_import(filename):
    logger.info("Loading %s" % os.path.basename(filename))
    datablocks = DataBlockFactory.from_filenames([filename])
    if len(datablocks) == 0:
        try:
            datablocks = DataBlockFactory.from_json_file(filename)
        except ValueError:
            raise Abort("Could not load %s" % filename)

    if len(datablocks) == 0:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)

    # Ensure the indexer and downstream applications treat this as set of stills
    reset_sets = []

    from dxtbx.imageset import ImageSetFactory
    for imageset in datablocks[0].extract_imagesets():
        imageset = ImageSetFactory.imageset_from_anyset(imageset)
        imageset.set_scan(None)
        imageset.set_goniometer(None)
        reset_sets.append(imageset)

    return DataBlockFactory.from_imageset(reset_sets)[0]
def test_with_external_lookup(centroid_test_data):
    filename = os.path.join(centroid_test_data, "datablock_with_lookup.json")
    blocks = DataBlockFactory.from_json_file(filename)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert not imageset.external_lookup.mask.data.empty()
    assert not imageset.external_lookup.gain.data.empty()
    assert not imageset.external_lookup.pedestal.data.empty()
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
    assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
    assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)

    blocks = encode_json_then_decode(blocks)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert not imageset.external_lookup.mask.data.empty()
    assert not imageset.external_lookup.gain.data.empty()
    assert not imageset.external_lookup.pedestal.data.empty()
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
    assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
    assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
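Several of the lookup tests in this listing call an encode_json_then_decode() helper that is not shown. A minimal round-trip sketch, assuming DataBlockDumper accepts the list of datablocks and writing through a temporary file, could look like the following (the helper body is an assumption, not the original code):

import tempfile

from dxtbx.datablock import DataBlockDumper, DataBlockFactory

def encode_json_then_decode(blocks, check_format=True):
    # Hypothetical round-trip helper: dump the datablocks to JSON and read
    # them back, mirroring the DataBlockDumper/DataBlockFactory usage above.
    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f:
        filename = f.name
    DataBlockDumper(blocks).as_json(filename)
    return DataBlockFactory.from_json_file(filename, check_format=check_format)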
def get_3d_flex_array():
    json_file_path = str(
        "/home/luiso/dui/dui_test/X4_wide/dui_idials_tst_10/dials-1/1_import/datablock.json"
    )
    print "json_file_path =", json_file_path
    datablocks = DataBlockFactory.from_json_file(json_file_path)
    if len(datablocks) > 0:
        assert len(datablocks) == 1
        imagesets = datablocks[0].extract_imagesets()
        crystals = None
        print "len(datablocks) > 0"
    else:
        raise RuntimeError("No imageset could be constructed, len(datablocks) <= 0")

    print "len(imagesets) =", len(imagesets)
    print "type(imagesets) =", type(imagesets)
    first_data = imagesets[0]
    print "type(first_data) =", type(first_data)
    my_array = first_data.to_array()
    print "type(my_array) =", type(my_array)
    my_array_double = my_array.as_double()
    print "my_array_double.all() =", my_array_double.all()
    return my_array_double
def loader(x):
    try:
        obj = DataBlockFactory.from_filenames([x])[0].extract_imagesets()[0]
    except IndexError:
        import dxtbx.datablock
        try:
            obj = DataBlockFactory.from_json_file(x)[0].extract_imagesets()[0]
        except dxtbx.datablock.InvalidDataBlockError:
            obj = ExperimentListFactory.from_json_file(x)[0].imageset
    return obj
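A possible way to exercise both branches of the loader above; the file names here are placeholders rather than data shipped with this listing:

# Hypothetical usage of loader(); substitute real paths.
iset_from_image = loader("example_0001.cbf")  # image file -> from_filenames branch
iset_from_json = loader("datablock.json")     # serialized datablock -> from_json_file fallback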
def run(args):
    user_phil = []
    files = []
    for arg in args:
        if os.path.isfile(arg):
            files.append(arg)
        else:
            try:
                user_phil.append(parse(arg))
            except Exception:
                raise Sorry("Unrecognized argument %s" % arg)
    params = phil_scope.fetch(sources=user_phil).extract()

    if not params.orthographic:
        from mpl_toolkits.mplot3d import proj3d

    fig = plt.figure()
    colormap = plt.cm.gist_ncar
    colors = [colormap(i) for i in np.linspace(0, 0.9, len(files))]
    for file_name, color in zip(files, colors):
        # read the data and get the detector models
        try:
            datablocks = DataBlockFactory.from_json_file(file_name, check_format=False)
        except Exception:
            experiments = ExperimentListFactory.from_json_file(
                file_name, check_format=False)
            detectors = experiments.detectors()
        else:
            detectors = []
            for datablock in datablocks:
                detectors.extend(datablock.unique_detectors())
        for detector in detectors:
            # plot the hierarchy
            if params.orthographic:
                ax = fig.gca()
            else:
                ax = fig.gca(projection='3d')
            plot_group(detector.hierarchy(), color, ax,
                       orthographic=params.orthographic,
                       show_origin_vectors=params.show_origin_vectors,
                       panel_numbers=params.panel_numbers)

    if params.orthographic:
        plt.axes().set_aspect('equal', 'datalim')

    if params.pdf_file:
        pp = PdfPages(params.pdf_file)
        for i in plt.get_fignums():
            pp.savefig(plt.figure(i))
        pp.close()
    else:
        plt.show()
def do_import(filename):
    logger.info("Loading %s" % os.path.basename(filename))
    try:
        datablocks = DataBlockFactory.from_json_file(filename)
    except ValueError:
        datablocks = DataBlockFactory.from_filenames([filename])
    if len(datablocks) == 0:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)
    return datablocks[0]
def from_string(self, s):
    from dxtbx.datablock import DataBlockFactory
    from os.path import exists
    from libtbx.utils import Sorry

    if s is None:
        return None
    if s not in self.cache:
        if not exists(s):
            raise Sorry('File %s does not exist' % s)
        self.cache[s] = FilenameDataWrapper(
            s,
            DataBlockFactory.from_json_file(s, check_format=self._check_format))
    return self.cache[s]
def ini_datablock(self, json_file_path):
    if json_file_path is not None:
        try:
            datablocks = DataBlockFactory.from_json_file(json_file_path)
            # TODO check length of datablock for safety
            datablock = datablocks[0]
            self.my_sweep = datablock.extract_sweeps()[0]
            self.img_select.clear()
        except BaseException as e:
            # We don't want to catch bare exceptions but don't know
            # what this was supposed to catch. Log it.
            logger.error(
                "Caught unknown exception type %s: %s", type(e).__name__, e
            )
            logger.debug("Failed to load images from datablock.json")

        try:
            logger.debug(
                "self.my_sweep.get_array_range() = %s",
                self.my_sweep.get_array_range(),
            )
            n_of_imgs = len(self.my_sweep.indices())
            logger.debug("n_of_imgs = %s", n_of_imgs)
            self.img_select.setMaximum(n_of_imgs)
            self.img_select.setMinimum(1)
            self.img_step.setMaximum(n_of_imgs / 2)
            self.img_step.setMinimum(1)
            self.num_of_imgs_to_add.setMaximum(n_of_imgs)
            self.num_of_imgs_to_add.setMinimum(1)
        except BaseException as e:
            # We don't want to catch bare exceptions but don't know
            # what this was supposed to catch. Log it.
            logger.error(
                "Caught unknown exception type %s: %s", type(e).__name__, e
            )
            logger.debug("Failed to set up IMG control dialog")

    self.btn_first_clicked()
    self.ini_contrast()
    self.set_img()
    QTimer.singleShot(1000, self.scale2border)
def load_reference_geometry(self):
    if self.params.input.reference_geometry is None:
        return

    try:
        ref_datablocks = DataBlockFactory.from_json_file(
            self.params.input.reference_geometry, check_format=False)
    except Exception:
        ref_datablocks = None

    if ref_datablocks is None:
        from dxtbx.model.experiment.experiment_list import ExperimentListFactory
        try:
            ref_experiments = ExperimentListFactory.from_json_file(
                self.params.input.reference_geometry, check_format=False)
        except Exception:
            raise Sorry("Couldn't load geometry file %s"
                        % self.params.input.reference_geometry)
        assert len(ref_experiments.detectors()) == 1
        self.reference_detector = ref_experiments.detectors()[0]
    else:
        assert len(ref_datablocks) == 1 and len(ref_datablocks[0].unique_detectors()) == 1
        self.reference_detector = ref_datablocks[0].unique_detectors()[0]
def test_with_bad_external_lookup(centroid_test_data):
    filename = os.path.join(centroid_test_data, "datablock_with_bad_lookup.json")
    blocks = DataBlockFactory.from_json_file(filename, check_format=False)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.empty()
    assert imageset.external_lookup.gain.data.empty()
    assert imageset.external_lookup.pedestal.data.empty()

    blocks = encode_json_then_decode(blocks, check_format=False)
    assert len(blocks) == 1
    imageset = blocks[0].extract_imagesets()[0]
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.empty()
    assert imageset.external_lookup.gain.data.empty()
    assert imageset.external_lookup.pedestal.data.empty()
def do_import(filename):
    logger.info("Loading %s" % os.path.basename(filename))
    try:
        datablocks = DataBlockFactory.from_json_file(filename)
    except ValueError:
        datablocks = DataBlockFactory.from_filenames([filename])
    if len(datablocks) == 0:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)

    # Ensure the indexer and downstream applications treat this as set of stills
    from dxtbx.imageset import ImageSet
    reset_sets = []
    for imageset in datablocks[0].extract_imagesets():
        imageset = ImageSet(imageset.reader(), imageset.indices())
        imageset._models = imageset._models
        imageset.set_scan(None)
        imageset.set_goniometer(None)
        reset_sets.append(imageset)

    return DataBlockFactory.from_imageset(reset_sets)[0]
from dxtbx.datablock import DataBlockFactory

#datablocks = DataBlockFactory.from_json_file("/home/luiso/dui/dui_test/only_9_img/idials_tst_04/dials-1/1_import/datablock.json")
datablocks = DataBlockFactory.from_json_file(
    "/home/luiso/dui/dui_test/X4_wide/test_02/dials-1/1_import/datablock.json")
print "datablocks[0] =", datablocks[0]

db = datablocks[0]
sw = db.extract_sweeps()[0]
print "sw.get_raw_data(0) =", sw.get_raw_data(0)
print "sw.get_raw_data(1) =", sw.get_raw_data(1)
print "sw.get_raw_data(2) =", sw.get_raw_data(2)

im1 = sw.get_raw_data(0)[0]
print "im1.all() =", im1.all()
                # Annotate with panel numbers
                ax.text(vcen[0], vcen[1], '%d' % g.index())
            else:
                ax.plot(z[0], z[1], z[2], color=color)
                # Annotate with panel numbers
                ax.text(vcen[0], vcen[1], vcen[2], '%d' % g.index())

    fig = plt.figure()
    colormap = plt.cm.gist_ncar
    colors = [colormap(i) for i in np.linspace(0, 0.9, len(files))]
    for file_name, color in zip(files, colors):
        # read the data and get the detector models
        try:
            datablocks = DataBlockFactory.from_json_file(file_name, check_format=False)
        except Exception, e:
            experiments = ExperimentListFactory.from_json_file(
                file_name, check_format=False)
            detectors = experiments.detectors()
        else:
            detectors = []
            for datablock in datablocks:
                detectors.extend(datablock.unique_detectors())
        for detector in detectors:
            # plot the hierarchy
            if params.orthographic:
                ax = fig.gca()
            else:
                ax = fig.gca(projection='3d')
            plot_group(detector.hierarchy(),
def exercise_spotfinder():
    if not libtbx.env.has_module("dials_regression"):
        print "Skipping exercise_spotfinder: dials_regression not present"
        return

    data_dir = libtbx.env.find_in_repositories(
        relative_path="dials_regression/centroid_test_data", test=os.path.isdir)
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = ["dials.find_spots", ' '.join(template),
            "output.reflections=spotfinder.pickle",
            "output.shoeboxes=True"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 653, len(reflections)
    refl = reflections[0]
    assert approx_equal(refl['intensity.sum.value'], 42)
    assert approx_equal(refl['bbox'], (1398, 1400, 513, 515, 0, 1))
    assert approx_equal(refl['xyzobs.px.value'],
                        (1399.1190476190477, 514.2142857142857, 0.5))
    assert "shoebox" in reflections
    print 'OK'

    # now with a resolution filter
    args = ["dials.find_spots", "filter.d_min=2", "filter.d_max=15",
            ' '.join(template),
            "output.reflections=spotfinder.pickle",
            "output.shoeboxes=False"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 467, len(reflections)
    assert "shoebox" not in reflections
    print 'OK'

    # now with more generous parameters
    args = ["dials.find_spots", "min_spot_size=3", "max_separation=3",
            ' '.join(template),
            "output.reflections=spotfinder.pickle"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 678, len(reflections)
    print 'OK'

    # Now with a user defined mask
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = ["dials.find_spots", ' '.join(template),
            "output.reflections=spotfinder.pickle",
            "output.shoeboxes=True",
            "lookup.mask=%s" % os.path.join(data_dir, "mask.pickle")]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)

    from dxtbx.datablock import DataBlockFactory
    datablocks = DataBlockFactory.from_json_file(os.path.join(data_dir, "datablock.json"))
    assert len(datablocks) == 1
    imageset = datablocks[0].extract_imagesets()[0]
    detector = imageset.get_detector()
    beam = imageset.get_beam()
    for x, y, z in reflections['xyzobs.px.value']:
        d = detector[0].get_resolution_at_pixel(beam.get_s0(), (x, y))
        assert d >= 3

    # Now with a region of interest
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = ["dials.find_spots", ' '.join(template),
            "output.reflections=spotfinder.pickle",
            "output.shoeboxes=True",
            "region_of_interest=800,1200,800,1200"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    x, y, z = reflections['xyzobs.px.value'].parts()
    assert x.all_ge(800)
    assert y.all_ge(800)
    assert x.all_lt(1200)
    assert y.all_lt(1200)
    print 'OK'

    # now with XFEL stills
    data_dir = libtbx.env.find_in_repositories(
        relative_path="dials_regression/spotfinding_test_data", test=os.path.isdir)
    template = os.path.join(data_dir, "idx-s00-20131106040302615.cbf")
    args = ["dials.find_spots", template,
            "output.reflections=spotfinder.pickle"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 2643, len(reflections)
    print 'OK'
def exercise_spotfinder():
    if not libtbx.env.has_module("dials_regression"):
        print "Skipping exercise_spotfinder: dials_regression not present"
        return

    data_dir = libtbx.env.find_in_repositories(
        relative_path="dials_regression/centroid_test_data", test=os.path.isdir
    )
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = [
        "dials.find_spots",
        " ".join(template),
        "output.reflections=spotfinder.pickle",
        "output.shoeboxes=True",
    ]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 653, len(reflections)
    refl = reflections[0]
    assert approx_equal(refl["intensity.sum.value"], 42)
    assert approx_equal(refl["bbox"], (1398, 1400, 513, 515, 0, 1))
    assert approx_equal(refl["xyzobs.px.value"], (1399.1190476190477, 514.2142857142857, 0.5))
    assert "shoebox" in reflections
    print "OK"

    # now with a resolution filter
    args = [
        "dials.find_spots",
        "filter.d_min=2",
        "filter.d_max=15",
        " ".join(template),
        "output.reflections=spotfinder.pickle",
        "output.shoeboxes=False",
    ]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 467, len(reflections)
    assert "shoebox" not in reflections
    print "OK"

    # now with more generous parameters
    args = [
        "dials.find_spots",
        "min_spot_size=3",
        "max_separation=3",
        " ".join(template),
        "output.reflections=spotfinder.pickle",
    ]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 678, len(reflections)
    print "OK"

    # Now with a user defined mask
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = [
        "dials.find_spots",
        " ".join(template),
        "output.reflections=spotfinder.pickle",
        "output.shoeboxes=True",
        "lookup.mask=%s" % os.path.join(data_dir, "mask.pickle"),
    ]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)

    from dxtbx.datablock import DataBlockFactory

    datablocks = DataBlockFactory.from_json_file(os.path.join(data_dir, "datablock.json"))
    assert len(datablocks) == 1
    imageset = datablocks[0].extract_imagesets()[0]
    detector = imageset.get_detector()
    beam = imageset.get_beam()
    for x, y, z in reflections["xyzobs.px.value"]:
        d = detector[0].get_resolution_at_pixel(beam.get_s0(), (x, y))
        assert d >= 3

    # Now with a region of interest
    template = glob(os.path.join(data_dir, "centroid*.cbf"))
    args = [
        "dials.find_spots",
        " ".join(template),
        "output.reflections=spotfinder.pickle",
        "output.shoeboxes=True",
        "region_of_interest=800,1200,800,1200",
    ]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    x, y, z = reflections["xyzobs.px.value"].parts()
    assert x.all_ge(800)
    assert y.all_ge(800)
    assert x.all_lt(1200)
    assert y.all_lt(1200)
    print "OK"

    # now with XFEL stills
    data_dir = libtbx.env.find_in_repositories(
        relative_path="dials_regression/spotfinding_test_data", test=os.path.isdir
    )
    template = os.path.join(data_dir, "idx-s00-20131106040302615.cbf")
    args = ["dials.find_spots", template, "output.reflections=spotfinder.pickle"]
    result = easy_run.fully_buffered(command=" ".join(args)).raise_if_errors()
    assert os.path.exists("spotfinder.pickle")
    with open("spotfinder.pickle", "rb") as f:
        reflections = pickle.load(f)
    assert len(reflections) == 2643, len(reflections)
    print "OK"
def run(args):
    user_phil = []
    files = []
    for arg in args:
        if os.path.isfile(arg):
            files.append(arg)
        else:
            try:
                user_phil.append(parse(arg))
            except Exception:
                raise Sorry("Unrecognized argument %s" % arg)
    params = phil_scope.fetch(sources=user_phil).extract()

    if not params.orthographic:
        from mpl_toolkits.mplot3d import proj3d

    def plot_group(g, color, orthographic=False):
        # recursively plot a detector group
        p = g.parent()
        if params.show_origin_vectors:
            if p is None:
                # parent origin
                pori = (0, 0, 0)
            else:
                # parent origin
                pori = p.get_origin()
            ori = g.get_origin()
            if not orthographic:
                a = Arrow3D([pori[0], ori[0]], [pori[1], ori[1]], [pori[2], ori[2]],
                            mutation_scale=20, lw=1, arrowstyle="-|>", color='gray')
                ax.add_artist(a)
        if g.is_group():
            for c in g:
                # plot all the children
                plot_group(c, color, orthographic)
        else:
            # plot the panel boundaries
            size = g.get_image_size()
            p0 = col(g.get_pixel_lab_coord((0, 0)))
            p1 = col(g.get_pixel_lab_coord((size[0] - 1, 0)))
            p2 = col(g.get_pixel_lab_coord((size[0] - 1, size[1] - 1)))
            p3 = col(g.get_pixel_lab_coord((0, size[1] - 1)))
            v1 = p1 - p0
            v2 = p3 - p0
            vcen = ((v2 / 2) + (v1 / 2)) + p0
            z = zip(p0, p1, p2, p3, p0)
            if orthographic:
                ax.plot(z[0], z[1], color=color)
                if params.panel_numbers:
                    # Annotate with panel numbers
                    ax.text(vcen[0], vcen[1], '%d' % g.index())
            else:
                ax.plot(z[0], z[1], z[2], color=color)
                if params.panel_numbers:
                    # Annotate with panel numbers
                    ax.text(vcen[0], vcen[1], vcen[2], '%d' % g.index())

    fig = plt.figure()
    colormap = plt.cm.gist_ncar
    colors = [colormap(i) for i in np.linspace(0, 0.9, len(files))]
    for file_name, color in zip(files, colors):
        # read the data and get the detector models
        try:
            datablocks = DataBlockFactory.from_json_file(file_name, check_format=False)
        except Exception:
            experiments = ExperimentListFactory.from_json_file(
                file_name, check_format=False)
            detectors = experiments.detectors()
        else:
            detectors = []
            for datablock in datablocks:
                detectors.extend(datablock.unique_detectors())
        for detector in detectors:
            # plot the hierarchy
            if params.orthographic:
                ax = fig.gca()
            else:
                ax = fig.gca(projection='3d')
            plot_group(detector.hierarchy(), color,
                       orthographic=params.orthographic)

    if params.orthographic:
        plt.axes().set_aspect('equal', 'datalim')

    if params.pdf_file:
        pp = PdfPages(params.pdf_file)
        for i in plt.get_fignums():
            pp.savefig(plt.figure(i))
        pp.close()
    else:
        plt.show()
    creator = flex.PixelListShoeboxCreator(
        pixel_labeller,
        0,  # panel
        0,  # zrange
        True,  # twod
        min_spot_size,  # min_pixels
        max_spot_size,  # max_pixels
        False,
    )
    shoeboxes = creator.result()
    centroid = shoeboxes.centroid_valid()
    intensity = shoeboxes.summed_intensity()
    observed = flex.observation(shoeboxes.panels(), centroid, intensity)
    return flex.reflection_table(observed, shoeboxes)


if __name__ == "__main__":
    import sys

    from dxtbx.datablock import DataBlockFactory

    db = DataBlockFactory.from_json_file(sys.argv[1])
    iset = db[0].extract_imagesets()[0]
    image = iset.get_raw_data(0)[0]
    mask = iset.get_mask(0)[0]
    reflections = find_spots(image, mask)
    print(len(reflections))
from __future__ import absolute_import, division, print_function

from dxtbx.datablock import DataBlockFactory, DataBlockDumper
import sys
from six.moves import range

fin = sys.argv[1]

px_size = 0.05  # in mm
dpx = [0, 0, 1, 1, 0, 1, 1, 0]  # in px
dpy = [0, 0, 1, 1, 0, 1, 1, 0]  # in px

db = DataBlockFactory.from_json_file(fin)
db0 = db[0]
dd = db0.to_dict()

for i in range(8):
    x = dd['detector'][0]['panels'][i]['origin'][0] + dpx[i] * px_size
    y = dd['detector'][0]['panels'][i]['origin'][1] + dpy[i] * px_size
    dd['detector'][0]['panels'][i]['origin'] = (x, y, 0)

xx = DataBlockFactory.from_dict(dd)
yy = DataBlockDumper(xx)
yy.as_file('new_detector_geom.json')
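To sanity-check the geometry written out above, one could reload new_detector_geom.json and inspect the shifted panel origins. A small sketch, reusing the dict layout from the script and assuming check_format=False so the image data need not be present:

# Sketch: reload the dumped datablock and print each panel origin.
check = DataBlockFactory.from_json_file('new_detector_geom.json', check_format=False)
for panel in check[0].to_dict()['detector'][0]['panels']:
    print(panel['origin'])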