def tst_json(self):
    """Round-trip datablocks through JSON and check they come back equal."""
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.encode_json_then_decode(blocks1)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        assert b1.format_class() == b2.format_class()
        assert b1 == b2
    assert blocks1 == blocks2

    # Repeat without format checking: the models must still survive the trip.
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.encode_json_then_decode(blocks1, check_format=False)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        for im1, im2 in zip(b1.extract_imagesets(), b2.extract_imagesets()):
            assert len(im1) == len(im2)
            if isinstance(im1, ImageSweep):
                # Sweeps carry a single model of each kind.
                assert isinstance(im2, ImageSweep)
                assert im1.get_beam() == im2.get_beam()
                assert im1.get_detector() == im2.get_detector()
                assert im1.get_goniometer() == im2.get_goniometer()
                assert im1.get_scan() == im2.get_scan()
            else:
                # Plain imagesets carry per-image models.
                assert not isinstance(im2, ImageSweep)
                for i in range(len(im1)):  # range: was Py2-only xrange
                    assert im1.get_beam(i) == im2.get_beam(i)
                    assert im1.get_detector(i) == im2.get_detector(i)
    print('OK')  # print(): was a Python-2-only print statement
def test_cbf_writer(image_file, dials_regression, run_in_tmpdir):
    """Write an image as mini-CBF and check the models survive the round trip."""
    filename = os.path.join(dials_regression, image_file)
    datablock = DataBlockFactory.from_filenames([filename])[0]
    imageset = datablock.extract_imagesets()[0]

    FormatCBFMini.as_file(
        imageset.get_detector(),
        imageset.get_beam(),
        imageset.get_goniometer(),
        imageset.get_scan(),
        imageset.get_raw_data(0)[0],
        "image_0001.cbf",
    )

    assert datablock.format_class()

    # Read the file we just wrote and compare every model.
    datablock2 = DataBlockFactory.from_filenames(["image_0001.cbf"])[0]
    imageset2 = datablock2.extract_imagesets()[0]

    tolerance = tolerance_phil_scope.extract().tolerance

    diff = SweepDiff(tolerance)
    print("\n".join(diff(imageset, imageset2)))

    assert BeamComparison()(imageset.get_beam(), imageset2.get_beam())
    assert DetectorComparison(origin_tolerance=tolerance.detector.origin)(
        imageset.get_detector(), imageset2.get_detector()
    )
    assert GoniometerComparison()(
        imageset.get_goniometer(), imageset2.get_goniometer()
    )
    s1 = imageset.get_scan()
    # BUG FIX: the original read "s2 = imageset.get_scan()", comparing the
    # scan with itself, so the written scan was never actually verified.
    s2 = imageset2.get_scan()
    assert s1.get_exposure_times() == s2.get_exposure_times()
    assert s1.get_oscillation() == s2.get_oscillation()
    assert s1.get_image_range() == s2.get_image_range()
    assert imageset.get_raw_data(0) == imageset2.get_raw_data(0)
def tst_json(self):
    """Round-trip datablocks through JSON and check they come back equal."""
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.imageset import ImageSweep
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.encode_json_then_decode(blocks1)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        assert b1.format_class() == b2.format_class()
        assert b1 == b2
    assert blocks1 == blocks2

    # Repeat without format checking: the models must still survive the trip.
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.encode_json_then_decode(blocks1, check_format=False)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        for im1, im2 in zip(b1.extract_imagesets(), b2.extract_imagesets()):
            assert len(im1) == len(im2)
            if isinstance(im1, ImageSweep):
                # Sweeps carry a single model of each kind.
                assert isinstance(im2, ImageSweep)
                assert im1.get_beam() == im2.get_beam()
                assert im1.get_detector() == im2.get_detector()
                assert im1.get_goniometer() == im2.get_goniometer()
                assert im1.get_scan() == im2.get_scan()
            else:
                # Plain imagesets carry per-image models.
                assert not isinstance(im2, ImageSweep)
                for i in range(len(im1)):  # range: was Py2-only xrange
                    assert im1.get_beam(i) == im2.get_beam(i)
                    assert im1.get_detector(i) == im2.get_detector(i)
    print('OK')  # print(): was a Python-2-only print statement
def test_VMXi_rotation_scan():
    """Check models read from a VMXi NeXus rotation-scan master file."""
    # NOTE(review): depends on a file on the DLS filesystem; only runnable there.
    master_h5 = "/dls/mx/data/mx21314/mx21314-27/VMXi-AB0816/well_7/images/image_14364_master.h5"
    assert FormatNexus.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames([master_h5])
    imageset = datablocks[0].extract_imagesets()[0]
    assert imageset.get_format_class() == FormatNexus

    detector = imageset.get_detector()
    gonio = imageset.get_goniometer()
    scan = imageset.get_scan()
    beam = imageset.get_beam()

    # Detector geometry.
    panel = detector[0]
    assert panel.get_pixel_size() == (0.075, 0.075)
    assert panel.get_image_size() == (2068, 2162)
    assert panel.get_trusted_range() == (-1, 4096)
    assert panel.get_fast_axis() == (1, 0, 0)
    assert panel.get_slow_axis() == (0, -1, 0)
    assert panel.get_origin() == pytest.approx(
        (-78.05999999999999, 87.03, -194.5039999999999)
    )
    assert panel.get_distance() == pytest.approx(194.504)

    # Goniometer: single rotation axis, identity fixed/setting rotations.
    assert isinstance(gonio, Goniometer)
    assert gonio.get_rotation_axis() == (0, 1, 0)
    assert gonio.get_fixed_rotation() == (1, 0, 0, 0, 1, 0, 0, 0, 1)
    assert gonio.get_setting_rotation() == (1, 0, 0, 0, 1, 0, 0, 0, 1)

    # Scan and beam.
    assert scan.get_oscillation() == pytest.approx((-30, 0.1))
    assert scan.get_image_range() == (1, 600)
    assert beam.get_wavelength() == pytest.approx(0.979492)
    assert beam.get_s0() == pytest.approx((0, 0, -1 / beam.get_wavelength()))
def run(self, idx, img):
    """Spot-find a single image; abort if the termination file has appeared."""
    # Guard clause: presence of the termination file means stop immediately.
    if os.path.isfile(self.term_file):
        raise IOTATermination('IOTA_TRACKER: Termination signal received!')
    block = DataBlockFactory.from_filenames([img])[0]
    spots = self.processor.find_spots(datablock=block)
    return [idx, int(len(spots)), img, None, None]
def test_split_single_image_datablock(dials_regression, tmpdir):
    """A one-image slice of a datablock keeps its original image index."""
    tmpdir.chdir()
    pytest.importorskip("h5py")
    sacla_file = os.path.join(
        dials_regression,
        "image_examples",
        "SACLA_MPCCD_Cheetah",
        "run266702-0-subset.h5",
    )
    block = DataBlockFactory.from_filenames([sacla_file])[0]
    assert block.num_images() == 4
    full_imageset = block.extract_imagesets()[0]

    # Slice out just the third image and wrap it in a new datablock.
    single = DataBlockFactory.from_imageset(full_imageset[2:3])[0]
    assert single.num_images() == 1
    assert get_indices(single) == [2]

    dumped_filename = "split_datablock.json"
    DataBlockDumper(single).as_json(dumped_filename)

    # The index must survive a JSON round trip with and without format checking.
    for check_format in (True, False):
        reloaded = DataBlockFactory.from_json_file(
            dumped_filename, check_format=check_format
        )[0]
        assert reloaded.num_images() == 1
        assert get_indices(reloaded) == [2]
def exercise_one_image(path, count_only_shadow, count_mask_shadow, count_mask_no_shadow):
    """Check goniometer shadow masking pixel counts for one image.

    :param path: image file to load
    :param count_only_shadow: expected masked count from the shadow alone
    :param count_mask_shadow: expected total masked count with shadowing enabled
    :param count_mask_no_shadow: expected total masked count with shadowing off
    """
    from dxtbx.datablock import DataBlockFactory
    assert os.path.exists(path), path
    # libtbx.Auto is expected to behave as if shadowing were enabled.
    for shadowing in (libtbx.Auto, True, False):
        format_kwargs = {'dynamic_shadowing': shadowing}
        datablock = DataBlockFactory.from_filenames(
            [path], format_kwargs=format_kwargs)[0]
        imageset = datablock.extract_imagesets()[0]
        detector = imageset.get_detector()
        scan = imageset.get_scan()
        filename = imageset.get_path(0)
        masker = imageset.masker().format_class(
            filename, **format_kwargs).get_goniometer_shadow_masker()
        assert masker is not None
        mask = masker.get_mask(detector, scan_angle=scan.get_oscillation()[0])
        assert len(mask) == len(detector)
        # only shadowed pixels masked
        assert mask[0].count(False) == count_only_shadow, (
            mask[0].count(False), count_only_shadow)
        mask = imageset.get_mask(0)
        # dead pixels, pixels in gaps, etc also masked
        if shadowing is libtbx.Auto or shadowing is True:
            assert mask[0].count(False) == count_mask_shadow, (
                mask[0].count(False), count_mask_shadow)
        else:
            assert mask[0].count(False) == count_mask_no_shadow, (
                mask[0].count(False), count_mask_no_shadow)
def test_grid_scan():
    """Check models read from an I04 Eiger grid-scan master file."""
    # NOTE(review): depends on a file on the DLS filesystem; only runnable there.
    master_h5 = "/dls/i04/data/2019/cm23004-1/20190109/Eiger/grid/Thaum/Thau_5/Thau_5_1_master.h5"
    assert FormatNexusEigerDLS16MI04.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames([master_h5])
    imageset = datablocks[0].extract_imagesets()[0]
    assert imageset.get_format_class() == FormatNexusEigerDLS16MI04

    detector = imageset.get_detector()
    gonio = imageset.get_goniometer()
    scan = imageset.get_scan()
    beam = imageset.get_beam()

    # Detector geometry.
    panel = detector[0]
    assert panel.get_pixel_size() == (0.075, 0.075)
    assert panel.get_image_size() == (4148, 4362)
    assert panel.get_trusted_range() == (-1, 65535)
    assert panel.get_fast_axis() == (1, 0, 0)
    assert panel.get_slow_axis() == (0, -1, 0)
    assert panel.get_origin() == pytest.approx(
        (-167.44717577120824, 172.46833023184868, -350.0))
    assert panel.get_distance() == 350

    # Multi-axis goniometer; axes checked only approximately.
    assert len(gonio.get_axes()) == 3
    expected_axes = ((1, 0, 0), (0, 0, -1), (1, 0, 0))
    for a1, a2 in zip(gonio.get_axes(), expected_axes):
        assert a1 == pytest.approx(a2, abs=5e-2)
    # assert gonio.get_scan_axis() == 2

    # A grid scan produces no rotation scan model.
    assert scan is None

    assert beam.get_wavelength() == pytest.approx(0.979499)
    assert beam.get_s0() == pytest.approx((0, 0, -1 / beam.get_wavelength()))
def test_units():
    """Check unit handling (geometry and beam centre) for an I04 Eiger file."""
    # NOTE(review): depends on a file on the DLS filesystem; only runnable there.
    master_h5 = "/dls/i04/data/2019/cm23004-1/20190114/Eiger/grid/Se_Thaum/Se_Thaum_12/Se_Thaum_12_2_master.h5"
    assert FormatNexusEigerDLS16MI04.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames([master_h5])
    imageset = datablocks[0].extract_imagesets()[0]
    assert imageset.get_format_class() == FormatNexusEigerDLS16MI04

    detector = imageset.get_detector()
    gonio = imageset.get_goniometer()
    scan = imageset.get_scan()
    beam = imageset.get_beam()

    panel = detector[0]
    assert panel.get_pixel_size() == (0.075, 0.075)
    assert panel.get_fast_axis() == (1, 0, 0)
    assert panel.get_slow_axis() == (0, -1, 0)
    # XXX Need to check and update expected values here, however the ones
    # dxtbx is currently generating from the file are very wrong
    assert panel.get_origin() == pytest.approx(
        (-167.35570274412459, 172.4729262553403, -339.9887931971389))
    assert panel.get_distance() == pytest.approx(339.9887931971389)

    # Grid data: no scan model expected.
    assert scan is None

    assert beam.get_wavelength() == pytest.approx(0.979499)
    assert beam.get_s0() == pytest.approx((0, 0, -1 / beam.get_wavelength()))
    assert panel.get_beam_centre_px(beam.get_s0()) == pytest.approx(
        (2231.41, 2299.64))
def test_rotation_scan(master_h5):
    """Check models read from an I04 Eiger rotation-scan master file.

    :param master_h5: path to a *_master.h5 file (test fixture)
    """
    assert FormatNexusEigerDLS16MI04.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames(
        [master_h5], format_kwargs={"dynamic_shadowing": True})
    imageset = datablocks[0].extract_imagesets()[0]
    assert imageset.get_format_class() == FormatNexusEigerDLS16MI04

    detector = imageset.get_detector()
    gonio = imageset.get_goniometer()
    scan = imageset.get_scan()
    beam = imageset.get_beam()

    # Detector geometry.
    panel = detector[0]
    assert panel.get_pixel_size() == (0.075, 0.075)
    assert panel.get_image_size() == (4148, 4362)
    assert panel.get_trusted_range() == (-1, 65535)
    assert panel.get_fast_axis() == (1, 0, 0)
    assert panel.get_slow_axis() == (0, -1, 0)
    assert panel.get_origin() == pytest.approx(
        (-166.07661632390744, 172.5371934106162, -200.0))
    assert panel.get_distance() == 200

    # Multi-axis goniometer; axes checked only approximately.
    assert len(gonio.get_axes()) == 3
    expected_axes = ((1, 0, 0), (0, 0, -1), (1, 0, 0))
    for a1, a2 in zip(gonio.get_axes(), expected_axes):
        assert a1 == pytest.approx(a2, abs=5e-2)
    assert gonio.get_scan_axis() == 2

    assert scan.get_oscillation() == (0, 0.2)
    assert scan.get_image_range() == (1, 900)

    assert beam.get_wavelength() == pytest.approx(0.979499)
    assert beam.get_s0() == pytest.approx((0, 0, -1 / beam.get_wavelength()))
def from_filenames(
    filenames,
    verbose=False,
    unhandled=None,
    compare_beam=None,
    compare_detector=None,
    compare_goniometer=None,
    scan_tolerance=None,
    format_kwargs=None,
    load_models=True,
):
    """Create a list of data blocks from a list of directory or file names."""
    datablocks = DataBlockFactory.from_filenames(
        filenames,
        verbose=verbose,
        unhandled=unhandled,
        compare_beam=compare_beam,
        compare_detector=compare_detector,
        compare_goniometer=compare_goniometer,
        scan_tolerance=scan_tolerance,
        format_kwargs=format_kwargs,
    )
    # Convert each datablock into experiments (with no crystal model attached).
    experiments = ExperimentList()
    for datablock in datablocks:
        experiments.extend(
            ExperimentListFactory.from_datablock_and_crystal(
                datablock, None, load_models
            )
        )
    return experiments
def exercise_dynamic_shadowing():
    """Check shadow-mask pixel counts for the DLS I04 SmarGon test image."""
    if dials_regression is None:
        # print(): was a Python-2-only print statement
        print('SKIP: dials_regression not configured')
        return
    path = os.path.join(
        dials_regression,
        "shadow_test_data/DLS_I04_SmarGon/Th_3_O45_C45_P48_1_0500.cbf")
    from dxtbx.datablock import DataBlockFactory
    assert os.path.exists(path), path
    # libtbx.Auto is expected to behave as if shadowing were enabled.
    for shadowing in (libtbx.Auto, True, False):
        format_kwargs = {'dynamic_shadowing': shadowing}
        datablock = DataBlockFactory.from_filenames(
            [path], format_kwargs=format_kwargs)[0]
        imageset = datablock.extract_imagesets()[0]
        detector = imageset.get_detector()
        scan = imageset.get_scan()
        filename = imageset.get_path(0)
        masker = imageset.masker().format_class(
            filename, **format_kwargs).get_goniometer_shadow_masker()
        assert masker is not None
        mask = masker.get_mask(detector, scan_angle=scan.get_oscillation()[0])
        assert len(mask) == len(detector)
        # only shadowed pixels masked
        assert (mask[0].count(True), mask[0].count(False)) == (5797243, 426758)
        mask = imageset.get_mask(0)
        # dead pixels, pixels in gaps, etc also masked
        if shadowing is libtbx.Auto or shadowing is True:
            assert (mask[0].count(True), mask[0].count(False)) == (5306061, 917940)
        else:
            assert (mask[0].count(True), mask[0].count(False)) == (5695969, 528032)
def do_import(filename):
    """Load a single datablock from an image or JSON file, stripped to stills."""
    logger.info("Loading %s" % os.path.basename(filename))
    datablocks = DataBlockFactory.from_filenames([filename])
    if not datablocks:
        # Not an image file: try to interpret it as datablock JSON instead.
        try:
            datablocks = DataBlockFactory.from_json_file(filename)
        except ValueError:
            raise Abort("Could not load %s" % filename)
    if not datablocks:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)

    # Ensure the indexer and downstream applications treat this as set of stills
    from dxtbx.imageset import ImageSetFactory

    reset_sets = []
    for original in datablocks[0].extract_imagesets():
        still = ImageSetFactory.imageset_from_anyset(original)
        still.set_scan(None)
        still.set_goniometer(None)
        reset_sets.append(still)
    return DataBlockFactory.from_imageset(reset_sets)[0]
def test_spring8_ccp4_2018_zenodo_1443110_data03():
    """Check models read from the SPring-8 Eiger nearly-NeXus example dataset."""
    # https://zenodo.org/record/1443110#.XD8bD5ynzmE
    # NOTE(review): depends on a file on the DLS scratch filesystem.
    master_h5 = "/dls/mx-scratch/rjgildea/zenodo/spring8-ccp4-2018/1443110/ccp4school2018_bl41xu/05/data03/data03_master.h5"
    assert FormatHDF5EigerNearlyNexusSPring8.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames([master_h5])
    imageset = datablocks[0].extract_imagesets()[0]
    assert imageset.get_format_class() == FormatHDF5EigerNearlyNexusSPring8

    detector = imageset.get_detector()
    gonio = imageset.get_goniometer()
    scan = imageset.get_scan()
    beam = imageset.get_beam()

    # Detector geometry.
    panel = detector[0]
    assert panel.get_pixel_size() == pytest.approx((0.075, 0.075))
    assert panel.get_image_size() == (4150, 4371)
    assert panel.get_trusted_range() == (-1, 2.094707e06)
    assert panel.get_fast_axis() == (1, 0, 0)
    assert panel.get_slow_axis() == (0, -1, 0)
    assert panel.get_origin() == pytest.approx((-151.939, 169.629, -180), abs=1e-3)
    assert panel.get_distance() == pytest.approx(180)

    # Goniometer: single rotation axis, identity fixed/setting rotations.
    assert isinstance(gonio, Goniometer)
    assert gonio.get_rotation_axis() == (-1, 0, 0)
    assert gonio.get_fixed_rotation() == (1, 0, 0, 0, 1, 0, 0, 0, 1)
    assert gonio.get_setting_rotation() == (1, 0, 0, 0, 1, 0, 0, 0, 1)

    assert scan.get_oscillation() == pytest.approx((-10, 1))
    assert scan.get_image_range() == (1, 180)

    assert beam.get_wavelength() == pytest.approx(1.28241, abs=1e-5)
    assert beam.get_s0() == pytest.approx((0, 0, -1 / beam.get_wavelength()))
def failover_hdf5(hdf5_file):
    """Build an XDS-style header dictionary from an Eiger HDF5 master file.

    :param hdf5_file: path to a *_master.h5 file
    :return: dict of header values (detector class, oscillation, beam centre, ...)
    """
    from dxtbx.serialize import xds
    from dxtbx.datablock import DataBlockFactory
    import time
    t0 = time.time()
    db = DataBlockFactory.from_filenames([hdf5_file])[0]
    sweep = db.extract_sweeps()[0]
    t1 = time.time()
    if version == 2:
        try:
            write('Reading %s took %.2fs' % (hdf5_file, t1 - t0))
        except Exception:
            # Best-effort timing output must never break the import.
            # FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            pass
    else:
        write('Reading {} took {:.2f}s'.format(hdf5_file, t1 - t0))
    d = sweep.get_detector()
    s = sweep.get_scan()
    g = sweep.get_goniometer()
    b = sweep.get_beam()

    # returns slow, fast, convention here is reverse
    size = tuple(reversed(d[0].get_image_size()))

    # Map the slow dimension (in thousands of pixels) to a detector class name.
    size0k_to_class = {
        1: 'eiger 1M',
        2: 'eiger 4M',
        3: 'eiger 9M',
        4: 'eiger 16M'
    }
    header = {}
    header['detector_class'] = size0k_to_class[int(size[0] / 1000)]
    header['detector'] = size0k_to_class[int(size[0] / 1000)].upper().replace(
        ' ', '_')
    header['size'] = size
    header['serial_number'] = 0
    header['extra_text'] = find_hdf5_lib()
    # Oscillation derived from the angles of the first two image indices.
    header['phi_start'] = s.get_angle_from_image_index(1.0, deg=True)
    header['phi_end'] = s.get_angle_from_image_index(2.0, deg=True)
    header['phi_width'] = header['phi_end'] - header['phi_start']
    header['oscillation'] = header['phi_start'], header['phi_width']
    header['exposure_time'] = s.get_exposure_times()[0]
    header['oscillation_axis'] = 'Omega_I_guess'
    header['distance'] = d[0].get_distance()
    header['wavelength'] = b.get_wavelength()
    header['pixel'] = d[0].get_pixel_size()
    header['saturation'] = d[0].get_trusted_range()[1]
    header['sensor'] = d[0].get_thickness()
    header['beam'] = d[0].get_beam_centre(b.get_s0())
    images = s.get_image_range()
    directory, template = os.path.split(hdf5_file)
    header['directory'] = directory
    header['template'] = template.replace('master', '??????')
    header['start'] = images[0]
    header['end'] = images[1]
    header['matching'] = range(images[0], images[1] + 1)
    return header
def test_pickling(multiple_block_filenames):
    """Datablocks must survive a pickle round trip unchanged."""
    originals = DataBlockFactory.from_filenames(multiple_block_filenames, verbose=True)
    restored = pickle_then_unpickle(originals)
    assert len(restored) == len(originals)
    for before, after in zip(originals, restored):
        assert before.format_class() == after.format_class()
        assert before == after
    assert originals == restored
def do_import(filename):
    """Load exactly one datablock from an image file or raise Abort."""
    info("Loading %s" % os.path.basename(filename))
    datablocks = DataBlockFactory.from_filenames([filename])
    # Exactly one datablock is acceptable; anything else is an error.
    if not datablocks:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)
    return datablocks[0]
def test_screening():
    """A gw-screen master file should yield three imagesets of the I04 format."""
    # NOTE(review): depends on a file on the DLS filesystem; only runnable there.
    master_h5 = "/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw-screen/Thaum/Thau_3/Thau_3_1_master.h5"
    assert FormatNexusEigerDLS16MI04.understand(master_h5)

    datablocks = DataBlockFactory.from_filenames([master_h5])
    imagesets = datablocks[0].extract_imagesets()
    assert len(imagesets) == 3
    # BUG FIX: the original referenced an undefined name `imageset`
    # (NameError at runtime); check the format class of each extracted set.
    for imageset in imagesets:
        assert imageset.get_format_class() == FormatNexusEigerDLS16MI04
def test_json(multiple_block_filenames):
    """Datablocks must survive a JSON round trip unchanged."""
    originals = DataBlockFactory.from_filenames(multiple_block_filenames, verbose=True)
    restored = encode_json_then_decode(originals)
    assert len(restored) == len(originals)
    for before, after in zip(originals, restored):
        assert before.format_class() == after.format_class()
        assert before == after
    assert originals == restored
def __init__(self, image=None, datablock=None):
    """Hold a datablock for radial averaging, building one from an image if needed.

    :param image: path to an image file (used only when datablock is None)
    :param datablock: an existing dxtbx datablock
    """
    if image is None and datablock is None:
        # print(): was a Python-2-only print statement
        print('ERROR: Need image or datablock for Radial Average Calculator')
        return
    if datablock is None:
        from dxtbx.datablock import DataBlockFactory
        self.datablock = DataBlockFactory.from_filenames([image])[0]
    else:
        self.datablock = datablock
def get(self):
    """Render one image of a data collection as a bitmap and send it.

    Aborts with HTTP 400 when the collection, file, or datablock is missing.
    """
    args = self.reqparse.parse_args()
    h5exts = ['h5', 'nxs']

    dc = get_dc(args.dcid)
    if dc is None:
        abort(400, message='No such data collection')

    ext = os.path.splitext(str(
        dc.file_template_full_python))[1][1:].strip().lower()
    if ext in h5exts:
        # HDF5: a single master file holds every image.
        path = str(dc.file_template_full_python)
    else:
        # Template with a frame-number placeholder.
        path = dc.file_template_full_python % args.image
    # print(): was a Python-2 print statement; local renamed from `file`
    # to avoid shadowing the Python 2 builtin.
    print('file', path, ext)

    if not os.path.exists(path):
        abort(400, message='No such file')

    datablocks = DataBlockFactory.from_filenames([path], verbose=True)
    if not len(datablocks):
        abort(400, message='Could not parse datablock')

    datablock = datablocks[0]
    imageset = datablock.extract_imagesets()[0]
    if ext in h5exts:
        # Slice the requested frame out of the HDF5 imageset (1-based request).
        image = imageset[(args.image - 1):args.image]
    else:
        image = imageset[0:1]

    params = phil_scope.extract()
    params.format = 'jpeg'
    if args.quality:
        params.quality = args.quality
    if args.binning:
        params.binning = args.binning
    params.output_dir = '/tmp'
    params.prefix = str(time.time())
    params.imageset_index = 0
    names = imageset_as_bitmaps(image, params)

    # Clean up the temporary bitmap(s) once the response has gone out.
    @after_this_request
    def remove_file(response):
        for n in names:
            if os.path.exists(n):
                os.remove(n)
        return response

    return send_file(names[0])
def test_create_single_sweep(single_sweep_filenames):
    """Nine images from one sweep collapse into a single block/imageset/sweep."""
    blocks = DataBlockFactory.from_filenames(single_sweep_filenames)
    assert len(blocks) == 1
    block = blocks[0]
    assert block.num_images() == 9
    imagesets = block.extract_imagesets()
    assert len(imagesets) == 1 and len(imagesets[0]) == 9
    sweeps = block.extract_sweeps()
    assert len(sweeps) == 1 and len(sweeps[0]) == 9
def test_create_multiple_sweeps(multiple_sweep_filenames):
    """Six images spanning two sweeps stay in one block with two sweeps."""
    blocks = DataBlockFactory.from_filenames(multiple_sweep_filenames)
    assert len(blocks) == 1
    block = blocks[0]
    assert block.num_images() == 6
    assert len(block.extract_imagesets()) == 2
    sweeps = block.extract_sweeps()
    assert len(sweeps) == 2
    assert [len(sweep) for sweep in sweeps] == [3, 3]
def loader(x):
    """Return the first imageset from a path, trying an image file, then
    datablock JSON, then experiment-list JSON, in that order."""
    try:
        return DataBlockFactory.from_filenames([x])[0].extract_imagesets()[0]
    except IndexError:
        # Not an image file; fall back to serialized representations.
        import dxtbx.datablock
        try:
            return DataBlockFactory.from_json_file(x)[0].extract_imagesets()[0]
        except dxtbx.datablock.InvalidDataBlockError:
            return ExperimentListFactory.from_json_file(x)[0].imageset
def spf_wrapper(self, img):
    """Spot-find one image, returning [index, n_spots, path]."""
    # Termination file present: clean it up and stop the tracker.
    if os.path.isfile(self.term_file):
        os.remove(self.term_file)
        raise IOTATermination('IOTA_TRACKER: Termination signal received!')
    idx = int(self.data_list.index(img))
    if not os.path.isfile(img):
        # Image not on disk (yet): report zero spots.
        return [idx, 0, img]
    block = DataBlockFactory.from_filenames([img])[0]
    found = self.processor.find_spots(datablock=block)
    return [idx, int(len(found)), img]
def test_create_multiple_sequences(multiple_sequence_filenames):
    """Six images spanning two sequences stay in one block with two sequences."""
    blocks = DataBlockFactory.from_filenames(multiple_sequence_filenames)
    assert len(blocks) == 1
    block = blocks[0]
    assert block.num_images() == 6
    assert block.format_class()
    assert len(block.extract_imagesets()) == 2
    sequences = block.extract_sequences()
    assert len(sequences) == 2
    assert [len(sequence) for sequence in sequences] == [3, 3]
def tst_pickling(self):
    """Datablocks must survive a pickle round trip unchanged."""
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.pickle_then_unpickle(blocks1)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        assert b1.format_class() == b2.format_class()
        assert b1 == b2
    assert blocks1 == blocks2
    print('OK')  # print(): was a Python-2-only print statement
def test_create_single_sequence(single_sequence_filenames):
    """Nine images from one sequence collapse into a single block/sequence."""
    blocks = DataBlockFactory.from_filenames(single_sequence_filenames, verbose=True)
    assert len(blocks) == 1
    block = blocks[0]
    assert block.num_images() == 9
    assert block.format_class()
    imagesets = block.extract_imagesets()
    assert len(imagesets) == 1 and len(imagesets[0]) == 9
    sequences = block.extract_sequences()
    assert len(sequences) == 1 and len(sequences[0]) == 9
def do_import(filename):
    """Load exactly one datablock from a JSON file or an image file."""
    logger.info("Loading %s" % os.path.basename(filename))
    try:
        datablocks = DataBlockFactory.from_json_file(filename)
    except ValueError:
        # Not datablock JSON: treat it as an image file instead.
        datablocks = DataBlockFactory.from_filenames([filename])
    if not datablocks:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)
    return datablocks[0]
def test_single_image_datablock(dials_regression):
    """A single LCLS CSPAD CBF yields a datablock whose imageset has a detector."""
    path = os.path.join(
        dials_regression,
        "image_examples",
        "LCLS_cspad_nexus",
        "idx-20130301060858401.cbf",
    )
    first_block = DataBlockFactory.from_filenames([path])[0]
    imageset = first_block.extract_imagesets()[0]
    assert imageset.get_detector(0) is not None
def tst_create_multiple_sweeps(self):
    """Six images spanning two sweeps should form one block with two sweeps."""
    filenames = self.multiple_sweep_filenames()
    blocks = DataBlockFactory.from_filenames(filenames)
    assert len(blocks) == 1
    assert blocks[0].num_images() == 6
    imageset = blocks[0].extract_imagesets()
    assert len(imageset) == 2
    sweeps = blocks[0].extract_sweeps()
    assert len(sweeps) == 2
    assert len(sweeps[0]) == 3
    assert len(sweeps[1]) == 3
    print('OK')  # print(): was a Python-2-only print statement
def tst_create_single_sweep(self):
    """Nine images from one sweep should form a single block/imageset/sweep."""
    filenames = self.single_sweep_filenames()
    blocks = DataBlockFactory.from_filenames(filenames)
    assert len(blocks) == 1
    assert blocks[0].num_images() == 9
    imageset = blocks[0].extract_imagesets()
    assert len(imageset) == 1
    assert len(imageset[0]) == 9
    sweeps = blocks[0].extract_sweeps()
    assert len(sweeps) == 1
    assert len(sweeps[0]) == 9
    print('OK')  # print(): was a Python-2-only print statement
def __init__(self, source_image=None, object_folder=None, gain = 0.32, params=None):
    '''Initialise the script.'''
    from dials.util.options import OptionParser
    from dxtbx.datablock import DataBlockFactory
    from dials.array_family import flex
    from iotbx.phil import parse
    from xfel.command_line.xfel_process import phil_scope

    # Base PHIL scope from xtc_process, extended below with output / indexing /
    # cxi_merge options specific to this script.
    phil_scope = parse('''
      include scope xfel.command_line.xtc_process.phil_scope
    ''', process_includes=True)

    sub_phil_scope = parse('''
      output {
        cxi_merge_picklefile = None
          .type = str
          .help = Output integration results for each color data to separate cctbx.xfel-style pickle files
      }
      indexing {
        stills {
          ewald_proximity_resolution_cutoff = 2.0
            .type = float
            .help = For calculating the area under the green curve, or the acceptable
            .help = volume of reciprocal space for spot prediction, use this high-resolution cutoff
        }
      }
      cxi_merge {
        include scope xfel.command_line.cxi_merge.master_phil
      }
    ''', process_includes=True)
    phil_scope.adopt_scope(sub_phil_scope)

    # Create the parser
    self.parser = OptionParser(
        phil=phil_scope,
        read_datablocks=True,
        read_datablocks_from_images=True)

    self.params = params          # externally supplied parameters (may be None)
    self.img = [source_image]     # single-element list: the image to process
    self.obj_base = object_folder # output folder for integration objects
    self.phil = phil_scope.extract()

    # Build a single-image datablock, suppressing any console noise.
    with misc.Capturing() as junk_output:
        self.datablock = DataBlockFactory.from_filenames(self.img)[0]

    self.obj_filename = "int_{}".format(os.path.basename(self.img[0]))
    # NOTE(review): this joins obj_base with the *full* source image path —
    # looks like it may have been intended to use the basename; confirm.
    self.phil.output.cxi_merge_picklefile = os.path.join(self.obj_base, self.img[0])
def work(filename, cl=None):
    """Find spots on one image and run per-image analysis.

    :param filename: image file to process
    :param cl: optional list of command-line phil override strings
    :return: result of analyse() on the found reflections
    """
    from dials.command_line.find_spots import phil_scope as params
    from dxtbx.datablock import DataBlockFactory
    from dials.array_family import flex
    if cl is None:
        # FIX: default was the mutable literal cl=[] (shared across calls).
        cl = []
    interp = params.command_line_argument_interpreter()
    for cla in cl:
        params = params.fetch(interp.process(cla))
    datablock = DataBlockFactory.from_filenames([filename])[0]
    reflections = flex.reflection_table.from_observations(
        datablock, params.extract())
    detector = datablock.unique_detectors()[0]
    beam = datablock.unique_beams()[0]
    return analyse(reflections, detector, beam)
def tst_pickling(self):
    """Datablocks must survive a pickle round trip unchanged."""
    from dxtbx.datablock import DataBlockFactory
    filenames = self.multiple_block_filenames()
    blocks1 = DataBlockFactory.from_filenames(filenames)
    blocks2 = self.pickle_then_unpickle(blocks1)
    assert len(blocks2) == len(blocks1)
    for b1, b2 in zip(blocks1, blocks2):
        assert b1.format_class() == b2.format_class()
        assert b1 == b2
    assert blocks1 == blocks2
    print('OK')  # print(): was a Python-2-only print statement
def tst_create_multiple_sweeps(self):
    """Six images spanning two sweeps should form one block with two sweeps."""
    from dxtbx.datablock import DataBlockFactory
    filenames = self.multiple_sweep_filenames()
    blocks = DataBlockFactory.from_filenames(filenames)
    assert len(blocks) == 1
    assert blocks[0].num_images() == 6
    imageset = blocks[0].extract_imagesets()
    assert len(imageset) == 2
    sweeps = blocks[0].extract_sweeps()
    assert len(sweeps) == 2
    assert len(sweeps[0]) == 3
    assert len(sweeps[1]) == 3
    print('OK')  # print(): was a Python-2-only print statement
def OnChooseDirectory(self, event):
    """Populate the image-stack chooser from the selected directory."""
    dir_name = self.dir_ctrl.GetPhilValue()
    if dir_name is None:
        return
    from dxtbx.datablock import DataBlockFactory
    datablocks = DataBlockFactory.from_filenames([dir_name])
    self._imagesets = datablocks[0].extract_imagesets()
    # One choice per imageset, labelled by its filename template; select all.
    choices = [imgset.get_template() for imgset in self._imagesets]
    self.stack_ctrl.SetItems(choices)
    for i in range(len(choices)):
        self.stack_ctrl.SetSelection(i)
def __init__(self, source_image, object_folder, final_folder, final_filename, final, logfile, gain = 0.32, params=None):
    '''Initialise the script.'''
    from dxtbx.datablock import DataBlockFactory
    self.params = params

    # Read settings from the DIALS target (.phil) file
    # If none is provided, use default settings (and may God have mercy)
    if self.params.dials.target != None:
        with open(self.params.dials.target, 'r') as settings_file:
            settings_file_contents = settings_file.read()
        settings = parse(settings_file_contents)
        current_phil = phil_scope.fetch(sources=[settings])
        self.phil = current_phil.extract()
    else:
        self.phil = phil_scope.extract()

    # Set general file-handling settings
    # All intermediate outputs are keyed on the source image's basename.
    file_basename = os.path.basename(source_image).split('.')[0]
    self.phil.output.datablock_filename = "{}/{}.json".format(object_folder, file_basename)
    self.phil.output.indexed_filename = "{}/{}_indexed.pickle".format(object_folder, file_basename)
    self.phil.output.strong_filename = "{}/{}_strong.pickle".format(object_folder, file_basename)
    self.phil.output.refined_experiments_filename = "{}/{}_refined_experiments.json".format(object_folder, file_basename)
    self.phil.output.integrated_filename = "{}/{}_integrated.pickle".format(object_folder, file_basename)
    self.phil.output.profile_filename = "{}/{}_profile.phil".format(object_folder, file_basename)
    self.phil.output.integration_pickle = final_filename
    self.int_log = logfile  #"{}/int_{}.log".format(final_folder, file_basename)

    self.img = [source_image]   # single-element list: the image to process
    self.obj_base = object_folder
    self.gain = gain
    self.fail = None            # set later if processing fails
    self.frame = None
    self.final = final
    self.final['final'] = final_filename

    # Build a single-image datablock, suppressing any console noise.
    with misc.Capturing() as junk_output:
        self.datablock = DataBlockFactory.from_filenames(self.img)[0]

    self.obj_filename = "int_{}".format(os.path.basename(self.img[0]))
def try_read_datablocks_from_images(self, args, verbose, compare_beam,
                                    compare_detector, compare_goniometer,
                                    scan_tolerance, format_kwargs):
    '''
    Try to import images.

    :param args: The input arguments
    :param verbose: Print verbose output
    :return: Unhandled arguments
    '''
    from dxtbx.datablock import DataBlockFactory
    from dials.phil import FilenameDataWrapper, DataBlockConverters
    from glob import glob

    # Expand any wildcard arguments; plain names pass through untouched.
    expanded = []
    for arg in args:
        expanded.extend(glob(arg) if "*" in arg else [arg])

    unhandled = []
    datablocks = DataBlockFactory.from_filenames(
        expanded,
        verbose=verbose,
        unhandled=unhandled,
        compare_beam=compare_beam,
        compare_detector=compare_detector,
        compare_goniometer=compare_goniometer,
        scan_tolerance=scan_tolerance,
        format_kwargs=format_kwargs)
    if datablocks:
        # Cache under a sentinel filename so phil conversion can find it later.
        filename = "<image files>"
        wrapper = FilenameDataWrapper(filename, datablocks)
        DataBlockConverters.cache[filename] = wrapper
        self.datablocks.append(wrapper)
    return unhandled
def tst_create_multiple_blocks(self):
    """Mixed filenames should split into 22 datablocks; spot-check block 1."""
    from dxtbx.datablock import DataBlockFactory
    filenames = self.multiple_block_filenames()
    blocks = DataBlockFactory.from_filenames(filenames, verbose=False)
    assert len(blocks) == 22

    # Block 1
    assert blocks[0].num_images() == 9
    imageset = blocks[0].extract_imagesets()
    assert len(imageset) == 1
    assert len(imageset[0]) == 9
    sweeps = blocks[0].extract_sweeps()
    assert len(sweeps) == 1
    assert len(sweeps[0]) == 9

    print('OK')  # print(): was a Python-2-only print statement
def __call__(self):
    ''' Import the datablocks

    Returns the single imported datablock; raises Sorry when zero or more
    than one datablock results from the input parameters.
    '''
    from dxtbx.datablock import DataBlockTemplateImporter
    from dxtbx.datablock import DataBlockFactory
    from dials.util.options import flatten_datablocks
    from libtbx.utils import Sorry

    # Get the datablocks
    datablocks = flatten_datablocks(self.params.input.datablock)

    # Check we have some filenames
    if len(datablocks) == 0:

        format_kwargs = {
            'dynamic_shadowing' : self.params.format.dynamic_shadowing
        }

        # Check if a template has been set and print help if not, otherwise try to
        # import the images based on the template input
        if len(self.params.input.template) > 0:
            importer = DataBlockTemplateImporter(
                self.params.input.template,
                max(self.params.verbosity-1, 0),
                format_kwargs=format_kwargs)
            datablocks = importer.datablocks
            if len(datablocks) == 0:
                raise Sorry('No datablocks found matching template %s' % self.params.input.template)
        elif len(self.params.input.directory) > 0:
            # Fall back to scanning the given directories for images.
            datablocks = DataBlockFactory.from_filenames(
                self.params.input.directory,
                max(self.params.verbosity-1, 0),
                format_kwargs=format_kwargs)
            if len(datablocks) == 0:
                raise Sorry('No datablocks found in directories %s' % self.params.input.directory)
        else:
            raise Sorry('No datablocks found')
    if len(datablocks) > 1:
        raise Sorry("More than 1 datablock found")

    # Return the datablocks
    return datablocks[0]
def __init__(self, img, gain, params):
    """ Initialization and data read-in

    :param img: path to the raw image to process
    :param gain: detector gain value
    :param params: iota parameter object (provides dials.target)
    """
    from dxtbx.datablock import DataBlockFactory

    self.gain = gain
    self.params = params

    # Read settings from the DIALS target (.phil) file
    # If none is provided, use default settings (and may God have mercy)
    if self.params.dials.target is not None:  # idiom fix: was "!= None"
        with open(self.params.dials.target, 'r') as settings_file:
            settings_file_contents = settings_file.read()
        settings = parse(settings_file_contents)
        current_phil = phil_scope.fetch(sources=[settings])
        self.phil = current_phil.extract()
    else:
        self.phil = phil_scope.extract()

    # Convert raw image into single-image datablock
    with misc.Capturing() as junk_output:
        self.datablock = DataBlockFactory.from_filenames([img])[0]
def do_import(filename):
    """Load a single datablock from *filename* (json or image file) and
    strip scan/goniometer models so downstream code treats it as stills.

    Raises Abort if the file yields zero or more than one datablock.
    """
    logger.info("Loading %s"%os.path.basename(filename))
    try:
        datablocks = DataBlockFactory.from_json_file(filename)
    except ValueError:
        # Not a json file: fall back to importing it as an image.
        datablocks = DataBlockFactory.from_filenames([filename])
    if len(datablocks) == 0:
        raise Abort("Could not load %s"%filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s"%filename)

    # Ensure the indexer and downstream applications treat this as set of stills
    from dxtbx.imageset import ImageSet
    reset_sets = []
    for imageset in datablocks[0].extract_imagesets():
        # FIX: the original rebound 'imageset' before copying its models,
        # making 'imageset._models = imageset._models' a no-op
        # self-assignment. Build the new ImageSet under a distinct name so
        # the models really are carried over from the source imageset.
        still_set = ImageSet(imageset.reader(), imageset.indices())
        still_set._models = imageset._models
        still_set.set_scan(None)
        still_set.set_goniometer(None)
        reset_sets.append(still_set)
    return DataBlockFactory.from_imageset(reset_sets)[0]
filenames = [] for arg in args: if "indexing.data" in arg: path = arg.split('=')[1] if os.path.isdir(path): for subfile in os.listdir(path): subpath = os.path.join(path, subfile) if os.path.isfile(subpath): filenames.append(subpath) else: filenames.append(path) print filenames datablock = DataBlockFactory.from_filenames(filenames)[0] observed = flex.reflection_table.from_observations(datablock, params) observed.as_pickle("strong.pickle") print "Number of observed reflections:", len(observed) working_params = copy.deepcopy(params) imagesets = datablock.extract_imagesets() # old labelit # from spotfinder.applications.xfel import cxi_phil # horizons_phil = cxi_phil.cxi_versioned_extract(args) print "indexing..." t0 = clock()
def run(args):
    """Plot spot-finding / indexing statistics from a find_spots.json file.

    Reads per-image results (spot counts, resolution estimates, optional
    indexing results), prints summary tables, and writes a series of
    histogram / hexbin plots as PNG files into the working directory.
    """
    from dials.util.options import OptionParser
    import libtbx.load_env
    usage = "%s [options] find_spots.json" %(
        libtbx.env.dispatcher_name)
    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        epilog=help_message)
    params, options, args = parser.parse_args(
        show_diff_phil=True, return_unhandled=True)

    # Optional positions file: one "i x y" triplet per line, with optional
    # parentheses/commas which are stripped before parsing.
    positions = None
    if params.positions is not None:
        with open(params.positions, 'rb') as f:
            positions = flex.vec2_double()
            for line in f.readlines():
                line = line.replace('(', ' ').replace(')', '').replace(',', ' ').strip().split()
                assert len(line) == 3
                i, x, y = [float(l) for l in line]
                positions.append((x, y))

    assert len(args) == 1
    json_file = args[0]
    import json

    with open(json_file, 'rb') as f:
        results = json.load(f)

    n_indexed = flex.double()
    fraction_indexed = flex.double()
    n_spots = flex.double()
    n_lattices = flex.double()
    crystals = []
    image_names = flex.std_string()

    # Collect per-image spot counts and (where present) indexing results.
    # Images without indexing results contribute zeros.
    for r in results:
        n_spots.append(r['n_spots_total'])
        image_names.append(str(r['image']))
        if 'n_indexed' in r:
            n_indexed.append(r['n_indexed'])
            fraction_indexed.append(r['fraction_indexed'])
            n_lattices.append(len(r['lattices']))
            for d in r['lattices']:
                from dxtbx.serialize.crystal import from_dict
                crystals.append(from_dict(d['crystal']))
        else:
            n_indexed.append(0)
            fraction_indexed.append(0)
            n_lattices.append(0)

    import matplotlib
    # Headless backend: this script only writes PNG files.
    matplotlib.use('Agg')
    from matplotlib import pyplot

    blue = '#3498db'
    red = '#e74c3c'
    marker = 'o'
    alpha = 0.5
    lw = 0

    plot = True
    table = True
    grid = params.grid

    from libtbx import group_args
    from dials.algorithms.peak_finding.per_image_analysis \
         import plot_stats, print_table

    estimated_d_min = flex.double()
    d_min_distl_method_1 = flex.double()
    d_min_distl_method_2 = flex.double()
    n_spots_total = flex.int()
    n_spots_no_ice = flex.int()
    total_intensity = flex.double()
    for d in results:
        estimated_d_min.append(d['estimated_d_min'])
        d_min_distl_method_1.append(d['d_min_distl_method_1'])
        d_min_distl_method_2.append(d['d_min_distl_method_2'])
        n_spots_total.append(d['n_spots_total'])
        n_spots_no_ice.append(d['n_spots_no_ice'])
        total_intensity.append(d['total_intensity'])

    stats = group_args(image=image_names,
                       n_spots_total=n_spots_total,
                       n_spots_no_ice=n_spots_no_ice,
                       n_spots_4A=None,
                       total_intensity=total_intensity,
                       estimated_d_min=estimated_d_min,
                       d_min_distl_method_1=d_min_distl_method_1,
                       d_min_distl_method_2=d_min_distl_method_2,
                       noisiness_method_1=None,
                       noisiness_method_2=None)

    if plot:
        plot_stats(stats)
        pyplot.clf()
    if table:
        print_table(stats)

    print "Number of indexed lattices: ", (n_indexed > 0).count(True)

    print "Number with valid d_min but failed indexing: ", (
        (d_min_distl_method_1 > 0) &
        (d_min_distl_method_2 > 0) &
        (estimated_d_min > 0) &
        (n_indexed == 0)).count(True)

    # Show the top images ranked by total spot count.
    n_rows = 10
    n_rows = min(n_rows, len(n_spots_total))
    perm_n_spots_total = flex.sort_permutation(n_spots_total, reverse=True)
    print 'Top %i images sorted by number of spots:' %n_rows
    print_table(stats, perm=perm_n_spots_total, n_rows=n_rows)

    n_bins = 20
    spot_count_histogram(
        n_spots_total, n_bins=n_bins, filename='hist_n_spots_total.png', log=True)
    spot_count_histogram(
        n_spots_no_ice, n_bins=n_bins, filename='hist_n_spots_no_ice.png', log=True)
    spot_count_histogram(
        n_indexed.select(n_indexed > 0), n_bins=n_bins,
        filename='hist_n_indexed.png', log=False)

    if len(crystals):
        plot_unit_cell_histograms(crystals)

    if params.stereographic_projections and len(crystals):
        # Project the (100), (010), (001) poles of every indexed crystal onto
        # stereographic projections, one PNG per axis.
        from dxtbx.datablock import DataBlockFactory
        datablocks = DataBlockFactory.from_filenames(
            [image_names[0]], verbose=False)
        assert len(datablocks) == 1
        imageset = datablocks[0].extract_imagesets()[0]
        s0 = imageset.get_beam().get_s0()
        # XXX what if no goniometer?
        rotation_axis = imageset.get_goniometer().get_rotation_axis()

        indices = ((1,0,0), (0,1,0), (0,0,1))
        for i, index in enumerate(indices):
            from cctbx import crystal, miller
            from scitbx import matrix
            miller_indices = flex.miller_index([index])
            symmetry = crystal.symmetry(
                unit_cell=crystals[0].get_unit_cell(),
                space_group=crystals[0].get_space_group())
            miller_set = miller.set(symmetry, miller_indices)
            d_spacings = miller_set.d_spacings()
            d_spacings = d_spacings.as_non_anomalous_array().expand_to_p1()
            d_spacings = d_spacings.generate_bijvoet_mates()
            miller_indices = d_spacings.indices()

            # plane normal
            d0 = matrix.col(s0).normalize()
            d1 = d0.cross(matrix.col(rotation_axis)).normalize()
            d2 = d1.cross(d0).normalize()
            reference_poles = (d0, d1, d2)

            from dials.command_line.stereographic_projection import stereographic_projection
            projections = []

            for cryst in crystals:
                reciprocal_space_points = list(cryst.get_U() * cryst.get_B()) * miller_indices.as_vec3_double()
                projections.append(stereographic_projection(
                    reciprocal_space_points, reference_poles))

                #from dials.algorithms.indexing.compare_orientation_matrices import \
                #  difference_rotation_matrix_and_euler_angles
                #R_ij, euler_angles, cb_op = difference_rotation_matrix_and_euler_angles(
                #  crystals[0], cryst)
                #print max(euler_angles)

            from dials.command_line.stereographic_projection import plot_projections
            plot_projections(projections, filename='projections_%s.png' %('hkl'[i]))
            pyplot.clf()

    def plot_grid(values, grid, file_name, cmap=pyplot.cm.Reds, vmin=None,
                  vmax=None, invalid='white'):
        # Render a flat array of per-image values as a 2-d grid heat map.
        values = values.as_double()
        # At DLS, fast direction appears to be largest direction
        if grid[0] > grid[1]:
            values.reshape(flex.grid(reversed(grid)))
            values = values.matrix_transpose()
        else:
            values.reshape(flex.grid(grid))

        Z = values.as_numpy_array()

        #f, (ax1, ax2) = pyplot.subplots(2)
        f, ax1 = pyplot.subplots(1)

        mesh1 = ax1.pcolormesh(
            values.as_numpy_array(), cmap=cmap, vmin=vmin, vmax=vmax)
        # Out-of-range cells (e.g. the -2 sentinel from plot_positions) are
        # drawn in the 'invalid' colour.
        mesh1.cmap.set_under(color=invalid, alpha=None)
        mesh1.cmap.set_over(color=invalid, alpha=None)
        #mesh2 = ax2.contour(Z, cmap=cmap, vmin=vmin, vmax=vmax)
        #mesh2 = ax2.contourf(Z, cmap=cmap, vmin=vmin, vmax=vmax)
        ax1.set_aspect('equal')
        ax1.invert_yaxis()
        #ax2.set_aspect('equal')
        #ax2.invert_yaxis()
        pyplot.colorbar(mesh1, ax=ax1)
        #pyplot.colorbar(mesh2, ax=ax2)
        pyplot.savefig(file_name, dpi=600)
        pyplot.clf()

    def plot_positions(values, positions, file_name, cmap=pyplot.cm.Reds,
                       vmin=None, vmax=None, invalid='white'):
        # Snap per-image values onto an integer grid derived from their
        # (x, y) positions, then delegate to plot_grid. Unfilled cells keep
        # the -2 sentinel and render as 'invalid'.
        values = values.as_double()
        assert positions.size() >= values.size()
        positions = positions[:values.size()]

        if vmin is None:
            vmin = flex.min(values)
        if vmax is None:
            vmax = flex.max(values)

        x, y = positions.parts()
        dx = flex.abs(x[1:] - x[:-1])
        dy = flex.abs(y[1:] - y[:-1])
        dx = dx.select(dx > 0)
        dy = dy.select(dy > 0)

        # Scale so the smallest x-step becomes one grid cell.
        scale = 1/flex.min(dx)
        #print scale
        x = (x * scale).iround()
        y = (y * scale).iround()

        from libtbx.math_utils import iceil
        z = flex.double(flex.grid(iceil(flex.max(y))+1, iceil(flex.max(x))+1), -2)
        #print z.all()
        for x_, y_, z_ in zip(x, y, values):
            z[y_, x_] = z_

        plot_grid(z.as_1d(), z.all(), file_name, cmap=cmap, vmin=vmin,
                  vmax=vmax, invalid=invalid)
        return

    if grid is not None or positions is not None:
        if grid is not None:
            positions = tuple(reversed(grid))
            plotter = plot_grid
        else:
            plotter = plot_positions

        cmap = pyplot.get_cmap(params.cmap)
        plotter(n_spots_total, positions, 'grid_spot_count_total.png', cmap=cmap,
                invalid=params.invalid)
        plotter(n_spots_no_ice, positions, 'grid_spot_count_no_ice.png', cmap=cmap,
                invalid=params.invalid)
        plotter(total_intensity, positions, 'grid_total_intensity.png', cmap=cmap,
                invalid=params.invalid)
        if flex.max(n_indexed) > 0:
            plotter(n_indexed, positions, 'grid_n_indexed.png', cmap=cmap,
                    invalid=params.invalid)
            plotter(fraction_indexed, positions, 'grid_fraction_indexed.png',
                    cmap=cmap, vmin=0, vmax=1, invalid=params.invalid)

        for i, d_min in enumerate((estimated_d_min, d_min_distl_method_1,
                                   d_min_distl_method_2)):
            from cctbx import uctbx
            d_star_sq = uctbx.d_as_d_star_sq(d_min)
            d_star_sq.set_selected(d_star_sq == 1, 0)
            vmin = flex.min(d_star_sq.select(d_star_sq > 0))
            vmax = flex.max(d_star_sq)

            # NOTE(review): the d_star_sq-based vmin/vmax above are
            # immediately overwritten by the d_min-based values below —
            # presumably a leftover from an earlier colour-scale choice;
            # confirm before removing.
            vmin = flex.min(d_min.select(d_min > 0))
            vmax = flex.max(d_min)
            cmap = pyplot.get_cmap('%s_r' %params.cmap)
            d_min.set_selected(d_min <= 0, vmax)

            if i == 0:
                plotter(d_min, positions, 'grid_d_min.png', cmap=cmap,
                        vmin=vmin, vmax=vmax, invalid=params.invalid)
            else:
                plotter(
                    d_min, positions, 'grid_d_min_method_%i.png' %i, cmap=cmap,
                    vmin=vmin, vmax=vmax, invalid=params.invalid)

    if flex.max(n_indexed) > 0:
        # Scatter-style hexbin plots relating spot counts to indexing success.
        pyplot.hexbin(
            n_spots, n_indexed, bins='log', cmap=pyplot.cm.jet, gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(n_spots, n_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        xlim = pyplot.xlim()
        ylim = pyplot.ylim()
        # Reference y = x line: perfect indexing of every spot.
        pyplot.plot([0, max(n_spots)], [0, max(n_spots)], c=red)
        pyplot.xlim(0, xlim[1])
        pyplot.ylim(0, ylim[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('# indexed')
        pyplot.savefig('n_spots_vs_n_indexed.png')
        pyplot.clf()

        pyplot.hexbin(
            n_spots, fraction_indexed, bins='log', cmap=pyplot.cm.jet, gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_spots, fraction_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('Fraction indexed')
        pyplot.savefig('n_spots_vs_fraction_indexed.png')
        pyplot.clf()

        pyplot.hexbin(
            n_indexed, fraction_indexed, bins='log', cmap=pyplot.cm.jet, gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_indexed, fraction_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# indexed')
        pyplot.ylabel('Fraction indexed')
        pyplot.savefig('n_indexed_vs_fraction_indexed.png')
        pyplot.clf()

        pyplot.hexbin(
            n_spots, n_lattices, bins='log', cmap=pyplot.cm.jet, gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_spots, n_lattices, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('# lattices')
        pyplot.savefig('n_spots_vs_n_lattices.png')
        pyplot.clf()

    # Pairwise comparisons of the three resolution estimates; the red y = x
    # line marks perfect agreement.
    #pyplot.scatter(
    #  estimated_d_min, d_min_distl_method_1, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(estimated_d_min, d_min_distl_method_1, bins='log',
                  cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(estimated_d_min), max(d_min_distl_method_1))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('estimated_d_min')
    pyplot.ylabel('d_min_distl_method_1')
    pyplot.savefig('d_min_vs_distl_method_1.png')
    pyplot.clf()

    #pyplot.scatter(
    #  estimated_d_min, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(estimated_d_min, d_min_distl_method_2, bins='log',
                  cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(estimated_d_min), max(d_min_distl_method_2))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('estimated_d_min')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('d_min_vs_distl_method_2.png')
    pyplot.clf()

    #pyplot.scatter(
    #  d_min_distl_method_1, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(d_min_distl_method_1, d_min_distl_method_2, bins='log',
                  cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(d_min_distl_method_1), max(d_min_distl_method_2))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('d_min_distl_method_1')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('distl_method_1_vs_distl_method_2.png')
    pyplot.clf()

    # Spot count against each resolution estimate.
    pyplot.hexbin(
        n_spots, estimated_d_min, bins='log', cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, estimated_d_min, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('estimated_d_min')
    pyplot.savefig('n_spots_vs_d_min.png')
    pyplot.clf()

    pyplot.hexbin(
        n_spots, d_min_distl_method_1, bins='log', cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, d_min_distl_method_1, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('d_min_distl_method_1')
    pyplot.savefig('n_spots_vs_distl_method_1.png')
    pyplot.clf()

    pyplot.hexbin(
        n_spots, d_min_distl_method_2, bins='log', cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('n_spots_vs_distl_method_2.png')
    pyplot.clf()
def index_from_files(f_one,f_two,f_three):
    """Find spots in three image files and index them.

    Writes the strong reflections to strong.pickle and the refined crystal
    model to crystal.pkl.

    NOTE(review): relies on module-level names 'target_cell', 'target_sg',
    'time' and 'pickle' defined elsewhere in this file.
    """
    import dxtbx
    from iotbx.phil import parse
    from dxtbx.datablock import DataBlockFactory
    from dials.array_family import flex
    from dials.algorithms.indexing.indexer import indexer_base
    from dials.util.options import OptionParser
    import copy

    # Phil template: '{{'/'}}' are literal braces for str.format; {0}/{1}
    # receive the target unit cell and space group.
    phil_scope_str='''
output {{
  shoeboxes = True
    .type = bool
    .help = Save the raw pixel values inside the reflection shoeboxes.
}}
include scope dials.algorithms.spot_finding.factory.phil_scope
include scope dials.algorithms.indexing.indexer.index_only_phil_scope
include scope dials.algorithms.refinement.refiner.phil_scope
indexing.known_symmetry.unit_cell={0}
  .type = unit_cell
indexing.known_symmetry.space_group={1}
  .type = space_group
'''
    phil_scope = parse(phil_scope_str.format(target_cell,target_sg),
                       process_includes=True)
    # from dials.util.options import OptionParser
    parser = OptionParser(phil=phil_scope)
    params, options = parser.parse_args(args=[], show_diff_phil=True)

    params.refinement.parameterisation.scan_varying = False
    params.indexing.method='real_space_grid_search'
    # params.indexing.method='fft3d'
    # params.indexing.max_cell=800
    # params.spotfinder.filter.min_spot_size=3

    filenames = [f_one,f_two,f_three]

    datablock = DataBlockFactory.from_filenames(filenames)[0]

    # Spot-finding; keep the strong spots on disk for later inspection.
    observed = flex.reflection_table.from_observations(datablock, params)
    observed.as_pickle("strong.pickle")
    print "Number of observed reflections:", len(observed)

    working_params = copy.deepcopy(params)
    imagesets = datablock.extract_imagesets()

    print "indexing..."
    t0 = time()
    # new dials, fix by Aaron
    idxr = indexer_base.from_parameters(observed, imagesets, params=params)
    idxr.index()
    tel = time()-t0
    print "done indexing (",tel," sec)"

    # new dials
    indexed = idxr.refined_reflections
    experiments = idxr.refined_experiments
    print experiments.crystals()[0]

    # Persist the refined crystal model.
    crystal_params = experiments.crystals()[0]
    with open('crystal.pkl', 'wb') as output:
        pickle.dump(crystal_params, output, pickle.HIGHEST_PROTOCOL)

    return
def work(filename, cl=None):
    """Find spots (and optionally index) a single image for the server.

    Returns per-image statistics as a dict; indexing results are added
    under 'lattices'/'n_indexed'/'fraction_indexed' when indexing succeeds.

    NOTE(review): this definition is truncated in the source — the body of
    the final 'finally:' clause is missing from this chunk.
    """
    if cl is None:
        cl = []
    import libtbx.phil
    phil_scope = libtbx.phil.parse('''\
index = False
  .type = bool
integrate = False
  .type = bool
indexing_min_spots = 10
  .type = int(value_min=1)
''')
    if not os.access(filename, os.R_OK):
        raise RuntimeError("Server does not have read access to file %s" %filename)
    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        cl, custom_processor='collect_remaining')
    index = params.extract().index
    integrate = params.extract().integrate
    indexing_min_spots = params.extract().indexing_min_spots

    from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
    from dxtbx.datablock import DataBlockFactory
    from dials.array_family import flex
    # Remaining unhandled args are interpreted as spot-finding parameters.
    interp = find_spots_phil_scope.command_line_argument_interpreter()
    phil_scope, unhandled = interp.process_and_fetch(
        unhandled, custom_processor='collect_remaining')
    logger.info('The following spotfinding parameters have been modified:')
    logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
    params = phil_scope.extract()
    # no need to write the hot mask in the server/client
    params.spotfinder.write_hot_mask = False
    datablock = DataBlockFactory.from_filenames([filename])[0]
    t0 = time.time()
    reflections = flex.reflection_table.from_observations(datablock, params)
    t1 = time.time()
    logger.info('Spotfinding took %.2f seconds' %(t1-t0))
    from dials.algorithms.spot_finding import per_image_analysis
    imageset = datablock.extract_imagesets()[0]
    scan = imageset.get_scan()
    if scan is not None:
        i = scan.get_array_range()[0]
    else:
        i = 0
    stats = per_image_analysis.stats_single_image(
        imageset, reflections, i=i, plot=False)
    stats = stats.__dict__
    t2 = time.time()
    logger.info('Resolution analysis took %.2f seconds' %(t2-t1))

    if index and stats['n_spots_no_ice'] > indexing_min_spots:
        import logging
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        from dials.algorithms.indexing import indexer
        from dxtbx.serialize.crystal import to_dict
        interp = indexer.master_phil_scope.command_line_argument_interpreter()
        phil_scope, unhandled = interp.process_and_fetch(
            unhandled, custom_processor='collect_remaining')
        imagesets = [imageset]
        logger.info('The following indexing parameters have been modified:')
        indexer.master_phil_scope.fetch_diff(source=phil_scope).show()
        params = phil_scope.extract()
        params.indexing.scan_range=[]

        # Zero oscillation width means a still: drop goniometer and scan.
        if (imageset.get_goniometer() is not None and
            imageset.get_scan() is not None and
            imageset.get_scan().get_oscillation()[1] == 0):
            imageset.set_goniometer(None)
            imageset.set_scan(None)

        try:
            idxr = indexer.indexer_base.from_parameters(
                reflections, imagesets, params=params)
            indexing_results = []
            # Indexed reflections excluding centroid outliers.
            indexed_sel = idxr.refined_reflections.get_flags(
                idxr.refined_reflections.flags.indexed)
            indexed_sel &= ~(idxr.refined_reflections.get_flags(
                idxr.refined_reflections.flags.centroid_outlier))
            for i_expt, expt in enumerate(idxr.refined_experiments):
                sel = idxr.refined_reflections['id'] == i_expt
                sel &= indexed_sel
                indexing_results.append({
                    'crystal': to_dict(expt.crystal),
                    'n_indexed': sel.count(True),
                    'fraction_indexed': sel.count(True)/sel.size()})
            stats['lattices'] = indexing_results
            stats['n_indexed'] = indexed_sel.count(True)
            stats['fraction_indexed'] = indexed_sel.count(True)/len(reflections)
        except Exception, e:
            logger.error(e)
            stats['error'] = str(e)
            #stats.crystal = None
            #stats.n_indexed = None
            #stats.fraction_indexed = None
        finally:  # NOTE(review): finally-body missing from this source chunk
def load_imagesets(template, directory, id_image=None, image_range=None,
                   use_cache=True, reversephi=False):
    """Load (and cache) the imagesets matching an image template.

    Results are cached per full template path in the module-level
    imageset_cache, keyed inside by each sweep's first image number.
    Returns a list of imagesets, a single-element list for id_image, or a
    trimmed sweep for image_range.
    """
    global imageset_cache
    from dxtbx.datablock import DataBlockFactory
    from xia2.Applications.xia2setup import known_hdf5_extensions

    full_template_path = os.path.join(directory, template)
    if full_template_path not in imageset_cache or not use_cache:

        from dxtbx.datablock import BeamComparison
        from dxtbx.datablock import DetectorComparison
        from dxtbx.datablock import GoniometerComparison

        # Model-comparison tolerances come from the xia2 settings.
        params = PhilIndex.params.xia2.settings
        compare_beam = BeamComparison(
            wavelength_tolerance=params.input.tolerance.beam.wavelength,
            direction_tolerance=params.input.tolerance.beam.direction,
            polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
            polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction)
        compare_detector = DetectorComparison(
            fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
            slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
            origin_tolerance=params.input.tolerance.detector.origin)
        compare_goniometer = GoniometerComparison(
            rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
            fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
            setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation)
        scan_tolerance = params.input.tolerance.scan.oscillation

        format_kwargs = {
            'dynamic_shadowing' : params.input.format.dynamic_shadowing,
            'multi_panel' : params.input.format.multi_panel,
        }

        if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
            # HDF5 data: locate the matching *_master.h5 file by longest
            # common substring with the template.
            import glob
            g = glob.glob(os.path.join(directory, '*_master.h5'))
            master_file = None
            for p in g:
                substr = longest_common_substring(template, p)
                if substr:
                    if (master_file is None or
                        (len(substr) >
                         len(longest_common_substring(template, master_file)))):
                        master_file = p

            if master_file is None:
                raise RuntimeError("Can't find master file for %s" %full_template_path)

            unhandled = []
            datablocks = DataBlockFactory.from_filenames(
                [master_file], verbose=False, unhandled=unhandled,
                compare_beam=compare_beam,
                compare_detector=compare_detector,
                compare_goniometer=compare_goniometer,
                scan_tolerance=scan_tolerance,
                format_kwargs=format_kwargs)
            assert len(unhandled) == 0, "unhandled image files identified: %s" % \
                unhandled
            assert len(datablocks) == 1, "1 datablock expected, %d found" % \
                len(datablocks)

        else:
            from dxtbx.sweep_filenames import locate_files_matching_template_string

            params = PhilIndex.get_python_object()
            read_all_image_headers = params.xia2.settings.read_all_image_headers

            if read_all_image_headers:
                # Import every file matching the template individually.
                paths = sorted(locate_files_matching_template_string(full_template_path))
                unhandled = []
                datablocks = DataBlockFactory.from_filenames(
                    paths, verbose=False, unhandled=unhandled,
                    compare_beam=compare_beam,
                    compare_detector=compare_detector,
                    compare_goniometer=compare_goniometer,
                    scan_tolerance=scan_tolerance,
                    format_kwargs=format_kwargs)
                assert len(unhandled) == 0, "unhandled image files identified: %s" % \
                    unhandled
                assert len(datablocks) == 1, "1 datablock expected, %d found" % \
                    len(datablocks)

            else:
                # Faster path: trust the template and read one header.
                from dxtbx.datablock import DataBlockTemplateImporter
                importer = DataBlockTemplateImporter(
                    [full_template_path], kwargs=format_kwargs)
                datablocks = importer.datablocks

        imagesets = datablocks[0].extract_sweeps()
        assert len(imagesets) > 0, "no imageset found"

        imageset_cache[full_template_path] = OrderedDict()

        if reversephi:
            # Invert the rotation axis for reversed-phi goniometers.
            for imageset in imagesets:
                goniometer = imageset.get_goniometer()
                goniometer.set_rotation_axis(
                    tuple((-g for g in goniometer.get_rotation_axis())))

        reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
        if reference_geometry is not None and len(reference_geometry) > 0:
            update_with_reference_geometry(imagesets, reference_geometry)

        from dials.command_line.dials_import import ManualGeometryUpdater
        update_geometry = ManualGeometryUpdater(
            PhilIndex.params.xia2.settings.input)
        imagesets = [update_geometry(imageset) for imageset in imagesets]

        from scitbx.array_family import flex
        for imageset in imagesets:
            scan = imageset.get_scan()
            exposure_times = scan.get_exposure_times()
            epochs = scan.get_epochs()
            # Repair degenerate exposure times: all-zero -> 1 s each,
            # otherwise any non-positive entries -> first entry's value.
            if exposure_times.all_eq(0):
                exposure_times = flex.double(exposure_times.size(), 1)
                scan.set_exposure_times(exposure_times)
            elif not exposure_times.all_gt(0):
                exposure_times = flex.double(exposure_times.size(), exposure_times[0])
                scan.set_exposure_times(exposure_times)
            # Repair non-positive epochs by accumulating exposure times.
            if epochs.size() > 1 and not epochs.all_gt(0):
                for i in range(1, epochs.size()):
                    epochs[i] = epochs[i-1] + exposure_times[i-1]
                scan.set_epochs(epochs)
            _id_image = scan.get_image_range()[0]
            imageset_cache[full_template_path][_id_image] = imageset

    if id_image is not None:
        return [imageset_cache[full_template_path][id_image]]
    elif image_range is not None:
        for imageset in imageset_cache[full_template_path].values():
            scan = imageset.get_scan()
            scan_image_range = scan.get_image_range()
            if (image_range[0] >= scan_image_range[0] and
                image_range[1] <= scan_image_range[1]):
                # Slice the cached sweep down to the requested image range.
                imagesets = [imageset[
                    image_range[0] - scan_image_range[0]:
                    image_range[1] + 1 - scan_image_range[0]]]
                assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, \
                    len(imagesets[0])
                return imagesets
    return imageset_cache[full_template_path].values()
def load_imagesets(template, directory, id_image=None, image_range=None,
                   use_cache=True, reversephi=False):
    """Load (and cache) the imagesets matching an image template.

    Simpler variant without model-comparison tolerances or scan repairs.
    NOTE(review): this redefines load_imagesets — if both definitions live
    in the same module, this one shadows the earlier, fuller version.
    """
    global imageset_cache

    full_template_path = os.path.join(directory, template)
    if full_template_path not in imageset_cache or not use_cache:

        from dxtbx.datablock import DataBlockFactory
        from dxtbx.sweep_filenames import locate_files_matching_template_string

        params = PhilIndex.get_python_object()
        read_all_image_headers = params.xia2.settings.read_all_image_headers

        if read_all_image_headers:
            # Import every file matching the template individually.
            paths = sorted(locate_files_matching_template_string(full_template_path))
            unhandled = []
            datablocks = DataBlockFactory.from_filenames(
                paths, verbose=False, unhandled=unhandled)
            assert len(unhandled) == 0, "unhandled image files identified: %s" % \
                unhandled
            assert len(datablocks) == 1, "1 datablock expected, %d found" % \
                len(datablocks)

        else:
            # Faster path: trust the template and read one header.
            from dxtbx.datablock import DataBlockTemplateImporter
            importer = DataBlockTemplateImporter([full_template_path])
            datablocks = importer.datablocks

        imagesets = datablocks[0].extract_sweeps()
        assert len(imagesets) > 0, "no imageset found"

        imageset_cache[full_template_path] = OrderedDict()

        if reversephi:
            # Invert the rotation axis for reversed-phi goniometers.
            for imageset in imagesets:
                goniometer = imageset.get_goniometer()
                goniometer.set_rotation_axis(
                    tuple((-g for g in goniometer.get_rotation_axis())))

        reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
        if reference_geometry is not None and len(reference_geometry) > 0:
            update_with_reference_geometry(imagesets, reference_geometry)

        # Cache each sweep under its first image number.
        for imageset in imagesets:
            scan = imageset.get_scan()
            _id_image = scan.get_image_range()[0]
            imageset_cache[full_template_path][_id_image] = imageset

    if id_image is not None:
        return [imageset_cache[full_template_path][id_image]]
    elif image_range is not None:
        for imageset in imageset_cache[full_template_path].values():
            scan = imageset.get_scan()
            scan_image_range = scan.get_image_range()
            if (image_range[0] >= scan_image_range[0] and
                image_range[1] <= scan_image_range[1]):
                # Slice the cached sweep down to the requested image range.
                imagesets = [imageset[
                    image_range[0] - scan_image_range[0]:
                    image_range[1] + 1 - scan_image_range[0]]]
                assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, \
                    len(imagesets[0])
                return imagesets
    return imageset_cache[full_template_path].values()
def run(self):
    '''Execute the script.

    Imports the given image paths, splits every imageset into
    single-frame datablocks, and processes each in parallel via
    easy_mp, tagging frames by basename (and index when basenames
    collide).
    '''
    from dials.util import log
    from logging import info
    from time import time
    from libtbx.utils import Abort
    from libtbx import easy_mp
    import os, copy
    from dxtbx.datablock import DataBlockFactory

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(
        show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if len(all_paths) == 0:
        self.parser.print_help()
        return

    # Save the options
    self.options = options
    self.params = params
    st = time()

    # Configure logging
    log.config(
        params.verbosity,
        info='dials.process.log',
        debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    # FIX: compare strings with '!=', not 'is not' — identity comparison
    # against a literal is implementation-dependent (and a SyntaxWarning
    # in modern CPython).
    if diff_phil != '':
        info('The following parameters have been modified:\n')
        info(diff_phil)

    # Import stuff
    info("Loading files...")
    if len(all_paths) == 1:
        datablocks = DataBlockFactory.from_filenames(all_paths)
    else:
        def do_import(filename):
            # Import one file; must yield exactly one datablock.
            info("Loading %s"%os.path.basename(filename))
            datablocks = DataBlockFactory.from_filenames([filename])
            if len(datablocks) == 0:
                raise Abort("Could not load %s"%filename)
            if len(datablocks) > 1:
                raise Abort("Got multiple datablocks from file %s"%filename)
            return datablocks[0]

        datablocks = easy_mp.parallel_map(
            func=do_import,
            iterable=all_paths,
            processes=params.mp.nproc,
            method=params.mp.method,
            preserve_order=True,
            preserve_exception_message=True)

    if len(datablocks) == 0:
        raise Abort('No datablocks specified')

    # Handle still imagesets by breaking them apart into multiple datablocks
    # Further handle single file still imagesets (like HDF5) by tagging each
    # frame using its index
    indices = []
    basenames = []
    split_datablocks = []
    for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
            for i in xrange(len(imageset)):
                subset = imageset[i:i+1]
                split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
                indices.append(i)
                basenames.append(os.path.splitext(os.path.basename(subset.paths()[0]))[0])
    tags = []
    for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
            # Disambiguate repeated basenames with the frame index.
            tags.append("%s_%d"%(basename, i))
        else:
            tags.append(basename)

    # Wrapper function
    def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

    # Process the data
    easy_mp.parallel_map(
        func=do_work,
        iterable=zip(tags, split_datablocks),
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    info("")
    info("Total Time Taken = %f seconds" % (time() - st))