def test_RefinerData(testdata):
    """Exercise RefinerData.from_reflections on synthetic Gaussian shoeboxes.

    Builds an 11x11x1 shoebox around each calculated reflection position,
    fills every shoebox with the same 2D Gaussian profile (sigma = 1 px,
    peak 100), then checks that RefinerData reproduces the beam vector,
    Miller indices, sp vectors, total counts and (near-zero) observed
    centroid offsets.
    """
    experiment = testdata.experiment
    reflections = testdata.reflections
    panel = experiment.detector[0]
    s0_length = matrix.col(experiment.beam.get_s0()).length()

    # Create the columns RefinerData needs: bounding boxes, observed
    # centroids, and the s2/sp diffraction vectors (scaled to |s0|).
    reflections["bbox"] = flex.int6(len(reflections))
    reflections["xyzobs.px.value"] = flex.vec3_double(len(reflections))
    reflections["s2"] = reflections["s1"].each_normalize() * s0_length
    reflections["sp"] = flex.vec3_double(len(reflections))
    for i, (x, y, z) in enumerate(reflections["xyzcal.px"]):
        # 11x11 pixel box on a single frame, centred on the predicted pixel
        x0 = int(x) - 5
        x1 = int(x) + 5 + 1
        y0 = int(y) - 5
        y1 = int(y) + 5 + 1
        z0 = int(z)
        z1 = z0 + 1
        reflections["bbox"][i] = x0, x1, y0, y1, z0, z1
        # "Observed" centroid placed exactly at the pixel centre
        reflections["xyzobs.px.value"][i] = (int(x) + 0.5,
                                             int(y) + 0.5,
                                             int(z) + 0.5)
        # sp: unit vector from sample to the observed pixel, scaled to |s0|
        reflections["sp"][i] = (matrix.col(
            panel.get_pixel_lab_coord(
                reflections["xyzobs.px.value"][i][0:2])).normalize() *
                                s0_length)
    reflections["shoebox"] = flex.shoebox(reflections["panel"],
                                          reflections["bbox"],
                                          allocate=True)

    # One shared Gaussian profile and an all-valid (code 5) mask,
    # copied into every shoebox.
    shoebox_data = flex.float(flex.grid(1, 11, 11))
    shoebox_mask = flex.int(flex.grid(1, 11, 11))
    for j in range(11):
        for i in range(11):
            shoebox_data[0, j, i] = (100 * exp(-0.5 * (j - 5)**2 / 1**2) *
                                     exp(-0.5 * (i - 5)**2 / 1**2))
            shoebox_mask[0, j, i] = 5
    for sbox in reflections["shoebox"]:
        sbox.data = shoebox_data
        sbox.mask = shoebox_mask

    data = RefinerData.from_reflections(experiment, reflections)

    assert tuple(data.s0) == pytest.approx(experiment.beam.get_s0())
    assert data.h_list == reflections["miller_index"]
    for i, sp in enumerate(reflections["sp"]):
        assert data.sp_list[:, i] == pytest.approx(sp)
    # Total counts of every reflection equal the (shared) profile sum
    assert data.ctot_list[0] == sum(shoebox_data)

    # Profile is symmetric about the box centre, so both components of the
    # observed centroid offset should vanish (to numerical precision).
    mobs1 = np.abs(data.mobs_list[0, :])
    mobs2 = np.abs(data.mobs_list[1, :])
    assert np.max(mobs1) < 1e-6
    assert np.max(mobs2) < 1e-6
def refinerdata_testdata(testdata):
    """Build a RefinerData fixture from synthetic Gaussian shoeboxes.

    Each reflection gets an 11x11x1 bounding box around its calculated
    position, an "observed" centroid at the pixel centre, sp/s2 vectors
    scaled to |s0|, and a shoebox filled with a shared 2D Gaussian profile
    (sigma = 1 px, peak 100) under an all-valid mask.
    """
    experiment = testdata.experiment
    reflections = testdata.reflections
    detector_panel = experiment.detector[0]
    s0_len = matrix.col(experiment.beam.get_s0()).length()

    n_refl = len(reflections)
    reflections["bbox"] = flex.int6(n_refl)
    reflections["xyzobs.px.value"] = flex.vec3_double(n_refl)
    reflections["s2"] = reflections["s1"].each_normalize() * s0_len
    reflections["sp"] = flex.vec3_double(n_refl)

    for idx, (x, y, z) in enumerate(reflections["xyzcal.px"]):
        ix = int(x)
        iy = int(y)
        iz = int(z)
        # 11x11 pixel box on a single frame, centred on the predicted pixel
        reflections["bbox"][idx] = (ix - 5, ix + 5 + 1,
                                    iy - 5, iy + 5 + 1,
                                    iz, iz + 1)
        reflections["xyzobs.px.value"][idx] = (ix + 0.5, iy + 0.5, iz + 0.5)
        # sp: unit vector towards the observed pixel, scaled to |s0|
        lab_coord = matrix.col(
            detector_panel.get_pixel_lab_coord(
                reflections["xyzobs.px.value"][idx][0:2]))
        reflections["sp"][idx] = lab_coord.normalize() * s0_len

    reflections["shoebox"] = flex.shoebox(reflections["panel"],
                                          reflections["bbox"],
                                          allocate=True)

    # Shared Gaussian profile plus an all-valid (code 5) mask.
    profile = flex.float(flex.grid(1, 11, 11))
    valid_mask = flex.int(flex.grid(1, 11, 11))
    for row in range(11):
        for col in range(11):
            profile[0, row, col] = (100 * exp(-0.5 * (row - 5)**2 / 1**2) *
                                    exp(-0.5 * (col - 5)**2 / 1**2))
            valid_mask[0, row, col] = 5

    for box in reflections["shoebox"]:
        box.data = profile
        box.mask = valid_mask

    return RefinerData.from_reflections(experiment, reflections)
def with_individual_given_intensity(self, N, In, Ba, Bb, Bc, Bd):
    """Generate N reflections with per-reflection intensity and background.

    Args:
        N: Number of reflections to generate.
        In: Per-reflection signal intensity (photon counts).
        Ba, Bb, Bc, Bd: Per-reflection background plane parameters.

    Returns:
        A reflection table with simulated shoeboxes; the simulation inputs
        are recorded in the ``intensity.sim`` / ``background.sim.*`` columns
        and the simulated signal in ``intensity.exp``.
    """
    from dials.algorithms.simulation import simulate_reciprocal_space_gaussian
    from dials.algorithms.simulation.generate_test_reflections import (
        random_background_plane2,
    )
    from dials.util.command_line import ProgressBar

    # Every parameter array must supply one value per reflection
    assert N == len(In)
    assert N == len(Ba)
    assert N == len(Bb)
    assert N == len(Bc)
    assert N == len(Bd)

    # Generate some predictions
    refl = self.generate_predictions(N)

    s1 = refl["s1"]
    phi = refl["xyzcal.mm"].parts()[2]
    bbox = refl["bbox"]
    shoebox = refl["shoebox"]

    # Update the progress bar roughly once per 1% of reflections.
    # BUGFIX: the old code used int(len(refl) / 100), which is 0 for fewer
    # than 100 reflections and made `i % step` raise ZeroDivisionError.
    step = max(1, len(refl) // 100)

    # Calculate the signal
    progress = ProgressBar(
        title=f"Calculating signal for {len(refl)} reflections")
    I_exp = flex.double(len(refl), 0)
    for i in range(len(refl)):
        if In[i] > 0:
            data = shoebox[i].data.as_double()
            I_exp[i] = simulate_reciprocal_space_gaussian(
                self.experiment.beam,
                self.experiment.detector,
                self.experiment.goniometer,
                self.experiment.scan,
                self.sigma_b,
                self.sigma_m,
                s1[i],
                phi[i],
                bbox[i],
                In[i],
                data,
                shoebox[i].mask,
            )
            shoebox[i].data = data.as_float()
        if i % step == 0:
            progress.update(100.0 * float(i) / len(refl))
    progress.finished(
        f"Calculated signal impacts for {len(refl)} reflections")

    # Calculate the background (a random plane added to each shoebox)
    progress = ProgressBar(
        title=f"Calculating background for {len(refl)} reflections")
    for i in range(len(refl)):
        background = flex.float(flex.grid(shoebox[i].size()), 0.0)
        random_background_plane2(background, Ba[i], Bb[i], Bc[i], Bd[i])
        shoebox[i].data += background
        shoebox[i].background = background
        if i % step == 0:
            progress.update(100.0 * float(i) / len(refl))
    progress.finished(f"Calculated background for {len(refl)} reflections")

    # Save the expected intensity and background
    refl["intensity.sim"] = In
    refl["background.sim.a"] = Ba
    refl["background.sim.b"] = Bb
    refl["background.sim.c"] = Bc
    refl["background.sim.d"] = Bd
    refl["intensity.exp"] = I_exp

    # Return the reflections
    return refl
def add_batch_list(
        self,
        image_range,
        experiment,
        wavelength,
        dataset_id,
        batch_offset,
        force_static_model,
):
    """Add batch metadata to the mtz file.

    Builds per-image arrays of oscillation, unit cell and orientation
    matrix for the given image range, then hands everything to the C++
    helper ``dials.util.ext.add_dials_batches`` which writes the actual
    MTZ BATCH headers.

    Args:
        image_range: (first, last) image numbers, inclusive.
        experiment: The experiment providing crystal/detector/beam models.
        wavelength: Wavelength written into the batch headers.
        dataset_id: MTZ dataset id the batches belong to.
        batch_offset: Offset added to batch numbers (applied in C++).
        force_static_model: If True, ignore scan-varying crystal models.
    """
    # Recalculate useful numbers and references here
    n_batches = image_range[1] - image_range[0] + 1
    phi_start = flex.float(n_batches, 0)
    phi_range = flex.float(n_batches, 0)
    umat_array = flex.float(flex.grid(n_batches, 9))
    cell_array = flex.float(flex.grid(n_batches, 6))
    U = matrix.sqr(experiment.crystal.get_U())
    if experiment.goniometer is not None:
        F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
    else:
        # No goniometer: fixed rotation defaults to the identity
        F = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
    i0 = image_range[0]
    for i in range(n_batches):
        if experiment.scan:
            phi_start[i], phi_range[
                i] = experiment.scan.get_image_oscillation(i + i0)

        # unit cell (this is fine) and the what-was-refined-flags hardcoded
        # take time-varying parameters from the *end of the frame* unlikely to
        # be much different at the end - however only exist if scan-varying
        # refinement was used
        if not force_static_model and experiment.crystal.num_scan_points > 0:
            # Get the index of the image in the sequence e.g. first => 0, second => 1
            image_index = i + i0 - experiment.scan.get_image_range()[0]
            _unit_cell = experiment.crystal.get_unit_cell_at_scan_point(
                image_index)
            _U = matrix.sqr(
                experiment.crystal.get_U_at_scan_point(image_index))
        else:
            _unit_cell = experiment.crystal.get_unit_cell()
            _U = U

        # apply the fixed rotation to this to unify matrix definitions - F * U
        # was what was used in the actual prediction: U appears to be stored
        # as the transpose?! At least is for Mosflm...
        #
        # FIXME Do we need to apply the setting rotation here somehow? i.e. we have
        # the U.B. matrix assuming that the axis is equal to S * axis_datum but
        # here we are just giving the effective axis so at scan angle 0 this will
        # not be correct... FIXME 2 not even sure we can express the stack of
        # matrices S * R * F * U * B in MTZ format?... see [=A=] below
        _U = matrix.sqr(
            dials.util.ext.dials_u_to_mosflm(F * _U, _unit_cell))

        # FIXME need to get what was refined and what was constrained from the
        # crystal model - see https://github.com/dials/dials/issues/355
        _unit_cell_params = _unit_cell.parameters()
        for j in range(6):
            cell_array[i, j] = _unit_cell_params[j]
        # MTZ stores the orientation matrix transposed (Mosflm convention)
        _U_t_elements = _U.transpose().elems
        for j in range(9):
            umat_array[i, j] = _U_t_elements[j]

    # We ignore panels beyond the first one, at the moment
    panel = experiment.detector[0]
    panel_size = panel.get_image_size()
    panel_distance = panel.get_directed_distance()

    if experiment.goniometer:
        axis = flex.float(experiment.goniometer.get_rotation_axis())
    else:
        axis = flex.float((0.0, 0.0, 0.0))

    # FIXME hard-coded assumption on idealized beam vector below... this may be
    # broken when we come to process data from a non-imgCIF frame
    s0n = flex.float(
        matrix.col(experiment.beam.get_s0()).normalize().elems)

    # get the mosaic spread though today it may not actually be set - should
    # this be in the BATCH headers?
    try:
        mosaic = experiment.crystal.get_mosaicity()
    except AttributeError:
        # Crystal model without mosaicity (e.g. stills): record zero
        mosaic = 0.0

    # Jump into C++ to do the rest of the work
    dials.util.ext.add_dials_batches(
        self.mtz_file,
        dataset_id,
        image_range,
        batch_offset,
        wavelength,
        mosaic,
        phi_start,
        phi_range,
        cell_array,
        umat_array,
        panel_size,
        panel_distance,
        axis,
        s0n,
    )
def reconstruct_peabox(params):
    """Reconstruct averaged shoebox profiles from integrated xia2 sweeps.

    Reads ``xia2.json`` from the current directory, collects shoeboxes for
    the integrated reflections, groups them by shoebox size and prints an
    ASCII visualisation of the summed, background-subtracted profile for
    each size class.

    BUGFIX/port: the original body was Python 2 only (``print`` statements,
    ``iteritems``, ``cPickle``) which is a SyntaxError under the Python 3
    this module otherwise requires; pickle files are now also opened in
    binary mode and closed deterministically.
    """
    assert os.path.exists('xia2.json')
    from xia2.Schema.XProject import XProject
    xinfo = XProject.from_json(filename='xia2.json')

    from dxtbx.model.experiment_list import ExperimentListFactory
    import pickle
    import dials  # because WARNING:root:No profile class gaussian_rs registered
    from dials.array_family import flex

    crystals = xinfo.get_crystals()
    assert len(crystals) == 1

    for xname in crystals:
        crystal = crystals[xname]

    scaler = crystal._get_scaler()

    epochs = scaler._sweep_handler.get_epochs()

    from xia2.command_line.rogues_gallery import munch_rogues
    from pprint import pprint

    batched_reflections = {}

    for epoch in epochs:
        si = scaler._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        experiments = ExperimentListFactory.from_json_file(
            intgr.get_integrated_experiments())
        # Python 3 pickles must be read in binary mode; use a context
        # manager so the file handle is closed promptly.
        with open(intgr.get_integrated_reflections(), 'rb') as fh:
            reflections = pickle.load(fh)
        batched_reflections[si.get_batch_range()] = (experiments, reflections,
                                                     si.get_sweep_name())

    from dials.util import debug_console
    # debug_console()

    good = reflections.get_flags(reflections.flags.integrated)
    # bad = reflections.get_flags(reflections.flags.bad_spot)
    reflections = reflections.select(good)

    # Replace the packed flag bitfield with a readable comma-separated list
    for r in reflections:
        flags = r['flags']
        r['flags'] = []
        for v, f in reflections.flags.values.items():
            if flags & f:
                r['flags'].append(str(f))
        r['flags'] = ', '.join(r['flags'])
        # pprint(r)

    print("Collecting shoeboxes for %d reflections" % len(reflections))

    reflections["shoebox"] = flex.shoebox(reflections["panel"],
                                          reflections["bbox"],
                                          allocate=True)
    reflections.extract_shoeboxes(experiments.imagesets()[0], verbose=True)

    print("Consolidating...")

    # Accumulate background-subtracted counts and mask weights per
    # shoebox-size class.
    sizes = {}
    for r in reflections:
        s = r['shoebox']
        a = s.size()
        if a not in sizes:
            sizes[a] = {
                'sum': flex.float([0] * a[0] * a[1] * a[2]),
                'weights': flex.int([0] * a[0] * a[1] * a[2]),
                'count': 0
            }
        # Zero out invalid pixels before accumulating
        s.mask.set_selected(s.data < 0, 0)
        s.data.set_selected(s.mask == 0, 0)
        s.background.set_selected(s.mask == 0, 0)
        sizes[a]['sum'] += s.data - s.background
        sizes[a]['weights'] += s.mask
        sizes[a]['count'] += 1

    print(len(sizes), "shoebox sizes extracted")

    for size, info in sizes.items():
        print("%dx %s" % (info['count'], str(size)))
        sdat = iter(info['sum'])
        wdat = iter(info['weights'])
        for z in range(size[0]):
            for y in range(size[1]):
                row_counts = [next(sdat) for x in range(size[2])]
                row_weights = [next(wdat) for x in range(size[2])]
                # Weighted mean count per pixel (x10); avoid divide-by-zero
                truecount = (0 if w == 0 else 10 * cnt / w
                             for cnt, w in zip(row_counts, row_weights))
                # One character per pixel: X negative, . small, else log10
                visualize = ("X" if v < 0 else
                             ("." if v < 10 else str(int(math.log10(v))))
                             for v in truecount)
                print("".join(visualize))
            print("")
        print("")
# NOTE(review): this is a fragment of a larger loop body — ref2d, data2d,
# t_bbox, t_xyzobs, t_xyzcal, t_shoebox, t_row, col_str/row_str, ncol/nrow
# and centr_col/centr_row all come from the enclosing (not shown) scope.
# Paste the 2D reflection profile into the composite image
data2d_tmp = ref2d.as_numpy_array()
data2d[col_str:col_str + ncol, row_str:row_str + nrow] += numpy.float64(data2d_tmp)
# Record bbox and centroid (single-frame: z range [0, 1), z centre 0.5)
t_bbox[t_row] = [col_str, col_str + ncol, row_str, row_str + nrow, 0, 1]
t_xyzobs[t_row] = [col_str + centr_col, row_str + centr_row, 0.5]
t_xyzcal[t_row] = [col_str + centr_col, row_str + centr_row, 0.5]
# Copy the pasted region back out as this reflection's shoebox data
np_shoebox = numpy.copy(data2d[col_str:col_str + ncol, row_str:row_str + nrow])
# tmp_2d_fl_shoebox = flex.double(np_shoebox)
# tmp_2d_fl_shoebox.reshape(flex.grid(1, ncol, nrow))
fl_shoebox = flex.float(np_shoebox)
fl_shoebox.reshape(flex.grid(1, ncol, nrow))
# Kept for a planned 5-frame (3D) variant of this code path
to_use_soon = """
fl_shoebox = flex.double(flex.grid(5, ncol, nrow))
for x_loc in range(ncol):
    for y_loc in range(nrow):
        for z_loc in range(5):
            fl_shoebox[z_loc, y_loc, x_loc] = tmp_2d_fl_shoebox[y_loc, x_loc]
"""
# NOTE(review): slicing copies here, so data and background hold the same
# values but distinct arrays — presumably intentional; confirm.
fl_shoebox_bkg = fl_shoebox[:, :, :]
t_shoebox[t_row].data = fl_shoebox
t_shoebox[t_row].background = fl_shoebox_bkg
# lc_mask = flex.int(flex.grid(5, ncol, nrow), 3)
# Mask value 3 = valid + foreground for the single-frame shoebox
lc_mask = flex.int(flex.grid(1, ncol, nrow), 3)
def export_mtz(
    integrated_data,
    experiment_list,
    hklout,
    ignore_panels=False,
    include_partials=False,
    keep_partials=False,
    min_isigi=None,
    force_static_model=False,
    filter_ice_rings=False,
):
    """Export data from integrated_data corresponding to experiment_list to an
    MTZ file hklout.

    Args:
        integrated_data: Reflection table with integrated intensities.
        experiment_list: Must contain exactly one experiment.
        hklout: Output MTZ filename.
        ignore_panels: Skip the single-panel assertion if True.
        include_partials: Sum/scale partial reflections before export.
        keep_partials: Keep reflections with partiality < 0.99.
        min_isigi: If set, remove reflections with I/sigma below this.
        force_static_model: Ignore scan-varying crystal models.
        filter_ice_rings: Remove reflections flagged as in ice rings.

    Returns:
        The iotbx mtz object that was written to hklout.
    """

    from dials.array_family import flex

    # for the moment assume (and assert) that we will convert data from exactly
    # one lattice...

    # FIXME allow for more than one experiment in here: this is fine just add
    # multiple MTZ data sets (DIALS1...DIALSN) and multiple batch headers: one
    # range of batches for each experiment

    assert len(experiment_list) == 1
    # select reflections that are assigned to an experiment (i.e. non-negative id)
    integrated_data = integrated_data.select(integrated_data["id"] >= 0)
    assert max(integrated_data["id"]) == 0

    # strip out negative variance reflections: these should not really be there
    # FIXME Doing select on summation results. Should do on profile result if
    # present? Yes

    # Keep only fully integrated reflections (both algorithms if profile
    # fitting was run, otherwise summation only)
    if "intensity.prf.variance" in integrated_data:
        selection = integrated_data.get_flags(integrated_data.flags.integrated,
                                              all=True)
    else:
        selection = integrated_data.get_flags(
            integrated_data.flags.integrated_sum)
    integrated_data = integrated_data.select(selection)

    selection = integrated_data["intensity.sum.variance"] <= 0
    if selection.count(True) > 0:
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections with negative variance" %
                    selection.count(True))

    if "intensity.prf.variance" in integrated_data:
        selection = integrated_data["intensity.prf.variance"] <= 0
        if selection.count(True) > 0:
            integrated_data.del_selected(selection)
            logger.info(
                "Removing %d profile reflections with negative variance" %
                selection.count(True))

    if filter_ice_rings:
        selection = integrated_data.get_flags(
            integrated_data.flags.in_powder_ring)
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections in ice ring resolutions" %
                    selection.count(True))

    if min_isigi is not None:

        selection = (
            integrated_data["intensity.sum.value"] /
            flex.sqrt(integrated_data["intensity.sum.variance"])) < min_isigi
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections with I/Sig(I) < %s" %
                    (selection.count(True), min_isigi))

        if "intensity.prf.variance" in integrated_data:
            selection = (
                integrated_data["intensity.prf.value"] /
                flex.sqrt(integrated_data["intensity.prf.variance"])
            ) < min_isigi
            integrated_data.del_selected(selection)
            logger.info("Removing %d profile reflections with I/Sig(I) < %s" %
                        (selection.count(True), min_isigi))

    # FIXME in here work on including partial reflections => at this stage best
    # to split off the partial reflections into a different selection & handle
    # gracefully... better to work on a short list as will need to "pop" them &
    # find matching parts to combine.

    if include_partials:
        integrated_data = sum_partial_reflections(integrated_data)
        integrated_data = scale_partial_reflections(integrated_data)

    if "partiality" in integrated_data:
        selection = integrated_data["partiality"] < 0.99
        if selection.count(True) > 0 and not keep_partials:
            integrated_data.del_selected(selection)
            logger.info("Removing %d incomplete reflections" %
                        selection.count(True))

    # FIXME TODO for more than one experiment into an MTZ file:
    #
    # - add an epoch (or recover an epoch) from the scan and add this as an
    #   extra column to the MTZ file for scaling, so we know that the two
    #   lattices were integrated at the same time
    # - decide a sensible BATCH increment to apply to the BATCH value between
    #   experiments and add this
    #
    # At the moment this is probably enough to be working on.

    experiment = experiment_list[0]

    # also only work with one panel(for the moment)

    if not ignore_panels:
        assert len(experiment.detector) == 1

    from scitbx import matrix

    if experiment.goniometer:
        axis = matrix.col(experiment.goniometer.get_rotation_axis())
    else:
        axis = 0.0, 0.0, 0.0

    s0 = experiment.beam.get_s0()
    wavelength = experiment.beam.get_wavelength()

    panel = experiment.detector[0]
    origin = matrix.col(panel.get_origin())
    fast = matrix.col(panel.get_fast_axis())
    slow = matrix.col(panel.get_slow_axis())

    pixel_size = panel.get_pixel_size()

    # fast/slow axes scaled to physical pixel dimensions (mm)
    fast *= pixel_size[0]
    slow *= pixel_size[1]

    # Move the crystal to the reference setting of its space group before
    # export; reflections are reindexed to match further below.
    cb_op_to_ref = experiment.crystal.get_space_group().info(
    ).change_of_basis_op_to_reference_setting()

    experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)

    U = experiment.crystal.get_U()
    if experiment.goniometer is not None:
        F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
    else:
        F = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
    unit_cell = experiment.crystal.get_unit_cell()

    from iotbx import mtz

    from scitbx.array_family import flex
    from math import floor, sqrt

    m = mtz.object()
    m.set_title("from dials.export_mtz")
    m.set_space_group_info(experiment.crystal.get_space_group().info())

    if experiment.scan:
        image_range = experiment.scan.get_image_range()
    else:
        image_range = 1, 1

    # pointless (at least) doesn't like batches starting from zero
    b_incr = max(image_range[0], 1)

    # One MTZ batch header per image in the scan
    for b in range(image_range[0], image_range[1] + 1):
        o = m.add_batch().set_num(b + b_incr).set_nbsetid(1).set_ncryst(1)
        o.set_time1(0.0).set_time2(0.0).set_title("Batch %d" % (b + b_incr))
        o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
        o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
        o.set_divhd(0.0).set_divvd(0.0)

        # FIXME hard-coded assumption on idealized beam vector below... this
        # may be broken when we come to process data from a non-imgCIF frame

        o.set_so(flex.float(s0)).set_source(flex.float((0, 0, -1)))

        # these are probably 0, 1 respectively, also flags for how many are
        # set, sd

        o.set_bbfac(0.0).set_bscale(1.0)
        o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)

        # unit cell (this is fine) and the what-was-refined-flags FIXME
        # hardcoded

        # take time-varying parameters from the *end of the frame* unlikely to
        # be much different at the end - however only exist if time-varying
        # refinement was used
        if not force_static_model and experiment.crystal.num_scan_points > 0:
            _unit_cell = experiment.crystal.get_unit_cell_at_scan_point(
                b - image_range[0])
            _U = experiment.crystal.get_U_at_scan_point(b - image_range[0])
        else:
            _unit_cell = unit_cell
            _U = U

        # apply the fixed rotation to this to unify matrix definitions - F * U
        # was what was used in the actual prediction: U appears to be stored
        # as the transpose?! At least is for Mosflm...
        #
        # FIXME Do we need to apply the setting rotation here somehow? i.e. we
        # have the U.B. matrix assuming that the axis is equal to
        # S * axis_datum but here we are just giving the effective axis so at
        # scan angle 0 this will not be correct... FIXME 2 not even sure we
        # can express the stack of matrices S * R * F * U * B in MTZ format?...

        _U = dials_u_to_mosflm(F * _U, _unit_cell)

        # FIXME need to get what was refined and what was constrained from the
        # crystal model

        o.set_cell(flex.float(_unit_cell.parameters()))
        o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
        o.set_umat(flex.float(_U.transpose().elems))

        # get the mosaic spread though today it may not actually be set
        mosaic = experiment.crystal.get_mosaicity()
        o.set_crydat(
            flex.float([
                mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
            ]))

        o.set_lcrflg(0)
        o.set_datum(flex.float((0.0, 0.0, 0.0)))

        # detector size, distance
        o.set_detlm(
            flex.float([
                0.0, panel.get_image_size()[0], 0.0,
                panel.get_image_size()[1], 0, 0, 0, 0
            ]))
        o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))

        # goniometer axes and names, and scan axis number, and number of axes,
        # missets
        o.set_e1(flex.float(axis))
        o.set_e2(flex.float((0.0, 0.0, 0.0)))
        o.set_e3(flex.float((0.0, 0.0, 0.0)))
        o.set_gonlab(flex.std_string(("AXIS", "", "")))
        o.set_jsaxs(1)
        o.set_ngonax(1)
        o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))

        # scan ranges, axis
        if experiment.scan:
            phi_start, phi_range = experiment.scan.get_image_oscillation(b)
        else:
            phi_start, phi_range = 0.0, 0.0

        o.set_phistt(phi_start)
        o.set_phirange(phi_range)
        o.set_phiend(phi_start + phi_range)
        o.set_scanax(flex.float(axis))

        # number of misorientation angles
        o.set_misflg(0)

        # crystal axis closest to rotation axis (why do I want this?)
        o.set_jumpax(0)

        # type of data - 1; 2D, 2; 3D, 3; Laue
        o.set_ldtype(2)

    # now create the actual data structures - first keep a track of the columns
    # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
    # LP MPART FLAG BGPKRATIOS

    from cctbx.array_family import flex as cflex  # implicit import
    from cctbx.miller import map_to_asu_isym  # implicit import

    # gather the required information for the reflection file

    nref = len(integrated_data["miller_index"])
    x_px, y_px, z_px = integrated_data["xyzcal.px"].parts()

    xdet = flex.double(x_px)
    ydet = flex.double(y_px)
    zdet = flex.double(z_px)

    # compute ROT values
    if experiment.scan:
        rot = flex.double([
            experiment.scan.get_angle_from_image_index(z) for z in zdet
        ])
    else:
        rot = zdet

    # compute BATCH values
    batch = flex.floor(zdet).iround() + 1 + b_incr

    # we're working with full reflections so...
    fractioncalc = flex.double(nref, 1.0)

    # now go for it and make an MTZ file...

    x = m.add_crystal("XTAL", "DIALS", unit_cell.parameters())
    d = x.add_dataset("FROMDIALS", wavelength)

    # now add column information...

    # FIXME add DIALS_FLAG which can include e.g. was partial etc.

    # MTZ column type codes (H index, J intensity, Q sigma, R real,
    # B batch, Y M/ISYM, I integer)
    type_table = {
        "H": "H",
        "K": "H",
        "L": "H",
        "I": "J",
        "SIGI": "Q",
        "IPR": "J",
        "SIGIPR": "Q",
        "BG": "R",
        "SIGBG": "R",
        "XDET": "R",
        "YDET": "R",
        "BATCH": "B",
        "BGPKRATIOS": "R",
        "WIDTH": "R",
        "MPART": "I",
        "M_ISYM": "Y",
        "FLAG": "I",
        "LP": "R",
        "FRACTIONCALC": "R",
        "ROT": "R",
        "DQE": "R",
    }

    # derive index columns from original indices with
    #
    # from m.replace_original_index_miller_indices
    #
    # so all that is needed now is to make space for the reflections - fill
    # with zeros...

    m.adjust_column_array_sizes(nref)
    m.set_n_reflections(nref)

    # assign H, K, L, M_ISYM space
    for column in "H", "K", "L", "M_ISYM":
        d.add_column(column, type_table[column]).set_values(
            flex.double(nref, 0.0).as_float())

    # reindex reflections to the reference setting chosen above
    m.replace_original_index_miller_indices(
        cb_op_to_ref.apply(integrated_data["miller_index"]))

    d.add_column("BATCH", type_table["BATCH"]).set_values(
        batch.as_double().as_float())

    if "lp" in integrated_data:
        lp = integrated_data["lp"]
    else:
        lp = flex.double(nref, 1.0)
    if "dqe" in integrated_data:
        dqe = integrated_data["dqe"]
    else:
        dqe = flex.double(nref, 1.0)

    I_profile = None
    V_profile = None
    I_sum = None
    V_sum = None

    # FIXME errors in e.g. LP correction need to be propagated here
    # combined LP / detector-efficiency scale factor
    scl = lp / dqe

    if "intensity.prf.value" in integrated_data:
        I_profile = integrated_data["intensity.prf.value"] * scl
        V_profile = integrated_data["intensity.prf.variance"] * scl * scl
        # Trap negative variances
        assert V_profile.all_gt(0)
        d.add_column("IPR", type_table["I"]).set_values(I_profile.as_float())
        d.add_column("SIGIPR", type_table["SIGI"]).set_values(
            flex.sqrt(V_profile).as_float())
    if "intensity.sum.value" in integrated_data:
        I_sum = integrated_data["intensity.sum.value"] * scl
        V_sum = integrated_data["intensity.sum.variance"] * scl * scl
        # Trap negative variances
        assert V_sum.all_gt(0)
        d.add_column("I", type_table["I"]).set_values(I_sum.as_float())
        d.add_column("SIGI", type_table["SIGI"]).set_values(
            flex.sqrt(V_sum).as_float())
    if ("background.sum.value" in integrated_data
            and "background.sum.variance" in integrated_data):
        bg = integrated_data["background.sum.value"]
        varbg = integrated_data["background.sum.variance"]
        assert (varbg >= 0).count(False) == 0
        sigbg = flex.sqrt(varbg)
        d.add_column("BG", type_table["BG"]).set_values(bg.as_float())
        d.add_column("SIGBG", type_table["SIGBG"]).set_values(
            sigbg.as_float())

    d.add_column("FRACTIONCALC", type_table["FRACTIONCALC"]).set_values(
        fractioncalc.as_float())

    d.add_column("XDET", type_table["XDET"]).set_values(xdet.as_float())
    d.add_column("YDET", type_table["YDET"]).set_values(ydet.as_float())

    d.add_column("ROT", type_table["ROT"]).set_values(rot.as_float())

    d.add_column("LP", type_table["LP"]).set_values(lp.as_float())
    d.add_column("DQE", type_table["DQE"]).set_values(dqe.as_float())

    m.write(hklout)

    return m
def _add_batch(mtz, experiment, batch_number, image_number, force_static_model):
    """Add a single image's metadata to an mtz file.

    Args:
        mtz: The iotbx mtz object to add the batch to.
        experiment: Experiment providing beam/detector/goniometer/crystal.
        batch_number: MTZ batch number (must be positive).
        image_number: Image this batch corresponds to.
        force_static_model: If True, ignore scan-varying crystal models.

    Returns the batch object.
    """
    assert batch_number > 0

    # Recalculate useful numbers and references here
    wavelength = experiment.beam.get_wavelength()

    # We ignore panels beyond the first one, at the moment
    panel = experiment.detector[0]

    if experiment.goniometer:
        axis = matrix.col(experiment.goniometer.get_rotation_axis())
    else:
        axis = 0.0, 0.0, 0.0

    U = matrix.sqr(experiment.crystal.get_U())
    if experiment.goniometer is not None:
        F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
    else:
        # No goniometer: fixed rotation defaults to the identity
        F = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

    # Create the batch object and start configuring it
    o = mtz.add_batch().set_num(batch_number).set_nbsetid(1).set_ncryst(1)
    o.set_time1(0.0).set_time2(0.0).set_title('Batch {}'.format(batch_number))
    o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
    o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
    o.set_divhd(0.0).set_divvd(0.0)

    # FIXME hard-coded assumption on idealized beam vector below... this may be
    # broken when we come to process data from a non-imgCIF frame
    s0n = matrix.col(experiment.beam.get_s0()).normalize().elems
    o.set_so(flex.float(s0n)).set_source(flex.float((0, 0, -1)))

    # these are probably 0, 1 respectively, also flags for how many are set, sd
    o.set_bbfac(0.0).set_bscale(1.0)
    o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)

    # unit cell (this is fine) and the what-was-refined-flags FIXME hardcoded

    # take time-varying parameters from the *end of the frame* unlikely to
    # be much different at the end - however only exist if scan-varying
    # refinement was used
    if not force_static_model and experiment.crystal.num_scan_points > 0:
        # Get the index of the image in the sequence e.g. first => 0, second => 1
        # NOTE(review): reads experiment.image_range directly (not
        # experiment.scan.get_image_range()) — confirm the experiment object
        # actually exposes this attribute in this code path.
        image_index = image_number - experiment.image_range[0]
        _unit_cell = experiment.crystal.get_unit_cell_at_scan_point(
            image_index)
        _U = matrix.sqr(experiment.crystal.get_U_at_scan_point(image_index))
    else:
        _unit_cell = experiment.crystal.get_unit_cell()
        _U = U

    # apply the fixed rotation to this to unify matrix definitions - F * U
    # was what was used in the actual prediction: U appears to be stored
    # as the transpose?! At least is for Mosflm...
    #
    # FIXME Do we need to apply the setting rotation here somehow? i.e. we have
    # the U.B. matrix assuming that the axis is equal to S * axis_datum but
    # here we are just giving the effective axis so at scan angle 0 this will
    # not be correct... FIXME 2 not even sure we can express the stack of
    # matrices S * R * F * U * B in MTZ format?... see [=A=] below
    _U = dials_u_to_mosflm(F * _U, _unit_cell)

    # FIXME need to get what was refined and what was constrained from the
    # crystal model - see https://github.com/dials/dials/issues/355
    o.set_cell(flex.float(_unit_cell.parameters()))
    o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
    o.set_umat(flex.float(_U.transpose().elems))

    # get the mosaic spread though today it may not actually be set - should
    # this be in the BATCH headers?
    try:
        mosaic = experiment.crystal.get_mosaicity()
    except AttributeError:
        # Crystal model without mosaicity (e.g. stills): record zero
        mosaic = 0
    o.set_crydat(
        flex.float(
            [mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))

    o.set_lcrflg(0)
    o.set_datum(flex.float((0.0, 0.0, 0.0)))

    # detector size, distance
    o.set_detlm(
        flex.float([
            0.0, panel.get_image_size()[0], 0.0,
            panel.get_image_size()[1], 0, 0, 0, 0
        ]))
    o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))

    # goniometer axes and names, and scan axis number, and num axes, missets
    # [=A=] should we be using this to unroll the setting matrix etc?
    o.set_e1(flex.float(axis))
    o.set_e2(flex.float((0.0, 0.0, 0.0)))
    o.set_e3(flex.float((0.0, 0.0, 0.0)))
    o.set_gonlab(flex.std_string(('AXIS', '', '')))
    o.set_jsaxs(1)
    o.set_ngonax(1)
    o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))

    # scan ranges, axis
    if experiment.scan:
        phi_start, phi_range = experiment.scan.get_image_oscillation(
            image_number)
    else:
        phi_start, phi_range = 0.0, 0.0
    o.set_phistt(phi_start)
    o.set_phirange(phi_range)
    o.set_phiend(phi_start + phi_range)
    o.set_scanax(flex.float(axis))

    # number of misorientation angles
    o.set_misflg(0)

    # crystal axis closest to rotation axis (why do I want this?)
    o.set_jumpax(0)

    # type of data - 1; 2D, 2; 3D, 3; Laue
    o.set_ldtype(2)

    return o
def tilt_fit(imgs, is_bg_pix, delta_q, photon_gain, sigma_rdout, zinger_zscore,
             exper, predicted_refls, sb_pad=0, filter_boundary_spots=False,
             minsnr=None, mintilt=None, plot=False, verbose=False,
             is_BAD_pix=None, min_strong=None, min_bg=10,
             min_dist_to_bad_pix=7, **kwargs):
    """Integrate predicted reflections using a weighted tilt-plane background fit.

    For each prediction a shoebox is cut from the panel image, the background
    pixels are fit to a plane ``a*x + b*y + c`` (inverse-variance weighted by
    readout noise plus photon counts), and the spot intensity is the
    background-subtracted sum over the foreground pixels, with the tilt-plane
    parameter uncertainty propagated into the intensity variance
    (Leslie-1999-style summation integration).

    Parameters
    ----------
    imgs : per-panel 2-D pixel arrays (ADU)
    is_bg_pix : per-panel boolean masks flagging background pixels
    delta_q : radial width (1/A) of the integration region in momentum transfer
    photon_gain : ADU per photon; pixel values are divided by this
    sigma_rdout : readout noise (photon units), used in weights and variances
    zinger_zscore : z-score threshold for rejecting outlier background pixels
    exper : dxtbx experiment supplying detector/beam models
    predicted_refls : dials reflection table with xyzobs/bbox/rlp/shoebox/panel
    sb_pad : optional pixel padding added around the integration shoebox
    filter_boundary_spots : if True, drop spots whose box touches a panel edge
    is_BAD_pix : per-panel masks of dead/hot pixels (default: none bad)
    min_bg : minimum number of usable background pixels required for a fit
    min_dist_to_bad_pix : reject spots closer than this (pixels) to a bad pixel
    minsnr, mintilt, plot, min_strong, kwargs : accepted but unused here

    Returns
    -------
    tuple of (selected reflection table, plane coefficients, tilt-plane
    variance terms, integrated intensities, intensity variances)
    """
    if is_BAD_pix is None:
        # np.bool was removed from modern numpy; builtin bool is the dtype
        is_BAD_pix = np.zeros(np.array(is_bg_pix).shape, bool)
    predicted_refls['id'] = flex.int(len(predicted_refls), -1)
    predicted_refls['imageset_id'] = flex.int(len(predicted_refls), 0)
    El = ExperimentList()
    El.append(exper)
    predicted_refls.centroid_px_to_mm(El)
    predicted_refls.map_centroids_to_reciprocal_space(El)

    ss_dim, fs_dim = imgs[0].shape
    n_refl = len(predicted_refls)
    integrations = []
    variances = []
    coeffs = []
    new_shoeboxes = []
    tilt_error = []
    boundary = []
    detdist = exper.detector[0].get_distance()
    pixsize = exper.detector[0].get_pixel_size()[0]
    ave_wave = exper.beam.get_wavelength()

    # Build one KD-tree of bad-pixel coordinates per panel so spots lying too
    # close to a bad pixel can be rejected with a cheap radius query.
    bad_trees = {}
    unique_panels = set(predicted_refls["panel"])
    for p in unique_panels:
        panel_bad_pix = is_BAD_pix[p]
        # BUG FIX: was np.where(is_BAD_pix[0]) -- every panel's tree was
        # built from panel 0's bad-pixel mask instead of its own.
        ybad, xbad = np.where(panel_bad_pix)
        if ybad.size:
            # cKDTree needs a materialized sequence; zip() is lazy on Py3
            bad_trees[p] = cKDTree(list(zip(ybad, xbad)))
        else:
            bad_trees[p] = None

    sel = []
    for i_ref in range(len(predicted_refls)):
        ref = predicted_refls[i_ref]
        i_com, j_com, _ = ref['xyzobs.px.value']
        # which detector panel am I on ?
        i_panel = ref['panel']
        if bad_trees[i_panel] is not None:
            # NOTE(review): the tree points are (slow, fast) but the query
            # point is (fast, slow) -- confirm the intended coordinate order.
            if bad_trees[i_panel].query_ball_point(
                    (i_com, j_com), r=min_dist_to_bad_pix):
                sel.append(False)
                integrations.append(None)
                variances.append(None)
                coeffs.append(None)
                new_shoeboxes.append(None)
                tilt_error.append(None)
                boundary.append(None)
                continue

        i1_a, i2_a, j1_a, j2_a, _, _ = ref['bbox']  # bbox of prediction
        i1_ = max(i1_a, 0)
        i2_ = min(i2_a, fs_dim - 1)
        j1_ = max(j1_a, 0)
        j2_ = min(j2_a, ss_dim - 1)

        # Box size: radial span covering +/- delta_q/2 about the reflection's
        # |Q|, converted from reciprocal space to detector pixels.
        Qmag = 2 * np.pi * np.linalg.norm(ref['rlp'])  # physicist convention
        rad1 = (detdist / pixsize) * np.tan(
            2 * np.arcsin((Qmag - delta_q * .5) * ave_wave / 4 / np.pi))
        rad2 = (detdist / pixsize) * np.tan(
            2 * np.arcsin((Qmag + delta_q * .5) * ave_wave / 4 / np.pi))
        # rad2 - rad1 is the diagonal across the bbox
        bbox_extent = (rad2 - rad1) / np.sqrt(2)

        i_com = i_com - 0.5
        j_com = j_com - 0.5
        i_low = int(i_com - bbox_extent / 2.)
        i_high = int(i_com + bbox_extent / 2.)
        j_low = int(j_com - bbox_extent / 2.)
        j_high = int(j_com + bbox_extent / 2.)
        i1_orig = max(i_low, 0)
        i2_orig = min(i_high, fs_dim - 1)
        j1_orig = max(j_low, 0)
        j2_orig = min(j_high, ss_dim - 1)

        # Optionally pad the box, then re-clip to the panel; the *_p indices
        # locate the unpadded box inside the padded one.
        i_low = i_low - sb_pad
        i_high = i_high + sb_pad
        j_low = j_low - sb_pad
        j_high = j_high + sb_pad
        i1 = max(i_low, 0)
        i2 = min(i_high, fs_dim - 1)
        j1 = max(j_low, 0)
        j2 = min(j_high, ss_dim - 1)
        i1_p = i1_orig - i1
        i2_p = i1_p + i2_orig - i1_orig
        j1_p = j1_orig - j1
        j2_p = j1_p + j2_orig - j1_orig

        if i1 == 0 or i2 == fs_dim or j1 == 0 or j2 == ss_dim:
            boundary.append(True)
            if filter_boundary_spots:
                sel.append(False)
                integrations.append(None)
                variances.append(None)
                coeffs.append(None)
                new_shoeboxes.append(None)
                tilt_error.append(None)
                continue
        else:
            boundary.append(False)

        # get the image and mask; NOTE: gain is important here!
        shoebox_img = imgs[i_panel][j1:j2, i1:i2] / photon_gain
        # initially all pixels are valid
        dials_mask = np.zeros(shoebox_img.shape).astype(np.int32)
        dials_mask += MaskCode.Valid
        shoebox_mask = is_bg_pix[i_panel][j1:j2, i1:i2]
        badpix_mask = is_BAD_pix[i_panel][j1:j2, i1:i2]
        dials_mask[shoebox_mask] = dials_mask[shoebox_mask] + MaskCode.Background

        new_shoebox = Shoebox((i1_orig, i2_orig, j1_orig, j2_orig, 0, 1))
        new_shoebox.allocate()
        new_shoebox.data = flex.float(
            np.ascontiguousarray(shoebox_img[None, j1_p:j2_p, i1_p:i2_p]))

        # coordinate arrays of the image
        Y, X = np.indices(shoebox_img.shape)

        # determine if any more outliers (zingers) hide among the bg pixels
        img1d = shoebox_img.ravel()
        mask1d = shoebox_mask.ravel()  # mask specifies which pixels are bg
        # out1d specifies which bg pixels are outliers (zingers)
        out1d = np.zeros(mask1d.shape, bool)
        out1d[mask1d] = is_outlier(img1d[mask1d].ravel(), zinger_zscore)
        out2d = out1d.reshape(shoebox_img.shape)
        # combine with the bad-pixel mask
        out2d = np.logical_or(out2d, badpix_mask)

        # pixels we fit to: background, not outlier, not bad
        fit_sel = np.logical_and(~out2d, shoebox_mask)
        if np.sum(fit_sel) < min_bg:
            integrations.append(None)
            variances.append(None)
            coeffs.append(None)
            new_shoeboxes.append(None)
            tilt_error.append(None)
            sel.append(False)
            continue

        # update the dials mask...
        dials_mask[fit_sel] = dials_mask[fit_sel] + MaskCode.BackgroundUsed

        # fast scan pixels, slow scan pixels, pixel values (gain corrected)
        fast, slow, rho_bg = X[fit_sel], Y[fit_sel], shoebox_img[fit_sel]

        # weighted least squares for the plane a*x + b*y + c
        A = np.array([fast, slow, np.ones_like(fast)]).T
        W = np.diag(1 / (sigma_rdout ** 2 + rho_bg))  # inverse-variance weights
        AWA = np.dot(A.T, np.dot(W, A))
        try:
            AWA_inv = np.linalg.inv(AWA)
        except np.linalg.LinAlgError:
            print("WARNING: Fit did not work.. investigate reflection")
            print(ref)
            integrations.append(None)
            variances.append(None)
            coeffs.append(None)
            new_shoeboxes.append(None)
            tilt_error.append(None)
            sel.append(False)
            continue
        AtW = np.dot(A.T, W)
        a, b, c = np.dot(np.dot(AWA_inv, AtW), rho_bg)
        coeffs.append((a, b, c))

        # evaluate the fitted tilt plane over the whole shoebox
        X1d = np.ravel(X)
        Y1d = np.ravel(Y)
        background = (X1d * a + Y1d * b + c).reshape(shoebox_img.shape)
        new_shoebox.background = flex.float(
            np.ascontiguousarray(background[None, j1_p:j2_p, i1_p:i2_p]))

        # variance-covariance of (a, b, c) from the weighted fit residuals
        r = rho_bg - np.dot(A, (a, b, c))
        Nbg = len(rho_bg)
        Nparam = 3
        r_fact = np.dot(r.T, np.dot(W, r)) / (Nbg - Nparam)
        var_covar = AWA_inv * r_fact
        abc_var = var_covar[0][0], var_covar[1][1], var_covar[2][2]

        # place the strong spot (foreground) mask in the expanded shoebox
        peak_mask = ref['shoebox'].mask.as_numpy_array()[0] == \
            MaskCode.Valid + MaskCode.Foreground
        peak_mask_valid = peak_mask[j1_ - j1_a:-j1_a + j2_,
                                    i1_ - i1_a:-i1_a + i2_]
        peak_mask_expanded = np.zeros_like(shoebox_mask)
        # overlap region of the original and the expanded boxes
        i1_o = max(i1_, i1)
        i2_o = min(i2_, i2)
        j1_o = max(j1_, j1)
        j2_o = min(j2_, j2)
        pk_mask_istart = i1_o - i1_
        pk_mask_jstart = j1_o - j1_
        pk_mask_istop = peak_mask_valid.shape[1] - (i2_ - i2_o)
        pk_mask_jstop = peak_mask_valid.shape[0] - (j2_ - j2_o)
        peak_mask_overlap = peak_mask_valid[pk_mask_jstart:pk_mask_jstop,
                                            pk_mask_istart:pk_mask_istop]
        pk_mask_exp_i1 = i1_o - i1
        pk_mask_exp_j1 = j1_o - j1
        pk_mask_exp_i2 = peak_mask_expanded.shape[1] - (i2 - i2_o)
        pk_mask_exp_j2 = peak_mask_expanded.shape[0] - (j2 - j2_o)
        peak_mask_expanded[pk_mask_exp_j1:pk_mask_exp_j2,
                           pk_mask_exp_i1:pk_mask_exp_i2] = peak_mask_overlap

        # update the dials mask
        dials_mask[peak_mask_expanded] = \
            dials_mask[peak_mask_expanded] + MaskCode.Foreground

        p = X[peak_mask_expanded]  # fast scan coords
        q = Y[peak_mask_expanded]  # slow scan coords
        rho_peak = shoebox_img[peak_mask_expanded]  # pixel values

        Isum = np.sum(rho_peak - a * p - b * q - c)  # summed spot intensity
        # include readout noise in the per-pixel variance
        var_rho_peak = sigma_rdout ** 2 + rho_peak
        Ns = len(rho_peak)  # number of integrated peak pixels
        # variance propagated from the tilt plane constants
        var_a_term = abc_var[0] * ((np.sum(p)) ** 2)
        var_b_term = abc_var[1] * ((np.sum(q)) ** 2)
        var_c_term = abc_var[2] * (Ns ** 2)
        tilt_error.append(var_a_term + var_b_term + var_c_term)
        # total variance of the spot
        var_Isum = np.sum(var_rho_peak) + var_a_term + var_b_term + var_c_term

        integrations.append(Isum)
        variances.append(var_Isum)
        new_shoebox.mask = flex.int(
            np.ascontiguousarray(dials_mask[None, j1_p:j2_p, i1_p:i2_p]))
        new_shoeboxes.append(new_shoebox)
        sel.append(True)
        if i_ref % 50 == 0 and verbose:
            print("Integrated refls %d / %d" % (i_ref + 1, n_refl))

    # drop rejected reflections and attach the results to the table
    boundary = np.array(boundary)[sel].astype(bool)
    integrations = np.array([I for I in integrations if I is not None])
    variances = np.array([v for v in variances if v is not None])
    coeffs = np.array([c for c in coeffs if c is not None])
    tilt_error = np.array([te for te in tilt_error if te is not None])
    predicted_refls = predicted_refls.select(flex.bool(sel))
    predicted_refls['resolution'] = flex.double(
        1 / np.linalg.norm(predicted_refls['rlp'], axis=1))
    predicted_refls['boundary'] = flex.bool(boundary)
    predicted_refls["intensity.sum.value.Leslie99"] = flex.double(integrations)
    predicted_refls["intensity.sum.variance.Leslie99"] = flex.double(variances)
    predicted_refls['shoebox'] = flex.shoebox(
        [sb for sb in new_shoeboxes if sb is not None])
    idx_assign = assign_indices.AssignIndicesGlobal(tolerance=0.333)
    idx_assign(predicted_refls, El)
    return predicted_refls, coeffs, tilt_error, integrations, variances
# Build a small reflection table with three synthetic shoeboxes, each filled
# with a ramp of pixel values plus a known "intensity" spike at the centre
# voxel; backgrounds are all zero.
num_ref = 3
ref_table = flex.reflection_table()
shoebox = flex.shoebox(num_ref)
ref_table['shoebox'] = shoebox
intensity = flex.double(num_ref)
ref_table['intensity.sum.value'] = intensity
intensity_var = flex.double(num_ref)
ref_table['intensity.sum.variance'] = intensity_var
iterate = ref_table['shoebox']
n = 0
for arr in iterate:
    img = flex.float(flex.grid(3, 3, 3))
    bkg = flex.float(flex.grid(3, 3, 3))
    msk = flex.int(flex.grid(3, 3, 3))
    for row in range(3):
        for col in range(3):
            for fra in range(3):
                img[row, col, fra] = row + col + fra + n * 9
                bkg[row, col, fra] = 0.0
                msk[row, col, fra] = 3
    n += 1
    # mark the centre voxel as foreground and inject the known intensity
    msk[1, 1, 1] = 5
    tmp_i = n * n * n * 3
    img[1, 1, 1] += tmp_i
    # BUG FIX: was a Python-2-only print statement; use the print() function
    print("intensity must be =", tmp_i)
    arr.data = img[:, :, :]
    arr.background = bkg[:, :, :]
def with_individual_given_intensity(self, N, I, Ba, Bb, Bc, Bd):
    """Generate reflections with given intensity and background.

    Simulates a reciprocal-space Gaussian signal of intensity ``I[i]`` into
    each predicted reflection's shoebox, then adds a random background plane
    with per-reflection coefficients ``Ba/Bb/Bc/Bd``.

    Parameters
    ----------
    N : number of reflections to predict; must equal len(I), len(Ba), ...
    I : per-reflection signal intensities (photons)
    Ba, Bb, Bc, Bd : per-reflection background-plane coefficients

    Returns
    -------
    A reflection table with simulated shoeboxes and the simulation inputs
    stored in intensity.sim / background.sim.* / intensity.exp columns.
    """
    from dials.array_family import flex
    from dials.util.command_line import ProgressBar
    from dials.algorithms.simulation import simulate_reciprocal_space_gaussian
    from dials.algorithms.simulation.generate_test_reflections import (
        random_background_plane2,
    )

    # Check the lengths
    assert N == len(I)
    assert N == len(Ba)
    assert N == len(Bb)
    assert N == len(Bc)
    assert N == len(Bd)

    # Generate some predictions
    refl = self.generate_predictions(N)

    # Calculate the signal
    progress = ProgressBar(
        title="Calculating signal for %d reflections" % len(refl))
    s1 = refl["s1"]
    phi = refl["xyzcal.mm"].parts()[2]
    bbox = refl["bbox"]
    shoebox = refl["shoebox"]
    # BUG FIX: was int(len(refl) / 100), which is 0 for fewer than 100
    # reflections and made the `i % m` progress check raise
    # ZeroDivisionError; clamp the update stride to at least 1.
    m = max(1, len(refl) // 100)
    I_exp = flex.double(len(refl), 0)
    for i in range(len(refl)):
        if I[i] > 0:
            data = shoebox[i].data.as_double()
            I_exp[i] = simulate_reciprocal_space_gaussian(
                self.experiment.beam,
                self.experiment.detector,
                self.experiment.goniometer,
                self.experiment.scan,
                self.sigma_b,
                self.sigma_m,
                s1[i],
                phi[i],
                bbox[i],
                I[i],
                data,
                shoebox[i].mask,
            )
            shoebox[i].data = data.as_float()
        if i % m == 0:
            progress.update(100.0 * float(i) / len(refl))
    progress.finished("Calculated signal impacts for %d reflections" % len(refl))

    # Calculate the background
    progress = ProgressBar(
        title="Calculating background for %d reflections" % len(refl))
    for j in range(len(refl)):
        background = flex.float(flex.grid(shoebox[j].size()), 0.0)
        random_background_plane2(background, Ba[j], Bb[j], Bc[j], Bd[j])
        shoebox[j].data += background
        shoebox[j].background = background
        if j % m == 0:
            progress.update(100.0 * float(j) / len(refl))
    progress.finished("Calculated background for %d reflections" % len(refl))

    # Save the expected intensity and background
    refl["intensity.sim"] = I
    refl["background.sim.a"] = Ba
    refl["background.sim.b"] = Bb
    refl["background.sim.c"] = Bc
    refl["background.sim.d"] = Bd
    refl["intensity.exp"] = I_exp

    # Return the reflections
    return refl
# Construct a toy reflection table holding three shoeboxes. Each shoebox gets
# a deterministic ramp of pixel values offset per shoebox, a zero background,
# and a known "intensity" added at the centre voxel (which is also flagged in
# the mask).
num_ref = 3
ref_table = flex.reflection_table()
shoebox = flex.shoebox(num_ref)
ref_table["shoebox"] = shoebox
intensity = flex.double(num_ref)
ref_table["intensity.sum.value"] = intensity
intensity_var = flex.double(num_ref)
ref_table["intensity.sum.variance"] = intensity_var
iterate = ref_table["shoebox"]
n = 0
for arr in iterate:
    img = flex.float(flex.grid(3, 3, 3))
    bkg = flex.float(flex.grid(3, 3, 3))
    msk = flex.int(flex.grid(3, 3, 3))
    base = n * 9  # per-shoebox offset for the pixel-value ramp
    for row in range(3):
        for col in range(3):
            for fra in range(3):
                img[row, col, fra] = base + row + col + fra
                bkg[row, col, fra] = 0.0
                msk[row, col, fra] = 3
    n += 1
    # centre voxel: flag as foreground and inject the known intensity
    msk[1, 1, 1] = 5
    tmp_i = 3 * n ** 3
    img[1, 1, 1] += tmp_i
    print("intensity must be =", tmp_i)
    arr.data = img[:, :, :]
    arr.background = bkg[:, :, :]
def export_mtz(observed_hkls, experiment, filename):
    """Write a skeleton MTZ file for the given observed reflections.

    Creates one MTZ batch per repeat observation (up to the maximum
    multiplicity over all HKLs) and fills placeholder H/K/L/M_ISYM columns,
    the BATCH assignments and a constant FRACTIONCALC.

    Parameters
    ----------
    observed_hkls : dict mapping a miller index to its observation count
    experiment : dxtbx experiment providing beam/goniometer/detector/crystal
    filename : path of the MTZ file to write

    Returns
    -------
    The iotbx mtz object that was written to disk.
    """
    if experiment.goniometer:
        axis = experiment.goniometer.get_rotation_axis()
    else:
        axis = 0.0, 0.0, 0.0
    s0 = experiment.beam.get_s0()
    wavelength = experiment.beam.get_wavelength()

    from scitbx import matrix

    panel = experiment.detector[0]
    pixel_size = panel.get_pixel_size()
    cb_op_to_ref = experiment.crystal.get_space_group().info(
        ).change_of_basis_op_to_reference_setting()
    experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)

    from iotbx import mtz
    from scitbx.array_family import flex
    import itertools

    m = mtz.object()
    m.set_title('from dials.scratch.mg.strategy_i19')
    m.set_space_group_info(experiment.crystal.get_space_group().info())

    # FIX: itervalues()/iteritems() are Python-2 only; values()/items()
    # behave identically here and work on both Python versions.
    nrefcount = sum(observed_hkls.values())
    nref = max(observed_hkls.values())

    # FIX: the unit cell and U matrix are constant across batches, and they
    # are needed again for add_crystal() below -- hoist them out of the loop
    # (the original referenced an undefined name `unit_cell` there).
    _unit_cell = experiment.crystal.get_unit_cell()
    _U = experiment.crystal.get_U()

    for batch in range(1, nref+1):
        o = m.add_batch().set_num(batch).set_nbsetid(1).set_ncryst(1)
        o.set_time1(0.0).set_time2(0.0).set_title('Batch %d' % batch)
        o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
        o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
        o.set_divhd(0.0).set_divvd(0.0)
        o.set_so(flex.float(s0)).set_source(flex.float((0, 0, -1)))
        o.set_bbfac(0.0).set_bscale(1.0)
        o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)
        o.set_cell(flex.float(_unit_cell.parameters()))
        o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
        o.set_umat(flex.float(_U.transpose().elems))
        mosaic = experiment.crystal.get_mosaicity()
        o.set_crydat(flex.float([mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                 0.0, 0.0, 0.0, 0.0]))
        o.set_lcrflg(0)
        o.set_datum(flex.float((0.0, 0.0, 0.0)))
        # detector size, distance
        o.set_detlm(flex.float([0.0, panel.get_image_size()[0],
                                0.0, panel.get_image_size()[1],
                                0, 0, 0, 0]))
        o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))
        # goniometer axes and names, and scan axis number, and number of axes, missets
        o.set_e1(flex.float(axis))
        o.set_e2(flex.float((0.0, 0.0, 0.0)))
        o.set_e3(flex.float((0.0, 0.0, 0.0)))
        o.set_gonlab(flex.std_string(('AXIS', '', '')))
        o.set_jsaxs(1)
        o.set_ngonax(1)
        o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))
        phi_start, phi_range = 0.0, 0.0
        o.set_phistt(phi_start)
        o.set_phirange(phi_range)
        o.set_phiend(phi_start + phi_range)
        o.set_scanax(flex.float(axis))
        # number of misorientation angles
        o.set_misflg(0)
        # crystal axis closest to rotation axis (why do I want this?)
        o.set_jumpax(0)
        # type of data - 1; 2D, 2; 3D, 3; Laue
        o.set_ldtype(2)

    # now create the actual data structures - first keep a track of the columns
    # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
    # LP MPART FLAG BGPKRATIOS
    from cctbx.array_family import flex as cflex  # implicit import

    # now go for it and make an MTZ file...
    # BUG FIX: was `unit_cell.parameters()` -- `unit_cell` was never defined
    # (NameError at runtime); `_unit_cell` is the intended object.
    x = m.add_crystal('XTAL', 'DIALS', _unit_cell.parameters())
    d = x.add_dataset('FROMDIALS', wavelength)

    # now add column information...
    type_table = {'IPR': 'J', 'BGPKRATIOS': 'R', 'WIDTH': 'R', 'I': 'J',
                  'H': 'H', 'K': 'H', 'MPART': 'I', 'L': 'H', 'BATCH': 'B',
                  'M_ISYM': 'Y', 'SIGI': 'Q', 'FLAG': 'I', 'XDET': 'R',
                  'LP': 'R', 'YDET': 'R', 'SIGIPR': 'Q',
                  'FRACTIONCALC': 'R', 'ROT': 'R'}

    m.adjust_column_array_sizes(nrefcount)
    m.set_n_reflections(nrefcount)

    # assign H, K, L, M_ISYM space
    for column in 'H', 'K', 'L', 'M_ISYM':
        d.add_column(column, type_table[column]).set_values(
            flex.float(nrefcount, 0.0))
    # materialize the batch numbers; flex.float needs a concrete sequence,
    # not a lazy generator
    batchnums = [_ for (hkl, count) in observed_hkls.items()
                 for _ in range(1, count + 1)]
    d.add_column('BATCH', type_table['BATCH']).set_values(flex.float(batchnums))
    d.add_column('FRACTIONCALC', type_table['FRACTIONCALC']).set_values(
        flex.float(nrefcount, 3.0))

    m.replace_original_index_miller_indices(cb_op_to_ref.apply(
        cflex.miller_index([hkl for (hkl, count) in observed_hkls.items()
                            for _ in itertools.repeat(hkl, count)])
    ))

    m.write(filename)

    return m
def export_mtz(observed_hkls, experiment, filename):
    """Write a skeleton MTZ file for the given observed reflections.

    Creates one MTZ batch per repeat observation (up to the maximum
    multiplicity over all HKLs) and fills placeholder H/K/L/M_ISYM columns,
    the BATCH assignments and a constant FRACTIONCALC.

    Parameters
    ----------
    observed_hkls : dict mapping a miller index to its observation count
    experiment : dxtbx experiment providing beam/goniometer/detector/crystal
    filename : path of the MTZ file to write

    Returns
    -------
    The iotbx mtz object that was written to disk.
    """
    if experiment.goniometer:
        axis = experiment.goniometer.get_rotation_axis()
    else:
        axis = 0.0, 0.0, 0.0
    s0 = experiment.beam.get_s0()
    wavelength = experiment.beam.get_wavelength()

    from scitbx import matrix

    panel = experiment.detector[0]
    pixel_size = panel.get_pixel_size()
    cb_op_to_ref = (experiment.crystal.get_space_group().info().
                    change_of_basis_op_to_reference_setting())
    experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)

    from iotbx import mtz
    from scitbx.array_family import flex
    import itertools

    m = mtz.object()
    m.set_title("from dials.scratch.mg.strategy_i19")
    m.set_space_group_info(experiment.crystal.get_space_group().info())

    # FIX: itervalues()/iteritems() are Python-2 only; values()/items()
    # behave identically here and work on both Python versions.
    nrefcount = sum(observed_hkls.values())
    nref = max(observed_hkls.values())

    # FIX: the unit cell and U matrix are constant across batches, and they
    # are needed again for add_crystal() below -- hoist them out of the loop
    # (the original referenced an undefined name `unit_cell` there).
    _unit_cell = experiment.crystal.get_unit_cell()
    _U = experiment.crystal.get_U()

    for batch in range(1, nref + 1):
        o = m.add_batch().set_num(batch).set_nbsetid(1).set_ncryst(1)
        o.set_time1(0.0).set_time2(0.0).set_title("Batch %d" % batch)
        o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
        o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
        o.set_divhd(0.0).set_divvd(0.0)
        o.set_so(flex.float(s0)).set_source(flex.float((0, 0, -1)))
        o.set_bbfac(0.0).set_bscale(1.0)
        o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)
        o.set_cell(flex.float(_unit_cell.parameters()))
        o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
        o.set_umat(flex.float(_U.transpose().elems))
        mosaic = experiment.crystal.get_mosaicity()
        o.set_crydat(
            flex.float([
                mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
            ]))
        o.set_lcrflg(0)
        o.set_datum(flex.float((0.0, 0.0, 0.0)))

        # detector size, distance
        o.set_detlm(
            flex.float([
                0.0,
                panel.get_image_size()[0],
                0.0,
                panel.get_image_size()[1],
                0,
                0,
                0,
                0,
            ]))
        o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))

        # goniometer axes and names, and scan axis number, and number of axes, missets
        o.set_e1(flex.float(axis))
        o.set_e2(flex.float((0.0, 0.0, 0.0)))
        o.set_e3(flex.float((0.0, 0.0, 0.0)))
        o.set_gonlab(flex.std_string(("AXIS", "", "")))
        o.set_jsaxs(1)
        o.set_ngonax(1)
        o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))
        phi_start, phi_range = 0.0, 0.0
        o.set_phistt(phi_start)
        o.set_phirange(phi_range)
        o.set_phiend(phi_start + phi_range)
        o.set_scanax(flex.float(axis))

        # number of misorientation angles
        o.set_misflg(0)

        # crystal axis closest to rotation axis (why do I want this?)
        o.set_jumpax(0)

        # type of data - 1; 2D, 2; 3D, 3; Laue
        o.set_ldtype(2)

    # now create the actual data structures - first keep a track of the columns
    # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
    # LP MPART FLAG BGPKRATIOS
    from cctbx.array_family import flex as cflex  # implicit import

    # now go for it and make an MTZ file...
    # BUG FIX: was `unit_cell.parameters()` -- `unit_cell` was never defined
    # (NameError at runtime); `_unit_cell` is the intended object.
    x = m.add_crystal("XTAL", "DIALS", _unit_cell.parameters())
    d = x.add_dataset("FROMDIALS", wavelength)

    # now add column information...
    type_table = {
        "IPR": "J",
        "BGPKRATIOS": "R",
        "WIDTH": "R",
        "I": "J",
        "H": "H",
        "K": "H",
        "MPART": "I",
        "L": "H",
        "BATCH": "B",
        "M_ISYM": "Y",
        "SIGI": "Q",
        "FLAG": "I",
        "XDET": "R",
        "LP": "R",
        "YDET": "R",
        "SIGIPR": "Q",
        "FRACTIONCALC": "R",
        "ROT": "R",
    }

    m.adjust_column_array_sizes(nrefcount)
    m.set_n_reflections(nrefcount)

    # assign H, K, L, M_ISYM space
    for column in "H", "K", "L", "M_ISYM":
        d.add_column(column, type_table[column]).set_values(
            flex.float(nrefcount, 0.0))
    # materialize the batch numbers; flex.float needs a concrete sequence,
    # not a lazy generator
    batchnums = [_ for (hkl, count) in observed_hkls.items()
                 for _ in range(1, count + 1)]
    d.add_column("BATCH", type_table["BATCH"]).set_values(flex.float(batchnums))
    d.add_column("FRACTIONCALC", type_table["FRACTIONCALC"]).set_values(
        flex.float(nrefcount, 3.0))

    m.replace_original_index_miller_indices(
        cb_op_to_ref.apply(
            cflex.miller_index([
                hkl for (hkl, count) in observed_hkls.items()
                for _ in itertools.repeat(hkl, count)
            ])))

    m.write(filename)

    return m
# NOTE(review): this is the interior of a per-reflection loop from a
# shoebox-building test; names such as t_row, col_str, row_str, data2d,
# t_bbox, t_xyzobs, t_xyzcal and t_shoebox are defined before this excerpt.
# Render a 2-D model spot (presumably a Gaussian-like profile from
# model_2d -- confirm against its definition) and add it into the
# composite image at this spot's position.
ref2d = model_2d(nrow, ncol, 5, 1, ref_ang, i_loc, 0.5)
data2d_tmp = ref2d.as_numpy_array()
data2d[col_str:col_str + ncol, row_str:row_str + nrow] += \
    numpy.float64(data2d_tmp)
# Record the bounding box (single image frame, z in [0, 1)) and the
# observed/calculated centroids for this reflection row.
t_bbox[t_row] = [col_str, col_str + ncol, row_str, row_str + nrow, 0, 1]
t_xyzobs[t_row] = [col_str + centr_col, row_str + centr_row, 0.5]
t_xyzcal[t_row] = [col_str + centr_col, row_str + centr_row, 0.5]
# Copy the spot region back out of the composite image into its own array,
# then convert to a flex.float grid shaped as a 1-frame shoebox.
np_shoebox = numpy.copy( \
    data2d[col_str:col_str + ncol, row_str:row_str + nrow])
#tmp_2d_fl_shoebox = flex.double(np_shoebox)
#tmp_2d_fl_shoebox.reshape(flex.grid(1, ncol, nrow))
fl_shoebox = flex.float(np_shoebox)
fl_shoebox.reshape(flex.grid(1, ncol, nrow))
# Stashed (unused) snippet kept by the original author for a future
# multi-frame version of this test.
to_use_soon = '''
fl_shoebox = flex.double(flex.grid(5, ncol, nrow))
for x_loc in range(ncol):
    for y_loc in range(nrow):
        for z_loc in range(5):
            fl_shoebox[z_loc, y_loc, x_loc] = tmp_2d_fl_shoebox[y_loc, x_loc]
'''
# The background is set to a copy ([:, :, :]) of the spot data itself.
fl_shoebox_bkg=fl_shoebox[:,:,:]
t_shoebox[t_row].data = fl_shoebox
t_shoebox[t_row].background = fl_shoebox_bkg
#lc_mask = flex.int(flex.grid(5, ncol, nrow), 3)