def spot_count_per_image(self, rlist): ''' Analyse the spot count per image. ''' from os.path import join x,y,z = rlist['xyzobs.px.value'].parts() max_z = int(math.ceil(flex.max(z))) ids = rlist['id'] spot_count_per_image = [] for j in range(flex.max(ids)+1): spot_count_per_image.append(flex.int()) zsel = z.select(ids == j) for i in range(max_z): sel = (zsel >= i) & (zsel < (i+1)) spot_count_per_image[j].append(sel.count(True)) colours = ['blue', 'red', 'green', 'orange', 'purple', 'black'] * 10 assert len(spot_count_per_image) <= len(colours) from matplotlib import pyplot fig = pyplot.figure() ax = fig.add_subplot(111) ax.set_title("Spot count per image") for j in range(len(spot_count_per_image)): ax.scatter( list(range(len(spot_count_per_image[j]))), spot_count_per_image[j], s=5, color=colours[j], marker='o', alpha=0.4) ax.set_xlabel("Image #") ax.set_ylabel("# spots") pyplot.savefig(join(self.directory, "spots_per_image.png")) pyplot.close()
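# A minimal pure-Python sketch of the per-image binning above: the z
# component of each centroid is bucketed into unit (one-image) intervals,
# per sequence id. Plain lists stand in for flex arrays; the names are
# illustrative, not the DIALS API.
import math

def spots_per_image(z_centroids, ids, n_sequences):
    max_z = int(math.ceil(max(z_centroids)))
    counts = [[0] * max_z for _ in range(n_sequences)]
    for z, j in zip(z_centroids, ids):
        counts[j][min(int(z), max_z - 1)] += 1
    return counts

print(spots_per_image([0.2, 0.7, 1.5, 2.9], [0, 0, 1, 1], 2))
# -> [[2, 0, 0], [0, 1, 1]]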
def plot_histograms(self, reflections, panel = None, ax = None, bounds = None): data = reflections['difference_vector_norms'] colors = ['b-', 'g-', 'g--', 'r-', 'b-', 'b--'] n_slots = 20 if self.params.residuals.histogram_max is None: h = flex.histogram(data, n_slots=n_slots) else: h = flex.histogram(data.select(data <= self.params.residuals.histogram_max), n_slots=n_slots) n = len(reflections) rmsd_obs = math.sqrt((reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).sum_sq()/n) sigma = mode = h.slot_centers()[list(h.slots()).index(flex.max(h.slots()))] mean_obs = flex.mean(data) median = flex.median(data) mean_rayleigh = math.sqrt(math.pi/2)*sigma rmsd_rayleigh = math.sqrt(2)*sigma data = flex.vec2_double([(i,j) for i, j in zip(h.slot_centers(), h.slots())]) n = len(data) for i in [mean_obs, mean_rayleigh, mode, rmsd_obs, rmsd_rayleigh]: data.extend(flex.vec2_double([(i, 0), (i, flex.max(h.slots()))])) data = self.get_bounded_data(data, bounds) tmp = [data[:n]] for i in range(len(colors)): tmp.append(data[n+(i*2):n+((i+1)*2)]) data = tmp for d, c in zip(data, colors): ax.plot(d.parts()[0], d.parts()[1], c) if ax.get_legend() is None: ax.legend([r"$\Delta$XY", "MeanObs", "MeanRayl", "Mode", "RMSDObs", "RMSDRayl"])
def spot_count_per_panel(self, rlist): ''' Analyse the spot count per panel. ''' from os.path import join panel = rlist['panel'] if flex.max(panel) == 0: # only one panel, don't bother generating a plot return n_panels = int(flex.max(panel)) + 1 spot_count_per_panel = flex.int() for i in range(n_panels): sel = (panel >= i) & (panel < (i+1)) spot_count_per_panel.append(sel.count(True)) from matplotlib import pyplot fig = pyplot.figure() ax = fig.add_subplot(111) ax.set_title("Spot count per panel") ax.scatter( list(range(len(spot_count_per_panel))), spot_count_per_panel, s=10, color='blue', marker='o', alpha=0.4) ax.set_xlabel("Panel #") ax.set_ylabel("# spots") pyplot.savefig(join(self.directory, "spots_per_panel.png")) pyplot.close()
def get_one_spot_length_width_angle(self, id): # the radial direction is along the vector from the beam center to # the spot centroid for each spot shoebox = self.strong['shoebox'][id] s0_position = self.panel_s0_intersections[shoebox.panel] centroid = shoebox.centroid_strong().px_xy dx, dy = [centroid[i] - s0_position[i] for i in (0, 1)] radial_abs = matrix.col((dx, dy)) radial = radial_abs.normalize() transverse = matrix.col((-radial[1], radial[0])) mask = flex.bool([(m & self.valid_code) != 0 for m in shoebox.mask]) bbox = self.strong['bbox'][id] x_start, y_start = bbox[0], bbox[2] x_range, y_range = bbox[1] - bbox[0], bbox[3] - bbox[2] radial_distances = flex.double() transverse_distances = flex.double() for i, valid_foreground in enumerate(mask): if valid_foreground: position = matrix.col( (x_start + i % x_range, y_start + i // x_range)) projection_radial = position.dot(radial) projection_transverse = position.dot(transverse) radial_distances.append(projection_radial) transverse_distances.append(projection_transverse) length = flex.max(radial_distances) - flex.min(radial_distances) width = flex.max(transverse_distances) - flex.min(transverse_distances) # The angle subtended is centered at the spot centroid, spanning the # spot width. Half this angle makes a right triangle with legs of lengths # [distance to beam center] and [half the spot width]. Use the tangent. angle = 2 * math.atan(width / (2 * radial_abs.length())) return (length, width, angle)
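# The closing comment above gives the geometry behind the subtended angle:
# half the spot width and the centroid-to-beam-centre distance form a right
# triangle, so the full angle is twice the arctangent of their ratio. A
# self-contained check of that formula (values made up for illustration):
import math

def subtended_angle(width, distance_to_beam_centre):
    return 2 * math.atan(width / (2 * distance_to_beam_centre))

# a 0.2 mm wide spot whose centroid sits 100 mm from the beam centre
print(math.degrees(subtended_angle(0.2, 100.0)))  # ~0.115 degrees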
def spot_count_per_panel(self, rlist): """ Analyse the spot count per panel. """ panel = rlist["panel"] if flex.max(panel) == 0: # only one panel, don't bother generating a plot return n_panels = int(flex.max(panel)) + 1 spot_count_per_panel = flex.int() for i in range(n_panels): sel = (panel >= i) & (panel < (i + 1)) spot_count_per_panel.append(sel.count(True)) fig = pyplot.figure() ax = fig.add_subplot(111) ax.set_title("Spot count per panel") ax.scatter( list(range(len(spot_count_per_panel))), spot_count_per_panel, s=10, color="blue", marker="o", alpha=0.4, ) ax.set_xlabel("Panel #") ax.set_ylabel("# spots") pyplot.savefig(os.path.join(self.directory, "spots_per_panel.png")) pyplot.close()
def go(filename): with open(filename, 'rb') as fh: allprof = pickle.load(fh) for prof in allprof: x, y, z = mosaic_profile_xyz(prof[0]) for profile in prof[1:]: _x, _y, _z = mosaic_profile_xyz(profile) x += _x y += _y z += _z x /= flex.max(x) data = (100 * x).iround() fmt = '%3d ' * x.size() print('X:', fmt % tuple(data)) y /= flex.max(y) data = (100 * y).iround() fmt = '%3d ' * y.size() print('Y:', fmt % tuple(data)) z /= flex.max(z) data = (100 * z).iround() fmt = '%3d ' * z.size() print('Z:', fmt % tuple(data))
def plot_prob_for_zero(c, b, s): from math import log, exp, factorial from dials.array_family import flex L = flex.double(flex.grid(100, 100)) MASK = flex.bool(flex.grid(100, 100)) c = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0] b = [bb / sum(b) for bb in b] s = [ss / sum(s) for ss in s] for BB in range(0, 100): for SS in range(0, 100): B = 0 + BB / 10000.0 S = 0 + SS / 40.0 LL = 0 for i in range(len(b)): if B*b[i] + S*s[i] <= 0: MASK[BB, SS] = True LL = -999999 break else: LL += c[i]*log(B*b[i]+S*s[i]) - log(factorial(c[i])) - B*b[i] - S*s[i] L[BB, SS] = LL index = flex.max_index(L) i = index % 100 j = index // 100 B = 0 + j / 10000.0 S = 0 + i / 40.0 print(flex.max(L), B, S) from matplotlib import pylab import numpy im = numpy.ma.masked_array(flex.exp(L).as_numpy_array(), mask=MASK.as_numpy_array()) pylab.imshow(im) pylab.show() exit(0)
def spot_count_per_image(self, rlist): """ Analyse the spot count per image. """ x, y, z = rlist["xyzobs.px.value"].parts() max_z = int(math.ceil(flex.max(z))) ids = rlist["id"] spot_count_per_image = [] for j in range(flex.max(ids) + 1): spot_count_per_image.append(flex.int()) zsel = z.select(ids == j) for i in range(max_z): sel = (zsel >= i) & (zsel < (i + 1)) spot_count_per_image[j].append(sel.count(True)) colours = ["blue", "red", "green", "orange", "purple", "black"] * 10 assert len(spot_count_per_image) <= len(colours) fig = pyplot.figure() ax = fig.add_subplot(111) ax.set_title("Spot count per image") for j, spots in enumerate(spot_count_per_image): ax.scatter( list(range(len(spots))), spots, s=5, color=colours[j], marker="o", alpha=0.4, ) ax.set_xlabel("Image #") ax.set_ylabel("# spots") pyplot.savefig(os.path.join(self.directory, "spots_per_image.png")) pyplot.close()
def go(filename): with open(filename, "rb") as fh: allprof = pickle.load(fh) for prof in allprof: x, y, z = mosaic_profile_xyz(prof[0]) for profile in prof[1:]: _x, _y, _z = mosaic_profile_xyz(profile) x += _x y += _y z += _z x /= flex.max(x) data = (100 * x).iround() fmt = "%3d " * x.size() print("X:", fmt % tuple(data)) y /= flex.max(y) data = (100 * y).iround() fmt = "%3d " * y.size() print("Y:", fmt % tuple(data)) z /= flex.max(z) data = (100 * z).iround() fmt = "%3d " * z.size() print("Z:", fmt % tuple(data))
def get_bounded_data(self, data, bounds): assert len(bounds) == 4 x = [b[0] for b in bounds] y = [b[1] for b in bounds] left = sorted(x)[1] right = sorted(x)[2] top = sorted(y)[2] bottom = sorted(y)[1] origin = col((left, bottom)) scale_x = right-left scale_y = top-bottom scale = min(scale_x, scale_y) data_max_x = flex.max(data.parts()[0]) data_min_x = flex.min(data.parts()[0]) data_max_y = flex.max(data.parts()[1]) data_min_y = flex.min(data.parts()[1]) data_scale_x = data_max_x - data_min_x data_scale_y = data_max_y - data_min_y if data_scale_x == 0 or data_scale_y == 0: print("WARNING bad scale") return data return flex.vec2_double(data.parts()[0] * (scale/abs(data_scale_x)), data.parts()[1] * (scale/abs(data_scale_y))) + origin
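# Sketch of the same bounding logic without flex (a hypothetical helper,
# not the class method above): the second-smallest/second-largest corner
# coordinates define an inner box, and the data are multiplied by the
# smaller box dimension over their own extent, then shifted to the box
# origin -- matching the original, which scales but does not first subtract
# the data minimum.
def fit_to_bounds(points, bounds):
    xs = sorted(b[0] for b in bounds)
    ys = sorted(b[1] for b in bounds)
    left, right, bottom, top = xs[1], xs[2], ys[1], ys[2]
    scale = min(right - left, top - bottom)
    sx = max(p[0] for p in points) - min(p[0] for p in points)
    sy = max(p[1] for p in points) - min(p[1] for p in points)
    if sx == 0 or sy == 0:
        print("WARNING bad scale")
        return points
    return [(x * scale / sx + left, y * scale / sy + bottom) for x, y in points]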
def display(self, num): """ Display some shoeboxes """ from dials_scratch.jmp.viewer import show_image_stack_multi_view from random import sample from dials.array_family import flex def simulate(experiments, reflections, parameters): from dials_scratch.jmp.profile_modelling import MLTarget3D func = MLTarget3D(experiments[0], reflections) return [ func.simulate(i, parameters) for i in range(len(reflections)) ] # Sample from reflections reflections = self.reflections.select( flex.size_t(sample(range(len(self.reflections)), num))) # Simulate the reflection profiles from the current model simulated = simulate(self.experiments, reflections, self.parameters) # Display stuff for model, data_sbox in zip(simulated, reflections["shoebox"]): data = data_sbox.data show_image_stack_multi_view(model.as_numpy_array(), vmax=flex.max(model)) show_image_stack_multi_view(data.as_numpy_array(), vmax=flex.max(data))
def pickle_histogram(params, data): import cPickle as pickle from dials.array_family import flex with open(data, 'rb') as fin: r, g, b = pickle.load(fin) if params.min is None: _min = min(flex.min(r), flex.min(g), flex.min(b)) else: _min = params.min if params.max is None: _max = max(flex.max(r), flex.max(g), flex.max(b)) else: _max = params.max tr = flex.histogram(r.as_1d(), data_min=_min, data_max=_max, n_slots=params.bins) tg = flex.histogram(g.as_1d(), data_min=_min, data_max=_max, n_slots=params.bins) tb = flex.histogram(b.as_1d(), data_min=_min, data_max=_max, n_slots=params.bins) with open(params.output, 'w') as f: for cn in zip(tr.slot_centers(), tr.slots(), tg.slots(), tb.slots()): f.write('%.2f %f %f %f\n' % cn)
def blank_counts_analysis(reflections, scan, phi_step, fractional_loss): if not len(reflections): raise ValueError("Input contains no reflections") xyz_px = reflections["xyzobs.px.value"] x_px, y_px, z_px = xyz_px.parts() phi = scan.get_angle_from_array_index(z_px) osc = scan.get_oscillation()[1] n_images_per_step = iceil(phi_step / osc) phi_step = n_images_per_step * osc array_range = scan.get_array_range() phi_min = scan.get_angle_from_array_index(array_range[0]) phi_max = scan.get_angle_from_array_index(array_range[1]) assert phi_min <= flex.min(phi) assert phi_max >= flex.max(phi) n_steps = max(int(round((phi_max - phi_min) / phi_step)), 1) hist = flex.histogram( z_px, data_min=array_range[0], data_max=array_range[1], n_slots=n_steps ) logger.debug("Histogram:") logger.debug(hist.as_str()) counts = hist.slots() fractional_counts = counts.as_double() / flex.max(counts) potential_blank_sel = fractional_counts <= fractional_loss xmin, xmax = zip( *[ (slot_info.low_cutoff, slot_info.high_cutoff) for slot_info in hist.slot_infos() ] ) d = { "data": [ { "x": list(hist.slot_centers()), "y": list(hist.slots()), "xlow": xmin, "xhigh": xmax, "blank": list(potential_blank_sel), "type": "bar", "name": "blank_counts_analysis", } ], "layout": { "xaxis": {"title": "z observed (images)"}, "yaxis": {"title": "Number of reflections"}, "bargap": 0, }, } blank_regions = blank_regions_from_sel(d["data"][0]) d["blank_regions"] = blank_regions return d
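# The blank detection above boils down to one comparison per histogram
# slot: flag any slot whose count drops below a fraction of the busiest
# slot. A tiny sketch with plain lists (names illustrative):
def potential_blank_slots(counts, fractional_loss):
    peak = max(counts)
    return [c / peak <= fractional_loss for c in counts]

print(potential_blank_slots([120, 115, 4, 2, 110], fractional_loss=0.1))
# -> [False, False, True, True, False]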
def __init__(self, strategies, n_bins=8, degrees_per_bin=5): from cctbx import crystal, miller import copy sg = strategies[0].experiment.crystal.get_space_group() \ .build_derived_reflection_intensity_group(anomalous_flag=True) cs = crystal.symmetry( unit_cell=strategies[0].experiment.crystal.get_unit_cell(), space_group=sg) for i, strategy in enumerate(strategies): if i == 0: predicted = copy.deepcopy(strategy.predicted) else: predicted_ = copy.deepcopy(strategy.predicted) predicted_['dose'] += (flex.max(predicted['dose']) + 1) predicted.extend(predicted_) ms = miller.set(cs, indices=predicted['miller_index'], anomalous_flag=True) ma = miller.array(ms, data=flex.double(ms.size(),1), sigmas=flex.double(ms.size(), 1)) if 1: merging = ma.merge_equivalents() o = merging.array().customized_copy( data=merging.redundancies().data().as_double()).as_mtz_dataset('I').mtz_object() o.write('predicted.mtz') d_star_sq = ma.d_star_sq().data() binner = ma.setup_binner_d_star_sq_step( d_star_sq_step=(flex.max(d_star_sq)-flex.min(d_star_sq)+1e-8)/n_bins) dose = predicted['dose'] range_width = 1 range_min = flex.min(dose) - range_width range_max = flex.max(dose) n_steps = 2 + int((range_max - range_min) - range_width) binner_non_anom = ma.as_non_anomalous_array().use_binning( binner) self.n_complete = flex.size_t(binner_non_anom.counts_complete()[1:-1]) from xia2.Modules.PyChef2 import ChefStatistics chef_stats = ChefStatistics( ma.indices(), ma.data(), ma.sigmas(), ma.d_star_sq().data(), dose, self.n_complete, binner, ma.space_group(), ma.anomalous_flag(), n_steps) def fraction_new(completeness): # Completeness so far at end of image completeness_end = completeness[1:] # Completeness so far at start of image completeness_start = completeness[:-1] # Fraction of unique reflections observed for the first time on each image return completeness_end - completeness_start self.dose = dose self.ieither_completeness = chef_stats.ieither_completeness() self.iboth_completeness = chef_stats.iboth_completeness() self.frac_new_ref = fraction_new(self.ieither_completeness) / degrees_per_bin self.frac_new_pairs = fraction_new(self.iboth_completeness) / degrees_per_bin
def generate_cif(crystal, refiner, filename): logger.info("Saving CIF information to %s", filename) from cctbx import miller import iotbx.cif.model block = iotbx.cif.model.block() block["_audit_creation_method"] = dials_version() block["_audit_creation_date"] = datetime.date.today().isoformat() # block["_publ_section_references"] = '' # once there is a reference... for cell, esd, cifname in zip( crystal.get_unit_cell().parameters(), crystal.get_cell_parameter_sd(), [ "length_a", "length_b", "length_c", "angle_alpha", "angle_beta", "angle_gamma", ], ): block["_cell_%s" % cifname] = format_float_with_standard_uncertainty( cell, esd ) block["_cell_volume"] = format_float_with_standard_uncertainty( crystal.get_unit_cell().volume(), crystal.get_cell_volume_sd() ) used_reflections = refiner.get_matches() block["_cell_measurement_reflns_used"] = len(used_reflections) block["_cell_measurement_theta_min"] = ( flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2 ) block["_cell_measurement_theta_max"] = ( flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2 ) block["_diffrn_reflns_number"] = len(used_reflections) miller_span = miller.index_span(used_reflections["miller_index"]) min_h, min_k, min_l = miller_span.min() max_h, max_k, max_l = miller_span.max() block["_diffrn_reflns_limit_h_min"] = min_h block["_diffrn_reflns_limit_h_max"] = max_h block["_diffrn_reflns_limit_k_min"] = min_k block["_diffrn_reflns_limit_k_max"] = max_k block["_diffrn_reflns_limit_l_min"] = min_l block["_diffrn_reflns_limit_l_max"] = max_l block["_diffrn_reflns_theta_min"] = ( flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2 ) block["_diffrn_reflns_theta_max"] = ( flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2 ) cif = iotbx.cif.model.cif() cif["two_theta_refine"] = block with open(filename, "w") as fh: cif.show(out=fh)
def generate_mmcif(crystal, refiner, filename): logger.info("Saving mmCIF information to %s", filename) block = iotbx.cif.model.block() block["_audit.revision_id"] = 1 block["_audit.creation_method"] = dials_version() block["_audit.creation_date"] = datetime.date.today().isoformat() block["_entry.id"] = "two_theta_refine" # block["_publ.section_references"] = '' # once there is a reference... block["_cell.entry_id"] = "two_theta_refine" for cell, esd, cifname in zip( crystal.get_unit_cell().parameters(), crystal.get_cell_parameter_sd(), [ "length_a", "length_b", "length_c", "angle_alpha", "angle_beta", "angle_gamma", ], ): block["_cell.%s" % cifname] = "%.8f" % cell block["_cell.%s_esd" % cifname] = "%.8f" % esd block["_cell.volume"] = "%f" % crystal.get_unit_cell().volume() block["_cell.volume_esd"] = "%f" % crystal.get_cell_volume_sd() used_reflections = refiner.get_matches() block["_cell_measurement.entry_id"] = "two_theta_refine" block["_cell_measurement.reflns_used"] = len(used_reflections) block["_cell_measurement.theta_min"] = ( flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2) block["_cell_measurement.theta_max"] = ( flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2) block["_exptl_crystal.id"] = 1 block["_diffrn.id"] = "two_theta_refine" block["_diffrn.crystal_id"] = 1 block["_diffrn_reflns.diffrn_id"] = "two_theta_refine" block["_diffrn_reflns.number"] = len(used_reflections) miller_span = miller.index_span(used_reflections["miller_index"]) min_h, min_k, min_l = miller_span.min() max_h, max_k, max_l = miller_span.max() block["_diffrn_reflns.limit_h_min"] = min_h block["_diffrn_reflns.limit_h_max"] = max_h block["_diffrn_reflns.limit_k_min"] = min_k block["_diffrn_reflns.limit_k_max"] = max_k block["_diffrn_reflns.limit_l_min"] = min_l block["_diffrn_reflns.limit_l_max"] = max_l block["_diffrn_reflns.theta_min"] = ( flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2) block["_diffrn_reflns.theta_max"] = ( flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2) cif = iotbx.cif.model.cif() cif["two_theta_refine"] = block with open(filename, "w") as fh: cif.show(out=fh)
def get_min_max_xy(self, rlist): xc, yc, zc = rlist['xyzcal.px'].parts() xo, yo, zo = rlist['xyzobs.px.value'].parts() min_x = math.floor(min(flex.min(xc), flex.min(xo))) min_y = math.floor(min(flex.min(yc), flex.min(yo))) max_x = math.ceil(max(flex.max(xc), flex.max(xo))) max_y = math.ceil(max(flex.max(yc), flex.max(yo))) return min_x, max_x, min_y, max_y
def Get_Max(self, opt=0): if self.data_flex is not None: if opt != 3: self.I_Max = flex.max(self.data_flex) else: self.I_Max = flex.max(self.mask_flex) * 2 else: self.I_Max = -1 return self.I_Max
def __str__(self): return "\n".join( ( "Max. completeness (non-anom): %.2f %%" % (100 * flex.max(self.ieither_completeness)), "Max. completeness (anom): %.2f %%" % (100 * flex.max(self.iboth_completeness)), ) )
def get_min_max_xy(self, rlist): xc, yc, zc = rlist['xyzcal.px'].parts() xo, yo, zo = rlist['xyzobs.px.value'].parts() dx = xc - xo dz = zc - zo min_x = math.floor(flex.min(dx)) min_y = math.floor(flex.min(dz)) max_x = math.ceil(flex.max(dx)) max_y = math.ceil(flex.max(dz)) return min_x, max_x, min_y, max_y
def update_reflection_data(self, selection=None, block_selections=None): """control access to setting all of reflection data at once""" self._normalised_x_values = [] self._normalised_y_values = [] self._normalised_z_values = [] self._n_refl = [] normalised_x_values = self.data["x"] normalised_y_values = self.data["y"] normalised_z_values = self.data["z"] if selection: normalised_x_values = normalised_x_values.select(selection) normalised_y_values = normalised_y_values.select(selection) normalised_z_values = normalised_z_values.select(selection) """Set the normalised coordinate values and configure the smoother.""" normalised_x_values = normalised_x_values - flex.min( normalised_x_values) normalised_y_values = normalised_y_values - flex.min( normalised_y_values) normalised_z_values = normalised_z_values - flex.min( normalised_z_values) x_range = [ floor(round(flex.min(normalised_x_values), 10)), max(ceil(round(flex.max(normalised_x_values), 10)), 1), ] y_range = [ floor(round(flex.min(normalised_y_values), 10)), max(ceil(round(flex.max(normalised_y_values), 10)), 1), ] z_range = [ floor(round(flex.min(normalised_z_values), 10)), max(ceil(round(flex.max(normalised_z_values), 10)), 1), ] self._smoother = GaussianSmoother3D( x_range, self.nparam_to_val(self._n_x_params), y_range, self.nparam_to_val(self._n_y_params), z_range, self.nparam_to_val(self._n_z_params), ) if block_selections: for i, sel in enumerate(block_selections): self._normalised_x_values.append( normalised_x_values.select(sel)) self._normalised_y_values.append( normalised_y_values.select(sel)) self._normalised_z_values.append( normalised_z_values.select(sel)) self._n_refl.append(self._normalised_x_values[i].size()) else: self._normalised_x_values.append(normalised_x_values) self._normalised_y_values.append(normalised_y_values) self._normalised_z_values.append(normalised_z_values) self._n_refl.append(normalised_x_values.size())
def run(self, flags, sequence=None, observations=None, **kwargs): obs_x, obs_y = observations.centroids().px_position_xy().parts() import numpy as np H, xedges, yedges = np.histogram2d( obs_x.as_numpy_array(), obs_y.as_numpy_array(), bins=self.nbins ) H_flex = flex.double(H.flatten().astype(np.float64)) n_slots = min(int(flex.max(H_flex)), 30) hist = flex.histogram(H_flex, n_slots=n_slots) slots = hist.slots() cumulative_hist = flex.long(len(slots)) for i, slot in enumerate(slots): cumulative_hist[i] = slot if i > 0: cumulative_hist[i] += cumulative_hist[i - 1] cumulative_hist = cumulative_hist.as_double() / flex.max( cumulative_hist.as_double() ) cutoff = None gradients = flex.double() for i in range(len(slots) - 1): x1 = cumulative_hist[i] x2 = cumulative_hist[i + 1] g = (x2 - x1) / hist.slot_width() gradients.append(g) if ( cutoff is None and i > 0 and g < self.gradient_cutoff and gradients[i - 1] < self.gradient_cutoff ): cutoff = hist.slot_centers()[i - 1] - 0.5 * hist.slot_width() if cutoff is None: return flags sel = np.column_stack(np.where(H > cutoff)) for (ix, iy) in sel: flags.set_selected( ( (obs_x > xedges[ix]) & (obs_x < xedges[ix + 1]) & (obs_y > yedges[iy]) & (obs_y < yedges[iy + 1]) ), False, ) return flags
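# Sketch of the knee search above: normalise the cumulative histogram,
# then take the first slot where two consecutive gradients fall below the
# threshold. Pure Python, illustrative names only.
def knee_bin(slots, slot_width, gradient_cutoff):
    cum = []
    total = 0
    for s in slots:
        total += s
        cum.append(total)
    cum = [c / cum[-1] for c in cum]
    prev = None
    for i in range(len(slots) - 1):
        g = (cum[i + 1] - cum[i]) / slot_width
        if prev is not None and g < gradient_cutoff and prev < gradient_cutoff:
            return i - 1  # index of the slot where the curve flattened
        prev = g
    return None  # no knee found; the caller must handle this case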
def generate_mmcif(crystal, refiner, file): logger.info('Saving mmCIF information to %s' % file) from cctbx import miller import datetime import iotbx.cif.model import math block = iotbx.cif.model.block() block["_audit.creation_method"] = dials_version() block["_audit.creation_date"] = datetime.date.today().isoformat() # block["_publ.section_references"] = '' # once there is a reference... for cell, esd, cifname in zip( crystal.get_unit_cell().parameters(), crystal.get_cell_parameter_sd(), [ 'length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma' ]): block['_cell.%s' % cifname] = "%.8f" % cell block['_cell.%s_esd' % cifname] = "%.8f" % esd block['_cell.volume'] = "%f" % crystal.get_unit_cell().volume() block['_cell.volume_esd'] = "%f" % crystal.get_cell_volume_sd() used_reflections = refiner.get_matches() block['_cell_measurement.reflns_used'] = len(used_reflections) block['_cell_measurement.theta_min'] = flex.min( used_reflections['2theta_obs.rad']) * 180 / math.pi / 2 block['_cell_measurement.theta_max'] = flex.max( used_reflections['2theta_obs.rad']) * 180 / math.pi / 2 block['_diffrn_reflns.number'] = len(used_reflections) miller_span = miller.index_span(used_reflections['miller_index']) min_h, min_k, min_l = miller_span.min() max_h, max_k, max_l = miller_span.max() block['_diffrn_reflns.limit_h_min'] = min_h block['_diffrn_reflns.limit_h_max'] = max_h block['_diffrn_reflns.limit_k_min'] = min_k block['_diffrn_reflns.limit_k_max'] = max_k block['_diffrn_reflns.limit_l_min'] = min_l block['_diffrn_reflns.limit_l_max'] = max_l block['_diffrn_reflns.theta_min'] = flex.min( used_reflections['2theta_obs.rad']) * 180 / math.pi / 2 block['_diffrn_reflns.theta_max'] = flex.max( used_reflections['2theta_obs.rad']) * 180 / math.pi / 2 cif = iotbx.cif.model.cif() cif['two_theta_refine'] = block with open(file, 'w') as fh: cif.show(out=fh)
def calculate_delta_statistics(self): '''Calculate min, max, mean, and stddev for the normalized deltas''' delta_min = flex.min(self.deltas) if self.deltas.size() > 0 else float('inf') delta_max = flex.max(self.deltas) if self.deltas.size() > 0 else float('-inf') delta_sum = flex.sum(self.deltas) if self.deltas.size() > 0 else 0.0 comm = self.mpi_helper.comm MPI = self.mpi_helper.MPI # global min and max self.global_delta_min = comm.allreduce(delta_min, MPI.MIN) self.global_delta_max = comm.allreduce(delta_max, MPI.MAX) # global mean self.global_delta_count = comm.allreduce(self.deltas.size(), MPI.SUM) if self.global_delta_count < 20: raise ValueError("Too few reflections available for ev11 algorithm") global_delta_sum = comm.allreduce(delta_sum, MPI.SUM) self.global_delta_mean = global_delta_sum / self.global_delta_count # global standard deviation array_of_global_delta_means = flex.double(self.deltas.size(), self.global_delta_mean) array_of_diffs = self.deltas - array_of_global_delta_means array_of_square_diffs = array_of_diffs * array_of_diffs sum_of_square_diffs = flex.sum(array_of_square_diffs) global_sum_of_square_diffs = comm.allreduce(sum_of_square_diffs, MPI.SUM) self.global_delta_stddev = math.sqrt(global_sum_of_square_diffs / (self.global_delta_count - 1)) if self.mpi_helper.rank == 0: self.logger.main_log("Global delta statistics (count,min,max,mean,stddev): (%d,%f,%f,%f,%f)"%(self.global_delta_count, self.global_delta_min, self.global_delta_max, self.global_delta_mean, self.global_delta_stddev))
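# A serial sketch of the reductions above: each MPI allreduce is replaced
# by an explicit min/max/sum over per-rank delta lists, giving the same
# two-pass global mean and standard deviation.
import math

def global_delta_stats(per_rank_deltas):
    count = sum(len(d) for d in per_rank_deltas)
    dmin = min(min(d) for d in per_rank_deltas if d)
    dmax = max(max(d) for d in per_rank_deltas if d)
    mean = sum(sum(d) for d in per_rank_deltas) / count
    ss = sum((x - mean) ** 2 for d in per_rank_deltas for x in d)
    return count, dmin, dmax, mean, math.sqrt(ss / (count - 1))

print(global_delta_stats([[1.0, 2.0], [3.0], [4.0, 5.0]]))
# -> (5, 1.0, 5.0, 3.0, 1.58...)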
def profile2d(p, vmin=None, vmax=None): from dials.array_family import flex import string if vmin is None: vmin = flex.min(p) if vmax is None: vmax = flex.max(p) assert(vmax >= vmin) dv = vmax - vmin if dv == 0: c = 0 m = 0 else: m = 35.0 / dv c = -m * vmin lookup = string.digits + string.ascii_uppercase ny, nx = p.all() text = '' for j in range(ny): for i in range(nx): v = int(m * p[j,i] + c) if v < 0: v = 0 elif v > 35: v = 35 t = lookup[v] text += t + ' ' text += '\n' return text
def fix_xy(reflections_in, reflections_out): reflections = pickle.load(open(reflections_in, "rb")) # validate that input is from 60 panel detector, assumed to be # 5 x 12 configuration assert flex.max(reflections["panel"]) == 59 delta_x = 487 + 7 delta_y = 195 + 17 panel_x = reflections["panel"].as_int() % 5 panel_y = reflections["panel"].as_int() / 5 x_offset = delta_x * panel_x y_offset = delta_y * panel_y # apply fixes x, y, z = reflections["xyzobs.px.value"].parts() x += x_offset.as_double() y += y_offset.as_double() reflections["xyzobs.px.value"] = flex.vec3_double(x, y, z) x, y, z = reflections["xyzcal.px"].parts() x += x_offset.as_double() y += y_offset.as_double() reflections["xyzcal.px"] = flex.vec3_double(x, y, z) # save - should probably do this "properly" pickle.dump(reflections, open(reflections_out, "wb")) return
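# The offsets above assume a 5-wide, 12-high panel grid numbered row-major
# in blocks of five, with fixed gaps between panels. The arithmetic in
# isolation (constants copied from the function above):
DELTA_X = 487 + 7   # panel width plus horizontal gap (pixels)
DELTA_Y = 195 + 17  # panel height plus vertical gap (pixels)

def panel_offset(panel_id):
    return (DELTA_X * (panel_id % 5), DELTA_Y * (panel_id // 5))

print(panel_offset(0))   # (0, 0)
print(panel_offset(7))   # (988, 212)
print(panel_offset(59))  # (1976, 2332)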
def filter_ice(reflections, steps): from cctbx import miller, sgtbx, uctbx from matplotlib import pyplot as plt d_spacings = 1 / reflections["rlp"].norms() d_star_sq = uctbx.d_as_d_star_sq(d_spacings) from dials.algorithms.spot_finding.per_image_analysis import ice_rings_selection from dials.algorithms.integration import filtering ice_uc = uctbx.unit_cell((4.498, 4.498, 7.338, 90, 90, 120)) ice_sg = sgtbx.space_group_info(number=194).group() ice_generator = miller.index_generator(ice_uc, ice_sg.type(), False, flex.min(d_spacings)) ice_indices = ice_generator.to_array() ice_d_spacings = flex.sorted(ice_uc.d(ice_indices)) ice_d_star_sq = uctbx.d_as_d_star_sq(ice_d_spacings) cubic_ice_uc = uctbx.unit_cell((6.358, 6.358, 6.358, 90, 90, 90)) cubic_ice_sg = sgtbx.space_group_info(number=227).group() cubic_ice_generator = miller.index_generator(cubic_ice_uc, cubic_ice_sg.type(), False, flex.min(d_spacings)) cubic_ice_indices = cubic_ice_generator.to_array() cubic_ice_d_spacings = flex.sorted(cubic_ice_uc.d(cubic_ice_indices)) cubic_ice_d_star_sq = uctbx.d_as_d_star_sq(cubic_ice_d_spacings) import numpy widths = flex.double(numpy.geomspace(start=0.0001, stop=0.01, num=steps)) n_spots = flex.double() total_intensity = flex.double() for width in widths: d_min = flex.min(d_spacings) ice_filter = filtering.PowderRingFilter(ice_uc, ice_sg, d_min, width) ice_sel = ice_filter(d_spacings) n_spots.append(ice_sel.count(False)) if "intensity.sum.value" in reflections: total_intensity.append( flex.sum(reflections["intensity.sum.value"].select(~ice_sel))) fig, axes = plt.subplots(nrows=2, figsize=(12, 8), sharex=True) axes[0].plot(widths, n_spots, label="#spots", marker="+") if total_intensity.size(): axes[1].plot(widths, total_intensity, label="total intensity", marker="+") axes[0].set_ylabel("# spots remaining") axes[1].set_xlabel("Ice ring width (1/d^2)") axes[1].set_ylabel("Total intensity") for ax in axes: ax.set_xlim(0, flex.max(widths)) plt.savefig("ice_ring_filtering.png") plt.clf() return
def run(experiment): from scitbx import matrix from math import sqrt, exp, pi from dials.array_family import flex # Do some prediction refl = flex.reflection_table.from_predictions(experiment) # Get the geometry s0 = matrix.col(experiment.beam.get_s0()) m2 = matrix.col(experiment.goniometer.get_rotation_axis()) dx = matrix.col(experiment.detector[0].get_fast_axis()) dy = matrix.col(experiment.detector[0].get_slow_axis()) dz = matrix.col(experiment.detector[0].get_origin()) ub = matrix.col(experiment.crystal.get_A()) ra = matrix.col((ub[0], ub[3], ub[6])) rb = matrix.col((ub[1], ub[4], ub[7])) rc = matrix.col((ub[2], ub[5], ub[8])) print(ra.dot(rb)) print(ra.dot(rc)) print(rb.dot(rc)) # Orthogonal vectors to create the profile on ea = ra.normalize() eb = ea.cross(rc.normalize()) ec = ea.cross(eb) # The sigma along each axis sigma_a = 0.005 sigma_b = 0.005 sigma_c = 0.001 # The covariance matrix for the normal distribution sigma = matrix.sqr((sigma_a**2, 0, 0, 0, sigma_b**2, 0, 0, 0, sigma_c**2)) sigmam1 = sigma.inverse() s1 = matrix.col(refl["s1"][0]) rlp = s1 - s0 xc, yc, zc = refl["xyzcal.mm"][0] print(xc, yc, zc) data = flex.double(flex.grid(200, 200)) for j in range(200): for i in range(200): x = xc - 10 + 20 * i / 200.0 y = yc - 10 + 20 * j / 200.0 v = x * dx + y * dy + dz s = v * s0.length() / v.length() c = 1.0 / (sqrt((2 * pi)**3 * sigma.determinant())) sc = s0 + rlp d = -0.5 * (((s - sc).transpose() * sigmam1 * (s - sc))[0]) f = c * exp(d) data[j, i] = f print(flex.max(data)) from matplotlib import pylab, cm pylab.imshow(data.as_numpy_array(), cmap=cm.Greys) pylab.show()
def test_mtz_primitive_cell(dials_data, tmpdir): scaled_expt = dials_data("insulin_processed") / "scaled.expt" scaled_refl = dials_data("insulin_processed") / "scaled.refl" # First reindex to the primitive setting expts = ExperimentList.from_file(scaled_expt.strpath, check_format=False) cs = expts[0].crystal.get_crystal_symmetry() cb_op = cs.change_of_basis_op_to_primitive_setting() procrunner.run( [ "dials.reindex", scaled_expt.strpath, scaled_refl.strpath, 'change_of_basis_op="%s"' % cb_op, ], working_directory=tmpdir.strpath, ) # Now export the reindexed experiments/reflections procrunner.run( ["dials.export", "reindexed.expt", "reindexed.refl"], working_directory=tmpdir.strpath, ) mtz_obj = mtz.object(os.path.join(tmpdir.strpath, "scaled.mtz")) cs_primitive = cs.change_basis(cb_op) assert mtz_obj.space_group() == cs_primitive.space_group() refl = flex.reflection_table.from_file(scaled_refl.strpath) refl = refl.select(~refl.get_flags(refl.flags.bad_for_scaling, all=False)) for ma in mtz_obj.as_miller_arrays(): assert ma.crystal_symmetry().is_similar_symmetry(cs_primitive) assert ma.d_max_min() == pytest.approx( (flex.max(refl["d"]), flex.min(refl["d"])) )
def compute_processors(self): """ Compute the number of processors """ from dials.array_family import flex # Set the memory usage per processor if self.params.mp.method == "multiprocessing" and self.params.mp.nproc > 1: # Get the maximum shoebox memory max_memory = flex.max( self.jobs.shoebox_memory(self.reflections, self.params.shoebox.flatten)) # Compute expected memory usage and warn if not enough total_memory = psutil.virtual_memory().total assert (total_memory is not None and total_memory > 0 ), "psutil call appears to have given unexpected output" limit_memory = total_memory * self.params.block.max_memory_usage njobs = int(math.floor(limit_memory / max_memory)) if njobs < 1: raise RuntimeError(""" Not enough memory to run integration jobs. Possible solutions include increasing the percentage of memory allowed for shoeboxes or decreasing the block size. Total system memory: %g GB Limit shoebox memory: %g GB Max shoebox memory: %g GB """ % (total_memory / 1e9, limit_memory / 1e9, max_memory / 1e9)) else: self.params.mp.nproc = min(self.params.mp.nproc, njobs) self.params.block.max_memory_usage /= self.params.mp.nproc
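# The processor cap above, as a standalone helper (hypothetical, for
# illustration): the allowed memory is a fraction of total system memory,
# and the job count is the largest whole number of worst-case shoeboxes
# that fits within it.
import math

def processors_for_memory(nproc_requested, total_memory, max_fraction, max_shoebox_memory):
    limit = total_memory * max_fraction
    njobs = int(math.floor(limit / max_shoebox_memory))
    if njobs < 1:
        raise RuntimeError("Not enough memory to run integration jobs")
    return min(nproc_requested, njobs)

# 8 requested processors, 16 GB RAM, 75% allowed, 3 GB worst-case shoebox
print(processors_for_memory(8, 16e9, 0.75, 3e9))  # -> 4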
def per_image(self): """Set one block per image for all experiments""" self._create_block_columns() # get observed phi in radians phi_obs = self._reflections['xyzobs.mm.value'].parts()[2] for iexp, exp in enumerate(self._experiments): sel = self._reflections['id'] == iexp isel = sel.iselection() exp_phi = phi_obs.select(isel) # convert phi to integer frames frames = exp.scan.get_array_index_from_angle(exp_phi, deg=False) frames = flex.floor(frames).iround() start, stop = flex.min(frames), flex.max(frames) frame_range = range(start, stop + 1) for f_num, f in enumerate(frame_range): sub_isel = isel.select(frames == f) self._reflections['block'].set_selected(sub_isel, f_num) self._reflections['block_centre'].set_selected(sub_isel, f + 0.5) return self._reflections
def calculate_intensity_bin_limits(self): '''Calculate the minimum and maximum values of the mean intensities for each HKL''' count = self.work_table.size() mean_intensity_min = flex.min( self.work_table['biased_mean']) if count > 0 else float('inf') mean_intensity_max = flex.max( self.work_table['biased_mean']) if count > 0 else float('-inf') if count > 0: self.logger.log( "Using %d multiply-measured HKLs; mean intensity (min,max): (%f,%f)" % (count, mean_intensity_min, mean_intensity_max)) else: self.logger.log("No multiply-measured HKLs available") comm = self.mpi_helper.comm MPI = self.mpi_helper.MPI global_mean_intensity_min = comm.allreduce(mean_intensity_min, MPI.MIN) global_mean_intensity_max = comm.allreduce(mean_intensity_max, MPI.MAX) self.logger.log("Global mean intensity (min,max): (%f,%f)" % (global_mean_intensity_min, global_mean_intensity_max)) self.intensity_bin_limits = np.linspace(global_mean_intensity_min, global_mean_intensity_max, number_of_intensity_bins + 1) self.intensity_bin_limits[0] = float('-inf') self.intensity_bin_limits[len(self.intensity_bin_limits) - 1] = float('inf')
def plot_cdf_manually(self, reflections, panel = None, ax = None, bounds = None): colors = ['blue', 'green'] if ax is None: fig = plt.figure() ax = fig.add_subplot(111) r = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms() h = flex.histogram(r) sigma = h.slot_centers()[list(h.slots()).index(flex.max(h.slots()))] # mode x_extent = max(r) y_extent = len(r) xobs = [i/x_extent for i in sorted(r)] yobs = [i/y_extent for i in range(y_extent)] obs = [(x, y) for x, y in zip(xobs, yobs)] ncalc = 100 xcalc = [i/ncalc for i in range(ncalc)] ycalc = [1-math.exp((-i**2)/(2*(sigma**2))) for i in xcalc] calc = [(x, y) for x, y in zip(xcalc, ycalc)] data = [flex.vec2_double(obs), flex.vec2_double(calc)] if bounds is None: ax.set_xlim((-1,1)) ax.set_ylim((-1,1)) ax.set_title("%s Outlier SP Manually"%self.params.tag) if bounds is not None: data = [self.get_bounded_data(d, bounds) for d in data] for subset,c in zip(data, colors): ax.plot(subset.parts()[0], subset.parts()[1], '-', c=c)
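# The calculated curve above is the Rayleigh CDF: if the x and y residual
# components are independent Gaussians of width sigma, |r| is Rayleigh
# distributed. Standalone form, with an illustrative sigma:
import math

def rayleigh_cdf(r, sigma):
    return 1 - math.exp(-(r ** 2) / (2 * sigma ** 2))

sigma = 0.05  # mm, e.g. the histogram mode
for r in (0.025, 0.05, 0.1, 0.15):
    print(r, round(rayleigh_cdf(r, sigma), 3))
# rayleigh_cdf(sigma, sigma) is always 1 - exp(-0.5) ~ 0.393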
def setup_binner( self, unit_cell: uctbx.unit_cell, space_group: sgtbx.space_group, n_resolution_bins: int, ) -> None: """Create a binner for the reflections contained in the table.""" ma = _reflection_table_to_iobs(self.as_reflection_table(), unit_cell, space_group) # need d star sq step d_star_sq = ma.d_star_sq().data() d_star_sq_min = flex.min(d_star_sq) d_star_sq_max = flex.max(d_star_sq) span = d_star_sq_max - d_star_sq_min relative_tolerance = 1e-6 d_star_sq_max += span * relative_tolerance d_star_sq_min -= span * relative_tolerance # Avoid a zero-size step that would otherwise anger the d_star_sq_step binner. step = max((d_star_sq_max - d_star_sq_min) / n_resolution_bins, 0.004) self.binner = ma.setup_binner_d_star_sq_step( auto_binning=False, d_max=uctbx.d_star_sq_as_d(d_star_sq_max), d_min=uctbx.d_star_sq_as_d(d_star_sq_min), d_star_sq_step=step, )
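# The binner setup above, reduced to its arithmetic (an illustrative
# helper, not the class method): pad the d*^2 range by a relative
# tolerance so boundary reflections land inside the outer bins, then
# divide into equal steps with the same 0.004 floor on the step size.
def d_star_sq_bin_edges(d_star_sq_min, d_star_sq_max, n_bins, rel_tol=1e-6):
    span = d_star_sq_max - d_star_sq_min
    lo = d_star_sq_min - span * rel_tol
    hi = d_star_sq_max + span * rel_tol
    step = max((hi - lo) / n_bins, 0.004)
    return [lo + i * step for i in range(n_bins + 1)]

print(d_star_sq_bin_edges(0.01, 0.25, 10))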
def tst_flatten(self): from dials.array_family import flex from dials.algorithms.shoebox import MaskCode for shoebox, (XC, I) in self.random_shoeboxes(10, mask=True): assert (not shoebox.flat) zs = shoebox.zsize() ys = shoebox.ysize() xs = shoebox.xsize() expected_data = flex.real(flex.grid(1, ys, xs), 0) expected_mask = flex.int(flex.grid(1, ys, xs), 0) for k in range(zs): for j in range(ys): for i in range(xs): expected_data[0, j, i] += shoebox.data[k, j, i] expected_mask[0, j, i] |= shoebox.mask[k, j, i] if (not (expected_mask[0, j, i] & MaskCode.Valid) or not (shoebox.mask[k, j, i] & MaskCode.Valid)): expected_mask[0, j, i] &= ~MaskCode.Valid shoebox.flatten() diff = expected_data.as_double() - shoebox.data.as_double() max_diff = flex.max(flex.abs(diff)) assert (max_diff < 1e-7) assert (expected_mask.all_eq(shoebox.mask)) assert (shoebox.flat) assert (shoebox.is_consistent()) print('OK')
def per_image(self): """Set one block per image for all experiments""" self._create_block_columns() # get observed phi in radians phi_obs = self._reflections["xyzobs.mm.value"].parts()[2] for iexp, exp in enumerate(self._experiments): sel = self._reflections["id"] == iexp isel = sel.iselection() exp_phi = phi_obs.select(isel) # convert phi to integer frames frames = exp.scan.get_array_index_from_angle(exp_phi, deg=False) frames = flex.floor(frames).iround() start, stop = flex.min(frames), flex.max(frames) frame_range = range(start, stop + 1) for f_num, f in enumerate(frame_range): sub_isel = isel.select(frames == f) f_cent = f + 0.5 self._reflections["block"].set_selected(sub_isel, f_num) self._reflections["block_centre"].set_selected(sub_isel, f_cent) return self._reflections
def test_all_expt_ids_have_expts(dials_data, tmpdir): result = procrunner.run( [ "dials.index", dials_data("vmxi_thaumatin_grid_index").join("split_07602.expt"), dials_data("vmxi_thaumatin_grid_index").join("split_07602.refl"), "stills.indexer=sequences", "indexing.method=real_space_grid_search", "space_group=P4", "unit_cell=58,58,150,90,90,90", "max_lattices=8", "beam.fix=all", "detector.fix=all", ], working_directory=tmpdir, ) assert not result.returncode and not result.stderr assert tmpdir.join("indexed.expt").check(file=1) assert tmpdir.join("indexed.refl").check(file=1) refl = flex.reflection_table.from_file(tmpdir / "indexed.refl") expt = ExperimentList.from_file(tmpdir / "indexed.expt", check_format=False) assert flex.max(refl["id"]) + 1 == len(expt)
def compose(self, reflections): """Compose scan-varying crystal parameterisations at the specified image number, for the specified experiment, for each image. Put the U, B and UB matrices in the reflection table, and cache the derivatives.""" self._prepare_for_compose(reflections) for iexp, exp in enumerate(self._experiments): # select the reflections of interest sel = reflections['id'] == iexp isel = sel.iselection() blocks = reflections['block'].select(isel) # identify which crystal parameterisations to use for this experiment xl_op = self._get_xl_orientation_parameterisation(iexp) xl_ucp = self._get_xl_unit_cell_parameterisation(iexp) # get state and derivatives for each block for block in range(flex.min(blocks), flex.max(blocks) + 1): # determine the subset of reflections this affects subsel = isel.select(blocks == block) if len(subsel) == 0: continue # get the integer frame number nearest the centre of that block frames = reflections['block_centre'].select(subsel) # can only be false if original block assignment has gone wrong assert frames.all_eq(frames[0]), \ "Failing: a block contains reflections that shouldn't be there" frame = int(floor(frames[0])) # model states at current frame U = self._get_state_from_parameterisation(xl_op, frame) if U is None: U = exp.crystal.get_U() B = self._get_state_from_parameterisation(xl_ucp, frame) if B is None: B = exp.crystal.get_B() # set states reflections['u_matrix'].set_selected(subsel, U.elems) reflections['b_matrix'].set_selected(subsel, B.elems) # set derivatives of the states if xl_op is not None: for j, dU in enumerate(xl_op.get_ds_dp()): colname = "dU_dp{0}".format(j) reflections[colname].set_selected(subsel, dU) if xl_ucp is not None: for j, dB in enumerate(xl_ucp.get_ds_dp()): colname = "dB_dp{0}".format(j) reflections[colname].set_selected(subsel, dB) # set the UB matrices for prediction reflections['ub_matrix'] = reflections['u_matrix'] * reflections['b_matrix'] return
def profile2d(p, vmin=None, vmax=None): import string from dials.array_family import flex if vmin is None: vmin = flex.min(p) if vmax is None: vmax = flex.max(p) assert vmax >= vmin dv = vmax - vmin if dv == 0: c = 0 m = 0 else: m = 35.0 / dv c = -m * vmin lookup = string.digits + string.ascii_uppercase ny, nx = p.all() text = "" for j in range(ny): for i in range(nx): v = int(m * p[j, i] + c) if v < 0: v = 0 elif v > 35: v = 35 t = lookup[v] text += t + " " text += "\n" return text
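# The 36-level character ramp used by profile2d, shown in isolation:
# values from vmin to vmax map linearly onto the digits 0-9 followed by
# A-Z (illustrative values).
import string

lookup = string.digits + string.ascii_uppercase
vmin, vmax = 0.0, 100.0
m = 35.0 / (vmax - vmin)
for v in (0, 10, 50, 99, 100):
    print(v, lookup[min(35, max(0, int(m * (v - vmin))))])
# 0 -> '0', 10 -> '3', 50 -> 'H', 99 -> 'Y', 100 -> 'Z'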
def add_imagesets_buttons(self): if 'imageset_id' not in self.parent.reflections_input: self.imgset_btn = None return n = flex.max(self.parent.reflections_input['imageset_id']) if n <= 0: self.imgset_btn = None return box = wx.BoxSizer(wx.VERTICAL) self.panel_sizer.Add(box) label = wx.StaticText(self, -1, "Imageset ids:") box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5) from wxtbx.segmentedctrl import SegmentedToggleControl, SEGBTN_HORIZONTAL self.imgset_btn = SegmentedToggleControl(self, style=SEGBTN_HORIZONTAL) for i in range(n + 1): self.imgset_btn.AddSegment(str(i)) if (self.settings.imageset_ids is not None and i in self.settings.imageset_ids): self.imgset_btn.SetValue(i + 1, True) self.imgset_btn.Realize() self.Bind(wx.EVT_TOGGLEBUTTON, self.OnChangeSettings, self.imgset_btn) box.Add(self.imgset_btn, 0, wx.ALL, 5)
def show_experiments(self, experiments, reflections, d_min=None): if d_min is not None: reciprocal_lattice_points = reflections["rlp"] d_spacings = 1 / reciprocal_lattice_points.norms() reflections = reflections.select(d_spacings > d_min) for i_expt, expt in enumerate(experiments): logger.info("model %i (%i reflections):" % (i_expt + 1, (reflections["id"] == i_expt).count(True))) logger.info(expt.crystal) indexed_flags = reflections.get_flags(reflections.flags.indexed) imageset_id = reflections["imageset_id"] rows = [["Imageset", "# indexed", "# unindexed", "% indexed"]] for i in range(flex.max(imageset_id) + 1): imageset_indexed_flags = indexed_flags.select(imageset_id == i) indexed_count = imageset_indexed_flags.count(True) unindexed_count = imageset_indexed_flags.count(False) rows.append([ str(i), str(indexed_count), str(unindexed_count), "{:.1%}".format(indexed_count / (indexed_count + unindexed_count)), ]) logger.info(dials.util.tabulate(rows, headers="firstrow"))
def determine_grid_size(rlist, grid_size=None): from libtbx import Auto panel_ids = rlist['panel'] n_panels = flex.max(panel_ids) + 1 if grid_size is not None and grid_size is not Auto: assert (grid_size[0] * grid_size[1]) >= n_panels, (n_panels) return grid_size n_cols = int(math.floor(math.sqrt(n_panels))) n_rows = int(math.ceil(n_panels / n_cols)) return n_cols, n_rows
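# The near-square layout above in isolation: columns from the floored
# square root of the panel count, rows from the ceiling of the remainder
# (so n_cols * n_rows >= n_panels always holds).
import math

def grid_for_panels(n_panels):
    n_cols = int(math.floor(math.sqrt(n_panels)))
    n_rows = int(math.ceil(n_panels / n_cols))
    return n_cols, n_rows

for n in (1, 5, 12, 60):
    print(n, grid_for_panels(n))
# 1 -> (1, 1), 5 -> (2, 3), 12 -> (3, 4), 60 -> (7, 9)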
def _find_nearest_neighbours(self, observed, predicted): ''' Find the nearest predicted spot to the observed spot. :param observed: The observed reflections :param predicted: The predicted reflections :returns: (nearest neighbours, distance) ''' from scitbx.array_family import flex from logging import warn # Get the predicted coordinates predicted_panel = predicted['panel'] predicted_xyz = predicted['xyzcal.px'] observed_panel = observed['panel'] observed_xyz = observed['xyzobs.px.value'] # Get the number of panels max_panel1 = flex.max(predicted_panel) max_panel2 = flex.max(observed_panel) max_panel = max([max_panel1, max_panel2]) nn_all = flex.size_t() dd_all = flex.double() for panel in range(max_panel+1): pind = predicted_panel == panel oind = observed_panel == panel pxyz = predicted_xyz.select(pind) oxyz = observed_xyz.select(oind) try: nn, d = self._find_nearest_neighbours_single(oxyz, pxyz) indices = flex.size_t(range(len(pind))).select(pind) indices = indices.select(flex.size_t(list(nn))) nn_all.extend(indices) dd_all.extend(d) except Exception: warn("Unable to match spots on panel %d" % panel) return nn_all, dd_all
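# A brute-force sketch of the per-panel matching above: observed and
# predicted spots are (panel, x, y) tuples, and each observed spot gets
# the index of, and distance to, the nearest prediction on its own panel
# (None where a panel has no predictions). Illustrative names only; the
# real code uses a spatial nearest-neighbour search per panel.
def nearest_per_panel(observed, predicted):
    matches = []
    for op, ox, oy in observed:
        best = None
        for i, (pp, px, py) in enumerate(predicted):
            if pp != op:
                continue
            d = ((px - ox) ** 2 + (py - oy) ** 2) ** 0.5
            if best is None or d < best[1]:
                best = (i, d)
        matches.append(best)
    return matches

print(nearest_per_panel([(0, 1.0, 1.0)], [(0, 0.0, 0.0), (0, 1.2, 0.9), (1, 1.0, 1.0)]))
# -> [(1, 0.223...)]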
def profile3d(p, vmin=None, vmax=None): ''' Print a 3D profile. ''' from dials.array_family import flex if vmin is None: vmin = flex.min(p) if vmax is None: vmax = flex.max(p) nz, ny, nx = p.all() text = [] for k in range(nz): p2 = p[k:k+1,:,:] p2.reshape(flex.grid(ny, nx)) text.append(profile2d(p2, vmin=vmin, vmax=vmax)) return '\n'.join(text)
def get_normalized_colors(self, data, vmin=None, vmax=None): if vmax is None: vmax = self.params.residuals.plot_max if vmax is None: vmax = flex.max(data) if vmin is None: vmin = flex.min(data) # initialize the color map norm = Normalize(vmin=vmin, vmax=vmax) cmap = plt.cm.get_cmap(self.params.colormap) sm = cm.ScalarMappable(norm=norm, cmap=cmap) color_vals = np.linspace(vmin, vmax, 11) sm.set_array(color_vals) # needed for colorbar return norm, cmap, color_vals, sm
def normalize_profile(self, profile): from scitbx.array_family import flex max_profile = flex.max(profile) threshold = self.threshold * max_profile sum_profile = 0.0 for i in range(len(profile)): if profile[i] > threshold: sum_profile += profile[i] else: profile[i] = 0.0 result = flex.double(flex.grid(profile.all())) for i in range(len(profile)): result[i] = profile[i] / sum_profile return result
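# The normalisation above, in isolation: pixels at or below a fraction of
# the peak are zeroed, and the survivors are rescaled to unit sum (plain
# lists for illustration).
def normalise_profile(profile, threshold_fraction):
    cutoff = threshold_fraction * max(profile)
    kept = [p if p > cutoff else 0.0 for p in profile]
    total = sum(kept)
    return [p / total for p in kept]

print(normalise_profile([0.1, 1.0, 4.0, 5.0, 0.2], threshold_fraction=0.1))
# -> [0.0, 0.1, 0.4, 0.5, 0.0]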
def plot_statistics(statistics, prefix='', degrees_per_bin=5, cutoff_anom=None, cutoff_non_anom=None): range_width = 1 range_min = flex.min(statistics.dose) - range_width range_max = flex.max(statistics.dose) n_steps = 2 + int((range_max - range_min) - range_width) x = flex.double_range(n_steps) * range_width + range_min x *= degrees_per_bin dpi = 300 import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt try: plt.style.use('ggplot') except AttributeError: pass line1, = plt.plot(x, statistics.ieither_completeness, label='Unique reflections') line2, = plt.plot(x, statistics.iboth_completeness, label='Bijvoet pairs') if cutoff_non_anom is not None: plt.plot([cutoff_non_anom, cutoff_non_anom], plt.ylim(), c=line1.get_color(), linestyle='dashed') if cutoff_anom is not None: plt.plot([cutoff_anom, cutoff_anom], plt.ylim(), c=line2.get_color(), linestyle='dotted') plt.xlim(0, plt.xlim()[1]) plt.xlabel('Scan angle (degrees)') plt.ylabel('Completeness (%)') plt.ylim(0, 1) plt.legend(loc='lower right', fontsize='small') plt.savefig('%scompleteness_vs_scan_angle.png' %prefix, dpi=dpi) plt.clf() line1, = plt.plot(x[1:], 100 * statistics.frac_new_ref, label='Unique reflections') line2, = plt.plot(x[1:], 100 * statistics.frac_new_pairs, label='Bijvoet pairs') ylim = plt.ylim() if cutoff_non_anom is not None: plt.plot([cutoff_non_anom, cutoff_non_anom], ylim, c=line1.get_color(), linestyle='dashed') if cutoff_anom is not None: plt.plot([cutoff_anom, cutoff_anom], ylim, c=line2.get_color(), linestyle='dotted') plt.ylim(ylim) plt.xlim(0, plt.xlim()[1]) plt.xlabel('Scan angle (degrees)') plt.ylabel('% new reflections per degree') plt.legend(loc='upper right', fontsize='small') plt.savefig('%spercent_new_reflections_vs_scan_angle.png' %prefix, dpi=dpi) plt.clf()
def histogram(self, reflections, title): data = reflections['difference_vector_norms'] n_slots = 100 if self.params.residuals.histogram_max is None: h = flex.histogram(data, n_slots=n_slots) else: h = flex.histogram(data.select(data <= self.params.residuals.histogram_max), n_slots=n_slots) n = len(reflections) rmsd = math.sqrt((reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).sum_sq()/n) sigma = mode = h.slot_centers()[list(h.slots()).index(flex.max(h.slots()))] mean = flex.mean(data) median = flex.median(data) print("RMSD (microns)", rmsd * 1000) print("Histogram mode (microns):", mode * 1000) print("Overall mean (microns):", mean * 1000) print("Overall median (microns):", median * 1000) mean2 = math.sqrt(math.pi/2)*sigma rmsd2 = math.sqrt(2)*sigma print("Rayleigh Mean (microns)", mean2 * 1000) print("Rayleigh RMSD (microns)", rmsd2 * 1000) r = reflections['radial_displacements'] t = reflections['transverse_displacements'] print("Overall radial RMSD (microns)", math.sqrt(flex.sum_sq(r)/len(r)) * 1000) print("Overall transverse RMSD (microns)", math.sqrt(flex.sum_sq(t)/len(t)) * 1000) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(h.slot_centers().as_numpy_array(), h.slots().as_numpy_array(), '-') vmax = self.params.residuals.plot_max if self.params.residuals.histogram_xmax is not None: ax.set_xlim((0,self.params.residuals.histogram_xmax)) if self.params.residuals.histogram_ymax is not None: ax.set_ylim((0,self.params.residuals.histogram_ymax)) plt.title(title) ax.plot((mean, mean), (0, flex.max(h.slots())), 'g-') ax.plot((mean2, mean2), (0, flex.max(h.slots())), 'g--') ax.plot((mode, mode), (0, flex.max(h.slots())), 'r-') ax.plot((rmsd, rmsd), (0, flex.max(h.slots())), 'b-') ax.plot((rmsd2, rmsd2), (0, flex.max(h.slots())), 'b--') ax.legend([r"$\Delta$XY", "MeanObs", "MeanRayl", "Mode", "RMSDObs", "RMSDRayl"]) ax.set_xlabel("(mm)") ax.set_ylabel("Count")
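# The Rayleigh relations quoted above -- mode = sigma, mean =
# sigma*sqrt(pi/2), RMS = sigma*sqrt(2) -- checked against sampled
# residuals whose x/y components are independent Gaussians:
import math
import random

sigma = 0.05
random.seed(0)
samples = [math.hypot(random.gauss(0, sigma), random.gauss(0, sigma)) for _ in range(100000)]
mean = sum(samples) / len(samples)
rms = math.sqrt(sum(s * s for s in samples) / len(samples))
print(mean, sigma * math.sqrt(math.pi / 2))  # both ~0.0627
print(rms, sigma * math.sqrt(2))             # both ~0.0707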
def compute_processors(self): ''' Compute the number of processors ''' from libtbx.introspection import machine_memory_info from math import floor from dials.array_family import flex # Set the memory usage per processor if self.params.mp.method == 'multiprocessing' and self.params.mp.nproc > 1: # Get the maximum shoebox memory max_memory = flex.max(self.jobs.shoebox_memory( self.reflections, self.params.shoebox.flatten)) # Compute percentage of max available. The function is not portable to # windows so need to add a check if the function fails. On windows no # warning will be printed memory_info = machine_memory_info() total_memory = memory_info.memory_total() if total_memory is not None: assert total_memory > 0, "Your system appears to have no memory!" limit_memory = total_memory * self.params.block.max_memory_usage njobs = int(floor(limit_memory / max_memory)) if njobs < 1: raise RuntimeError(''' Not enough memory to run integration jobs. Possible solutions include increasing the percentage of memory allowed for shoeboxes or decreasing the block size. Total system memory: %g GB Limit shoebox memory: %g GB Max shoebox memory: %g GB ''' % (total_memory/1e9, limit_memory/1e9, max_memory/1e9)) else: self.params.mp.nproc = min(self.params.mp.nproc, njobs) self.params.block.max_memory_usage /= self.params.mp.nproc
def finalize(self, data, mask): ''' Finalize the model :param data: The data array :param mask: The mask array ''' from dials.algorithms.image.filter import median_filter, mean_filter from dials.algorithms.image.fill_holes import diffusion_fill from dials.algorithms.image.fill_holes import simple_fill from dials.array_family import flex # Print some image properties sub_data = data.as_1d().select(mask.as_1d()) logger.info('Raw image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') # Transform to polar logger.info('Transforming image data to polar grid') result = self.transform.to_polar(data, mask) data = result.data() mask = result.mask() sub_data = data.as_1d().select(mask.as_1d()) logger.info('Polar image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') # Filter the image to remove noise if self.kernel_size > 0: if self.filter_type == 'median': logger.info('Applying median filter') data = median_filter(data, mask, (self.kernel_size, 0)) sub_data = data.as_1d().select(mask.as_1d()) logger.info('Median polar image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') elif self.filter_type == 'mean': logger.info('Applying mean filter') mask_as_int = mask.as_1d().as_int() mask_as_int.reshape(mask.accessor()) data = mean_filter(data, mask_as_int, (self.kernel_size, 0), 1) sub_data = data.as_1d().select(mask.as_1d()) logger.info('Mean polar image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') else: raise RuntimeError('Unknown filter_type: %s' % self.filter_type) # Fill any remaining holes logger.info("Filling holes") data = simple_fill(data, mask) data = diffusion_fill(data, mask, self.niter) mask = flex.bool(data.accessor(), True) sub_data = data.as_1d().select(mask.as_1d()) logger.info('Filled polar image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') # Transform back logger.info('Transforming image data from polar grid') result = self.transform.from_polar(data, mask) data = result.data() mask = result.mask() sub_data = data.as_1d().select(mask.as_1d()) logger.info('Final image statistics:') logger.info(' min: %d' % int(flex.min(sub_data))) logger.info(' max: %d' % int(flex.max(sub_data))) logger.info(' mean: %d' % int(flex.mean(sub_data))) logger.info('') # Fill in any discontinuities # FIXME NEED TO HANDLE DISCONTINUITY # mask = ~self.transform.discontinuity()[:-1,:-1] # data = diffusion_fill(data, mask, self.niter) # Get and apply the mask mask = self.experiment.imageset.get_mask(0)[0] mask = mask.as_1d().as_int().as_double() mask.reshape(data.accessor()) data *= mask # Return the result return data
def export_sadabs(integrated_data, experiment_list, hklout, run=0, summation=False, include_partials=False, keep_partials=False, debug=False, predict=True): '''Export data from integrated_data corresponding to experiment_list to a file for input to SADABS. FIXME probably need to make a .p4p file as well...''' from dials.array_family import flex from scitbx import matrix import math # for the moment assume (and assert) that we will convert data from exactly # one lattice... assert(len(experiment_list) == 1) # select reflections that are assigned to an experiment (i.e. non-negative id) integrated_data = integrated_data.select(integrated_data['id'] >= 0) assert max(integrated_data['id']) == 0 if not summation: assert('intensity.prf.value' in integrated_data) # strip out negative variance reflections: these should not really be there # FIXME Doing select on summation results. Should do on profile result if # present? Yes if 'intensity.prf.variance' in integrated_data: selection = integrated_data.get_flags( integrated_data.flags.integrated, all=True) else: selection = integrated_data.get_flags( integrated_data.flags.integrated_sum) integrated_data = integrated_data.select(selection) selection = integrated_data['intensity.sum.variance'] <= 0 if selection.count(True) > 0: integrated_data.del_selected(selection) logger.info('Removing %d reflections with negative variance' % \ selection.count(True)) if 'intensity.prf.variance' in integrated_data: selection = integrated_data['intensity.prf.variance'] <= 0 if selection.count(True) > 0: integrated_data.del_selected(selection) logger.info('Removing %d profile reflections with negative variance' % \ selection.count(True)) if include_partials: integrated_data = sum_partial_reflections(integrated_data) integrated_data = scale_partial_reflections(integrated_data) if 'partiality' in integrated_data: selection = integrated_data['partiality'] < 0.99 if selection.count(True) > 0 and not keep_partials: integrated_data.del_selected(selection) logger.info('Removing %d incomplete reflections' % \ selection.count(True)) experiment = experiment_list[0] assert(not experiment.scan is None) # sort data before output nref = len(integrated_data['miller_index']) indices = flex.size_t_range(nref) perm = sorted(indices, key=lambda k: integrated_data['miller_index'][k]) integrated_data = integrated_data.select(flex.size_t(perm)) assert (not experiment.goniometer is None) axis = matrix.col(experiment.goniometer.get_rotation_axis_datum()) beam = matrix.col(experiment.beam.get_direction()) s0 = matrix.col(experiment.beam.get_s0()) F = matrix.sqr(experiment.goniometer.get_fixed_rotation()) S = matrix.sqr(experiment.goniometer.get_setting_rotation()) unit_cell = experiment.crystal.get_unit_cell() if debug: m_format = '%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f' c_format = '%.2f %.2f %.2f %.2f %.2f %.2f' logger.info('Unit cell parameters from experiment: %s' % (c_format % unit_cell.parameters())) logger.info('Symmetry: %s' % experiment.crystal.get_space_group().type( ).lookup_symbol()) logger.info('Goniometer fixed matrix:\n%s' % (m_format % F.elems)) logger.info('Goniometer setting matrix:\n%s' % (m_format % S.elems)) logger.info('Goniometer scan axis:\n%6.3f%6.3f%6.3f' % (axis.elems)) # detector scaling info assert(len(experiment.detector) == 1) panel = experiment.detector[0] dims = panel.get_image_size() pixel = panel.get_pixel_size() fast_axis = matrix.col(panel.get_fast_axis()) slow_axis = matrix.col(panel.get_slow_axis()) normal = fast_axis.cross(slow_axis) detector2t = s0.angle(normal, deg=True) origin = matrix.col(panel.get_origin()) if debug: logger.info('Detector fast, slow axes:') logger.info('%6.3f%6.3f%6.3f' % (fast_axis.elems)) logger.info('%6.3f%6.3f%6.3f' % (slow_axis.elems)) logger.info('Detector two theta (degrees): %.2f' % detector2t) scl_x = 512.0 / (dims[0] * pixel[0]) scl_y = 512.0 / (dims[1] * pixel[1]) image_range = experiment.scan.get_image_range() from cctbx.array_family import flex as cflex # implicit import from cctbx.miller import map_to_asu_isym # implicit import # gather the required information for the reflection file nref = len(integrated_data['miller_index']) zdet = flex.double(integrated_data['xyzcal.px'].parts()[2]) miller_index = integrated_data['miller_index'] I = None sigI = None # export including scale factors if 'lp' in integrated_data: lp = integrated_data['lp'] else: lp = flex.double(nref, 1.0) if 'dqe' in integrated_data: dqe = integrated_data['dqe'] else: dqe = flex.double(nref, 1.0) scl = lp / dqe if summation: I = integrated_data['intensity.sum.value'] * scl V = integrated_data['intensity.sum.variance'] * scl * scl assert V.all_gt(0) sigI = flex.sqrt(V) else: I = integrated_data['intensity.prf.value'] * scl V = integrated_data['intensity.prf.variance'] * scl * scl assert V.all_gt(0) sigI = flex.sqrt(V) # figure out scaling to make sure data fit into format 2F8.2 i.e. Imax < 1e5 Imax = flex.max(I) if debug: logger.info('Maximum intensity in file: %8.2f' % Imax) if Imax > 99999.0: scale = 99999.0 / Imax I = I * scale sigI = sigI * scale phi_start, phi_range = experiment.scan.get_image_oscillation(image_range[0]) if predict: logger.info('Using scan static predicted spot locations') from dials.algorithms.spot_prediction import ScanStaticReflectionPredictor predictor = ScanStaticReflectionPredictor(experiment) UB = experiment.crystal.get_A() predictor.for_reflection_table(integrated_data, UB) if not experiment.crystal.num_scan_points: logger.info('No scan varying model: use static') static = True else: static = False fout = open(hklout, 'w') for j in range(nref): h, k, l = miller_index[j] if predict: x_mm, y_mm, z_rad = integrated_data['xyzcal.mm'][j] else: x_mm, y_mm, z_rad = integrated_data['xyzobs.mm.value'][j] z0 = integrated_data['xyzcal.px'][j][2] istol = int(round(10000 * unit_cell.stol((h, k, l)))) if predict or static: # work from a scan static model & assume perfect goniometer # FIXME maybe should work back in the option to predict spot positions UB = experiment.crystal.get_A() phi = phi_start + z0 * phi_range R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True) RUB = S * R * F * UB else: # properly compute RUB for every reflection UB = experiment.crystal.get_A_at_scan_point(int(round(z0))) phi = phi_start + z0 * phi_range R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True) RUB = S * R * F * UB x = RUB * (h, k, l) s = (s0 + x).normalize() # can also compute s based on centre of mass of spot # s = (origin + x_mm * fast_axis + y_mm * slow_axis).normalize() astar = (RUB * (1, 0, 0)).normalize() bstar = (RUB * (0, 1, 0)).normalize() cstar = (RUB * (0, 0, 1)).normalize() ix = beam.dot(astar) iy = beam.dot(bstar) iz = beam.dot(cstar) dx = s.dot(astar) dy = s.dot(bstar) dz = s.dot(cstar) x = x_mm * scl_x y = y_mm * scl_y z = (z_rad * 180 / math.pi - phi_start) / phi_range fout.write('%4d%4d%4d%8.2f%8.2f%4d%8.5f%8.5f%8.5f%8.5f%8.5f%8.5f' % \ (h, k, l, I[j], sigI[j], run, ix, dx, iy, dy, iz, dz)) fout.write('%7.2f%7.2f%8.2f%7.2f%5d\n' % (x, y, z, detector2t, istol)) fout.close() logger.info('Output %d reflections to %s' % (nref, hklout))
%d reflections to %s' % (nref, hklout)) return
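# The two fixed-width fout.write calls above define the SADABS record
# layout. For reference, a minimal standalone sketch of writing one such
# record follows; write_sadabs_record is a hypothetical helper (not a DIALS
# function) and the field values below are illustrative only.

def write_sadabs_record(fout, hkl, intensity, sigma, run, cosines,
                        xyz, two_theta, istol):
  '''hkl: Miller index; cosines: the six direction cosines
  (ix, dx, iy, dy, iz, dz) of the incident and diffracted beams with the
  reciprocal axes; xyz: scaled detector x, y and image z.'''
  h, k, l = hkl
  ix, dx, iy, dy, iz, dz = cosines
  x, y, z = xyz
  fout.write('%4d%4d%4d%8.2f%8.2f%4d%8.5f%8.5f%8.5f%8.5f%8.5f%8.5f' %
             (h, k, l, intensity, sigma, run, ix, dx, iy, dy, iz, dz))
  fout.write('%7.2f%7.2f%8.2f%7.2f%5d\n' % (x, y, z, two_theta, istol))

# usage sketch: write a single dummy record to stdout
import sys
write_sadabs_record(sys.stdout, (1, 2, 3), 1234.56, 12.3, 0,
                    (0.1, 0.9, 0.2, 0.8, 0.3, 0.7),
                    (100.0, 200.0, 5.0), 35.2, 1234)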
def paper_test(B, S):
  '''Scratch exploration of maximum-likelihood estimation of a background
  level B and signal level S from Poisson counts c_i with shapes b_i, s_i,
  via the log-likelihood L = sum_i [c_i*log(B*b_i + S*s_i) - log(c_i!)
  - B*b_i - S*s_i] and its derivatives dL/dB and dL/dS (DDB, DDS below).'''

  from numpy.random import poisson
  from math import exp

  background_shape = [1 for i in range(20)]
  signal_shape = [1 if i >= 6 and i < 15 else 0 for i in range(20)]

  background = [poisson(bb * B, 1)[0] for bb in background_shape]
  signal = [poisson(ss * S, 1)[0] for ss in signal_shape]
  # background = [bb * B for bb in background_shape]
  # signal = [ss * S for ss in signal_shape]
  total = [b + s for b, s in zip(background, signal)]

  # from matplotlib import pylab
  # pylab.plot(total)
  # pylab.plot(signal)
  # pylab.plot(background)
  # pylab.show()

  # override the simulated counts with fixed test cases (the second wins)
  total = [0, 1, 0, 0, 0, 0, 3, 1, 3, 3, 6, 6, 4, 1, 4, 0, 2, 0, 1, 1]
  total = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]

  # plot_prob_for_zero(total, background_shape, signal_shape)
  # signal_shape = [exp(-(x - 10.0)**2 / (2*3.0**2)) for x in range(20)]
  # signal_shape = [ss / sum(signal_shape) for ss in signal_shape]
  # print signal_shape
  # plot_valid(background_shape, signal_shape)

  B = 155.0 / 296.0

  from dials.array_family import flex
  from math import log, factorial

  V = flex.double(flex.grid(100, 100))
  L = flex.double(flex.grid(100, 100))
  DB = flex.double(flex.grid(100, 100))
  DS = flex.double(flex.grid(100, 100))
  P = flex.double(flex.grid(100, 100))
  Fb = sum(background_shape)
  Fs = sum(signal_shape)
  SV = []
  MASK = flex.bool(flex.grid(100, 100), False)
  for BB in range(100):
    for SS in range(100):
      B = -5.0 + (BB) / 10.0
      S = -5.0 + (SS) / 10.0
      # SV.append(S)
      VV = 0
      LL = 0
      DDB = 0
      DDS = 0
      for i in range(20):
        s = signal_shape[i]
        b = background_shape[i]
        c = total[i]
        if B*b + S*s <= 0:
          # MASK[BB, SS] = True
          LL = 0
          if b == 0:
            DDB += 0
          else:
            DDB += 1e7
          if s == 0:
            DDS += 0
          else:
            DDS += 1e7
          # break
        else:
          # VV += (b + s)*c / (B*b + S*s)
          # LL += c*log(B*b+S*s) - log(factorial(c)) - B*b - S*s
          DDB += c*b/(B*b+S*s) - b
          DDS += c*s/(B*b+S*s) - s
      VV -= (Fb + Fs)
      # print B, S, VV
      # V[BB, SS] = abs(VV)
      L[BB, SS] = LL
      DB[BB, SS] = DDB
      DS[BB, SS] = DDS

  max_ind = flex.max_index(L)
  j = max_ind // 100
  i = max_ind % 100
  print "Approx: ", (j+1) / 20.0, (i+1) / 20.0
  print "Min/Max DB: ", flex.min(DB), flex.max(DB)
  print "Min/Max DS: ", flex.min(DS), flex.max(DS)

  from matplotlib import pylab
  # pylab.imshow(flex.log(V).as_numpy_array(), extent=[0.05, 5.05, 5.05, 0.05])
  # pylab.plot(SV, V)
  # pylab.plot(SV, [0] * 100)
  # pylab.show()
  im = flex.exp(L).as_numpy_array()
  import numpy
  # im = numpy.ma.masked_array(im, mask=MASK.as_numpy_array())
  # pylab.imshow(im)#, extent=[-5.0, 5.0, 5.0, -5.0], origin='lower')
  pylab.imshow(DB.as_numpy_array(), vmin=-100, vmax=100)
  pylab.contour(DB.as_numpy_array(), levels=[0], colors=['red'])
  pylab.contour(DS.as_numpy_array(), levels=[0], colors=['black'])
  pylab.show()
  # im = numpy.ma.masked_array(DB.as_numpy_array(), mask=MASK.as_numpy_array())
  # pylab.imshow(im, extent=[-5.0, 5.0, 5.0, -5.0], vmin=-20, vmax=100)
  # pylab.show()
  # im = numpy.ma.masked_array(DS.as_numpy_array(), mask=MASK.as_numpy_array())
  # pylab.imshow(im, extent=[-5.0, 5.0, 5.0, -5.0], vmin=-20, vmax=100)
  # pylab.show()
  # exit(0)

  S1, B1 = value(total, background_shape, signal_shape)
  exit(0)

  # everything below is unreachable exploratory code, kept for reference
  try:
    S1, B1 = value(total, background_shape, signal_shape)
    print "Result:"
    print S1, B1
    exit(0)
  except Exception, e:
    raise e

  import sys
  import traceback
  traceback.print_exc()

  from dials.array_family import flex
  Fs = sum(signal_shape)
  Fb = sum(background_shape)
  Rs = flex.double(flex.grid(100, 100))
  print "-----"
  print B, S
  # from matplotlib import pylab
  # pylab.plot(total)
  # pylab.show()
  print background_shape
  print signal_shape
  print total
  from math import exp, factorial, log
  minx = -1
  miny = -1
  minr = 9999
  for BB in range(0, 100):
    for SS in range(0, 100):
      B = -10 + (BB) / 5.0
      S = -10 + (SS) / 5.0
      L = 0
      Fb2 = 0
      Fs2 = 0
      for i in range(len(total)):
        c = total[i]
        b = background_shape[i]
        s = signal_shape[i]
        # P = exp(-(B*b + S*s)) * (B*b+S*s)**c / factorial(c)
        # print P
        # if P > 0:
        #   L += log(P)
        den = B*b + S*s
        num1 = b*c
        num2 = s*c
        if den != 0:
          Fb2 += num1 / den
          Fs2 += num2 / den
      R = (Fb2 - Fb)**2 + (Fs2 - Fs)**2
      if R > 1000:
        R = 0
      # Rs[BB,SS] = L#R#Fs2 - Fs
      Rs[BB, SS] = R  # Fs2 - Fs

  from matplotlib import pylab
  pylab.imshow(flex.log(Rs).as_numpy_array(), extent=[-5, 5, 5, -5])
  pylab.show()
  exit(0)
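# For reference, a minimal self-contained sketch of the same estimation done
# directly: maximise the Poisson log-likelihood L(B, S) =
# sum_i [c_i*log(B*b_i + S*s_i) - (B*b_i + S*s_i)] over a coarse grid (the
# constant -log(c_i!) term is dropped as it does not depend on B or S).
# grid_ml_estimate is a hypothetical helper and the grid bounds are
# illustrative assumptions, not part of the code above.

def grid_ml_estimate(counts, background_shape, signal_shape, n=200):
  from math import log
  best = (None, None, float('-inf'))
  for bi in range(1, n):
    for si in range(1, n):
      B = 5.0 * bi / n   # assumed search range for the background level
      S = 20.0 * si / n  # assumed search range for the signal level
      L = 0.0
      for c, b, s in zip(counts, background_shape, signal_shape):
        mu = B * b + S * s
        if mu <= 0:
          break  # invalid (B, S) pair: zero/negative Poisson mean
        L += c * log(mu) - mu
      else:
        if L > best[2]:
          best = (B, S, L)
  return best

# usage sketch with one of the fixed test profiles from paper_test
counts = [0, 1, 0, 0, 0, 0, 3, 1, 3, 3, 6, 6, 4, 1, 4, 0, 2, 0, 1, 1]
bg = [1] * 20
sig = [1 if 6 <= i < 15 else 0 for i in range(20)]
print grid_ml_estimate(counts, bg, sig)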
def run_stills_pred_param(self, verbose=False):

  if verbose:
    print 'Testing derivatives for StillsPredictionParameterisation'
    print '========================================================'

  # Build a prediction parameterisation for the stills experiment
  pred_param = StillsPredictionParameterisation(self.stills_experiments,
      detector_parameterisations=[self.det_param],
      beam_parameterisations=[self.s0_param],
      xl_orientation_parameterisations=[self.xlo_param],
      xl_unit_cell_parameterisations=[self.xluc_param])

  # Predict the reflections in place. Must do this ahead of calculating
  # the analytical gradients so quantities like s1 are correct
  from dials.algorithms.refinement.prediction import ExperimentsPredictor
  ref_predictor = ExperimentsPredictor(self.stills_experiments)
  ref_predictor.update()
  ref_predictor.predict(self.reflections)

  # get analytical gradients
  an_grads = pred_param.get_gradients(self.reflections)

  fd_grads = self.get_fd_gradients(pred_param, ref_predictor)

  for i, (an_grad, fd_grad) in enumerate(zip(an_grads, fd_grads)):

    # compare FD with analytical calculations
    if verbose: print "\nParameter {0}: {1}".format(i, fd_grad['name'])

    for idx, name in enumerate(["dX_dp", "dY_dp", "dDeltaPsi_dp"]):
      if verbose: print name
      a = fd_grad[name]
      b = an_grad[name]

      abs_error = a - b
      denom = a + b

      fns = five_number_summary(abs_error)
      if verbose: print ("  summary of absolute errors: %9.6f %9.6f %9.6f " + \
        "%9.6f %9.6f") % fns
      assert flex.max(flex.abs(abs_error)) < 0.0003
      # largest absolute error found to be about 0.00025 for dY/dp of
      # Crystal0g_param_3. Reject outlying absolute errors and test again.
      iqr = fns[3] - fns[1]

      # skip further stats on errors with an iqr of near zero, e.g.
      # dDeltaPsi_dp for detector parameters, which are all equal to zero
      if iqr < 1.e-10:
        continue

      sel1 = abs_error < fns[3] + 1.5 * iqr
      sel2 = abs_error > fns[1] - 1.5 * iqr
      sel = sel1 & sel2
      tst = flex.max_index(flex.abs(abs_error.select(sel)))
      tst_val = abs_error.select(sel)[tst]
      n_outliers = sel.count(False)
      if verbose: print ("  {0} outliers rejected, leaving greatest " + \
        "absolute error: {1:9.6f}").format(n_outliers, tst_val)
      # largest absolute error now 0.000086 for dX/dp of Beam0Mu2
      assert abs(tst_val) < 0.00009

      # Completely skip parameters with FD gradients all zero (e.g. gradients
      # of DeltaPsi for detector parameters)
      sel1 = flex.abs(a) < 1.e-10
      if sel1.all_eq(True):
        continue

      # otherwise calculate normalised errors, by dividing absolute errors by
      # the IQR (more stable than relative error calculation)
      norm_error = abs_error / iqr
      fns = five_number_summary(norm_error)
      if verbose: print ("  summary of normalised errors: %9.6f %9.6f %9.6f " + \
        "%9.6f %9.6f") % fns
      # largest normalised error found to be about 25.7 for dY/dp of
      # Crystal0g_param_3.
      try:
        assert flex.max(flex.abs(norm_error)) < 30
      except AssertionError as e:
        e.args += ("extreme normalised error value: {0}".format(
          flex.max(flex.abs(norm_error))),)
        raise e

      # Reject outlying normalised errors and test again
      iqr = fns[3] - fns[1]
      if iqr > 0.:
        sel1 = norm_error < fns[3] + 1.5 * iqr
        sel2 = norm_error > fns[1] - 1.5 * iqr
        sel = sel1 & sel2
        tst = flex.max_index(flex.abs(norm_error.select(sel)))
        tst_val = norm_error.select(sel)[tst]
        n_outliers = sel.count(False)

        # most outliers found for dY/dp of Crystal0g_param_3 (which had
        # largest errors, so no surprise there).
        try:
          assert n_outliers < 250
        except AssertionError as e:
          e.args += ("too many outliers rejected: {0}".format(n_outliers),)
          raise e

        if verbose: print ("  {0} outliers rejected, leaving greatest " + \
          "normalised error: {1:9.6f}").format(n_outliers, tst_val)
        # largest normalised error now about -4. for dX/dp of Detector0Tau1
        assert abs(tst_val) < 4.5

  if verbose: print

  return
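# The outlier screening above is the standard Tukey-fence rule: values
# outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are rejected before re-testing. A
# minimal pure-Python sketch of that rule follows; tukey_inliers is a
# hypothetical stand-in (the test itself works with five_number_summary and
# flex selections).

def tukey_inliers(values):
  data = sorted(values)
  n = len(data)

  def quantile(q):
    # linear interpolation between order statistics
    pos = q * (n - 1)
    lo = int(pos)
    hi = min(lo + 1, n - 1)
    return data[lo] + (pos - lo) * (data[hi] - data[lo])

  q1, q3 = quantile(0.25), quantile(0.75)
  iqr = q3 - q1
  return [v for v in values if q1 - 1.5 * iqr <= v <= q3 + 1.5 * iqr]

# usage sketch: the gross outlier 5.0 is rejected
print tukey_inliers([0.1, 0.2, 0.15, 0.12, 5.0])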
def compose(self, reflections, skip_derivatives=False):
  """Compose scan-varying crystal parameterisations at the specified image
  number, for the specified experiment, for each image. Put the U, B and
  UB matrices in the reflection table, and cache the derivatives."""

  self._prepare_for_compose(reflections, skip_derivatives)

  for iexp, exp in enumerate(self._experiments):

    # select the reflections of interest
    sel = reflections['id'] == iexp
    isel = sel.iselection()

    blocks = reflections['block'].select(isel)

    # identify which parameterisations to use for this experiment
    xl_op = self._get_xl_orientation_parameterisation(iexp)
    xl_ucp = self._get_xl_unit_cell_parameterisation(iexp)
    bp = self._get_beam_parameterisation(iexp)
    dp = self._get_detector_parameterisation(iexp)

    # reset current frame cache for scan-varying parameterisations
    self._current_frame = {}

    # get state and derivatives for each block
    for block in xrange(flex.min(blocks), flex.max(blocks) + 1):

      # determine the subset of reflections this affects
      subsel = isel.select(blocks == block)
      if len(subsel) == 0:
        continue

      # get the panels hit by these reflections
      panels = reflections['panel'].select(subsel)

      # get the integer frame number nearest the centre of that block
      frames = reflections['block_centre'].select(subsel)

      # can only be false if original block assignment has gone wrong
      assert frames.all_eq(frames[0]), \
        "Failing: a block contains reflections that shouldn't be there"
      frame = int(floor(frames[0]))

      # model states at current frame
      U = self._get_state_from_parameterisation(xl_op, frame)
      if U is None: U = exp.crystal.get_U()

      B = self._get_state_from_parameterisation(xl_ucp, frame)
      if B is None: B = exp.crystal.get_B()

      s0 = self._get_state_from_parameterisation(bp, frame)
      if s0 is None: s0 = exp.beam.get_s0()

      # set states for crystal and beam
      reflections['u_matrix'].set_selected(subsel, U.elems)
      reflections['b_matrix'].set_selected(subsel, B.elems)
      reflections['s0_vector'].set_selected(subsel, s0.elems)

      # set states and derivatives for multi-panel detector
      if dp is not None and dp.is_multi_state():

        # loop through the panels in this detector
        for panel_id, _ in enumerate(exp.detector):

          # get the right subset of array indices to set for this panel
          subsel2 = subsel.select(panels == panel_id)
          if len(subsel2) == 0:
            # if no reflections intersect this panel, skip calculation
            continue

          dmat = self._get_state_from_parameterisation(dp, frame,
            multi_state_elt=panel_id)
          if dmat is None: dmat = exp.detector[panel_id].get_d_matrix()
          Dmat = exp.detector[panel_id].get_D_matrix()
          reflections['d_matrix'].set_selected(subsel2, dmat)
          reflections['D_matrix'].set_selected(subsel2, Dmat)

          if dp is not None and self._varying_detectors and not skip_derivatives:
            for j, dd in enumerate(dp.get_ds_dp(multi_state_elt=panel_id,
                                                use_none_as_null=True)):
              if dd is None: continue
              colname = "dd_dp{0}".format(j)
              reflections[colname].set_selected(subsel, dd)

      else:

        # set states and derivatives for single panel detector
        dmat = self._get_state_from_parameterisation(dp, frame)
        if dmat is None: dmat = exp.detector[0].get_d_matrix()
        Dmat = exp.detector[0].get_D_matrix()
        reflections['d_matrix'].set_selected(subsel, dmat)
        reflections['D_matrix'].set_selected(subsel, Dmat)

        if dp is not None and self._varying_detectors and not skip_derivatives:
          for j, dd in enumerate(dp.get_ds_dp(use_none_as_null=True)):
            if dd is None: continue
            colname = "dd_dp{0}".format(j)
            reflections[colname].set_selected(subsel, dd)

      # set derivatives of the states for crystal and beam
      if not skip_derivatives:
        if xl_op is not None and self._varying_xl_orientations:
          for j, dU in enumerate(xl_op.get_ds_dp(use_none_as_null=True)):
            if dU is None: continue
            colname = "dU_dp{0}".format(j)
            reflections[colname].set_selected(subsel, dU)
        if xl_ucp is not None and self._varying_xl_unit_cells:
          for j, dB in enumerate(xl_ucp.get_ds_dp(use_none_as_null=True)):
            if dB is None: continue
            colname = "dB_dp{0}".format(j)
            reflections[colname].set_selected(subsel, dB)
        if bp is not None and self._varying_beams:
          for j, ds0 in enumerate(bp.get_ds_dp(use_none_as_null=True)):
            if ds0 is None: continue
            colname = "ds0_dp{0}".format(j)
            reflections[colname].set_selected(subsel, ds0)

  # set the UB matrices for prediction
  reflections['ub_matrix'] = reflections['u_matrix'] * reflections['b_matrix']

  return
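# The final assignment above relies on elementwise multiplication of two
# flex.mat3_double columns to form one UB product per reflection. A small
# sketch of that behaviour, assuming the scitbx flex API provides
# per-element 3x3 matrix products for mat3_double (as the line above
# implies); the matrices here are illustrative toy values.

from scitbx.array_family import flex

U = flex.mat3_double([(1, 0, 0, 0, 1, 0, 0, 0, 1)] * 3)        # identity U per reflection
B = flex.mat3_double([(0.5, 0, 0, 0, 0.5, 0, 0, 0, 0.5)] * 3)  # toy B per reflection
UB = U * B  # one 3x3 matrix product per element
assert UB[0] == (0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.5)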
def spot_counts_per_image_plot(reflections, char='*', width=60, height=10):
  from dials.array_family import flex

  if len(reflections) == 0:
    return '\n'

  assert isinstance(char, basestring)
  assert len(char) == 1

  x, y, z = reflections['xyzobs.px.value'].parts()

  min_z = flex.min(z)
  max_z = flex.max(z)

  # image numbers to display on x-axis label
  xlab = (int(round(min_z + 0.5)), int(round(max_z + 0.5)))
  # estimate the total number of images
  image_count = xlab[1] - xlab[0] + 1

  z_range = max_z - min_z + 1
  if z_range <= 1:
    return '%i spots found on 1 image' % len(reflections)

  width = int(min(z_range, width))
  z_step = z_range / width
  z_bound = min_z + z_step - 0.5

  # print [round(i * 10) / 10 for i in sorted(z)]

  counts = flex.double()

  sel = (z < z_bound)
  counts.append(sel.count(True))
  # print 0, ('-', z_bound), sel.count(True)

  for i in range(1, width - 1):
    sel = ((z >= z_bound) & (z < (z_bound + z_step)))
    counts.append(sel.count(True))
    # print i, (z_bound, z_bound + z_step), sel.count(True)
    z_bound += z_step

  sel = (z >= z_bound)
  # print i + 1, (z_bound, '-'), sel.count(True)
  counts.append(sel.count(True))

  max_count = flex.max(counts)
  total_counts = flex.sum(counts)
  assert total_counts == len(z)

  counts *= (height / max_count)
  counts = counts.iround()

  rows = []
  rows.append('%i spots found on %i images (max %i / bin)' % (
    total_counts, image_count, max_count))
  for i in range(height, 0, -1):
    row = []
    for j, c in enumerate(counts):
      if c > (i - 1):
        row.append(char)
      else:
        row.append(' ')
    rows.append(''.join(row))
  padding = width - len(str(xlab[0])) - len(str(xlab[1]))
  rows.append('%i%s%i' % (xlab[0],
    (' ' if padding < 7 else 'image').center(padding), xlab[1]))
  return '\n'.join(rows)
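# A minimal usage sketch for the ASCII plot above, assuming a DIALS
# environment: build a toy reflection table with synthetic centroids spread
# over 20 images and print the histogram. The spot positions are random
# illustrative values.

from dials.array_family import flex
import random

refl = flex.reflection_table()
refl['xyzobs.px.value'] = flex.vec3_double(
  [(0.0, 0.0, random.uniform(0, 20)) for _ in range(500)])
print spot_counts_per_image_plot(refl, char='*', width=40, height=8)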
#fig = pylab.figure(dpi=300)
#pylab.imshow(scale_data.as_numpy_array(), vmin=0, vmax=2,
#             interpolation='none')
#pylab.colorbar()
#pylab.savefig("scale_%d.png" % frame)
#pylab.clf()
##pylab.show()
#exit(0)

#pylab.hist(scale_data.as_1d().select(scale_mask.as_1d()).as_numpy_array(),
#           bins=100)
#pylab.show()

# summary statistics of the scale map over the valid (masked-in) pixels
sd1 = scale_data.as_1d()
sm1 = scale_mask.as_1d()
scale_min = flex.min(sd1.select(sm1))
scale_max = flex.max(sd1.select(sm1))
scale_avr = flex.sum(sd1.select(sm1)) / sm1.count(True)

background = model_data * scale_data

reflections['shoebox'].select(indices).apply_pixel_data(
  data.as_double(), background, raw_mask, frame, 1)

subset = reflections.select(indices3)
if len(subset) > 0:
  subset.compute_summed_intensity()
  subset.compute_centroid(experiments)
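# For reference, the masked min/max/mean pattern above has a direct numpy
# equivalent. The arrays here are stand-ins, since scale_data and scale_mask
# come from the surrounding scaling code and are not defined in this
# fragment.

import numpy as np

scale = np.ones((64, 64))        # stand-in for scale_data
mask = np.zeros((64, 64), bool)  # stand-in for scale_mask
mask[16:48, 16:48] = True
vals = scale[mask]
scale_min, scale_max, scale_avr = vals.min(), vals.max(), vals.mean()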
      # counts along pixels (and allow for DQE i.e. photon passing right
      # through the detector)
      patch[(y, x)] += 1.0 * iw / scale

  cc = profile_correlation(data, patch)
  if params.show:
    print 'Simulated reflection (flattened in Z):'
    print
    for j in range(dy):
      for i in range(dx):
        print '%5d' % int(patch[(j, i)]),
      print
    print 'Correlation coefficient: %.3f isum: %.1f ' % (cc, i0)

  import numpy as np
  maxx = flex.max(all_pix.parts()[0])
  maxy = flex.max(all_pix.parts()[1])
  minx = flex.min(all_pix.parts()[0])
  miny = flex.min(all_pix.parts()[1])
  dx = (maxx - minx) / 2
  dy = (maxy - miny) / 2
  medx = minx + dx
  medy = miny + dy

  # expand the narrower axis about its centre so the plot window is square
  if maxx - minx > maxy - miny:
    miny = medy - dx
    maxy = medy + dx
  else:
    minx = medx - dy
    maxx = medx + dy
  limits = [[minx, maxx], [miny, maxy]]
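# The square-window logic above generalises to a small helper; this is a
# pure-Python sketch (square_limits is a hypothetical name, not part of the
# code above) that returns matplotlib-style [[minx, maxx], [miny, maxy]]
# limits sharing the same span on both axes, centred on the data.

def square_limits(xs, ys):
  minx, maxx = min(xs), max(xs)
  miny, maxy = min(ys), max(ys)
  half = max(maxx - minx, maxy - miny) / 2.0
  medx = (minx + maxx) / 2.0
  medy = (miny + maxy) / 2.0
  return [[medx - half, medx + half], [medy - half, medy + half]]

# usage sketch: the y-span (8) wins, so the x-axis is widened to match
print square_limits([0.0, 2.0], [1.0, 9.0])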