def beginjob(self, evt, env):
    """The beginjob() function does one-time initialisation from event
    or environment data.  It is called at an XTC configure transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    super(average_mixin, self).beginjob(evt, env)

    if self.dark_img is not None and self.hot_threshold is not None:
        self.hot_threshold *= flex.median(self.dark_img.as_1d())
        self.logger.info("HOT THRESHOLD: %.2f" % self.hot_threshold)
        self.logger.info("Number of pixels above hot threshold: %i" %
                         (self.dark_img > self.hot_threshold).count(True))

    self._nfail = 0
    self._nmemb = 0

    # The time_base metadata item is a two-element array of seconds and
    # milliseconds, and it must be recorded only once.  It indicates the
    # base time, which is subtracted from all per-shot times.  Assuming
    # an average run length of ten minutes, five minutes past the start
    # of a run is a good base time.
    self._lock.acquire()
    if 'time_base' not in self._metadata.keys():
        self._metadata['time_base'] = (cspad_tbx.evt_time(evt)[0] + 5 * 60, 500)
    self._lock.release()
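# Usage sketch (not part of the original module): the hot-threshold scaling
# from beginjob() above, applied to a small synthetic dark image.  The array
# values and the relative factor of 2.0 are assumptions made for illustration;
# only flex.median() and the comparison/count idiom come from the code above.
from scitbx.array_family import flex

dark_img = flex.double([10.0, 11.0, 9.5, 250.0, 10.5, 300.0])  # fake dark frame
hot_threshold = 2.0                                            # relative factor (assumed)
hot_threshold *= flex.median(dark_img.as_1d())                 # -> absolute threshold
n_hot = (dark_img > hot_threshold).count(True)
print("HOT THRESHOLD: %.2f, pixels above: %d" % (hot_threshold, n_hot))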
def residuals(self):
    """Calculate and return the residuals"""

    cells = [xlucp.get_model().get_unit_cell().parameters()
             for xlucp in self._xlucp]
    a, b, c, aa, bb, cc = [flex.double(e) for e in zip(*cells)]
    resid_a = a - flex.median(a) if self._sel[0] else None
    resid_b = b - flex.median(b) if self._sel[1] else None
    resid_c = c - flex.median(c) if self._sel[2] else None
    resid_aa = aa - flex.median(aa) if self._sel[3] else None
    resid_bb = bb - flex.median(bb) if self._sel[4] else None
    resid_cc = cc - flex.median(cc) if self._sel[5] else None

    # collect the residuals for restrained parameters only
    resid = [e for e in [resid_a, resid_b, resid_c, resid_aa, resid_bb, resid_cc]
             if e is not None]

    # stack the columns
    R = resid[0]
    for r in resid[1:]:
        R.extend(r)

    return R
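# Usage sketch (not part of the original class): the median-centred residual
# idiom used by residuals() above, on a stand-alone flex array.  The cell
# values are invented for the example.
from scitbx.array_family import flex

a = flex.double([78.1, 78.3, 77.9, 78.0])  # hypothetical a-axis lengths (Angstrom)
resid_a = a - flex.median(a)               # deviation of each crystal from the median
print(list(resid_a))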
def common_mode(self, img, stddev, mask):
    """The common_mode() function returns the mode of the image stored
    in the array pointed to by @p img.  @p mask must be such that the
    @p stddev at the selected pixels is greater than zero.

    @param img    2D integer array of the image
    @param stddev 2D integer array of the standard deviation of each
                  pixel in @p img
    @param mask   2D Boolean array, @c True if the pixel is to be
                  included, @c False otherwise
    @return       Mode of the image, as a real number
    """

    # Flatten the image and take out inactive pixels XXX because we
    # cannot take means and medians of 2D arrays?
    img_1d = img.as_1d().select(mask.as_1d()).as_double()
    assert img_1d.size() > 0

    if self.common_mode_correction == "mean":
        # The common mode is approximated by the mean of the pixels with
        # signal-to-noise ratio less than a given threshold.  XXX Breaks
        # if the selection is empty!
        THRESHOLD_SNR = 2
        img_snr = img_1d / stddev.as_double().as_1d().select(mask.as_1d())
        return flex.mean(img_1d.select(img_snr < THRESHOLD_SNR))

    elif self.common_mode_correction == "median":
        return flex.median(img_1d)

    # Identify the common-mode correction as the peak of the histogram
    # of pixel values (the "standard" common-mode correction, as
    # previously implemented in this class).
    hist_min = -40
    hist_max = 40
    n_slots = 100

    hist = flex.histogram(img_1d, hist_min, hist_max, n_slots=n_slots)
    slots = hist.slots()
    i = flex.max_index(slots)
    common_mode = list(hist.slot_infos())[i].center()

    if self.common_mode_correction == "mode":
        return common_mode

    # Determine the common-mode correction from the peak of a single
    # Gaussian function fitted to the histogram.
    from scitbx.math.curve_fitting import single_gaussian_fit

    x = hist.slot_centers()
    y = slots.as_double()
    fit = single_gaussian_fit(x, y)
    scale, mu, sigma = fit.a, fit.b, fit.c
    self.logger.debug("fitted gaussian: mu=%.3f, sigma=%.3f" % (mu, sigma))
    mode = common_mode
    common_mode = mu
    if abs(mode - common_mode) > 1000:
        common_mode = mode  # XXX
    self.logger.debug("delta common mode corrections: %.3f" % (mode - common_mode))

    if 0 and abs(mode - common_mode) > 0:
        # if 0 and skew > 0.5:
        # view histogram and fitted gaussian
        from matplotlib import pyplot

        x_all = x
        n, bins, patches = pyplot.hist(
            img_1d.as_numpy_array(), bins=n_slots, range=(hist_min, hist_max))
        y_all = scale * flex.exp(-flex.pow2(x_all - mu) / (2 * sigma**2))
        scale = slots[flex.max_index(slots)]
        y_all *= scale / flex.max(y_all)
        pyplot.plot(x_all, y_all)
        pyplot.show()

    return common_mode
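# Usage sketch (not part of the original class): the histogram-peak ("mode")
# branch of common_mode() above, on a synthetic 1D pixel array.  The data and
# histogram limits are assumptions; flex.histogram(), slots(), max_index()
# and slot_infos() are used exactly as in the method.
from scitbx.array_family import flex

img_1d = flex.double([-2.0, -1.5, -1.0, -1.2, -0.9, 0.0, 5.0, -1.1])
hist = flex.histogram(img_1d, -40, 40, n_slots=100)
slots = hist.slots()
i = flex.max_index(slots)
common_mode = list(hist.slot_infos())[i].center()
print("common-mode estimate: %.2f" % common_mode)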
def __init__(
    self,
    intensities,
    normalisation="ml_aniso",
    lattice_symmetry_max_delta=2.0,
    d_min=libtbx.Auto,
    min_i_mean_over_sigma_mean=4,
    min_cc_half=0.6,
    relative_length_tolerance=None,
    absolute_angle_tolerance=None,
):
    """Initialise a symmetry_base object.

    Args:
      intensities (cctbx.miller.array): The intensities on which to perform
        symmetry analysis.
      normalisation (str): The normalisation method to use. Possible choices
        are 'kernel', 'quasi', 'ml_iso' and 'ml_aniso'. Set to None to switch
        off normalisation altogether.
      lattice_symmetry_max_delta (float): The maximum value of delta for
        determining the lattice symmetry using the algorithm of Le Page (1982).
      d_min (float): Optional resolution cutoff to be applied to the input
        intensities. If set to :data:`libtbx.Auto` then d_min will be
        automatically determined according to the parameters
        ``min_i_mean_over_sigma_mean`` and ``min_cc_half``.
      min_i_mean_over_sigma_mean (float): Minimum value of |I|/|sigma(I)| for
        automatic determination of the resolution cutoff.
      min_cc_half (float): Minimum value of CC1/2 for automatic determination
        of the resolution cutoff.
      relative_length_tolerance (float): Relative length tolerance used when
        checking the consistency of input unit cells against the median unit
        cell.
      absolute_angle_tolerance (float): Absolute angle tolerance used when
        checking the consistency of input unit cells against the median unit
        cell.
    """
    self.input_intensities = intensities

    uc_params = [flex.double() for i in range(6)]
    for d in self.input_intensities:
        for i, p in enumerate(d.unit_cell().parameters()):
            uc_params[i].append(p)
    self.median_unit_cell = uctbx.unit_cell(
        parameters=[flex.median(p) for p in uc_params])
    self._check_unit_cell_consistency(
        relative_length_tolerance, absolute_angle_tolerance)

    self.intensities = self.input_intensities[0]
    self.dataset_ids = flex.double(self.intensities.size(), 0)
    for i, d in enumerate(self.input_intensities[1:]):
        self.intensities = self.intensities.concatenate(
            d, assert_is_similar_symmetry=False)
        self.dataset_ids.extend(flex.double(d.size(), i + 1))

    self.intensities = self.intensities.customized_copy(
        unit_cell=self.median_unit_cell)
    self.intensities.set_observation_type_xray_intensity()
    sys_absent_flags = self.intensities.sys_absent_flags(
        integral_only=True).data()
    self.intensities = self.intensities.select(~sys_absent_flags)
    self.dataset_ids = self.dataset_ids.select(~sys_absent_flags)

    self.lattice_symmetry_max_delta = lattice_symmetry_max_delta
    self.subgroups = metric_subgroups(
        self.intensities.crystal_symmetry(),
        max_delta=self.lattice_symmetry_max_delta,
        bravais_types_only=False,
    )

    self.cb_op_inp_min = self.subgroups.cb_op_inp_minimum
    self.intensities = (
        self.intensities.change_basis(self.cb_op_inp_min)
        .customized_copy(space_group_info=sgtbx.space_group_info("P1"))
        .map_to_asu()
        .set_info(self.intensities.info()))
    self.lattice_group = (
        self.subgroups.result_groups[0]["subsym"].space_group().make_tidy())
    self.patterson_group = (
        self.lattice_group.build_derived_patterson_group().make_tidy())
    logger.info("Patterson group: %s" % self.patterson_group.info())

    sel = self.patterson_group.epsilon(self.intensities.indices()) == 1
    self.intensities = self.intensities.select(sel)
    self.dataset_ids = self.dataset_ids.select(sel)

    # Correct SDs by "typical" SD factors
    self._correct_sigmas(sd_fac=2.0, sd_b=0.0, sd_add=0.03)
    self._normalise(normalisation)
    self._resolution_filter(d_min, min_i_mean_over_sigma_mean, min_cc_half)
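# Usage sketch (not part of the original class): forming the median unit cell
# as in __init__() above, but from a plain list of unit-cell parameter tuples
# rather than miller arrays.  The cell values are fabricated for illustration;
# uctbx.unit_cell() and flex.median() are used as in the constructor.
from cctbx import uctbx
from scitbx.array_family import flex

cells = [
    (78.1, 78.1, 37.0, 90.0, 90.0, 90.0),
    (78.4, 78.2, 37.1, 90.0, 90.0, 90.0),
    (77.9, 78.0, 36.9, 90.0, 90.0, 90.0),
]
uc_params = [flex.double(p) for p in zip(*cells)]  # six per-parameter columns
median_unit_cell = uctbx.unit_cell(parameters=[flex.median(p) for p in uc_params])
print(median_unit_cell.parameters())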
def average_fn(vals):
    return flex.median(vals)
def discover_better_experimental_model(
    experiments,
    reflections,
    params,
    nproc=1,
    d_min=None,
    mm_search_scope=4.0,
    wide_search_binning=1,
    plot_search_scope=False,
):
    assert len(experiments) == len(reflections)
    assert len(experiments) > 0

    refl_lists = []
    max_cell_list = []

    # The detector/beam of the first experiment is used to define the basis
    # for the optimisation, so assert that the beam intersects with the
    # detector.
    detector = experiments[0].detector
    beam = experiments[0].beam
    beam_panel = detector.get_panel_intersection(beam.get_s0())
    if beam_panel == -1:
        raise Sorry("input beam does not intersect detector")

    for expt, refl in zip(experiments, reflections):
        refl = copy.deepcopy(refl)
        refl["imageset_id"] = flex.int(len(refl), 0)
        refl.centroid_px_to_mm([expt])
        refl.map_centroids_to_reciprocal_space([expt])

        if d_min is not None:
            d_spacings = 1 / refl["rlp"].norms()
            sel = d_spacings > d_min
            refl = refl.select(sel)

        # derive a max_cell from mm spots
        if params.max_cell is None:
            max_cell = find_max_cell(
                refl, max_cell_multiplier=1.3, step_size=45).max_cell
            max_cell_list.append(max_cell)

        if (params.max_reflections is not None
                and refl.size() > params.max_reflections):
            logger.info("Selecting subset of %i reflections for analysis"
                        % params.max_reflections)
            perm = flex.random_permutation(refl.size())
            sel = perm[:params.max_reflections]
            refl = refl.select(sel)

        refl_lists.append(refl)

    if params.max_cell is None:
        max_cell = flex.median(flex.double(max_cell_list))
    else:
        max_cell = params.max_cell

    with concurrent.futures.ProcessPoolExecutor(max_workers=nproc) as pool:
        solution_lists = []
        amax_list = []
        for result in pool.map(
                run_dps, experiments, refl_lists, itertools.repeat(max_cell)):
            if result.get("solutions"):
                solution_lists.append(result["solutions"])
                amax_list.append(result["amax"])

    if not solution_lists:
        raise Sorry("No solutions found")

    new_experiments = optimize_origin_offset_local_scope(
        experiments,
        refl_lists,
        solution_lists,
        amax_list,
        mm_search_scope=mm_search_scope,
        wide_search_binning=wide_search_binning,
        plot_search_scope=plot_search_scope,
    )
    new_detector = new_experiments[0].detector
    old_panel, old_beam_centre = detector.get_ray_intersection(beam.get_s0())
    new_panel, new_beam_centre = new_detector.get_ray_intersection(beam.get_s0())
    old_beam_centre_px = detector[old_panel].millimeter_to_pixel(old_beam_centre)
    new_beam_centre_px = new_detector[new_panel].millimeter_to_pixel(new_beam_centre)

    logger.info("Old beam centre: %.2f, %.2f mm" % old_beam_centre
                + " (%.1f, %.1f px)" % old_beam_centre_px)
    logger.info("New beam centre: %.2f, %.2f mm" % new_beam_centre
                + " (%.1f, %.1f px)" % new_beam_centre_px)
    logger.info(
        "Shift: %.2f, %.2f mm"
        % (matrix.col(old_beam_centre) - matrix.col(new_beam_centre)).elems
        + " (%.1f, %.1f px)"
        % (matrix.col(old_beam_centre_px) - matrix.col(new_beam_centre_px)).elems
    )

    return new_experiments
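# Usage sketch (not part of the original function): the robust combination of
# per-experiment max_cell estimates used above.  The values are invented;
# flex.median() over a flex.double of the candidates is taken from the code.
from scitbx.array_family import flex

max_cell_list = [120.5, 118.9, 123.0, 119.4]  # hypothetical per-experiment estimates
max_cell = flex.median(flex.double(max_cell_list))
print("max_cell: %.1f" % max_cell)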
def cosine_analysis(self):
    from scipy.cluster import hierarchy
    import scipy.spatial.distance as ssd

    X = self.coords.as_numpy_array()
    dist_mat = ssd.pdist(X, metric='cosine')
    cos_angle = 1 - ssd.squareform(dist_mat)
    linkage_matrix = hierarchy.linkage(dist_mat, method='average')
    c, coph_dists = hierarchy.cophenet(linkage_matrix, dist_mat)
    logger.debug(
        'Cophenetic correlation coefficient between hierarchical clustering '
        'and pairwise distance matrix: %.3f' % c)

    if self.params.save_plot:
        plot_matrix(cos_angle, linkage_matrix,
                    '%scos_angle_matrix.png' % self.params.plot_prefix)
        plot_dendrogram(linkage_matrix,
                        '%scos_angle_dendrogram.png' % self.params.plot_prefix)

    sym_ops = [sgtbx.rt_mx(s).new_denominators(1, 12)
               for s in self.target.get_sym_ops()]

    sym_ops_cos_angle = OrderedDict()
    for dataset_id in range(len(self.datasets)):
        ref_sym_op_id = None
        ref_cluster_id = None
        for sym_op_id in range(len(sym_ops)):
            if ref_sym_op_id is None:
                ref_sym_op_id = sym_op_id
                continue
            op = sym_ops[ref_sym_op_id].inverse().multiply(sym_ops[sym_op_id])
            op = op.new_denominators(1, 12)
            ref_idx = len(self.datasets) * ref_sym_op_id + dataset_id
            comp_idx = len(self.datasets) * sym_op_id + dataset_id
            sym_ops_cos_angle.setdefault(op, flex.double())
            sym_ops_cos_angle[op].append(cos_angle[ref_idx, comp_idx])

    # print symops sorted by average cos(angle)
    sg = copy.deepcopy(self.input_space_group)
    rows = [['symop', 'order', 'sg', 'mean(cos(angle))', 'median(cos(angle))']]
    perm = flex.sort_permutation(
        flex.double([flex.mean(ca) for ca in sym_ops_cos_angle.values()]),
        reverse=True)
    for p in perm:
        op, ca = list(sym_ops_cos_angle.items())[p]
        sg.expand_smx(op)
        rows.append((
            str(op),
            str(op.r().order()),
            str(sg.info().reference_setting()),
            '%.3f' % flex.mean(ca),
            '%.3f' % flex.median(ca),
        ))
    logger.info(
        'Analysis of cos(angle) between points corresponding to the same datasets:')
    logger.info(table_utils.format(rows, has_header=True))
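# Usage sketch (not part of the original class): the clustering steps from
# cosine_analysis() above, applied to a small synthetic coordinate matrix.
# The coordinates are invented; pdist/squareform, average-linkage clustering
# and the cophenetic correlation follow the calls used in the method.
import numpy as np
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy

X = np.array([[1.0, 0.1], [0.9, 0.2], [-1.0, 0.05], [-0.95, 0.1]])
dist_mat = ssd.pdist(X, metric='cosine')    # condensed cosine distances
cos_angle = 1 - ssd.squareform(dist_mat)    # pairwise cos(angle) matrix
linkage_matrix = hierarchy.linkage(dist_mat, method='average')
c, coph_dists = hierarchy.cophenet(linkage_matrix, dist_mat)
print('cophenetic correlation: %.3f' % c)
print(cos_angle.round(3))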
def discover_better_experimental_model(
        imagesets, spot_lists, params, dps_params, nproc=1, wide_search_binning=1):
    assert len(imagesets) == len(spot_lists)
    assert len(imagesets) > 0

    # XXX should check that all the detector and beam objects are the same
    spot_lists_mm = []
    max_cell_list = []

    detector = imagesets[0].get_detector()
    beam = imagesets[0].get_beam()

    beam_panel = detector.get_panel_intersection(beam.get_s0())
    if beam_panel == -1:
        raise Sorry("input beam does not intersect detector")

    for imageset, spots in zip(imagesets, spot_lists):
        if "imageset_id" not in spots:
            spots["imageset_id"] = spots["id"]

        spots_mm = copy.deepcopy(spots)
        spots_mm.centroid_px_to_mm(imageset.get_detector(), scan=imageset.get_scan())
        spots_mm.map_centroids_to_reciprocal_space(
            detector=imageset.get_detector(),
            beam=imageset.get_beam(),
            goniometer=imageset.get_goniometer(),
        )

        if dps_params.d_min is not None:
            d_spacings = 1 / spots_mm["rlp"].norms()
            sel = d_spacings > dps_params.d_min
            spots_mm = spots_mm.select(sel)

        # derive a max_cell from mm spots
        if params.max_cell is None:
            max_cell = find_max_cell(
                spots_mm, max_cell_multiplier=1.3, step_size=45).max_cell
            max_cell_list.append(max_cell)

        if (params.max_reflections is not None
                and spots_mm.size() > params.max_reflections):
            logger.info("Selecting subset of %i reflections for analysis"
                        % params.max_reflections)
            perm = flex.random_permutation(spots_mm.size())
            sel = perm[:params.max_reflections]
            spots_mm = spots_mm.select(sel)

        spot_lists_mm.append(spots_mm)

    if params.max_cell is None:
        max_cell = flex.median(flex.double(max_cell_list))
    else:
        max_cell = params.max_cell

    args = [(imageset, spots, max_cell, dps_params)
            for imageset, spots in zip(imagesets, spot_lists_mm)]

    results = easy_mp.parallel_map(
        func=run_dps,
        iterable=args,
        processes=nproc,
        method="multiprocessing",
        preserve_order=True,
        asynchronous=True,
        preserve_exception_message=True,
    )
    solution_lists = [r["solutions"] for r in results]
    amax_list = [r["amax"] for r in results]
    assert len(solution_lists) > 0

    detector = imagesets[0].get_detector()
    beam = imagesets[0].get_beam()

    # perform calculation
    if dps_params.indexing.improve_local_scope == "origin_offset":
        discoverer = better_experimental_model_discovery(
            imagesets,
            spot_lists_mm,
            solution_lists,
            amax_list,
            dps_params,
            wide_search_binning=wide_search_binning,
        )
        new_detector = discoverer.optimize_origin_offset_local_scope()
        old_panel, old_beam_centre = detector.get_ray_intersection(beam.get_s0())
        new_panel, new_beam_centre = new_detector.get_ray_intersection(beam.get_s0())
        old_beam_centre_px = detector[old_panel].millimeter_to_pixel(old_beam_centre)
        new_beam_centre_px = new_detector[new_panel].millimeter_to_pixel(new_beam_centre)

        logger.info("Old beam centre: %.2f, %.2f mm" % old_beam_centre
                    + " (%.1f, %.1f px)" % old_beam_centre_px)
        logger.info("New beam centre: %.2f, %.2f mm" % new_beam_centre
                    + " (%.1f, %.1f px)" % new_beam_centre_px)
        logger.info(
            "Shift: %.2f, %.2f mm"
            % (matrix.col(old_beam_centre) - matrix.col(new_beam_centre)).elems
            + " (%.1f, %.1f px)"
            % (matrix.col(old_beam_centre_px) - matrix.col(new_beam_centre_px)).elems)

        return new_detector, beam

    elif dps_params.indexing.improve_local_scope == "S0_vector":
        raise NotImplementedError()
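# Usage sketch (not part of the original function): the beam-centre shift
# arithmetic used at the end of both discover_better_experimental_model()
# variants above, with made-up millimetre coordinates.
from scitbx import matrix

old_beam_centre = (210.3, 214.8)  # mm, hypothetical
new_beam_centre = (210.9, 214.1)  # mm, hypothetical
shift = (matrix.col(old_beam_centre) - matrix.col(new_beam_centre)).elems
print("Shift: %.2f, %.2f mm" % shift)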
ac_error = 100.0 * flex.fabs(ac_new - ac[q_i]) / ac_new

# write output
for i in xrange(x[q_i].size()):
    f.write('%f %e\n' % (x[q_i][i], ac[q_i][i]))
f.write('&\n')
for i in xrange(x[q_i].size()):
    f.write('%f %e\n' % (x[q_i][i], ac_new[i]))
    f2.write('%f %e\n' % (x[q_i][i], ac_error[i]))
f.close()
f2.close()
q_count += 1

print flex.mean(ac_error), flex.median(ac_error)

q_count = 0
f = open(output_directory + 'b.dat', 'w')
for i in ring_indices:
    f.write('%s ' % q[i].split()[-1])
    for j in xrange(0, len(result[-1][q_count][0]), 2):
        f.write('%f ' % result[-1][q_count][0][j])
    f.write('\n')
    q_count += 1
f.close()

# output coefficients
q_count = 0
for q_i in ring_indices:
    f = open(output_directory + str(q_i).zfill(2) + '.dat', 'w')