def __init__(self, reflections, av_callback=flex.mean, debug=False):

    # flags to indicate at what level the analysis has been performed
    self._average_residuals = False
    self._spectral_analysis = False

    self._av_callback = av_callback

    # Remove invalid reflections
    reflections = reflections.select(~(reflections["miller_index"] == (0, 0, 0)))

    # FIXME - better way to recognise non-predictions. Can't rely on flags
    # in e.g. indexed.refl I think.
    x, y, z = reflections["xyzcal.mm"].parts()
    sel = (x == 0) & (y == 0)
    reflections = reflections.select(~sel)

    self._nexp = flex.max(reflections["id"]) + 1

    # Ensure required keys are present
    if not all(k in reflections for k in ["x_resid", "y_resid", "phi_resid"]):
        x_obs, y_obs, phi_obs = reflections["xyzobs.mm.value"].parts()
        x_cal, y_cal, phi_cal = reflections["xyzcal.mm"].parts()

        # do not wrap around multiples of 2*pi; keep the full rotation
        # from zero to differentiate repeat observations.
        TWO_PI = 2.0 * math.pi
        resid = phi_cal - (flex.fmod_positive(phi_obs, TWO_PI))
        # ensure this is the smaller of two possibilities
        resid = flex.fmod_positive((resid + math.pi), TWO_PI) - math.pi
        phi_cal = phi_obs + resid

        reflections["x_resid"] = x_cal - x_obs
        reflections["y_resid"] = y_cal - y_obs
        reflections["phi_resid"] = phi_cal - phi_obs

    # create empty results list
    self._results = []

    # first, just determine a suitable block size for analysis
    for iexp in range(self._nexp):
        ref_this_exp = reflections.select(reflections["id"] == iexp)
        if len(ref_this_exp) == 0:
            # can't do anything, just keep an empty dictionary
            self._results.append({})
            continue
        phi_obs_deg = ref_this_exp["xyzobs.mm.value"].parts()[2] * RAD2DEG
        phi_range = flex.min(phi_obs_deg), flex.max(phi_obs_deg)
        phi_width = phi_range[1] - phi_range[0]
        ideal_block_size = 1.0
        old_nblocks = 0
        while True:
            nblocks = int(phi_width // ideal_block_size)
            if nblocks == old_nblocks:
                nblocks -= 1
            nblocks = max(nblocks, 1)
            block_size = phi_width / nblocks
            nr = flex.int()
            for i in range(nblocks - 1):
                blk_start = phi_range[0] + i * block_size
                blk_end = blk_start + block_size
                sel = (phi_obs_deg >= blk_start) & (phi_obs_deg < blk_end)
                nref_in_block = sel.count(True)
                nr.append(nref_in_block)
            # include max phi in the final block
            blk_start = phi_range[0] + (nblocks - 1) * block_size
            blk_end = phi_range[1]
            sel = (phi_obs_deg >= blk_start) & (phi_obs_deg <= blk_end)
            nref_in_block = sel.count(True)
            nr.append(nref_in_block)

            # Break if there are enough reflections, otherwise increase
            # block size, unless only one block remains
            if nblocks == 1:
                break
            min_nr = flex.min(nr)
            if min_nr >= 50:
                break
            if min_nr < 5:
                fac = 2
            else:
                fac = 50 / min_nr
            ideal_block_size *= fac
            old_nblocks = nblocks

        # collect the basic data for this experiment
        self._results.append(
            {
                "block_size": block_size,
                "nref_per_block": nr,
                "nblocks": nblocks,
                "phi_range": phi_range,
            }
        )

    # keep reflections for analysis
    self._reflections = reflections

    # for debugging, write out reflections used
    if debug:
        self._reflections.as_file("centroid_analysis.refl")
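# The loop above grows the block size until every block of the scan holds at
# least 50 reflections. Below is a minimal standalone sketch of that same
# heuristic using plain Python lists instead of flex arrays; the function name
# `pick_block_size` and the `target`/`start` parameters are illustrative only
# and are not part of the original class.

def pick_block_size(phi_obs_deg, target=50, start=1.0):
    """Illustrative sketch: grow the block size (in degrees) until every block
    of the scan holds at least `target` observations, mirroring the loop in
    __init__ above."""
    phi_min, phi_max = min(phi_obs_deg), max(phi_obs_deg)
    phi_width = phi_max - phi_min
    ideal_block_size = start
    old_nblocks = 0
    while True:
        nblocks = int(phi_width // ideal_block_size)
        if nblocks == old_nblocks:
            nblocks -= 1
        nblocks = max(nblocks, 1)
        block_size = phi_width / nblocks
        # count observations per block; the final block is closed at phi_max
        nr = [0] * nblocks
        for phi in phi_obs_deg:
            i = min(int((phi - phi_min) // block_size), nblocks - 1)
            nr[i] += 1
        if nblocks == 1 or min(nr) >= target:
            return block_size, nr
        # too few observations in the emptiest block: grow the block size
        fac = 2 if min(nr) < 5 else target / min(nr)
        ideal_block_size *= fac
        old_nblocks = nblocks

# e.g. centroids spread evenly over a 90 degree scan settle on ~30 degree blocks:
# block_size, counts = pick_block_size([i * 0.5 for i in range(180)])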
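# For the phi residual computed in __init__, flex.fmod_positive is applied
# twice: once to fold the observed angle into [0, 2*pi), and once to wrap the
# difference into [-pi, pi) so the smaller of the two possible rotations is
# kept. A scalar sketch of the same arithmetic follows, with a plain-Python
# stand-in for flex.fmod_positive; the names below are illustrative only.

import math

TWO_PI = 2.0 * math.pi

def fmod_positive(x, y):
    """Remainder of x / y mapped into [0, y); scalar stand-in for flex.fmod_positive."""
    return x - y * math.floor(x / y)

def phi_residual(phi_obs, phi_cal):
    """Angular residual phi_cal - phi_obs wrapped into [-pi, pi)."""
    resid = phi_cal - fmod_positive(phi_obs, TWO_PI)
    return fmod_positive(resid + math.pi, TWO_PI) - math.pi

# e.g. an observation just below a full turn and a prediction just past zero
# give a small positive residual rather than one of nearly -2*pi:
# phi_residual(6.28, 0.01)  # ~0.013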