def _assess_scaling_model(self, tails, bfactor, secondary):
    epochs = self._sweep_handler.get_epochs()

    # set up a trial aimless run with a short cycle limit
    sc_tst = self._updated_aimless()
    sc_tst.set_cycles(5)

    sc_tst.set_hklin(self._prepared_reflections)
    sc_tst.set_intensities(PhilIndex.params.ccp4.aimless.intensities)
    sc_tst.set_hklout('temp.mtz')

    sc_tst.set_tails(tails=tails)
    sc_tst.set_bfactor(bfactor=bfactor)

    resolutions = self._resolution_limit_estimates

    if secondary:
        sc_tst.set_scaling_parameters(
            'rotation', secondary=Flags.get_aimless_secondary())
    else:
        sc_tst.set_scaling_parameters('rotation', secondary=0)

    for epoch in epochs:
        si = self._sweep_handler.get_sweep_information(epoch)
        start, end = si.get_batch_range()
        resolution = resolutions[(start, end)]
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        sc_tst.add_run(start, end, exclude=False,
                       resolution=resolution, name=sname)

    if self.get_scaler_anomalous():
        sc_tst.set_anomalous()

    try:
        sc_tst.scale()
    except RuntimeError as e:
        # a failed trial is reported as -1 so the caller can skip
        # this model rather than abort the whole comparison
        if 'scaling not converged' in str(e):
            return -1
        if 'negative scales' in str(e):
            return -1
        raise

    data_tst = sc_tst.get_summary()

    # compute the average Rmerge over all runs - this is what will
    # form the basis of the comparison between scaling models
    target = {'overall': 0, 'low': 1, 'high': 2}
    rmerges_tst = [data_tst[k]['Rmerge'][target[Flags.get_rmerge_target()]]
                   for k in data_tst]
    rmerge_tst = sum(rmerges_tst) / len(rmerges_tst)

    return rmerge_tst
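# A minimal sketch (an assumption, not xia2 API) of how a caller could
# drive _assess_scaling_model(): try all eight on/off combinations of
# the tails, bfactor and secondary corrections and keep the one giving
# the lowest average Rmerge. The consider_scaling_models() name and the
# scaler argument are illustrative.

import itertools

def consider_scaling_models(scaler):
    '''Return the (tails, bfactor, secondary) combination giving the
    lowest average Rmerge from trial scaling, or None if no trial
    converged.'''
    best_model = None
    best_rmerge = None
    for tails, bfactor, secondary in itertools.product(
            (True, False), repeat=3):
        rmerge = scaler._assess_scaling_model(
            tails=tails, bfactor=bfactor, secondary=secondary)
        if rmerge < 0:
            # trial scaling failed to converge - skip this model
            continue
        if best_rmerge is None or rmerge < best_rmerge:
            best_model, best_rmerge = (tails, bfactor, secondary), rmerge
    return best_model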
def __init__(self, decay_correction=None, partiality_correction=None,
             absorption_correction=None):
    # decay_correction, partiality_correction and absorption_correction
    # were referenced but never defined in the original __init__; they
    # are surfaced here as keyword arguments, with None meaning "use
    # the default behaviour"

    # generic things
    CCP4DriverInstance.__class__.__init__(self)

    self.set_executable(os.path.join(
        os.environ.get('CBIN', ''), 'aimless'))

    if not os.path.exists(self.get_executable()):
        raise RuntimeError('aimless binary not found')

    # run the program once with no input to capture its banner,
    # from which the version number is read
    self.start()
    self.close_wait()

    version = None

    for record in self.get_all_output():
        if '##' in record and 'AIMLESS' in record:
            version = record.split()[5]

    if not version:
        raise RuntimeError('version not found')

    Debug.write('Using version: %s' % version)

    # FIXME (i) check program exists and (ii) version is known -
    # if not then default back in the calling code to using scala.

    # the secondary lmax handling was fixed in releases after 0.5.10
    version_values = [int(v) for v in version.split('.')]

    if (version_values[1] > 5) or (version_values[1] == 5 and
                                   version_values[2] > 10):
        self._fixed_secondary_lmax = True
    else:
        self._fixed_secondary_lmax = False

    # clear all the header junk
    self.reset()

    # input and output files
    self._scalepack = False
    self._chef_unmerged = False
    self._unmerged_reflections = None
    self._xmlout = None

    # scaling parameters
    self._resolution = None
    self._resolution_by_run = {}

    # scales file for recycling
    self._scales_file = None

    # this defaults to SCALES - and is useful for when we want to
    # refine the SD parameters because we can recycle the scale
    # factors through the above interface
    self._new_scales_file = None

    # this flag indicates that the input reflections are already
    # scaled and just need merging e.g. from XDS/XSCALE.
    self._onlymerge = False

    # by default, switch this on
    if decay_correction is None:
        self._bfactor = True
    else:
        self._bfactor = decay_correction

    # this will often be wanted
    self._anomalous = False

    # by default switch this on too...
    if partiality_correction is None:
        self._tails = True
    else:
        self._tails = partiality_correction

    # the alternative for this is 'batch' i.e. no rotation
    if Flags.get_batch_scale():
        self._mode = 'batch'
    else:
        self._mode = 'rotation'

    # these are only relevant for 'rotation' mode scaling
    self._spacing = 5
    if absorption_correction is None or absorption_correction:
        self._secondary = Flags.get_aimless_secondary()
    else:
        self._secondary = 0

    self._cycles = 100
    self._brotation = None
    self._bfactor_tie = None
    self._surface_tie = None
    self._surface_link = True

    self._intensities = 'combine'

    self._project_crystal_dataset = {}
    self._runs = []

    # for adding data on merge - one dname
    self._pname = None
    self._xname = None
    self._dname = None
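# A minimal sketch of the version gate above: parsing the dotted version
# string into an integer tuple makes "newer than 0.5.10" a single
# comparison. Unlike the inline check (which only inspects the second
# and third fields), tuple comparison also treats any 1.x.y release as
# newer. aimless_newer_than_0_5_10 is an illustrative name, not xia2 API.

def aimless_newer_than_0_5_10(version):
    '''True for dotted version strings strictly newer than 0.5.10.'''
    return tuple(int(v) for v in version.split('.')) > (0, 5, 10)

assert aimless_newer_than_0_5_10('0.5.11')
assert aimless_newer_than_0_5_10('1.0.0')
assert not aimless_newer_than_0_5_10('0.5.10')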