def run(self):
    """Run cctbx.brehm_diederichs over the configured input files and read
    the resulting reindexing operators back into self._reindexing_dict."""
    from xia2.Handlers.Streams import Debug
    Debug.write("Running cctbx.brehm_diederichs")
    self.clear_command_line()
    if self._asymmetric is not None:
        assert isinstance(self._asymmetric, int)
        self.add_command_line("asymmetric=%i" % self._asymmetric)
    # write plots to file rather than popping up interactive windows
    self.add_command_line("show_plot=False")
    self.add_command_line("save_plot=True")
    for filename in self._input_filenames:
        self.add_command_line(filename)
    self.start()
    self.close_wait()
    self.check_for_errors()
    import os
    # the program writes one "<filename> <reindex_op>" line per input file
    results_filename = os.path.join(self.get_working_directory(), "reindex.txt")
    assert os.path.exists(results_filename)
    with open(results_filename, "rb") as f:
        for line in f.readlines():
            # rsplit once from the right: the filename may contain spaces
            filename, reindex_op = line.strip().rsplit(" ", 1)
            self._reindexing_dict[os.path.abspath(filename)] = reindex_op
    return
def get_indexer_done(self):
    '''Return whether indexing is complete, first invalidating the flag
    when the prepare step has itself been invalidated.'''
    prepare_ok = self.get_indexer_prepare_done()
    if not prepare_ok:
        Debug.write('Resetting indexer done as prepare not done')
        self.set_indexer_done(False)
    return self._indxr_done
def close_wait(self):
    '''Close the standard input channel and wait for the standard output
    to stop. Note that the results can still be obtained through
    self.get_all_output()...'''
    self.close()
    # drain stdout until the child process stops producing output
    while True:
        line = self.output()
        if not line:
            break
    if self._log_file:
        # close the existing log file: also add a comment at the end containing the
        # command-line (replacing working directory & executable path for brevity)
        command_line = '%s ' % os.path.split(self._executable)[-1]
        for c in self._command_line:
            command_line += ' \'%s\'' % c.replace(
                self._working_directory + os.sep, '')
        self._log_file.write('# command line:\n')
        self._log_file.write('# %s\n' % command_line)
        self._log_file.close()
        self._log_file = None
        # echo the tail of the just-closed log into the debug stream
        from xia2.Handlers.Streams import Debug
        with open(self._log_file_name, 'rb') as f:
            lines = f.readlines()
        n = min(50, len(lines))
        Debug.write('Last %i lines of %s:' % (n, self._log_file_name))
        for line in lines[-n:]:
            # NOTE(review): file is opened 'rb' but lines are handled as
            # str -- fine on Python 2, would break on Python 3
            Debug.write(line.rstrip('\n'), strip=False)
    self.cleanup()
def run(self):
    """Run the resolution-analysis program on self._hklin and scrape the
    per-criterion resolution estimates from its output."""
    assert(self._hklin)
    cl = [self._hklin]
    # one "key=value" argument per resolution criterion / limit
    cl.append('nbins=%s' % self._nbins)
    cl.append('rmerge=%s' % self._limit_rmerge)
    cl.append('completeness=%s' % self._limit_completeness)
    cl.append('cc_half=%s' % self._limit_cc_half)
    cl.append('cc_half_significance_level=%s' % self._cc_half_significance_level)
    cl.append('isigma=%s' % self._limit_isigma)
    cl.append('misigma=%s' % self._limit_misigma)
    if self._batch_range is not None:
        # assumes _batch_range is a (first, last) pair -- TODO confirm
        cl.append('batch_range=%i,%i' % self._batch_range)
    for c in cl:
        self.add_command_line(c)
    Debug.write('Resolution analysis: %s' % (' '.join(cl)))
    self.start()
    self.close_wait()
    # scrape each "Resolution <criterion> ... <value>" line from output
    for record in self.get_all_output():
        if 'Resolution rmerge' in record:
            self._resolution_rmerge = float(record.split()[-1])
        if 'Resolution completeness' in record:
            self._resolution_completeness = float(record.split()[-1])
        if 'Resolution cc_half' in record:
            self._resolution_cc_half = float(record.split()[-1])
        if 'Resolution I/sig' in record:
            self._resolution_isigma = float(record.split()[-1])
        if 'Resolution Mn(I/sig)' in record:
            self._resolution_misigma = float(record.split()[-1])
    return
def auto_logfiler(DriverInstance, extra=None):
    '''Create a "sensible" log file for this program wrapper & connect it.

    Returns the log file path, or None when the wrapper has no working
    directory (in which case no log file is attached).'''
    working_directory = DriverInstance.get_working_directory()
    if not working_directory:
        return
    executable = os.path.split(DriverInstance.get_executable())[-1]
    number = _get_number()
    # strip Windows launcher suffixes from the program name
    for suffix in ('.bat', '.exe'):
        if executable.endswith(suffix):
            executable = executable[:-len(suffix)]
    name_parts = ['%d' % number, executable]
    if extra:
        name_parts.append('%s' % extra)
    logfile = os.path.join(working_directory, '%s.log' % '_'.join(name_parts))
    DriverInstance.set_xpid(number)
    Debug.write('Logfile: %s -> %s' % (executable, logfile))
    DriverInstance.write_log_file(logfile)
    return logfile
def _setup(self):
    """One-time environment setup: create the harvest directory, ensure a
    USER environment variable exists and allocate a scratch CCP4_SCR.

    Idempotent: subsequent calls return immediately."""
    if self._is_setup:
        return
    self._is_setup = True
    harvest_directory = self.generate_directory('Harvest')
    self.setenv('HARVESTHOME', harvest_directory)
    # create a USER environment variable, to allow harvesting
    # in Mosflm to work (hacky, I know, but it really doesn't
    # matter too much...
    if 'USER' not in os.environ:
        os.environ['USER'] = os.environ.get('USERNAME', '******')
    # define a local CCP4_SCR
    ccp4_scr = tempfile.mkdtemp()
    os.environ['CCP4_SCR'] = ccp4_scr
    Debug.write('Created CCP4_SCR: %s' % ccp4_scr)
    return
def xds_to_mtz(self):
    '''Use pointless to convert XDS file to MTZ.

    Raises RuntimeError if no XDSIN has been set.'''
    if not self._xdsin:
        # parenthesized raise: the original `raise RuntimeError, '...'`
        # form is Python-2-only syntax
        raise RuntimeError('XDSIN not set')
    self.check_hklout()
    # -c for copy - just convert the file to MTZ multirecord
    self.add_command_line('-c')
    self.start()
    if self._pname and self._xname and self._dname:
        self.input('name project %s crystal %s dataset %s' %
                   (self._pname, self._xname, self._dname))
    self.input('xdsin %s' % self._xdsin)
    if self._scale_factor:
        Debug.write('Scaling intensities by factor %e' % self._scale_factor)
        self.input('multiply %e' % self._scale_factor)
    self.close_wait()
    # FIXME need to check the status and so on here
    return
def _index_select_images_small_molecule(self):
    '''Select correct images based on image headers. This one is for
    when you have small molecule data so want more images.'''
    phi_width = self.get_phi_width()
    images = self.get_matching_images()
    # always index from the first image of the sweep
    Debug.write('Selected image %s' % images[0])
    self.add_indexer_image_wedge(images[0])
    offset = images[0] - 1
    # add an image every 15 degrees up to 90 degrees
    for j in range(6):
        image_number = offset + int(15 * (j + 1) / phi_width)
        if not image_number in images:
            # ran off the end of the sweep - stop adding images
            break
        Debug.write('Selected image %s' % image_number)
        self.add_indexer_image_wedge(image_number)
    return
def _integrate_select_images_wedges(self):
    '''Select correct images based on image headers.

    Returns a list of wedges; NOTE(review): elements are bare image
    numbers for the reference-pair case but (first, last) tuples
    otherwise -- callers must handle both.'''
    phi_width = self.get_phi_width()
    images = self.get_matching_images()
    # characterise the images - are there just two (e.g. dna-style
    # reference images) or is there a full block?
    wedges = []
    if len(images) < 3:
        # work on the assumption that this is a reference pair
        wedges.append(images[0])
        if len(images) > 1:
            wedges.append(images[1])
    else:
        # a wedge covering ~5 degrees from the start of the sweep
        block_size = min(len(images), int(math.ceil(5/phi_width)))
        Debug.write('Adding images for indexer: %d -> %d' %
                    (images[0], images[block_size - 1]))
        wedges.append((images[0], images[block_size - 1]))
        if int(90.0 / phi_width) + block_size in images:
            # assume we can add a wedge around 45 degrees as well...
            Debug.write('Adding images for indexer: %d -> %d' %
                        (int(45.0 / phi_width) + images[0],
                         int(45.0 / phi_width) + images[0] + block_size - 1))
            Debug.write('Adding images for indexer: %d -> %d' %
                        (int(90.0 / phi_width) + images[0],
                         int(90.0 / phi_width) + images[0] + block_size - 1))
            wedges.append(
                (int(45.0 / phi_width) + images[0],
                 int(45.0 / phi_width) + images[0] + block_size - 1))
            wedges.append(
                (int(90.0 / phi_width) + images[0],
                 int(90.0 / phi_width) + images[0] + block_size - 1))
        else:
            # add some half-way anyway
            first = (len(images) // 2) - (block_size // 2) + images[0] - 1
            if first > wedges[0][1]:
                last = first + block_size - 1
                Debug.write('Adding images for indexer: %d -> %d' %
                            (first, last))
                wedges.append((first, last))
            if len(images) > block_size:
                # and a wedge at the very end of the sweep
                Debug.write('Adding images for indexer: %d -> %d' %
                            (images[- block_size], images[-1]))
                wedges.append((images[- block_size], images[-1]))
    return wedges
def run(self):
    """Run dials.refine_bravais_settings on the configured experiments and
    indexed reflections, then load bravais_summary.json into
    self._bravais_summary."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running dials.refine_bravais_settings')
    self.clear_command_line()
    self.add_command_line(self._experiments_filename)
    self.add_command_line(self._indexed_filename)
    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    self.set_cpu_threads(nproc)
    self.add_command_line('nproc=%i' % nproc)
    #self.add_command_line('reflections_per_degree=10')
    if self._detector_fix:
        self.add_command_line('detector.fix=%s' % self._detector_fix)
    if self._beam_fix:
        self.add_command_line('beam.fix=%s' % self._beam_fix)
    #self.add_command_line('engine=GaussNewton')
    if self._close_to_spindle_cutoff is not None:
        self.add_command_line(
            'close_to_spindle_cutoff=%f' % self._close_to_spindle_cutoff)
    self.start()
    self.close_wait()
    self.check_for_errors()
    import json
    import os
    summary_path = os.path.join(self.get_working_directory(),
                                'bravais_summary.json')
    # close the file deterministically rather than leaking the handle
    # via loads(open(...).read())
    with open(summary_path, 'r') as f:
        self._bravais_summary = json.load(f)
    return
def add_scaler_integrater(self, integrater):
    '''Add an integrater to this scaler, to provide the input.

    Epoch values are trusted as long as they are unique; on a collision
    all epoch keys are replaced by an integer series starting at 0.'''
    # a 0 key signals that we have already fallen back to serial numbering
    if 0 in self._scalr_integraters:
        epoch = len(self._scalr_integraters)
    else:
        epoch = integrater.get_integrater_epoch()
    # FIXME This is now probably superflous?
    if epoch == 0 and self._scalr_integraters:
        # parenthesized raise: `raise RuntimeError, '...'` is Python-2-only
        raise RuntimeError('multi-sweep integrater has epoch 0')
    if epoch in self._scalr_integraters:
        Debug.write('integrater with epoch %d already exists. '
                    'will not trust epoch values' % epoch)
        # collision. Throw away all epoch keys, and replace with integer series
        self._scalr_integraters = dict(
            enumerate(self._scalr_integraters.values()))
        epoch = len(self._scalr_integraters)
    self._scalr_integraters[epoch] = integrater
    self.scaler_reset()
    return
def _index_remove_masked_regions(self):
    """Filter spots falling inside any configured untrusted rectangle out
    of SPOT.XDS, writing the survivors to <name>_masked.XDS and pointing
    the indexer payload at the new file. No-op if no rectangles are set."""
    if not PhilIndex.params.xia2.settings.untrusted_rectangle_indexing:
        return
    untrusted_rectangle_indexing = \
        PhilIndex.params.xia2.settings.untrusted_rectangle_indexing
    spot_xds = []
    removed = 0
    # read the whole spot file up front so the handle is closed promptly
    with open(self._indxr_payload['SPOT.XDS'], 'rb') as f:
        lines = f.readlines()
    for record in lines:
        if not record.strip():
            continue
        remove = False
        # SPOT.XDS records start with x, y, phi, intensity columns
        x, y, phi, i = map(float, record.split()[:4])
        for limits in untrusted_rectangle_indexing:
            # limits = (x_min, x_max, y_min, y_max), exclusive bounds
            if x > limits[0] and x < limits[1] and \
               y > limits[2] and y < limits[3]:
                removed += 1
                remove = True
                break
        if not remove:
            spot_xds.append('%s' % record)
    Debug.write('Removed %d peaks from SPOT.XDS' % removed)
    masked_spot_xds = os.path.splitext(
        self._indxr_payload['SPOT.XDS'])[0] + '_masked.XDS'
    with open(masked_spot_xds, 'wb') as f:
        f.writelines(spot_xds)
    self._indxr_payload['SPOT.XDS'] = masked_spot_xds
    return
def run(self):
    """Run xia2.integrate with the configured phil file, extra arguments
    and multiprocessing settings; raise RuntimeError on any in-band
    'Status: error' report."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running xia2.integrate')
    self.clear_command_line()
    if self._phil_file is not None:
        self.add_command_line('%s' % self._phil_file)
    for arg in self._argv:
        self.add_command_line(arg)
    if self._nproc is not None:
        self.set_cpu_threads(self._nproc)
        self.add_command_line('nproc=%i' % self._nproc)
    if self._njob is not None:
        self.add_command_line('njob=%i' % self._njob)
    if self._mp_mode is not None:
        self.add_command_line('multiprocessing.mode=%s' % self._mp_mode)
    self.start()
    self.close_wait()
    self.check_for_errors()
    # xia2.integrate reports failures in-band as "Status: error ..." lines
    for line in self.get_all_output():
        if 'Status: error' in line:
            raise RuntimeError(line.split('error')[-1].strip())
    return
def run(self):
    """Run the wrapped program over the sweep and spot files, writing an
    optimized datablock json named after this wrapper's xpid."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running %s' % self.get_executable())
    self.clear_command_line()
    self.add_command_line(self._sweep_filename)
    self.add_command_line(self._spot_filename)
    nproc = Flags.get_parallel()
    self.set_cpu_threads(nproc)
    self.add_command_line('nproc=%i' % nproc)
    for scan_range in self._scan_ranges:
        self.add_command_line('scan_range=%d,%d' % scan_range)
    if self._phil_file is not None:
        self.add_command_line("%s" % self._phil_file)
    # tag the output file with the xpid so parallel runs do not collide
    self._optimized_filename = os.path.join(
        self.get_working_directory(),
        '%d_optimized_datablock.json' % self.get_xpid())
    self.add_command_line("output.datablock=%s" % self._optimized_filename)
    self.start()
    self.close_wait()
    self.check_for_errors()
    # NOTE(review): `records` is captured but unused beyond this point
    records = self.get_all_output()
    assert os.path.exists(self._optimized_filename), self._optimized_filename
    return
def digest_template(template, images):
    '''Digest the template and image numbers to copy as much of the
    common characters in the numbers as possible to the template to
    give smaller image numbers.

    Returns (template, images, offset) after folding the common numeric
    prefix into the template and dropping any image numbered zero.'''
    length = template.count('#')
    # renamed from `format` to avoid shadowing the builtin
    fmt = '%%0%dd' % length
    strings = [fmt % i for i in images]
    offset = 0
    if len(strings) > 1:
        prefix = common_prefix(strings)
        if prefix:
            # fold the shared leading digits into the template itself
            offset = int(prefix + '0' * (length - len(prefix)))
            template = template.replace(len(prefix) * '#', prefix, 1)
            images = [int(s.replace(prefix, '', 1)) for s in strings]
    try:
        template, images, offset = ensure_no_batches_numbered_zero(
            template, images, offset)
    except RuntimeError:
        # `except RuntimeError, e` was Python-2-only and `e` was unused
        Debug.write('Throwing away image 0 from template %s' % template)
        template, images, offset = ensure_no_batches_numbered_zero(
            template, images[1:], offset)
    # NOTE(review): original had no return, silently discarding the
    # digested values -- returning them is the evident intent
    return template, images, offset
def _index_select_images(self):
    '''Select correct images based on image headers. This will in general
    use the 20 frames. N.B. only if they have good spots on them!'''
    phi_width = self.get_phi_width()
    images = self.get_matching_images()
    # N.B. now bodging this to use up to 20 frames which have decent
    # spots on, spaced from throughout the data set.
    spacing = max(1, int(len(images) // 20))
    selected = []
    for j in range(0, len(images), spacing):
        selected.append(images[j])
    for image in selected[:20]:
        # run labelit.distl on each candidate image to count good spots
        ld = LabelitDistl()
        ld.set_working_directory(self.get_working_directory())
        auto_logfiler(ld)
        ld.add_image(self.get_image_name(image))
        ld.distl()
        spots = ld.get_statistics(
            self.get_image_name(image))['spots_good']
        Debug.write('Image %d good spots %d' % (image, spots))
        if spots > 10:
            # only index from images with a decent number of good spots
            self.add_indexer_image_wedge(image)
    return
def run(self):
    """Run dials.reindex, writing xpid-tagged reindexed experiments and
    reflections files into the working directory."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running dials.reindex')
    wd = self.get_working_directory()
    self.clear_command_line()
    if self._experiments_filename is not None:
        self.add_command_line(self._experiments_filename)
        self._reindexed_experiments_filename = os.path.join(
            wd, "%d_experiments_reindexed.json" % self.get_xpid())
        self.add_command_line(
            "output.experiments=%s" % self._reindexed_experiments_filename)
    if self._indexed_filename is not None:
        self.add_command_line(self._indexed_filename)
        self._reindexed_reflections_filename = os.path.join(
            wd, "%d_reflections_reindexed.pickle" % self.get_xpid())
        self.add_command_line(
            "output.reflections=%s" % self._reindexed_reflections_filename)
    if self._reference_filename is not None:
        self.add_command_line("reference=%s" % self._reference_filename)
    if self._cb_op:
        self.add_command_line("change_of_basis_op=%s" % self._cb_op)
    if self._space_group:
        self.add_command_line("space_group=%s" % self._space_group)
    if self._hkl_offset is not None:
        # assumes _hkl_offset is an (h, k, l) triple -- TODO confirm
        self.add_command_line("hkl_offset=%i,%i,%i" % self._hkl_offset)
    self.start()
    self.close_wait()
    self.check_for_errors()
def __call__(self, indxr, images):
    """Drive mosflm findspots on the given images and scrape suggested
    RASTER / SEPARATION parameters, returned as a dict."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running mosflm to generate RASTER, SEPARATION')
    self.start()
    # describe the sweep geometry to mosflm
    self.input('template "%s"' % indxr.get_template())
    self.input('directory "%s"' % indxr.get_directory())
    self.input('beam %f %f' % indxr.get_indexer_beam_centre())
    self.input('distance %f' % indxr.get_indexer_distance())
    self.input('wavelength %f' % indxr.get_wavelength())
    self.input('findspots file spots.dat')
    for i in images:
        self.input('findspots find %d' % i)
    self.input('go')
    self.close_wait()
    p = { }
    # scrape from the output the values we want...
    for o in self.get_all_output():
        if 'parameters have been set to' in o:
            # NOTE(review): map() yields a list on Python 2 only; on
            # Python 3 callers would receive an iterator
            p['raster'] = map(int, o.split()[-5:])
        if '(currently SEPARATION' in o:
            p['separation'] = map(float, o.replace(')', '').split()[-2:])
    return p
def __call__(self, fp, images=None):
    """Run mosflm autoindexing against the sweep described by fp, from a
    pre-computed spot file if available, else from selected images;
    return the parsed index log."""
    from xia2.Handlers.Streams import Debug
    if images is None:
        images = self.select_images(fp)
    images_str = ' '.join(map(str, images))
    if self._spot_file:
        Debug.write('Running mosflm to autoindex from %s' % self._spot_file)
    else:
        Debug.write('Running mosflm to autoindex from images %s' % images_str)
    self.start()
    # describe the sweep geometry to mosflm
    self.input('template "%s"' % fp.get_template())
    self.input('directory "%s"' % fp.get_directory())
    self.input('beam %f %f' % fp.get_beam_centre())
    self.input('distance %f' % fp.get_distance())
    self.input('wavelength %f' % fp.get_wavelength())
    if self._spot_file:
        self.input('autoindex dps refine image %s file %s' %
                   (images_str, self._spot_file))
    else:
        self.input('autoindex dps refine image %s' % images_str)
    self.input('go')
    self.close_wait()
    from AutoindexHelpers import parse_index_log
    return parse_index_log(self.get_all_output())
def Correct(self):
    """Factory: construct a configured XDS CORRECT wrapper, propagating
    geometry and integration settings from this integrater."""
    correct = _Correct(params=PhilIndex.params.xds.correct)
    correct.set_working_directory(self.get_working_directory())
    correct.setup_from_imageset(self.get_imageset())
    if self.get_distance():
        correct.set_distance(self.get_distance())
    if self.get_wavelength():
        correct.set_wavelength(self.get_wavelength())
    if self.get_integrater_ice():
        correct.set_ice(self.get_integrater_ice())
    if self.get_integrater_excluded_regions():
        correct.set_excluded_regions(self.get_integrater_excluded_regions())
    if self.get_integrater_anomalous():
        correct.set_anomalous(True)
    if self.get_integrater_low_resolution() > 0.0:
        Debug.write('Using low resolution limit: %.2f' %
                    self.get_integrater_low_resolution())
        # presumably 0.0 disables the high-resolution cutoff -- confirm
        # against the _Correct wrapper
        correct.set_resolution_high(0.0)
        correct.set_resolution_low(
            self.get_integrater_low_resolution())
    auto_logfiler(correct, 'CORRECT')
    return correct
def _index_finish(self): '''Check that the autoindexing gave a convincing result, and if not (i.e. it gave a centred lattice where a primitive one would be correct) pick up the correct solution.''' if self._indxr_input_lattice: return if self.get_indexer_sweep(): if self.get_indexer_sweep().get_user_lattice(): return try: status, lattice, matrix, cell = mosflm_check_indexer_solution( self) except: return if status is False or status is None: return # ok need to update internals... self._indxr_lattice = lattice self._indxr_cell = cell Debug.write('Inserting solution: %s ' % lattice + '%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % cell) self._indxr_replace(lattice, cell) self._indxr_payload['mosflm_orientation_matrix'] = matrix return
def set_spacegroup(self, spacegroup):
    '''Handle the command-line option -spacegroup: record the spacegroup
    and derive from it the matching pointgroup and lattice.'''
    from xia2.Handlers.Syminfo import Syminfo
    from xia2.Handlers.Streams import Debug

    name = spacegroup.upper()
    # deriving these also validates the supplied spacegroup name
    derived_pointgroup = Syminfo.get_pointgroup(name)
    derived_lattice = Syminfo.get_lattice(name)

    self._spacegroup = name
    self._pointgroup = derived_pointgroup
    self._lattice = derived_lattice

    Debug.write('Derived information from spacegroup flag: %s' % name)
    Debug.write('Pointgroup: %s Lattice: %s' %
                (derived_pointgroup, derived_lattice))

    # a user-assigned spacegroup should not then be lattice-tested
    self.set_no_lattice_test(True)
    return
def memory_usage():
    '''Return the process peak RSS (ru_maxrss) or 0 on any failure.

    Modernised: the original `except exceptions.Exception, e` form is
    Python-2-only and inconsistent with the other memory_usage() in this
    codebase, which already uses `except Exception as e`.'''
    try:
        import resource
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    except Exception as e:
        Debug.write('Error getting RAM usage: %s' % str(e))
        return 0
def set_xdsin(self, xdsin):
    """Set the XDS input file.

    NOTE(review): the misfit-removal path below is currently disabled by
    the deliberate `if True` short-circuit -- xdsin is used unmodified."""
    # copy this file for debugging purposes - may take up a lot
    # of disk space so remove before release!
    if True:
        # short-circuit: everything after this block is dead code
        self._xdsin = xdsin
        return
    # now use this step to remove the misfit reflections
    # from the XDS_ASCII file.
    copyto = os.path.join(self.get_working_directory(), '%s_%s' %
                          (self.get_xpid(), os.path.split(xdsin)[-1]))
    # shutil.copyfile(xdsin, copyto)
    ignored = remove_misfits(xdsin, copyto)
    Debug.write('Copied XDSIN to %s' % copyto)
    Debug.write('Removed %d misfits' % ignored)
    self._xdsin = copyto
    return
def _integrate_finish(self):
    '''Finish the integration - if necessary performing reindexing based
    on the pointgroup and the reindexing operator.

    Returns the path of the final (possibly reindexed) MTZ file.'''
    # nothing to do when there is no reindex operator and the spacegroup
    # already matches the refined lattice...
    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == lattice_to_spacegroup(
            self.get_integrater_refiner().get_refiner_lattice()):
        return self._mosflm_hklout
    # ...or when no spacegroup has been assigned at all
    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == 0:
        return self._mosflm_hklout
    Debug.write('Reindexing to spacegroup %d (%s)' %
                (self._intgr_spacegroup_number,
                 self._intgr_reindex_operator))
    hklin = self._mosflm_hklout
    reindex = Reindex()
    reindex.set_working_directory(self.get_working_directory())
    auto_logfiler(reindex)
    reindex.set_operator(self._intgr_reindex_operator)
    if self._intgr_spacegroup_number:
        reindex.set_spacegroup(self._intgr_spacegroup_number)
    # '<input>_reindex.mtz' -- assumes hklin ends in a 4-char extension
    hklout = '%s_reindex.mtz' % hklin[:-4]
    reindex.set_hklin(hklin)
    reindex.set_hklout(hklout)
    reindex.reindex()
    return hklout
def set_refiner_finish_done(self, done=True):
    '''Record the refiner "finish" flag, logging the calling module and
    line for debugging.'''
    caller = inspect.stack()[1]
    caller_module = inspect.getmodule(caller[0])
    Debug.write('Called refiner finish done from %s %d (%s)' %
                (caller_module.__name__, caller[0].f_lineno, done))
    self._refinr_finish_done = done
def refiner_reset(self):
    '''Invalidate all refinement state flags and drop any cached result,
    forcing a complete re-run of the refinement pipeline.'''
    Debug.write('Refiner reset')
    for attr in ('_refinr_done', '_refinr_prepare_done',
                 '_refinr_finish_done'):
        setattr(self, attr, False)
    self._refinr_result = None
def _set_integrater_reindex_operator_callback(self): '''If a REMOVE.HKL file exists in the working directory, remove it...''' if os.path.exists( os.path.join(self.get_working_directory(), 'REMOVE.HKL')): os.remove(os.path.join(self.get_working_directory(), 'REMOVE.HKL')) Debug.write('Deleting REMOVE.HKL as reindex op set.') return
def scaler_reset(self):
    '''Clear every scaler "done" flag and discard the cached result so
    scaling runs again from scratch.'''
    Debug.write('Scaler reset')
    (self._scalr_done, self._scalr_prepare_done,
     self._scalr_finish_done) = (False, False, False)
    self._scalr_result = None
def set_scaler_finish_done(self, done=True):
    '''Set the scaler "finish" flag, logging which module/line called us.'''
    caller_frame = inspect.stack()[1]
    caller_module = inspect.getmodule(caller_frame[0])
    Debug.write('Called scaler finish done from %s %d (%s)' %
                (caller_module.__name__, caller_frame[0].f_lineno, done))
    self._scalr_finish_done = done
def refiner_reset(self):
    """Mark the refiner as needing a complete re-run: clear all state
    flags and throw away any cached result."""
    Debug.write("Refiner reset")
    self._refinr_done = self._refinr_prepare_done = \
        self._refinr_finish_done = False
    self._refinr_result = None
def set_refiner_done(self, done=True):
    '''Record the refiner "done" flag; log the call site for debugging.'''
    frame_record = inspect.stack()[1]
    Debug.write('Called refiner done from %s %d (%s)' % (
        inspect.getmodule(frame_record[0]).__name__,
        frame_record[0].f_lineno, done))
    self._refinr_done = done
def memory_usage():
    """Return the peak resident set size (ru_maxrss) for this process,
    or 0 if the value cannot be obtained."""
    try:
        import resource
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return usage.ru_maxrss
    except Exception as e:
        Debug.write("Error getting RAM usage: %s" % str(e))
        return 0
def get_integrater_finish_done(self):
    """Return the integrater 'finish' flag, first clearing it when
    integration itself is no longer done."""
    if self.get_integrater_done():
        return self._intgr_finish_done
    Debug.write(
        'Resetting integrater finish done as integrate not done')
    self.set_integrater_finish_done(False)
    return self._intgr_finish_done
def close_wait(self):
    """Close the standard input channel and wait for the standard output
    to stop. Note that the results can still be obtained through
    self.get_all_output()..."""
    self.close()
    # drain stdout until the child process stops producing output
    while True:
        line = self.output()
        if not line:
            break
    endtime = time.time()
    if self._log_file:
        # close the existing log file: also add a comment at the end containing the
        # command-line (replacing working directory & executable path for brevity)
        command_line = "%s " % os.path.basename(self._executable)
        for c in self._command_line:
            command_line += " '%s'" % c.replace(
                self._working_directory + os.sep, "")
        self._log_file.write("# command line:\n")
        self._log_file.write("# %s\n" % command_line)
        # append per-stage timing information when it was collected
        if hasattr(self, "_runtime_log") and self._runtime_log:
            self._log_file.write("#\n# timing information:\n")
            for k in self._runtime_log:
                self._log_file.write(
                    "# time since {name}: {time:.1f} seconds\n".format(
                        name=k, time=endtime - self._runtime_log[k]))
        self._log_file.close()
        self._log_file = None
        # echo the tail of the just-closed log into the debug stream
        with open(self._log_file_name, "rb") as f:
            lines = f.readlines()
        n = min(50, len(lines))
        Debug.write("Last %i lines of %s:" % (n, self._log_file_name))
        for line in lines[-n:]:
            # NOTE(review): file opened "rb" but lines handled as str --
            # fine on Python 2, would break on Python 3
            Debug.write(line.rstrip("\n"), strip=False)
    elif hasattr(self, "_runtime_log") and self._runtime_log:
        # no log file: record the timing data centrally instead
        if self._executable:
            command_line = "%s " % os.path.basename(self._executable)
            for c in self._command_line:
                command_line += " '%s'" % c.replace(
                    self._working_directory + os.sep, "")
        else:
            command_line = "(unknown)"
        if self._runtime_log:
            xia2.Driver.timing.record({
                "command": command_line.strip(),
                "time_end": endtime,
                "time_start": min(self._runtime_log.values()),
                "details": self._runtime_log,
            })
    self.cleanup()
def set_beam_centre(self, beam_centre):
    """Apply a user-supplied (mosflm-convention) beam centre to the
    detector model; failures are logged, not raised."""
    from dxtbx.model.detector_helpers import set_mosflm_beam_centre
    try:
        set_mosflm_beam_centre(self.get_detector(), self.get_beam_obj(),
                               beam_centre)
        # record that the beam centre provenance is now the user
        self._fp_beam_prov = "user"
    except AssertionError as e:
        Debug.write("Error setting mosflm beam centre: %s" % e)
def _input_pointgroup_scale_prepare(self):
    """Force every sweep to the user-supplied pointgroup with an identity
    ("h,k,l") reindex operation, instead of deriving the pointgroup."""
    # is this function completely pointless?
    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------
    ####Redoing batches only seems to be in multi_sweep_idxing for CCP4A
    self._scalr_likely_spacegroups = [self._scalr_input_pointgroup]
    Debug.write("Using input pointgroup: %s" % self._scalr_input_pointgroup)
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        self._helper.reindex_jiffy(si, self._scalr_input_pointgroup, "h,k,l")
def run(self):
    """Run dials.find_spots over the input datablock with all configured
    spot-finding parameters, then scrape the saved-spot count into
    self._nspots."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running dials.find_spots')
    self.clear_command_line()
    self.add_command_line('input.datablock="%s"' % self._input_sweep_filename)
    if self._output_sweep_filename is not None:
        self.add_command_line(
            'output.datablock="%s"' % self._output_sweep_filename)
    self.add_command_line(
        'output.reflections="%s"' % self._input_spot_filename)
    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    njob = PhilIndex.params.xia2.settings.multiprocessing.njob
    mp_mode = PhilIndex.params.xia2.settings.multiprocessing.mode
    mp_type = PhilIndex.params.xia2.settings.multiprocessing.type
    self.set_cpu_threads(nproc)
    self.add_command_line('nproc=%i' % nproc)
    # cluster (DRMAA) submission only for serial qsub multi-job runs
    if mp_mode == 'serial' and mp_type == 'qsub' and njob > 1:
        self.add_command_line('mp.method=drmaa')
        self.add_command_line('mp.njobs=%i' % njob)
    for scan_range in self._scan_ranges:
        self.add_command_line('spotfinder.scan_range=%d,%d' % scan_range)
    # optional spot-finding tuning parameters, forwarded only when set
    if self._min_spot_size is not None:
        self.add_command_line('min_spot_size=%i' % self._min_spot_size)
    if self._min_local is not None:
        self.add_command_line('dispersion.min_local=%i' % self._min_local)
    if self._kernel_size is not None:
        self.add_command_line('dispersion.kernel_size=%i %i' %
                              (self._kernel_size, self._kernel_size))
    if self._global_threshold is not None:
        self.add_command_line(
            'dispersion.global_threshold=%s' % self._global_threshold)
    if self._sigma_strong is not None:
        self.add_command_line(
            'dispersion.sigma_strong=%i' % self._sigma_strong)
    if self._filter_ice_rings:
        self.add_command_line(
            'ice_rings.filter=%s' % self._filter_ice_rings)
    if self._phil_file is not None:
        self.add_command_line("%s" % self._phil_file)
    if self._write_hot_mask:
        self.add_command_line("write_hot_mask=true")
    if self._hot_mask_prefix:
        self.add_command_line("hot_mask_prefix=%s" % self._hot_mask_prefix)
    if self._gain:
        self.add_command_line("gain=%f" % self._gain)
    self.start()
    self.close_wait()
    self.check_for_errors()
    # e.g. "Saved 12345 reflections to strong.pickle"
    for record in self.get_all_output():
        if record.startswith('Saved') and 'reflections to' in record:
            self._nspots = int(record.split()[1])
def run(self):
    """Run dials.refine on the configured experiments and reflections,
    recording the xpid-tagged refined output filenames; raise when
    refinement produced no output."""
    from xia2.Handlers.Streams import Debug
    Debug.write("Running dials.refine")
    self.clear_command_line()
    self.add_command_line(self._experiments_filename)
    self.add_command_line(self._indexed_filename)
    self.add_command_line("scan_varying=%s" % self._scan_varying)
    if self._close_to_spindle_cutoff is not None:
        self.add_command_line(
            "close_to_spindle_cutoff=%f" % self._close_to_spindle_cutoff)
    if self._outlier_algorithm:
        self.add_command_line(
            "outlier.algorithm=%s" % self._outlier_algorithm)
    # tag output files with the xpid to keep parallel runs apart
    self._refined_experiments_filename = os.path.join(
        self.get_working_directory(), "%s_refined.expt" % self.get_xpid())
    self.add_command_line(
        "output.experiments=%s" % self._refined_experiments_filename)
    self._refined_filename = os.path.join(
        self.get_working_directory(), "%s_refined.refl" % self.get_xpid())
    self.add_command_line("output.reflections=%s" % self._refined_filename)
    if self._reflections_per_degree is not None:
        self.add_command_line(
            "reflections_per_degree=%i" % self._reflections_per_degree)
    if self._interval_width_degrees is not None:
        # the same smoother width is applied to both cell and orientation
        self.add_command_line(
            "unit_cell.smoother.interval_width_degrees=%i" %
            self._interval_width_degrees)
        self.add_command_line(
            "orientation.smoother.interval_width_degrees=%i" %
            self._interval_width_degrees)
    if self._detector_fix:
        self.add_command_line("detector.fix=%s" % self._detector_fix)
    if self._beam_fix:
        self.add_command_line("beam.fix=%s" % self._beam_fix)
    if self._phil_file is not None:
        self.add_command_line("%s" % self._phil_file)
    self.start()
    self.close_wait()
    # missing output files mean refinement failed outright
    if not os.path.isfile(self._refined_filename) or not os.path.isfile(
            self._refined_experiments_filename):
        raise RuntimeError(
            "DIALS did not refine the data, see log file for more details: %s"
            % self.get_log_file())
    for record in self.get_all_output():
        if "Sorry: Too few reflections to" in record:
            raise RuntimeError(record.strip())
    self.check_for_errors()
def get_integrater_prepare_done(self):
    """Return the integrater 'prepare' flag, resetting the integrater
    first if the upstream refiner has been invalidated."""
    refiner = self.get_integrater_refiner()
    if not refiner:
        return self._intgr_prepare_done
    if self._intgr_prepare_done and not refiner.get_refiner_done():
        Debug.write('Resetting integrater as refiner updated.')
        self._integrater_reset()
    return self._intgr_prepare_done
def run(self):
    """Run dials.export to convert the experiments file to XDS format."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running dials.export')
    self.clear_command_line()
    for argument in (self._experiments_filename, 'format=xds'):
        self.add_command_line(argument)
    self.start()
    self.close_wait()
    self.check_for_errors()
def _Othercell():
    '''A factory to produce either a wrapper for LatticeSymmetry or
    OtherCell depending on what is available.

    Fix: the Debug.write calls were placed after the return statements
    and so were unreachable; they now run before returning.'''
    try:
        result = LatticeSymmetry()
        Debug.write('Using iotbx.lattice_symmetry')
        return result
    except Exception:
        result = Othercell()
        Debug.write('Using othercell')
        return result
def set_refiner_prepare_done(self, done=True):
    """Record the refiner 'prepare' flag, noting the caller in the
    debug log."""
    stack_entry = inspect.stack()[1]
    source_module = inspect.getmodule(stack_entry[0])
    message = "Called refiner prepare done from %s %d (%s)" % (
        source_module.__name__, stack_entry[0].f_lineno, done)
    Debug.write(message)
    self._refinr_prepare_done = done
def _prepare_pointless_hklin(working_directory, hklin, phi_width): """Prepare some data for pointless - this will take only 180 degrees of data if there is more than this (through a "pointless" command) else will simply return hklin.""" # also remove blank images? if not PhilIndex.params.xia2.settings.small_molecule: Debug.write("Excluding blank images") hklout = os.path.join( working_directory, "%s_noblank.mtz" % (os.path.split(hklin)[-1][:-4]) ) FileHandler.record_temporary_file(hklout) hklin = remove_blank(hklin, hklout) # find the number of batches batches = MtzUtils.batches_from_mtz(hklin) n_batches = max(batches) - min(batches) phi_limit = 180 if ( n_batches * phi_width < phi_limit or PhilIndex.params.xia2.settings.small_molecule ): return hklin hklout = os.path.join( working_directory, "%s_prepointless.mtz" % (os.path.split(hklin)[-1][:-4]) ) pl = xia2.Wrappers.CCP4.Pointless.Pointless() pl.set_working_directory(working_directory) auto_logfiler(pl) pl.set_hklin(hklin) pl.set_hklout(hklout) first = min(batches) last = first + int(phi_limit / phi_width) Debug.write( "Preparing data for pointless - %d batches (%d degrees)" % ((last - first), phi_limit) ) pl.limit_batches(first, last) # we will want to delete this one exit FileHandler.record_temporary_file(hklout) return hklout
def __init__(self):
    """Set up an XSCALE wrapper: choose the serial or parallel executable
    and initialise all bookkeeping for input/output reflection files,
    corrections and scaling decisions."""
    # set up the object ancestors...
    DriverInstance.__class__.__init__(self)
    # now set myself up...
    self._parallel = PhilIndex.params.xia2.settings.multiprocessing.nproc
    if self._parallel <= 1:
        self.set_executable('xscale')
    else:
        self.set_executable('xscale_par')
    self._version = 'new'
    # overall information
    self._resolution_shells = ''
    self._cell = None
    self._spacegroup_number = None
    self._reindex_matrix = None
    # corrections to apply - N.B. default values come from the
    # factory function default arguments...
    # (correct_decay / correct_absorption / correct_modulation are closed
    # over from the enclosing factory function - not visible here)
    self._correct_decay = correct_decay
    self._correct_absorption = correct_absorption
    self._correct_modulation = correct_modulation
    # input reflections information - including grouping information
    # in the same way as the .xinfo files - through the wavelength
    # names, which will be used for the output files.
    self._input_reflection_files = []
    self._input_reflection_wavelength_names = []
    self._input_resolution_ranges = []
    # these are generated at the run time
    self._transposed_input = { }
    self._transposed_input_keys = []
    # output
    self._output_reflection_files = { }
    self._remove = []
    # decisions about the scaling
    self._crystal = None
    self._zero_dose = PhilIndex.params.xds.xscale.zero_dose
    if self._zero_dose:
        Debug.write('Switching on zero-dose extrapolation')
    self._anomalous = True
    self._merge = False
    # scale factor output
    self._scale_factor = 1.0
    # Rmerge values - for the scale model analysis - N.B. get
    # one for each data set, obviously...
    self._rmerges = { }
def run(self):
    """Assemble the command line and run dials.align_crystal."""
    from xia2.Handlers.Streams import Debug
    Debug.write('Running dials.align_crystal')

    self.clear_command_line()
    for argument in ('experiments=%s' % self._experiments_filename,
                     'output.json=%s' % self._json_filename):
        self.add_command_line(argument)

    self.start()
    self.close_wait()
    self.check_for_errors()
def debug_memory_usage():
    '''Print line, file, memory usage.'''
    try:
        import inspect

        # Inspect the caller's frame to report where we were invoked from.
        caller = inspect.stack()[1][0]
        info = inspect.getframeinfo(caller)
        basename = os.path.split(info.filename)[-1]
        Debug.write('RAM usage at %s %d: %d'
                    % (basename, info.lineno, memory_usage()))
    except Exception as e:
        # best-effort only - never let diagnostics raise
        Debug.write('Error getting RAM usage: %s' % str(e))
def reindex_sym_related(A, A_ref):
    """Calculate a reindexing matrix to move the indices referred to in
    A to the reference frame in A_ref: both are orientation matrices from
    Mosflm."""
    # Parse both orientation matrices, then the rotation relating them is
    # A_ref^-1 * A.
    orientation = matrix.sqr(parse_matrix(A)[1])
    orientation_ref = matrix.sqr(parse_matrix(A_ref)[1])
    rotation = orientation_ref.inverse() * orientation

    Debug.write("%5.3f %5.3f %5.3f %5.3f %5.3f %5.3f %5.3f %5.3f %5.3f"
                % rotation.elems)

    return mat_to_symop(rotation)
def get_indexer_finish_done(self):
    """Return whether the indexer finish step is done, resetting it first
    if the indexing step itself is no longer done."""
    if not self.get_indexer_done():
        # Record where the reset was triggered from for debugging.
        caller = inspect.currentframe().f_back
        Debug.write(
            "Resetting indexer finish done as index not done, from %s/%d"
            % (caller.f_code.co_filename, caller.f_lineno)
        )
        self.set_indexer_finish_done(False)
    return self._indxr_finish_done
def rectangle(self, header):
    '''Return a configured rectangle object to test whether pixels are
    within the backstop region.'''
    from xia2.Handlers.Streams import Debug

    corners = self.calculate_mask(header)
    # Flatten the four (x, y) vertices to integers for the log message.
    coords = tuple(int(c) for p in corners for c in (p[0], p[1]))
    Debug.write('Vertices of mask: (%d, %d), (%d, %d), (%d, %d), (%d, %d)'
                % coords)

    return rectangle(*corners)
def chdir_override(arg):
    # Forked children get the untraced behaviour.
    if os.getpid() != pid:
        return origchdir(arg)
    # Try to determine the name of the calling module.
    # Use exception trick to pick up the current frame.
    try:
        raise Exception()
    except Exception:
        caller = sys.exc_info()[2].tb_frame.f_back
    Debug.write('Directory change to %r in %s:%d'
                % (arg, caller.f_code.co_filename, caller.f_lineno))
    return origchdir(arg)
def get_detector_identification(self):
    """Return an identifying string for the detector (e.g.
    'PILATUS 2M, S/N 24-0107 Diamond'), or None if it cannot be
    determined."""
    # A developmental override takes precedence over the imageset.
    detector_id = (PhilIndex.get_python_object().xia2.settings.
                   developmental.detector_id)
    if not detector_id and self.get_imageset():
        detector_id = self.get_imageset().get_detector()[0].get_identifier()

    if not detector_id:
        Debug.write("Detector could not be identified")
        return None

    Debug.write("Detector identified as %s" % detector_id)
    return detector_id
def run(self):
    """Run dials.export to write the data in BEST format."""
    from xia2.Handlers.Streams import Debug
    Debug.write("Running dials.export")

    self.clear_command_line()
    arguments = [
        "experiments=%s" % self._experiments_filename,
        "reflections=%s" % self._reflections_filename,
        "format=best",
        "best.prefix=%s" % self._prefix,
    ]
    for argument in arguments:
        self.add_command_line(argument)

    self.start()
    self.close_wait()
    self.check_for_errors()
def _scale_finish_chunk_1_compute_anomalous(self):
    """Run anomalous signal analysis on each scaled reflection file and
    record dF/F and dI/s(dI) in the scaler statistics (skipping centric
    space groups, where there is no anomalous signal)."""
    for key, refl_file in self._scalr_scaled_refl_files.items():
        if mtz.object(refl_file).space_group().is_centric():
            Debug.write('Spacegroup is centric: %s' % refl_file)
            continue

        Debug.write('Running anomalous signal analysis on %s' % refl_file)
        a_s = anomalous_signals(refl_file)
        stats = self._scalr_statistics[
            (self._scalr_pname, self._scalr_xname, key)]
        stats['dF/F'] = [a_s[0]]
        stats['dI/s(dI)'] = [a_s[1]]
def find_blank(hklin):
    """Identify blank batches in an MTZ file.

    Sums the data with pointless into a temporary .hkl file, computes the
    mean I/sigma per batch, and classifies a batch as blank when that mean
    is below 1.

    Args:
      hklin: path to the input MTZ file.

    Returns:
      (blank, good): two sorted lists of batch numbers.

    Raises:
      RuntimeError: if pointless produces no output.
    """
    # FIX: previously the finally clause referenced hklout unconditionally,
    # raising NameError (masking the real error) if the temporary file was
    # never created.
    hklout = None
    try:
        # first dump to temp. file
        with tempfile.NamedTemporaryFile(suffix=".hkl",
                                         dir=os.environ["CCP4_SCR"],
                                         delete=False) as fh:
            hklout = fh.name
        p = Pointless()
        p.set_hklin(hklin)
        _ = p.sum_mtz(hklout)
        if os.path.getsize(hklout) == 0:
            Debug.write("Pointless failed:")
            Debug.write("".join(p.get_all_output()))
            raise RuntimeError("Pointless failed: no output file written")

        # accumulate I/sigma values per batch (columns: batch at index 3,
        # I at 4, sigma at 5 of the summed .hkl output)
        isig = {}
        with open(hklout, "r") as fh:
            for record in fh:
                lst = record.split()
                if not lst:
                    continue
                batch = int(lst[3])
                i, sig = float(lst[4]), float(lst[5])
                if not sig:
                    # zero sigma: I/sigma undefined, skip
                    continue
                isig.setdefault(batch, []).append(i / sig)
    finally:
        # only remove the temporary file if it was actually created
        if hklout is not None and os.path.exists(hklout):
            os.remove(hklout)

    # look at the mean and sd
    blank = []
    good = []

    for batch in sorted(isig):
        m, s = meansd(isig[batch])
        if m < 1:
            blank.append(batch)
        else:
            good.append(batch)

    return blank, good
def run(self, fast_mode=False):
    """Run dials.import over the configured image range.

    fast_mode: read first two image headers then extrapolate the rest
    from what xia2 read from the image headers...
    """
    from xia2.Handlers.Streams import Debug

    if fast_mode:
        if not self._image_to_epoch:
            raise RuntimeError("fast mode needs image_to_epoch map")
        Debug.write("Running dials.import in fast mode")
    else:
        Debug.write("Running dials.import in slow mode")

    self.clear_command_line()

    first, last = self._image_range
    self._images.extend(
        self.get_image_name(i) for i in range(first, last + 1))

    if self._wavelength_tolerance is not None:
        self.add_command_line("input.tolerance.beam.wavelength=%s" %
                              self._wavelength_tolerance)

    # an explicit reference geometry wins over a mosflm beam centre
    if self._reference_geometry is not None:
        self.add_command_line("input.reference_geometry=%s" %
                              self._reference_geometry)
    elif self._mosflm_beam_centre is not None:
        assert len(self._mosflm_beam_centre) == 2
        self.add_command_line("mosflm_beam_centre=%s,%s" %
                              (self._mosflm_beam_centre))

    # in fast mode only the first two images are passed to dials.import
    selected_images = self._images[:2] if fast_mode else self._images
    for image in selected_images:
        self.add_command_line(image)

    self.add_command_line("output.experiments=%s" % self._sweep_filename)
    self.start()
    self.close_wait()
    self.check_for_errors()

    if fast_mode:
        self.fix_experiments_import()

    assert os.path.exists(
        os.path.join(self.get_working_directory(), self._sweep_filename))
def decide_correct_lattice_using_refiner(possible_lattices, refiner):
    """Use the refiner to determine which of the possible lattices is the
    correct one.

    Returns (correct_lattice, rerun_symmetry, need_to_return)."""
    correct_lattice = None
    rerun_symmetry = False
    need_to_return = False

    for lattice in possible_lattices:
        state = refiner.set_refiner_asserted_lattice(lattice)

        if state == refiner.LATTICE_CORRECT:
            Debug.write("Agreed lattice %s" % lattice)
            correct_lattice = lattice
            break

        if state == refiner.LATTICE_IMPOSSIBLE:
            # lattice rejected outright: keep looking, but symmetry
            # determination will need rerunning
            Debug.write("Rejected lattice %s" % lattice)
            rerun_symmetry = True
        elif state == refiner.LATTICE_POSSIBLE:
            # acceptable, but processing must be repeated with it
            Debug.write("Accepted lattice %s, will reprocess" % lattice)
            need_to_return = True
            correct_lattice = lattice
            break

    if correct_lattice is None:
        # fall back on whatever lattice the refiner currently has
        correct_lattice = refiner.get_refiner_lattice()
        rerun_symmetry = True
        Debug.write("No solution found: assuming lattice from refiner")

    return correct_lattice, rerun_symmetry, need_to_return
def check_environment():
    '''Check the environment we are running in...

    Writes a summary of the Python/CCTBX/CCP4 configuration to the
    Chatter log and raises RuntimeError if a requirement is not met
    (old Python, missing CCP4 variables, space in the working
    directory).'''
    if sys.hexversion < 0x02070000:
        raise RuntimeError('Python versions older than 2.7 are not supported')

    import cctbx
    executable = sys.executable
    # the cctbx package root is three path components up from __file__
    cctbx_dir = os.sep.join(cctbx.__file__.split(os.sep)[:-3])

    # to help wrapper code - print process id...

    Debug.write('Process ID: %d' % os.getpid())

    Chatter.write('Environment configuration...')
    Chatter.write('Python => %s' % executable)
    Chatter.write('CCTBX => %s' % cctbx_dir)

    # required CCP4 environment variables - fail early if unset or if the
    # value carries stray whitespace
    ccp4_keys = ['CCP4', 'CLIBD', 'CCP4_SCR']
    for k in ccp4_keys:
        v = Environment.getenv(k)
        if not v:
            raise RuntimeError('%s not defined - is CCP4 set up?' % k)
        if not v == v.strip():
            raise RuntimeError('spaces around "%s"' % v)
        Chatter.write('%s => %s' % (k, v))

    from xia2.Handlers.Flags import Flags
    Chatter.write('Starting directory: %s' % Flags.get_starting_directory())
    Chatter.write('Working directory: %s' % os.getcwd())

    # temporary workaround to bug in pointless...
    if ' ' in os.getcwd():
        raise RuntimeError('Space in working directory ' \
                           '(https://github.com/xia2/xia2/issues/114)')

    Chatter.write('Free space: %.2f GB' % (df() / math.pow(2, 30)))

    # hostname lookup is best-effort: the environment variable may be absent
    try:
        if os.name == 'nt':
            hostname = os.environ['COMPUTERNAME'].split('.')[0]
        else:
            hostname = os.environ['HOSTNAME'].split('.')[0]
        Chatter.write('Host: %s' % hostname)
    except KeyError:
        pass

    Chatter.write('Contact: [email protected]')

    Chatter.write(Version)
def decide_i_or_ii(self):
    """Decide between indexing solutions 'i' and 'ii' by comparing the
    test metrics for each; any failure falls back to 'ii'."""
    Debug.write('Testing II or I indexing')

    try:
        fraction_etc_i = self.test_i()
        fraction_etc_ii = self.test_ii()

        # if only one test produced a result, use the other solution
        if not fraction_etc_i and fraction_etc_ii:
            return 'ii'
        if fraction_etc_i and not fraction_etc_ii:
            return 'i'

        Debug.write('I: %.2f %.2f %.2f' % fraction_etc_i)
        Debug.write('II: %.2f %.2f %.2f' % fraction_etc_ii)

        # 'i' wins only if it is better on all three metrics
        i_preferred = (fraction_etc_i[0] > fraction_etc_ii[0]
                       and fraction_etc_i[1] < fraction_etc_ii[1]
                       and fraction_etc_i[2] < fraction_etc_ii[2])
        return 'i' if i_preferred else 'ii'
    except Exception as e:
        # best-effort decision: log and fall back to 'ii'
        Debug.write(str(e))
        return 'ii'
def set_xinfo(self, xinfo):
    """Record the .xinfo input file: echo its contents to the debug log
    and parse it into an XProject stored on self._xinfo.

    Args:
      xinfo: path to the .xinfo file.
    """
    Debug.write(60 * "-")
    Debug.write("XINFO file: %s" % xinfo)
    # FIX: 'rU' mode was deprecated in Python 3 and removed in 3.11;
    # plain text mode already provides universal newline handling.
    with open(xinfo, "r") as fh:
        Debug.write(fh.read().strip())
    Debug.write(60 * "-")
    self._xinfo = XProject(xinfo)