def run_xds_sequence(root, params):
    """Run xds_sequence() for `root', optionally staging the work in a local temp dir.

    root   -- processing directory containing XDS.INP and related files.
    params -- parameter object; only params.use_tmpdir_if_available is read here.
    Returns whatever xds_sequence() returns.
    """
    tmpdir = None
    if params.use_tmpdir_if_available:
        tmpdir = util.get_temp_local_dir("xdskamo", min_gb=2) # TODO guess required tempdir size
        if tmpdir is None:
            print "Can't get temp dir with sufficient size."

    # If tmpdir is not used (not requested, or not enough local space), work in place.
    if tmpdir is None:
        return xds_sequence(root, params)

    print "Using %s as temp dir.." % tmpdir

    # If tempdir is used: copy everything from root into the temp dir.
    # NOTE(review): shutil.copy2 raises on directories -- presumably `root'
    # holds only plain files at this point; confirm.
    for f in glob.glob(os.path.join(root, "*")):
        shutil.copy2(f, tmpdir)

    xdsinp = os.path.join(tmpdir, "XDS.INP")
    xdsinp_dict = dict(get_xdsinp_keyword(xdsinp))

    # Make a link to data dir, so the frame template can be addressed with a
    # short relative path from inside tmpdir.
    org_data_template = xdsinp_dict["NAME_TEMPLATE_OF_DATA_FRAMES"]
    ord_data_dir = os.path.dirname(org_data_template)
    if not os.path.isabs(ord_data_dir):
        ord_data_dir = os.path.join(root, ord_data_dir)
    datadir_lns = os.path.join(tmpdir, "data_loc")
    os.symlink(ord_data_dir, datadir_lns)

    # Modify XDS.INP to reference frames through the "data_loc" symlink.
    modify_xdsinp(xdsinp, inp_params=[("NAME_TEMPLATE_OF_DATA_FRAMES",
                                       os.path.join("data_loc", os.path.basename(org_data_template)))])

    try:
        ret = xds_sequence(tmpdir, params)
    finally:
        # Revert XDS.INP so the file copied back refers to the real template path.
        modify_xdsinp(xdsinp, inp_params=[("NAME_TEMPLATE_OF_DATA_FRAMES", org_data_template)])
        # Remove link
        os.remove(datadir_lns)

    # Move results to original directory.
    for f in glob.glob(os.path.join(tmpdir, "*")):
        f_dest = os.path.join(root, os.path.relpath(f, tmpdir))
        if os.path.isfile(f_dest):
            # Copy only if newer than the copy already present in `root'.
            if os.stat(f).st_mtime > os.stat(f_dest).st_mtime:
                shutil.copy2(f, root)
            else:
                print "%s already exists and not modified. skip." % f
            os.remove(f)
        else:
            shutil.move(f, root)

    # Remove tmpdir
    shutil.rmtree(tmpdir)
    return ret
def run(master_h5, master_h5_ctime, dbfile, tmpdir=None, spots_min=3, remove_files=False):
    """Create a hits-only h5 from `master_h5' and place it next to the master file.

    Download must be finished when this script started!!
    Everything in tmpdir will be removed!

    master_h5       -- path to a *_master.h5 file.
    master_h5_ctime -- ctime recorded earlier; the result is discarded if the
                       master file's ctime no longer matches (file changed).
    dbfile          -- spot database given to create_hitsonly_h5().
    tmpdir          -- existing work dir already holding copies of the h5
                       files; when None, a fresh local temp dir is created.
                       Removed (with contents) in the finally block.
    spots_min       -- minimum spot count for a frame to be kept as a hit.
    remove_files    -- NOTE(review): not referenced anywhere in this body --
                       confirm whether it is vestigial.
    """
    os.stat_float_times(False)  # integer timestamps so the ctime comparison is exact
    if tmpdir: assert os.path.isdir(tmpdir)
    try:
        assert master_h5.endswith("_master.h5")
        prefix = os.path.basename(master_h5)[:-len("master.h5")]

        # If tmpdir given, copy of master.h5 and data h5 files should be there
        if tmpdir:
            master_h5_in_tmp = os.path.join(tmpdir, os.path.basename(master_h5))
        else:
            master_h5_in_tmp = master_h5 # If not, just use this

        # If tmpdir given, just use there; otherwise create new one.
        # NOTE(review): get_temp_local_dir() may return None on low space
        # (see other call sites); that would make the joins below raise --
        # presumably acceptable here, confirm.
        if not tmpdir:
            tmpdir = get_temp_local_dir("hitsonly", min_gb=1)

        hits_h5 = os.path.join(tmpdir, prefix + "onlyhits.h5")
        print "tmpdir is %s" % tmpdir

        create_hitsonly_h5(master_h5_in_tmp, dbfile, hits_h5, spots_min)

        if not os.path.isfile(hits_h5):
            raise Exception("Generation of %s failed" % hits_h5)

        print "ctime_master_h5 =", os.path.getctime(master_h5)
        # The master file must be unchanged since its ctime was recorded;
        # otherwise the hits-only file was built from stale data.
        if os.path.getctime(master_h5) != master_h5_ctime:
            raise Exception(
                "Master h5 file (%s, %d) is changed! Discarding this hitsonly h5" % (master_h5, master_h5_ctime))

        #shutil.move(hits_h5, os.path.join(os.path.dirname(master_h5), os.path.basename(hits_h5)))
        safe_copy(hits_h5, os.path.normpath(os.path.dirname(master_h5)), move=True)
    finally:
        # Always wipe the temp dir, listing what gets removed.
        files_in_tmp = glob.glob(os.path.join(tmpdir, "*"))
        if files_in_tmp:
            print "Removing %d files in tmpdir:" % len(files_in_tmp)
            for f in files_in_tmp:
                print " %s" % f
        shutil.rmtree(tmpdir)
def tst_h5toxds(): print "Testing H5ToXds.." rcode, out, err = util.call("H5ToXds") ignore_msg = "(You can ignore this if you don't process hdf5 files which usually mean Eiger data)" if rcode == 127: # 127 is "command not found". print " Not installed. NG %s" % ignore_msg return False import numpy from yamtbx.dataproc.eiger import make_dummy_h5_for_test from yamtbx.dataproc import cbf tmpdir = util.get_temp_local_dir("h5test") data = numpy.random.randint(0, 65535, size=100).astype(numpy.uint32).reshape( (1, 10, 10)) master_h5 = make_dummy_h5_for_test(tmpdir, data) rcode, out, err = util.call("H5ToXds", "%s 1 1.cbf" % os.path.basename(master_h5), wdir=tmpdir) cbfout = os.path.join(tmpdir, "1.cbf") if not os.path.isfile(cbfout): print " H5ToXds exists, but not works. Probably Dectris original H5ToXds? Test it with real h5 file. %s" % ignore_msg if out.strip(): print " -- stdout:" print out if err.strip(): print " -- stderr:" print err shutil.rmtree(tmpdir) return False data_read, _, _ = cbf.load_minicbf_as_numpy(cbfout) shutil.rmtree(tmpdir) if numpy.all(data_read.flatten() == data.flatten()): print " OK" return True else: print " H5ToXds exists, but not correctly works. Probably Dectris original H5ToXds? Test it with real h5 file. %s" % ignore_msg if out.strip(): print " -- stdout:" print out if err.strip(): print " -- stderr:" print err return False
def check_xds_version():
    """Parse the xds banner and return (version, built) strings, or (None, None)."""
    scratch = util.get_temp_local_dir("xdstest")
    rcode, out, err = util.call("xds", wdir=scratch)
    if scratch:
        shutil.rmtree(scratch)  # just in case; xds shouldn't make any files

    # Expected output:
    #  ***** XDS ***** (VERSION Mar 15, 2019  BUILT=20191211)   6-Jan-2020
    #   Author: Wolfgang Kabsch
    #   Copy licensed until 30-Sep-2020 to ...
    hit = re.search("VERSION (.*[0-9]) *BUILT=(.*)\)", out)
    return hit.groups() if hit else (None, None)
def tst_xds(): print "Testing XDS.." tmpdir = util.get_temp_local_dir("xdstest") rcode, out, err = util.call("xds_par", wdir=tmpdir) if tmpdir: shutil.rmtree(tmpdir) # just in case; xds shouldn't make any files if rcode != 0: print " Not installed. NG" return False if "license expired" in out: print " license expired. Get latest version. NG" return False print " OK" return True
def run_xscale(xscale_inp, cbf_to_dat=False, use_tmpdir_if_available=False):
    """Run XSCALE on `xscale_inp', working around the input line-length limit.

    Long INPUT_FILE= paths are replaced with short lnk%06d.hkl symlinks before
    running, and the original names are substituted back into XSCALE.LP and
    the OUTPUT_FILE afterwards.

    xscale_inp              -- path to the XSCALE.INP file.
    cbf_to_dat              -- convert produced *.cbf files to .dat and delete them.
    use_tmpdir_if_available -- try to run in a local temp dir, then copy results back.
    """
    ftable = {}      # symlink name -> original hkl filename
    outfile = None   # value of OUTPUT_FILE= in the input
    count = 0
    inpdir = os.path.dirname(os.path.abspath(xscale_inp))
    wdir = inpdir # may be overridden

    tmpdir = None
    if use_tmpdir_if_available:
        tmpdir = util.get_temp_local_dir("xscale", min_gb=1) # TODO guess required tempdir size
        print tmpdir
        if tmpdir is None:
            print "Can't get temp dir with sufficient size."

    if tmpdir is not None:
        # Work on a copy of the input inside the temp dir.
        shutil.copy2(xscale_inp, tmpdir)
        xscale_inp = os.path.join(tmpdir, os.path.basename(xscale_inp))
        wdir = tmpdir

    # Keep the original input as *.org and write a rewritten copy in its place.
    os.rename(xscale_inp, xscale_inp + ".org")
    ofs = open(xscale_inp, "w")

    # Check line length and make symlink if needed
    for l in open(xscale_inp + ".org"):
        # Strip trailing "!" comments before keyword matching.
        ll = l[:l.index("!")] if "!" in l else l
        if "OUTPUT_FILE=" in ll:
            outfile = ll[ll.index("=") + 1:].strip() # TODO what if the file is not in current directory?
        if "INPUT_FILE=" in ll: # and len(l) > 132: # one line is limited to 131 characters!
            filename = ll[ll.index("=") + 1:].strip()
            # A leading "*" marks the reference dataset; drop it to get the path.
            if "*" in filename:
                filename = filename[filename.index("*") + 1:].strip()
            assert " " not in filename
            lnkf = "lnk%.6d.hkl" % count
            assert not os.path.isfile(os.path.join(wdir, lnkf))
            filename_abs = os.path.normpath(os.path.join(inpdir, filename)) if not os.path.isabs(filename) else filename
            os.symlink(filename_abs, os.path.join(wdir, lnkf))
            print "xscale: %s -> %s" % (lnkf, filename)
            count += 1
            ftable[lnkf] = filename
            l = l.replace(filename, lnkf)
        ofs.write(l)
    ofs.close()

    assert outfile is not None
    if len(ftable) == 0:
        # Nothing was rewritten; restore the original input before running.
        os.rename(xscale_inp + ".org", xscale_inp)

    # Run xscale
    util.call(xscale_comm, wdir=wdir)

    # Replace file names if needed: put the original hkl names back into
    # XSCALE.LP (whole file) and the OUTPUT_FILE (header only).
    if len(ftable) > 0:
        for i, f in enumerate(("XSCALE.LP", outfile)):
            f = os.path.join(wdir, f)
            if not os.path.isfile(f):
                continue
            os.rename(f, f + ".org")
            ofs = open(f, "w")
            if i == 0:
                # XSCALE.LP: substitute on every line mentioning .hkl.
                for l in open(f + ".org"):
                    if ".hkl" in l:
                        for lfn in ftable:
                            l = l.replace(lfn, ftable[lfn])
                    ofs.write(l)
            else:
                # OUTPUT_FILE: only the header can mention file names; copy
                # the (potentially large) reflection body through unchanged.
                ifs = open(f + ".org")
                while True:
                    l = ifs.readline()
                    if ".hkl" in l:
                        for lfn in ftable:
                            l = l.replace(lfn, ftable[lfn])
                    ofs.write(l)
                    if l.startswith("!END_OF_HEADER"):
                        break
                shutil.copyfileobj(ifs, ofs)
            ofs.close()
            os.remove(f + ".org")

        # Drop the symlinks and restore the original XSCALE.INP.
        for lfn in ftable:
            os.remove(os.path.join(wdir, lfn))
        os.rename(xscale_inp + ".org", xscale_inp)

    if cbf_to_dat:
        xscale_lp = os.path.join(wdir, "XSCALE.LP")
        cbfouts = glob.glob(os.path.join(wdir, "*.cbf"))
        if len(cbfouts) > 0:
            xscalelp.cbf_to_dat(xscale_lp)
            for f in cbfouts:
                os.remove(f)

    # Move results back to the original directory and drop the temp dir.
    if tmpdir is not None:
        for f in glob.glob(os.path.join(tmpdir, "*")):
            shutil.copy2(f, inpdir)
        shutil.rmtree(tmpdir)
def download_files(e, files, wdir, bssid, tmpdir=None, omega_offset_by_trigger=None):
    """
    Download `files' from the detector server `e' into `wdir', then delete them
    from the server. Returns the list of file names that could not be handled.

    If bssid is not None, 'files' contains bssid+prefix_(master.h5|data_*.h5).
    When `tmpdir' is not None, download files to tmpdir once and then copy to
    destination. Files in tmpdir are kept, and will be used for hit-extraction.

    omega_offset_by_trigger -- forwarded to modify_master() for master files.
    """
    failed_files = []
    for f in files:
        src = "http://%s/data/%s" % (e._host, f)
        #tmpfd, tmp = tempfile.mkstemp(prefix=f, dir=default_tmpd)
        #os.close(tmpfd)
        tmp = None
        # Strip the bssid prefix from the destination file name, if present.
        if not bssid:
            trg = os.path.join(wdir, f)
        else:
            assert f.startswith(bssid)
            trg = os.path.join(wdir, f[len(bssid):])

        # Up to 10 download attempts; success is detected by `tmp' existing.
        dl_failed = False
        for i in xrange(10):
            dl_failed = False
            print now(), " donwloading %s (%dth try).." % (f, i + 1)
            startt = time.time()
            try:
                # Ask the server for the file size first, to pick a local
                # temp dir with enough free space.
                u = urllib.urlopen(src)
                file_bytes = u.info().getheaders("Content-Length")[0]
                u.close()
                print now(), " file size on server %s = %s" % (f, file_bytes)
                tmpd = get_temp_local_dir("eigerdl", min_bytes=int(file_bytes), additional_tmpd=wdir)
                if tmpd is None:
                    print now(), " ERROR: no space available to download this file!"
                    dl_failed = True
                    break
                print now(), " to %s" % tmpd
                tmp = os.path.join(tmpd, f)
                urllib.urlretrieve(src, tmp)
            except:
                # Deliberately broad: any network/IO error just triggers a retry.
                print traceback.format_exc()
                dl_failed = True # if success in last trial, dl_failed==False
            eltime = time.time() - startt
            if tmp and os.path.isfile(tmp):
                print now(), " done in %.2f sec. %.3f KB/s" % (eltime, os.path.getsize(tmp) / 1024 / eltime)
                break
            print # retry if downloading failed
            time.sleep(1)

        if dl_failed:
            print now(), " Download failed. Keeping on server: %s" % src
            failed_files.append(f)

        if not tmp or not os.path.isfile(tmp):
            continue

        if f.endswith("_master.h5"):
            try:
                # Fix the master file (bssid stripping / omega offsets) and
                # deliver the fixed copy.
                modify_master(tmp, tmp + "-fix.h5", bssid, omega_offset_by_trigger)
                if tmpdir:
                    safe_copy(tmp + "-fix.h5", os.path.join(tmpdir, os.path.basename(trg)))
                startt = time.time()
                safe_copy(tmp + "-fix.h5", trg, move=True)
                eltime = time.time() - startt
                print now(), " local_copy done in %.2f sec. %.3f KB/s" % (eltime, os.path.getsize(trg) / 1024 / eltime)
            except:
                # Fixing failed; fall back to delivering the unmodified master.
                print traceback.format_exc()
                if os.path.isfile(tmp):
                    if tmpdir:
                        safe_copy(tmp, os.path.join(tmpdir, os.path.basename(trg)))
                    startt = time.time()
                    safe_copy(tmp, trg, move=True)
                    eltime = time.time() - startt
                    print now(), " local_copy done in %.2f sec. %.3f KB/s" % (eltime, os.path.getsize(trg) / 1024 / eltime)
            if os.path.isfile(tmp):
                os.remove(tmp)
        else:
            try:
                if tmpdir:
                    safe_copy(tmp, os.path.join(tmpdir, os.path.basename(trg)))
                startt = time.time()
                safe_copy(tmp, trg, move=True)
                eltime = time.time() - startt
                print now(), " local_copy done in %.2f sec. %.3f KB/s" % (eltime, os.path.getsize(trg) / 1024 / eltime)
            except:
                dl_failed = True
                print now(), " local_copy failed. Keeping on server: %s" % src
                failed_files.append(f)

        # delete from server
        if not dl_failed:
            e.fileWriterFiles(f, method="DELETE")
        # NOTE(review): rmdir assumes the per-file temp dir is empty by now;
        # if a copy above failed and left `tmp' behind this raises -- confirm
        # intended behavior.
        os.rmdir(os.path.dirname(tmp))
    return failed_files