def calc_cchalf_by_removing(wdir, inp_head, inpfiles, with_sigma=False, stat_bin="total", nproc=1, nproc_each=None, batchjobs=None): assert not with_sigma # Not supported now assert stat_bin in ("total", "outer") if not os.path.exists(wdir): os.makedirs(wdir) datout = open(os.path.join(wdir, "cchalf.dat"), "w") datout.write("idx exfile cc1/2(%s) Nuniq\n" % stat_bin) cchalf_list = [] # (i_ex, CC1/2, Nuniq) # Prep runs tmpdirs = map( lambda x: _calc_cchalf_by_removing_worker_1(wdir, inp_head, inpfiles, x, nproc_each), xrange(len(inpfiles))) # Run XSCALE if batchjobs is not None: jobs = [] for tmpdir in tmpdirs: job = batchjob.Job(tmpdir, "xscale.sh", nproc=nproc_each) job.write_script(xscale_comm) batchjobs.submit(job) jobs.append(job) batchjobs.wait_all(jobs) else: easy_mp.pool_map(fixed_func=lambda x: util.call(xscale_comm, wdir=x), args=tmpdirs, processes=nproc) # Finish runs cchalf_list = map( lambda x: _calc_cchalf_by_removing_worker_2(wdir, x[1], x[0], stat_bin ), enumerate(tmpdirs)) for iex, cchalf_exi, nuniq in cchalf_list: datout.write("%3d %s %.4f %d\n" % (iex, inpfiles[iex], cchalf_exi, nuniq)) cchalf_list.sort(key=lambda x: -x[1]) print print "# Sorted table" for idx, cch, nuniq in cchalf_list: print "%3d %-.4f %4d %s" % (idx, cch, nuniq, inpfiles[idx]) # Remove unuseful (failed) data cchalf_list = filter(lambda x: x[1] == x[1], cchalf_list) return cchalf_list
def run(params): if os.path.isdir(params.workdir) and os.listdir(params.workdir): print "Directory already exists and not empty:", params.workdir return # Check parameters if params.program == "xscale": if (params.xscale.frames_per_batch, params.xscale.degrees_per_batch).count(None) == 0: print "ERROR! You can't specify both of xscale.frames_per_batch and xscale.degrees_per_batch" return if params.reference_file is not None and params.program != "xscale": print "WARNING - reference file is not used unless program=xscale." if not os.path.isdir(params.workdir): os.makedirs(params.workdir) if params.batch.engine == "sge": batchjobs = batchjob.SGE(pe_name=params.batch.sge_pe_name) elif params.batch.engine == "sh": batchjobs = batchjob.ExecLocal(max_parallel=params.batch.sh_max_jobs) else: raise "Unknown batch engine: %s" % params.batch.engine out = multi_out() out.register("log", open(os.path.join(params.workdir, "multi_merge.log"), "w"), atexit_send_to=None) out.register("stdout", sys.stdout) out.write("kamo.multi_merge started at %s\n\n" % time.strftime("%Y-%m-%d %H:%M:%S")) time_started = time.time() print >> out, "Paramters:" libtbx.phil.parse(master_params_str).format(params).show(out=out, prefix=" ") print >> out, "" # XXX Not works when clustering is used.. html_report = multi_merging.html_report.HtmlReportMulti( os.path.abspath(params.workdir)) try: html_report.add_params(params, master_params_str) except: print >> out, traceback.format_exc() xds_ascii_files = util.read_path_list(params.lstin, only_exists=True, as_abspath=True, err_out=out) if not xds_ascii_files: print >> out, "ERROR! Cannot find (existing) files in %s." % params.lstin return if len(xds_ascii_files) < 2: print >> out, "ERROR! Only one file in %s." % params.lstin print >> out, " Give at least two files for merging." 
return cells = collections.OrderedDict() laues = {} # for check for xac in xds_ascii_files: try: symm = XDS_ASCII(xac, read_data=False).symm except: print >> out, "Error in reading %s" % xac print >> out, traceback.format_exc() return cells[xac] = symm.unit_cell().parameters() laue = symm.space_group().build_derived_reflection_intensity_group( False).info() laues.setdefault(str(laue), {}).setdefault( symm.space_group_info().type().number(), []).append(xac) if len(laues) > 1: print >> out, "ERROR! more than one space group included." for laue in laues: print "Laue symmetry", laue for sg in laues[laue]: print >> out, " SPACE_GROUP_NUMBER= %d (%d data)" % ( sg, len(laues[laue][sg])) for f in laues[laue][sg]: print >> out, " %s" % f print >> out, "" return space_group = None if params.space_group is not None: space_group = sgtbx.space_group_info(params.space_group).group() laue_given = str( space_group.build_derived_reflection_intensity_group(False).info()) if laue_given != laues.keys()[0]: print >> out, "ERROR! user-specified space group (space_group=%s) is not compatible with input files (%s)" % ( params.space_group, laues.keys()[0]) return sg_refset = space_group.info().as_reference_setting().group() if space_group != sg_refset: print >> out, "Sorry! currently space group in non-reference setting is not supported." 
print >> out, "(You requested %s, which is different from reference setting: %s)" % ( space_group.info(), sg_refset.info()) return else: tmp = sgtbx.space_group_info( laues.values()[0].keys() [0]).group().build_derived_reflection_intensity_group(True) print >> out, "Space group for merging:", tmp.info() test_flag_will_be_transferred = False if params.reference.data is not None: params.reference.data = os.path.abspath(params.reference.data) print >> out, "Reading reference data file: %s" % params.reference.data tmp = iotbx.file_reader.any_file(params.reference.data, force_type="hkl", raise_sorry_if_errors=True) if params.reference.copy_test_flag: from yamtbx.dataproc.command_line import copy_free_R_flag if None in copy_free_R_flag.get_flag_array( tmp.file_server.miller_arrays, log_out=out): print >> out, " Warning: no test flag found in reference file (%s)" % params.reference.data else: test_flag_will_be_transferred = True print >> out, " test flag will be transferred" if space_group is not None: if space_group != tmp.file_server.miller_arrays[0].space_group(): print >> out, " ERROR! space_group=(%s) and that of reference.data (%s) do not match." % ( space_group.info(), tmp.file_server.miller_arrays[0].space_group_info()) return else: space_group = tmp.file_server.miller_arrays[0].space_group() print >> out, " space group for merging: %s" % space_group.info() if params.add_test_flag: if test_flag_will_be_transferred: print >> out, "Warning: add_test_flag=True was set, but the flag will be transferred from the reference file given." 
else: from cctbx import r_free_utils med_cell = numpy.median(cells.values(), axis=0) d_min = max( params.d_min - 0.2, 1.0 ) if params.d_min is not None else 1.5 # to prevent infinite set sg = space_group if not sg: sg = sgtbx.space_group_info( laues.values()[0].keys() [0]).group().build_derived_reflection_intensity_group(True) tmp = miller.build_set(crystal.symmetry(tuple(med_cell), space_group=sg), False, d_min=d_min, d_max=None) print >> out, "Generating test set using the reference symmetry:" crystal.symmetry.show_summary(tmp, out, " ") tmp = tmp.generate_r_free_flags(fraction=0.05, max_free=None, lattice_symmetry_max_delta=5.0, use_lattice_symmetry=True, n_shells=20) tmp.show_r_free_flags_info(out=out, prefix=" ") tmp = tmp.customized_copy( data=r_free_utils.export_r_free_flags_for_ccp4( flags=tmp.data(), test_flag_value=True)) mtz_object = tmp.as_mtz_dataset( column_root_label="FreeR_flag").mtz_object() test_flag_mtz = os.path.abspath( os.path.join(params.workdir, "test_flag.mtz")) mtz_object.write(file_name=test_flag_mtz) # Override the parameters params.reference.copy_test_flag = True params.reference.data = test_flag_mtz try: html_report.add_cells_and_files(cells, laues.keys()[0]) except: print >> out, traceback.format_exc() data_for_merge = [] if params.clustering == "blend": if params.blend.use_old_result is None: blend_wdir = os.path.join(params.workdir, "blend") os.mkdir(blend_wdir) blend.run_blend0R(blend_wdir, xds_ascii_files) print >> out, "\nRunning BLEND with analysis mode" else: blend_wdir = params.blend.use_old_result print >> out, "\nUsing precalculated BLEND result in %s" % params.blend.use_old_result blend_clusters = blend.BlendClusters(workdir=blend_wdir, d_min=params.d_min) summary_out = os.path.join(blend_wdir, "blend_cluster_summary.dat") clusters = blend_clusters.show_cluster_summary( out=open(summary_out, "w")) print >> out, "Clusters found by BLEND were summarized in %s" % summary_out if params.blend.min_cmpl is not None: clusters = 
filter(lambda x: x[3] >= params.blend.min_cmpl, clusters) if params.blend.min_acmpl is not None: clusters = filter(lambda x: x[5] >= params.blend.min_acmpl, clusters) if params.blend.min_redun is not None: clusters = filter(lambda x: x[4] >= params.blend.min_redun, clusters) if params.blend.min_aredun is not None: clusters = filter(lambda x: x[6] >= params.blend.min_aredun, clusters) if params.blend.max_LCV is not None: clusters = filter(lambda x: x[7] <= params.blend.max_LCV, clusters) if params.blend.max_aLCV is not None: clusters = filter(lambda x: x[8] <= params.blend.max_aLCV, clusters) if params.max_clusters is not None and len( clusters) > params.max_clusters: print >> out, "Only first %d (/%d) clusters will be merged (as specified by max_clusters=)" % ( params.max_clusters, len(clusters)) clusters = clusters[:params.max_clusters] if clusters: print >> out, "With specified conditions, following %d clusters will be merged:" % len( clusters) else: print >> out, "\nERROR: No clusters satisfied the specified conditions for merging!" 
print >> out, "Please change criteria of completeness or redundancy" print >> out, "Here is the table of completeness and redundancy for each cluster:\n" print >> out, open(summary_out).read() for clno, IDs, clh, cmpl, redun, acmpl, aredun, LCV, aLCV in clusters: # process largest first print >> out, " Cluster_%.4d NumDS= %4d CLh= %5.1f Cmpl= %6.2f Redun= %4.1f ACmpl=%6.2f ARedun=%4.1f LCV= %5.1f aLCV=%5.1f" % ( clno, len(IDs), clh, cmpl, redun, acmpl, aredun, LCV, aLCV) data_for_merge.append((os.path.join(params.workdir, "cluster_%.4d" % clno), map(lambda x: blend_clusters.files[x - 1], IDs), LCV, aLCV, clh)) print >> out try: html_report.add_clutering_result(clusters, "blend") except: print >> out, traceback.format_exc() elif params.clustering == "cc": ccc_wdir = os.path.join(params.workdir, "cc_clustering") os.mkdir(ccc_wdir) cc_clusters = cc_clustering.CCClustering( ccc_wdir, xds_ascii_files, d_min=params.cc_clustering.d_min if params.cc_clustering.d_min is not None else params.d_min, min_ios=params.cc_clustering.min_ios) print >> out, "\nRunning CC-based clustering" cc_clusters.do_clustering( nproc=params.cc_clustering.nproc, b_scale=params.cc_clustering.b_scale, use_normalized=params.cc_clustering.use_normalized, cluster_method=params.cc_clustering.method, distance_eqn=params.cc_clustering.cc_to_distance, min_common_refs=params.cc_clustering.min_common_refs, html_maker=html_report) summary_out = os.path.join(ccc_wdir, "cc_cluster_summary.dat") clusters = cc_clusters.show_cluster_summary(d_min=params.d_min, out=open(summary_out, "w")) print >> out, "Clusters were summarized in %s" % summary_out if params.cc_clustering.min_cmpl is not None: clusters = filter(lambda x: x[3] >= params.cc_clustering.min_cmpl, clusters) if params.cc_clustering.min_acmpl is not None: clusters = filter(lambda x: x[5] >= params.cc_clustering.min_acmpl, clusters) if params.cc_clustering.min_redun is not None: clusters = filter(lambda x: x[4] >= params.cc_clustering.min_redun, clusters) 
if params.cc_clustering.min_aredun is not None: clusters = filter( lambda x: x[6] >= params.cc_clustering.min_aredun, clusters) if params.cc_clustering.max_clheight is not None: clusters = filter( lambda x: x[2] <= params.cc_clustering.max_clheight, clusters) if params.max_clusters is not None and len( clusters) > params.max_clusters: print >> out, "Only first %d (/%d) clusters will be merged (as specified by max_clusters=)" % ( params.max_clusters, len(clusters)) clusters = clusters[:params.max_clusters] if clusters: print >> out, "With specified conditions, following %d clusters will be merged:" % len( clusters) else: print >> out, "\nERROR: No clusters satisfied the specified conditions for merging!" print >> out, "Please change criteria of completeness or redundancy" print >> out, "Here is the table of completeness and redundancy for each cluster:\n" print >> out, open(summary_out).read() for clno, IDs, clh, cmpl, redun, acmpl, aredun, ccmean, ccmin in clusters: # process largest first print >> out, " Cluster_%.4d NumDS= %4d CLh= %5.1f Cmpl= %6.2f Redun= %4.1f ACmpl=%6.2f ARedun=%4.1f CCmean=% .4f CCmin=% .4f" % ( clno, len(IDs), clh, cmpl, redun, acmpl, aredun, ccmean, ccmin) data_for_merge.append((os.path.join(params.workdir, "cluster_%.4d" % clno), map(lambda x: xds_ascii_files[x - 1], IDs), float("nan"), float("nan"), clh)) print >> out try: html_report.add_clutering_result(clusters, "cc_clustering") except: print >> out, traceback.format_exc() else: data_for_merge.append((os.path.join(params.workdir, "all_data"), xds_ascii_files, float("nan"), float("nan"), 0)) ofs_summary = open(os.path.join(params.workdir, "cluster_summary.dat"), "w") ofs_summary.write( "# d_min= %.3f A\n" % (params.d_min if params.d_min is not None else float("nan"))) ofs_summary.write("# LCV and aLCV are values of all data\n") ofs_summary.write( " cluster ClH LCV aLCV run ds.all ds.used Cmpl Redun I/sigI Rmeas CC1/2 Cmpl.ou Red.ou I/sig.ou Rmeas.ou CC1/2.ou Cmpl.in Red.in I/sig.in 
Rmeas.in CC1/2.in SigAno.in CCano.in WilsonB Aniso.bst Aniso.wst dmin.est\n" ) out.flush() def write_ofs_summary(workdir, cycle, clh, LCV, aLCV, xds_files, num_files, stats): tmps = "%12s %6.2f %4.1f %4.1f %3d %6d %7d %5.1f %5.1f %6.2f %5.1f %5.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %9.1f %8.1f %7.2f %9.2f %9.2f %.2f\n" ofs_summary.write(tmps % ( os.path.relpath(workdir, params.workdir), clh, LCV, aLCV, cycle, len(xds_files), num_files, stats["cmpl"][0], stats["redundancy"][0], stats["i_over_sigma"][0], stats["r_meas"][0], stats["cc_half"][0], stats["cmpl"][2], stats["redundancy"][2], stats["i_over_sigma"][2], stats["r_meas"][2], stats["cc_half"][2], stats["cmpl"][1], stats["redundancy"][1], stats["i_over_sigma"][1], stats["r_meas"][1], stats["cc_half"][1], stats["sig_ano"][1], stats["cc_ano"][1], stats["xtriage_log"].wilson_b, #stats["xtriage_log"].anisotropy, stats["aniso"]["d_min_best"], stats["aniso"]["d_min_worst"], stats["dmin_est"], )) ofs_summary.flush() # write_ofs_summary() if "merging" in params.batch.par_run: params.nproc = params.batch.nproc_each jobs = [] for workdir, xds_files, LCV, aLCV, clh in data_for_merge: if not os.path.exists(workdir): os.makedirs(workdir) shname = "merge_%s.sh" % os.path.relpath(workdir, params.workdir) pickle.dump((params, os.path.abspath(workdir), xds_files, cells, space_group), open(os.path.join(workdir, "args.pkl"), "w"), -1) job = batchjob.Job(workdir, shname, nproc=params.batch.nproc_each) job.write_script("""\ cd "%s" || exit 1 "%s" -c '\ import pickle; \ from yamtbx.dataproc.auto.command_line.multi_merge import merge_datasets; \ args = pickle.load(open("args.pkl")); \ ret = merge_datasets(*args); \ pickle.dump(ret, open("result.pkl","w")); \ ' """ % (os.path.abspath(workdir), sys.executable)) batchjobs.submit(job) jobs.append(job) batchjobs.wait_all(jobs) for workdir, xds_files, LCV, aLCV, clh in data_for_merge: try: results = pickle.load(open(os.path.join(workdir, "result.pkl"))) except: 
print >> out, "Error in unpickling result in %s" % workdir print >> out, traceback.format_exc() results = [] if len(results) == 0: ofs_summary.write("#%s failed\n" % os.path.relpath(workdir, params.workdir)) lcv, alcv = float("nan"), float("nan") for cycle, wd, num_files, stats in results: lcv, alcv = stats.get("lcv", LCV), stats.get("alcv", aLCV) write_ofs_summary(workdir, cycle, clh, lcv, alcv, xds_files, num_files, stats) # Last lcv & alcv try: html_report.add_merge_result(workdir, clh, lcv, alcv, xds_files, results[-1][2], results[-1][3]) except: print >> out, traceback.format_exc() else: for workdir, xds_files, LCV, aLCV, clh in data_for_merge: print >> out, "Merging %s..." % os.path.relpath( workdir, params.workdir) out.flush() results = merge_datasets(params, workdir, xds_files, cells, space_group) if len(results) == 0: ofs_summary.write("#%s failed\n" % os.path.relpath(workdir, params.workdir)) for cycle, wd, num_files, stats in results: lcv, alcv = stats.get("lcv", LCV), stats.get("alcv", aLCV) write_ofs_summary(workdir, cycle, clh, lcv, alcv, xds_files, num_files, stats) try: html_report.add_merge_result(workdir, clh, lcv, alcv, xds_files, results[-1][2], results[-1][3]) except: print >> out, traceback.format_exc() try: html_report.write_html() except: print >> out, traceback.format_exc() print "firefox %s" % os.path.join(html_report.root, "report.html") out.write("\nNormal exit at %s\n" % time.strftime("%Y-%m-%d %H:%M:%S")) out.write("Total wall-clock time: %.2f sec.\n" % (time.time() - time_started)) return
def run(params): if os.path.isdir(params.workdir) and os.listdir(params.workdir): print "Directory already exists and not empty:", params.workdir return if params.reference_file is not None and params.program != "xscale": print "WARNING - reference file is not used unless program=xscale." if not os.path.isdir(params.workdir): os.makedirs(params.workdir) if params.batch.engine == "sge": batchjobs = batchjob.SGE(pe_name=params.batch.sge_pe_name) elif params.batch.engine == "sh": batchjobs = batchjob.ExecLocal(max_parallel=params.batch.sh_max_jobs) else: raise "Unknown batch engine: %s" % params.batch.engine out = multi_out() out.register("log", open(os.path.join(params.workdir, "multi_merge.log"), "w"), atexit_send_to=None) out.register("stdout", sys.stdout) print >>out, "Paramters:" libtbx.phil.parse(master_params_str).format(params).show(out=out, prefix=" ") print >>out, "" # XXX Not works when clustering is used.. html_report = multi_merging.html_report.HtmlReportMulti(os.path.abspath(params.workdir)) try: html_report.add_params(params, master_params_str) except: print >>out, traceback.format_exc() xds_ascii_files = map(lambda x: x[:(x.index("#") if "#" in x else None)].strip(), open(params.lstin)) xds_ascii_files = filter(lambda x: x!="" and os.path.isfile(x), xds_ascii_files) xds_ascii_files = map(lambda x: os.path.abspath(x), xds_ascii_files) cells = collections.OrderedDict() laues = {} # for check for xac in xds_ascii_files: try: symm = XDS_ASCII(xac, read_data=False).symm except: try: symm = any_reflection_file(xac).as_miller_arrays()[0].crystal_symmetry() except: print >>out, "Error in reading %s" % xac print >>out, traceback.format_exc() return cells[xac] = symm.unit_cell().parameters() laue = symm.space_group().build_derived_reflection_intensity_group(False).info() laues.setdefault(str(laue),{}).setdefault(symm.space_group_info().type().number(), []).append(xac) if len(laues) > 1: print >>out, "ERROR! more than one space group included." 
for laue in laues: print "Laue symmetry", laue for sg in laues[laue]: print >>out, " SPACE_GROUP_NUMBER= %d (%d data)" % (sg, len(laues[laue][sg])) for f in laues[laue][sg]: print >>out, " %s" % f print >>out, "" return space_group = None if params.space_group is not None: space_group = sgtbx.space_group_info(params.space_group).group() laue_given = str(space_group.build_derived_reflection_intensity_group(False).info()) if laue_given != laues.keys()[0]: print >>out, "ERROR! user-specified space group (space_group=%s) is not compatible with input files (%s)" % (params.space_group, laues.keys()[0]) return else: tmp = sgtbx.space_group_info(laues.values()[0].keys()[0]).group().build_derived_reflection_intensity_group(True) print >>out, "Space group for merging:", tmp.info() try: html_report.add_cells_and_files(cells, laues.keys()[0]) except: print >>out, traceback.format_exc() data_for_merge = [] if params.clustering == "blend": if params.blend.use_old_result is None: blend_wdir = os.path.join(params.workdir, "blend") os.mkdir(blend_wdir) blend.run_blend0R(blend_wdir, xds_ascii_files) print >>out, "\nRunning BLEND with analysis mode" else: blend_wdir = params.blend.use_old_result print >>out, "\nUsing precalculated BLEND result in %s" % params.blend.use_old_result blend_clusters = blend.BlendClusters(workdir=blend_wdir, d_min=params.d_min) summary_out = os.path.join(blend_wdir, "blend_cluster_summary.dat") clusters = blend_clusters.show_cluster_summary(out=open(summary_out, "w")) print >>out, "Clusters found by BLEND were summarized in %s" % summary_out if params.blend.min_cmpl is not None: clusters = filter(lambda x: x[3] >= params.blend.min_cmpl, clusters) if params.blend.min_acmpl is not None: clusters = filter(lambda x: x[5] >= params.blend.min_acmpl, clusters) if params.blend.min_redun is not None: clusters = filter(lambda x: x[4] >= params.blend.min_redun, clusters) if params.blend.min_aredun is not None: clusters = filter(lambda x: x[6] >= 
params.blend.min_aredun, clusters) if params.blend.max_LCV is not None: clusters = filter(lambda x: x[7] <= params.blend.max_LCV, clusters) if params.blend.max_aLCV is not None: clusters = filter(lambda x: x[8] <= params.blend.max_aLCV, clusters) if params.max_clusters is not None and len(clusters) > params.max_clusters: print >>out, "Only first %d (/%d) clusters will be merged (as specified by max_clusters=)" % (params.max_clusters, len(clusters)) clusters = clusters[:params.max_clusters] print >>out, "With specified conditions, following %d clusters will be merged:" % len(clusters) for clno, IDs, clh, cmpl, redun, acmpl, aredun, LCV, aLCV in clusters: # process largest first print >>out, " Cluster_%.4d NumDS= %4d CLh= %5.1f Cmpl= %6.2f Redun= %4.1f ACmpl=%6.2f ARedun=%4.1f LCV= %5.1f aLCV=%5.1f" % (clno, len(IDs), clh, cmpl, redun, acmpl, aredun, LCV, aLCV) data_for_merge.append((os.path.join(params.workdir, "cluster_%.4d"%clno), map(lambda x: blend_clusters.files[x-1], IDs), LCV, aLCV,clh)) print >>out try: html_report.add_clutering_result(clusters, "blend") except: print >>out, traceback.format_exc() elif params.clustering == "cc": ccc_wdir = os.path.join(params.workdir, "cc_clustering") os.mkdir(ccc_wdir) cc_clusters = cc_clustering.CCClustering(ccc_wdir, xds_ascii_files, d_min=params.cc_clustering.d_min if params.cc_clustering.d_min is not None else params.d_min, min_ios=params.cc_clustering.min_ios) print >>out, "\nRunning CC-based clustering" cc_clusters.do_clustering(nproc=params.cc_clustering.nproc, b_scale=params.cc_clustering.b_scale, use_normalized=params.cc_clustering.use_normalized, html_maker=html_report) summary_out = os.path.join(ccc_wdir, "cc_cluster_summary.dat") clusters = cc_clusters.show_cluster_summary(d_min=params.d_min, out=open(summary_out, "w")) print >>out, "Clusters were summarized in %s" % summary_out if params.cc_clustering.min_cmpl is not None: clusters = filter(lambda x: x[3] >= params.cc_clustering.min_cmpl, clusters) if 
params.cc_clustering.min_acmpl is not None: clusters = filter(lambda x: x[5] >= params.cc_clustering.min_acmpl, clusters) if params.cc_clustering.min_redun is not None: clusters = filter(lambda x: x[4] >= params.cc_clustering.min_redun, clusters) if params.cc_clustering.min_aredun is not None: clusters = filter(lambda x: x[6] >= params.cc_clustering.min_aredun, clusters) if params.cc_clustering.max_clheight is not None: clusters = filter(lambda x: x[2] <= params.cc_clustering.max_clheight, clusters) if params.max_clusters is not None and len(clusters) > params.max_clusters: print >>out, "Only first %d (/%d) clusters will be merged (as specified by max_clusters=)" % (params.max_clusters, len(clusters)) clusters = clusters[:params.max_clusters] print >>out, "With specified conditions, following %d clusters will be merged:" % len(clusters) for clno, IDs, clh, cmpl, redun, acmpl, aredun in clusters: # process largest first print >>out, " Cluster_%.4d NumDS= %4d CLh= %5.1f Cmpl= %6.2f Redun= %4.1f ACmpl=%6.2f ARedun=%4.1f" % (clno, len(IDs), clh, cmpl, redun, acmpl, aredun) data_for_merge.append((os.path.join(params.workdir, "cluster_%.4d"%clno), map(lambda x: xds_ascii_files[x-1], IDs), float("nan"),float("nan"),clh)) print >>out try: html_report.add_clutering_result(clusters, "cc_clustering") except: print >>out, traceback.format_exc() else: data_for_merge.append((os.path.join(params.workdir, "all_data"), xds_ascii_files, float("nan"), float("nan"), 0)) ofs_summary = open(os.path.join(params.workdir, "cluster_summary.dat"), "w") ofs_summary.write("# d_min= %.3f A\n" % (params.d_min if params.d_min is not None else float("nan"))) ofs_summary.write("# LCV and aLCV are values of all data\n") ofs_summary.write(" cluster ClH LCV aLCV run ds.all ds.used Cmpl Redun I/sigI Rmeas CC1/2 Cmpl.ou Red.ou I/sig.ou Rmeas.ou CC1/2.ou Cmpl.in Red.in I/sig.in Rmeas.in CC1/2.in SigAno.in CCano.in WilsonB Aniso \n") out.flush() def write_ofs_summary(workdir, cycle, clh, LCV, aLCV, 
xds_files, num_files, stats): tmps = "%12s %5.2f %4.1f %4.1f %3d %6d %7d %5.1f %5.1f %6.2f %5.1f %5.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %9.1f %8.1f %7.2f %7.1e\n" ofs_summary.write(tmps % (os.path.relpath(workdir, params.workdir), clh, LCV, aLCV, cycle, len(xds_files), num_files, stats["cmpl"][0], stats["redundancy"][0], stats["i_over_sigma"][0], stats["r_meas"][0], stats["cc_half"][0], stats["cmpl"][2], stats["redundancy"][2], stats["i_over_sigma"][2], stats["r_meas"][2], stats["cc_half"][2], stats["cmpl"][1], stats["redundancy"][1], stats["i_over_sigma"][1], stats["r_meas"][1], stats["cc_half"][1], stats["sig_ano"][1], stats["cc_ano"][1], stats["xtriage_log"].wilson_b, stats["xtriage_log"].anisotropy, )) ofs_summary.flush() # write_ofs_summary() if "merging" in params.batch.par_run: params.nproc = params.batch.nproc_each jobs = [] for workdir, xds_files, LCV, aLCV, clh in data_for_merge: if not os.path.exists(workdir): os.makedirs(workdir) shname = "merge_%s.sh" % os.path.relpath(workdir, params.workdir) pickle.dump((params, os.path.abspath(workdir), xds_files, cells, space_group, batchjobs), open(os.path.join(workdir, "args.pkl"), "w"), -1) job = batchjob.Job(workdir, shname, nproc=params.batch.nproc_each) job.write_script("""\ cd "%s" || exit 1 "%s" -c '\ import pickle; \ from yamtbx.dataproc.auto.command_line.multi_merge import merge_datasets; \ args = pickle.load(open("args.pkl")); \ ofs = open("result.pkl","w"); \ ret = merge_datasets(*args); \ pickle.dump(ret, ofs); \ ' """ % (os.path.abspath(workdir), sys.executable)) batchjobs.submit(job) jobs.append(job) batchjobs.wait_all(jobs) for workdir, xds_files, LCV, aLCV, clh in data_for_merge: try: results = pickle.load(open(os.path.join(workdir, "result.pkl"))) except: print >>out, "Error in unpickling result in %s" % workdir print >>out, traceback.format_exc() results = [] if len(results) == 0: ofs_summary.write("#%s failed\n" % os.path.relpath(workdir, params.workdir)) lcv, alcv 
= float("nan"), float("nan") for cycle, wd, num_files, stats in results: lcv, alcv = stats.get("lcv", LCV), stats.get("alcv", aLCV) write_ofs_summary(workdir, cycle, clh, lcv, alcv, xds_files, num_files, stats) # Last lcv & alcv try: html_report.add_merge_result(workdir, clh, lcv, alcv, xds_files, results[-1][2], results[-1][3]) except: print >>out, traceback.format_exc() else: for workdir, xds_files, LCV, aLCV, clh in data_for_merge: print >>out, "Merging %s..." % os.path.relpath(workdir, params.workdir) out.flush() results = merge_datasets(params, workdir, xds_files, cells, space_group, batchjobs) if len(results) == 0: ofs_summary.write("#%s failed\n" % os.path.relpath(workdir, params.workdir)) for cycle, wd, num_files, stats in results: lcv, alcv = stats.get("lcv", LCV), stats.get("alcv", aLCV) write_ofs_summary(workdir, cycle, clh, lcv, alcv, xds_files, num_files, stats) try: html_report.add_merge_result(workdir, clh, lcv, alcv, xds_files, results[-1][2], results[-1][3]) except: print >>out, traceback.format_exc() try: html_report.write_html() except: print >>out, traceback.format_exc() print "firefox %s" % os.path.join(html_report.root, "report.html") return
def run(params):
    """Drive automatic multi-merging for every sample listed in params.csv.

    For each sample a dedicated work directory is created and auto_merge()
    is executed either as a batch job (SGE or local shell queue) or
    directly in this process.  Per-sample overrides read from the CSV
    ("anomalous", "reference") take precedence over the global parameters.
    """
    if not os.path.exists(params.workdir): os.makedirs(params.workdir)

    # Tee all messages to a timestamped log file and stdout
    log_out = multi_out()
    log_out.register("log", open(os.path.join(params.workdir, time.strftime("automerge_%y%m%d-%H%M%S.log")), "w"), atexit_send_to=None)
    log_out.register("stdout", sys.stdout)

    log_out.write("Paramters:\n")
    libtbx.phil.parse(gui_phil_str).format(params).show(out=log_out, prefix=" ")
    log_out.write("\n")

    # Pre-load the globally specified reference data, keyed by file name
    ref_arrays = {}
    if params.reference:
        ref_arrays[params.reference] = read_reference_data(params.reference, log_out)

    # samples[k] = (list of data directories, dict of per-sample overrides)
    samples = read_sample_info(params.csv, params.datadir)

    log_out.write("Loaded from %s\n" % params.csv)
    for k in samples:
        log_out.write(" %s\n" % k)
        log_out.write(" # Overridden parameters: %s\n" % samples[k][1])
        for d in samples[k][0]:
            log_out.write(" %s\n" % d)
        log_out.write("\n")
    log_out.flush()

    # Load custom reference data
    for k in samples:
        if not "reference" in samples[k][1]: continue
        ref_filename = samples[k][1]["reference"]
        if ref_filename in ref_arrays: continue  # already loaded
        ref_arrays[ref_filename] = read_reference_data(ref_filename, log_out)

    # Choose the batch backend; None means run auto_merge() in-process
    if params.batch.engine == "sge":
        batchjobs = batchjob.SGE(pe_name=params.batch.sge_pe_name)
    elif params.batch.engine == "sh":
        batchjobs = batchjob.ExecLocal(max_parallel=params.batch.sh_max_jobs)
    else:
        batchjobs = None

    jobs = []

    for k in samples:
        # Work on a deep copy so per-sample overrides don't leak across samples
        params2 = copy.deepcopy(params)
        ref_array = ref_arrays.get(params.reference, None)
        params2.merge.reference.data = params.reference

        # Reflect custom parameters
        if "anomalous" in samples[k][1]:
            params2.merge.anomalous = samples[k][1]["anomalous"]
        if "reference" in samples[k][1]:
            ref_array = ref_arrays[samples[k][1]["reference"]]
            params2.merge.reference.data = samples[k][1]["reference"]

        if params2.merge.reference.data:
            params2.merge.reference.data = os.path.abspath(params2.merge.reference.data)

        log_out.write("\n\n")
        # Per-sample directory name: prefix + sanitized sample name
        workdir = os.path.join(params2.workdir, "%s%s" % (params2.prefix, replace_forbidden_chars(k).replace(" ", "_")))
        os.mkdir(workdir)

        if batchjobs:
            # Submit as a batch job: pickle the auto_merge() kwargs and run
            # them from a small generated shell script.
            shname = "multimerge.sh"
            pickle.dump(dict(workdir=workdir, topdirs=samples[k][0],
                             cell_method=params2.cell_method,
                             ref_array=ref_array,
                             merge_params=params2.merge,
                             rescut_params=params2.rescut),
                        open(os.path.join(workdir, "kwargs.pkl"), "w"), -1)
            job = batchjob.Job(workdir, shname, nproc=params2.batch.nproc_each)
            job.write_script("""\
cd "%s" || exit 1
"%s" -c '\
import pickle; \
from yamtbx.dataproc.auto.command_line.auto_multi_merge import auto_merge; \
kwargs = pickle.load(open("kwargs.pkl")); \
auto_merge(**kwargs); \
'
""" % (os.path.abspath(workdir), sys.executable))
            batchjobs.submit(job)
            jobs.append(job)
        else:
            # Run in-process; a failure in one sample must not stop the rest
            try:
                auto_merge(workdir=workdir, topdirs=samples[k][0],
                           cell_method=params2.cell_method,
                           ref_array=ref_array,
                           merge_params=params2.merge,
                           rescut_params=params2.rescut,
                           log_out_all=log_out)
            except:
                log_out.write("Error occurred in %s\n%s\n" % (workdir, traceback.format_exc()))

        log_out.flush()

    if batchjobs:
        batchjobs.wait_all(jobs)
def run(params):
    """Drive automatic multi-merging for every sample listed in params.csv.

    Extended variant that also accepts a global reference crystal symmetry
    (params.space_group + params.unit_cell, reference setting only) and
    per-sample overrides ("anomalous", "reference", "reference_sym") from
    the CSV.  Each sample is processed by auto_merge() either as a batch
    job (SGE or local shell queue) or directly in this process.
    """
    if not os.path.exists(params.workdir): os.makedirs(params.workdir)

    # Tee all messages to a timestamped log file and stdout
    log_out = multi_out()
    log_out.register("log", open(os.path.join(params.workdir, time.strftime("automerge_%y%m%d-%H%M%S.log")), "w"), atexit_send_to=None)
    log_out.register("stdout", sys.stdout)

    log_out.write("Paramters:\n")
    libtbx.phil.parse(gui_phil_str).format(params).show(out=log_out, prefix=" ")
    log_out.write("\n")

    # space_group and unit_cell must be given together (count(None)==1
    # means exactly one of the two was supplied)
    if (params.space_group, params.unit_cell).count(None) == 1:
        log_out.write("Error: Specify both space_group and unit_cell!")
        return

    # Build the global reference symmetry, if requested; only the
    # reference setting is accepted.
    ref_sym_global = None
    if params.space_group is not None:
        try:
            ref_sym_global = crystal.symmetry(params.unit_cell, params.space_group)
            if not ref_sym_global.change_of_basis_op_to_reference_setting().is_identity_op():
                xs_refset = ref_sym_global.as_reference_setting()
                log_out.write('Sorry. Currently space group in non-reference setting is not supported. In this case please give space_group=%s unit_cell="%s" instead.' % (str(xs_refset.space_group_info()).replace(" ", ""), format_unit_cell(xs_refset.unit_cell())))
                return
        except:
            log_out.write("Invalid crystal symmetry. Check space_group= and unit_cell=.")
            return

    # Pre-load the globally specified reference data, keyed by file name
    ref_arrays = {}
    if params.reference:
        ref_arrays[params.reference] = read_reference_data(params.reference, log_out)

    # samples[k] = (list of data directories, dict of per-sample overrides)
    samples = read_sample_info(params.csv, params.datadir)

    log_out.write("Loaded from %s\n" % params.csv)
    for k in samples:
        log_out.write(" %s\n" % k)
        log_out.write(" # Overridden parameters: %s\n" % samples[k][1])
        for d in samples[k][0]:
            log_out.write(" %s\n" % d)
        log_out.write("\n")
    log_out.flush()

    # Load custom reference data
    for k in samples:
        if not "reference" in samples[k][1]: continue
        ref_filename = samples[k][1]["reference"]
        if ref_filename in ref_arrays: continue  # already loaded
        ref_arrays[ref_filename] = read_reference_data(ref_filename, log_out)

    # Choose the batch backend; None means run auto_merge() in-process
    if params.batch.engine == "sge":
        batchjobs = batchjob.SGE(pe_name=params.batch.sge_pe_name)
    elif params.batch.engine == "sh":
        batchjobs = batchjob.ExecLocal(max_parallel=params.batch.sh_max_jobs)
    else:
        batchjobs = None

    jobs = []

    for k in samples:
        # Work on a deep copy so per-sample overrides don't leak across samples
        params2 = copy.deepcopy(params)
        ref_array = ref_arrays.get(params.reference, None)
        ref_sym = ref_sym_global
        params2.merge.reference.data = params.reference

        # Reflect custom parameters
        if "anomalous" in samples[k][1]:
            params2.merge.anomalous = samples[k][1]["anomalous"]
        if "reference" in samples[k][1]:
            ref_array = ref_arrays[samples[k][1]["reference"]]
            params2.merge.reference.data = samples[k][1]["reference"]
        if "reference_sym" in samples[k][1]:
            ref_sym = samples[k][1]["reference_sym"]

        if params2.merge.reference.data:
            params2.merge.reference.data = os.path.abspath(params2.merge.reference.data)

        log_out.write("\n\n")
        # Per-sample directory name: prefix + sanitized sample name
        workdir = os.path.join(params2.workdir, "%s%s" % (params2.prefix, replace_forbidden_chars(k).replace(" ", "_")))
        os.mkdir(workdir)

        if batchjobs:
            # Submit as a batch job: pickle the auto_merge() kwargs and run
            # them from a small generated shell script.
            shname = "multimerge.sh"
            pickle.dump(dict(workdir=workdir, topdirs=samples[k][0],
                             cell_method=params2.cell_method,
                             ref_array=ref_array,
                             ref_sym=ref_sym,
                             merge_params=params2.merge,
                             rescut_params=params2.rescut,
                             filter_params=params2.filtering),
                        open(os.path.join(workdir, "kwargs.pkl"), "w"), -1)
            job = batchjob.Job(workdir, shname, nproc=params2.batch.nproc_each)
            job.write_script("""\
cd "%s" || exit 1
"%s" -c '\
import pickle; \
from yamtbx.dataproc.auto.command_line.auto_multi_merge import auto_merge; \
kwargs = pickle.load(open("kwargs.pkl")); \
auto_merge(**kwargs); \
'
""" % (os.path.abspath(workdir), sys.executable))
            batchjobs.submit(job)
            jobs.append(job)
        else:
            # Run in-process; a failure in one sample must not stop the rest
            try:
                auto_merge(workdir=workdir, topdirs=samples[k][0],
                           cell_method=params2.cell_method,
                           ref_array=ref_array,
                           ref_sym=ref_sym,
                           merge_params=params2.merge,
                           rescut_params=params2.rescut,
                           filter_params=params2.filtering,
                           log_out_all=log_out)
            except:
                log_out.write("Error occurred in %s\n%s\n" % (workdir, traceback.format_exc()))

        log_out.flush()

    if batchjobs:
        batchjobs.wait_all(jobs)