def multi_crystal_analysis(stop_after=None): '''Actually process something...''' assert os.path.exists('xia2.json') from xia2.Schema.XProject import XProject xinfo = XProject.from_json(filename='xia2.json') crystals = xinfo.get_crystals() for crystal_id, crystal in crystals.iteritems(): cwd = os.path.abspath(os.curdir) working_directory = Environment.generate_directory( [crystal.get_name(), 'analysis']) os.chdir(working_directory) scaler = crystal._get_scaler() #epoch_to_si = {} epoch_to_batches = {} epoch_to_integrated_intensities = {} epoch_to_sweep_name = {} epoch_to_experiments_filename = {} epoch_to_experiments = {} sweep_name_to_epoch = {} epoch_to_first_image = {} from dxtbx.serialize import load try: epochs = scaler._sweep_information.keys() for epoch in epochs: si = scaler._sweep_information[epoch] epoch_to_batches[epoch] = si['batches'] epoch_to_integrated_intensities[epoch] = si[ 'corrected_intensities'] epoch_to_sweep_name[epoch] = si['sname'] sweep_name_to_epoch[si['name']] = epoch intgr = si['integrater'] epoch_to_experiments_filename[ epoch] = intgr.get_integrated_experiments() epoch_to_experiments[epoch] = load.experiment_list( intgr.get_integrated_experiments()) except AttributeError: epochs = scaler._sweep_handler.get_epochs() for epoch in epochs: si = scaler._sweep_handler.get_sweep_information(epoch) epoch_to_batches[epoch] = si.get_batches() epoch_to_integrated_intensities[epoch] = si.get_reflections() epoch_to_sweep_name[epoch] = si.get_sweep_name() sweep_name_to_epoch[si.get_sweep_name()] = epoch intgr = si.get_integrater() epoch_to_experiments_filename[ epoch] = intgr.get_integrated_experiments() epoch_to_experiments[epoch] = load.experiment_list( intgr.get_integrated_experiments()) from xia2.Wrappers.Dials.StereographicProjection import StereographicProjection sp_json_files = {} for hkl in ((1, 0, 0), (0, 1, 0), (0, 0, 1)): sp = StereographicProjection() auto_logfiler(sp) sp.set_working_directory(working_directory) for experiments in 
epoch_to_experiments_filename.values(): sp.add_experiments(experiments) sp.set_hkl(hkl) sp.run() sp_json_files[hkl] = sp.get_json_filename() unmerged_mtz = scaler.get_scaled_reflections( 'mtz_unmerged').values()[0] from iotbx.reflection_file_reader import any_reflection_file reader = any_reflection_file(unmerged_mtz) from xia2.Wrappers.XIA.PlotMultiplicity import PlotMultiplicity mult_json_files = {} for axis in ('h', 'k', 'l'): pm = PlotMultiplicity() auto_logfiler(pm) pm.set_working_directory(working_directory) pm.set_mtz_filename(unmerged_mtz) pm.set_slice_axis(axis) pm.set_show_missing(True) pm.run() mult_json_files[axis] = pm.get_json_filename() intensities = None batches = None assert reader.file_type() == 'ccp4_mtz' arrays = reader.as_miller_arrays(merge_equivalents=False) for ma in arrays: if ma.info().labels == ['BATCH']: batches = ma elif ma.info().labels == ['I', 'SIGI']: intensities = ma elif ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']: intensities = ma from xia2.Wrappers.CCP4.Blend import Blend hand_blender = Blend() hand_blender.set_working_directory(working_directory) auto_logfiler(hand_blender) Citations.cite('blend') from xia2.Handlers.Environment import which Rscript_binary = which('Rscript', debug=False) if Rscript_binary is None: Chatter.write('Skipping BLEND analysis: Rscript not available') else: for epoch in epochs: hand_blender.add_hklin(epoch_to_integrated_intensities[epoch], label=epoch_to_sweep_name[epoch]) hand_blender.analysis() Chatter.write("Dendrogram saved to: %s" % hand_blender.get_dendrogram_file()) analysis = hand_blender.get_analysis() summary = hand_blender.get_summary() clusters = hand_blender.get_clusters() ddict = hand_blender.plot_dendrogram() phil_files_dir = 'phil_files' if not os.path.exists(phil_files_dir): os.makedirs(phil_files_dir) rows = [] headers = [ 'Cluster', 'Datasets', 'Multiplicity', 'Completeness', 'LCV', 'aLCV', 'Average unit cell' ] completeness = flex.double() average_unit_cell_params = [] 
for i, cluster in clusters.iteritems(): print i sel_cluster = flex.bool(batches.size(), False) cluster_uc_params = [flex.double() for k in range(6)] for j in cluster['dataset_ids']: epoch = epochs[j - 1] batch_start, batch_end = epoch_to_batches[epoch] sel_cluster |= ((batches.data() >= batch_start) & (batches.data() <= batch_end)) expts = epoch_to_experiments.get(epoch) assert expts is not None, (epoch) assert len(expts) == 1, len(expts) expt = expts[0] uc_params = expt.crystal.get_unit_cell().parameters() for k in range(6): cluster_uc_params[k].append(uc_params[k]) intensities_cluster = intensities.select(sel_cluster) merging = intensities_cluster.merge_equivalents() merged_intensities = merging.array() multiplicities = merging.redundancies() completeness.append(merged_intensities.completeness()) average_unit_cell_params.append( tuple(flex.mean(p) for p in cluster_uc_params)) dataset_ids = cluster['dataset_ids'] assert min(dataset_ids) > 0 with open( os.path.join(phil_files_dir, 'blend_cluster_%i_images.phil' % i), 'wb') as f: sweep_names = [ hand_blender._labels[dataset_id - 1] for dataset_id in dataset_ids ] for sweep_name in sweep_names: expts = epoch_to_experiments.get( sweep_name_to_epoch.get(sweep_name)) assert expts is not None, ( sweep_name, sweep_name_to_epoch.get(sweep_name)) assert len(expts) == 1, len(expts) expt = expts[0] print >> f, 'xia2.settings.input.image = %s' % expt.imageset.get_path( 0) rows.append([ '%i' % i, ' '.join(['%i'] * len(dataset_ids)) % tuple(dataset_ids), '%.1f' % flex.mean(multiplicities.data().as_double()), '%.2f' % completeness[-1], '%.2f' % cluster['lcv'], '%.2f' % cluster['alcv'], '%g %g %g %g %g %g' % average_unit_cell_params[-1] ]) # sort table by completeness perm = flex.sort_permutation(completeness) rows = [rows[i] for i in perm] print print 'Unit cell clustering summary:' print tabulate(rows, headers, tablefmt='rst') print blend_html = tabulate(rows, headers, tablefmt='html').replace( '<table>', '<table class="table 
table-hover table-condensed">').replace( '<td>', '<td style="text-align: right;">') # XXX what about multiple wavelengths? with open('batches.phil', 'wb') as f: try: for epoch, si in scaler._sweep_information.iteritems(): print >> f, "batch {" print >> f, " id=%s" % si['sname'] print >> f, " range=%i,%i" % tuple(si['batches']) print >> f, "}" except AttributeError: for epoch in scaler._sweep_handler.get_epochs(): si = scaler._sweep_handler.get_sweep_information(epoch) print >> f, "batch {" print >> f, " id=%s" % si.get_sweep_name() print >> f, " range=%i,%i" % tuple(si.get_batches()) print >> f, "}" from xia2.Wrappers.XIA.MultiCrystalAnalysis import MultiCrystalAnalysis mca = MultiCrystalAnalysis() auto_logfiler(mca, extra="MultiCrystalAnalysis") mca.add_command_line_args([ scaler.get_scaled_reflections(format="sca_unmerged").values()[0], "unit_cell=%s %s %s %s %s %s" % tuple(scaler.get_scaler_cell()), "batches.phil" ]) mca.set_working_directory(working_directory) mca.run() intensity_clusters = mca.get_clusters() rows = [] headers = [ 'Cluster', 'Datasets', 'Multiplicity', 'Completeness', 'Height', 'Average unit cell' ] completeness = flex.double() average_unit_cell_params = [] for i, cluster in intensity_clusters.iteritems(): sel_cluster = flex.bool(batches.size(), False) cluster_uc_params = [flex.double() for k in range(6)] for j in cluster['datasets']: epoch = epochs[j - 1] batch_start, batch_end = epoch_to_batches[epoch] sel_cluster |= ((batches.data() >= batch_start) & (batches.data() <= batch_end)) expts = epoch_to_experiments.get(epoch) assert expts is not None, (epoch) assert len(expts) == 1, len(expts) expt = expts[0] uc_params = expt.crystal.get_unit_cell().parameters() for k in range(6): cluster_uc_params[k].append(uc_params[k]) intensities_cluster = intensities.select(sel_cluster) merging = intensities_cluster.merge_equivalents() merged_intensities = merging.array() multiplicities = merging.redundancies() 
completeness.append(merged_intensities.completeness()) average_unit_cell_params.append( tuple(flex.mean(p) for p in cluster_uc_params)) dataset_ids = cluster['datasets'] rows.append([ '%i' % int(i), ' '.join(['%i'] * len(dataset_ids)) % tuple(dataset_ids), '%.1f' % flex.mean(multiplicities.data().as_double()), '%.2f' % completeness[-1], '%.2f' % cluster['height'], '%g %g %g %g %g %g' % average_unit_cell_params[-1] ]) # sort table by completeness perm = flex.sort_permutation(completeness) rows = [rows[i] for i in perm] print 'Intensity clustering summary:' print tabulate(rows, headers, tablefmt='rst') print intensity_clustering_html = tabulate( rows, headers, tablefmt='html').replace( '<table>', '<table class="table table-hover table-condensed">').replace( '<td>', '<td style="text-align: right;">') import json json_data = {} if ddict is not None: from xia2.Modules.MultiCrystalAnalysis import scipy_dendrogram_to_plotly_json json_data['blend_dendrogram'] = scipy_dendrogram_to_plotly_json(ddict) else: json_data['blend_dendrogram'] = {'data': [], 'layout': {}} json_data['intensity_clustering'] = mca.get_dict() del json_data['intensity_clustering']['clusters'] for hkl in ((1, 0, 0), (0, 1, 0), (0, 0, 1)): with open(sp_json_files[hkl], 'rb') as f: d = json.load(f) d['layout'][ 'title'] = 'Stereographic projection (hkl=%i%i%i)' % hkl json_data['stereographic_projection_%s%s%s' % hkl] = d for axis in ('h', 'k', 'l'): with open(mult_json_files[axis], 'rb') as f: json_data['multiplicity_%s' % axis] = json.load(f) json_str = json.dumps(json_data, indent=2) javascript = ['var graphs = %s' % (json_str)] javascript.append( 'Plotly.newPlot(blend_dendrogram, graphs.blend_dendrogram.data, graphs.blend_dendrogram.layout);' ) javascript.append( 'Plotly.newPlot(intensity_clustering, graphs.intensity_clustering.data, graphs.intensity_clustering.layout);' ) for hkl in ((1, 0, 0), (0, 1, 0), (0, 0, 1)): javascript.append( 'Plotly.newPlot(stereographic_projection_%(hkl)s, 
graphs.stereographic_projection_%(hkl)s.data, graphs.stereographic_projection_%(hkl)s.layout);' % ({ 'hkl': "%s%s%s" % hkl })) for axis in ('h', 'k', 'l'): javascript.append( 'Plotly.newPlot(multiplicity_%(axis)s, graphs.multiplicity_%(axis)s.data, graphs.multiplicity_%(axis)s.layout);' % ({ 'axis': axis })) html_header = ''' <head> <!-- Plotly.js --> <script src="https://cdn.plot.ly/plotly-latest.min.js"></script> <meta name="viewport" content="width=device-width, initial-scale=1" charset="UTF-8"> <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"/> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <style type="text/css"> body { /*font-family: Helmet, Freesans, Helvetica, Arial, sans-serif;*/ margin: 8px; min-width: 240px; margin-left: 5%; margin-right: 5%; } .plot { float: left; width: 1200px; height: 800px; margin-bottom: 20px; } .square_plot { float: left; width: 800px; height: 800px; margin-bottom: 20px; } </style> </head> ''' html_body = ''' <body> <div class="page-header"> <h1>Multi-crystal analysis report</h1> </div> <div class="panel-group"> <div class="panel panel-default"> <div class="panel-heading" data-toggle="collapse" href="#collapse_multiplicity"> <h4 class="panel-title"> <a>Multiplicity plots</a> </h4> </div> <div id="collapse_multiplicity" class="panel-collapse collapse"> <div class="panel-body"> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="multiplicity_h"></div> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="multiplicity_k"></div> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="multiplicity_l"></div> </div> </div> </div> <div class="panel panel-default"> <div class="panel-heading" data-toggle="collapse" href="#collapse_stereographic_projection"> <h4 class="panel-title"> <a>Stereographic projections</a> </h4> </div> <div 
id="collapse_stereographic_projection" class="panel-collapse collapse"> <div class="panel-body"> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="stereographic_projection_100"></div> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="stereographic_projection_010"></div> <div class="col-xs-12 col-sm-12 col-md-12 square_plot" id="stereographic_projection_001"></div> </div> </div> </div> <div class="panel panel-default"> <div class="panel-heading" data-toggle="collapse" href="#collapse_cell"> <h4 class="panel-title"> <a>Unit cell clustering</a> </h4> </div> <div id="collapse_cell" class="panel-collapse collapse"> <div class="panel-body"> <div class="col-xs-12 col-sm-12 col-md-12 plot" id="blend_dendrogram"></div> <div class="table-responsive" style="width: 800px"> %(blend_html)s </div> </div> </div> </div> <div class="panel panel-default"> <div class="panel-heading" data-toggle="collapse" href="#collapse_intensity"> <h4 class="panel-title"> <a>Intensity clustering</a> </h4> </div> <div id="collapse_intensity" class="panel-collapse collapse"> <div class="panel-body"> <div class="col-xs-12 col-sm-12 col-md-12 plot" id="intensity_clustering" style="height:1000px"></div> <div class="table-responsive" style="width: 800px"> %(intensity_clustering_html)s </div> </div> </div> </div> </div> <script> %(script)s </script> </body> ''' % { 'script': '\n'.join(javascript), 'blend_html': blend_html, 'intensity_clustering_html': intensity_clustering_html } html = '\n'.join([html_header, html_body]) print "Writing html report to: %s" % 'multi-crystal-report.html' with open('multi-crystal-report.html', 'wb') as f: print >> f, html.encode('ascii', 'xmlcharrefreplace') write_citations() Environment.cleanup() return
def integration_status_section(xproject):
  '''Generate the "Integration status per image" rst section of the report.

  For each sweep of each wavelength, the per-image status string produced
  by the integrater is parsed into one symbol per image and rendered both
  as a per-category count in a summary table and as rows of status icons
  (60 images per row).

  :param xproject: xia2 XProject to report on
  :returns: list of rst lines
  '''
  lines = []
  status_lines = []

  lines.append('\n')
  lines.append('.. _Integration status for images by wavelength and sweep:\n')
  lines.append('Integration status per image')
  lines.append('=' * len(lines[-1]))
  lines.append(
    'The following sections show the status of each image from the final '
    'integration run performed on each sweep within each dataset. The table '
    'below summarises the image status for each dataset and sweep.')

  overall_table = []
  headers = ['Dataset', 'Sweep', 'Good', 'Ok', 'Bad rmsd', 'Overloaded',
             'Many bad', 'Weak', 'Abandoned', 'Total']

  # Mapping between status categories and the single-character symbols used
  # in the integrater's per-image status string; the inverse map is used
  # when rendering each symbol back to an icon.
  status_to_symbol = dict(
    good='o', ok='%', bad_rmsd='!', overloaded='O', many_bad='#', weak='.',
    abandoned='@')
  symbol_to_status = dict(
    reversed(item) for item in status_to_symbol.items())

  xia2_root_dir = libtbx.env.find_in_repositories("xia2", optional=False)

  # FIXME we should copy these icons to the local directory
  img_dir = os.path.join(xia2_root_dir, 'Modules', 'Xia2html', 'icons')
  # rst substitution definitions: one |name| image per status category.
  for s in status_to_symbol.keys():
    status_lines.append('.. |%s| image:: %s/img_%s.png' %(s, img_dir, s))
    #status_lines.append(' :align: center')
  status_lines.append('\n')

  for cname, xcryst in xproject.get_crystals().iteritems():
    for wname in xcryst.get_wavelength_names():
      xwav = xcryst.get_xwavelength(wname)
      for xsweep in xwav.get_sweeps():
        intgr = xsweep._get_integrater()
        stats = intgr.show_per_image_statistics()
        # Cut the run of status symbols out of the human-readable report:
        # everything between the section heading's ':' and the legend.
        status = stats.split(
          'Integration status per image')[1].split(':')[1].split(
            '"o" => good')[0].strip()
        status = ''.join(status.split())
        # Per-category counts plus the total number of images.
        overall_table.append(
          [wname, xsweep.get_name()] +
          [status.count(status_to_symbol[h.lower().replace(' ', '_')])
           for h in headers[2:-1]] +
          [len(status)])

        # Lay out one icon per image, 60 icons per table row.
        status_table = []
        row = []
        for symbol in status:
          if len(row) == 60:
            status_table.append(row)
            row = []
          row.append('<div class="%s status"></div>' %symbol_to_status[symbol])
        if len(row):
          status_table.append(row)

        status_lines.append('\n')
        status_lines.append('Dataset %s' %wname)
        status_lines.append('-' * len(status_lines[-1]))
        status_lines.append('\n')
        batches = xsweep.get_image_range()
        status_lines.append(
          '%s: batches %d to %d' %(
            xsweep.get_name(), batches[0], batches[1]))
        status_lines.append('\n')

        html_table = indent_text(
          tabulate(status_table, tablefmt='html'), indent=3)
        html_table = html_table.replace(
          '<table>', '<table class="status-table">')
        status_lines.append('.. raw:: html\n')
        status_lines.append(html_table)
        status_lines.append('\n')

        #if '(60/record)' in stats:
          #status_lines.append('\n')
          #status_lines.append('.. note:: (60 images/record)')
          #status_lines.append('\n')

  lines.append('\n')

  # Header row of icons for the summary table (blank for non-status columns).
  icons = []
  for h in headers:
    h = h.lower().replace(' ', '_')
    if h in status_to_symbol:
      icons.append('<div class="%s status"></div>' %h)
    else:
      icons.append(' ')
  overall_table.insert(0, icons)

  lines.append('.. raw:: html\n')
  html_table = indent_text(
    tabulate(overall_table, headers, tablefmt='html'), indent=3)
  lines.append(html_table)
  lines.append('\n')
  lines.extend(status_lines)
  return lines
def detailed_statistics_section(xproject):
  '''Generate the "Detailed statistics for each dataset" rst section.

  Formats the merging statistics stored on each crystal as one grid table
  per dataset, with overall / low-resolution / high-resolution columns.

  :param xproject: xia2 XProject to report on
  :returns: list of rst lines
  '''
  lines = []
  lines.append('\n')
  lines.append('.. _Full statistics for each wavelength:\n')
  lines.append('\n')
  lines.append('Detailed statistics for each dataset')
  lines.append('=' * len(lines[-1]))
  lines.append('\n')

  for cname, xcryst in xproject.get_crystals().iteritems():
    statistics_all = xcryst.get_statistics()
    from collections import OrderedDict
    for key, statistics in statistics_all.iteritems():
      pname, xname, dname = key
      lines.append('Dataset %s' %dname)
      lines.append('-' * len(lines[-1]))

      table = []
      headers = [' ', 'Overall', 'Low', 'High']
      width = 3
      # Four Completeness values implies an extra "all reflections" column
      # (statistics beyond the suggested resolution cutoff).
      if 'Completeness' in statistics and len(statistics['Completeness']) == 4:
        headers = [' ', 'Suggested', 'Low', 'High', 'All reflections']
        width = 4
      available = statistics.keys()

      # Preferred row order and printf-style format for each statistic.
      formats = OrderedDict([
        ('High resolution limit', '%6.2f'),
        ('Low resolution limit', '%6.2f'),
        ('Completeness', '%5.1f'),
        ('Multiplicity', '%5.1f'),
        ('I/sigma', '%5.1f'),
        ('Rmerge(I)', '%5.3f'),
        ('Rmerge(I+/-)', '%5.3f'),
        ('Rmeas(I)', '%5.3f'),
        ('Rmeas(I+/-)', '%5.3f'),
        ('Rpim(I)', '%5.3f'),
        ('Rpim(I+/-)', '%5.3f'),
        ('CC half', '%5.3f'),
        ('Wilson B factor', '%.3f'),
        ('Partial bias', '%5.3f'),
        ('Anomalous completeness', '%5.1f'),
        ('Anomalous multiplicity', '%5.1f'),
        ('Anomalous correlation', '%6.3f'),
        ('Anomalous slope', '%5.3f'),
        ('dF/F', '%.3f'),
        ('dI/s(dI)', '%.3f'),
        ('Total observations', '%d'),
        ('Total unique', '%d')])

      for k in formats.keys():
        if k in available:
          # Missing (None) values render as empty cells.
          values = [(formats[k] % v if v is not None else '')
                    for v in statistics[k]]
          # Single-valued statistics are padded out to the table width.
          if len(values) == 1:
            if width == 3:
              # number goes in the first column
              values = [values[0]] + [''] * (width - 1)
            else:
              # number goes in the last column
              values = [''] * (width - 1) + [values[0]]
          assert len(values) == width
          table.append([k] + values)
      lines.append('\n')
      lines.append(tabulate(table, headers, tablefmt='grid'))
      lines.append('\n')
  return lines
def crystallographic_parameters_section(xproject):
  '''Generate the "Crystallographic parameters" rst section of the report:
  unit cell, space group (plus any alternative possibilities), twinning
  analysis and asymmetric unit contents.

  :param xproject: xia2 XProject to report on
  :returns: list of rst lines
  '''
  lines = []
  for cname, xcryst in xproject.get_crystals().iteritems():
    lines.append('\n')
    lines.append('Crystallographic parameters')
    lines.append('=' * len(lines[-1]))
    lines.append('\n')

    lines.append('Unit cell')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    cell = xcryst.get_cell()
    headers = [u'a (Å)', u'b (Å)', u'c (Å)', u'α (°)', u'β (°)', u'γ (°)']
    table = [['%.3f' %c for c in cell]]
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='grid'))
    lines.append('\n')
    lines.append('.. note:: The unit cell parameters are the average for all measurements.')
    lines.append('\n')

    from cctbx import sgtbx
    spacegroups = xcryst.get_likely_spacegroups()
    spacegroup = spacegroups[0]
    sg = sgtbx.space_group_type(str(spacegroup))
    #spacegroup = sg.lookup_symbol()
    spacegroup = space_group_symbol_rst(sg)
    # (removed dead code that appended a 'Space group' row to the unit-cell
    # table after it had already been rendered above)
    lines.append('Space group')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    lines.append('Space group: %s' %spacegroup)
    lines.append('\n')
    # Only emit the "Other possibilities" heading when there actually are
    # alternative spacegroups (previously it was printed unconditionally,
    # leaving a dangling heading for unambiguous determinations).
    if len(spacegroups) > 1:
      lines.append('Other possibilities could be:')
      lines.append('\n')
      for sg in spacegroups[1:]:
        sg = sgtbx.space_group_type(str(sg))
        lines.append('* %s\n' %space_group_symbol_rst(sg))
      lines.append('\n')
    lines.append('.. note:: The spacegroup was determined using pointless (see log file)')
    lines.append('\n')

    twinning_score = xcryst._get_scaler()._scalr_twinning_score
    twinning_conclusion = xcryst._get_scaler()._scalr_twinning_conclusion
    if twinning_score is not None and twinning_conclusion is not None:
      lines.append('Twinning analysis')
      lines.append('-' * len(lines[-1]))
      lines.append('\n')
      lines.append('Overall twinning score: %.2f' %twinning_score)
      lines.append('%s' %twinning_conclusion)
      lines.append('\n')
      lines.append('.. note:: The twinning score is the value of <E4>/<I2> reported by')
      lines.append(' sfcheck (see `documentation <http://www.ccp4.ac.uk/html/sfcheck.html#Twinning%20test>`_)')
      lines.append('\n')

    lines.append('Asymmetric unit contents')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    lines.append('\n')
    lines.append('.. note:: No information on ASU contents (because no sequence information was supplied?)')
    lines.append('\n')
  return lines
def output_files_section(xproject):
  '''Generate the "Output files" rst section: tables of download links for
  the reflection data files (MTZ, SCA, SCA_UNMERGED) and for the log files
  (with html-ified versions where available).

  :param xproject: xia2 XProject to report on
  :returns: list of rst lines
  '''
  lines = []
  for cname, xcryst in xproject.get_crystals().iteritems():
    lines.append('Output files')
    lines.append('=' * len(lines[-1]))
    lines.append('\n')
    lines.append('.. _Reflection files output from xia2:\n')
    lines.append('Reflection data files')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    lines.append(
      'xia2 produced the following reflection data files - to download,'
      'right-click on the link and select "Save Link As..."')
    lines.append('\n')

    # hack to replace path to reflection files with DataFiles directory
    data_dir = os.path.join(os.path.abspath(os.path.curdir), 'DataFiles')
    g = glob.glob(os.path.join(data_dir, '*'))
    reflection_files = xcryst.get_scaled_merged_reflections()
    for k, rfile in reflection_files.iteritems():
      if isinstance(rfile, basestring):
        # single file: match on basename against DataFiles contents
        for datafile in g:
          if os.path.basename(datafile) == os.path.basename(rfile):
            reflection_files[k] = datafile
            break
      else:
        # nested dict of per-wavelength files
        for kk in rfile.keys():
          for datafile in g:
            if os.path.basename(datafile) == os.path.basename(rfile[kk]):
              reflection_files[k][kk] = datafile
              break

    lines.append('MTZ files (useful for CCP4 and Phenix)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')
    headers = ['Dataset', 'File name']
    merged_mtz = reflection_files['mtz']
    table = [['All datasets', '`%s <%s>`_' %(os.path.basename(merged_mtz),
                                             os.path.relpath(merged_mtz))]]
    #['All datasets (unmerged)', '`%s <%s>`_' %(os.path.basename(merged_mtz), merged_mtz],
    for wname, unmerged_mtz in reflection_files['mtz_unmerged'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(
          os.path.basename(unmerged_mtz), os.path.relpath(unmerged_mtz))])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('SCA files (useful for AutoSHARP, etc.)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')
    table = []
    for wname, merged_sca in reflection_files['sca'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(
          os.path.basename(merged_sca), os.path.relpath(merged_sca))])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('SCA_UNMERGED files (useful for XPREP and Shelx C/D/E)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')
    table = []
    for wname, unmerged_sca in reflection_files['sca_unmerged'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(
          os.path.basename(unmerged_sca), os.path.relpath(unmerged_sca))])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('.. _Log files from individual stages:\n')
    lines.append('Log files')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    lines.append(
      'The log files are located in `<%s/LogFiles>`_ and are grouped by '
      'processing stage:' %os.path.abspath(os.path.curdir))
    table = []
    log_dir = os.path.join(os.path.abspath(os.path.curdir), 'LogFiles')
    g = glob.glob(os.path.join(log_dir, '*.log'))
    for logfile in g:
      # NOTE(review): make_logfile_html() presumably writes the html file
      # as a side effect -- its return value is immediately overwritten by
      # the recomputed path below; confirm this is intentional.
      html_file = make_logfile_html(logfile)
      html_file = os.path.splitext(logfile)[0] + '.html'
      if os.path.exists(html_file):
        table.append(
          [os.path.basename(logfile),
           '`original <%s>`__' %os.path.relpath(logfile),
           '`html <%s>`__' %os.path.relpath(html_file)
           ])
      else:
        table.append(
          [os.path.basename(logfile),
           '`original <%s>`__' %os.path.relpath(logfile),
           ' ',
           ])
    lines.append('\n')
    # NOTE(review): headers is still ['Dataset', 'File name'] from the
    # reflection tables while this table has three columns -- verify the
    # tabulate output is as intended.
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')
  return lines
def overview_section(xproject):
  '''Generate the top-level overview rst section: report title, xia2
  version/status, a summary statistics table (one column per wavelength),
  unit cell, space group, twinning score and a contents list for the rest
  of the report.

  :param xproject: xia2 XProject to report on
  :returns: list of rst lines
  '''
  lines = []
  lines.append('xia2 Processing Report: %s' %xproject.get_name())
  lines.append('#' * len(lines[-1]))
  lines.append('\n')
  xia2_status = 'normal termination' # XXX FIXME
  lines.append("xia2 version %s completed with status '%s'\n" %(
    Version.split()[-1], xia2_status))
  lines.append('Read output from `<%s/>`_\n' %os.path.abspath(os.path.curdir))

  # columns[0] holds the row labels; one further column is appended per
  # wavelength, then the whole thing is transposed for tabulate.
  columns = []
  columns.append([
    '',
    u'Wavelength (Å)',
    'High resolution limit',
    'Low resolution limit',
    'Completeness',
    'Multiplicity',
    'CC-half',
    'I/sigma',
    'Rmerge(I)',
    #'See all statistics',
    ])

  for cname, xcryst in xproject.get_crystals().iteritems():
    statistics_all = xcryst.get_statistics()
    for wname in xcryst.get_wavelength_names():
      statistics = statistics_all[(xproject.get_name(), cname, wname)]
      xwav = xcryst.get_xwavelength(wname)
      high_res = statistics['High resolution limit']
      low_res = statistics['Low resolution limit']
      # Resolution limits display as "overall (shell-low - shell-high)".
      column = [
        wname, xwav.get_wavelength(),
        '%6.2f (%6.2f - %6.2f)' % (high_res[0], low_res[2], high_res[2]),
        '%6.2f (%6.2f - %6.2f)' % (low_res[0], high_res[2], low_res[2]),
        '%6.2f' % statistics['Completeness'][0],
        '%6.2f' % statistics['Multiplicity'][0],
        '%6.4f' % statistics['CC half'][0],
        '%6.2f' % statistics['I/sigma'][0],
        '%6.4f' % statistics['Rmerge(I)'][0],
        ]
      # Anomalous rows are optional; add the label row the first time any
      # wavelength supplies a value.
      for c in ('Anomalous completeness', 'Anomalous multiplicity'):
        if c in statistics:
          column.append('%6.2f' % statistics[c][0])
          if c not in columns[0]:
            columns[0].append(c)
      columns.append(column)

    # Transpose columns -> rows for tabulate.
    table = [[c[i] for c in columns] for i in range(len(columns[0]))]

    cell = xcryst.get_cell()
    table.append(['','',''])
    table.append([u'Unit cell dimensions: a (Å)', '%.3f' %cell[0], ''])
    table.append([u'b (Å)', '%.3f' %cell[1], ''])
    table.append([u'c (Å)', '%.3f' %cell[2], ''])
    table.append([u'α (°)', '%.3f' %cell[3], ''])
    table.append([u'β (°)', '%.3f' %cell[4], ''])
    table.append([u'γ (°)', '%.3f' %cell[5], ''])

    from cctbx import sgtbx
    spacegroups = xcryst.get_likely_spacegroups()
    spacegroup = spacegroups[0]
    sg = sgtbx.space_group_type(str(spacegroup))
    #spacegroup = sg.lookup_symbol()
    spacegroup = space_group_symbol_rst(sg)
    table.append(['','',''])
    table.append(['Space group', spacegroup, ''])

    twinning_score = xcryst._get_scaler()._scalr_twinning_score
    twinning_conclusion = xcryst._get_scaler()._scalr_twinning_conclusion

    if twinning_score is not None:
      table.append(['','',''])
      table.append(['Twinning score', '%.2f' %twinning_score, ''])
      if twinning_conclusion is not None:
        table.append(['', twinning_conclusion, ''])

    # First row (labels header) becomes the tabulate header row.
    headers = table.pop(0)
    lines.append('\n')
    lines.append('.. class:: table-one')
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='grid'))
    lines.append('\n')

    #Spacegroup P 41 2 2
    #Sfcheck twinning score 2.99
    #Your data do not appear to be twinned
    #All crystallographic parameters..

  lines.append('Contents of the rest of this document:')
  lines.append('\n')
  lines.append(
    '* `Reflection files output from xia2`_')
  lines.append(
    '* `Full statistics for each wavelength`_')
  lines.append(
    '* `Log files from individual stages`_')
  lines.append(
    '* `Integration status for images by wavelength and sweep`_')
  #lines.append(
  #'* `Lists of programs and citations`_')

  #lines.append('Inter-wavelength B and R-factor analysis')
  #lines.append('-' * len(lines[-1]))
  #lines.append('\n')
  return lines
def multi_crystal_analysis(stop_after=None):
  '''Actually process something...

  NOTE(review): this is a second, apparently older, definition of
  multi_crystal_analysis -- in Python the later definition shadows the
  earlier one, so one of the two is dead code; confirm which is intended.
  The source also appears to have lost lines mid-function (see the NOTE
  below), so this reconstruction is best-effort.

  :param stop_after: unused; retained for interface compatibility.
  '''
  assert os.path.exists('xia2.json')
  from xia2.Schema.XProject import XProject
  xinfo = XProject.from_json(filename='xia2.json')

  crystals = xinfo.get_crystals()
  for crystal_id, crystal in crystals.iteritems():
    cwd = os.path.abspath(os.curdir)
    working_directory = Environment.generate_directory(
      [crystal.get_name(), 'analysis'])
    os.chdir(working_directory)

    from xia2.Wrappers.CCP4.Blend import Blend
    from xia2.lib.bits import auto_logfiler
    hand_blender = Blend()
    hand_blender.set_working_directory(working_directory)
    auto_logfiler(hand_blender)
    Citations.cite('blend')

    scaler = crystal._get_scaler()
    #epoch_to_si = {}
    # Per-sweep lookup tables, keyed by sweep epoch.
    epoch_to_batches = {}
    epoch_to_integrated_intensities = {}
    epoch_to_sweep_name = {}

    try:
      # Older scalers store per-sweep info in a plain dict of dicts.
      epochs = scaler._sweep_information.keys()
      for epoch in epochs:
        si = scaler._sweep_information[epoch]
        epoch_to_batches[epoch] = si['batches']
        epoch_to_integrated_intensities[epoch] = si['corrected_intensities']
        epoch_to_sweep_name[epoch] = si['sname']
    except AttributeError, e:
      # Newer scalers expose the same information via a sweep handler.
      epochs = scaler._sweep_handler.get_epochs()
      for epoch in epochs:
        si = scaler._sweep_handler.get_sweep_information(epoch)
        epoch_to_batches[epoch] = si.get_batches()
        epoch_to_integrated_intensities[epoch] = si.get_reflections()
        epoch_to_sweep_name[epoch] = si.get_sweep_name()

    unmerged_mtz = scaler.get_scaled_reflections('mtz_unmerged').values()[0]
    from iotbx.reflection_file_reader import any_reflection_file
    reader = any_reflection_file(unmerged_mtz)

    # Pull the intensity and batch arrays out of the unmerged MTZ.
    intensities = None
    batches = None

    assert reader.file_type() == 'ccp4_mtz'
    arrays = reader.as_miller_arrays(merge_equivalents=False)
    for ma in arrays:
      if ma.info().labels == ['BATCH']:
        batches = ma
      elif ma.info().labels == ['I', 'SIGI']:
        intensities = ma
      elif ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
        intensities = ma

    from xia2.Handlers.Environment import which
    Rscript_binary = which('Rscript', debug=False)
    if Rscript_binary is None:
      Chatter.write('Skipping BLEND analysis: Rscript not available')
    else:
      for epoch in epochs:
        hand_blender.add_hklin(epoch_to_integrated_intensities[epoch],
                               label=epoch_to_sweep_name[epoch])
      hand_blender.analysis()
      Chatter.write(
        "Dendrogram saved to: %s" %hand_blender.get_dendrogram_file())
      analysis = hand_blender.get_analysis()
      summary = hand_blender.get_summary()
      clusters = hand_blender.get_clusters()
      linkage_matrix = hand_blender.get_linkage_matrix()
      ddict = hand_blender.plot_dendrogram()

      rows = []
      headers = ['Cluster', 'Datasets', 'Multiplicity', 'Completeness',
                 'LCV', 'aLCV']
      completeness = flex.double()
      for i, cluster in clusters.iteritems():
        # Select all reflections whose batch number falls inside any sweep
        # belonging to this cluster.
        sel_cluster = flex.bool(batches.size(), False)
        for j in cluster['dataset_ids']:
          batch_start, batch_end = epoch_to_batches[epochs[j-1]]
          sel_cluster |= (
            (batches.data() >= batch_start) & (batches.data() <= batch_end))
        intensities_cluster = intensities.select(sel_cluster)
        merging = intensities_cluster.merge_equivalents()
        merged_intensities = merging.array()
        multiplicities = merging.redundancies()
        completeness.append(merged_intensities.completeness())
        dataset_ids = cluster['dataset_ids']
        rows.append(
          ['%i' %i,
           ' '.join(['%i'] * len(dataset_ids)) %tuple(dataset_ids),
           '%.1f' %flex.mean(multiplicities.data().as_double()),
           '%.2f' %completeness[-1],
           '%.2f' %cluster['lcv'], '%.2f' %cluster['alcv']])

      # sort table by completeness
      perm = flex.sort_permutation(completeness)
      rows = [rows[i] for i in perm]

      print
      print 'Unit cell clustering summary:'
      print tabulate(rows, headers, tablefmt='rst')
      print
      blend_html = tabulate(rows, headers, tablefmt='html').replace(
        '<table>',
        '<table class="table table-hover table-condensed">').replace(
          '<td>', '<td style="text-align: right;">')

      # NOTE(review): the source appears to be missing lines at this point
      # -- the intensity-clustering setup (the MultiCrystalAnalysis run and
      # the 'for i, cluster in ...' loop header) is absent, and the
      # fragment below references 'merging', 'merged_intensities',
      # 'cluster' and 'i' that would come from that missing loop.
      multiplicities = merging.redundancies()
      completeness.append(merged_intensities.completeness())
      dataset_ids = cluster['datasets']
      rows.append(
        ['%i' %int(i),
         ' '.join(['%i'] * len(dataset_ids)) %tuple(dataset_ids),
         '%.1f' %flex.mean(multiplicities.data().as_double()),
         '%.2f' %completeness[-1],
         '%.2f' %cluster['height']])

      # sort table by completeness
      perm = flex.sort_permutation(completeness)
      rows = [rows[i] for i in perm]

      print 'Intensity clustering summary:'
      print tabulate(rows, headers, tablefmt='rst')
      print

      intensity_clustering_html = tabulate(
        rows, headers, tablefmt='html').replace(
          '<table>',
          '<table class="table table-hover table-condensed">').replace(
            '<td>', '<td style="text-align: right;">')

      import json

      json_data = {}
      if ddict is not None:
        from xia2.Modules.MultiCrystalAnalysis import scipy_dendrogram_to_plotly_json
        json_data['blend_dendrogram'] = scipy_dendrogram_to_plotly_json(ddict)
      else:
        json_data['blend_dendrogram'] = {'data': [], 'layout': {}}
def run(args): from iotbx.reflection_file_reader import any_reflection_file from xia2.Modules.Analysis import phil_scope interp = phil_scope.command_line_argument_interpreter() params, unhandled = interp.process_and_fetch( args, custom_processor='collect_remaining') params = params.extract() n_bins = params.resolution_bins args = unhandled intensities = None batches = None scales = None dose = None reader = any_reflection_file(args[0]) assert reader.file_type() == 'ccp4_mtz' arrays = reader.as_miller_arrays(merge_equivalents=False) for ma in arrays: if ma.info().labels == ['BATCH']: batches = ma elif ma.info().labels == ['DOSE']: dose = ma elif ma.info().labels == ['I', 'SIGI']: intensities = ma elif ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']: intensities = ma elif ma.info().labels == ['SCALEUSED']: scales = ma assert intensities is not None assert batches is not None mtz_object = reader.file_content() indices = mtz_object.extract_original_index_miller_indices() intensities = intensities.customized_copy( indices=indices, info=intensities.info()) batches = batches.customized_copy(indices=indices, info=batches.info()) from iotbx import merging_statistics merging_stats = merging_statistics.dataset_statistics( intensities, n_bins=n_bins) merging_acentric = intensities.select_acentric().merge_equivalents() merging_centric = intensities.select_centric().merge_equivalents() multiplicities_acentric = {} multiplicities_centric = {} for x in sorted(set(merging_acentric.redundancies().data())): multiplicities_acentric[x] = merging_acentric.redundancies().data().count(x) for x in sorted(set(merging_centric.redundancies().data())): multiplicities_centric[x] = merging_centric.redundancies().data().count(x) headers = [u'Resolution (Å)', 'N(obs)', 'N(unique)', 'Multiplicity', 'Completeness', 'Mean(I)', 'Mean(I/sigma)', 'Rmerge', 'Rmeas', 'Rpim', 'CC1/2', 'CCano'] rows = [] for bin_stats in merging_stats.bins: row = ['%.2f - %.2f' %(bin_stats.d_max, 
bin_stats.d_min), bin_stats.n_obs, bin_stats.n_uniq, '%.2f' %bin_stats.mean_redundancy, '%.2f' %(100*bin_stats.completeness), '%.1f' %bin_stats.i_mean, '%.1f' %bin_stats.i_over_sigma_mean, '%.3f' %bin_stats.r_merge, '%.3f' %bin_stats.r_meas, '%.3f' %bin_stats.r_pim, '%.3f' %bin_stats.cc_one_half, '%.3f' %bin_stats.cc_anom] rows.append(row) from xia2.lib.tabulate import tabulate merging_stats_table_html = tabulate(rows, headers, tablefmt='html') merging_stats_table_html = merging_stats_table_html.replace( '<table>', '<table class="table table-hover table-condensed">') unit_cell_params = intensities.unit_cell().parameters() headers = ['', 'Overall', 'Low resolution', 'High resolution'] stats = (merging_stats.overall, merging_stats.bins[0], merging_stats.bins[-1]) rows = [ [u'Resolution (Å)'] + [ '%.2f - %.2f' %(s.d_max, s.d_min) for s in stats], ['Observations'] + ['%i' %s.n_obs for s in stats], ['Unique reflections'] + ['%i' %s.n_uniq for s in stats], ['Multiplicity'] + ['%.1f' %s.mean_redundancy for s in stats], ['Completeness'] + ['%.2f%%' %(s.completeness * 100) for s in stats], #['Mean intensity'] + ['%.1f' %s.i_mean for s in stats], ['Mean I/sigma(I)'] + ['%.1f' %s.i_over_sigma_mean for s in stats], ['Rmerge'] + ['%.3f' %s.r_merge for s in stats], ['Rmeas'] + ['%.3f' %s.r_meas for s in stats], ['Rpim'] + ['%.3f' %s.r_pim for s in stats], ['CC1/2'] + ['%.3f' %s.cc_one_half for s in stats], ] rows = [[u'<strong>%s</strong>' %r[0]] + r[1:] for r in rows] overall_stats_table_html = tabulate(rows, headers, tablefmt='html') overall_stats_table_html = overall_stats_table_html.replace( '<table>', '<table class="table table-hover table-condensed">') #headers = ['Crystal symmetry', ''] #rows = [ #[u'Unit cell: a (Å)', '%.3f' %unit_cell_params[0]], #[u'b (Å)', '%.3f' %unit_cell_params[1]], #[u'c (Å)', '%.3f' %unit_cell_params[2]], #[u'α (°)', '%.3f' %unit_cell_params[3]], #[u'β (°)', '%.3f' %unit_cell_params[4]], #[u'γ (°)', '%.3f' %unit_cell_params[5]], #['Space group', 
intensities.space_group_info().symbol_and_number()], #] #symmetry_table_html = tabulate(rows, headers, tablefmt='html') symmetry_table_html = """ <p> <b>Filename:</b> %s <br> <b>Unit cell:</b> %s <br> <b>Space group:</b> %s </p> """ %(os.path.abspath(reader.file_name()), intensities.space_group_info().symbol_and_number(), str(intensities.unit_cell())) if params.anomalous: intensities = intensities.as_anomalous_array() batches = batches.as_anomalous_array() from xia2.Modules.PyChef2.PyChef import remove_batch_gaps new_batch_data = remove_batch_gaps(batches.data()) new_batches = batches.customized_copy(data=new_batch_data) sc_vs_b = scales_vs_batch(scales, new_batches) rmerge_vs_b = rmerge_vs_batch(intensities, new_batches) intensities.setup_binner(n_bins=n_bins) merged_intensities = intensities.merge_equivalents().array() from mmtbx.scaling import twin_analyses normalised_intensities = twin_analyses.wilson_normalised_intensities( miller_array=merged_intensities) nz_test = twin_analyses.n_z_test( normalised_acentric=normalised_intensities.acentric, normalised_centric=normalised_intensities.centric) from mmtbx.scaling import data_statistics if not intensities.space_group().is_centric(): wilson_scaling = data_statistics.wilson_scaling( miller_array=merged_intensities, n_residues=200) # XXX default n_residues? 
acentric = intensities.select_acentric() centric = intensities.select_centric() if acentric.size(): acentric.setup_binner(n_bins=n_bins) second_moments_acentric = acentric.second_moment_of_intensities(use_binning=True) if centric.size(): centric.setup_binner(n_bins=n_bins) second_moments_centric = centric.second_moment_of_intensities(use_binning=True) d_star_sq_bins = [ (1/bin_stats.d_min**2) for bin_stats in merging_stats.bins] i_over_sig_i_bins = [ bin_stats.i_over_sigma_mean for bin_stats in merging_stats.bins] cc_one_half_bins = [ bin_stats.cc_one_half for bin_stats in merging_stats.bins] cc_anom_bins = [ bin_stats.cc_anom for bin_stats in merging_stats.bins] from xia2.Modules.PyChef2 import PyChef if params.chef_min_completeness: d_min = PyChef.resolution_limit( mtz_file=args[0], min_completeness=params.chef_min_completeness, n_bins=8) print 'Estimated d_min for CHEF analysis: %.2f' %d_min sel = flex.bool(intensities.size(), True) d_spacings = intensities.d_spacings().data() sel &= d_spacings >= d_min intensities = intensities.select(sel) batches = batches.select(sel) if dose is not None: dose = dose.select(sel) if dose is None: dose = PyChef.batches_to_dose(batches.data(), params.dose) else: dose = dose.data() pychef_stats = PyChef.Statistics(intensities, dose) pychef_dict = pychef_stats.to_dict() def d_star_sq_to_d_ticks(d_star_sq, nticks): from cctbx import uctbx d_spacings = uctbx.d_star_sq_as_d(flex.double(d_star_sq)) min_d_star_sq = min(d_star_sq) dstep = (max(d_star_sq) - min_d_star_sq)/nticks tickvals = list(min_d_star_sq + (i*dstep) for i in range(nticks)) ticktext = ['%.2f' %(uctbx.d_star_sq_as_d(dsq)) for dsq in tickvals] return tickvals, ticktext tickvals, ticktext = d_star_sq_to_d_ticks(d_star_sq_bins, nticks=5) tickvals_wilson, ticktext_wilson = d_star_sq_to_d_ticks( wilson_scaling.d_star_sq, nticks=5) second_moment_d_star_sq = [] if acentric.size(): second_moment_d_star_sq.extend(second_moments_acentric.binner.bin_centers(2)) if centric.size(): 
second_moment_d_star_sq.extend(second_moments_centric.binner.bin_centers(2)) tickvals_2nd_moment, ticktext_2nd_moment = d_star_sq_to_d_ticks( second_moment_d_star_sq, nticks=5) json_data = { 'multiplicities': { 'data': [ { 'x': multiplicities_acentric.keys(), 'y': multiplicities_acentric.values(), 'type': 'bar', 'name': 'Acentric', 'opacity': 0.75, }, { 'x': multiplicities_centric.keys(), 'y': multiplicities_centric.values(), 'type': 'bar', 'name': 'Centric', 'opacity': 0.75, }, ], 'layout': { 'title': 'Distribution of multiplicities', 'xaxis': {'title': 'Multiplicity'}, 'yaxis': { 'title': 'Frequency', #'rangemode': 'tozero' }, 'bargap': 0, 'barmode': 'overlay', }, }, 'scale_rmerge_vs_batch': { 'data': [ { 'x': sc_vs_b.batches, 'y': sc_vs_b.data, 'type': 'scatter', 'name': 'Scale', 'opacity': 0.75, }, { 'x': rmerge_vs_b.batches, 'y': rmerge_vs_b.data, 'yaxis': 'y2', 'type': 'scatter', 'name': 'Rmerge', 'opacity': 0.75, }, ], 'layout': { 'title': 'Scale and Rmerge vs batch', 'xaxis': {'title': 'N'}, 'yaxis': { 'title': 'Scale', 'rangemode': 'tozero' }, 'yaxis2': { 'title': 'Rmerge', 'overlaying': 'y', 'side': 'right', 'rangemode': 'tozero' } }, }, 'cc_one_half': { 'data': [ { 'x': d_star_sq_bins, # d_star_sq 'y': cc_one_half_bins, 'type': 'scatter', 'name': 'CC-half', }, ({ 'x': d_star_sq_bins, # d_star_sq 'y': cc_anom_bins, 'type': 'scatter', 'name': 'CC-anom', } if not intensities.space_group().is_centric() else {}), ], 'layout':{ 'title': 'CC-half vs resolution', 'xaxis': { 'title': u'Resolution (Å)', 'tickvals': tickvals, 'ticktext': ticktext, }, 'yaxis': { 'title': 'CC-half', 'range': [min(cc_one_half_bins + cc_anom_bins + [0]), 1] }, }, }, 'i_over_sig_i': { 'data': [{ 'x': d_star_sq_bins, # d_star_sq 'y': i_over_sig_i_bins, 'type': 'scatter', 'name': 'Scales vs batch', }], 'layout': { 'title': '<I/sig(I)> vs resolution', 'xaxis': { 'title': u'Resolution (Å)', 'tickvals': tickvals, 'ticktext': ticktext, }, 'yaxis': { 'title': '<I/sig(I)>', 'rangemode': 
'tozero' }, } }, 'second_moments': { 'data': [ ({ 'x': list(second_moments_acentric.binner.bin_centers(2)), # d_star_sq 'y': second_moments_acentric.data[1:-1], 'type': 'scatter', 'name': '<I^2> acentric', } if acentric.size() else {}), ({ 'x': list(second_moments_centric.binner.bin_centers(2)), # d_star_sq 'y': second_moments_centric.data[1:-1], 'type': 'scatter', 'name': '<I^2> centric', } if centric.size() else {}) ], 'layout': { 'title': 'Second moment of I', 'xaxis': { 'title': u'Resolution (Å)', 'tickvals': tickvals_2nd_moment, 'ticktext': ticktext_2nd_moment, }, 'yaxis': { 'title': '<I^2>', 'rangemode': 'tozero' }, } }, 'cumulative_intensity_distribution': { 'data': [ { 'x': list(nz_test.z), 'y': list(nz_test.ac_obs), 'type': 'scatter', 'name': 'Acentric observed', 'mode': 'lines', 'line': { 'color': 'rgb(31, 119, 180)', }, }, { 'x': list(nz_test.z), 'y': list(nz_test.c_obs), 'type': 'scatter', 'name': 'Centric observed', 'mode': 'lines', 'line': { 'color': 'rgb(255, 127, 14)', }, }, { 'x': list(nz_test.z), 'y': list(nz_test.ac_untwinned), 'type': 'scatter', 'name': 'Acentric theory', 'mode': 'lines', 'line': { 'color': 'rgb(31, 119, 180)', 'dash': 'dot', }, 'opacity': 0.8, }, { 'x': list(nz_test.z), 'y': list(nz_test.c_untwinned), 'type': 'scatter', 'name': 'Centric theory', 'mode': 'lines', 'line': { 'color': 'rgb(255, 127, 14)', 'dash': 'dot', }, 'opacity': 0.8, }, ], 'layout': { 'title': 'Cumulative intensity distribution', 'xaxis': {'title': 'z'}, 'yaxis': { 'title': 'P(Z <= Z)', 'rangemode': 'tozero' }, } }, 'wilson_intensity_plot': { 'data': ([ { 'x': list(wilson_scaling.d_star_sq), 'y': list(wilson_scaling.mean_I_obs_data), 'type': 'scatter', 'name': 'Observed', }, { 'x': list(wilson_scaling.d_star_sq), 'y': list(wilson_scaling.mean_I_obs_theory), 'type': 'scatter', 'name': 'Expected', }, { 'x': list(wilson_scaling.d_star_sq), 'y': list(wilson_scaling.mean_I_normalisation), 'type': 'scatter', 'name': 'Smoothed', }] if not 
intensities.space_group().is_centric() else []), 'layout': { 'title': 'Wilson intensity plot', 'xaxis': { 'title': u'Resolution (Å)', 'tickvals': tickvals_wilson, 'ticktext': ticktext_wilson, }, 'yaxis': { 'type': 'log', 'title': 'Mean(I)', 'rangemode': 'tozero', }, }, }, } json_data.update(pychef_dict) from dials.report import html_report report = html_report.html_report() page_header = html_report.page_header('xia2 report') report.add_content(page_header) overall_panel = html_report.panel('Overall', 'overall', show=True) overall_table = html_report.table_responsive( overall_stats_table_html, width=800) overall_panel.add_content(overall_table) merging_stats_panel = html_report.panel('Resolution shells', 'merging_stats') merging_stats_table = html_report.table_responsive(merging_stats_table_html) merging_stats_panel.add_content(merging_stats_table) merging_stats_panel_group = html_report.panel_group( [overall_panel, merging_stats_panel]) div = html_report.div() div.add_content(html_report.raw_html('<h2>Merging statistics</h2>')) div.add_content(html_report.raw_html(symmetry_table_html)) div.add_content(merging_stats_panel_group) report.add_content(div) resolution_plots_panel = html_report.panel('Analysis by resolution', 'resolution') for graph in ('cc_one_half', 'i_over_sig_i', 'second_moments', 'wilson_intensity_plot'): resolution_plots_panel.add_content(html_report.plotly_graph( json_data[graph], graph)) batch_plots_panel = html_report.panel('Analysis by batch', 'batch') for graph in ('scale_rmerge_vs_batch', 'completeness_vs_dose', 'rcp_vs_dose', 'scp_vs_dose', 'rd_vs_batch_difference'): batch_plots_panel.add_content(html_report.plotly_graph( json_data[graph], graph)) misc_plots_panel = html_report.panel('Miscellaneous', 'misc') for graph in ('multiplicities', 'cumulative_intensity_distribution'): misc_plots_panel.add_content(html_report.plotly_graph( json_data[graph], graph)) analysis_plots_panel_group = html_report.panel_group( [resolution_plots_panel, 
batch_plots_panel, misc_plots_panel]) div = html_report.div() div.add_content(html_report.raw_html('<h2>Analysis plots</h2>')) div.add_content(analysis_plots_panel_group) report.add_content(div) html = report.html() import json json_str = json.dumps(json_data) with open('xia2-report.json', 'wb') as f: print >> f, json_str with open('xia2-report.html', 'wb') as f: print >> f, html.encode('ascii', 'xmlcharrefreplace') return
def output_files_section(xproject):
  """Build the 'Output files' section of the xia2 rst report.

  Parameters:
    xproject: an XProject whose crystals provide scaled merged
      reflection files via get_scaled_merged_reflections().

  Returns a list of restructured-text lines listing, per crystal, the
  MTZ / SCA / SCA_UNMERGED reflection files and the log files found in
  the LogFiles directory.
  """
  lines = []
  for cname, xcryst in xproject.get_crystals().iteritems():
    lines.append('Output files')
    lines.append('=' * len(lines[-1]))
    lines.append('\n')

    lines.append('.. _Reflection files output from xia2:\n')
    lines.append('Reflection data files')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')

    # BUGFIX: the two adjacent literals previously concatenated to
    # "...to download,right-click..." — a space was missing.
    lines.append(
      'xia2 produced the following reflection data files - to download, '
      'right-click on the link and select "Save Link As..."')
    lines.append('\n')

    reflection_files = xcryst.get_scaled_merged_reflections()

    lines.append('MTZ files (useful for CCP4 and Phenix)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')

    # The same two-column header is reused for every file table below.
    headers = ['Dataset', 'File name']
    merged_mtz = reflection_files['mtz']
    table = [['All datasets', '`%s <%s>`_' %(os.path.basename(merged_mtz),
                                             merged_mtz)]]

    for wname, unmerged_mtz in reflection_files['mtz_unmerged'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(os.path.basename(unmerged_mtz), unmerged_mtz)])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('SCA files (useful for AutoSHARP, etc.)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')
    table = []
    for wname, merged_sca in reflection_files['sca'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(os.path.basename(merged_sca), merged_sca)])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('SCA_UNMERGED files (useful for XPREP and Shelx C/D/E)')
    lines.append('_' * len(lines[-1]))
    lines.append('\n')
    table = []
    for wname, unmerged_sca in reflection_files['sca_unmerged'].iteritems():
      table.append(
        [wname, '`%s <%s>`_' %(os.path.basename(unmerged_sca), unmerged_sca)])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

    lines.append('.. _Log files from individual stages:\n')
    lines.append('Log files')
    lines.append('-' * len(lines[-1]))
    lines.append('\n')
    lines.append(
      'The log files are located in `<%s/LogFiles>`_ and are grouped by '
      'processing stage:' %os.path.abspath(os.path.curdir))

    # Link each .log file, with an html rendition where one can be made.
    table = []
    log_dir = os.path.join(os.path.abspath(os.path.curdir), 'LogFiles')
    import glob
    g = glob.glob(os.path.join(log_dir, '*.log'))
    for logfile in g:
      html_file = make_logfile_html(logfile)
      if html_file is not None:
        table.append(
          [os.path.basename(logfile),
           '`original <%s>`__' %logfile,
           '`html <%s>`__' %html_file
           ])
      else:
        table.append(
          [os.path.basename(logfile),
           '`original <%s>`__' %logfile,
           ' ',
           ])
    lines.append('\n')
    lines.append(tabulate(table, headers, tablefmt='rst'))
    lines.append('\n')

  return lines