class block_base(DictMixin):
    """Base class for a CIF data block: an ordered mapping of data names
    (tags) to either scalar data items or loop objects.

    Internal state:
      _items     -- dict of scalar data items (tag -> string value)
      loops      -- dict of loop name -> loop object
      _set       -- OrderedSet recording insertion order of top-level keys
      keys_lower -- lowercase tag -> original-case tag, enabling
                    case-insensitive lookup
    """

    def __init__(self):
        self._items = {}
        self.loops = {}
        self._set = OrderedSet()
        self.keys_lower = {}

    def __setitem__(self, key, value):
        # Tags must match the CIF tag syntax.
        if not re.match(tag_re, key):
            raise Sorry("%s is not a valid data name" % key)
        if isinstance(value, loop):
            self.loops[key] = value
            self.keys_lower[key.lower()] = key
            # register every column of the loop for case-insensitive lookup
            for k in value.keys():
                self.keys_lower[k.lower()] = k
        elif isinstance(value, basestring):
            v = str(value)
            # accept only strings representable as a CIF data item
            if not (re.match(any_print_char_re, v) or re.match(
                    quoted_string_re, v) or re.match(semicolon_string_re, v)):
                raise Sorry("Invalid data item for %s" % key)
            self._items[key] = v
            self.keys_lower[key.lower()] = key
        else:
            try:
                # numeric scalars are stored via their string representation
                float(value)
                self[key] = str(value)
            except TypeError:
                # non-scalar value (e.g. a flex array): replace the column of
                # any existing loop that holds it, otherwise wrap it in a new
                # single-column loop
                if key in self._items:
                    del self._items[key]
                for loop_ in self.loops.values():
                    if key in loop_:
                        loop_[key] = value
                if key not in self:
                    self.add_loop(loop(header=(key, ), data=(value, )))
        if key in self._items or isinstance(value, loop):
            self._set.add(key)

    def __getitem__(self, key):
        key = self.keys_lower.get(key.lower(), key)
        if key in self._items:
            return self._items[key]
        else:
            # give precedence to returning the actual data items in the event of a
            # single looped item when the loop name and data name coincide
            for loop in self.loops.values():
                if key in loop:
                    return loop[key]
            if key in self.loops:
                return self.loops[key]
        raise KeyError(key)

    def __delitem__(self, key):
        key = self.keys_lower.get(key.lower(), key)
        if key in self._items:
            del self._items[key]
            self._set.discard(key)
        elif key in self.keys():
            # must be a looped item
            for k, loop in self.loops.iteritems():
                if key in loop:
                    if len(loop) == 1:
                        # remove the now empty loop
                        del self[k]
                    else:
                        del loop[key]
                    return
            raise KeyError(key)
        elif key in self.loops:
            del self.loops[key]
            self._set.discard(key)
        else:
            # NOTE(review): raised without the offending key, unlike the
            # branches above -- confirm whether KeyError(key) was intended
            raise KeyError

    def get_looped_item(self, key, key_error=KeyError, value_error=None,
                        default=None):
        """Return the value for ``key`` as a flex.std_string array.

        A missing key raises ``key_error`` (or returns ``default`` if
        ``key_error`` is None).  A scalar value raises ``value_error`` if
        given, else returns ``default`` if given, else is wrapped in a
        one-element flex.std_string.
        """
        if key not in self:
            if key_error is None:
                return default
            else:
                raise key_error(key)
        value = self[key]
        if isinstance(value, flex.std_string):
            return value
        elif value_error is not None:
            raise value_error("%s is not a looped item" % key)
        elif default is not None:
            return default
        else:
            return flex.std_string([value])

    def loop_keys(self):
        # Unique key prefixes (the part before the first "."), in block order.
        done = []
        for key in self:
            key = key.split(".")[0]
            if key in done:
                continue
            done.append(key)
        return done

    def iterloops(self):
        # Yield the value stored under each unique key prefix.
        for key in self.loop_keys():
            yield self.get(key)

    def get_single_item(self, key, key_error=KeyError, value_error=ValueError,
                        default=None):
        """Return a scalar item; raise ``value_error`` (or return ``default``)
        if the key refers to a looped item instead."""
        if key not in self:
            if key_error is None:
                return default
            else:
                raise key_error(key)
        value = self[key]
        if not isinstance(value, flex.std_string):
            return value
        elif value_error is not None:
            raise value_error("%s appears as a looped item" % key)
        else:
            return default

    def keys(self):
        # Keys in insertion order, with each loop expanded to its column names.
        keys = []
        for key in self._set:
            if key in self._items:
                keys.append(key)
            elif key in self.loops:
                keys.extend(self.loops[key].keys())
        return keys

    def item_keys(self):
        '''Returns names of all entries that are not loops'''
        return self._items.keys()

    def __repr__(self):
        return repr(OrderedDict(self.iteritems()))

    def update(self, other=None, **kwargs):
        """Merge ``other`` (a plain/ordered dict or another block) into self.

        NOTE(review): **kwargs is accepted but never used -- confirm intended.
        """
        if other is None:
            return
        if isinstance(other, OrderedDict) or isinstance(other, dict):
            for key, value in other.iteritems():
                self[key] = value
        else:
            # assume a block-like object: merge internal state directly
            self._items.update(other._items)
            self.loops.update(other.loops)
            self._set |= other._set
            self.keys_lower.update(other.keys_lower)

    def add_data_item(self, tag, value):
        self[tag] = value

    def add_loop(self, loop):
        try:
            self.setdefault(loop.name(), loop)
        except Sorry:
            # create a unique loop name
            self.setdefault('_' + str(hash(tuple(loop.keys()))), loop)

    def get_loop(self, loop_name, default=None):
        # case-insensitive lookup of a loop by name
        loop_ = self.loops.get(
            self.keys_lower.get(loop_name.lower(), loop_name))
        if loop_ is None:
            return default
        return loop_

    def get_loop_or_row(self, loop_name, default=None):
        """Like get_loop, but fall back to assembling a single-row loop from
        scalar items whose tags share the ``loop_name.`` prefix."""
        loop_ = self.get_loop(loop_name, None)
        if loop_ is None:
            ln = loop_name
            if ln[-1] != '.':
                ln += '.'
            found_keys = {}
            for key, value in self.iteritems():
                if key.startswith(ln):
                    found_keys[key] = flex.std_string([value])
            # constructing the loop
            if len(found_keys) > 0:
                loop_ = loop(data=found_keys)
        if loop_ is None:
            return default
        return loop_

    def get_loop_with_defaults(self, loop_name, default_dict):
        # Return the named loop, adding a default-filled column for every
        # key of default_dict that the loop does not already contain.
        loop_ = self.get_loop(loop_name)
        if loop_ is None:
            loop_ = loop(header=default_dict.keys())
        n_rows = loop_.n_rows()
        for key, value in default_dict.iteritems():
            if key not in loop_:
                loop_.add_column(key, flex.std_string(n_rows, value))
        return loop_

    def __copy__(self):
        # shallow copy: contained loop objects are shared with the original
        new = self.__class__()
        new._items = self._items.copy()
        new.loops = self.loops.copy()
        new._set = copy.copy(self._set)
        new.keys_lower = self.keys_lower.copy()
        return new

    copy = __copy__

    def __deepcopy__(self, memo):
        new = self.__class__()
        new._items = copy.deepcopy(self._items, memo)
        new.loops = copy.deepcopy(self.loops, memo)
        new._set = copy.deepcopy(self._set, memo)
        new.keys_lower = copy.deepcopy(self.keys_lower, memo)
        return new

    def deepcopy(self):
        return copy.deepcopy(self)

    def __str__(self):
        s = StringIO()
        self.show(out=s)
        return s.getvalue()

    def validate(self, dictionary):
        # Validate every item and loop against the given CIF dictionary;
        # blocks also validate their save frames recursively.
        for key, value in self._items.iteritems():
            dictionary.validate_single_item(key, value, self)
        for loop in self.loops.values():
            dictionary.validate_loop(loop, self)
        if isinstance(self, block):
            for value in self.saves.itervalues():
                value.validate(dictionary)

    def sort(self, recursive=False, key=None, reverse=False):
        # Re-order top-level keys: sorted scalar items first, then sorted
        # loop names; optionally sort columns inside each loop too.
        self._set = OrderedSet(
            sorted(self._items.keys(), key=key, reverse=reverse) \
            + sorted(self.loops.keys(), key=key, reverse=reverse))
        if recursive:
            for l in self.loops.values():
                l.sort(key=key, reverse=reverse)

    def difference(self, other):
        """Items that either appear in both self and other and the value has
        changed or appear in self but not other.

        NOTE(review): for keys present in both blocks, the value stored in
        the result is ``other``'s value, not self's -- confirm intended.
        """
        new = self.__class__()
        for items in (self._items, self.loops):
            for key, value in items.iteritems():
                if key in other:
                    other_value = other[key]
                    if other_value == value:
                        continue
                    else:
                        new[key] = other_value
                else:
                    new[key] = value
        return new
def show_reflections(reflections, show_intensities=False, show_profile_fit=False,
                     show_centroids=False, show_all_reflection_data=False,
                     show_flags=False, max_reflections=None):
    """Return a multi-line text summary of a list of reflection tables.

    For each table, a min/max/mean summary row is produced for every
    column; depending on the ``show_*`` flags, the per-reflection values
    of selected columns are appended as well.

    :param reflections: iterable of DIALS reflection tables
    :param max_reflections: cap on individual reflections printed per
        table (None means print all of them)
    :returns: the assembled report as a single string
    """
    text = []

    import collections
    from libtbx.containers import OrderedSet

    # printf-style format for each known column; vector-valued columns
    # (vec3, miller index) carry one comma-separated format per component.
    formats = collections.OrderedDict((
        ('miller_index', '%i, %i, %i'),
        ('d', '%.2f'),
        ('qe', '%.3f'),
        ('id', '%i'),
        ('imageset_id', '%i'),
        ('panel', '%i'),
        ('flags', '%i'),
        ('background.mean', '%.1f'),
        ('background.dispersion', '%.1f'),
        ('background.mse', '%.1f'),
        ('background.sum.value', '%.1f'),
        ('background.sum.variance', '%.1f'),
        ('intensity.prf.value', '%.1f'),
        ('intensity.prf.variance', '%.1f'),
        ('intensity.sum.value', '%.1f'),
        ('intensity.sum.variance', '%.1f'),
        ('intensity.cor.value', '%.1f'),
        ('intensity.cor.variance', '%.1f'),
        ('lp', '%.3f'),
        ('num_pixels.background', '%i'),
        ('num_pixels.background_used', '%i'),
        ('num_pixels.foreground', '%i'),
        ('num_pixels.valid', '%i'),
        ('partial_id', '%i'),
        ('partiality', '%.4f'),
        ('profile.correlation', '%.3f'),
        ('profile.rmsd', '%.3f'),
        ('xyzcal.mm', '%.2f, %.2f, %.2f'),
        ('xyzcal.px', '%.2f, %.2f, %.2f'),
        ('delpsical.rad', '%.3f'),
        ('delpsical2', '%.3f'),
        ('delpsical.weights', '%.3f'),
        ('xyzobs.mm.value', '%.2f, %.2f, %.2f'),
        ('xyzobs.mm.variance', '%.4e, %.4e, %.4e'),
        ('xyzobs.px.value', '%.2f, %.2f, %.2f'),
        ('xyzobs.px.variance', '%.4f, %.4f, %.4f'),
        ('s1', '%.4f, %.4f, %.4f'),
        ('shoebox', '%.1f'),
        ('rlp', '%.4f, %.4f, %.4f'),
        ('zeta', '%.3f'),
        ('x_resid', '%.3f'),
        ('x_resid2', '%.3f'),
        ('y_resid', '%.3f'),
        ('y_resid2', '%.3f'),
        ('kapton_absorption_correction', '%.3f'),
        ('kapton_absorption_correction_sigmas', '%.3f'),
    ))

    for rlist in reflections:
        from dials.array_family import flex
        from dials.algorithms.shoebox import MaskCode
        foreground_valid = MaskCode.Valid | MaskCode.Foreground
        text.append('')
        text.append('Reflection list contains %i reflections' % (len(rlist)))

        if len(rlist) == 0:
            continue

        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if k in formats and not "%" in formats[k]:
                # Allow blanking out of entries that wouldn't make sense
                rows.append([k, formats[k], formats[k], formats[k]])
            elif type(col) in (flex.double, flex.int, flex.size_t):
                # NOTE(review): columns absent from `formats` would raise
                # KeyError here -- presumably all encountered columns are
                # listed above; confirm against callers.
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append([
                    k, formats[k] % flex.min(col), formats[k] % flex.max(col),
                    formats[k] % flex.mean(col)
                ])
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if isinstance(col, flex.miller_index):
                    col = col.as_vec3_double()
                rows.append([
                    k, formats[k] % col.min(), formats[k] % col.max(),
                    formats[k] % col.mean()
                ])
            elif isinstance(col, flex.shoebox):
                # shoeboxes get a blank header row plus derived statistics
                rows.append([k, "", "", ""])
                si = col.summed_intensity().observed_value()
                rows.append([
                    " summed I", formats[k] % flex.min(si),
                    formats[k] % flex.max(si),
                    formats[k] % flex.mean(si)
                ])
                x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                rows.append([
                    " N pix", formats[k] % flex.min(bbox_sizes),
                    formats[k] % flex.max(bbox_sizes),
                    formats[k] % flex.mean(bbox_sizes)
                ])
                fore_valid = col.count_mask_values(
                    foreground_valid).as_double()
                rows.append([
                    " N valid foreground pix",
                    formats[k] % flex.min(fore_valid),
                    formats[k] % flex.max(fore_valid),
                    formats[k] % flex.mean(fore_valid)
                ])

        text.append(
            table_utils.format(rows, has_header=True, prefix="| ", postfix=" |"))

        if show_flags:
            text.append(_create_flag_count_table(rlist))

        intensity_keys = ('miller_index', 'd', 'intensity.prf.value',
                          'intensity.prf.variance', 'intensity.sum.value',
                          'intensity.sum.variance', 'background.mean',
                          'profile.correlation', 'profile.rmsd')
        profile_fit_keys = (
            'miller_index',
            'd',
        )
        centroid_keys = ('miller_index', 'd', 'xyzcal.mm', 'xyzcal.px',
                         'xyzobs.mm.value', 'xyzobs.mm.variance',
                         'xyzobs.px.value', 'xyzobs.px.variance')

        keys_to_print = OrderedSet()
        if show_intensities:
            for k in intensity_keys:
                keys_to_print.add(k)
        if show_profile_fit:
            for k in profile_fit_keys:
                keys_to_print.add(k)
        if show_centroids:
            for k in centroid_keys:
                keys_to_print.add(k)
        if show_all_reflection_data:
            for k in formats:
                keys_to_print.add(k)

        def format_column(key, data, format_strings=None):
            # Render one column as a flex.std_string of fixed-width,
            # comma-joined element strings, padded to the key's width.
            if isinstance(data, flex.vec3_double):
                c_strings = [
                    c.as_string(format_strings[i].strip())
                    for i, c in enumerate(data.parts())
                ]
            elif isinstance(data, flex.miller_index):
                c_strings = [
                    c.as_string(format_strings[i].strip())
                    for i, c in enumerate(data.as_vec3_double().parts())
                ]
            elif isinstance(data, flex.size_t):
                c_strings = [data.as_int().as_string(format_strings[0].strip())]
            elif isinstance(data, flex.shoebox):
                # represent each shoebox by its bounding-box pixel count
                x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
                key += " (N pix)"
            else:
                c_strings = [data.as_string(format_strings[0].strip())]

            column = flex.std_string()
            max_element_lengths = [c.max_element_length() for c in c_strings]
            for i in range(len(c_strings[0])):
                column.append(('%%%is' % len(key)) % ', '.join(
                    ('%%%is' % max_element_lengths[j]) % c_strings[j][i]
                    for j in range(len(c_strings))))
            return column

        if keys_to_print:
            keys = [k for k in keys_to_print if k in rlist]
            rows = [keys]
            # BUG FIX: previously `max_reflections` was used unclamped, so the
            # default of None crashed below in '%i' % None and range(None);
            # additionally, clamping into the parameter itself leaked the
            # first table's limit into subsequent tables.  Use a per-table
            # local instead.
            if max_reflections is not None:
                n_print = min(len(rlist), max_reflections)
            else:
                n_print = len(rlist)
            columns = []
            for k in keys:
                columns.append(
                    format_column(k, rlist[k],
                                  format_strings=formats[k].split(',')))
            text.append('')
            text.append('Printing %i of %i reflections:' %
                        (n_print, len(rlist)))
            line = []
            for j in range(len(columns)):
                key = keys[j]
                if key == 'shoebox':
                    key += " (N pix)"
                width = max(len(key), columns[j].max_element_length())
                line.append('%%%is' % width % key)
            text.append(' '.join(line))
            for i in range(n_print):
                line = []
                for j in range(len(columns)):
                    line.append(columns[j][i])
                text.append(' '.join(line))
    return '\n'.join(text)
def run(args):
    """Command-line entry point: print a human-readable summary of the
    experiments, datablocks and reflection tables given on the command line.

    All output goes to stdout via Python 2 print statements.
    """
    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_reflections
    import libtbx.load_env

    usage = "%s [options] datablock.json | experiments.json | image_*.cbf" % (
        libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_datablocks=True,
        read_datablocks_from_images=True,
        read_reflections=True,
        check_format=False,
        epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)

    # nothing to do: show usage and bail out
    if len(datablocks) == 0 and len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        exit()

    # --- experiment summaries ---------------------------------------------
    for i_expt, expt in enumerate(experiments):
        print "Experiment %i:" % i_expt
        print str(expt.detector)
        print 'Max resolution (at corners): %f' % (
            expt.detector.get_max_resolution(expt.beam.get_s0()))
        print 'Max resolution (inscribed):  %f' % (
            expt.detector.get_max_inscribed_resolution(expt.beam.get_s0()))
        if params.show_panel_distance:
            for ipanel, panel in enumerate(expt.detector):
                from scitbx import matrix
                fast = matrix.col(panel.get_fast_axis())
                slow = matrix.col(panel.get_slow_axis())
                normal = fast.cross(slow)
                origin = matrix.col(panel.get_origin())
                # distance along the panel normal; origin re-expressed in the
                # panel's fast/slow frame
                distance = origin.dot(normal)
                fast_origin = - (origin - distance * normal).dot(fast)
                slow_origin = - (origin - distance * normal).dot(slow)
                print 'Panel %d: distance %.2f origin %.2f %.2f' % \
                    (ipanel, distance, fast_origin, slow_origin)
            print ''
        print ''
        panel_id, (x, y) = beam_centre(expt.detector, expt.beam)
        if panel_id >= 0 and x is not None and y is not None:
            if len(expt.detector) > 1:
                beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" % (
                    panel_id, x, y)
            else:
                beam_centre_str = "Beam centre: (%.2f,%.2f)" % (x, y)
        else:
            beam_centre_str = ""
        print str(expt.beam) + beam_centre_str + '\n'
        if expt.scan is not None:
            print expt.scan
        if expt.goniometer is not None:
            print expt.goniometer
        expt.crystal.show(show_scan_varying=params.show_scan_varying)
        if expt.crystal.num_scan_points:
            # average the scan-varying unit cell over all scan points
            from scitbx.array_family import flex
            from cctbx import uctbx
            abc = flex.vec3_double()
            angles = flex.vec3_double()
            for n in range(expt.crystal.num_scan_points):
                a, b, c, alpha, beta, gamma = \
                    expt.crystal.get_unit_cell_at_scan_point(n).parameters()
                abc.append((a, b, c))
                angles.append((alpha, beta, gamma))
            a, b, c = abc.mean()
            alpha, beta, gamma = angles.mean()
            mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))
            print " Average unit cell: %s" % mean_unit_cell
        print

    # --- datablock / imageset summaries -----------------------------------
    for datablock in datablocks:
        if datablock.format_class() is not None:
            print 'Format: %s' % datablock.format_class()
        imagesets = datablock.extract_imagesets()
        for imageset in imagesets:
            try:
                print imageset.get_template()
            except Exception:
                # some imagesets have no template; best-effort output only
                pass
            detector = imageset.get_detector()
            print str(detector) + 'Max resolution: %f\n' % (
                detector.get_max_resolution(imageset.get_beam().get_s0()))
            if params.show_panel_distance:
                for ipanel, panel in enumerate(detector):
                    from scitbx import matrix
                    fast = matrix.col(panel.get_fast_axis())
                    slow = matrix.col(panel.get_slow_axis())
                    normal = fast.cross(slow)
                    origin = matrix.col(panel.get_origin())
                    distance = origin.dot(normal)
                    fast_origin = - (origin - distance * normal).dot(fast)
                    slow_origin = - (origin - distance * normal).dot(slow)
                    print 'Panel %d: distance %.2f origin %.2f %.2f' % \
                        (ipanel, distance, fast_origin, slow_origin)
                print ''
            panel_id, (x, y) = beam_centre(detector, imageset.get_beam())
            if panel_id >= 0 and x is not None and y is not None:
                if len(detector) > 1:
                    beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" % (
                        panel_id, x, y)
                else:
                    beam_centre_str = "Beam centre: (%.2f,%.2f)" % (x, y)
            else:
                beam_centre_str = ""
            print str(imageset.get_beam()) + beam_centre_str + '\n'
            if imageset.get_scan() is not None:
                print imageset.get_scan()
            if imageset.get_goniometer() is not None:
                print imageset.get_goniometer()

    # --- reflection table summaries ----------------------------------------
    from libtbx.containers import OrderedDict, OrderedSet
    # printf-style format for each known reflection-table column
    formats = OrderedDict([
        ('miller_index', '%i, %i, %i'),
        ('d', '%.2f'),
        ('dqe', '%.3f'),
        ('id', '%i'),
        ('imageset_id', '%i'),
        ('panel', '%i'),
        ('flags', '%i'),
        ('background.mean', '%.1f'),
        ('background.dispersion', '%.1f'),
        ('background.mse', '%.1f'),
        ('background.sum.value', '%.1f'),
        ('background.sum.variance', '%.1f'),
        ('intensity.prf.value', '%.1f'),
        ('intensity.prf.variance', '%.1f'),
        ('intensity.sum.value', '%.1f'),
        ('intensity.sum.variance', '%.1f'),
        ('intensity.cor.value', '%.1f'),
        ('intensity.cor.variance', '%.1f'),
        ('lp', '%.3f'),
        ('num_pixels.background', '%i'),
        ('num_pixels.background_used', '%i'),
        ('num_pixels.foreground', '%i'),
        ('num_pixels.valid', '%i'),
        ('partial_id', '%i'),
        ('partiality', '%.4f'),
        ('profile.correlation', '%.3f'),
        ('profile.rmsd', '%.3f'),
        ('xyzcal.mm', '%.2f, %.2f, %.2f'),
        ('xyzcal.px', '%.2f, %.2f, %.2f'),
        ('delpsical.rad', '%.3f'),
        ('delpsical2', '%.3f'),
        ('xyzobs.mm.value', '%.2f, %.2f, %.2f'),
        ('xyzobs.mm.variance', '%.4e, %.4e, %.4e'),
        ('xyzobs.px.value', '%.2f, %.2f, %.2f'),
        ('xyzobs.px.variance', '%.4f, %.4f, %.4f'),
        ('s1', '%.4f, %.4f, %.4f'),
        ('rlp', '%.4f, %.4f, %.4f'),
        ('zeta', '%.3f'),
        ('x_resid', '%.3f'),
        ('x_resid2', '%.3f'),
        ('y_resid', '%.3f'),
        ('y_resid2', '%.3f'),
    ])

    for rlist in reflections:
        from cctbx.array_family import flex
        print
        print "Reflection list contains %i reflections" % (len(rlist))

        # min/max/mean table for every numeric / vector column
        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if type(col) in (flex.double, flex.int, flex.size_t):
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append([
                    k, formats[k] % flex.min(col), formats[k] % flex.max(col),
                    formats[k] % flex.mean(col)])
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if type(col) == flex.miller_index:
                    col = col.as_vec3_double()
                rows.append([
                    k, formats[k] % col.min(), formats[k] % col.max(),
                    formats[k] % col.mean()])

        from libtbx import table_utils
        print table_utils.format(rows, has_header=True, prefix="| ",
                                 postfix=" |")

        intensity_keys = (
            'miller_index', 'd', 'intensity.prf.value',
            'intensity.prf.variance', 'intensity.sum.value',
            'intensity.sum.variance', 'background.mean',
            'profile.correlation', 'profile.rmsd'
        )
        profile_fit_keys = ('miller_index', 'd',)
        centroid_keys = (
            'miller_index', 'd', 'xyzcal.mm', 'xyzcal.px',
            'xyzobs.mm.value', 'xyzobs.mm.variance',
            'xyzobs.px.value', 'xyzobs.px.variance'
        )

        keys_to_print = OrderedSet()
        if params.show_intensities:
            for k in intensity_keys:
                keys_to_print.add(k)
        if params.show_profile_fit:
            for k in profile_fit_keys:
                keys_to_print.add(k)
        if params.show_centroids:
            for k in centroid_keys:
                keys_to_print.add(k)
        if params.show_all_reflection_data:
            for k in formats:
                keys_to_print.add(k)

        def format_column(key, data, format_strings=None):
            # Render one column as a flex.std_string of fixed-width,
            # comma-joined element strings.
            if isinstance(data, flex.vec3_double):
                c_strings = [c.as_string(format_strings[i].strip())
                             for i, c in enumerate(data.parts())]
            elif isinstance(data, flex.miller_index):
                c_strings = [c.as_string(format_strings[i].strip())
                             for i, c in enumerate(data.as_vec3_double().parts())]
            elif isinstance(data, flex.size_t):
                c_strings = [data.as_int().as_string(format_strings[0].strip())]
            else:
                c_strings = [data.as_string(format_strings[0].strip())]
            column = flex.std_string()
            max_element_lengths = [c.max_element_length() for c in c_strings]
            for i in range(len(c_strings[0])):
                column.append(('%%%is' % len(key)) % ', '.join(
                    ('%%%is' % max_element_lengths[j]) % c_strings[j][i]
                    for j in range(len(c_strings))))
            return column

        if keys_to_print:
            keys = [k for k in keys_to_print if k in rlist]
            rows = [keys]
            # cap the number of reflections actually printed
            max_reflections = len(rlist)
            if params.max_reflections is not None:
                max_reflections = min(len(rlist), params.max_reflections)
            columns = []
            for k in keys:
                columns.append(format_column(
                    k, rlist[k], format_strings=formats[k].split(',')))
            print
            print "Printing %i of %i reflections:" % (
                max_reflections, len(rlist))
            # header row of right-aligned column names
            for j in range(len(columns)):
                key = keys[j]
                width = max(len(key), columns[j].max_element_length())
                print ("%%%is" % width) % key,
            print
            # one output line per reflection
            for i in range(max_reflections):
                for j in range(len(columns)):
                    print columns[j][i],
                print

    return
class block_base(DictMixin):
    """Base class for a CIF data block: an ordered mapping of data names
    (tags) to either scalar data items or loop objects.

    NOTE(review): this appears to be an older duplicate of the block_base
    definition earlier in this file (it lacks item_keys/get_loop_or_row and
    differs in get_loop and KeyError details) -- confirm which is current.

    Internal state:
      _items     -- dict of scalar data items (tag -> string value)
      loops      -- dict of loop name -> loop object
      _set       -- OrderedSet recording insertion order of top-level keys
      keys_lower -- lowercase tag -> original-case tag
    """

    def __init__(self):
        self._items = {}
        self.loops = {}
        self._set = OrderedSet()
        self.keys_lower = {}

    def __setitem__(self, key, value):
        # Tags must match the CIF tag syntax.
        if not re.match(tag_re, key):
            raise Sorry("%s is not a valid data name" % key)
        if isinstance(value, loop):
            self.loops[key] = value
            # register every column of the loop for case-insensitive lookup
            for k in value.keys():
                self.keys_lower[k.lower()] = k
        elif isinstance(value, basestring):
            v = str(value)
            # accept only strings representable as a CIF data item
            if not (re.match(any_print_char_re, v) or
                    re.match(quoted_string_re, v) or
                    re.match(semicolon_string_re, v)):
                raise Sorry("Invalid data item for %s" % key)
            self._items[key] = v
            self.keys_lower[key.lower()] = key
        else:
            try:
                # numeric scalars are stored via their string representation
                float(value)
                self[key] = str(value)
            except TypeError:
                # non-scalar value (e.g. a flex array): replace the column of
                # any existing loop that holds it, otherwise wrap it in a new
                # single-column loop
                if key in self._items:
                    del self._items[key]
                for loop_ in self.loops.values():
                    if key in loop_:
                        loop_[key] = value
                if key not in self:
                    self.add_loop(loop(header=(key,), data=(value,)))
        if key in self._items or isinstance(value, loop):
            self._set.add(key)

    def __getitem__(self, key):
        key = self.keys_lower.get(key.lower(), key)
        if key in self._items:
            return self._items[key]
        else:
            # give precedence to returning the actual data items in the event of a
            # single looped item when the loop name and data name coincide
            for loop in self.loops.values():
                if key in loop:
                    return loop[key]
            if key in self.loops:
                return self.loops[key]
        # NOTE(review): raised without the offending key -- KeyError(key)
        # would be more informative
        raise KeyError

    def __delitem__(self, key):
        key = self.keys_lower.get(key.lower(), key)
        if key in self._items:
            del self._items[key]
            self._set.discard(key)
        elif key in self.keys():
            # must be a looped item
            for k, loop in self.loops.iteritems():
                if key in loop:
                    if len(loop) == 1:
                        # remove the now empty loop
                        del self[k]
                    else:
                        del loop[key]
                    return
            raise KeyError
        elif key in self.loops:
            del self.loops[key]
            self._set.discard(key)
        else:
            raise KeyError

    def get_looped_item(self, key, key_error=KeyError, value_error=None,
                        default=None):
        """Return the value for ``key`` as a flex.std_string array.

        A missing key raises ``key_error`` (or returns ``default`` if
        ``key_error`` is None); a scalar value raises ``value_error`` if
        given, else returns ``default`` if given, else is wrapped in a
        one-element flex.std_string.
        """
        if key not in self:
            if key_error is None:
                return default
            else:
                raise key_error(key)
        value = self[key]
        if isinstance(value, flex.std_string):
            return value
        elif value_error is not None:
            raise value_error("%s is not a looped item" % key)
        elif default is not None:
            return default
        else:
            return flex.std_string([value])

    def loop_keys(self):
        # Unique key prefixes (the part before the first "."), in block order.
        done = []
        for key in self:
            key = key.split(".")[0]
            if key in done:
                continue
            done.append(key)
        return done

    def iterloops(self):
        # Yield the value stored under each unique key prefix.
        for key in self.loop_keys():
            yield self.get(key)

    def get_single_item(self, key, key_error=KeyError, value_error=ValueError,
                        default=None):
        """Return a scalar item; raise ``value_error`` (or return ``default``)
        if the key refers to a looped item instead."""
        if key not in self:
            if key_error is None:
                return default
            else:
                raise key_error(key)
        value = self[key]
        if not isinstance(value, flex.std_string):
            return value
        elif value_error is not None:
            raise value_error("%s appears as a looped item" % key)
        else:
            return default

    def keys(self):
        # Keys in insertion order, with each loop expanded to its column names.
        keys = []
        for key in self._set:
            if key in self._items:
                keys.append(key)
            elif key in self.loops:
                keys.extend(self.loops[key].keys())
        return keys

    def __repr__(self):
        return repr(OrderedDict(self.iteritems()))

    def update(self, other=None, **kwargs):
        """Merge ``other`` (a plain/ordered dict or another block) into self.

        NOTE(review): **kwargs is accepted but never used -- confirm intended.
        """
        if other is None:
            return
        if isinstance(other, OrderedDict) or isinstance(other, dict):
            for key, value in other.iteritems():
                self[key] = value
        else:
            # assume a block-like object: merge internal state directly
            self._items.update(other._items)
            self.loops.update(other.loops)
            self._set |= other._set
            self.keys_lower.update(other.keys_lower)

    def add_data_item(self, tag, value):
        self[tag] = value

    def add_loop(self, loop):
        try:
            self.setdefault(loop.name(), loop)
        except Sorry:
            # create a unique loop name
            self.setdefault('_' + str(hash(tuple(loop.keys()))), loop)

    def get_loop(self, loop_name, default=None):
        # NOTE(review): looks up self.loops by the lowercased name directly,
        # so a loop stored under a mixed-case key would not be found --
        # the other block_base variant in this file resolves the original
        # case via keys_lower first; confirm which behaviour is wanted.
        loop_ = self.loops.get(loop_name.lower())
        if loop_ is None:
            return default
        return loop_

    def get_loop_with_defaults(self, loop_name, default_dict):
        # Return the named loop, adding a default-filled column for every
        # key of default_dict that the loop does not already contain.
        loop_ = self.get_loop(loop_name)
        if loop_ is None:
            loop_ = loop(header=default_dict.keys())
        n_rows = loop_.n_rows()
        for key, value in default_dict.iteritems():
            if key not in loop_:
                loop_.add_column(key, flex.std_string(n_rows, value))
        return loop_

    def __copy__(self):
        # shallow copy: contained loop objects are shared with the original
        new = self.__class__()
        new._items = self._items.copy()
        new.loops = self.loops.copy()
        new._set = copy.copy(self._set)
        new.keys_lower = self.keys_lower.copy()
        return new

    copy = __copy__

    def __deepcopy__(self, memo):
        new = self.__class__()
        new._items = copy.deepcopy(self._items, memo)
        new.loops = copy.deepcopy(self.loops, memo)
        new._set = copy.deepcopy(self._set, memo)
        new.keys_lower = copy.deepcopy(self.keys_lower, memo)
        return new

    def deepcopy(self):
        return copy.deepcopy(self)

    def __str__(self):
        s = StringIO()
        self.show(out=s)
        return s.getvalue()

    def validate(self, dictionary):
        # Validate every item and loop against the given CIF dictionary;
        # blocks also validate their save frames recursively.
        for key, value in self._items.iteritems():
            dictionary.validate_single_item(key, value, self)
        for loop in self.loops.values():
            dictionary.validate_loop(loop, self)
        if isinstance(self, block):
            for value in self.saves.itervalues():
                value.validate(dictionary)

    def sort(self, recursive=False, key=None, reverse=False):
        # Re-order top-level keys: sorted scalar items first, then sorted
        # loop names; optionally sort columns inside each loop too.
        self._set = OrderedSet(
            sorted(self._items.keys(), key=key, reverse=reverse) \
            + sorted(self.loops.keys(), key=key, reverse=reverse))
        if recursive:
            for l in self.loops.values():
                l.sort(key=key, reverse=reverse)

    def difference(self, other):
        """Items that either appear in both self and other and the value has
        changed or appear in self but not other.

        NOTE(review): for keys present in both blocks, the value stored in
        the result is ``other``'s value, not self's -- confirm intended.
        """
        new = self.__class__()
        for items in (self._items, self.loops):
            for key, value in items.iteritems():
                if key in other:
                    other_value = other[key]
                    if other_value == value:
                        continue
                    else:
                        new[key] = other_value
                else:
                    new[key] = value
        return new
def __init__(self, pdb_hierarchy, sequences, alignment_params=None,
             crystal_symmetry=None, coordinate_precision=5,
             occupancy_precision=3, b_iso_precision=5, u_aniso_precision=5):
    """Build a CIF block from a PDB hierarchy plus reference sequences.

    Extends the base pdb_hierarchy_as_cif_block conversion with the
    _entity, _entity_poly and _entity_poly_seq categories derived from a
    sequence alignment, and fills in _atom_site.label_seq_id /
    label_entity_id accordingly.
    """
    # delegate the basic atom-site conversion to the parent class
    pdb_hierarchy_as_cif_block.__init__(
        self, pdb_hierarchy, crystal_symmetry=crystal_symmetry,
        coordinate_precision=coordinate_precision,
        occupancy_precision=occupancy_precision,
        b_iso_precision=b_iso_precision,
        u_aniso_precision=u_aniso_precision)

    import mmtbx.validation.sequence
    # align each chain of the hierarchy against the supplied sequences
    validation = mmtbx.validation.sequence.validation(
        pdb_hierarchy=pdb_hierarchy,
        sequences=sequences,
        params=alignment_params,
        extract_residue_groups=True,
        log=null_out(), # silence output
    )

    entity_loop = iotbx.cif.model.loop(header=(
        '_entity.id',
        '_entity.type',
        #'_entity.src_method',
        #'_entity.pdbx_description',
        '_entity.formula_weight',
        '_entity.pdbx_number_of_molecules',
        #'_entity.details',
        #'_entity.pdbx_mutation',
        #'_entity.pdbx_fragment',
        #'_entity.pdbx_ec'
    ))

    entity_poly_loop = iotbx.cif.model.loop(header=(
        '_entity_poly.entity_id',
        '_entity_poly.type',
        '_entity_poly.nstd_chirality',
        '_entity_poly.nstd_linkage',
        '_entity_poly.nstd_monomer',
        '_entity_poly.pdbx_seq_one_letter_code',
        '_entity_poly.pdbx_seq_one_letter_code_can',
        '_entity_poly.pdbx_strand_id',
        '_entity_poly.type_details'
    ))

    entity_poly_seq_loop = iotbx.cif.model.loop(header=(
        '_entity_poly_seq.entity_id',
        '_entity_poly_seq.num',
        '_entity_poly_seq.mon_id',
        '_entity_poly_seq.hetero',
    ))

    # bookkeeping: one entity per distinct aligned sequence, plus one
    # entity per distinct non-polymer residue name
    sequence_counts = OrderedDict()
    sequence_to_chain_ids = {}
    entity_id = 0
    sequence_to_entity_id = {}
    chain_id_to_entity_id = {}
    sequence_to_chains = {}
    residue_group_to_seq_num_mapping = {}
    aligned_pdb_chains = OrderedSet()
    non_polymer_counts = dict_with_default_0()
    non_polymer_resname_to_entity_id = OrderedDict()

    for chain in validation.chains:
        sequence = chain.alignment.b
        if sequence not in sequence_to_entity_id:
            entity_id += 1
            sequence_to_entity_id[sequence] = entity_id
        sequence_counts.setdefault(sequence, 0)
        sequence_counts[sequence] += 1
        sequence_to_chain_ids.setdefault(sequence, [])
        sequence_to_chain_ids[sequence].append(chain.chain_id)
        sequence_to_chains.setdefault(sequence, [])
        sequence_to_chains[sequence].append(chain)
        chain_id_to_entity_id[chain.chain_id] = sequence_to_entity_id[sequence]
        aligned_pdb_chains.add(chain.residue_groups[0].parent())
        # NOTE(review): rebound on every loop iteration; only the final value
        # (after all aligned chains are collected) is used below -- consider
        # confirming whether this was meant to live after the loop.
        unaligned_pdb_chains = OrderedSet(
            pdb_hierarchy.chains()) - aligned_pdb_chains

        # sanity check: alignment length covers present + missing residues
        assert len(chain.residue_groups) + chain.n_missing_start + \
            chain.n_missing_end == len(sequence)
        # pad with None for residues missing from the model at either end
        residue_groups = [None] * chain.n_missing_start + \
            chain.residue_groups + [None] * chain.n_missing_end
        i = chain.n_missing_start
        seq_num = 0
        for i, residue_group in enumerate(residue_groups):
            if residue_group is None and chain.alignment.b[i] == '-':
                # a deletion
                continue
            seq_num += 1
            if residue_group is not None:
                residue_group_to_seq_num_mapping[
                    residue_group] = seq_num

    # assign entity ids to non-polymer residues (ligands, waters, ...)
    for pdb_chain in unaligned_pdb_chains:
        for residue_group in pdb_chain.residue_groups():
            for resname in residue_group.unique_resnames():
                if resname not in non_polymer_resname_to_entity_id:
                    entity_id += 1
                    non_polymer_resname_to_entity_id[resname] = entity_id
                non_polymer_counts[resname] += 1

    # one _entity / _entity_poly row per distinct sequence
    for sequence, count in sequence_counts.iteritems():
        entity_poly_seq_num = 0
        entity_id = sequence_to_entity_id[sequence]

        entity_loop.add_row((
            entity_id,
            'polymer', #polymer/non-polymer/macrolide/water
            #'?', #src_method
            #'?', # pdbx_description
            '?', # formula_weight
            len(sequence_to_chains[sequence]), # pdbx_number_of_molecules
            #'?', # details
            #'?', # pdbx_mutation
            #'?', # pdbx_fragment
            #'?' # pdbx_ec
        ))

        # The definition of the cif item _entity_poly.pdbx_seq_one_letter_code
        # says that modifications and non-standard amino acids should be encoded
        # as 'X', however in practice the PDB seem to encode them as the
        # three-letter code in parentheses.
        pdbx_seq_one_letter_code = []
        pdbx_seq_one_letter_code_can = []

        chains = sequence_to_chains[sequence]

        from iotbx.pdb import amino_acid_codes

        # use the first chain of this entity as representative
        chain = chains[0]
        matches = chain.alignment.matches()

        for i, one_letter_code in enumerate(sequence):
            # Data items in the ENTITY_POLY_SEQ category specify the sequence
            # of monomers in a polymer. Allowance is made for the possibility
            # of microheterogeneity in a sample by allowing a given sequence
            # number to be correlated with more than one monomer ID. The
            # corresponding ATOM_SITE entries should reflect this
            # heterogeneity.
            monomer_id = None
            if i >= chain.n_missing_start and i < (len(sequence) -
                                                   chain.n_missing_end):
                # residue is present in the model: use its observed resname
                monomer_id = chain.resnames[i - chain.n_missing_start]
            if monomer_id is None and one_letter_code == '-':
                continue
            pdbx_seq_one_letter_code_can.append(one_letter_code)
            if monomer_id is None:
                # residue missing from the model: derive a residue name from
                # the sequence letter
                if sequence_to_chains[sequence][0].chain_type \
                        == mmtbx.validation.sequence.PROTEIN:
                    monomer_id = amino_acid_codes.three_letter_given_one_letter.get(
                        one_letter_code, "UNK") # XXX
                else:
                    monomer_id = one_letter_code
            else:
                # residue present: derive the one-letter code from the
                # residue name, "(XYZ)" for non-standard residues
                if sequence_to_chains[sequence][0].chain_type \
                        == mmtbx.validation.sequence.PROTEIN:
                    one_letter_code = \
                        amino_acid_codes.one_letter_given_three_letter.get(
                            monomer_id, "(%s)" % monomer_id)
            pdbx_seq_one_letter_code.append(one_letter_code)
            entity_poly_seq_num += 1
            entity_poly_seq_loop.add_row((
                entity_id,
                entity_poly_seq_num,
                monomer_id,
                'no', #XXX
            ))

        entity_poly_type = '?'
        entity_nstd_chirality = 'n'
        # we should probably determine the chirality more correctly by
        # examining the chirality of the backbone chain rather than relying
        # on the residue names to be correct
        if chain.chain_type == mmtbx.validation.sequence.PROTEIN:
            n_d_peptides = 0
            n_l_peptides = 0
            n_achiral_peptides = 0
            n_unknown = 0
            for resname in chain.resnames:
                if resname == "GLY":
                    n_achiral_peptides += 1
                elif resname in iotbx.pdb.common_residue_names_amino_acid:
                    n_l_peptides += 1
                elif resname in amino_acid_codes.three_letter_l_given_three_letter_d:
                    n_d_peptides += 1
                else:
                    n_unknown += 1
            n_total = sum([n_d_peptides, n_l_peptides, n_achiral_peptides,
                           n_unknown])
            # NOTE(review): under Python 2 these are int/int divisions, so
            # the ratio is 0 unless the counts equal n_total -- presumably a
            # float division (fraction > 0.5) was intended; confirm.
            if (n_l_peptides + n_achiral_peptides)/n_total > 0.5:
                entity_poly_type = 'polypeptide(L)'
                if n_d_peptides > 0:
                    entity_nstd_chirality = 'y'
            elif (n_d_peptides + n_achiral_peptides)/n_total > 0.5:
                entity_poly_type = 'polypeptide(D)'
                if n_l_peptides > 0:
                    entity_nstd_chirality = 'y'
        elif chain.chain_type == mmtbx.validation.sequence.NUCLEIC_ACID:
            n_dna = 0
            n_rna = 0
            n_unknown = 0
            for resname in chain.resnames:
                if resname is not None and resname.strip().upper() in (
                        'AD', 'CD', 'GD', 'TD', 'DA', 'DC', 'DG', 'DT'):
                    n_dna += 1
                elif resname is not None and resname.strip().upper() in (
                        'A', 'C', 'G', 'T', '+A', '+C', '+G', '+T'):
                    n_rna += 1
                else:
                    n_unknown += 1
            n_total = sum([n_dna + n_rna + n_unknown])
            # NOTE(review): same Python 2 integer-division caveat as above.
            if n_dna/n_total > 0.5 and n_rna == 0:
                entity_poly_type = 'polydeoxyribonucleotide'
            elif n_rna/n_total > 0.5 and n_dna == 0:
                entity_poly_type = 'polyribonucleotide'
            elif (n_rna + n_dna)/n_total > 0.5:
                entity_poly_type = \
                    'polydeoxyribonucleotide/polyribonucleotide hybrid'

        entity_poly_loop.add_row((
            entity_id,
            entity_poly_type,
            entity_nstd_chirality,
            'no',
            'no',
            wrap_always("".join(pdbx_seq_one_letter_code),
                        width=80).strip(),
            wrap_always("".join(pdbx_seq_one_letter_code_can),
                        width=80).strip(),
            ','.join(sequence_to_chain_ids[sequence]),
            '?'
        ))

    # one _entity row per distinct non-polymer residue name
    for resname, entity_id in non_polymer_resname_to_entity_id.iteritems():
        entity_type = "non-polymer"
        if resname == "HOH":
            entity_type = "water" # XXX
        entity_loop.add_row((
            entity_id,
            entity_type, #polymer/non-polymer/macrolide/water
            #'?', #src_method
            #'?', # pdbx_description
            '?', # formula_weight
            non_polymer_counts[resname], # pdbx_number_of_molecules
            #'?', # details
            #'?', # pdbx_mutation
            #'?', # pdbx_fragment
            #'?' # pdbx_ec
        ))

    self.cif_block.add_loop(entity_loop)
    self.cif_block.add_loop(entity_poly_loop)
    self.cif_block.add_loop(entity_poly_seq_loop)
    self.cif_block.update(pdb_hierarchy.as_cif_block())

    # fill in _atom_site.label_seq_id / label_entity_id by matching each
    # residue group's chain id, insertion code and residue number
    label_entity_id = self.cif_block['_atom_site.label_entity_id']
    auth_seq_id = self.cif_block['_atom_site.auth_seq_id']
    ins_code = self.cif_block['_atom_site.pdbx_PDB_ins_code']
    auth_asym_id = self.cif_block['_atom_site.auth_asym_id']
    label_seq_id = flex.std_string(auth_seq_id.size(), '.')
    # work on a copy so '?' placeholders can be blanked for comparison
    ins_code = ins_code.deep_copy()
    ins_code.set_selected(ins_code == '?', '')

    for residue_group, seq_num in residue_group_to_seq_num_mapping.iteritems():
        sel = ((auth_asym_id == residue_group.parent().id) &
               (ins_code == residue_group.icode.strip()) &
               (auth_seq_id == residue_group.resseq.strip()))
        label_seq_id.set_selected(sel, str(seq_num))
        label_entity_id.set_selected(
            sel, str(chain_id_to_entity_id[residue_group.parent().id]))

    for pdb_chain in unaligned_pdb_chains:
        for residue_group in pdb_chain.residue_groups():
            sel = ((auth_asym_id == residue_group.parent().id) &
                   (ins_code == residue_group.icode.strip()) &
                   (auth_seq_id == residue_group.resseq.strip()))
            label_entity_id.set_selected(
                sel,
                str(non_polymer_resname_to_entity_id[
                    residue_group.unique_resnames()[0]]))

    self.cif_block['_atom_site.label_seq_id'] = label_seq_id

    # reorder the loops
    atom_site_loop = self.cif_block['_atom_site']
    atom_site_aniso_loop = self.cif_block.get('_atom_site_anisotrop')
    del self.cif_block['_atom_site']
    self.cif_block.add_loop(atom_site_loop)
    if atom_site_aniso_loop is not None:
        del self.cif_block['_atom_site_anisotrop']
        self.cif_block.add_loop(atom_site_aniso_loop)
def run(args): from dials.util.options import OptionParser from dials.util.options import flatten_experiments from dials.util.options import flatten_datablocks from dials.util.options import flatten_reflections import libtbx.load_env usage = "%s [options] datablock.json | experiments.json | image_*.cbf" %( libtbx.env.dispatcher_name) parser = OptionParser( usage=usage, phil=phil_scope, read_experiments=True, read_datablocks=True, read_datablocks_from_images=True, read_reflections=True, check_format=False, epilog=help_message) params, options = parser.parse_args(show_diff_phil=True) experiments = flatten_experiments(params.input.experiments) datablocks = flatten_datablocks(params.input.datablock) reflections = flatten_reflections(params.input.reflections) if len(datablocks) == 0 and len(experiments) == 0 and len(reflections) == 0: parser.print_help() exit() for i_expt, expt in enumerate(experiments): print "Experiment %i:" %i_expt print str(expt.detector) print 'Max resolution (at corners): %f' % ( expt.detector.get_max_resolution(expt.beam.get_s0())) print 'Max resolution (inscribed): %f' % ( expt.detector.get_max_inscribed_resolution(expt.beam.get_s0())) print '' panel_id, (x, y) = beam_centre(expt.detector, expt.beam) if panel_id >= 0 and x is not None and y is not None: if len(expt.detector) > 1: beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" %(panel_id, x, y) else: beam_centre_str = "Beam centre: (%.2f,%.2f)" %(x, y) else: beam_centre_str = "" print str(expt.beam) + beam_centre_str + '\n' if expt.scan is not None: print expt.scan if expt.goniometer is not None: print expt.goniometer expt.crystal.show(show_scan_varying=params.show_scan_varying) if expt.crystal.num_scan_points: from scitbx.array_family import flex from cctbx import uctbx abc = flex.vec3_double() angles = flex.vec3_double() for n in range(expt.crystal.num_scan_points): a, b, c, alpha, beta, gamma = expt.crystal.get_unit_cell_at_scan_point(n).parameters() abc.append((a, b, c)) 
angles.append((alpha, beta, gamma)) a, b, c = abc.mean() alpha, beta, gamma = angles.mean() mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma)) print " Average unit cell: %s" %mean_unit_cell print for datablock in datablocks: if datablock.format_class() is not None: print 'Format: %s' %datablock.format_class() imagesets = datablock.extract_imagesets() for imageset in imagesets: try: print imageset.get_template() except Exception: pass detector = imageset.get_detector() print str(detector) + 'Max resolution: %f\n' %( detector.get_max_resolution(imageset.get_beam().get_s0())) if params.show_panel_distance: for ipanel, panel in enumerate(detector): from scitbx import matrix fast = matrix.col(panel.get_fast_axis()) slow = matrix.col(panel.get_slow_axis()) normal = fast.cross(slow) origin = matrix.col(panel.get_origin()) distance = origin.dot(normal) fast_origin = - (origin - distance * normal).dot(fast) slow_origin = - (origin - distance * normal).dot(slow) print 'Panel %d: distance %.2f origin %.2f %.2f' % \ (ipanel, distance, fast_origin, slow_origin) print '' panel_id, (x, y) = beam_centre(detector, imageset.get_beam()) if panel_id >= 0 and x is not None and y is not None: if len(detector) > 1: beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" %(panel_id, x, y) else: beam_centre_str = "Beam centre: (%.2f,%.2f)" %(x, y) else: beam_centre_str = "" print str(imageset.get_beam()) + beam_centre_str + '\n' if imageset.get_scan() is not None: print imageset.get_scan() if imageset.get_goniometer() is not None: print imageset.get_goniometer() from libtbx.containers import OrderedDict, OrderedSet formats = OrderedDict([ ('miller_index', '%i, %i, %i'), ('d','%.2f'), ('dqe','%.3f'), ('id','%i'), ('imageset_id','%i'), ('panel','%i'), ('flags', '%i'), ('background.mean', '%.1f'), ('background.dispersion','%.1f'), ('background.mse', '%.1f'), ('background.sum.value', '%.1f'), ('background.sum.variance', '%.1f'), ('intensity.prf.value','%.1f'), 
('intensity.prf.variance','%.1f'), ('intensity.sum.value','%.1f'), ('intensity.sum.variance','%.1f'), ('intensity.cor.value','%.1f'), ('intensity.cor.variance','%.1f'), ('lp','%.3f'), ('num_pixels.background','%i'), ('num_pixels.background_used','%i'), ('num_pixels.foreground','%i'), ('num_pixels.valid','%i'), ('partial_id','%i'), ('partiality','%.4f'), ('profile.correlation','%.3f'), ('profile.rmsd','%.3f'), ('xyzcal.mm','%.2f, %.2f, %.2f'), ('xyzcal.px','%.2f, %.2f, %.2f'), ('delpsical.rad','%.3f'), ('delpsical2','%.3f'), ('xyzobs.mm.value','%.2f, %.2f, %.2f'), ('xyzobs.mm.variance','%.4e, %.4e, %.4e'), ('xyzobs.px.value','%.2f, %.2f, %.2f'), ('xyzobs.px.variance','%.4f, %.4f, %.4f'), ('s1','%.4f, %.4f, %.4f'), ('rlp','%.4f, %.4f, %.4f'), ('zeta','%.3f'), ('x_resid','%.3f'), ('x_resid2','%.3f'), ('y_resid','%.3f'), ('y_resid2','%.3f'), ]) for rlist in reflections: from cctbx.array_family import flex print print "Reflection list contains %i reflections" %(len(rlist)) rows = [["Column", "min", "max", "mean"]] for k, col in rlist.cols(): if type(col) in (flex.double, flex.int, flex.size_t): if type(col) in (flex.int, flex.size_t): col = col.as_double() rows.append([k, formats[k] %flex.min(col), formats[k] %flex.max(col), formats[k]%flex.mean(col)]) elif type(col) in (flex.vec3_double, flex.miller_index): if type(col) == flex.miller_index: col = col.as_vec3_double() rows.append([k, formats[k] %col.min(), formats[k] %col.max(), formats[k]%col.mean()]) from libtbx import table_utils print table_utils.format(rows, has_header=True, prefix="| ", postfix=" |") intensity_keys = ( 'miller_index', 'd', 'intensity.prf.value', 'intensity.prf.variance', 'intensity.sum.value', 'intensity.sum.variance', 'background.mean', 'profile.correlation', 'profile.rmsd' ) profile_fit_keys = ('miller_index', 'd',) centroid_keys = ( 'miller_index', 'd', 'xyzcal.mm', 'xyzcal.px', 'xyzobs.mm.value', 'xyzobs.mm.variance', 'xyzobs.px.value', 'xyzobs.px.variance' ) keys_to_print = OrderedSet() if 
params.show_intensities: for k in intensity_keys: keys_to_print.add(k) if params.show_profile_fit: for k in profile_fit_keys: keys_to_print.add(k) if params.show_centroids: for k in centroid_keys: keys_to_print.add(k) if params.show_all_reflection_data: for k in formats: keys_to_print.add(k) def format_column(key, data, format_strings=None): if isinstance(data, flex.vec3_double): c_strings = [c.as_string(format_strings[i].strip()) for i, c in enumerate(data.parts())] elif isinstance(data, flex.miller_index): c_strings = [c.as_string(format_strings[i].strip()) for i, c in enumerate(data.as_vec3_double().parts())] elif isinstance(data, flex.size_t): c_strings = [data.as_int().as_string(format_strings[0].strip())] else: c_strings = [data.as_string(format_strings[0].strip())] column = flex.std_string() max_element_lengths = [c.max_element_length() for c in c_strings] for i in range(len(c_strings[0])): column.append(('%%%is' %len(key)) %', '.join( ('%%%is' %max_element_lengths[j]) %c_strings[j][i] for j in range(len(c_strings)))) return column if keys_to_print: keys = [k for k in keys_to_print if k in rlist] rows = [keys] max_reflections = len(rlist) if params.max_reflections is not None: max_reflections = min(len(rlist), params.max_reflections) columns = [] for k in keys: columns.append(format_column(k, rlist[k], format_strings=formats[k].split(','))) print print "Printing %i of %i reflections:" %(max_reflections, len(rlist)) for j in range(len(columns)): key = keys[j] width = max(len(key), columns[j].max_element_length()) print ("%%%is" %width) %key, print for i in range(max_reflections): for j in range(len(columns)): print columns[j][i], print return