Example 1
def _transform_schematic(session, transform, center, from_rgba, to_rgba,
                         length, width, thickness):

    axis, rot_center, angle_deg, shift = transform.axis_center_angle_shift()

    # Align rot_center at same position along axis as center.
    from chimerax.geometry import inner_product
    rot_center += inner_product(center - rot_center, axis) * axis
    width_axis = center - rot_center
    varray, narray, tarray = _axis_square(axis, rot_center, width_axis, length,
                                          width, thickness)

    from chimerax.core.models import Model, Surface

    s1 = Surface('slab 1', session)
    s1.set_geometry(varray, narray, tarray)
    s1.color = from_rgba

    s2 = Surface('slab 2', session)
    from chimerax.geometry import rotation, translation
    rot2 = translation(shift * axis) * rotation(
        axis, angle_deg, center=rot_center)
    varray2 = rot2 * varray
    narray2 = rot2.transform_vectors(narray)
    s2.set_geometry(varray2, narray2, tarray)
    s2.color = to_rgba

    m = Model('transform schematic', session)
    m.add([s1, s2])

    return m
Example 2
def read_collada_surfaces(session,
                          path,
                          name=None,
                          color=(200, 200, 200, 255),
                          **kw):
    '''Open a collada file.'''

    from collada import Collada
    c = Collada(path)
    if name is None:
        from os.path import basename
        name = basename(path)
    from chimerax.geometry import Place
    splist = surfaces_from_nodes(c.scene.nodes, color, Place(), {}, session)
    if len(splist) > 1:
        from chimerax.core.models import Model
        s = Model(name, session)
        s.add(splist)
    elif len(splist) == 1:
        s = splist[0]
        s.name = name
    else:
        from chimerax.core.errors import UserError
        raise UserError('Collada file has no TriangleSets: %s' % name)
    set_instance_positions_and_colors(s.all_drawings())

    ai = c.assetInfo
    if ai:
        s.collada_unit_name = ai.unitname
        s.collada_contributors = ai.contributors

    return [s], ('Opened collada file %s' % name)
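A minimal usage sketch, assuming an open ChimeraX session (the file path is a placeholder, not from the source):

# Hypothetical usage of read_collada_surfaces; 'surface.dae' is an illustrative path.
models, status = read_collada_surfaces(session, 'surface.dae')
session.models.add(models)      # add the (possibly grouped) surfaces to the session
session.logger.info(status)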
Example 3
def _group_subunit_models(session, subunit_models, name):
    from chimerax.core.models import Model
    group = Model(name, session)
    for i,m in enumerate(subunit_models):
        m.name = 'subunit %d' % (i+1)
    group.add(subunit_models)
    return group
Example 4
def name_and_group_models(models, name_arg, path_info):
    if len(models) > 1:
        # name arg only applies to group, not underlings
        if name_arg:
            names = [name_arg] * len(models)
        elif len(path_info) == len(models):
            names = [model_name_from_path(p) for p in path_info]
        else:
            names = [model_name_from_path(path_info[0])] * len(models)
        for m, pn in zip(models, names):
            if name_arg or not m.name:
                m.name = pn
        from chimerax.core.models import Model
        names = set([m.name for m in models])
        if len(names) == 1:
            group_name = names.pop() + " group"
        elif len(path_info) == 1:
            group_name = model_name_from_path(path_info[0])
        else:
            group_name = "group"
        group = Model(group_name, models[0].session)
        group.add(models)
        return group
    model = models[0]
    if name_arg:
        model.name = name_arg
    else:
        if not model.name:
            model.name = model_name_from_path(path_info[0])
    return model
Example 5
def split(session, structures = None, chains = None, ligands = False, connected = False, atoms = None):
    '''
    Partition atoms into separate structures. If only the first argument is given then those
    structures are split into a separate structure for each chain.  If chains, ligands, connected,
    or atoms keywords are given then additional partitioning into smaller subsets is performed.

    Parameters
    ----------
    structures : Structures or None
      Structures to split.  If no structures are specified then all are used.
    chains : bool
      Split each chain into a separate atomic structure.
    ligands : bool
      Split each connected ligand into a separate atomic structure.
    connected : bool
      Split each connected subset of atoms into a separate atomic structure.
    atoms : list of Atoms
      Split specified atoms into separate atomic structures.  This option
      can be specified multiple times.
    '''
    if structures is None:
        from chimerax import atomic
        structures = atomic.all_atomic_structures(session)
        
    if chains is None and not ligands and not connected and atoms is None:
        chains = True

    slist = []
    olist = []
    log = session.logger
    from chimerax.core.models import Model
    for m in structures:
        clist = split_molecule(m, chains, ligands, connected, atoms)
        if clist:
            parent = Model(m.name, session)
            parent.id = m.id
            for i, c in enumerate(clist):
                c.id = parent.id + (i+1,)
                c.position = m.position
            parent.add(clist)
            slist.append(m)
            olist.append(parent)
            msg = 'Split %s (#%s) into %d models' % (m.name, m.id_string, len(clist))
        else:
            msg = 'Did not split %s, has only one piece' % m.name
        log.status(msg)
        log.info(msg)

    models = session.models
    models.close(slist)
    models.add(olist)
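A minimal usage sketch based on the docstring above, assuming an open ChimeraX session with atomic structures loaded:

# Hypothetical usage of split(): with no keywords, every open structure is
# split into one model per chain (chains defaults to True in that case).
split(session)

# Split into connected subsets of atoms instead of chains.
split(session, connected=True)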
Example 6
def lattice_models(session, seg, max_surfaces=100):
    # Map lattice id to dictionary mapping segment index to (descrip, color)
    lattice_segs = {}
    for segment in seg.segments:
        v = segment.three_d_volume
        if v is not None:
            lseg = lattice_segs.setdefault(v.lattice_id, {})
            if v.value is not None:
                lseg[int(v.value)] = (segment.biological_annotation,
                                      segment.colour)

    scale, shift = guess_scale_and_shift(seg)

    # Create Volume model of segment indices for each lattice.
    models = []
    lattices = seg.lattices
    for i, lattice in enumerate(lattices):
        d = lattice.data_array  # Docs say number array, but emd 1547 gives bytes
        name = 'region map' if len(lattices) == 1 else 'region map %d' % i
        from chimerax.map_data import ArrayGridData
        g = ArrayGridData(d, step=scale, origin=shift, name=name)
        from chimerax.map import volume_from_grid_data
        v = volume_from_grid_data(g, session, open_model=False)
        v.display = False
        if lattice.id in lattice_segs:
            v.segments = lseg = lattice_segs[lattice.id]
            set_segmentation_image_colors(v, lseg)
            # Make a surface for each segment.
            regions = list(lseg.items())
            regions.sort()
            surfs = [
                segment_surface(v, sindex, descrip, color)
                for sindex, (descrip, color) in regions[:max_surfaces]
            ]
            if surfs:
                ns, nr = len(surfs), len(regions)
                sname = ('%d surfaces' %
                         ns) if ns == nr else ('%d of %d surfaces' % (ns, nr))
                from chimerax.core.models import Model
                surf_group = Model(sname, v.session)
                surf_group.add(surfs)
                models.append(surf_group)
        # TODO: Don't see how to get the transform id for the lattice.
        #  EMD 1547 segmentation has two transforms, identity and the
        #  map transform (2.8A voxel size, shift to center) but I didn't
        #  find any place where transform 1 is associated with the lattice.
        #  Appears it should be in segment.three_d_volume.transform_id but this
        #  attribute is optional and is None in this case.
        models.append(v)

    return models
Example 7
    def __init__(self, session, stream, name, auto_style, atomic):
        from .maestro import MaestroFile
        from chimerax.core.errors import UserError
        self.session = session
        self.auto_style = auto_style
        self.atomic = atomic
        try:
            mf = MaestroFile(stream)
        except (ValueError, SyntaxError) as e:
            raise UserError(str(e))

        # Make sure we have the right type and version of data
        # from initial block
        mf_iter = iter(mf)
        block0 = next(mf_iter)
        try:
            if block0.get_attribute("s_m_m2io_version") != "2.0.0":
                raise ValueError("Maestro version mismatch")
            #print "Maestro v2.0.0 file recognized"
        except Exception:
            raise UserError("%s: not a v2.0.0 Maestro file" % path)

        # Convert all subsequent blocks named "f_m_ct" to molecules
        receptors = []
        ligands = []
        for block in mf_iter:
            if block.name != "f_m_ct":
                print("%s: Skipping \"%s\" block" % (name, block.name))
            #print "Convert %s block to molecule" % block.name
            s = self._make_structure(block)
            if s:
                try:
                    is_ligand = block.get_attribute("r_i_docking_score")
                except (KeyError, ValueError):
                    is_ligand = False
                if is_ligand:
                    ligands.append(s)
                else:
                    receptors.append(s)
                self._add_properties(s, block, is_ligand)
                s.name = name
        if not receptors:
            self.structures = ligands
        elif not ligands:
            self.structures = receptors
        else:
            from chimerax.core.models import Model
            self.structures = receptors
            container = Model(name, self.session)
            container.add(ligands)
            self.structures.append(container)
Example 8
def show_symmetry(structures, sym_name, transforms, copies, new_model,
                  surface_only, resolution, grid_spacing, session):
    name = '%s %s' % (','.join(s.name for s in structures), sym_name)
    if copies and not new_model:
        from chimerax.core.models import Model
        g = Model(name, session)
        for i, tf in enumerate(transforms):
            if len(structures) > 1:
                # Add grouping model if more than one model is being copied
                ci = Model('copy %d' % (i + 1), session)
                ci.position = tf
                g.add([ci])
                copies = [m.copy() for m in structures]
                for c, m in zip(copies, structures):
                    c.position = m.scene_position
                ci.add(copies)
            else:
                m0 = structures[0]
                c = m0.copy()
                c.position = tf * m0.scene_position
                g.add([c])
        session.models.add([g])
    else:
        if new_model:
            from chimerax.core.models import Model
            group = Model(name, session)
            mols = [m.copy() for m in structures]
            group.add(mols)
            session.models.add([group])
        else:
            mols = structures
        # Instancing
        for m in mols:
            # Transforms are in scene coordinates, so convert to molecule coordinates
            spos = m.scene_position
            symops = (transforms if spos.is_identity()
                      else transforms.transform_coordinates(spos))
            if surface_only:
                from chimerax.surface import surface
                surfs = surface(session,
                                m.atoms,
                                grid_spacing=grid_spacing,
                                resolution=resolution)
                for s in surfs:
                    s.positions = s.positions * symops
            else:
                m.positions = m.positions * symops

    if copies or new_model:
        for s in structures:
            s.display = False
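The instancing branch above replicates a model by multiplying its positions by a list of symmetry transforms. A minimal sketch of that idea, assuming m is an already-open structure; the 3-fold rotation about the z axis is illustrative:

# Sketch of the instancing pattern used above; 'm' is assumed to be an
# open structure in the session.
from chimerax.geometry import Places, rotation
symops = Places([rotation((0, 0, 1), angle) for angle in (0, 120, 240)])
m.positions = m.positions * symops   # draw three copies without duplicating atoms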
Example 9
    def _molecule_copies(self, mol, new_model, session):
        n = len(self.chain_ops)
        if not new_model:
            if n > 1:
                from chimerax.core.errors import UserError
                raise UserError(
                    'Assembly requires new model because '
                    'it uses more than one set of positioning matrices.')
            else:
                return [mol]
        # Create copies
        name = '%s assembly %s' % (mol.name, self.id)
        if n > 1:
            from chimerax.core.models import Model
            group = Model(name, session)
            mcopies = [mol.copy('%s %d' % (mol.name, i + 1)) for i in range(n)]
            group.add(mcopies)
            addm = [group]
        else:
            mcopies = addm = [mol.copy(name)]
        for m in mcopies:
            m.ignore_assemblies = True
        session.models.add(addm)
        return mcopies
Example 10
def fetch_autopack(session,
                   path,
                   results_name,
                   database=default_autopack_database,
                   ignore_cache=False):

    from . import read_apr
    recipe_loc, pieces = read_apr.read_autopack_results(path)
    recipe_url = recipe_loc.replace('autoPACKserver', database)
    from os.path import basename
    recipe_filename = basename(recipe_loc)
    from chimerax.core.fetch import fetch_file
    recipe_path = fetch_file(session,
                             recipe_url,
                             'recipe for ' + results_name,
                             recipe_filename,
                             'cellPACK',
                             ignore_cache=ignore_cache)

    ingr_filenames, comp_surfaces = read_apr.read_autopack_recipe(recipe_path)

    from chimerax.core.models import Model
    cpm = Model(results_name, session)

    # Fetch compartment surface files.
    csurfs = []
    from chimerax.surface.collada import read_collada_surfaces
    for comp_name, comp_loc, geom_loc in comp_surfaces:
        csurf = Model(comp_name, session)
        if comp_loc is not None:
            comp_url = comp_loc.replace('autoPACKserver', database)
            comp_filename = basename(comp_loc)
            comp_path = fetch_file(session,
                                   comp_url,
                                   'compartment surface ' + comp_filename,
                                   comp_filename,
                                   'cellPACK',
                                   ignore_cache=ignore_cache)
            slist, msg = read_collada_surfaces(session, comp_path,
                                               'representation')
            csurf.add(slist)
        if geom_loc is not None:
            geom_url = geom_loc.replace('autoPACKserver', database)
            geom_filename = basename(geom_loc)
            geom_path = fetch_file(session,
                                   geom_url,
                                   'compartment bounds ' + geom_filename,
                                   geom_filename,
                                   'cellPACK',
                                   ignore_cache=ignore_cache)
            slist, msg = read_collada_surfaces(session, geom_path, 'geometry')
            for s in slist:
                s.display = False
            csurf.add(slist)
        csurfs.append(csurf)
    cpm.add(csurfs)

    # Add ingredient surfaces to compartments.
    ingr_mesh_path = {}
    comp = {csurf.name: csurf for csurf in csurfs}
    ingr_ids = list(pieces.keys())
    ingr_ids.sort()  # Get reproducible ordering of ingredients
    for ingr_id in ingr_ids:
        ingr_filename = ingr_filenames[ingr_id]
        mesh_path = ingr_mesh_path.get(ingr_filename, None)
        if mesh_path is None:
            from urllib.parse import urljoin
            ingr_url = urljoin(recipe_url, ingr_filename)
            ingr_path = fetch_file(session,
                                   ingr_url,
                                   'ingredient ' + ingr_filename,
                                   ingr_filename,
                                   'cellPACK',
                                   ignore_cache=ignore_cache)
            mesh_loc = read_apr.read_ingredient(ingr_path)
            mesh_url = mesh_loc.replace('autoPACKserver', database)
            mesh_filename = basename(mesh_loc)
            mesh_path = fetch_file(session,
                                   mesh_url,
                                   'mesh ' + mesh_filename,
                                   mesh_filename,
                                   'cellPACK',
                                   ignore_cache=ignore_cache)
            ingr_mesh_path[ingr_filename] = mesh_path

        comp_name, interior_or_surf, ingr_name = ingr_id
        cs = comp.get((comp_name, interior_or_surf), None)
        if cs is None:
            cs = Model(interior_or_surf, session)
            comp[comp_name].add([cs])
            comp[(comp_name, interior_or_surf)] = cs
        placements = pieces[ingr_id]
        isurf = read_apr.create_surface(session, mesh_path, ingr_name,
                                        placements)
        cs.add([isurf])

    return cpm
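A minimal usage sketch (the results path and name are placeholders, not values from the source):

# Hypothetical call to fetch_autopack; the returned group model is added to the session.
cpm = fetch_autopack(session, 'results.apr.json', 'autopack results')
session.models.add([cpm])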
Example 11
class _RMFModel(Model):
    """Representation of the top level of an RMF model"""
    def __init__(self, session, filename):
        name = os.path.splitext(os.path.basename(filename))[0]
        self._unnamed_state = None
        self._drawing = None
        self._provenance = None
        self._provenance_map = {}
        self._rmf_resolutions = set()
        # We always want to show nodes with no explicit resolution
        self._selected_rmf_resolutions = set((None, ))
        self._rmf_chains = []
        super().__init__(name, session)

    def take_snapshot(self, session, flags):
        pm = {
            filename: model.id
            for filename, model in self._provenance_map.items()
            if not model.was_deleted
        }
        data = {
            'version': 1,
            'model state': Model.take_snapshot(self, session, flags),
            'rmf_filename': self.rmf_filename,
            'rmf_features': self.rmf_features,
            'rmf_provenance': self.rmf_provenance,
            'rmf_hierarchy': self.rmf_hierarchy,
            'provenance_map': pm,
            'rmf_resolutions': self._rmf_resolutions,
            'selected_rmf_resolutions': self._selected_rmf_resolutions,
            'rmf_chains': self._rmf_chains
        }
        return data

    @staticmethod
    def restore_snapshot(session, data):
        s = _RMFModel(session, '')
        s.set_state_from_snapshot(session, data)
        # Map session data back to ChimeraX objects only after all models have
        # been loaded (and not later, since they refer to model IDs and atom
        # indices which could be changed by the user after session-load)
        session.triggers.add_handler('end restore session',
                                     lambda trigger, session, model=s:
                                     _restore_chimera_obj(session, model))
        return s

    def set_state_from_snapshot(self, session, data):
        Model.set_state_from_snapshot(self, session, data['model state'])
        self.rmf_filename = data['rmf_filename']
        self.rmf_features = data['rmf_features']
        self.rmf_provenance = data['rmf_provenance']
        self.rmf_hierarchy = data['rmf_hierarchy']
        self._provenance_map = data['provenance_map']
        self._rmf_resolutions = data['rmf_resolutions']
        self._selected_rmf_resolutions = data['selected_rmf_resolutions']
        self._rmf_chains = data['rmf_chains']

    def _add_rmf_resolution(self, res):
        self._rmf_resolutions.add(res)
        self._selected_rmf_resolutions.add(res)

    def get_drawing(self):
        if self._drawing is None:
            self._drawing = _RMFDrawing(self.session, name="Geometry")
            self.add([self._drawing])
        return self._drawing

    def _has_provenance(self, name):
        """Return True iff provenance from the given name has been read"""
        return name in self._provenance_map

    def _update_provenance_map(self):
        """Make sure that provenance mapping is up to date, by deleting
           references to models that have been closed since the map was
           last modified."""
        to_delete = [
            name for name, model in self._provenance_map.items()
            if model.was_deleted
        ]
        for name in to_delete:
            del self._provenance_map[name]

    def _add_provenance(self, name, p):
        """Add a Model containing provenance information keyed by the given
           name (usually a filename)"""
        if self._provenance is None or self._provenance.was_deleted:
            self._provenance = Model('Provenance', self.session)
            self.add([self._provenance])
        self._provenance_map[name] = p
        self._provenance.add([p])
        return self._provenance

    def _add_state(self, name):
        """Create and return a new _RMFState"""
        s = _RMFState(self.session, name=name)
        self.add([s])
        return s

    def _add_rmf_chain(self, chain, hierarchy):
        self._rmf_chains.append((chain.get_chain_id(), hierarchy))

    def get_unnamed_state(self):
        """Get the 'unnamed' state, used for structure that isn't the
           child of an RMF State node."""
        if self._unnamed_state is None:
            self._unnamed_state = self._add_state('Unnamed state')
        return self._unnamed_state

    def add_shape(self, vertices, normals, triangles, name):
        drawing = self.get_drawing()
        drawing._drawing.add_shape(vertices,
                                   normals,
                                   triangles,
                                   numpy.array([255, 255, 255, 255]),
                                   description=name)
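The take_snapshot/restore_snapshot pair above is the session-saving pattern: serialize custom state alongside the base Model state, then rebuild the object from that dictionary on restore. A minimal sketch of the same pattern for a hypothetical Model subclass (ExampleModel and its 'extra' attribute are illustrative, not from the source):

from chimerax.core.models import Model

class ExampleModel(Model):
    """Hypothetical subclass illustrating the snapshot pattern above."""

    def __init__(self, session, name='example'):
        super().__init__(name, session)
        self.extra = None                       # custom state to persist

    def take_snapshot(self, session, flags):
        # Save the base Model state plus our own attributes, as _RMFModel does.
        return {'version': 1,
                'model state': Model.take_snapshot(self, session, flags),
                'extra': self.extra}

    @staticmethod
    def restore_snapshot(session, data):
        m = ExampleModel(session)
        Model.set_state_from_snapshot(m, session, data['model state'])
        m.extra = data['extra']
        return m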
Example 12
def read_obj(session, filename, name):
    """Read OBJ model as a surface WavefrontOBJ model.

    :param filename: either the name of a file or a file-like object

    Extra arguments are ignored.
    """

    if hasattr(filename, 'read'):
        # it's really a file-like object
        input = filename
    else:
        input = open(filename, 'r')

    models = []
    object_name = None
    vertices = []
    texcoords = []
    normals = []
    faces = []
    voffset = 0
    for line_num, line in enumerate(input.readlines()):
        if line.startswith('#'):
            continue  # Comment
        fields = line.split()
        if len(fields) == 0:
            continue
        f0, fa = fields[0], fields[1:]
        if f0 == 'v':
            # Vertex
            xyz = [float(x) for x in fa]
            if len(xyz) != 3:
                raise OBJError(
                    'OBJ reader only handles x,y,z vertices, file %s, line %d: "%s"'
                    % (name, line_num, line))
            vertices.append(xyz)
        elif f0 == 'vt':
            # Texture coordinates
            uv = [float(u) for u in fa]
            if len(uv) != 2:
                raise OBJError(
                    'OBJ reader only handles u,v texture coordinates, file %s, line %d: "%s"'
                    % (name, line_num, line))
            texcoords.append(uv)
        elif f0 == 'vn':
            # Vertex normal
            n = [float(x) for x in fa]
            if len(n) != 3:
                raise OBJError(
                    'OBJ reader only handles x,y,z normals, file %s, line %d: "%s"'
                    % (name, line_num, line))
            normals.append(n)
        elif f0 == 'f':
            # Polygonal face.
            f = _parse_face(fa, line, line_num)
            faces.append(f)
        elif f0 == 'o':
            # Object name
            if vertices or object_name is not None:
                oname = object_name if object_name else name
                m = new_object(session, oname, vertices, normals, texcoords,
                               faces, voffset)
                models.append(m)
                voffset += len(vertices)
                vertices, normals, texcoords, faces = [], [], [], []
            object_name = line[2:].strip()

    if vertices:
        oname = object_name if object_name else name
        m = new_object(session, oname, vertices, normals, texcoords, faces,
                       voffset)
        models.append(m)

    if input != filename:
        input.close()

    if len(models) == 1:
        model = models[0]
    elif len(models) > 1:
        from chimerax.core.models import Model
        model = Model(name, session)
        model.add(models)
    else:
        raise OBJError('OBJ file %s has no objects' % name)

    from os.path import basename
    msg = ('Opened OBJ file %s containing %d objects, %d triangles' %
           (basename(name), len(models), sum(len(m.triangles)
                                             for m in models)))
    return [model], msg
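A minimal usage sketch based on the docstring above (the path is illustrative); read_obj returns the opened models and a status message:

# Hypothetical usage of read_obj; 'mesh.obj' is a placeholder path.
models, msg = read_obj(session, 'mesh.obj', 'mesh.obj')
session.models.add(models)
session.logger.info(msg)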