Example #1
class Setting(DeclarativeBase):
    """Key-value pair for mediaTUM settings that can be changed at runtime"""

    __tablename__ = "setting"

    key = C(Unicode, primary_key=True)
    value = C(JSONB)
Example #2
class UserToUserGroup(DeclarativeBase, TimeStamp):

    __tablename__ = "user_to_usergroup"

    user_id = C(FK(User.id, ondelete="CASCADE"), primary_key=True)
    usergroup_id = C(FK(UserGroup.id, ondelete="CASCADE"), primary_key=True)
    managed_by_authenticator = C(Boolean, server_default="false")
    private = C(Boolean, server_default="false")

    user = rel(User,
               backref=bref("group_assocs",
                            passive_deletes=True,
                            cascade="all, delete-orphan"))
    usergroup = rel(UserGroup,
                    backref=bref("user_assocs",
                                 passive_deletes=True,
                                 cascade="all, delete-orphan"))

    __table_args__ = (
        # This exclude constraint acts like a unique constraint that applies only to rows where private is true.
        # Postgres doesn't support a WHERE clause for unique constraints (why?), so let's just use this.
        # Alternatively, we could use a partial unique index to enforce the same constraint.
        ExcludeConstraint((user_id, "="),
                          using="btree",
                          where="private = true",
                          name="only_one_private_group_per_user"),
        ExcludeConstraint((usergroup_id, "="),
                          using="btree",
                          where="private = true",
                          name="only_one_user_per_private_group"),
        # XXX: missing constraint: groups cannot be used elsewhere if they are private
    )
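As the comment above notes, the same rules could also be expressed with partial unique indexes instead of btree exclude constraints. A minimal sketch of that alternative, using SQLAlchemy's Index with a postgresql_where clause; the index names are made up here, and this is an illustration of the alternative, not something to add alongside the existing constraints:

from sqlalchemy import Index, text

# at most one private group membership per user (mirrors "only_one_private_group_per_user")
Index("uq_private_group_per_user",
      UserToUserGroup.user_id,
      unique=True,
      postgresql_where=text("private = true"))

# at most one member per private group (mirrors "only_one_user_per_private_group")
Index("uq_user_per_private_group",
      UserToUserGroup.usergroup_id,
      unique=True,
      postgresql_where=text("private = true"))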
Example #3
class IPNetworkList(DeclarativeBase):

    __tablename__ = "ipnetwork_list"
    __versioned__ = {}

    name = C(Unicode, primary_key=True)
    description = C(Unicode)
    subnets = C(ARRAY(CIDR), index=True)
Example #4
class OAuthUserCredentials(DeclarativeBase, TimeStamp):

    __tablename__ = "oauth_user_credentials"

    oauth_user = C(Unicode, primary_key=True)
    oauth_key = C(Unicode, nullable=False)
    user_id = integer_fk(User.id, nullable=False)

    user = rel(User)
Example #5
class NodeType(DeclarativeBase):
    """Node type / node class description.
    We don't need this in the application; it's only there to inform Postgres about our types.
    """

    __tablename__ = "nodetype"

    name = C(Unicode, primary_key=True)
    # does this type act as a container type? Other types are "content types".
    is_container = C(Boolean, index=True)
Example #6
class NodeAlias(DeclarativeBase):
    """Alias name for a node that will be shown if the alias is requested in the frontend.
    A node can have multiple aliases."""

    __tablename__ = "node_alias"

    alias = C(Unicode, primary_key=True)
    nid = integer_fk(Node.id)
    description = C(Unicode)

    node = rel(Node)
Example #7
class Fts(DeclarativeBase):

    __tablename__ = "fts"

    id = integer_pk()
    # XXX: we allow multiple search items with the same configuration
    # XXX: otherwise, we could use nid, config, searchtype as primary key
    # XXX: may change in the future
    nid = C(FK(Node.id, ondelete="CASCADE"))
    config = C(Text)
    searchtype = C(Text)
    tsvec = C(TSVECTOR)
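The tsvec column is what search queries are matched against. A minimal, hypothetical query sketch; the db session import mirrors the other examples, while the "english" config, the searchtype value, and the query string are placeholder assumptions (the real search code lives in core.search, this only illustrates the columns):

from sqlalchemy import func
from core import db

hits = (db.query(Fts)
        .filter(Fts.config == u"english")
        .filter(Fts.searchtype == u"fulltext")
        .filter(Fts.tsvec.op("@@")(func.to_tsquery(u"english", u"archive & metadata"))))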
Example #8
class AccessRulesetToRule(DeclarativeBase):
    __tablename__ = "access_ruleset_to_rule"
    __versioned__ = {}

    rule_id = C(FK(AccessRule.id, ondelete="CASCADE"),
                primary_key=True,
                nullable=False)
    ruleset_name = C(FK(AccessRuleset.name,
                        ondelete="CASCADE",
                        onupdate="CASCADE"),
                     primary_key=True,
                     nullable=False)
    invert = C(Boolean, server_default="false", index=True)
    blocking = C(Boolean, server_default="false", index=True)

    rule = rel(AccessRule, backref="ruleset_assocs")
Example #9
class AuthenticatorInfo(DeclarativeBase):

    __tablename__ = "authenticator"

    id = integer_pk()
    name = C(Unicode, nullable=False)
    auth_type = C(Unicode, nullable=False)

    @property
    def authenticator_key(self):
        return (self.auth_type, self.name)

    def __unicode__(self):
        return self.auth_type + ":" + self.name

    def __repr__(self):
        return u"AuthenticatorInfo<id: {} key: ({}, {})> ({})".format(
            self.id, self.name, self.auth_type,
            object.__repr__(self)).encode("utf8")

    __table_args__ = (UniqueConstraint(name, auth_type), )
Example #10
class UserGroup(DeclarativeBase, TimeStamp, UserGroupMixin):

    __tablename__ = "usergroup"
    __versioned__ = {}

    id = integer_pk()
    name = C(Unicode, nullable=False, unique=True)
    description = C(UnicodeText)
    hidden_edit_functions = C(ARRAY(Unicode), server_default="{}")

    is_editor_group = C(Boolean, server_default="false")
    is_workflow_editor_group = C(Boolean, server_default="false")
    is_admin_group = C(Boolean, server_default="false")

    users = association_proxy("user_assocs",
                              "user",
                              creator=lambda u: UserToUserGroup(user=u))

    @property
    def user_names(self):
        _user_names = [unicode(u) for u in self.users]
        return sorted(_user_names, key=unicode.lower)

    @property
    def metadatatype_access(self):
        from schema.schema import Metadatatype
        return db.query(Metadatatype).join(NodeToAccessRuleset).filter_by(
            ruleset_name=self.name).all()

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return u"UserGroup<{} '{}'> ({})".format(
            self.id, self.name, object.__repr__(self)).encode("utf8")
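The users association proxy hides the UserToUserGroup rows: appending a User creates the association object through the creator lambda above. A small usage sketch, where the constructor keyword arguments are assumptions about the declarative defaults:

group = UserGroup(name=u"editors")
user = User(login_name=u"alice", authenticator_id=0)   # hypothetical user
group.users.append(user)             # creates UserToUserGroup(user=user) behind the scenes
assert group.user_assocs[0].user is user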
Example #11
class NodeToAccessRuleset(DeclarativeBase):
    __tablename__ = "node_to_access_ruleset"
    __versioned__ = {}

    nid = C(FK(Node.id, ondelete="CASCADE"), primary_key=True, nullable=False)
    ruleset_name = C(FK(AccessRuleset.name,
                        ondelete="CASCADE",
                        onupdate="CASCADE"),
                     primary_key=True,
                     nullable=False)
    ruletype = C(Text, index=True, primary_key=True, nullable=False)
    invert = C(Boolean, server_default="false", index=True)
    blocking = C(Boolean, server_default="false", index=True)
    private = C(Boolean, server_default="false")

    ruleset = rel(AccessRuleset, backref="node_assocs")

    __table_args__ = (
        # This exclude constraint acts like a unique constraint that applies only to rows where private is true.
        # Postgres doesn't support a WHERE clause for unique constraints (why?), so let's just use this.
        # Alternatively, we could use a partial unique index to enforce the same constraint.
        ExcludeConstraint(
            (nid, "="), (ruletype, "="),
            using="btree",
            where="private = true",
            name="only_one_private_ruleset_per_node_and_ruletype"),
        # XXX: missing constraint: private rulesets cannot be used elsewhere
    )
Example #12
class NodeToAccessRule(DeclarativeBase):
    __tablename__ = "node_to_access_rule"

    nid = C(FK(Node.id, ondelete="CASCADE"), primary_key=True)
    rule_id = C(FK(AccessRule.id, ondelete="CASCADE"), primary_key=True)
    ruletype = C(Text, index=True, primary_key=True)
    invert = C(Boolean, server_default="false", index=True, primary_key=True)
    inherited = C(Boolean, server_default="false", index=True)
    blocking = C(Boolean, server_default="false", index=True, primary_key=True)

    rule = rel(AccessRule, backref="node_assocs")
Example #13
class AccessRule(DeclarativeBase):
    __tablename__ = "access_rule"
    __versioned__ = {}

    id = integer_pk()
    invert_subnet = C(Boolean, server_default="false", index=True)
    invert_date = C(Boolean, server_default="false", index=True)
    invert_group = C(Boolean, server_default="false", index=True)
    group_ids = C(ARRAY(Integer), index=True)
    subnets = C(ARRAY(CIDR), index=True)
    dateranges = C(ARRAY(Daterange), index=True)

    group_names = column_property(
        mediatumfunc.group_ids_to_names(sql.text("group_ids")))
Example #14
class NodeToFile(DeclarativeBase):
    __tablename__ = "node_to_file"
    __versioned__ = {}

    nid = C(Integer, FK(Node.id, ondelete="CASCADE"), primary_key=True)
    file_id = C(Integer, FK("file.id"), primary_key=True)
Example #15
                return self
        query = self
        for field in fields:
            if field.startswith("-"):
                expr = Node.attrs[field[1:]].astext.desc()
            else:
                expr = Node.attrs[field].astext
            query = query.order_by(expr)

        return query


t_noderelation = Table(
    "noderelation",
    db_metadata,
    C("nid", Integer, FK("node.id"), primary_key=True, index=True),
    C("cid",
      Integer,
      FK("node.id", ondelete="CASCADE"),
      primary_key=True,
      index=True),
    # SQLAlchemy automatically generates primary key integer columns as SERIAL.
    # We don't want that for the distance column, so disable it with autoincrement=False.
    C("distance", Integer, primary_key=True, autoincrement=False, index=True))


class BaseNodeMeta(DeclarativeMeta):
    def __init__(cls, name, bases, dct):  # @NoSelf
        """Add mapper args with a default polymorphic_identity
        of classname in lowercase if not defined.
        """
Example #16
class Node(DeclarativeBase, NodeMixin):
    """Base class for Nodes which holds all SQLAlchemy fields definitions
    """
    __metaclass__ = BaseNodeMeta
    __tablename__ = "node"
    __versioned__ = {
        "base_classes": (NodeVersionMixin, MtVersionBase, DeclarativeBase),
        "exclude": ["subnode", "system_attrs"]
    }

    id = C(Integer,
           node_id_seq,
           server_default=node_id_seq.next_value(),
           primary_key=True)
    type = C(Unicode, index=True)
    schema = C(Unicode, index=True)
    name = C(Unicode, index=True)
    orderpos = C(Integer, default=1, index=True)
    fulltext = deferred(C(Unicode))
    # indicate that this node is a subnode of a content type node
    # subnode exists just for performance reasons and is updated by the database
    # unversioned
    subnode = C(Boolean, server_default="false")

    attrs = deferred(C(MutableDict.as_mutable(JSONB)))
    # Migration from old mediatum: all attributes starting with "system." go here.
    # We should get rid of most (all?) such attributes in the future.
    # unversioned
    system_attrs = deferred(C(MutableDict.as_mutable(JSONB)))

    @hybrid_property
    def a_expr(self):
        """ see: Attributes"""
        raise Exception("node.a_expr")
        if "_attributes_accessor" not in self.__dict__:
            setattr(self, "_attributes_accessor", Attributes(self, "attrs"))
        return self._attributes_accessor

    @a_expr.expression
    def a(self):
        """ see: AttributesExpression"""
        if "_attributes_accessor" not in self.__dict__:
            setattr(self, "_attributes_accessor",
                    AttributesExpressionAdapter(self, "attrs"))
        return self._attributes_accessor

    @a.setter
    def a_set(self, value):
        raise NotImplementedError("immutable!")

    @hybrid_property
    def sys(self):
        """ see: Attributes"""
        if "_system_attributes_accessor" not in self.__dict__:
            setattr(self, "_system_attributes_accessor",
                    Attributes(self, "system_attrs"))
        return self._system_attributes_accessor

    @sys.expression
    def sys_expr(self):
        """ see: AttributesExpression"""
        if "_system_attributes_accessor" not in self.__dict__:
            setattr(self, "_system_attributes_accessor",
                    AttributesExpressionAdapter(self, "system_attrs"))
        return self._system_attributes_accessor

    @sys.setter
    def sys_set(self, value):
        raise NotImplementedError("immutable!")

    def __init__(self,
                 name=u"",
                 type=u"node",
                 id=None,
                 schema=None,
                 attrs=None,
                 system_attrs=None,
                 orderpos=None):
        self.name = name
        if not isinstance(type, unicode):
            warn(
                "type arg of Node should be unicode (hint: don't create nodes with Node(type='{}')!)"
                .format(type), DeprecationWarning)

        if "/" in type:
            warn(
                "use separate type and schema parameters instead of 'type/schema'",
                DeprecationWarning)
            type, schema = type.split("/")

        self.type = type
        self.attrs = MutableDict()
        self.system_attrs = MutableDict()
        if id:
            self.id = id
        if schema:
            self.schema = schema
        if attrs:
            self.attrs.update(attrs)
        if system_attrs:
            self.system_attrs.update(system_attrs)
        if orderpos:
            self.orderpos = orderpos

    @property
    def slow_content_children_for_all_subcontainers(self):
        """
        !!! very slow, use content_children_for_all_subcontainers instead!!!
        Collects all Content nodes in all subcontainers of this node.
        This excludes content nodes that are children of other content nodes.
        """
        warn("very slow, use content_children_for_all_subcontainers instead",
             DeprecationWarning)
        from contenttypes.data import Content
        from core import db
        sq = _subquery_subtree_container(self)
        query = db.query(Content).\
            join(t_noderelation, Node.id == t_noderelation.c.cid).\
            filter(t_noderelation.c.nid.in_(sq) | (t_noderelation.c.nid == self.id)).\
            filter(t_noderelation.c.distance == 1)

        return query

    @property
    def content_children_for_all_subcontainers(self):
        """Collects all Content nodes in all subcontainers of this node.
        This excludes content nodes that are children of other content nodes.
        """
        from contenttypes.data import Content
        from core import db
        sq = _subquery_subtree_distinct(self)
        return object_session(self).query(Content).filter(
            Node.id.in_(sq)).filter_by(subnode=False)

    @property
    def content_children_for_all_subcontainers_with_duplicates(self):
        """Collects all Content nodes in all subcontainers of this node.
        This excludes content nodes that are children of other content nodes.
        This method can be much faster than content_children_for_all_subcontainers, but may return fewer nodes than expected (when using limit).
        Don't use distinct() on the returned query; use content_children_for_all_subcontainers instead if you need distinct results!
        """
        from contenttypes.data import Content
        nr = t_noderelation
        # TODO: check if it's better to use the _subquery_subtree() here
        return object_session(self).query(Content).filter_by(
            subnode=False).join(
                nr, Content.id == nr.c.cid).filter(nr.c.nid == self.id)

    @property
    def content_children_count_for_all_subcontainers(self):
        if USE_CACHED_CHILDCOUNT:
            return exec_sqlfunc(
                object_session(self),
                mediatumfunc.count_content_children_for_all_subcontainers(
                    self.id))
        else:
            return self.content_children_for_all_subcontainers.count()

    def all_children_by_query(self, query):
        sq = _subquery_subtree_distinct(self)
        query = query.filter(Node.id.in_(sq))
        return query

    @staticmethod
    def req_has_access_to_node_id(node_id,
                                  accesstype,
                                  req=None,
                                  date=func.current_date()):
        # XXX: the database-independent code could move to core.node
        from core.transition import request
        from core.users import user_from_session

        if req is None:
            req = request

        user = user_from_session(req.session)

        # XXX: like in mysql version, what's the real solution?
        try:
            ip = IPv4Address(req.remote_addr)
        except AddressValueError:
            logg.warn("illegal IP address %s, refusing IP-based access",
                      req.remote_addr)
            ip = None

        return Node.has_access_to_node_id(node_id, accesstype, user, ip, date)

    @staticmethod
    def has_access_to_node_id(node_id,
                              accesstype,
                              user=None,
                              ip=None,
                              date=None):
        # XXX: the database-independent code could move to core.node
        from core import db
        from core.users import get_guest_user

        if user is None:
            user = get_guest_user()

        if user.is_admin:
            return True

        if ip is None:
            ip = IPv4Address("0.0.0.0")

        if date is None:
            date = func.current_date()

        accessfunc = access_funcs[accesstype]
        group_ids = user.group_ids
        access = accessfunc(node_id, group_ids, ip, date)
        return db.session.execute(select([access])).scalar()

    def _parse_searchquery(self, searchquery):
        """
        * `searchquery` is a string: parse it and transform it into a search tree.
        * `searchquery` is already in search tree form: the work is already done, return it unchanged.
        """
        from core.search import parse_searchquery
        if isinstance(searchquery, SearchTreeElement):
            searchtree = searchquery
        else:
            searchtree = parse_searchquery(searchquery)
        return searchtree

    def _query_subtree(self):
        """Builds the query object that is used as basis for content node searches below this node"""
        from contenttypes import Collections
        q = object_session(self).query
        subtree = _subquery_subtree(self)
        if self == q(Collections).one():
            # no need to filter, the whole tree can be searched
            subtree = None
        return subtree

    def search(self, searchquery, languages=None):
        """Creates a search query.
        :param searchquery: query in the search language, or an already parsed query (search tree) as `SearchTreeElement`
        :param languages: sequence of language config strings matching Fts.config
        :returns: Node Query
        """
        from core.database.postgres.search import apply_searchtree_to_query
        from contenttypes import Content
        q = object_session(self).query
        searchtree = self._parse_searchquery(searchquery)
        return apply_searchtree_to_query(q(Content), searchtree,
                                         languages), self._query_subtree()

    @property
    def tagged_versions(self):
        Transaction = versioning_manager.transaction_cls
        TransactionMeta = versioning_manager.transaction_meta_cls
        version_cls = version_class(self.__class__)
        return (self.versions.join(
            Transaction, version_cls.transaction_id == Transaction.id).join(
                Transaction.meta_relation).filter(
                    TransactionMeta.key == u"tag"))

    def get_tagged_version(self, tag):
        return self.tagged_versions.filter_by(value=tag).scalar()

    def get_published_version(self):
        Transaction = versioning_manager.transaction_cls
        TransactionMeta = versioning_manager.transaction_meta_cls
        version_cls = version_class(self.__class__)
        published_versions = (self.versions
                              .join(Transaction, version_cls.transaction_id == Transaction.id)
                              .join(Transaction.meta_relation)
                              .filter(TransactionMeta.key == u"publish"))
        return published_versions.scalar()

    def new_tagged_version(self,
                           tag=None,
                           comment=None,
                           publish=None,
                           user=None):
        """Returns a context manager that manages the creation of a new tagged node version.

        :param tag: a unicode tag assigned to the transaction belonging to the new version.
            If none is given, a new numbered version is created:
            the tag becomes the incremented version number of the last numbered version;
            if no numbered version exists yet, the previous version is tagged 1 and the new one 2.

        :param comment: optional comment for the transaction
        :param publish: optional publish marker stored in the transaction metadata (only one published version is allowed)
        :param user: user that will be associated with the transaction
        """
        node = self

        class VersionContextManager(object):
            def __enter__(self):
                self.session = s = object_session(node)
                if s.new or s.dirty:
                    raise Exception(
                        "Refusing to create a new tagged node version. Session must be clean!"
                    )

                uow = versioning_manager.unit_of_work(s)
                tx = uow.create_transaction(s)

                if user is not None:
                    tx.user = user

                if tag:
                    if node.get_tagged_version(tag):
                        raise ValueError("tag already exists")
                    tx.meta[u"tag"] = tag
                elif publish:
                    if node.get_published_version():
                        raise ValueError("publish version already exists")
                    tx.meta[u"publish"] = publish
                else:
                    NodeVersion = version_class(node.__class__)
                    # in case you were wondering: order_by(None) resets the default order_by
                    last_tagged_version = node.tagged_versions.order_by(
                        None).order_by(
                            NodeVersion.transaction_id.desc()).first()
                    if last_tagged_version is not None:
                        next_version = int(last_tagged_version.tag) + 1
                    else:
                        node.versions[-1].tag = u"1"
                        next_version = 2

                    tx.meta[u"tag"] = unicode(next_version)

                if comment:
                    tx.meta[u"comment"] = comment

                return tx

            def __exit__(self, exc_type, exc_value, traceback):
                if exc_type:
                    self.session.rollback()
                else:
                    self.session.commit()

        return VersionContextManager()

    def is_descendant_of(self, node):
        return exec_sqlfunc(object_session(self),
                            mediatumfunc.is_descendant_of(self.id, node.id))

    def _get_nearest_ancestor_by_type(self, ancestor_type):
        """Returns a nearest ancestor of `ancestor_type`.
        If none is found, return `Collections` as default.
        It's undefined which one will be returned if more than one nearest ancestor is found.
        """
        nr = t_noderelation
        q = object_session(self).query

        maybe_ancestor = (q(ancestor_type).join(
            nr, Node.id == nr.c.nid).filter_by(cid=self.id).order_by(
                nr.c.distance).limit(1).first())

        if maybe_ancestor is None:
            from contenttypes import Collections
            return q(Collections).one()

        return maybe_ancestor

    def get_container(self):
        from contenttypes import Container
        return self._get_nearest_ancestor_by_type(Container)

    def get_collection(self):
        from contenttypes import Collection
        return self._get_nearest_ancestor_by_type(Collection)

    def get_directory(self):
        from contenttypes import Directory
        return self._get_nearest_ancestor_by_type(Directory)

    def get_parent_sortfield(self):
        """Returns a nearest ancestor with non-empty sortfield.
        """
        first_ancestor_with_sortfield = self.all_parents.filter(
            Node.a.sortfield != u'').order_by('distance').first()
        return first_ancestor_with_sortfield

    @property
    def has_files(self):
        return len(self.file_objects) > 0

    __mapper_args__ = {'polymorphic_identity': 'node', 'polymorphic_on': type}

    def to_yaml(self):
        """overwrite default DeclarativeBase.to_yaml method because we need to convert MutableDicts first
        """
        node_dict = self.to_dict()
        node_dict["attrs"] = dict(node_dict["attrs"])
        node_dict["system_attrs"] = dict(node_dict["system_attrs"])
        return pyaml.dump(node_dict)
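new_tagged_version returns a context manager that opens a versioning transaction, tags it, and commits on success (or rolls back on an exception). A minimal usage sketch, assuming `node` is an already persisted Node attached to a clean session; the comment text and tag lookup value are placeholders:

# a sketch, not taken from the codebase: `node` is assumed persistent, its session clean
with node.new_tagged_version(comment=u"metadata cleanup") as tx:
    node.attrs[u"title"] = u"A better title"    # changes recorded in the new version
# __exit__ commits on success; without an explicit tag the next numbered tag is
# assigned (see the docstring), so it can later be fetched via
# node.get_tagged_version(u"2"), for example.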
Example #17
class AccessRuleset(DeclarativeBase):
    __tablename__ = "access_ruleset"
    __versioned__ = {}

    name = C(Unicode, primary_key=True)
    description = C(Unicode, server_default=u"")
Example #18
class File(DeclarativeBase, FileMixin):
    """Represents an item on the filesystem
    """
    __versioned__ = {"base_classes": (FileVersionMixin, DeclarativeBase)}

    #: True means: physical file should be deleted when database object is deleted
    unlink_after_deletion = False

    def __init__(self, path, filetype, mimetype, node=None, **kwargs):
        # legacy stuff
        datadir = config.settings["paths.datadir"]
        if path.startswith(datadir):
            warn("file path starts with paths.datadir, should be relative",
                 DeprecationWarning)
            path = path[len(datadir):]
        if "type" in kwargs:
            raise Exception(
                "type keyword arg is not allowed anymore, use filetype")
        if "filename" in kwargs:
            raise Exception(
                "name positional arg is not allowed anymore, use path")

        self.path = path
        self.filetype = filetype
        self.mimetype = mimetype
        if node is not None:
            self.node = node

    __tablename__ = "file"
    id = integer_pk()
    path = C(Unicode(4096))
    filetype = C(Unicode(126))
    mimetype = C(String(255))
    _size = C('size', BigInteger)
    # Checksum/hash columns
    sha512 = C(String(128))  # LargeBinary could be an alternative
    sha512_created_at = C(DateTime())
    sha512_checked_at = C(DateTime())
    sha512_ok = C(Boolean())

    nodes = rel(Node,
                secondary=NodeToFile.__table__,
                backref=bref("files",
                             lazy="dynamic",
                             query_class=AppenderQueryWithLen),
                lazy="dynamic")

    _node_objects = rel(Node,
                        secondary=NodeToFile.__table__,
                        backref=bref("file_objects", viewonly=True),
                        viewonly=True)

    def unlink(self):
        if self.exists:
            os.unlink(self.abspath)
        else:
            logg.warn(
                "tried to unlink missing physical file %s at %s, ignored",
                self.id, self.path)

    def __repr__(self):
        return "File #{} ({}:{}|{}) at {}".format(self.id, self.path,
                                                  self.filetype, self.mimetype,
                                                  hex(id(self)))

    def __unicode__(self):
        return u"# {} {} {} in {}".format(self.id, self.filetype,
                                          self.mimetype, self.path)

    @property
    def size(self):
        """Return size of file in bytes"""
        if self._size is None:
            return get_filesize(self.path)
        return self._size

    @property
    def size_humanized(self):
        """Return string with the size in human-friendly format, e.g. '7.9 kB'"""
        return humanize.naturalsize(self.size)

    def calculate_sha512(self):
        """Calculate the hash from the file on disk."""
        if not self.exists:
            return None
        return sha512_from_file(self.abspath)

    def update_sha512(self):
        """Overwrite the stored checksum value with the current checksum of the file on disk.
        Use with caution, should not be necessary under usual circumstances!"""
        if not self.exists:
            return None
        logg.info('Updating sha512 for file ID: %s.', self.id)
        self.sha512 = self.calculate_sha512()
        self.sha512_ok = True
        self.sha512_created_at = self.sha512_checked_at = datetime.utcnow()
        self._size = get_filesize(self.path)
        return self.sha512

    def get_or_create_sha512(self):
        """Return the stored hash. If there is none, create and store it."""
        if not self.exists:
            return None, False
        created = False
        if not self.sha512:
            created = True
            logg.info('Checksum not in DB, creating it for file ID: %s.',
                      self.id)
            self.update_sha512()
        return self.sha512, created

    def verify_checksum(self):
        """Make sure the file exists and has the same checksum as before"""
        if not self.exists:
            #raise IOError()
            logg.warn('verify_checksum: file %s does not exist at %s!',
                      self.id, self.abspath)
            self.sha512_ok = None
            return None
        self.sha512_checked_at = datetime.utcnow()
        sha_stored, created = self.get_or_create_sha512()
        if created:
            # checksum was just created, skip a second calculation of the hash
            return True
        else:
            sha_calculated = self.calculate_sha512()
            if sha_stored == sha_calculated and sha_calculated is not None:
                logg.debug('Matching checksums :) for file ID: %s.', self.id)
                self.sha512_ok = True
            else:
                logg.warn('Checksum mismatch for file ID: %s.', self.id)
                self.sha512_ok = False
        return self.sha512_ok
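The checksum helpers above are meant to be used roughly as follows; the path, filetype, and mimetype values are placeholders, and the physical file is assumed to exist below paths.datadir:

f = File(path=u"example/report.pdf", filetype=u"document", mimetype=u"application/pdf")
digest, created = f.get_or_create_sha512()   # computes and stores the hash on first use
ok = f.verify_checksum()                     # True/False, or None if the file is missing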
Example #19
class User(DeclarativeBase, TimeStamp, UserMixin):

    __tablename__ = "user"
    __versioned__ = {}

    id = integer_pk()
    login_name = C(Unicode, nullable=False)
    display_name = C(Unicode)

    lastname = C(Unicode)
    firstname = C(Unicode)
    telephone = C(Unicode)
    organisation = C(Unicode)
    comment = C(UnicodeText)
    email = C(EmailType)
    password_hash = C(String)
    salt = C(String)
    password = u''

    # user activity
    last_login = C(DateTime)
    active = C(Boolean, server_default="true")

    # options
    can_edit_shoppingbag = C(Boolean, server_default="false")
    can_change_password = C(Boolean, server_default="false")

    home_dir_id = integer_fk("node.id")

    # relationships
    groups = association_proxy(
        "group_assocs",
        "usergroup",
        creator=lambda ug: UserToUserGroup(usergroup=ug))
    home_dir = rel("Directory", foreign_keys=[home_dir_id])

    authenticator_info = rel(AuthenticatorInfo)
    authenticator_id = integer_fk(AuthenticatorInfo.id, nullable=False)

    @property
    def group_ids(self):
        return [g.id for g in self.groups]

    @property
    def group_names(self):
        return [g.name for g in self.groups]

    @property
    def is_editor(self):
        return any(g.is_editor_group for g in self.groups)

    @property
    def is_admin(self):
        return any(g.is_admin_group for g in self.groups)

    @property
    def is_guest(self):
        return (self.login_name == config.get_guest_name()
                and self.authenticator_id == 0)

    @property
    def is_workflow_editor(self):
        return any(g.is_workflow_editor_group for g in self.groups)

    @property
    def hidden_edit_functions(self):
        return [
            f for group in self.groups
            for f in group.hidden_edit_functions or []
        ]

    @property
    def upload_dir(self):
        from contenttypes import Directory
        if self.home_dir:
            return self.home_dir.children.filter(
                Directory.system_attrs[u"used_as"].astext == u"upload").one()

    @property
    def trash_dir(self):
        from contenttypes import Directory
        if self.home_dir:
            return self.home_dir.children.filter(
                Directory.system_attrs[u"used_as"].astext == u"trash").one()

    def get_or_add_private_group(self):
        """Gets the private group for this user.
        Creates the group if it's missing and adds it to the session.
        Always use this method and don't create private groups by yourself!
        :rtype: UserGroup
        """

        maybe_group_assoc = [g for g in self.group_assocs if g.private]

        if not maybe_group_assoc:
            # the name doesn't really matter, but it must be unique
            group = UserGroup(name=u"_user_{}".format(unicode(self.id)))
            group_assoc = UserToUserGroup(usergroup=group, private=True)
            self.group_assocs.append(group_assoc)
        else:
            group = maybe_group_assoc[0].usergroup

        return group

    def change_password(self, password):
        from core.auth import create_password_hash
        self.password_hash, self.salt = create_password_hash(password)

    def create_home_dir(self):
        from contenttypes.container import Directory, Home
        from core.database.postgres.permission import AccessRulesetToRule
        from core.permission import get_or_add_access_rule
        s = object_session(self)
        home_root = s.query(Home).one()
        homedir_name = self.login_name
        home = Directory(homedir_name)
        home_root.container_children.append(home)
        home.children.extend(create_special_user_dirs())
        # add access rules so only the user itself can access the home dir
        private_group = self.get_or_add_private_group()
        # we need the private group ID, it's set on flush by the DB
        s.flush()
        user_access_rule = get_or_add_access_rule(group_ids=[private_group.id])

        for access_type in (u"read", u"write", u"data"):
            ruleset = home.get_or_add_special_access_ruleset(access_type)
            arr = AccessRulesetToRule(rule=user_access_rule)
            ruleset.rule_assocs.append(arr)

        self.home_dir = home
        logg.info("created home dir for user '%s (id: %s)'", self.login_name,
                  self.id)
        return home

    # Flask-Login integration functions
    def is_authenticated(self):
        return not self.is_guest

    def is_active(self):
        return not self.is_guest

    @property
    def is_anonymous(self):
        return self.is_guest

    def __eq__(self, other):
        '''
        Checks the equality of two `UserMixin` objects using `get_id`.
        '''
        if isinstance(other, UserMixin):
            return self.get_id() == other.get_id()
        return NotImplemented

    def __ne__(self, other):
        '''
        Checks the inequality of two `UserMixin` objects using `get_id`.
        '''
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    def get_id(self):
        return unicode(self.id)

    def __unicode__(self):
        return u"{} \"{}\" ({}:{})".format(
            self.login_name, self.display_name if self.display_name else "",
            self.authenticator_info.auth_type, self.authenticator_info.name)

    def __repr__(self):
        return u"User<{} '{}'> ({})".format(
            self.id, self.login_name, object.__repr__(self)).encode("utf8")

    __table_args__ = (UniqueConstraint(login_name, authenticator_id), )
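get_or_add_private_group pairs with the exclude constraints on UserToUserGroup: each user gets at most one private group, and that group backs the user's personal access rule. A short usage sketch mirroring what create_home_dir does; `user` and the session handling are assumptions here:

from sqlalchemy.orm import object_session
from core.permission import get_or_add_access_rule

s = object_session(user)                         # `user` is an assumed, already persisted User
private_group = user.get_or_add_private_group()
s.flush()                                        # the DB assigns private_group.id on flush
user_rule = get_or_add_access_rule(group_ids=[private_group.id])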