Beispiel #1
0
def create_association(cls, assoc_table, relationship_name):
    """
    Attach *relationship_name* to *cls*, proxied through a dynamically
    generated polymorphic subclass of *assoc_table*, and return the
    relationship to assign on the parent.
    """
    parent_name = cls.__name__
    association_name = assoc_table.__tablename__

    # Build e.g. ``EmployeeAddressMap``: mapped into the shared association
    # table with a per-parent polymorphic identity.
    assoc_cls = type(
        parent_name + assoc_table.__name__,
        (assoc_table,),
        {
            "__tablename__": None,  # mapped into the shared table
            "__mapper_args__": {"polymorphic_identity": parent_name.lower()},
        },
    )

    def _make_assoc(obj):
        return assoc_cls(**{relationship_name: obj})

    proxy = association_proxy(association_name, relationship_name,
                              creator=_make_assoc)
    setattr(cls, relationship_name, proxy)

    return relationship(assoc_cls, backref=backref("parent", uselist=False))
	def InitMapper( cls, metadata, Parameter, ParameterType, ResourceQuantity ):
		"""Map cls as a polymorphic Parameter subtype and expose its
		quantities as a resource-keyed dict proxy."""
		quantity_rel = relation(
			ResourceQuantity,
			collection_class = attribute_mapped_collection('resource'))
		mapper( cls,
			inherits = Parameter,
			polymorphic_identity = ParameterType,
			properties = {'_quantity' : quantity_rel} )

		def _make_quantity(k, v):
			# Dict-style assignment builds a ResourceQuantity row.
			return ResourceQuantity( resource = k, **v )

		cls.quantity = association_proxy('_quantity', 'quantity', creator = _make_quantity)
    def transaction_meta_factory(self):
        """
        Builds and returns the TransactionMeta class, wiring it to the
        transaction log class with a key->value ``meta`` proxy.
        """
        class TransactionMeta(
            self.declarative_base,
            TransactionMetaBase
        ):
            __tablename__ = 'transaction_meta'

        log_cls = self.transaction_log_cls
        # Dict-like access to meta rows keyed by their ``key`` column.
        meta_backref = sa.orm.backref(
            'meta_relation',
            collection_class=attribute_mapped_collection('key')
        )
        TransactionMeta.transaction_log = sa.orm.relationship(
            log_cls,
            backref=meta_backref,
            primaryjoin=(
                '%s.id == TransactionMeta.transaction_id' % log_cls.__name__
            ),
            foreign_keys=[TransactionMeta.transaction_id]
        )

        def _create_meta(key, value):
            return TransactionMeta(key=key, value=value)

        # Expose ``meta`` on the log class as a plain key -> value mapping.
        log_cls.meta = association_proxy(
            'meta_relation', 'value', creator=_create_meta
        )

        return TransactionMeta
Beispiel #4
0
  def declare_categorizable(cls, category_type, single, plural, ation):
    """Install a category proxy named *plural* on *cls* and return the
    underlying Categorization relationship."""
    def _make_categorization(category):
      return Categorization(
          category_id=category.id,
          category_type=category.__class__.__name__,
          categorizable_type=cls.__name__
      )

    setattr(cls, plural,
            association_proxy(ation, 'category',
                              creator=_make_categorization))

    join_template = (
        'and_('
        'foreign(Categorization.categorizable_id) == {type}.id, '
        'foreign(Categorization.categorizable_type) == "{type}", '
        'foreign(Categorization.category_type) == "{category_type}"'
        ')'
    )
    return db.relationship(
        'Categorization',
        primaryjoin=join_template.format(
            type=cls.__name__, category_type=category_type),
        backref='{type}_categorizable_{category_type}'.format(
            type=cls.__name__,
            category_type=category_type,
        ),
        cascade='all, delete-orphan',
    )
Beispiel #5
0
 def tag_association(cls):
     """Wire up the polymorphic tag association for this class."""
     ident = cls.__name__.lower()
     cls.tags = associationproxy.association_proxy(
         "tag_association",
         "tags",
         creator=TagAssociation.creator(ident),
         getset_factory=_default_list_getset,
     )
     parent_backref = orm.backref("%s_parent" % ident, uselist=False)
     return orm.relationship("TagAssociation", backref=parent_backref)
Beispiel #6
0
	def InitMapper( cls, metadata, Parameter, ParameterType, DesignQuantity ):
		"""Map cls as a polymorphic Parameter subtype and expose its
		quantities as a design-keyed dict proxy."""
		quantity_rel = relation(
			DesignQuantity,
			collection_class = attribute_mapped_collection('design'))
		mapper( cls,
			inherits = Parameter,
			polymorphic_identity = ParameterType,
			properties = {'_quantity' : quantity_rel} )

		def _make_quantity(k, v):
			# Dict-style assignment builds a DesignQuantity row.
			return DesignQuantity( design = k, quantity = v )

		cls.quantity = association_proxy('_quantity', 'quantity', creator = _make_quantity)
Beispiel #7
0
 def _set_association_proxy(cls, edge_cls, attr_name, edge_name, direction):
     """Expose the *direction* end of *edge_cls* on *cls* as *attr_name*."""
     def _make_edge(node):
         return edge_cls(**{direction: node})

     setattr(cls, attr_name,
             association_proxy(edge_name, direction, creator=_make_edge))
Beispiel #8
0
  def _categorizations(cls, rel_name, proxy_name, scope):
    """Install a category proxy named *proxy_name* and return the scoped
    Categorization relationship for *cls*."""
    def _make_categorization(category):
      return Categorization(
          category=category,
          #FIXME add from http session!
          modified_by_id=1,
          categorizable_type=cls.__name__,
      )

    setattr(cls, proxy_name,
            association_proxy(rel_name, 'category',
                              creator=_make_categorization))
    join_template = ('and_(foreign(Categorization.categorizable_id) == {type}.id, '
                     'foreign(Categorization.categorizable_type) == "{type}", '
                     'Categorization.category_id == Category.id, '
                     'Category.scope_id == {scope})')
    return db.relationship(
        'Categorization',
        primaryjoin=join_template.format(type=cls.__name__, scope=scope),
        backref=BACKREF_NAME_FORMAT.format(type=cls.__name__, scope=scope),
        )

  # FIXME: make eager-loading work for categorizations/assertations
  #@classmethod
  #def eager_query(cls):
  #  from sqlalchemy import orm

  #  query = super(Categorizable, cls).eager_query()
  #  return query.options(
  #      orm.subqueryload_all('categorizations.category'),
  #      orm.subqueryload_all('assertations.category'))
Beispiel #9
0
def has_field_handler(entity, name, *args, **kwargs):
    """Attach *name* to *entity*: as an association proxy when a
    'through' relation is given, otherwise as a regular Field."""
    if 'through' in kwargs:
        relation_name = kwargs.pop('through')
        attribute = kwargs.pop('attribute', name)
        setattr(entity, name,
                association_proxy(relation_name, attribute, **kwargs))
        return
    Field(*args, **kwargs).attach(entity, name)
 def address_association(cls):
     """Wire up the polymorphic address association for this class."""
     ident = cls.__name__.lower()
     cls.addresses = association_proxy(
         "address_association",
         "addresses",
         creator=AddressAssociation.creator(ident),
     )
     parent_backref = backref("%s_parent" % ident, uselist=False)
     return relationship("AddressAssociation", backref=parent_backref)
Beispiel #11
0
def _gen_array_simple(cls, props, k, child_cust, p):
    """Map the simple-array member *k* of *cls* onto a child table and
    expose it through an association proxy.

    Creates (or validates, if already present in the metadata) a child table
    holding one FK column to the parent plus one data column, builds a
    throwaway mapped class over it, registers a relationship in *props* under
    ``"_" + k``, and finally sets ``cls.k`` as a proxy to the data column.

    NOTE(review): assumes ``_get_col_o2m`` yields column-name/type info first
    and the FK Column object second — confirm against its definition.
    """
    table_name = cls.Attributes.table_name
    metadata = cls.Attributes.sqla_metadata

    # get left (fk) column info
    _gen_col = _get_col_o2m(cls, p.left)
    col_info = next(_gen_col) # gets the column name
    p.left, child_left_col_type = col_info[0] # FIXME: Add support for multi-column primary keys.
    child_left_col_name = p.left

    # get right(data) column info
    child_right_col_type = get_sqlalchemy_type(child_cust)
    child_right_col_name = p.right # this is the data column
    if child_right_col_name is None:
        child_right_col_name = k

    # get table name
    child_table_name = child_cust.Attributes.table_name
    if child_table_name is None:
        child_table_name = '_'.join([table_name, k])

    if child_table_name in metadata.tables:
        # Table already generated elsewhere: verify its column types match
        # what this member expects instead of re-creating it.
        child_t = metadata.tables[child_table_name]
        assert child_right_col_type is \
               child_t.c[child_right_col_name].type.__class__
        assert child_left_col_type is \
               child_t.c[child_left_col_name].type.__class__

    else:
        # table does not exist, generate table
        child_right_col = Column(child_right_col_name,
                                        child_right_col_type)
        _sp_attrs_to_sqla_constraints(cls, child_cust,
                                            col=child_right_col)

        child_left_col = next(_gen_col)
        _sp_attrs_to_sqla_constraints(cls, child_cust,
                                            col=child_left_col)

        child_t = Table(child_table_name , metadata,
            Column('id', sqlalchemy.Integer, primary_key=True),
                                child_left_col, child_right_col)

    # generate temporary class for association proxy
    cls_name = ''.join(x.capitalize() or '_' for x in
                                    child_table_name.split('_'))
                            # generates camelcase class name.

    # __init__ for the throwaway class: store the single value in the
    # data column's attribute.
    def _i(self, *args):
        setattr(self, child_right_col_name, args[0])

    cls_ = type("_" + cls_name, (object,), {'__init__': _i})
    own_mapper(cls_)(cls_, child_t)
    props["_" + k] = relationship(cls_)

    # generate association proxy
    setattr(cls, k, association_proxy("_" + k, child_right_col_name))
Beispiel #12
0
 def meta_association(cls):
     """Wire up the polymorphic meta association for this class."""
     ident = cls.__name__.lower()
     cls.metas = association_proxy(
         "meta_association",
         "metas",
         creator=MetaAssociation.creator(ident),
     )
     parent_backref = backref("%s_parent" % ident, uselist=False)
     return relationship("MetaAssociation", backref=parent_backref)
 def fit_association(cls):
     """Wire up the polymorphic fit association for this class; child
     rows are removed with the parent (delete-orphan)."""
     ident = cls.__name__.lower()
     cls.fits = association_proxy(
         "fit_association",
         "fits",
         creator=Fit_Association.creator(ident),
     )
     analysis_backref = backref("%s_analysis" % ident, uselist=False)
     return relationship(Fit_Association,
                         cascade="all, delete-orphan",
                         backref=analysis_backref)
Beispiel #14
0
    def variable_association(cls):
        """Set up per-class variable storage backed by a shared table.

        Builds a polymorphic association subclass, installs the
        ``_variables`` and ``variables`` proxies plus a
        ``with_characteristic`` helper, and returns the relationship to the
        association class.
        """
        name = cls.__name__

        # Polymorphic subclass so variables stored for regions, cells, etc.
        # can share one table while remaining distinguishable.
        assoc_cls = type(
            "%sVariableAssociation" % name,
            (VariableAssociation,),
            {
                '__tablename__': None,  # because mapping into a shared table
                '__mapper_args__': {
                    'polymorphic_identity': name.lower()
                }
            })
        cls.variable_assoc_cls = assoc_cls

        def _assoc_creator(kv):
            assoc = assoc_cls()
            for key, value in kv.items():
                assoc.variables[key] = Variable(key=key, value=value)
            return assoc

        cls._variables = association_proxy(
            'variable_association', 'variables', creator=_assoc_creator)

        # Composite associative proxy: yields the underlying values for a
        # given key rather than the Variable objects; both views are needed.
        cls.variables = association_proxy(
            'variable_association', 'values', creator=_assoc_creator)

        def with_characteristic(self, key, value):
            return self._variables.any(key=key, value=value)

        cls.with_characteristic = classmethod(with_characteristic)

        return relationship(
            assoc_cls,
            collection_class=attribute_mapped_collection('key'),
            cascade='all, delete-orphan', lazy='joined',
            single_parent=True,
            backref=backref('parent', uselist=False))
Beispiel #15
0
 def handler(entity, name, *args, **kwargs):
     """Attach *name* to *entity*: as an association proxy when both
     'through' and 'via' are given, otherwise as a relationship of the
     requested kind. Supplying only one of the pair is an error."""
     has_through = 'through' in kwargs
     has_via = 'via' in kwargs
     if has_through and has_via:
         setattr(entity, name,
                 association_proxy(kwargs.pop('through'),
                                   kwargs.pop('via'),
                                   **kwargs))
         return
     if has_through or has_via:
         raise Exception("'through' and 'via' relationship keyword "
                         "arguments should be used in combination.")
     rel = target(kwargs.pop('of_kind'), *args, **kwargs)
     rel.attach(entity, name)
Beispiel #16
0
 def address_association(cls):
     """Wire up the attachment-backed association for this class.

     NOTE(review): proxies 'attachment_association'/'attachments' and maps
     'AttachmentAssociation', yet uses AddressAssociation.creator and sets
     ``cls.addresses`` — looks like a copy/paste mismatch; confirm intent.
     """
     ident = cls.__name__.lower()
     cls.addresses = association_proxy(
         'attachment_association',
         'attachments',
         creator=AddressAssociation.creator(ident),
     )
     parent_backref = sa.orm.backref("%s_parent" % ident, uselist=False)
     return relationship('AttachmentAssociation', backref=parent_backref)
    def address_association(cls):
        """Create a polymorphic AddressAssociation subclass for this class
        and expose its addresses via a proxy."""
        name = cls.__name__

        assoc_cls = type(
            name + "AddressAssociation",
            (AddressAssociation,),
            {"__mapper_args__": {"polymorphic_identity": name.lower()}},
        )

        def _make_assoc(addresses):
            return assoc_cls(addresses=addresses)

        cls.addresses = association_proxy(
            "address_association", "addresses", creator=_make_assoc)
        return relationship(assoc_cls,
                            backref=backref("parent", uselist=False))
Beispiel #18
0
 def make_task_group_objects(cls):
   """Proxy task groups through TaskGroupObject and return the mapping
   relationship."""
   def _make_tgo(task_group):
     return TaskGroupObject(
         task_group=task_group,
         object_type=cls.__name__,
     )

   cls.task_groups = association_proxy(
       'task_group_objects', 'task_group', creator=_make_tgo)
   join_template = ('and_(foreign(TaskGroupObject.object_id) == {type}.id, '
                    'foreign(TaskGroupObject.object_type) == "{type}")')
   return db.relationship(
       'TaskGroupObject',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_object'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
Beispiel #19
0
 def audit_objects(cls):
   """Proxy audits through AuditObject and return the mapping relationship.

   Bug fix: the creator lambda previously took ``control`` as its parameter
   but referenced the undefined name ``audit`` (suppressed with ``# noqa``),
   so appending to ``cls.audits`` raised NameError. The parameter is now
   named ``audit`` to match the body.
   """
   cls.audits = association_proxy(
       'audit_objects', 'audit',
       creator=lambda audit: AuditObject(
           audit=audit,
           auditable_type=cls.__name__,
       )
   )
   joinstr = 'and_(foreign(AuditObject.auditable_id) == {type}.id, '\
       'foreign(AuditObject.auditable_type) == "{type}")'
   joinstr = joinstr.format(type=cls.__name__)
   return db.relationship(
       'AuditObject',
       primaryjoin=joinstr,
       backref='{0}_auditable'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
 def object_controls(cls):
   """Proxy controls through ObjectControl and return the mapping
   relationship."""
   def _make_oc(control):
     return ObjectControl(
         control=control,
         controllable_type=cls.__name__,
     )

   cls.controls = association_proxy(
       'object_controls', 'control', creator=_make_oc)
   join_template = ('and_(foreign(ObjectControl.controllable_id) == {type}.id, '
                    'foreign(ObjectControl.controllable_type) == "{type}")')
   return db.relationship(
       'ObjectControl',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_controllable'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
Beispiel #21
0
 def object_people(cls):
   """Proxy people through ObjectPerson and return the mapping
   relationship (no delete-orphan cascade on this one)."""
   def _make_op(person):
     return ObjectPerson(
         person=person,
         # NOTE(review): hard-coded author id — presumably should come from
         # the http session (cf. the FIXME in _categorizations); confirm.
         modified_by_id=1,
         personable_type=cls.__name__,
     )

   cls.people = association_proxy(
       'object_people', 'person', creator=_make_op)
   join_template = ('and_(foreign(ObjectPerson.personable_id) == {type}.id, '
                    'foreign(ObjectPerson.personable_type) == "{type}")')
   return db.relationship(
       'ObjectPerson',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_personable'.format(cls.__name__),
   )
Beispiel #22
0
 def object_owners(cls):  # pylint: disable=no-self-argument
   """Proxy owners through ObjectOwner and return the mapping
   relationship."""
   def _make_owner(person):
     return ObjectOwner(
         person=person,
         ownable_type=cls.__name__,
     )

   cls.owners = association_proxy(
       'object_owners', 'person', creator=_make_owner)
   join_template = ('and_(foreign(ObjectOwner.ownable_id) == {type}.id, '
                    'foreign(ObjectOwner.ownable_type) == "{type}")')
   return db.relationship(
       'ObjectOwner',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_ownable'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
Beispiel #23
0
 def _object_labels(cls):  # pylint: disable=no-self-argument
   """Object labels property"""
   # pylint: disable=attribute-defined-outside-init
   def _make_label(label):
     return ObjectLabel(
         label=label,
         object_type=cls.__name__
     )

   cls._labels = association_proxy(
       '_object_labels', 'label', creator=_make_label)

   def _join_condition():
     return and_(cls.id == ObjectLabel.object_id,
                 cls.__name__ == ObjectLabel.object_type)

   return db.relationship(
       ObjectLabel,
       primaryjoin=_join_condition,
       foreign_keys=ObjectLabel.object_id,
       backref='{}_labeled'.format(cls.__name__),
       cascade='all, delete-orphan')
Beispiel #24
0
 def make_risk_objects(cls):
   """Proxy risks through RiskObject and return the mapping relationship."""
   def _make_ro(risk):
     return RiskObject(
         risk=risk,
         object_type=cls.__name__,
     )

   cls.risks = association_proxy(
       'risk_objects', 'risk', creator=_make_ro)
   join_template = ('and_(foreign(RiskObject.object_id) == {type}.id, '
                    'foreign(RiskObject.object_type) == "{type}")')
   return db.relationship(
       'RiskObject',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_object'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
 def object_documents(cls):
   """Proxy documents through ObjectDocument and return the mapping
   relationship."""
   def _make_od(document):
     return ObjectDocument(
         document=document,
         documentable_type=cls.__name__,
     )

   cls.documents = association_proxy(
       'object_documents', 'document', creator=_make_od)
   join_template = ('and_(foreign(ObjectDocument.documentable_id) == {type}.id, '
                    'foreign(ObjectDocument.documentable_type) == "{type}")')
   return db.relationship(
       'ObjectDocument',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_documentable'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
 def object_sections(cls):
   """Proxy sections through ObjectSection and return the mapping
   relationship."""
   def _make_os(section):
     return ObjectSection(
         section=section,
         sectionable_type=cls.__name__,
     )

   cls.sections = association_proxy(
       'object_sections', 'section', creator=_make_os)
   join_template = ('and_(foreign(ObjectSection.sectionable_id) == {type}.id, '
                    'foreign(ObjectSection.sectionable_type) == "{type}")')
   return db.relationship(
       'ObjectSection',
       primaryjoin=join_template.format(type=cls.__name__),
       backref='{0}_sectionable'.format(cls.__name__),
       cascade='all, delete-orphan',
   )
Beispiel #27
0
def create_collection_class(owner, member, column, *,
                            sorted=True, duplicates=True):
    """
    Creates a class for holding the values of a collection in given *owner*
    class.

    The given *owner* class will be updated to have a new *member* with given
    name, which is a list containing elements as described by *column*:

    >>> create_collection_class(Group, 'permissions',
    ...                         Column(PermissionEnum.db_type(), nullable=False))

    Group objects will now have a member called 'permissions', which contain a
    sorted list of PermissionEnum values.

    See :func:`.create_relationship_class` for the description of the keyword
    arguments.
    """
    name = owner.__name__ + tbl2cls(member)
    wrapper_name = member + '_wrapper'
    if sorted:
        # Keep the wrapper collection ordered through an 'index' column.
        bref = backref(wrapper_name, order_by='%s.index' % name,
                       collection_class=ordering_list('index'))
    else:
        bref = backref(wrapper_name)
    members = {
        '__score_db__': {
            'inheritance': None
        },
        'owner_id': Column(IdType,
                           ForeignKey('%s.id' % owner.__tablename__),
                           nullable=False),
        'owner': relationship(owner, backref=bref),
        'value': column,
    }
    if sorted:
        members['index'] = Column(Integer, nullable=False)
    if not duplicates:
        members['__table_args__'] = (
            UniqueConstraint(members['owner_id'], column),
        )
    cls = type(name, (owner.__score_db__['base'],), members)

    def _wrap_value(v):
        return cls(value=v)

    setattr(owner, member,
            association_proxy(wrapper_name, 'value', creator=_wrap_value))
    return cls
 def _categorizations(cls, rel_name, proxy_name, scope):
   """Register *rel_name* under *scope*, install a category proxy named
   *proxy_name*, and return the scoped Categorization relationship."""
   cls._categorization_attrs().append(rel_name)
   cls._categorization_scopes()[rel_name] = scope

   def _make_categorization(category):
     return Categorization(
         category=category,
         categorizable_type=cls.__name__,
     )

   setattr(cls, proxy_name,
           association_proxy(rel_name, 'category',
                             creator=_make_categorization))
   join_template = ('and_(foreign(Categorization.categorizable_id) == {type}.id, '
                    'foreign(Categorization.categorizable_type) == "{type}", '
                    'Categorization.category_id == Category.id, '
                    'Category.scope_id == {scope})')
   return db.relationship(
       'Categorization',
       primaryjoin=join_template.format(type=cls.__name__, scope=scope),
       backref=BACKREF_NAME_FORMAT.format(type=cls.__name__, scope=scope),
       )
def dice_template_mapper():
    """Wire up the classic (non-declarative) SQLAlchemy mappers for the dice
    domain classes and attach CRUD helpers to them.

    Maps DiceTemplate, DiceFaceTemplate, DiceElement and DiceRole to their
    tables, then exposes ``DiceTemplate.elements`` as a proxy over the
    ``element_links`` relationship.
    """
    from models import get_all, get_by_id, save, delete, get_by_name, get_all_by_ordering
    from business.dice.dice_template import DiceTemplate
    from business.dice.dice_face_template import DiceFaceTemplate
    from business.dice.dice_element import DiceElement
    from business.dice.dice_type import DiceType
    from business.dice.dice_role import DiceRole
    from business.element import Element
    from business.race import Race
    from sqlalchemy.orm import mapper, relationship, backref
    from sqlalchemy.ext.associationproxy import association_proxy

    # CRUD helpers on DiceTemplate.
    DiceTemplate.get_all = classmethod(get_all)
    DiceTemplate.get_by_id = classmethod(get_by_id)
    DiceTemplate.save = save
    DiceTemplate.delete = delete

    # Race.dices is kept ordered by role then type.
    mapper(DiceTemplate, dice_template_table, properties={
        'type': relationship(DiceType), 
        'race': relationship(Race, backref=backref('dices', order_by=(dice_template_table.c.role_id.asc(), dice_template_table.c.type_id.asc()))),
        'role': relationship(DiceRole, backref='dices')
    })

    # Faces are ordered by side number on the template's 'faces' backref.
    mapper(DiceFaceTemplate, dice_face_template_table, properties={
        'dice': relationship(DiceTemplate, backref=backref('faces', order_by=dice_face_template_table.c.side_number)), 
    })

    mapper(DiceElement, dice_element_table, properties={
        'dice': relationship(DiceTemplate, backref='element_links'), 
        'element': relationship(Element), 
    })

    # CRUD helpers on DiceRole.
    DiceRole.get_all = classmethod(get_all_by_ordering)
    DiceRole.get_by_id = classmethod(get_by_id)
    DiceRole.get_by_name = classmethod(get_by_name)
    DiceRole.save = save
    DiceRole.delete = delete

    mapper(DiceRole, dice_role_table)

    # Read-only view of linked elements via the DiceElement rows.
    DiceTemplate.elements = association_proxy('element_links', 'element')
Beispiel #30
0
    def contexts(cls):
        """
        Relationship to the context mapping class.
        If you want to be forwarded to entities, use ``entities`` instead.
        """
        name = cls.__tablename__

        def _make_context(e):
            return Context(entity=e, external=name)

        cls.entities = association_proxy('contexts', 'entity',
                                         creator=_make_context)

        join_condition = (
            '(%s.id == Context.key) & (Context.external == "%s")'
            % (cls.__name__, name))
        return relationship(
            Context,
            primaryjoin=join_condition,
            foreign_keys=[Context.key, Context.external],
            collection_class=set,
            backref=backref('%s_parent' % name, uselist=False))
Beispiel #31
0
class IPNetworkGroup(db.Model):
    """A named group of IP networks usable as an ACL principal."""
    __tablename__ = 'ip_network_groups'
    # Principal-protocol flags: this principal matches by network only.
    principal_type = PrincipalType.network
    principal_order = 1
    is_group = False
    is_network = True
    is_single_person = False
    is_event_role = False
    is_category_role = False
    is_registration_form = False

    @declared_attr
    def __table_args__(cls):
        # Case-insensitive uniqueness on the group name.
        return (db.Index('ix_uq_ip_network_groups_name_lower', db.func.lower(cls.name), unique=True),
                {'schema': 'indico'})

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    name = db.Column(
        db.String,
        nullable=False
    )
    description = db.Column(
        db.Text,
        nullable=False,
        default=''
    )
    #: Whether the network group is hidden in ACL forms
    hidden = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: Grants all IPs in the network group read access to all attachments
    attachment_access_override = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )

    # Member networks; eagerly loaded and removed together with the group.
    _networks = db.relationship(
        'IPNetwork',
        lazy=False,
        cascade='all, delete-orphan',
        collection_class=set,
        backref=db.backref(
            'group',
            lazy=True
        )
    )
    # Plain set of network values, transparently wrapped in IPNetwork rows.
    networks = association_proxy('_networks', 'network', creator=lambda v: IPNetwork(network=v))

    # relationship backrefs:
    # - in_category_acls (CategoryPrincipal.ip_network_group)
    # - in_event_acls (EventPrincipal.ip_network_group)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'name', hidden=False, attachment_access_override=False)

    def __contains__(self, user):
        """Return True if *user*'s current request IP is inside this group."""
        # This method is called via ``user in principal`` during ACL checks.
        # We have to take the IP from the request so if there's no request
        # (e.g. in the shell) we never grant IP-based access; same if we check
        # for a different user than the one from the current session.
        if not has_request_context() or not request.remote_addr:
            return False
        if session.user != user:
            return False
        return self.contains_ip(unicode(request.remote_addr))

    def contains_ip(self, ip):
        """Return True if *ip* (a string) lies within any member network."""
        ip = ip_address(ip)
        return any(ip in network for network in self.networks)

    @property
    def locator(self):
        # URL-building locator dict used by the routing layer.
        return {'network_group_id': self.id}
Beispiel #32
0
class Board(BaseNameMixin, db.Model):
    """
    Boards show a filtered set of jobs at board-specific URLs.
    """
    __tablename__ = 'board'
    #: Reserved board names
    reserved_names = ['static', 'beta']
    #: Caption
    caption = db.Column(db.Unicode(250), nullable=True)
    #: Lastuser organization userid that owns this
    userid = db.Column(db.Unicode(22), nullable=False, index=True)
    #: Welcome text
    description = db.Column(db.UnicodeText, nullable=False, default=u'')
    #: Restrict displayed posts to 24 hours if not logged in?
    require_login = db.Column(db.Boolean, nullable=False, default=True)
    #: Restrict ability to list via this board?
    restrict_listing = db.Column(db.Boolean, nullable=False, default=True)
    #: Relax pay data requirement?
    require_pay = db.Column(db.Boolean, nullable=False, default=True)
    #: New job template headline
    newjob_headline = db.Column(db.Unicode(100), nullable=True)
    #: New job posting instructions
    newjob_blurb = db.Column(db.UnicodeText, nullable=True)
    #: Featured board
    featured = db.Column(db.Boolean, default=False, nullable=False, index=True)
    #: Posting users
    posting_users = db.relationship(User, secondary=board_users_table)
    #: Available job types
    types = db.relationship(JobType, secondary=board_jobtype_table, order_by=JobType.seq)
    #: Available job categories
    categories = db.relationship(JobCategory, secondary=board_jobcategory_table, order_by=JobCategory.seq)

    #: Automatic tagging domains
    domains = db.relationship(BoardAutoDomain, backref='board', cascade='all, delete-orphan',
        order_by=BoardAutoDomain.domain)
    #: Proxy exposing the plain domain strings of `domains`
    auto_domains = association_proxy('domains', 'domain', creator=lambda d: BoardAutoDomain(domain=d))
    #: Automatic tagging locations
    auto_locations = db.relationship(BoardAutoLocation, backref='board', cascade='all, delete-orphan')
    #: Proxy exposing the geonameids of `auto_locations`
    auto_geonameids = association_proxy('auto_locations', 'geonameid', creator=lambda l: BoardAutoLocation(geonameid=l))
    #: Automatic tagging keywords
    auto_tags = db.relationship(Tag, secondary=board_auto_tag_table, order_by=Tag.name)
    #: Proxy exposing tag titles; assignment creates missing tags
    auto_keywords = association_proxy('auto_tags', 'title', creator=lambda t: Tag.get(t, create=True))
    auto_types = db.relationship(JobType, secondary=board_auto_jobtype_table, order_by=JobType.seq)
    auto_categories = db.relationship(JobCategory, secondary=board_auto_jobcategory_table, order_by=JobCategory.seq)
    #: Must all criteria match for an auto-post?
    auto_all = db.Column(db.Boolean, default=False, nullable=False)
    #: Users active on this board
    users_active_at = db.relationship(UserActiveAt, lazy='dynamic', backref='board')

    def __repr__(self):
        return '<Board %s "%s">' % (self.name, self.title)

    @property
    def is_root(self):
        """True for the special root board 'www'."""
        return self.name == u'www'

    @property
    def not_root(self):
        """Inverse of :attr:`is_root`."""
        return self.name != u'www'

    @property
    def options(self):
        """Form helper method (see BoardOptionsForm)"""
        return self

    @property
    def autotag(self):
        """Form helper method (see BoardTaggingForm)"""
        return self

    @cached_property
    def tz(self):
        # NOTE(review): relies on a `timezone` attribute not defined in this
        # class — presumably provided by a mixin or base; confirm.
        return timezone(self.timezone)

    @property
    def title_and_name(self):
        """Markup combining the board title with a link to its name."""
        return Markup(u'{title} (<a href="{url}" target="_blank">{name}</a>)'.format(
            title=self.title, name=self.name, url=self.url_for()))

    def owner_is(self, user):
        """Return True if *user* owns this board (directly or via org)."""
        if user is None:
            return False
        if self.userid == user.userid or self.userid in user.allowner_ids():
            return True
        return False

    def link_to_jobpost(self, jobpost):
        """Return the BoardJobPost linking this board to *jobpost*, if any."""
        return BoardJobPost.query.get((self.id, jobpost.id))

    def add(self, jobpost):
        """Link *jobpost* to this board, creating the link if missing."""
        link = self.link_to_jobpost(jobpost)
        if not link:
            link = BoardJobPost(jobpost=jobpost, board=self)
            db.session.add(link)
        return link

    def permissions(self, user, inherited=None):
        """Compute the permission set *user* holds on this board."""
        perms = super(Board, self).permissions(user, inherited)
        perms.add('view')
        if not self.restrict_listing:
            perms.add('new-job')
        if user is not None and (user.userid == self.userid or self.userid in user.allowner_ids()):
            perms.add('edit')
            perms.add('delete')
            perms.add('add')
            perms.add('new-job')
            perms.add('edit-filterset')
        elif user in self.posting_users:
            perms.add('new-job')
        return perms

    def url_for(self, action='view', _external=False):
        """Build the URL for *action* on this board; root board ('www')
        maps to the bare domain rather than a subdomain."""
        if action == 'view':
            if self.is_root:
                # Specialcase 'www'. Don't use www.hasjob.co.
                return url_for('index', subdomain=None, _external=_external)
            else:
                return url_for('index', subdomain=self.name, _external=_external)
        elif action == 'edit':
            return url_for('board_edit', board=self.name, _external=_external)
        elif action == 'delete':
            return url_for('board_delete', board=self.name, _external=_external)
        elif action == 'oembed':
            if self.is_root:
                return url_for('index', subdomain=None, embed=1, _external=_external)
            else:
                return url_for('index', subdomain=self.name, embed=1, _external=_external)

    @classmethod
    def get(cls, name):
        """Return the board with the given *name*, or None."""
        return cls.query.filter_by(name=name).one_or_none()
Beispiel #33
0
class Entity(object):
    """Mixin for indexed entities (SEP corpus): URL/search-string helpers
    plus the pluralization and search-pattern machinery.

    NOTE(review): this is Python 2 code (``print`` statement at one spot,
    ``string.join``); it will not run unmodified on Python 3. ``p`` is
    presumably a module-level inflect-style engine (``plural``/``compare``)
    -- confirm against the module header, which is outside this view.
    """

    def url(self, filetype='html', action='view'):
        """Return the pylons route URL for this entity."""
        # TODO: Rewrite to remove dependency on pylons
        return url(controller='entity',
                   action=action,
                   id=self.ID,
                   filetype=filetype)

    def json_struct(self, sep_filter=True, limit=10, extended=True):
        """ Dictionary structure used with a :class:`JSONEncoder` """
        # NOTE(review): sep_filter, limit and extended are unused here --
        # presumably kept for signature compatibility with subclass
        # overrides; verify against callers.
        struct = {
            'ID': self.ID,
            'type': 'entity',
            'label': self.label,
            'sep_dir': self.sep_dir,
            'url': self.url()
        }
        return struct

    def web_search_string(self):
        """Return the label wrapped in double quotes, for web search engines."""
        # generates search string for search engines

        search_string = "\"" + self.label + "\""
        return search_string

    def get_filename(self, corpus_path=None):
        """Return the path to this entity's cached ``index.html`` under
        *corpus_path*, or ``None`` when unknown or the file does not exist."""
        if corpus_path and self.sep_dir:
            filename = os.path.join(corpus_path, self.sep_dir, 'index.html')
            if not os.path.exists(filename):
                filename = None
        else:
            filename = None

        return filename

    # Proxy through the `_spatterns` relationship to each related object's
    # `searchpattern` column.
    searchpatterns = association_proxy('_spatterns', 'searchpattern')

    def pluralize(self):
        """Return a list of pluralization variants of this entity's label.

        Labels without the ``<i>`` (intersection) / ``<u>`` (union) markers
        are split into words; candidate words are toggled singular/plural in
        every combination. Marked labels are split on the operator and each
        half is pluralized independently.

        NOTE(review): multiple latent issues are flagged inline below; the
        statement order is load-bearing, so they are documented rather than
        changed here.
        """
        pluralpatterns = []
        #returns a list of pluralizations for each search pattern
        if not (re.search('\<i\>', self.searchstring)
                or re.search('\<u\>', self.searchstring)):
            #print "no intersection or union \n"

            words = self.label.split(" ")

            count = 0
            i = 0
            # NOTE(review): only 10 slots, but the guard below allows up to
            # 11 words before raising -- an 11-word label would hit
            # plurals[10] and raise IndexError; confirm intended limit.
            plurals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

            secondtolast = []
            thirdtolast = []
            fourthtolast = []

            #for each loop marks each possible plural with "1" in array "plurals"

            for word in words:
                # NOTE(review): this aliases the list (no copy) and is unused
                # in this loop.
                wordtemp = words
                #print "word is " + word + "\n"
                #print "i is " + str(i)
                if re.match('in$|of$|or$|and$|for$|on$|about$|to$', word):
                    if i > 0:
                        plurals[i - 1] = 1
                        #print "setting word before " + word
                        count += 1
                i += 1
                if i == 12:
                    raise Exception(
                        "We try not to allow labels longer than twelve words long; things get really messy.  Please go back and find a shorter label."
                    )

            #last word always candidate for pluralization
            #print "i is " + str(i) + "and plurals is " + "\n"
            #print plurals
            i -= 1
            plurals[i] = 1
            #print "plurals is "
            #print plurals
            #print "\n"

            i = 0
            count += 1
            #count now equals total number of words which need to be pluralized

            anothercount = 0

            #singularize everything
            for word in words:
                # NOTE(review): "is 1" compares identity, not equality; it
                # only works in CPython because small ints are interned --
                # should presumably be "== 1" (same idiom recurs below).
                if plurals[anothercount] is 1:
                    # NOTE(review): aliases `words` rather than copying it.
                    wordtemp = words
                    # NOTE(review): re.sub returns a NEW string; all return
                    # values in this chain are discarded, so `word` is never
                    # actually singularized. Also the final
                    # re.search('s$', '', word) passes `word` as the *flags*
                    # argument. This chain appears to be a no-op -- confirm
                    # intended behavior before relying on it.
                    re.sub('ies$', 'y', word) or re.search(
                        'descartes$',
                        word) or re.sub('ypes$', 'ype', word) or re.search(
                            'tus$', word
                        ) or re.sub('([ea])nges$', '\1nge', word) or re.sub(
                            '([aeiou])cles$', '\1cle',
                            word) or re.sub('bles$', 'ble', word) or re.sub(
                                'ues$', 'ue', word) or re.sub(
                                    'nces$', 'nce', word) or re.search(
                                        'ous$', word) or re.search(
                                            'sis$', word) or re.sub(
                                                'xes$', 'x', word) or re.sub(
                                                    '([aeiou])(.)es$', '\1\2e',
                                                    word) or re.sub(
                                                        'ces$', 'ce',
                                                        word) or re.sub(
                                                            'es$', '',
                                                            word) or re.search(
                                                                'ss$', word
                                                            ) or re.search(
                                                                's$', '', word)
                    if p.compare(word, wordtemp[anothercount]):
                        words[anothercount] = word
                anothercount += 1

            # maxi..maxl bound the 4 nested toggle loops below; each becomes
            # 2 when there is one more plural candidate to enumerate.
            maxi = 1
            maxj = 1
            maxk = 1
            maxl = 1

            if count is 1:
                maxi = 2
            elif count is 2:
                maxi = 2
                maxj = 2
            elif count is 3:
                maxi = 2
                maxj = 2
                maxk = 2
            elif count is 4:
                maxi = 2
                maxj = 2
                maxk = 2
                maxl = 2

            pluralcount = 0
            # NOTE(review): aliases `words`; later writes like
            # "wordstemp[lastcount] = words[lastcount]" therefore mutate the
            # same list and are effectively no-ops -- confirm whether a copy
            # (words[:]) was intended.
            wordstemp = words
            a = 0
            lastcount = -1
            prevcount = -1

            #cycle through each possible combination of singulars and plurals and create list of standard pluralizations
            for l in range(maxl):
                for k in range(maxk):
                    for j in range(maxj):
                        for i in range(maxi):
                            #print "appending string.join(wordstemp) to pluralpatterns which is " + string.join(wordstemp) + "\n"
                            #print "i, j, k, l is " + str(i) + str(j) + str(k) + str(l) + "\n"
                            pluralpatterns.append(string.join(wordstemp))
                            pluralcount += 1

                            #zoom in @plurals array to last plural
                            a = 0
                            lastcount = -1
                            for pluralentry in plurals:
                                #print "checking pluralentry " + str(pluralentry) + "\n"
                                if pluralentry is 1:
                                    lastcount = a
                                a += 1

                            #toggle last plural candidate from singular to plural
                            wordstemp[lastcount] = p.plural(
                                wordstemp[lastcount])

                        #reset last plural to singular if needed--e.g., if j = 1 and maxj = 2
                        if j is 0 and maxj is 2:
                            #print "resetting $wordstemp[$lastcount] to $words[$lastcount]\n";

                            #print "I'm in this part of the loop\n"
                            wordstemp[lastcount] = words[lastcount]

                            #find next to last plural
                            a = 0
                            lastcount = -1

                            for pluralentry in plurals:
                                if pluralentry is 1:
                                    secondtolast.append(a)
                                a += 1
                            secondtolast.pop()
                            lastcounttemp = secondtolast.pop()
                            if lastcounttemp or lastcounttemp == 0:
                                lastcount = lastcounttemp
                            #print "in 2nd loop, lastcounttemp = $lastcounttemp and word to be pluralized = $wordstemp[$lastcounttemp]\n";

                            #toggle 2nd to last plural (if any)
                            if lastcount != -1:
                                wordstemp[lastcount] = p.plural(
                                    wordstemp[lastcount])

                    #reset 2nd-to-last plural to singular if needed--e.g. if k = 1 and maxk = 2
                    if k is 0 and maxk is 2:
                        prevcount = lastcount
                        a = 0
                        lastcount = -1
                        if prevcount != -1:
                            wordstemp[prevcount] = words[prevcount]

                            #find 3rd plural

                            for pluralentry in plurals:
                                if pluralentry is 1:
                                    thirdtolast.append(a)
                                    # NOTE(review): unlike the matching loops
                                    # above/below, "a += 1" is inside the
                                    # if-block here -- looks like an
                                    # indentation bug; confirm.
                                    a += 1
                            thirdtolast.pop()
                            thirdtolast.pop()
                            lastcounttemp = thirdtolast.pop()
                            if lastcounttemp or lastcounttemp is 0:
                                lastcount = lastcounttemp

                        #toggle 3rd plural (if any)
                        if lastcount != -1:
                            wordstemp[lastcount] = p.plural(
                                wordstemp[lastcount])

                #reset 3rd-to-last plural to singular if needed -- e.g. if l = 1 and maxl = 2
                if l is 0 and maxl is 2:
                    prevcount = lastcount
                    a = 0
                    lastcount = -1
                    if prevcount != -1:
                        wordstemp[prevcount] = words[prevcount]

                        #find 4th plural
                        for pluralentry in plurals:
                            if pluralentry is 1:
                                fourthtolast.append(a)
                            a += 1

                        fourthtolast.pop()
                        fourthtolast.pop()
                        fourthtolast.pop()
                        lastcounttemp = fourthtolast.pop()
                        if lastcounttemp or lastcounttemp is 0:
                            lastcount = lastcounttemp

                if lastcount != -1:
                    wordstemp[lastcount] = p.plural(wordstemp[lastcount])

        else:
            #now generate nonstandard pluralizations for searchstrings involving intersection or union
            if re.search("\<i\>", self.searchstring):
                #print "intersection\n"
                m = re.search("(.+)\<i\>(.+)", self.searchstring)
                first = m.group(1)
                second = m.group(2)

                firsttemp = first
                secondtemp = second

                # NOTE(review): as in the loop above, these re.sub/re.search
                # return values are discarded, so `first`/`second` are never
                # modified here -- the "undo depluralization" checks below
                # always compare a string against itself.
                re.sub('ies$', 'y', first) or re.search(
                    'descartes$',
                    first) or re.sub('ypes$', 'ype', first) or re.search(
                        'tus$', first
                    ) or re.sub('([ea])nges$', '\1nge', first) or re.sub(
                        '([aeiou])cles$', '\1cle',
                        first) or re.sub('bles$', 'ble', first) or re.sub(
                            'ues$', 'ue', first) or re.sub(
                                'nces$', 'nce', first) or re.search(
                                    'ous$', first) or re.search(
                                        'sis$', first) or re.sub(
                                            'xes$', 'x', first) or re.sub(
                                                '([aeiou])(.)es$', '\1\2e',
                                                first) or re.sub(
                                                    'ces$', 'ce',
                                                    first) or re.sub(
                                                        'es$', '',
                                                        first) or re.search(
                                                            'ss$', first
                                                        ) or re.search(
                                                            's$', '', first)
                re.sub('ies$', 'y', second) or re.search(
                    'descartes$',
                    second) or re.sub('ypes$', 'ype', second) or re.search(
                        'tus$', second
                    ) or re.sub('([ea])nges$', '\1nge', second) or re.sub(
                        '([aeiou])cles$', '\1cle',
                        second) or re.sub('bles$', 'ble', second) or re.sub(
                            'ues$', 'ue', second) or re.sub(
                                'nces$', 'nce', second) or re.search(
                                    'ous$', second) or re.search(
                                        'sis$', second) or re.sub(
                                            'xes$', 'x', second) or re.sub(
                                                '([aeiou])(.)es$', '\1\2e',
                                                second) or re.sub(
                                                    'ces$', 'ce',
                                                    second) or re.sub(
                                                        'es$', '',
                                                        second) or re.search(
                                                            'ss$', second
                                                        ) or re.search(
                                                            's$', '', second)

                #undo depluralization if unsuccessful
                if not (p.compare(firsttemp, first)):
                    first = firsttemp

                if not (p.compare(secondtemp, second)):
                    second = secondtemp

                singularfirst = first

                # Enumerate singular/plural combinations of the two halves,
                # emitting an order-insensitive co-occurrence pattern each time.
                for i in range(2):
                    for j in range(2):
                        newstring = "(( " + first + "( | .+ )" + second + " )|( " + second + "( | .+ )" + first + " ))"
                        pluralpatterns.append(newstring)

                        #pluralize first
                        splitstring = first.split(" ")
                        pluralword = splitstring.pop()
                        pluralword = p.plural(pluralword)
                        splitstring.append(pluralword)
                        first = string.join(splitstring)
                    first = singularfirst
                    #pluralize second
                    splitstring2 = second.split(" ")
                    # debug output (Python 2 print statement)
                    print "splitstring2 is " + str(splitstring2) + "\n"
                    pluralword2 = splitstring2.pop()
                    pluralword2 = p.plural(pluralword2)
                    splitstring2.append(pluralword2)
                    second = string.join(splitstring2)

            elif re.search("\<u\>", self.searchstring):
                #print "union\n"
                m = re.search("(.+)\<u\>(.+)", self.searchstring)
                first = m.group(1)
                second = m.group(2)

                firsttemp = first
                secondtemp = second

                # NOTE(review): same discarded-re.sub issue as the
                # intersection branch above.
                re.sub('ies$', 'y', first) or re.search(
                    'descartes$',
                    first) or re.sub('ypes$', 'ype', first) or re.search(
                        'tus$', first
                    ) or re.sub('([ea])nges$', '\1nge', first) or re.sub(
                        '([aeiou])cles$', '\1cle',
                        first) or re.sub('bles$', 'ble', first) or re.sub(
                            'ues$', 'ue', first) or re.sub(
                                'nces$', 'nce', first) or re.search(
                                    'ous$', first) or re.search(
                                        'sis$', first) or re.sub(
                                            'xes$', 'x', first) or re.sub(
                                                '([aeiou])(.)es$', '\1\2e',
                                                first) or re.sub(
                                                    'ces$', 'ce',
                                                    first) or re.sub(
                                                        'es$', '',
                                                        first) or re.search(
                                                            'ss$', first
                                                        ) or re.search(
                                                            's$', '', first)
                re.sub('ies$', 'y', second) or re.search(
                    'descartes$',
                    second) or re.sub('ypes$', 'ype', second) or re.search(
                        'tus$', second
                    ) or re.sub('([ea])nges$', '\1nge', second) or re.sub(
                        '([aeiou])cles$', '\1cle',
                        second) or re.sub('bles$', 'ble', second) or re.sub(
                            'ues$', 'ue', second) or re.sub(
                                'nces$', 'nce', second) or re.search(
                                    'ous$', second) or re.search(
                                        'sis$', second) or re.sub(
                                            'xes$', 'x', second) or re.sub(
                                                '([aeiou])(.)es$', '\1\2e',
                                                second) or re.sub(
                                                    'ces$', 'ce',
                                                    second) or re.sub(
                                                        'es$', '',
                                                        second) or re.search(
                                                            'ss$', second
                                                        ) or re.search(
                                                            's$', '', second)

                #undo depluralization if unsuccessful
                if not (p.compare(firsttemp, first)):
                    first = firsttemp

                if not (p.compare(secondtemp, second)):
                    second = secondtemp

                singularfirst = first

                # Same combination walk as the intersection branch, but
                # emitting a simple alternation pattern.
                for i in range(2):
                    for j in range(2):
                        newstring = "( " + first + " )|( " + second + " )"
                        pluralpatterns.append(newstring)

                        #pluralize first
                        splitstring = first.split(" ")
                        pluralword = splitstring.pop()
                        pluralword = p.plural(pluralword)
                        splitstring.append(pluralword)
                        first = string.join(splitstring)
                    first = singularfirst
                    #pluralize second
                    splitstring2 = second.split(" ")
                    pluralword2 = splitstring2.pop()
                    pluralword2 = p.plural(pluralword2)
                    splitstring2.append(pluralword2)
                    second = string.join(splitstring2)

        return pluralpatterns

    def setup_SPL(self):
        """Return candidate search patterns for this label, disambiguating a
        single 'and' into intersection (&cap) and union (&cup) variants."""
        #code to generate search pattern list to disambiguate ands to intersections or unions
        search_pattern_list = []

        #check to see whether label contains an and to be disambiguated
        if re.search(' and ', self.label):
            andsplit = self.label.split(' and ')
            search_pattern_list.append(andsplit[0] + '&cap' + andsplit[1])
            search_pattern_list.append(andsplit[0] + '&cup' + andsplit[1])

        search_pattern_list.append(self.label)

        return search_pattern_list

    def setup_SSL(self):
        """Return numbered candidate search strings ('N: ...') derived from
        the label by distributing adjectives, areas ('in X') and
        comma-separated conjunctions over the options listed below."""
        # NOTE(review): local name shadows the `exit` builtin.
        exit = False
        search_string_list = []

        #Rewriting code to account for 8 options:
        #1. <idea>
        #2. <idea> [with 'in <area>' dropped]
        #3. <adj1> and <adj2> <idea>                    (= <adj1 idea> and <adj2 idea>)
        #4. <idea1> and <idea2>
        #5. <adj> <idea1> and <idea2>                  (= <adj idea1> and <adj idea2>)
        #6.
        #7. <idea1> and <idea2> in <area>            (= <idea1> and <idea2> and <area>
        #8. <adj> <idea1> and <idea2> in <area>      (= <adj idea1> and <adj idea2> and <area>)
        #9. <idea> in <area1> and <area2>            (= <idea> and <area1> and <area2> )
        #10. <idea> in <adj> <area1> and <area2>      (= <idea> and <adj area1> and <adj area2>)
        #11. <phrase> <idea1>, <idea2>, and <idea3>   = <phrase idea1> and <phrase idea2> and <phrase idea3>

        #code for Option 1 (consult key above)
        #exact input is always a default option
        search_string_list.append('1: ' + self.label)

        #first split by "in", if applicable; I never need to combine processing of idea clause and area clause;
        #I can process them independently and then just tack on area clause at end as conjunction
        #note that there will always be something in insplit[0], not necessarily in insplit[1]

        #initialize strings and lists used in decision structures
        insplit = [None, None]

        ideaandsplit = [None, None]

        simpleand = None
        ideaandphrase = None

        option4 = None

        if re.search(' in ', self.label):
            insplit = self.label.split(' in ')
            #allow option to drop area if desired

            #Code for Option #2; allow dropping of "area" of desired or redundant
            search_string_list.append('2: ' + insplit[0])

        #now the idea phrase is in splitstring[0] and the area phrase is in splitstring[1]; I can process them identically
        #and just combine at the end
        #Code for Option 3 and Option 4

        if not insplit[0]:
            insplit[0] = self.label

        if re.search(' and ', insplit[0]):
            ideaandsplit = insplit[0].split(' and ')

            #convert string idea1, idea2 to list, to use pop
            idea1 = ideaandsplit[0].split(' ')
            idea2 = ideaandsplit[1].split(' ')

            #Option 3:  add simple conjunction to options
            simpleand = idea1 + ['<and>'] + idea2
            search_string_list.append('3: ' + " ".join(simpleand))

            #get adj phrase, if any, from idea1
            #add Option 4, distributing adjective, saving Option 4 in ideaandphrase to reuse in Option 6
            if len(idea1) > 1:
                noun = idea1.pop()
                adjphrase = idea1
                temp = adjphrase + idea2
                option4 = ideaandsplit[0].split(' ') + ['<and>'] + temp
                search_string_list.append('4: ' + " ".join(option4))
            elif len(idea2) > 1:
                noun = idea2.pop()
                adj1 = idea1
                adj2 = idea2
                option4 = adj1 + [noun] + ['<and>'] + adj2 + [noun]
                search_string_list.append('5: ' + " ".join(option4))

        areaphrase = None

        #process area phrase in the same way, if there is something in the second part of insplit and there is an 'and' in the area phrase
        #for Options 6-10
        if insplit[1] and re.search(' and ', insplit[1]):
            areaandsplit = insplit[1].split(' and ')

            #convert string area1, area2 to list, to use pop
            area1 = areaandsplit[0].split(' ')
            area2 = areaandsplit[1].split(' ')

            if len(area1) > 1:
                #print "we have a adj noun1 and noun2 situation"
                noun2 = area1.pop()
                adjphrase2 = area1
                temp = adjphrase2 + area2
                areaphrase = areaandsplit[0].split(' ') + ['<and>'] + temp
            elif len(area2) > 1:
                #print "we have a adj1 and adj2 noun situation"
                noun2 = area2.pop()
                adj1 = area1
                adj2 = area2
                areaphrase = adj1 + [noun2] + ['<and>'] + adj2 + [noun2]

            else:
                areaphrase = areaandsplit[0].split(' ')
            #now the completed area name is stored in list areaphrase
            #print "areaphrase is "
            #print areaphrase

        #If there is an area, add option to naively add as conjunct; also add complex area phrase if applicable
        if insplit[1]:
            #Option 6; area but no and anywhere
            option6 = insplit[0] + ' <and> ' + insplit[1]
            search_string_list.append('6: ' + option6)

            #Option 7
            if simpleand:
                option7 = simpleand + ['<and>'] + insplit[1].split(' ')
                search_string_list.append('7: ' + " ".join(option7))

            #Option 8
            if option4:
                option8 = option4 + ['<and>'] + insplit[1].split(' ')
                search_string_list.append('8: ' + " ".join(option8))

            #Option 9
            if areaphrase:
                option9 = insplit[0].split(' ') + ['<and>'] + areaphrase
                search_string_list.append('9: ' + " ".join(option9))

            #Option 10
            if simpleand and areaphrase:
                option10 = simpleand + ['<and>'] + areaphrase
                search_string_list.append('10: ' + " ".join(option10))

        #print "label before re.search is " + self.label
        label = re.sub(" and", "", self.label)
        #split the idea up into components separated by and
        commasplit = label.split(',')

        #Option 11-14
        if re.search(',', self.label):
            #delete the extraneous and
            #print "it is triggering this"
            label = re.sub(" and", "", self.label)
            #split the idea up into components separated by and
            commasplit = label.split(',')

            #Option 11, simplest option:  just A and B and C
            search_string_list.append('11: ' + " <and>".join(commasplit))

            #Option 12, if an adj phrase is present, distribute
            if re.search(" ", commasplit[0]):
                adjphrase = commasplit[0].split(' ')
                joinedphrase1 = " ".join(adjphrase)
                noun1 = adjphrase.pop()
                commasplit = commasplit[1:]
                for noun in commasplit:
                    joinedphrase1 = joinedphrase1 + " <and> " + " ".join(
                        adjphrase) + noun
                search_string_list.append('12: ' + joinedphrase1)

            #Option 14, if a prefix phrase is present at end, distribute inphrase to all
            commasplit = label.split(',')
            last = commasplit.pop()
            if re.search(" in ", last):
                lastnoun = last.split(' in ')
                inphrase = lastnoun.pop()
                commasplit.append(lastnoun.pop())
                joinedphrase2 = ""
                for noun in commasplit:
                    joinedphrase2 = joinedphrase2 + " <and>" + noun + " in " + inphrase
                joinedphrase2 = re.sub("^ <and>", "", joinedphrase2)
                search_string_list.append('13: ' + joinedphrase2)

                #Option 14 = both 12 and 13
                # NOTE(review): joinedphrase1 is only bound if the Option 12
                # branch above ran; commasplit has been reassigned since, so
                # this condition can be true without it -- potential
                # NameError. Confirm with representative labels.
                if re.search(" ", commasplit[0]):
                    joined1 = joinedphrase1.split(" <and> ")
                    joinedphrase3 = ""
                    for entry in joined1:
                        joinedphrase3 = joinedphrase3 + " <and> " + entry + " in " + inphrase
                    joinedphrase3 = re.sub("^ <and> ", "", joinedphrase3)
                    inphrase = " in " + inphrase + "$"
                    joinedphrase3 = re.sub(inphrase, "", joinedphrase3)
                    search_string_list.append('14: ' + joinedphrase3)

        #option 13:  thinker's (views on) X

        commasplit = label.split(',')
        #print "commasplit[0] is " + commasplit[0]

        if re.search('\'s* views on ', commasplit[0]) or re.search(
                '\'', commasplit[0]):
            if re.search('\'s views on ', commasplit[0]):
                possesssplit = commasplit[0].split('\'s views on')
            elif re.search('\' views on ', commasplit[0]):
                possesssplit = commasplit[0].split('\' views on')
            elif re.search('\'s ', commasplit[0]):
                possesssplit = commasplit[0].split('\'s')
            elif re.search('\' ', commasplit[0]):
                possesssplit = commasplit[0].split('\'')
            else:
                exit = True
            if not exit:
                commasplit[0] = possesssplit[1]
                joinedphrase4 = possesssplit[0]
                for noun in commasplit:
                    joinedphrase4 = joinedphrase4 + " <and>" + noun
                search_string_list.append('15: ' + joinedphrase4)

        return search_string_list
Beispiel #34
0
class Cert(_ExternalInterfaceMixin, Base):
    """Certificate record: the issued cert blob plus issuance, ownership
    and revocation metadata, keyed by (issuing CA label, serial)."""

    __tablename__ = 'cert'
    __table_args__ = {
        'mysql_engine': MYSQL_ENGINE,
        'mysql_charset': MYSQL_CHARSET,
    }

    # Composite primary key: issuing CA label + the certificate serial (hex).
    ca_cert_label = Column(String(100),
                           ForeignKey('ca_cert.ca_label'),
                           primary_key=True)
    serial_hex = Column(String(20), primary_key=True)
    # `LargeBinary` is SQLAlchemy's class used to represent blobs
    certificate = Column(LargeBinary, nullable=False)
    csr = Column(LargeBinary)
    # Creator: either a user login or a component login (both nullable).
    created_by_login = Column(String(255), ForeignKey('user.login'))
    created_by_component_login = Column(String(255),
                                        ForeignKey('component.login'))
    creator_details = Column(LargeBinary)
    # former `n6cert-usage` field has been split into
    # columns for Boolean type: `is_client_cert`, `is_server_cert`
    is_client_cert = Column(Boolean, default=False)
    is_server_cert = Column(Boolean, default=False)
    created_on = Column(DateTime)
    valid_from = Column(DateTime)
    expires_on = Column(DateTime)
    # Owner: either a user login or a component login (both nullable).
    owner_login = Column(String(255), ForeignKey('user.login'))
    owner_component_login = Column(String(255), ForeignKey('component.login'))
    # Revocation metadata; all NULL while the certificate is not revoked.
    revoked_on = Column(DateTime)
    revoked_by_login = Column(String(255), ForeignKey('user.login'))
    revoked_by_component_login = Column(String(255),
                                        ForeignKey('component.login'))
    revocation_comment = Column(Text)

    # User-side relationships, disambiguated by explicit foreign_keys since
    # several columns reference user.login.
    created_by = relationship('User',
                              back_populates='created_certs',
                              uselist=False,
                              foreign_keys=created_by_login)
    owner = relationship('User',
                         back_populates='owned_certs',
                         uselist=False,
                         foreign_keys=owner_login)
    revoked_by = relationship('User',
                              back_populates='revoked_certs',
                              uselist=False,
                              foreign_keys=revoked_by_login)
    # Component-side counterparts of the relationships above.
    created_by_component = relationship(
        'Component',
        back_populates='created_certs',
        uselist=False,
        foreign_keys=created_by_component_login)
    owner_component = relationship('Component',
                                   back_populates='owned_certs',
                                   uselist=False,
                                   foreign_keys=owner_component_login)
    revoked_by_component = relationship(
        'Component',
        back_populates='revoked_certs',
        uselist=False,
        foreign_keys=revoked_by_component_login)
    ca_cert = relationship('CACert', back_populates='certs', uselist=False)

    # the attribute is a reference to `ca_cert.profile`
    ca_profile = association_proxy('ca_cert', 'profile')

    _columns_to_validate = [
        'serial_hex', 'creator_details', 'revocation_comment'
    ]

    @property
    def is_revoked(self):
        """True if any revocation field has been set on this certificate."""
        return any((self.revoked_on, self.revoked_by_login,
                    self.revoked_by_component_login, self.revocation_comment))
Beispiel #35
0
class Filterset(BaseScopedNameMixin, db.Model):
    """
    Store filters to display a filtered set of jobs scoped by a board on SEO friendly URLs

    Eg: `https://hasjob.co/f/machine-learning-jobs-in-bangalore`
    """

    __tablename__ = 'filterset'

    #: Board that scopes this filterset
    board_id = db.Column(None,
                         db.ForeignKey('board.id'),
                         nullable=False,
                         index=True)
    board = db.relationship(Board)
    #: BaseScopedNameMixin scopes names under a `parent`; the board is it
    parent = db.synonym('board')

    #: Welcome text
    description = db.Column(db.UnicodeText, nullable=False, default='')

    #: Associated job types
    types = db.relationship(JobType, secondary=filterset_jobtype_table)
    #: Associated job categories
    categories = db.relationship(JobCategory,
                                 secondary=filterset_jobcategory_table)
    #: Associated tags
    tags = db.relationship(Tag, secondary=filterset_tag_table)
    #: Proxy over tag titles; assigning a title creates the Tag if needed
    auto_tags = association_proxy('tags',
                                  'title',
                                  creator=lambda t: Tag.get(t, create=True))
    #: Associated domains
    domains = db.relationship(Domain, secondary=filterset_domain_table)
    #: Proxy over domain names; `Domain.get` resolves a name to a Domain
    auto_domains = association_proxy('domains',
                                     'name',
                                     creator=lambda d: Domain.get(d))
    #: Location filters as geonameids
    geonameids = db.Column(postgresql.ARRAY(db.Integer(), dimensions=1),
                           default=[],
                           nullable=False)
    #: Whether remote ("anywhere") jobs are included
    remote_location = db.Column(db.Boolean,
                                default=False,
                                nullable=False,
                                index=True)
    #: Pay filter: currency and minimum cash amount
    pay_currency = db.Column(db.CHAR(3), nullable=True, index=True)
    pay_cash = db.Column(db.Integer, nullable=True, index=True)
    #: Whether equity-offering jobs are included
    equity = db.Column(db.Boolean, nullable=False, default=False, index=True)
    #: Free-text search keywords
    keywords = db.Column(db.Unicode(250),
                         nullable=False,
                         default='',
                         index=True)

    def __repr__(self):
        """Debug representation with board and filterset titles."""
        return f'<Filterset {self.board.title} "{self.title}">'

    @classmethod
    def get(cls, board, name):
        """Return the filterset named `name` on `board`, or None."""
        return cls.query.filter(cls.board == board,
                                cls.name == name).one_or_none()

    def url_for(self, action='view', _external=True, **kwargs):
        """Build a URL for this filterset, on the board's subdomain if any."""
        kwargs.setdefault('subdomain',
                          self.board.name if self.board.not_root else None)
        return super().url_for(action,
                               name=self.name,
                               _external=_external,
                               **kwargs)

    def to_filters(self, translate_geonameids=False):
        """Serialize this filterset into a filter dict.

        :param translate_geonameids: If True, the 'l' key carries location
            names instead of raw geonameids.
        """
        location_names = []
        if translate_geonameids and self.geonameids:
            location_dict = location_geodata(self.geonameids)
            for geonameid in self.geonameids:
                # location_geodata returns related geonames as well
                # so we prune it down to our original list
                location_names.append(location_dict[geonameid]['name'])

        return {
            't': [jobtype.name for jobtype in self.types],
            'c': [jobcategory.name for jobcategory in self.categories],
            'k': [tag.name for tag in self.tags],
            'd': [domain.name for domain in self.domains],
            'l': location_names if translate_geonameids else self.geonameids,
            'currency': self.pay_currency,
            'pay': self.pay_cash,
            'equity': self.equity,
            'anywhere': self.remote_location,
            'q': self.keywords,
        }

    @classmethod
    def from_filters(cls, board, filters):
        """Find the filterset on `board` whose stored filters exactly match
        the given filter dict (keys as produced by :meth:`to_filters`).

        For each many-to-many filter, a count-based `having` clause ensures
        an exact set match; an empty filter requires the absence of any
        association rows. Returns None if no filterset matches.
        """
        basequery = cls.query.filter(cls.board == board)

        # Job types: exact set match, or no job type associations at all
        if filters.get('t'):
            basequery = (
                basequery.join(filterset_jobtype_table).join(JobType).filter(
                    JobType.name.in_(filters['t'])).group_by(
                        Filterset.id).having(
                            db.func.count(filterset_jobtype_table.c.
                                          filterset_id) == len(filters['t'])))
        else:
            basequery = basequery.filter(~(db.exists(
                db.select([1]).where(
                    Filterset.id == filterset_jobtype_table.c.filterset_id))))

        # Job categories: same exact-match pattern
        if filters.get('c'):
            basequery = (basequery.join(filterset_jobcategory_table).join(
                JobCategory).filter(JobCategory.name.in_(
                    filters['c'])).group_by(Filterset.id).having(
                        db.func.count(filterset_jobcategory_table.c.
                                      filterset_id) == len(filters['c'])))
        else:
            basequery = basequery.filter(~(db.exists(
                db.select([1]).where(
                    Filterset.id ==
                    filterset_jobcategory_table.c.filterset_id))))

        # Tags: same exact-match pattern
        if filters.get('k'):
            basequery = (basequery.join(filterset_tag_table).join(Tag).filter(
                Tag.name.in_(filters['k'])).group_by(Filterset.id).having(
                    db.func.count(filterset_tag_table.c.filterset_id) == len(
                        filters['k'])))
        else:
            basequery = basequery.filter(~(db.exists(
                db.select([1]).where(
                    Filterset.id == filterset_tag_table.c.filterset_id))))

        # Domains: same exact-match pattern
        if filters.get('d'):
            basequery = (
                basequery.join(filterset_domain_table).join(Domain).filter(
                    Domain.name.in_(filters['d'])).group_by(
                        Filterset.id).having(
                            db.func.count(filterset_domain_table.c.filterset_id
                                          ) == len(filters['d'])))
        else:
            basequery = basequery.filter(~(db.exists(
                db.select([1]).where(
                    Filterset.id == filterset_domain_table.c.filterset_id))))

        # Locations: compared as a sorted array
        # NOTE(review): assumes stored geonameids arrays are sorted — confirm
        if filters.get('l'):
            basequery = basequery.filter(
                cls.geonameids == sorted(filters['l']))
        else:
            basequery = basequery.filter(cls.geonameids == [])

        # Scalar filters: absent means "must be unset/default"
        if filters.get('equity'):
            basequery = basequery.filter(cls.equity.is_(True))
        else:
            basequery = basequery.filter(cls.equity.is_(False))

        if filters.get('pay') and filters.get('currency'):
            basequery = basequery.filter(
                cls.pay_cash == filters['pay'],
                cls.pay_currency == filters['currency'])
        else:
            basequery = basequery.filter(cls.pay_cash.is_(None),
                                         cls.pay_currency.is_(None))

        if filters.get('q'):
            basequery = basequery.filter(cls.keywords == filters['q'])
        else:
            basequery = basequery.filter(cls.keywords == '')

        if filters.get('anywhere'):
            basequery = basequery.filter(cls.remote_location.is_(True))
        else:
            basequery = basequery.filter(cls.remote_location.is_(False))

        return basequery.one_or_none()
# Example #36
    def build_relationship(self):
        """Wire this attribute into every registered entity model.

        For each model in EntityRegistry that does not already know this
        attribute (by ref_name), registers the attribute on the model,
        builds a many-to-many relationship through ``__secondary__`` with a
        backref on the model, and optionally installs an association proxy
        (named ``__proxy_name__``) exposing ``__outline__`` values.
        """
        attribute = self
        # Only entities that have not yet registered this attribute
        entities = ([
            entity for entity in EntityRegistry.values()
            if attribute.ref_name not in entity.attribute_models
        ])
        for model in entities:
            # Register both directions of the attribute<->entity mapping
            model.attribute_models[attribute.ref_name] = attribute
            parentname = _get_alias_dict(model.__dict__)
            attribute.entity_models.append(parentname)

            # TODO: autocache = attribute.__autocache__
            cacheable = attribute.__cacheable__
            if cacheable:
                model.__cacheable_attributes__.add(attribute.ref_name)

            # Per-attribute configuration declared on the attribute class
            backref_name = attribute.__backref_name__
            proxy_name = attribute.__proxy_name__
            collector = attribute.__collector__
            outline = attribute.__outline__

            # Many-to-many through the attribute's secondary table; the
            # join conditions are given explicitly in both directions so
            # the backref mirrors them with foreign()/remote() swapped.
            relationship = db.relationship(
                model,
                secondary=attribute.__secondary__,
                primaryjoin=attribute.__secondary__.c.attr_uuid ==
                attribute.uuid,
                secondaryjoin=foreign(
                    attribute.__secondary__.c.entity_uuid) == remote(
                        model.uuid),
                backref=backref(
                    backref_name,
                    primaryjoin=attribute.__secondary__.c.entity_uuid ==
                    model.uuid,
                    secondaryjoin=foreign(
                        attribute.__secondary__.c.attr_uuid) == remote(
                            attribute.uuid),
                    collection_class=collector),
                uselist=True,
                cascade="save-update, merge, refresh-expire, expunge",
            )

            # The relationship lives on the attribute class, keyed by the
            # entity's alias name; the backref exposes it on the model.
            setattr(attribute, parentname, relationship)

            if outline:
                # Wrap any configured creator so instances are fetched or
                # created on demand
                if hasattr(collector, '__proxy_args__'):
                    creator = collector.__proxy_args__.get('creator', None)
                else:
                    creator = None

                get_or_create_attribute = GetOrCreateAttribute(
                    attribute, creator)

                if hasattr(collector, '__proxy_args__'):
                    # NOTE(review): `.get()` is used above but `.set()` is
                    # called here — a plain dict has no `.set()`, so
                    # __proxy_args__ is presumably a custom mapping type.
                    # Confirm; otherwise this branch raises AttributeError.
                    collector.__proxy_args__.set('creator',
                                                 get_or_create_attribute)
                    setattr(
                        model, proxy_name,
                        association_proxy(backref_name, outline,
                                          **collector.__proxy_args__))
                else:
                    setattr(
                        model, proxy_name,
                        association_proxy(backref_name,
                                          outline,
                                          creator=get_or_create_attribute))
# Example #37
class BIDSFile(Base):
    """Represents a single file or directory in a BIDS dataset.

    Parameters
    ----------
    filename : str
        The path to the corresponding file.
    """
    __tablename__ = 'files'

    path = Column(String, primary_key=True)
    filename = Column(String)
    dirname = Column(String)
    # Maps entity name -> tagged value via the `tags` relationship
    entities = association_proxy("tags", "value")
    is_dir = Column(Boolean)
    # Polymorphic discriminator; see __mapper_args__ below
    class_ = Column(String(20))

    # Files this file is associated with (self-referential many-to-many
    # through the `associations` table)
    _associations = relationship(
        'BIDSFile',
        secondary='associations',
        primaryjoin='FileAssociation.dst == BIDSFile.path',
        secondaryjoin='FileAssociation.src == BIDSFile.path')

    __mapper_args__ = {
        'polymorphic_on': class_,
        'polymorphic_identity': 'file'
    }

    def __init__(self, filename):
        self.path = filename
        self.filename = os.path.basename(self.path)
        self.dirname = os.path.dirname(self.path)
        # A trailing path separator yields an empty basename => directory
        self.is_dir = not self.filename

    def __getattr__(self, attr):
        # Ensures backwards compatibility with old File_ namedtuple, which is
        # deprecated as of 0.7.
        # _ check first to not mask away access to __setstate__ etc.
        # AFAIK None of the entities are allowed to start with _ anyways
        # so the check is more generic than __
        if not attr.startswith('_') and attr in self.entities:
            # Fixed: the parenthesis in this message was left unclosed
            warnings.warn("Accessing entities as attributes is deprecated as "
                          "of 0.7. Please use the .entities dictionary instead"
                          " (i.e., .entities['%s'] instead of .%s)." %
                          (attr, attr))
            return self.entities[attr]
        raise AttributeError("%s object has no attribute named %r" %
                             (self.__class__.__name__, attr))

    def __repr__(self):
        return "<{} filename='{}'>".format(self.__class__.__name__, self.path)

    def __fspath__(self):
        # os.PathLike support: BIDSFile can be passed to open(), Path(), etc.
        return self.path

    @property
    def relpath(self):
        """Return path relative to layout root.

        The value is memoized on the instance. (The previous implementation
        stacked ``@lru_cache()`` under ``@property``, which keys the cache
        on ``self`` and keeps every BIDSFile instance alive for the lifetime
        of the process-wide cache — ruff rule B019.)
        """
        try:
            return self._relpath
        except AttributeError:
            # First access: resolve the layout root from the session and
            # cache the computed relative path on the instance.
            root = object_session(self).query(LayoutInfo).first().root
            self._relpath = str(Path(self.path).relative_to(root))
            return self._relpath

    def get_associations(self, kind=None, include_parents=False):
        """Get associated files, optionally limiting by association kind.

        Parameters
        ----------
        kind : str
            The kind of association to return (e.g., "Child").
            By default, all associations are returned.
        include_parents : bool
            If True, files related through inheritance
            are included in the returned list. If False, only directly
            associated files are returned. For example, a file's JSON
            sidecar will always be returned, but other JSON files from
            which the sidecar inherits will only be returned if
            include_parents=True.

        Returns
        -------
        list
            A list of BIDSFile instances.
        """
        # Fast path: the pre-built relationship already holds all direct
        # associations
        if kind is None and not include_parents:
            return self._associations

        session = object_session(self)
        q = (session.query(BIDSFile).join(
            FileAssociation,
            BIDSFile.path == FileAssociation.dst).filter_by(src=self.path))

        if kind is not None:
            q = q.filter_by(kind=kind)

        associations = q.all()

        if not include_parents:
            return associations

        # Recursively follow 'Child' links to pick up inherited files
        def collect_associations(results, bidsfile):
            results.append(bidsfile)
            for p in bidsfile.get_associations('Child'):
                results = collect_associations(results, p)
            return results

        return list(
            chain(*[collect_associations([], bf) for bf in associations]))

    def get_metadata(self):
        """Return all metadata associated with the current file. """
        md = BIDSMetadata(self.path)
        md.update(self.get_entities(metadata=True))
        return md

    def get_entities(self, metadata=False, values='tags'):
        """Return entity information for the current file.

        Parameters
        ----------
        metadata : bool or None
            If False (default), only entities defined
            for filenames (and not those found in the JSON sidecar) are
            returned. If True, only entities found in metadata files (and not
            defined for filenames) are returned. If None, all available
            entities are returned.
        values : str
            The kind of object to return in the dict's values.
            Must be one of:
                * 'tags': Returns only the tagged value--e.g., if the key
                is "subject", the value might be "01".
                * 'objects': Returns the corresponding Entity instance.

        Returns
        -------
        dict
            A dict, where keys are entity names and values are Entity
            instances.
        """
        session = object_session(self)
        query = (session.query(Tag).filter_by(
            file_path=self.path).join(Entity))
        # metadata=None (or 'all') returns both filename and metadata
        # entities; otherwise filter to the requested kind
        if metadata not in (None, 'all'):
            query = query.filter(Entity.is_metadata == metadata)

        results = query.all()
        if values.startswith('obj'):
            return {t.entity_name: t.entity for t in results}
        return {t.entity_name: t.value for t in results}

    def copy(self,
             path_patterns,
             symbolic_link=False,
             root=None,
             conflicts='fail'):
        """Copy the contents of a file to a new location.

        Parameters
        ----------
        path_patterns : list
            List of patterns used to construct the new
            filename. See :obj:`build_path` documentation for details.
        symbolic_link : bool
            If True, use a symbolic link to point to the
            existing file. If False, creates a new file.
        root : str
            Optional path to prepend to the constructed filename.
        conflicts : str
            Defines the desired action when the output path already exists.
            Must be one of:
                'fail': raises an exception
                'skip' does nothing
                'overwrite': overwrites the existing file
                'append': adds  a suffix to each file copy, starting with 1
        """
        new_filename = build_path(self.entities, path_patterns)
        if not new_filename:
            return None

        # A pattern ending in a separator means "directory": keep the
        # original basename
        if new_filename[-1] == os.sep:
            new_filename += self.filename

        if os.path.isabs(self.path) or root is None:
            path = self.path
        else:
            path = os.path.join(root, self.path)

        if not os.path.exists(path):
            raise ValueError("Target filename to copy/symlink (%s) doesn't "
                             "exist." % path)

        kwargs = dict(path=new_filename, root=root, conflicts=conflicts)
        if symbolic_link:
            kwargs['link_to'] = path
        else:
            kwargs['copy_from'] = path

        write_to_file(**kwargs)
# Example #38
    def setup_db_cc_classes(self, cc):
        """Dynamically build ORM classes/tables for Calibre custom columns.

        For each custom-column row in `cc` (except excluded datatypes),
        creates a `custom_column_<id>` mapped class and, where needed, a
        `books_custom_column_<id>_link` association class or table, then
        attaches a matching relationship to the Books model. Returns the
        populated `cc_classes` registry.
        """
        cc_ids = []
        books_custom_column_links = {}
        for row in cc:
            if row.datatype not in cc_exceptions:
                if row.datatype == 'series':
                    # Series columns carry an extra 'extra' float (series
                    # index), so the link must be a mapped class, not a
                    # plain secondary table; 'value' proxies to the linked
                    # custom-column row's value.
                    dicttable = {'__tablename__': 'books_custom_column_' + str(row.id) + '_link',
                                 'id': Column(Integer, primary_key=True),
                                 'book': Column(Integer, ForeignKey('books.id'),
                                                primary_key=True),
                                 'map_value': Column('value', Integer,
                                                     ForeignKey('custom_column_' +
                                                                str(row.id) + '.id'),
                                                     primary_key=True),
                                 'extra': Column(Float),
                                 'asoc': relationship('custom_column_' + str(row.id), uselist=False),
                                 'value': association_proxy('asoc', 'value')
                                 }
                    books_custom_column_links[row.id] = type(str('books_custom_column_' + str(row.id) + '_link'),
                                                             (Base,), dicttable)
                else:
                    # Plain many-to-many link table for all other datatypes
                    books_custom_column_links[row.id] = Table('books_custom_column_' + str(row.id) + '_link',
                                                              Base.metadata,
                                                              Column('book', Integer, ForeignKey('books.id'),
                                                                     primary_key=True),
                                                              Column('value', Integer,
                                                                     ForeignKey('custom_column_' +
                                                                                str(row.id) + '.id'),
                                                                     primary_key=True)
                                                              )
                cc_ids.append([row.id, row.datatype])

                # Value table: column type depends on the declared datatype
                ccdict = {'__tablename__': 'custom_column_' + str(row.id),
                          'id': Column(Integer, primary_key=True)}
                if row.datatype == 'float':
                    ccdict['value'] = Column(Float)
                elif row.datatype == 'int':
                    ccdict['value'] = Column(Integer)
                elif row.datatype == 'bool':
                    ccdict['value'] = Column(Boolean)
                else:
                    ccdict['value'] = Column(String)
                # Scalar datatypes link directly to the book (one-to-many),
                # so the value table itself carries the book FK
                if row.datatype in ['float', 'int', 'bool']:
                    ccdict['book'] = Column(Integer, ForeignKey('books.id'))
                cc_classes[row.id] = type(str('custom_column_' + str(row.id)), (Base,), ccdict)

        # Attach a relationship per custom column to the Books model,
        # shaped according to how the column is stored (see above)
        for cc_id in cc_ids:
            if (cc_id[1] == 'bool') or (cc_id[1] == 'int') or (cc_id[1] == 'float'):
                setattr(Books,
                        'custom_column_' + str(cc_id[0]),
                        relationship(cc_classes[cc_id[0]],
                                     primaryjoin=(
                                         Books.id == cc_classes[cc_id[0]].book),
                                     backref='books'))
            elif (cc_id[1] == 'series'):
                setattr(Books,
                        'custom_column_' + str(cc_id[0]),
                        relationship(books_custom_column_links[cc_id[0]],
                                     backref='books'))
            else:
                setattr(Books,
                        'custom_column_' + str(cc_id[0]),
                        relationship(cc_classes[cc_id[0]],
                                     secondary=books_custom_column_links[cc_id[0]],
                                     backref='books'))

        return cc_classes
# Example #39
class Attachment(ProtectionMixin, VersionedResourceMixin, db.Model):
    """An attachment: either an uploaded file or an external link."""

    __tablename__ = 'attachments'
    __table_args__ = (
        # links: url but no file
        db.CheckConstraint(
            f'type != {AttachmentType.link.value} OR (link_url IS NOT NULL AND file_id IS NULL)',
            'valid_link'),
        # we can't require the file_id to be NOT NULL for files because of the circular relationship...
        # but we can ensure that we never have both a file_id AND a link_url...for
        db.CheckConstraint('link_url IS NULL OR file_id IS NULL',
                           'link_or_file'),
        {
            'schema': 'attachments'
        })

    # VersionedResourceMixin configuration: where file versions are stored
    stored_file_table = 'attachments.files'
    stored_file_class = AttachmentFile
    stored_file_fkey = 'attachment_id'

    #: The ID of the attachment
    id = db.Column(db.Integer, primary_key=True)
    #: The ID of the folder the attachment belongs to
    folder_id = db.Column(db.Integer,
                          db.ForeignKey('attachments.folders.id'),
                          nullable=False,
                          index=True)
    #: The ID of the user who created the attachment
    user_id = db.Column(db.Integer,
                        db.ForeignKey('users.users.id'),
                        index=True,
                        nullable=False)
    #: If the attachment has been deleted
    is_deleted = db.Column(db.Boolean, nullable=False, default=False)
    #: The name of the attachment
    title = db.Column(db.String, nullable=False)
    #: The description of the attachment
    description = db.Column(db.Text, nullable=False, default='')
    #: The date/time when the attachment was created/modified
    modified_dt = db.Column(UTCDateTime,
                            nullable=False,
                            default=now_utc,
                            onupdate=now_utc)
    #: The type of the attachment (file or link)
    type = db.Column(PyIntEnum(AttachmentType), nullable=False)
    #: The target URL for a link attachment
    link_url = db.Column(db.String, nullable=True)

    #: The user who created the attachment
    user = db.relationship('User',
                           lazy=True,
                           backref=db.backref('attachments', lazy='dynamic'))
    #: The folder containing the attachment
    folder = db.relationship('AttachmentFolder',
                             lazy=True,
                             backref=db.backref('all_attachments', lazy=True))
    #: Explicit ACL entries; a set, deleted together with the attachment
    acl_entries = db.relationship('AttachmentPrincipal',
                                  backref='attachment',
                                  cascade='all, delete-orphan',
                                  collection_class=set)
    #: The ACL of the folder (used for ProtectionMode.protected)
    acl = association_proxy('acl_entries',
                            'principal',
                            creator=lambda v: AttachmentPrincipal(principal=v))

    # relationship backrefs:
    # - legacy_mapping (LegacyAttachmentMapping.attachment)

    @property
    def protection_parent(self):
        """Protection settings are inherited from the containing folder."""
        return self.folder

    @property
    def locator(self):
        """URL locator: the folder's locator plus this attachment's id."""
        return dict(self.folder.locator, attachment_id=self.id)

    def get_download_url(self, absolute=False):
        """Return the download url for the attachment.

        During static site generation this returns a local URL for the
        file or the target URL for the link.

        :param absolute: If the returned URL should be absolute.
        """
        if g.get('static_site'):
            return _offline_download_url(self)
        else:
            # Links use a generic 'go' filename since there is no file
            filename = self.file.filename if self.type == AttachmentType.file else 'go'
            return url_for('attachments.download',
                           self,
                           filename=filename,
                           _external=absolute)

    @property
    def download_url(self):
        """The download url for the attachment."""
        return self.get_download_url()

    @property
    def absolute_download_url(self):
        """The absolute download url for the attachment."""
        return self.get_download_url(absolute=True)

    def can_access(self, user, *args, **kwargs):
        """Check if the user is allowed to access the attachment.

        This is the case if the user has access to see the attachment
        or if the user can manage attachments for the linked object.
        """
        return (super().can_access(user, *args, **kwargs)
                or can_manage_attachments(self.folder.object, user))

    def __repr__(self):
        """Debug representation with file/link, deletion and protection state."""
        return '<Attachment({}, {}, {}{}, {}, {})>'.format(
            self.id, self.title,
            self.file if self.type == AttachmentType.file else self.link_url,
            ', is_deleted=True' if self.is_deleted else '',
            self.protection_repr, self.folder_id)
# Example #40
class Distro(DeclarativeMappedObject, ActivityMixin):
    """A distribution, linked to an OS version, its trees and its tags."""

    __tablename__ = 'distro'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    id = Column(Integer, autoincrement=True, primary_key=True)
    name = Column(Unicode(255), nullable=False, unique=True)
    osversion_id = Column(Integer, ForeignKey('osversion.id'), nullable=False)
    osversion = relationship(OSVersion, back_populates='distros')
    date_created = Column(DateTime, nullable=False, default=datetime.utcnow)
    # Tags via the distro_tag_map association table; exposed as plain
    # strings through the `tags` association proxy below
    _tags = relationship('DistroTag',
                         secondary=distro_tag_map,
                         back_populates='distros')
    # Activity log, newest entries first
    activity = relationship(
        DistroActivity,
        back_populates='object',
        order_by=[DistroActivity.created.desc(),
                  DistroActivity.id.desc()])
    trees = relationship('DistroTree',
                         back_populates='distro',
                         order_by='[DistroTree.variant, DistroTree.arch_id]')
    # Query-returning loader for trees, for filtering without loading all
    dyn_trees = dynamic_loader('DistroTree')

    # ActivityMixin configuration
    activity_type = DistroActivity

    @staticmethod
    def _validate_name(name):
        """Raise ValueError for an empty/falsy distro name."""
        if not name:
            raise ValueError('Distro name cannot be empty')

    @validates('name')
    def validate_name(self, key, value):
        """SQLAlchemy validator: reject empty names on assignment."""
        self._validate_name(value)
        return value

    @classmethod
    def lazy_create(cls, name, osversion):
        """Get or create a distro with the given name and OS version."""
        cls._validate_name(name)
        return super(Distro, cls).lazy_create(
            name=name, _extra_attrs=dict(osversion_id=osversion.id))

    @classmethod
    def by_name(cls, name):
        """Return the distro with this name, raising a lookup error if absent."""
        with convert_db_lookup_error('No such distro: %s' % name):
            return cls.query.filter_by(name=name).one()

    @classmethod
    def by_id(cls, id):
        """Return the distro with this id (raises if not exactly one)."""
        return cls.query.filter_by(id=id).one()

    def __unicode__(self):
        return self.name

    def __str__(self):
        # Python 2: encode the unicode name as a utf-8 byte string
        return unicode(self).encode('utf8')

    def __repr__(self):
        return '%s(name=%r)' % (self.__class__.__name__, self.name)

    def __json__(self):
        """Minimal JSON representation (id and name only)."""
        return {
            'id': self.id,
            'name': self.name,
        }

    @property
    def link(self):
        """HTML link to this distro's view page."""
        return make_link(url='/distros/view?id=%s' % self.id, text=self.name)

    def expire(self, service=u'XMLRPC'):
        """Expire all trees belonging to this distro."""
        for tree in self.trees:
            tree.expire(service=service)

    # Tag strings proxied through _tags; assigning a string lazily creates
    # the DistroTag row
    tags = association_proxy(
        '_tags', 'tag', creator=lambda tag: DistroTag.lazy_create(tag=tag))

    def add_tag(self, tag):
        """
        Adds the given tag to this distro if it's not already present.
        """
        tagobj = DistroTag.lazy_create(tag=tag)
        # ConditionalInsert avoids a duplicate-key race on the mapping row
        session.connection(self.__class__).execute(
            ConditionalInsert(
                distro_tag_map, {
                    distro_tag_map.c.distro_id: self.id,
                    distro_tag_map.c.distro_tag_id: tagobj.id
                }))
# Example #41
class Release(db.ModelBase):
    """A single release (version) of a project on the package index."""

    __tablename__ = "releases"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("release_created_idx", cls.created.desc()),
            Index("release_name_created_idx", cls.name, cls.created.desc()),
            Index("release_name_idx", cls.name),
            Index("release_pypi_hidden_idx", cls._pypi_hidden),
            Index("release_version_idx", cls.version),
        )

    __repr__ = make_repr("name", "version")

    # Composite primary key: (project name, version)
    name = Column(
        Text,
        ForeignKey("packages.name", onupdate="CASCADE"),
        primary_key=True,
    )
    version = Column(Text, primary_key=True)
    # Core metadata fields (PEP 241/314 style)
    author = Column(Text)
    author_email = Column(Text)
    maintainer = Column(Text)
    maintainer_email = Column(Text)
    home_page = Column(Text)
    license = Column(Text)
    summary = Column(Text)
    description = Column(Text)
    keywords = Column(Text)
    platform = Column(Text)
    download_url = Column(Text)
    _pypi_ordering = Column(Integer)
    _pypi_hidden = Column(Boolean)
    # Legacy cheesecake quality-index references
    cheesecake_installability_id = Column(
        Integer,
        ForeignKey("cheesecake_main_indices.id"),
    )
    cheesecake_documentation_id = Column(
        Integer,
        ForeignKey("cheesecake_main_indices.id"),
    )
    cheesecake_code_kwalitee_id = Column(
        Integer,
        ForeignKey("cheesecake_main_indices.id"),
    )
    requires_python = Column(Text)
    description_from_readme = Column(Boolean)
    created = Column(
        DateTime(timezone=False),
        nullable=False,
        server_default=sql.func.now(),
    )

    # Trove classifiers, exposed as plain strings via the proxy below
    _classifiers = orm.relationship(
        Classifier,
        backref="project_releases",
        secondary=lambda: release_classifiers,
        order_by=Classifier.classifier,
    )
    classifiers = association_proxy("_classifiers", "classifier")

    files = orm.relationship(
        "File",
        backref="release",
        cascade="all, delete-orphan",
        lazy="dynamic",
        order_by=lambda: File.filename,
    )

    dependencies = orm.relationship("Dependency")

    # One filtered relationship + string proxy per dependency kind
    _requires = _dependency_relation(DependencyKind.requires)
    requires = association_proxy("_requires", "specifier")

    _provides = _dependency_relation(DependencyKind.provides)
    provides = association_proxy("_provides", "specifier")

    _obsoletes = _dependency_relation(DependencyKind.obsoletes)
    obsoletes = association_proxy("_obsoletes", "specifier")

    _requires_dist = _dependency_relation(DependencyKind.requires_dist)
    requires_dist = association_proxy("_requires_dist", "specifier")

    _provides_dist = _dependency_relation(DependencyKind.provides_dist)
    provides_dist = association_proxy("_provides_dist", "specifier")

    _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
    obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")

    _requires_external = _dependency_relation(DependencyKind.requires_external)
    requires_external = association_proxy("_requires_external", "specifier")

    # Project URLs are stored as dependency rows of kind project_url
    _project_urls = _dependency_relation(DependencyKind.project_url)
    project_urls = association_proxy("_project_urls", "specifier")

    # The uploading user, resolved through the journal's "new release" entry
    uploader = orm.relationship(
        "User",
        secondary=lambda: JournalEntry.__table__,
        primaryjoin=lambda:
        ((JournalEntry.name == orm.foreign(Release.name)) &
         (JournalEntry.version == orm.foreign(Release.version)) &
         (JournalEntry.action == "new release")),
        secondaryjoin=lambda: (
            (User.username == orm.foreign(JournalEntry._submitted_by))),
        order_by=lambda: JournalEntry.submitted_date.desc(),
        # TODO: We have uselist=False here which raises a warning because
        # multiple items were returned. This should only be temporary because
        # we should add a nullable FK to JournalEntry so we don't need to rely
        # on ordering and implicitly selecting the first object to make this
        # happen,
        uselist=False,
        viewonly=True,
    )

    @property
    def urls(self):
        """Ordered mapping of display name -> URL for this release.

        Homepage comes first, then the stored project URLs (each a
        "name,url" specifier string), then the download URL if not
        already present.
        """
        _urls = OrderedDict()

        if self.home_page:
            _urls["Homepage"] = self.home_page

        for urlspec in self.project_urls:
            # Specifiers are "name,url"; split on the first comma only
            name, url = urlspec.split(",", 1)
            _urls[name] = url

        if self.download_url and "Download" not in _urls:
            _urls["Download"] = self.download_url

        return _urls

    @property
    def has_meta(self):
        """Whether the release carries extra metadata (currently keywords)."""
        return any([self.keywords])
# Example #42
class Thinker(Entity):
    """
    Simple Thinker class, has custom definitions for representation strings.

    Any extra keyword arguments passed to the constructor are set verbatim
    as instance attributes.
    """
    def __init__(self, name, **kwargs):
        self.name = name
        # items() instead of iteritems(): equivalent on Python 2 and the
        # only spelling that exists on Python 3.
        for k, v in kwargs.items():
            self.__setattr__(k, v)

    def __repr__(self):
        # NOTE(review): encode() yields bytes on Python 3, which renders as
        # b'...' inside %s — this formatting assumes Python 2. Confirm the
        # target interpreter before removing the encode() calls.
        return '<Thinker %s: %s>' % (self.ID, self.label.encode('utf-8'))

    def __str__(self):
        return self.label.encode('utf-8')

    def url(self, filetype='html', action='view'):
        """Build the routing URL for this thinker."""
        return url(controller='thinker',
                   action=action,
                   id=self.ID,
                   filetype=filetype)

    # Plain alias values of the 'alias' relationship rows.
    aliases = association_proxy('alias', 'value')

    def json_struct(self,
                    sep_filter=True,
                    limit=10,
                    extended=True,
                    graph=False,
                    glimit=None):
        """Return a JSON-serializable dict describing this thinker.

        Parameters
        ----------
        sep_filter : bool
            When True, related_ideas only includes ideas with a non-empty
            sep_dir.
        limit : int
            Cap on related_ideas. NOTE(review): the slice is
            ``[:limit - 1]``, so at most limit-1 items are returned —
            possibly an off-by-one; preserved as-is.
        extended : bool
            Include biographical and relational fields.
        graph : bool
            Include graph-edge summaries.
        glimit : int or None
            Cap on each edge list when graph is True.
        """
        struct = {
            'ID': self.ID,
            'type': 'thinker',
            'label': self.label,
            'sep_dir': self.sep_dir,
            'url': self.url()
        }
        if extended:
            struct.update({
                'aliases': self.aliases,
                'birth': {
                    'year': self.birth_year,
                    'month': self.birth_month,
                    'day': self.birth_day
                },
                'birth_string': self.birth_string,
                'death': {
                    'year': self.death_year,
                    'month': self.death_month,
                    'day': self.death_day
                },
                'death_string': self.death_string,
                'nationalities': [n.name for n in self.nationalities],
                'professions': [p.name for p in self.professions],
                'teachers': [t.ID for t in self.teachers],
                'students': [s.ID for s in self.students],
                'influenced_by': [i.ID for i in self.influenced_by],
                'influenced': [i.ID for i in self.influenced]
            })
            if sep_filter:
                struct.update({
                    'related_ideas': [
                        i.ID for i in self.related_ideas.filter(
                            Idea.sep_dir != '')[:limit - 1]
                    ]
                })
            else:
                struct.update({
                    'related_ideas':
                    [i.ID for i in self.related_ideas[:limit - 1]]
                })

        if graph:
            struct.update({
                'it_in':
                [edge.json_struct() for edge in self.it_in_edges[:glimit]],
                'it_out':
                [edge.json_struct() for edge in self.it_out_edges[:glimit]],
                'tt_in':
                [edge.json_struct() for edge in self.tt_in_edges[:glimit]],
                'tt_out':
                [edge.json_struct() for edge in self.tt_out_edges[:glimit]],
            })

        return struct

    def birth_sd(self):
        """Birth date as a SplitDate."""
        return SplitDate(self.birth_year, self.birth_month, self.birth_day)

    def death_sd(self):
        """Death date as a SplitDate."""
        return SplitDate(self.death_year, self.death_month, self.death_day)

    @property
    def birth_string(self):
        return str(self.birth_sd())

    @property
    def death_string(self):
        return str(self.death_sd())

    def get_filename(self, corpus_path=None):
        """Return the path to this thinker's SEP index.html, or None when
        corpus_path/sep_dir is missing or the file does not exist."""
        if corpus_path and self.sep_dir:
            filename = os.path.join(corpus_path, self.sep_dir, 'index.html')
            if not os.path.exists(filename):
                filename = None
        else:
            filename = None

        return filename
Beispiel #43
0
class TraceFrame(Base, PrepareMixin, RecordMixin):  # noqa
    """A single caller -> callee edge of a taint trace, scoped to a run."""

    __tablename__ = "trace_frames"

    __table_args__ = (
        Index("ix_traceframe_run_caller_port", "run_id", "caller_id",
              "caller_port"),
        Index("ix_traceframe_run_callee_port", "run_id", "callee_id",
              "callee_port"),
    )

    # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.
    id: DBID = Column(BIGDBIDType, nullable=False, primary_key=True)

    kind = Column(Enum(TraceKind), nullable=False, index=False)

    caller_id = Column(BIGDBIDType,
                       nullable=False,
                       server_default="0",
                       default=0)

    caller = relationship(
        "SharedText",
        primaryjoin="foreign(SharedText.id) == TraceFrame.caller_id",
        uselist=False,
    )

    # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.
    caller_port: str = Column(
        String(length=INNODB_MAX_INDEX_LENGTH),
        nullable=False,
        server_default="",
        doc="The caller port of this call edge",
    )

    callee_id = Column(BIGDBIDType,
                       nullable=False,
                       server_default="0",
                       default=0)

    callee = relationship(
        "SharedText",
        primaryjoin="foreign(SharedText.id) == TraceFrame.callee_id",
        uselist=False,
    )

    callee_location = Column(
        SourceLocationType,
        nullable=False,
        doc="The location of the callee in the source code (line|start|end)",
    )

    # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.
    callee_port: str = Column(
        String(length=INNODB_MAX_INDEX_LENGTH),
        nullable=False,
        server_default="",
        # Fixed: stray trailing apostrophe in the doc string.
        doc="The callee port of this call edge",
    )

    filename_id = Column(BIGDBIDType,
                         nullable=False,
                         server_default="0",
                         default=0)

    run_id = Column("run_id", BIGDBIDType, nullable=False, index=False)

    type_interval_lower = Column(Integer,
                                 nullable=True,
                                 doc="Class interval lower-bound (inclusive)")

    type_interval_upper = Column(Integer,
                                 nullable=True,
                                 doc="Class interval upper-bound (inclusive)")

    migrated_id = Column(
        BIGDBIDType,
        nullable=True,
        doc=(
            "ID of the corresponding pre/postcondition. Temporary column used "
            "for migrating existing pre/postconditions into trace frames. "
            "Will be removed once migration is completed. Use None if not "
            "in data migration mode."),
    )

    preserves_type_context = Column(
        Boolean,
        default=False,
        server_default="0",
        nullable=False,
        doc="Whether the call preserves the calling type context",
    )

    titos = Column(
        SourceLocationsType,
        doc="Locations of TITOs aka abductions for the trace frame",
        nullable=False,
        server_default="",
    )

    annotations = relationship(
        "TraceFrameAnnotation",
        primaryjoin=("TraceFrame.id == "
                     "foreign(TraceFrameAnnotation.trace_frame_id)"),
        uselist=True,
    )

    # Leaf kinds reachable through this frame, via the assoc rows below.
    leaves = association_proxy("leaf_assoc", "leaves")

    leaf_assoc = relationship(
        "TraceFrameLeafAssoc",
        primaryjoin=("TraceFrame.id == "
                     "foreign(TraceFrameLeafAssoc.trace_frame_id)"),
        uselist=True,
    )

    # Issue instances whose traces include this frame, via the assoc rows.
    issue_instances = association_proxy("trace_frame_issue_instance",
                                        "issue_instance")

    trace_frame_issue_instance = relationship(
        "IssueInstanceTraceFrameAssoc",
        primaryjoin=("TraceFrame.id == "
                     "foreign(IssueInstanceTraceFrameAssoc.trace_frame_id)"),
    )
Beispiel #44
0
class Entity(Base):
    """
    Represents a single entity defined in the JSON config.

    Parameters
    ----------
    name : str
        The name of the entity (e.g., 'subject', 'run', etc.)
    pattern : str
        A regex pattern used to match against file names.
        Must define at least one group, and only the first group is
        kept as the match.
    mandatory : bool
        If True, every File _must_ match this entity.
    directory : str
        Optional pattern defining a directory associated
        with the entity.
    dtype : str
        The optional data type of the Entity values. Must be
        one of 'int', 'float', 'bool', or 'str'. If None, no type
        enforcement will be attempted, which means the dtype of the
        value may be unpredictable.
    is_metadata : bool
        Indicates whether or not the Entity is derived
        from JSON sidecars (True) or is a predefined Entity from a
        config (False).
    """
    __tablename__ = 'entities'

    name = Column(String, primary_key=True)
    mandatory = Column(Boolean, default=False)
    pattern = Column(String)
    directory = Column(String, nullable=True)
    is_metadata = Column(Boolean, default=False)
    # Stored as the type's name; resolved to the builtin in _init_on_load.
    _dtype = Column(String, default='str')
    files = association_proxy("tags", "value")

    # Explicit name -> type resolution; replaces the previous eval() of the
    # stored name. Identical behavior for the four values accepted by
    # _init_on_load, without evaluating arbitrary strings.
    _DTYPES = {'str': str, 'float': float, 'int': int, 'bool': bool}

    def __init__(self,
                 name,
                 pattern=None,
                 mandatory=False,
                 directory=None,
                 dtype='str',
                 is_metadata=False):
        self.name = name
        self.pattern = pattern
        self.mandatory = mandatory
        self.directory = directory
        self.is_metadata = is_metadata

        # Accept a type object (e.g. int) as well as its name.
        if not isinstance(dtype, str):
            dtype = dtype.__name__
        self._dtype = dtype

        self._init_on_load()

    @reconstructor
    def _init_on_load(self):
        """Rebuild derived attributes (dtype, regex) after DB load."""
        if self._dtype not in ('str', 'float', 'int', 'bool'):
            raise ValueError("Invalid dtype '{}'. Must be one of 'int', "
                             "'float', 'bool', or 'str'.".format(self._dtype))
        self.dtype = self._DTYPES[self._dtype]
        self.regex = re.compile(
            self.pattern) if self.pattern is not None else None

    def __iter__(self):
        for i in self.unique():
            yield i

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)

        # Directly copy the SQLAlchemy connection before any setattr calls,
        # otherwise failures occur sporadically on Python 3.5 when the
        # _sa_instance_state attribute (randomly!) disappears.
        result._sa_instance_state = self._sa_instance_state

        memo[id(self)] = result

        # Compiled regexes are shared, everything else is deep-copied.
        for k, v in self.__dict__.items():
            if k == '_sa_instance_state':
                continue
            new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo)
            setattr(result, k, new_val)
        return result

    def match_file(self, f):
        """
        Determine whether the passed file matches the Entity.

        Parameters
        ----------
        f : BIDSFile
            The BIDSFile instance to match against.

        Returns
        -------
        the matched value if a match was found, otherwise None.
        """
        if self.regex is None:
            return None
        m = self.regex.search(f.path)
        val = m.group(1) if m is not None else None

        return self._astype(val)

    def unique(self):
        """Return all unique values/levels for the current entity.
        """
        return list(set(self.files.values()))

    def count(self, files=False):
        """Return a count of unique values or files.

        Parameters
        ----------
        files : bool
            When True, counts all files mapped to the Entity.
            When False, counts all unique values.

        Returns
        -------
        int
            Count of unique values or files.
        """
        return len(self.files) if files else len(self.unique())

    def _astype(self, val):
        """Coerce val to this entity's dtype; None passes through."""
        if val is not None and self.dtype is not None:
            val = self.dtype(val)
        return val
Beispiel #45
0
 class Article(self.Base):
     # NOTE(review): "self.Base" means this class is declared inside a
     # method (likely a test fixture); ArticleTag is assumed to be defined
     # in the enclosing scope — confirm before reusing this snippet.
     __tablename__ = 'article'
     id = Column(Integer, primary_key=True)
     # "tags" proxies the 'articletags' relationship, exposing only each
     # row's 'tag' attribute; assigning a tag creates an ArticleTag row.
     tags = association_proxy('articletags',
                              'tag',
                              creator=lambda tag: ArticleTag(tag=tag))
Beispiel #46
0
class Blocking(db.Model):
    """A period during which rooms are blocked for booking.

    Holds a date range and reason, the creating user, the set of blocked
    rooms, and an ACL ("allowed") of principals that may override it.
    """
    __tablename__ = 'blockings'
    __table_args__ = {'schema': 'roombooking'}

    id = db.Column(db.Integer, primary_key=True)
    created_by_id = db.Column(db.Integer,
                              db.ForeignKey('users.users.id'),
                              index=True,
                              nullable=False)
    created_dt = db.Column(UTCDateTime, nullable=False, default=now_utc)
    start_date = db.Column(db.Date, nullable=False, index=True)
    end_date = db.Column(db.Date, nullable=False, index=True)
    reason = db.Column(db.Text, nullable=False)

    # ACL rows; "allowed" below exposes just their principals and creates
    # BlockingPrincipal rows on assignment.
    _allowed = db.relationship('BlockingPrincipal',
                               backref='blocking',
                               cascade='all, delete-orphan',
                               collection_class=set)
    allowed = association_proxy(
        '_allowed',
        'principal',
        creator=lambda v: BlockingPrincipal(principal=v))
    blocked_rooms = db.relationship('BlockedRoom',
                                    backref='blocking',
                                    cascade='all, delete-orphan')
    #: The user who created this blocking.
    created_by_user = db.relationship('User',
                                      lazy=False,
                                      backref=db.backref('blockings',
                                                         lazy='dynamic'))

    # Python-side check; the .expression overload below provides the
    # equivalent SQL form for queries.
    @hybrid_method
    def is_active_at(self, d):
        return self.start_date <= d <= self.end_date

    @is_active_at.expression
    def is_active_at(self, d):
        return (self.start_date <= d) & (d <= self.end_date)

    def can_edit(self, user, allow_admin=True):
        # Only the creator (or, optionally, an RB admin) may edit.
        if not user:
            return False
        return user == self.created_by_user or (allow_admin
                                                and rb_is_admin(user))

    def can_delete(self, user, allow_admin=True):
        # Same rule as can_edit: creator or (optionally) an RB admin.
        if not user:
            return False
        return user == self.created_by_user or (allow_admin
                                                and rb_is_admin(user))

    def can_override(self,
                     user,
                     room=None,
                     explicit_only=False,
                     allow_admin=True):
        """Check if a user can override the blocking.

        The following persons are authorized to override a blocking:
        - the creator of the blocking
        - anyone on the blocking's ACL
        - unless explicit_only is set: rb admins and room managers (if a room is given)
        """
        if not user:
            return False
        if self.created_by_user == user:
            return True
        if not explicit_only:
            if allow_admin and rb_is_admin(user):
                return True
            if room and room.can_manage(user):
                return True
        return any(user in principal for principal in iter_acl(self.allowed))

    @property
    def external_details_url(self):
        return url_for('rb.blocking_link', blocking_id=self.id, _external=True)

    @return_ascii
    def __repr__(self):
        return format_repr(self,
                           'id',
                           'start_date',
                           'end_date',
                           _text=self.reason)
Beispiel #47
0
class Run(AbstractBase):
    # A Run is one execution of a service (possibly inside a workflow);
    # child runs reference their parent run/device and share the parent's
    # runtime key into the app-level in-memory state (app.run_db).

    __tablename__ = type = "run"
    # presumably hides runs from generic user-facing CRUD — TODO confirm
    private = True
    id = Column(Integer, primary_key=True)
    # A run may be a restart of an earlier run of the same service.
    restart_run_id = Column(Integer, ForeignKey("run.id"))
    restart_run = relationship("Run",
                               uselist=False,
                               foreign_keys=restart_run_id)
    creator = Column(SmallString, default="admin")
    properties = Column(MutableDict)
    success = Column(Boolean, default=False)
    status = Column(SmallString, default="Running")
    # runtime doubles as the key into app.run_db / app.run_logs.
    runtime = Column(SmallString)
    endtime = Column(SmallString)
    parent_id = Column(Integer, ForeignKey("run.id"))
    parent = relationship("Run", uselist=False, foreign_keys=parent_id)
    parent_runtime = Column(SmallString)
    parent_device_id = Column(Integer, ForeignKey("device.id"))
    parent_device = relationship("Device", foreign_keys="Run.parent_device_id")
    devices = relationship("Device",
                           secondary=run_device_table,
                           back_populates="runs")
    pools = relationship("Pool",
                         secondary=run_pool_table,
                         back_populates="runs")
    service_id = Column(Integer, ForeignKey("service.id"))
    service = relationship("Service",
                           back_populates="runs",
                           foreign_keys="Run.service_id")
    # Read-through proxies for the related service/workflow names.
    service_name = association_proxy("service", "name")
    workflow_id = Column(Integer, ForeignKey("workflow.id"))
    workflow = relationship("Workflow", foreign_keys="Run.workflow_id")
    workflow_name = association_proxy("workflow", "name")
    task_id = Column(Integer, ForeignKey("task.id"))
    task = relationship("Task", foreign_keys="Run.task_id")
    state = Column(MutableDict)
    results = relationship("Result",
                           back_populates="run",
                           cascade="all, delete-orphan")

    def __init__(self, **kwargs):
        """Set runtime (defaulting to now) before the generic init, and
        default parent_runtime to this run's own runtime."""
        supplied = kwargs.get("runtime")
        self.runtime = supplied or app.get_time()
        super().__init__(**kwargs)
        if not kwargs.get("parent_runtime"):
            self.parent_runtime = self.runtime

    @property
    def name(self):
        """Runs have no stored name; alias to the repr string."""
        return self.__repr__()

    def __repr__(self):
        """Human-readable identifier: runtime, service name and creator."""
        return "{} ({} run by {})".format(
            self.runtime, self.service_name, self.creator)

    def __getattr__(self, key):
        """Fallback lookup: instance dict, then the run's properties dict,
        then the related service.

        Reads through __dict__ (not getattr) to avoid re-entering this
        method. Raises AttributeError(key) — the original raised a bare
        AttributeError, which loses the attribute name in tracebacks and
        error messages.
        """
        if key in self.__dict__:
            return self.__dict__[key]
        elif key in self.__dict__.get("properties", {}):
            return self.__dict__["properties"][key]
        elif self.__dict__.get("service_id"):
            return getattr(self.service, key)
        else:
            raise AttributeError(key)

    def result(self, device=None):
        """Return the last stored Result for *device* (None selects the
        device-less result); None when no match exists."""
        matches = [r for r in self.results if r.device_name == device]
        if not matches:
            return None
        return matches.pop()

    def generate_row(self, table):
        """Return the HTML cell for this run's row in the runs table:
        buttons that open the results and logs panels for this runtime.
        """
        return [
            f"""
            <ul class="pagination pagination-lg" style="margin: 0px; width: 100px">
          <li>
            <button type="button" class="btn btn-info"
            onclick="showResultsPanel({self.service.row_properties}, '{self.runtime}')"
            data-tooltip="Results"
              ><span class="glyphicon glyphicon-list-alt"></span
            ></button>
          </li>
          <li>
            <button type="button" class="btn btn-info"
            onclick="showLogsPanel({self.service.row_properties}, '{self.runtime}')"
            data-tooltip="Logs"
              ><span class="glyphicon glyphicon-list"></span
            ></button>
          </li>
        </ul>"""
        ]

    @property
    def run_state(self):
        """Return the mutable state dict for this run: the persisted state
        when set, otherwise the in-memory entry in app.run_db (top-level
        runs own their own entry; children live under the parent's
        "services" map)."""
        if self.state:
            return self.state
        if self.runtime == self.parent_runtime:
            return app.run_db[self.runtime]
        return app.run_db[self.parent_runtime]["services"][self.service.id]

    @property
    def stop(self):
        """True when the shared state requests this run to stop."""
        current_status = self.run_state["status"]
        return current_status == "stop"

    @property
    def progress(self):
        """Device progress as "done/total (failed failed)"; "N/A" when the
        run is not in Running status or counters are missing."""
        if self.status != "Running":
            return "N/A"
        counters = self.run_state["progress"]["device"]
        try:
            done = counters['passed'] + counters['failed']
            return f"{done}/{counters['total']} ({counters['failed']} failed)"
        except KeyError:
            return "N/A"

    def compute_devices_from_query(_self, query, property,
                                   **locals):  # noqa: N805
        """Evaluate *query* and resolve each value to a device by the given
        property. Raises when any value matches no device."""
        values = _self.eval(query, **locals)
        if isinstance(values, str):
            values = [values]
        matched, missing = set(), []
        for value in values:
            device = fetch("device", allow_none=True, **{property: value})
            if device:
                matched.add(device)
            else:
                missing.append(value)
        if missing:
            raise Exception(
                f"Device query invalid targets: {', '.join(missing)}")
        return matched

    def compute_devices(self, payload):
        """Resolve the run's target devices (run-level devices and pools,
        falling back to the service's query/devices/pools) and bump the
        shared device-total counter."""
        targets = set(self.devices)
        for pool in self.pools:
            targets.update(pool.devices)
        if not targets:
            # No run-level targets: fall back to the service definition.
            if self.service.device_query:
                targets |= self.compute_devices_from_query(
                    self.service.device_query,
                    self.service.device_query_property,
                    payload=payload,
                )
            targets.update(self.service.devices)
            for pool in self.service.pools:
                targets.update(pool.devices)
        self.run_state["progress"]["device"]["total"] += len(targets)
        return list(targets)

    def init_state(self):
        """Seed the in-memory state dict for this run in app.run_db.

        Top-level runs own app.run_db[runtime]; child runs register under
        the parent's "services" map. Existing entries are never
        overwritten (a restarted/parallel lookup keeps prior state).
        """
        state = {
            "status": "Idle",
            "success": None,
            "progress": {
                "device": {
                    "total": 0,
                    "passed": 0,
                    "failed": 0
                }
            },
            "attempt": 0,
            "waiting_time": {
                "total": self.service.waiting_time,
                "left": self.service.waiting_time,
            },
            "summary": {
                "passed": [],
                "failed": []
            },
        }
        # Workflows additionally track per-edge and per-service progress.
        if self.service.type == "workflow":
            state.update({
                "edges": defaultdict(int),
                "services": defaultdict(dict)
            })
            state["progress"]["service"] = {
                "total": len(self.service.services),
                "passed": 0,
                "failed": 0,
                "skipped": 0,
            }
        if self.runtime == self.parent_runtime:
            if self.runtime in app.run_db:
                return
            app.run_db[self.runtime] = state
        else:
            service_states = app.run_db[self.parent_runtime]["services"]
            if self.service.id not in service_states:
                service_states[self.service.id] = state

    def run(self, payload=None):
        """Execute the service/workflow and return its results dict.

        Initializes shared state, increments the service's active-run
        counter, delegates to device_run, and — whatever happens — settles
        status, logs, notification and result persistence in the finally
        block.
        """
        self.init_state()
        self.run_state["status"] = "Running"
        if payload is None:
            payload = self.service.initial_payload
        try:
            app.service_db[self.service.id]["runs"] += 1
            Session.commit()
            results = self.device_run(payload)
        except Exception:
            result = (f"Running {self.service.type} '{self.service.name}'"
                      " raised the following exception:\n"
                      f"{chr(10).join(format_exc().splitlines())}\n\n"
                      "Run aborted...")
            self.log("error", result)
            results = {
                "success": False,
                "runtime": self.runtime,
                "result": result
            }
        finally:
            Session.commit()
            self.status = "Aborted" if self.stop else "Completed"
            self.run_state["status"] = self.status
            # An earlier explicit False (e.g. from a subworkflow) is sticky.
            if self.run_state["success"] is not False:
                self.run_state["success"] = results["success"]
            app.service_db[self.service.id]["runs"] -= 1
            results["endtime"] = self.endtime = app.get_time()
            results["logs"] = app.run_logs.pop(self.runtime, None)
            # Only the top-level run pops (and persists) the shared state.
            if self.runtime == self.parent_runtime:
                self.state = results["state"] = app.run_db.pop(self.runtime)
            if self.task and not self.task.frequency:
                self.task.is_active = False
            results["properties"] = {
                "run": self.properties,
                "service": self.service.get_properties(exclude=["positions"]),
            }
            if self.send_notification:
                results = self.notify(results)
            if self.runtime == self.parent_runtime:
                self.create_result(results)
            Session.commit()
        return results

    @staticmethod
    def get_device_result(args):
        """Thread-pool worker: re-fetch the device and run by id/runtime,
        then append the per-device success flag to the shared list."""
        device_id, runtime, payload, results = args
        target = fetch("device", id=device_id)
        runner = fetch("run", runtime=runtime)
        results.append(runner.get_results(payload, target))

    def device_iteration(self, payload, device):
        """Spawn a child run over the devices derived from this device's
        iteration query and return that child run's success flag."""
        # Note: **locals() here forwards exactly self/payload/device, so the
        # derivation query runs before any other local is bound.
        derived = self.compute_devices_from_query(
            self.iteration_devices, self.iteration_devices_property,
            **locals())
        child_run = factory(
            "run",
            service=self.service.id,
            devices=[d.id for d in derived],
            workflow=self.workflow.id,
            parent_device=device.id,
            restart_run=self.restart_run,
            parent=self,
            parent_runtime=self.parent_runtime,
        )
        child_run.properties = self.properties
        return child_run.run(payload)["success"]

    def device_run(self, payload):
        """Run the job across all target devices and return a summary dict.

        With no devices, runs once device-less. With an iteration query,
        delegates each device to device_iteration. Otherwise runs per
        device, optionally fanning out over a thread pool; each entry in
        `results` is the boolean success flag from get_results.
        """
        self.devices, success = self.compute_devices(payload), True
        if not self.devices:
            results = [self.get_results(payload)]
        else:
            if self.iteration_devices and not self.parent_device:
                success = all(
                    self.device_iteration(payload, device)
                    for device in self.devices)
                return {"success": success, "runtime": self.runtime}
            if self.multiprocessing and len(self.devices) > 1:
                results = []
                processes = min(len(self.devices), self.max_processes)
                # Workers append into the shared `results` list.
                process_args = [(device.id, self.runtime, payload, results)
                                for device in self.devices]
                pool = ThreadPool(processes=processes)
                pool.map(self.get_device_result, process_args)
                pool.close()
                pool.join()
            else:
                results = [
                    self.get_results(payload, device)
                    for device in self.devices
                ]
        return {"success": all(results), "runtime": self.runtime}

    def create_result(self, results, device=None):
        """Persist a Result row for this run (optionally per device) and
        record the overall success flag on the run."""
        self.success = results["success"]
        creation_kwargs = {
            "run": self,
            "result": results,
            "service": self.service_id,
            "parent_runtime": self.parent_runtime,
        }
        if self.workflow_id:
            creation_kwargs["workflow"] = self.workflow_id
        if self.parent_device_id:
            creation_kwargs["parent_device"] = self.parent_device_id
        if device:
            creation_kwargs["device"] = device.id
        factory("result", **creation_kwargs)

    def run_service_job(self, payload, device):
        """Execute the service job (with retries, post-processing and
        validation) and return its results dict containing "success".
        """
        args = (device, ) if device else ()
        for i in range(self.number_of_retries + 1):
            try:
                if i:
                    self.log("error", f"RETRY n°{i}", device)
                results = self.service.job(self, payload, *args)
                # Close the device connection when requested or when this
                # is a top-level (non-nested) run.
                if device and (getattr(self, "close_connection", False)
                               or self.runtime == self.parent_runtime):
                    self.close_device_connection(device)
                self.convert_result(results)
                try:
                    self.eval(self.service.result_postprocessing,
                              function="exec",
                              **locals())
                except SystemExit:
                    pass
                if "success" not in results:
                    results["success"] = True
                if results["success"] and self.validation_method != "none":
                    self.validate_result(results, payload, device)
                if results["success"]:
                    return results
                elif i < self.number_of_retries:
                    sleep(self.time_between_retries)
            except Exception:
                result = (f"Running {self.service.type} '{self.service.name}'"
                          " raised the following exception:\n"
                          f"{chr(10).join(format_exc().splitlines())}\n\n"
                          "Run aborted...")
                self.log("error", result, device)
                return {"success": False, "result": result}
        # All retries exhausted: return the last (failed) results.
        return results

    def get_results(self, payload, device=None):
        """Run the job for one device (or device-less), record progress and
        a Result row, and return the boolean success flag.
        """
        self.log("info", "STARTING", device)
        results = {"runtime": app.get_time(), "logs": []}
        try:
            if self.restart_run and self.service.type == "workflow":
                old_result = self.restart_run.result(device=device.name)
                # NOTE(review): membership is tested on old_result.result but
                # the value is read via old_result["payload"] — confirm that
                # Result supports item access; otherwise this likely should
                # be old_result.result["payload"].
                if old_result and "payload" in old_result.result:
                    payload.update(old_result["payload"])
            if self.service.iteration_values:
                # Run the job once per iteration target, collecting
                # per-target results; overall success requires all targets.
                targets_results = {}
                for target in self.eval(self.service.iteration_values,
                                        **locals()):
                    self.payload_helper(payload, self.iteration_variable_name,
                                        target)
                    targets_results[target] = self.run_service_job(
                        payload, device)
                results.update({
                    "result":
                    targets_results,
                    "success":
                    all(r["success"] for r in targets_results.values()),
                })
            else:
                results.update(self.run_service_job(payload, device))
        except Exception:
            results.update({
                "success": False,
                "result": chr(10).join(format_exc().splitlines())
            })
            self.log("error", chr(10).join(format_exc().splitlines()), device)
        results["endtime"] = app.get_time()
        if device:
            status = "passed" if results["success"] else "failed"
            self.run_state["progress"]["device"][status] += 1
            self.run_state["summary"][status].append(device.name)
            self.create_result(results, device)
        Session.commit()
        self.log("info", "FINISHED", device)
        return results["success"]

    def log(self, severity, content, device=None):
        """Append a timestamped entry to the parent run's in-memory log."""
        parts = [f"{app.get_time()} - {severity} - {self.service.name}"]
        if device:
            parts.append(f"DEVICE {device.name}")
        entry = " - ".join(parts) + f" : {content}"
        app.run_logs[self.parent_runtime].append(entry)

    def build_notification(self, results):
        """Assemble the notification payload (service, status, optional
        header/link and pass/fail device summaries)."""
        status = "PASS" if results["success"] else "FAILED"
        notification = {
            "Service": f"{self.service.name} ({self.service.type})",
            "Runtime": self.runtime,
            "Status": status,
        }
        if self.notification_header:
            notification["Header"] = self.notification_header
        if self.include_link_in_summary:
            address = app.config["app"]["address"]
            notification["Link"] = f"{address}/view_service_results/{self.id}"
        summary = self.run_state["summary"]
        if summary["failed"]:
            notification["FAILED"] = summary["failed"]
        if summary["passed"] and not self.display_only_failed_nodes:
            notification["PASSED"] = summary["passed"]
        return notification

    def notify(self, results):
        """Send the run notification via mail, Slack or Mattermost and
        attach the outcome under results["notification"].

        Bug fix: the sanitized ``filename`` local was computed but never
        used — the attachment name was the literal placeholder
        "results-(unknown).txt". It now interpolates the sanitized runtime.
        """
        notification = self.build_notification(results)
        file_content = deepcopy(notification)
        if self.include_device_results:
            file_content["Device Results"] = {
                device.name: fetch(
                    "result",
                    service_id=self.service_id,
                    parent_runtime=self.parent_runtime,
                    device_id=device.id,
                ).result
                for device in self.devices
            }
        try:
            if self.send_notification_method == "mail":
                # Strip characters that are invalid in attachment names.
                filename = self.runtime.replace(".", "").replace(":", "")
                result = app.send_email(
                    f"{self.name} ({'PASS' if results['success'] else 'FAILED'})",
                    app.str_dict(notification),
                    recipients=self.mail_recipient,
                    filename=f"results-{filename}.txt",
                    file_content=app.str_dict(file_content),
                )
            elif self.send_notification_method == "slack":
                result = SlackClient(environ.get("SLACK_TOKEN")).api_call(
                    "chat.postMessage",
                    channel=app.config["slack"]["channel"],
                    text=notification,
                )
            else:
                result = post(
                    app.config["mattermost"]["url"],
                    verify=app.config["mattermost"]["verify_certificate"],
                    data=dumps({
                        "channel": app.config["mattermost"]["channel"],
                        "text": notification,
                    }),
                ).text
            results["notification"] = {"success": True, "result": result}
        except Exception:
            results["notification"] = {
                "success": False,
                "error": "\n".join(format_exc().splitlines()),
            }
        return results

    def get_credentials(self, device):
        """Return the (username, password) pair used to connect to `device`."""
        if self.credentials == "user":
            # Credentials of the user who launched the run.
            return app.get_user_credentials()
        if self.credentials == "device":
            return device.username, device.password
        # Custom credentials: both fields go through variable substitution.
        # No extra locals are defined here so locals() still holds exactly
        # {self, device}, as in the original expression.
        return (
            self.sub(self.service.custom_username, locals()),
            self.sub(self.service.custom_password, locals()),
        )

    def convert_result(self, result):
        """Convert result["result"] according to the configured method.

        On a parsing failure, the whole result is replaced by an error
        payload that preserves the raw text response.
        """
        if self.conversion_method == "none" or "result" not in result:
            return result
        # Dispatch table: conversion method name -> converter callable.
        converters = {"text": str, "json": loads, "xml": parse}
        converter = converters.get(self.conversion_method)
        try:
            if converter is not None:
                result["result"] = converter(result["result"])
        except (ExpatError, JSONDecodeError) as exc:
            result = {
                "success": False,
                "text_response": result,
                "error": f"Conversion to {self.conversion_method} failed",
                "exception": str(exc),
            }
        return result

    def validate_result(self, results, payload, device):
        """Validate results["result"] against the service's match criteria.

        Sets results["success"] (inverted when negative_logic is on) and
        records the match pattern and logic used.
        """
        if self.validation_method == "text":
            match = self.sub(self.content_match, locals())
            str_result = str(results["result"])
            if self.delete_spaces_before_matching:
                match, str_result = map(self.space_deleter,
                                        (match, str_result))
            # `and` binds tighter than `or`: regex search when
            # content_match_regex is set, plain substring test otherwise.
            success = (self.content_match_regex
                       and bool(search(match, str_result))
                       or match in str_result and not self.content_match_regex)
        else:
            # Dictionary validation: substitute variables into the expected
            # dict, then test inclusion/equality.
            match = self.sub(self.dict_match, locals())
            success = self.match_dictionary(results["result"], match)
        results["success"] = not success if self.negative_logic else success
        results.update({"match": match, "negative_logic": self.negative_logic})

    def match_dictionary(self, result, match, first=True):
        """Check whether `match` is included in (or equal to) `result`.

        For "dict_equal" validation this is a strict equality test;
        otherwise every key/value pair of `match` must be found somewhere
        in the (possibly nested) `result` structure.
        """
        if self.validation_method == "dict_equal":
            # NOTE(review): compares against self.dict_match rather than the
            # substituted `match` argument — confirm this is intentional.
            return result == self.dict_match
        else:
            # Copy on the first call so the caller's match dict is not
            # consumed; recursive calls keep popping from the same copy.
            match_copy = deepcopy(match) if first else match
            if isinstance(result, dict):
                for k, v in result.items():
                    if k in match_copy and match_copy[k] == v:
                        match_copy.pop(k)
                    else:
                        self.match_dictionary(v, match_copy, False)
            elif isinstance(result, list):
                for item in result:
                    self.match_dictionary(item, match_copy, False)
            # Inclusion holds when every expected pair has been matched.
            return not match_copy

    def transfer_file(self, ssh_client, files):
        """Copy `files` ((source, destination) pairs) over SFTP or SCP."""
        transport = ssh_client.get_transport()
        if self.protocol == "sftp":
            client = SFTPClient.from_transport(
                transport,
                window_size=self.window_size,
                max_packet_size=self.max_transfer_size,
            )
        else:
            client = SCPClient(transport)
        with client:
            # `direction` names the client method to call (presumably
            # "get" or "put" — confirm against the service schema).
            transfer = getattr(client, self.direction)
            for source, destination in files:
                transfer(source, destination)

    def payload_helper(self,
                       payload,
                       name,
                       value=None,
                       device=None,
                       section=None,
                       operation="set"):
        """Get or set a variable nested under the payload's "variables" dict.

        With a value, stores it (or applies `operation`, e.g. "append", to
        the existing container); without one, returns the stored value and
        raises when the variable was never set.
        """
        scope = payload.setdefault("variables", {})
        if device:
            # Device-scoped variables live under variables > devices > <name>.
            scope = scope.setdefault("devices", {}).setdefault(device, {})
        if section:
            scope = scope.setdefault(section, {})
        if value is None:
            # Getter mode: fail loudly when the variable is missing.
            if name not in scope:
                raise Exception(
                    f"Payload Editor: {name} not found in {scope}.")
            return scope[name]
        if operation == "set":
            scope[name] = value
        else:
            # e.g. operation="append" mutates the stored container in place.
            getattr(scope[name], operation)(value)

    def get_var(self, payload, name, device=None, **kwargs):
        """Read a payload variable (thin wrapper around payload_helper)."""
        return self.payload_helper(payload, name, device=device, **kwargs)

    def get_result(self, service_name, device=None, workflow=None):
        """Fetch the most recent result of `service_name` in this run chain.

        Searches all runs sharing this run's parent runtime (optionally
        restricted to a given workflow), matching the service by scoped
        name first, then by plain name; falls back to the restart run's
        chain when nothing is found. Returns None when no result exists.
        """
        def filter_run(query, property):
            # Keep only runs whose service matches on the given property.
            query = query.filter(models["run"].service.has(
                getattr(models["service"], property) == service_name))
            return query.all()

        def recursive_search(run: "Run"):
            if not run:
                return None
            query = Session.query(models["run"]).filter(
                models["run"].parent_runtime == run.parent_runtime)
            if workflow or self.workflow:
                name = workflow or self.workflow.name
                # Bug fix: Query.filter returns a new query; the previous
                # code discarded it, so the workflow restriction was lost.
                query = query.filter(models["run"].workflow.has(
                    models["workflow"].name == name))
            runs = filter_run(query, "scoped_name") or filter_run(
                query, "name")
            results = list(filter(None, [run.result(device) for run in runs]))
            if not results:
                # Nothing found: try the chain of the run this one restarted.
                return recursive_search(run.restart_run)
            else:
                return results.pop().result

        return recursive_search(self)

    def python_code_kwargs(_self, **locals):  # noqa: N805
        """Build the namespace exposed to user-supplied Python snippets."""
        payload = locals.get("payload", {})
        namespace = {
            "config": app.config,
            "get_var": partial(_self.get_var, payload),
            "get_result": _self.get_result,
            "workflow": _self.workflow,
            "set_var": partial(_self.payload_helper, payload),
            "parent_device": _self.parent_device or locals.get("device"),
        }
        # Caller-provided variables override the defaults above, exactly as
        # the trailing **locals did in the original dict literal.
        namespace.update(locals)
        return namespace

    def eval(_self, query, function="eval", **locals):  # noqa: N805
        # Run `query` through eval/exec with the service variables in scope.
        # `builtins` here is a file-level mapping (it is subscripted by
        # function name), not the stdlib module.
        return builtins[function](query, _self.python_code_kwargs(**locals))

    def sub(self, input, variables):
        """Recursively substitute {{...}} expressions in strings/lists/dicts.

        Each {{expr}} occurrence is evaluated with `variables` in scope and
        replaced by the str() of the result; non-string leaves pass through.
        """
        # `compile` is re.compile here (the result is used via .sub below).
        pattern = compile("{{(.*?)}}")

        def evaluate(match):
            # Strip the surrounding braces, evaluate the inner expression.
            return str(self.eval(match.group()[2:-2], **variables))

        def substitute(value):
            if isinstance(value, str):
                return pattern.sub(evaluate, value)
            if isinstance(value, list):
                return [substitute(item) for item in value]
            if isinstance(value, dict):
                return {substitute(k): substitute(v) for k, v in value.items()}
            return value

        return substitute(input)

    def space_deleter(self, input):
        """Return `input` with every whitespace character removed."""
        return "".join(char for char in input if not char.isspace())

    def update_netmiko_connection(self, connection):
        """Refresh a cached Netmiko connection with current service settings.

        Re-applies the service-level connection parameters, makes sure the
        connection is in (or out of) configuration mode as requested, and
        returns the connection.
        """
        for property in ("fast_cli", "timeout", "global_delay_factor"):
            service_value = getattr(self.service, property)
            if service_value:
                setattr(connection, property, service_value)
        try:
            if not hasattr(connection, "check_config_mode"):
                self.log("error",
                         "Netmiko 'check_config_mode' method is missing.")
                # Bug fix: still hand the (usable) connection back to the
                # caller instead of implicitly returning None.
                return connection
            mode = connection.check_config_mode()
            if mode and not self.config_mode:
                connection.exit_config_mode()
            elif self.config_mode and not mode:
                connection.config_mode()
        except Exception as exc:
            self.log("error", f"Failed to honor the config mode {exc}")
        return connection

    def netmiko_connection(self, device):
        """Return a Netmiko connection to `device`, reusing the cache if possible.

        A cached connection is refreshed with the current service settings;
        otherwise a new one is opened, optionally elevated to enable/config
        mode, and stored in the per-runtime connection cache.
        """
        connection = self.get_or_close_connection("netmiko", device)
        if connection:
            self.log("info", "Using cached Netmiko connection", device)
            return self.update_netmiko_connection(connection)
        self.log("info", "Opening new Netmiko connection", device)
        username, password = self.get_credentials(device)
        # Driver comes either from the device inventory or the service config.
        driver = device.netmiko_driver if self.use_device_driver else self.driver
        netmiko_connection = ConnectHandler(
            device_type=driver,
            ip=device.ip_address,
            port=device.port,
            username=username,
            password=password,
            secret=device.enable_password,
            fast_cli=self.fast_cli,
            timeout=self.timeout,
            global_delay_factor=self.global_delay_factor,
        )
        if self.enable_mode:
            netmiko_connection.enable()
        if self.config_mode:
            netmiko_connection.config_mode()
        app.connections_cache["netmiko"][self.parent_runtime][
            device.name] = netmiko_connection
        return netmiko_connection

    def napalm_connection(self, device):
        """Return a NAPALM connection to `device`, reusing the cache if possible."""
        cached = self.get_or_close_connection("napalm", device)
        if cached:
            self.log("info", "Using cached NAPALM connection", device)
            return cached
        self.log("info", "Opening new NAPALM connection", device)
        username, password = self.get_credentials(device)
        optional_args = self.service.optional_args or {}
        # Default the enable secret to the device's enable password.
        optional_args.setdefault("secret", device.enable_password)
        driver = get_network_driver(
            device.napalm_driver if self.use_device_driver else self.driver)
        napalm_connection = driver(
            hostname=device.ip_address,
            username=username,
            password=password,
            timeout=self.timeout,
            optional_args=optional_args,
        )
        napalm_connection.open()
        app.connections_cache["napalm"][self.parent_runtime][
            device.name] = napalm_connection
        return napalm_connection

    def get_or_close_connection(self, library, device):
        """Return a healthy cached connection, or None after discarding a bad one.

        Returns None implicitly when the cache is empty, when a new
        connection was requested (start_new_connection), or when the
        cached connection failed its liveness check and was closed.
        """
        connection = self.get_connection(library, device)
        if not connection:
            return
        if self.start_new_connection:
            # disconnect() returns None: the caller must open a fresh one.
            return self.disconnect(library, device, connection)
        if library == "napalm":
            if connection.is_alive():
                return connection
            else:
                self.disconnect(library, device, connection)
        else:
            # Netmiko: probing the prompt is the liveness check.
            try:
                connection.find_prompt()
                return connection
            except Exception:
                self.disconnect(library, device, connection)

    def get_connection(self, library, device):
        """Look up a cached connection for this run and device, or None."""
        run_cache = app.connections_cache[library].get(self.parent_runtime, {})
        return run_cache.get(device.name)

    def close_device_connection(self, device):
        """Tear down any netmiko/napalm connection cached for `device`."""
        for library in ("netmiko", "napalm"):
            connection = self.get_connection(library, device)
            if connection:
                self.disconnect(library, device, connection)

    def disconnect(self, library, device, connection):
        """Close `connection` and drop it from the per-run cache."""
        try:
            # The two libraries expose different close methods.
            if library == "netmiko":
                connection.disconnect()
            else:
                connection.close()
            app.connections_cache[library][self.parent_runtime].pop(
                device.name)
            self.log("info", f"Closed {library} connection", device)
        except Exception as exc:
            self.log("error",
                     f"Error while closing {library} connection ({exc})",
                     device)

    def generate_yaml_file(self, path, device):
        """Dump the device's last-run status fields to <path>/data.yml."""
        properties = (
            "last_failure",
            "last_runtime",
            "last_update",
            "last_status",
        )
        data = {name: getattr(device, name) for name in properties}
        with open(path / "data.yml", "w") as file:
            yaml.dump(data, file, default_flow_style=False)
Beispiel #48
0
class RecurringEvent(Event):
    """Represents an individual one-off instance of a recurring event,
    including cancelled events.
    """
    __mapper_args__ = {'polymorphic_identity': 'recurringevent'}
    __table_args__ = None

    id = Column(Integer, ForeignKey('event.id', ondelete='CASCADE'),
                primary_key=True)
    rrule = Column(String(RECURRENCE_MAX_LEN))
    exdate = Column(Text)  # There can be a lot of exception dates
    until = Column(FlexibleDateTime, nullable=True)
    start_timezone = Column(String(35))

    # UIDs of the overriding (exception) events.
    override_uids = association_proxy('overrides', 'uid')

    def __init__(self, **kwargs):
        """Build the event and parse its RRULE; parse errors are logged,
        not raised, so a bad recurrence never blocks creation."""
        self.start_timezone = kwargs.pop('original_start_tz', None)
        kwargs['recurrence'] = repr(kwargs['recurrence'])
        super(RecurringEvent, self).__init__(**kwargs)
        try:
            self.unwrap_rrule()
        except Exception as e:
            log.error("Error parsing RRULE entry", event_id=self.id,
                      error=e, exc_info=True)

    # FIXME @karim: use an overrided property instead of a reconstructor.
    @reconstructor
    def reconstruct(self):
        # Re-derive rrule/until/exdate when the object is loaded from the DB.
        try:
            self.unwrap_rrule()
        except Exception as e:
            log.error("Error parsing stored RRULE entry", event_id=self.id,
                      error=e, exc_info=True)

    def inflate(self, start=None, end=None):
        """Expand this event's RRULE into a series of InflatedEvents,
        optionally bounded by the [start, end] time range."""
        from inbox.events.recurring import get_start_times
        occurrences = get_start_times(self, start, end)
        return [InflatedEvent(self, o) for o in occurrences]

    def unwrap_rrule(self):
        """Unwrap the RRULE list of strings into RecurringEvent properties."""
        for item in self.recurring:
            if item.startswith('RRULE'):
                self.rrule = item
                if 'UNTIL' in item:
                    for p in item.split(';'):
                        if p.startswith('UNTIL'):
                            # Strip the "UNTIL=" prefix before parsing.
                            self.until = parse_rrule_datetime(p[6:])
            elif item.startswith('EXDATE'):
                self.exdate = item

    def all_events(self, start=None, end=None):
        """Return all inflated events plus overrides matching the range."""
        overrides = self.overrides
        if start:
            overrides = overrides.filter(RecurringEventOverride.start > start)
        if end:
            overrides = overrides.filter(RecurringEventOverride.end < end)

        # Google calendar events have the same uid __globally_. This means
        # that if I created an event, shared it with you and that I also
        # shared my calendar with you, override to this events for calendar B
        # may show up in a query for calendar A.
        # (https://phab.nylas.com/T3420)
        overrides = overrides.filter(
                RecurringEventOverride.calendar_id == self.calendar_id)

        events = list(overrides)
        overridden_starts = [e.original_start_time for e in events]
        # Remove cancellations from the override set.
        # Bug fix: filter() returns an iterator on Python 3, which has no
        # .append(); build a real list so the loop below can extend it.
        events = [e for e in events if not e.cancelled]
        # If an override has not changed the start time for an event, including
        # if the override is a cancellation, the RRULE doesn't include an
        # exception for it. Filter out unnecessary inflated events
        # to cover this case by checking the start time.
        for e in self.inflate(start, end):
            if e.start not in overridden_starts:
                events.append(e)
        return sorted(events, key=lambda e: e.start)

    def update(self, event):
        """Copy recurrence-specific fields from `event` (same type only)."""
        super(RecurringEvent, self).update(event)
        if isinstance(event, type(self)):
            self.rrule = event.rrule
            self.exdate = event.exdate
            self.until = event.until
            self.start_timezone = event.start_timezone
Beispiel #49
0
class SharedAttributeModel(_Jsonable, Model, metaclass=RichMixinMeta):
    """
    An attribute shared by multiple entities.

    A second (secondary) table is created to track the relation between
    the attribute and the entities it is attached to.
    """
    # TODO: Error on creation
    rmixin_registry = SharedAttributeRegistry
    # When given, only create relationships with the listed entities;
    # otherwise the attribute is generic (available to every entity).
    # Use require or namespace for entity.
    __entities__ = None
    # Collection class to use for the relationship.
    __collector__ = list
    # If specified an association_proxy is created, and this is its key.
    __outline__ = None  # TODO: Accept a dict
    # If true, cache the entry automatically after it's modified or created.
    __autocache__ = False
    # Whether this attribute is cacheable at all.
    __cacheable__ = False

    entity_models = []

    # UUIDs of the entities this attribute is attached to; assigning a UUID
    # looks up the matching MetadashEntity row.
    parents = association_proxy(
        'entity',
        'uuid',
        creator=lambda uuid: MetadashEntity.query.filter(
            MetadashEntity.index_uuid == uuid).first(
            )  # TODO: Error on empty query
    )

    def __repr__(self):
        return '<Metadash Shared Attr "{}">'.format(self.uuid)

    def as_dict(self, detail=False):
        # Include parent entity UUIDs only when a detailed view is requested.
        dict_ = super(SharedAttributeModel, self).as_dict()
        if detail:
            dict_['parents'] = _format_for_json(self.parents)
        return dict_

    @staticmethod
    def build_relationship(self):
        # Wire this attribute class up to every registered entity model:
        # create the many-to-many relationship, the backref, and (optionally)
        # an association proxy on the entity side.
        # NOTE(review): declared @staticmethod but takes the class as its
        # first argument — it is invoked as subclass.build_relationship(subclass)
        # from sub_init below.
        attribute = self
        entities = ([
            entity for entity in EntityRegistry.values()
            if attribute.ref_name not in entity.attribute_models
        ])
        for model in entities:
            model.attribute_models[attribute.ref_name] = attribute
            parentname = _get_alias_dict(model.__dict__)
            attribute.entity_models.append(parentname)

            # TODO: autocache = attribute.__autocache__
            cacheable = attribute.__cacheable__
            if cacheable:
                model.__cacheable_attributes__.add(attribute.ref_name)

            backref_name = attribute.__backref_name__
            proxy_name = attribute.__proxy_name__
            collector = attribute.__collector__
            outline = attribute.__outline__

            # Many-to-many relationship through the attribute's secondary
            # table, with a backref installed on the entity model.
            relationship = db.relationship(
                model,
                secondary=attribute.__secondary__,
                primaryjoin=attribute.__secondary__.c.attr_uuid ==
                attribute.uuid,
                secondaryjoin=foreign(
                    attribute.__secondary__.c.entity_uuid) == remote(
                        model.uuid),
                backref=backref(
                    backref_name,
                    primaryjoin=attribute.__secondary__.c.entity_uuid ==
                    model.uuid,
                    secondaryjoin=foreign(
                        attribute.__secondary__.c.attr_uuid) == remote(
                            attribute.uuid),
                    collection_class=collector),
                uselist=True,
                cascade="save-update, merge, refresh-expire, expunge",
            )

            setattr(attribute, parentname, relationship)

            if outline:
                # Expose the attribute through an association proxy keyed
                # by `outline`, with a get-or-create creator.
                if hasattr(collector, '__proxy_args__'):
                    creator = collector.__proxy_args__.get('creator', None)
                else:
                    creator = None

                get_or_create_attribute = GetOrCreateAttribute(
                    attribute, creator)

                if hasattr(collector, '__proxy_args__'):
                    # NOTE(review): plain dicts have no .set() method — if
                    # __proxy_args__ is a dict this raises; confirm its type.
                    collector.__proxy_args__.set('creator',
                                                 get_or_create_attribute)
                    setattr(
                        model, proxy_name,
                        association_proxy(backref_name, outline,
                                          **collector.__proxy_args__))
                else:
                    setattr(
                        model, proxy_name,
                        association_proxy(backref_name,
                                          outline,
                                          creator=get_or_create_attribute))

    def sub_init(baseclass, subclass, subclass_name, baseclasses,
                 subclass_dict):
        # RichMixinMeta hook: wire up relationships once a subclass exists.
        subclass.build_relationship(subclass)

    def sub_new(mcs, classname, bases, dict_, **kwargs):
        # RichMixinMeta hook: inject table, columns and relationship
        # metadata into the subclass dict before the class is created.
        # TODO: collector = dict_.setdefault('__collector__', list if not unique_attribute else None)
        outline = dict_.setdefault('__outline__', None)

        tablename = _get_table_name_dict(dict_)
        aliasname = _get_alias_dict(dict_)

        proxy_name = dict_.setdefault('__proxy_name__', pluralize(aliasname))
        backref_name = dict_.setdefault('__backref_name__',
                                        "{}_ref".format(proxy_name))
        dict_.setdefault('entity_models',
                         SharedAttributeModel.entity_models[:])

        dict_['ref_name'] = proxy_name if outline else backref_name

        # Build foreign key and relationship.
        has_primary_key = False
        for value in dict_.values():
            if isinstance(value, db.Column):
                if value.primary_key:
                    has_primary_key = True
                if value.unique_attribute:
                    value.unique = True
        # The uuid column doubles as primary key when none was declared.
        dict_['uuid'] = db.Column(UUID(),
                                  index=True,
                                  nullable=False,
                                  unique=True,
                                  primary_key=not has_primary_key,
                                  default=uuid.uuid1)
        # Secondary table linking attribute rows to entity rows.
        dict_['__secondary__'] = (db.Table(
            "metadash_entities_{}".format(tablename),
            db.Column('entity_uuid', UUID(), index=True),
            db.Column('attr_uuid', UUID(), index=True),
            db.ForeignKeyConstraint(['attr_uuid'], [dict_['uuid']],
                                    ondelete="CASCADE"),
            db.ForeignKeyConstraint(['entity_uuid'],
                                    [MetadashEntity.index_uuid],
                                    ondelete="CASCADE")))
        dict_['entity'] = db.relationship(
            MetadashEntity,
            secondary=dict_['__secondary__'],
            primaryjoin=dict_['__secondary__'].c.attr_uuid == dict_['uuid'],
            secondaryjoin=dict_['__secondary__'].c.entity_uuid ==
            MetadashEntity.index_uuid,
            # backref=backref("shared_attributes"), TODO
        )
Beispiel #50
0
class IssueInstance(Base, PrepareMixin, MutableRecordMixin):  # noqa
    """A particular instance of an issue found in a run."""

    __tablename__ = "issue_instances"

    # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.
    id: DBID = Column(BIGDBIDType, primary_key=True)

    location = Column(
        SourceLocationType,
        nullable=False,
        doc="Location (possibly a range) of the issue",
    )

    # File where the issue was found (SharedText row).
    filename_id = Column(BIGDBIDType,
                         nullable=False,
                         server_default="0",
                         default=0)

    filename = relationship(
        "SharedText",
        primaryjoin="foreign(SharedText.id) == IssueInstance.filename_id",
        uselist=False,
    )

    # Callable in which the issue was found (SharedText row).
    callable_id = Column(BIGDBIDType,
                         nullable=False,
                         server_default="0",
                         default=0)

    callable = relationship(
        "SharedText",
        primaryjoin="foreign(SharedText.id) == IssueInstance.callable_id",
        uselist=False,
    )

    is_new_issue = Column(
        Boolean,
        index=True,
        default=False,
        doc="True if the issue did not exist before this instance",
    )

    run_id = Column(BIGDBIDType, nullable=False, index=True)

    issue_id = Column(BIGDBIDType, nullable=False, index=True)

    issue = relationship(
        "Issue",
        primaryjoin="foreign(Issue.id) == IssueInstance.issue_id",
        uselist=False,
    )

    fix_info_id = Column(BIGDBIDType, nullable=True)

    fix_info = relationship(
        "IssueInstanceFixInfo",
        primaryjoin=("foreign(IssueInstanceFixInfo.id) == "
                     "IssueInstance.fix_info_id"),
        uselist=False,
    )

    message_id = Column(BIGDBIDType, nullable=True)

    message = relationship(
        "SharedText",
        primaryjoin="foreign(SharedText.id) == IssueInstance.message_id",
        uselist=False,
    )

    # Trace frames reached through the association table below.
    trace_frames = association_proxy("issue_instance_trace_frame",
                                     "trace_frame")

    issue_instance_trace_frame = relationship(
        "IssueInstanceTraceFrameAssoc",
        primaryjoin=(
            "IssueInstance.id == "
            "foreign(IssueInstanceTraceFrameAssoc.issue_instance_id)"),
    )

    # Shared texts reached through the association table below.
    shared_texts = association_proxy("issue_instance_shared_text",
                                     "shared_text")

    issue_instance_shared_text = relationship(
        "IssueInstanceSharedTextAssoc",
        primaryjoin=(
            "IssueInstance.id == "
            "foreign(IssueInstanceSharedTextAssoc.issue_instance_id)"),
    )

    min_trace_length_to_sources = Column(
        Integer, nullable=True, doc="The minimum trace length to sources")

    min_trace_length_to_sinks = Column(Integer,
                                       nullable=True,
                                       doc="The minimum trace length to sinks")

    rank = Column(
        Integer,
        server_default="0",
        doc="The higher the rank, the higher the priority for this issue",
    )

    callable_count = Column(
        Integer,
        server_default="0",
        doc="Number of issues in this callable for this run",
    )

    min_trace_length_to_entrypoints = Column(
        Integer, nullable=True, doc="The minimum trace length to entrypoints")

    def get_shared_texts_by_kind(self, kind: SharedTextKind):
        """Return the shared texts of the given kind for this instance."""
        return [text for text in self.shared_texts if text.kind == kind]

    def get_trace_frames_by_kind(self, kind: TraceKind):
        """Return the trace frames of the given kind for this instance."""
        return [frame for frame in self.trace_frames if frame.kind == kind]

    @classmethod
    def merge(cls, session, items):
        """Prepare items for insertion, propagating issue newness."""
        for i in items:
            # If the issue is new, then the instance has to be new. But note
            # that we still may need RunDiffer, because issues that disappeared
            # for a while and then came back are also marked new.
            i.is_new_issue = i.issue_id.is_new
            yield i
Beispiel #51
0
class TVDBSeries(Base):
    """Database model caching a TVDB series; actors and posters are
    fetched lazily on first access."""

    __tablename__ = "tvdb_series"

    id = Column(Integer, primary_key=True, autoincrement=False)
    last_updated = Column(Integer)
    expired = Column(Boolean)
    name = Column(Unicode)
    language = Column(Unicode)
    rating = Column(Float)
    status = Column(Unicode)
    runtime = Column(Integer)
    airs_time = Column(Unicode)
    airs_dayofweek = Column(Unicode)
    content_rating = Column(Unicode)
    network = Column(Unicode)
    overview = Column(Unicode)
    imdb_id = Column(Unicode)
    zap2it_id = Column(Unicode)
    _banner = Column('banner', Unicode)

    _first_aired = Column('first_aired', DateTime)
    first_aired = text_date_synonym('_first_aired')
    _aliases = Column('aliases', Unicode)
    aliases = json_synonym('_aliases')
    _actors = Column('actors', Unicode)
    actors_list = json_synonym('_actors')
    _posters = Column('posters', Unicode)
    posters_list = json_synonym('_posters')

    _genres = relation('TVDBGenre', secondary=genres_table)
    genres = association_proxy('_genres', 'name')

    episodes = relation('TVDBEpisode',
                        backref='series',
                        cascade='all, delete, delete-orphan')

    def __init__(self, id):
        """
        Looks up the series on TVDB and creates a new database model for it.
        These instances should only be added to a session via `session.merge`.

        Raises LookupError when the TVDB request fails.
        """
        self.id = id

        try:
            series = TVDBRequest().get('series/%s' % self.id)
        except requests.RequestException as e:
            raise LookupError('Error updating data from tvdb: %s' % e)

        self.id = series['id']
        self.language = 'en'
        self.last_updated = series['lastUpdated']
        self.name = series['seriesName']
        self.rating = float(
            series['siteRating']) if series['siteRating'] else 0.0
        self.status = series['status']
        self.runtime = int(series['runtime']) if series['runtime'] else 0
        self.airs_time = series['airsTime']
        self.airs_dayofweek = series['airsDayOfWeek']
        self.content_rating = series['rating']
        self.network = series['network']
        self.overview = series['overview']
        self.imdb_id = series['imdbId']
        self.zap2it_id = series['zap2itId']
        self.first_aired = series['firstAired']
        self.expired = False
        self.aliases = series['aliases']
        self._banner = series['banner']

        genres = _get_db_genres(series['genre'])
        self._genres = [TVDBGenre(**g) for g in genres]

        # Actors and posters are lazily populated on first access.
        self._actors = None
        self._posters = None

    def __repr__(self):
        return '<TVDBSeries name=%s,tvdb_id=%s>' % (self.name, self.id)

    @property
    def banner(self):
        """Full banner URL, or None when the series has no banner."""
        if self._banner:
            return TVDBRequest.BANNER_URL + self._banner

    @property
    def actors(self):
        return self.get_actors()

    @property
    def posters(self):
        return self.get_posters()

    def get_actors(self):
        """Return the actor names, fetching them from TVDB on first use."""
        if not self._actors:
            # Idiom fix: pass lazy %-style args to the logger instead of
            # pre-formatting the message.
            log.debug('Looking up actors for series %s', self.name)
            try:
                actors_query = TVDBRequest().get('series/%s/actors' % self.id)
                self.actors_list = [a['name'] for a in actors_query
                                    ] if actors_query else []
            except requests.RequestException as e:
                # A 404 simply means the series has no actor records.
                if e.response is not None and e.response.status_code == 404:
                    self.actors_list = []
                else:
                    raise LookupError('Error updating actors from tvdb: %s' %
                                      e)

        return self.actors_list

    def get_posters(self):
        """Return the top poster URLs, fetching them from TVDB on first use."""
        if not self._posters:
            log.debug('Getting top 5 posters for series %s', self.name)
            try:
                poster_query = TVDBRequest().get('series/%s/images/query' %
                                                 self.id,
                                                 keyType='poster')
                self.posters_list = [p['fileName'] for p in poster_query[:5]
                                     ] if poster_query else []
            except requests.RequestException as e:
                if e.response is not None and e.response.status_code == 404:
                    self.posters_list = []
                else:
                    raise LookupError('Error updating posters from tvdb: %s' %
                                      e)

        return [TVDBRequest.BANNER_URL + p for p in self.posters_list]

    def to_dict(self):
        """Serialize the series to a plain dict for API responses."""
        return {
            'tvdb_id': self.id,
            'last_updated': datetime.fromtimestamp(
                self.last_updated).strftime('%Y-%m-%d %H:%M:%S'),
            'expired': self.expired,
            'series_name': self.name,
            'language': self.language,
            'rating': self.rating,
            'status': self.status,
            'runtime': self.runtime,
            'airs_time': self.airs_time,
            'airs_dayofweek': self.airs_dayofweek,
            'content_rating': self.content_rating,
            'network': self.network,
            'overview': self.overview,
            'imdb_id': self.imdb_id,
            'zap2it_id': self.zap2it_id,
            'banner': self.banner,
            'posters': self.posters,
            'genres': [g for g in self.genres],
            'actors': self.actors,
            'first_aired': self.first_aired,
        }
Beispiel #52
0
class User(UserMixin, db.Model):
    """Account model: authentication, profile data, the follow graph, group
    membership and relations to user-generated content.
    """
    __tablename__ = "users"

    id = db.Column(db.Integer, nullable=False, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    # TODO: must not be empty, and must not have leading/trailing whitespace
    username = db.Column(db.String(32),
                         nullable=False,
                         unique=True,
                         index=True,
                         default=defaultUsername)
    password_hash = db.Column(db.String(128))
    avatar = db.Column(db.String(64),
                       nullable=False,
                       default='default_avatar.jpg')
    self_intro = db.Column(db.String(40), default='')
    # gender: 1 = male, 2 = female, 0 = unknown
    gender = db.Column(db.Integer, default=0)
    # user status
    member_since = db.Column(db.DateTime, default=datetime.utcnow)
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)
    """ Relationships """
    # statuses posted by the user
    statuses = db.relationship('Status',
                               backref='user',
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    status_replies = db.relationship('StatusReply',
                                     backref='user',
                                     lazy='dynamic',
                                     cascade='all, delete-orphan')
    # follow graph (self-referential many-to-many via user_follows)
    followed = db.relationship(
        'User',
        secondary=user_follows,
        primaryjoin=(user_follows.c.follower_id == id),
        secondaryjoin=(user_follows.c.followed_id == id),
        backref=db.backref('followers', lazy='dynamic'),
        lazy='dynamic')
    # group memberships
    group_memberships = db.relationship("GroupMembership",
                                        back_populates="user",
                                        cascade='all, delete-orphan',
                                        lazy='dynamic')
    # convenience proxy so u.groups yields the Group objects directly
    groups = association_proxy("group_memberships", "group")
    # second-hand sales
    sales = db.relationship('Sale', backref='user', lazy='dynamic')
    sale_comments = db.relationship('SaleComment',
                                    backref='user',
                                    lazy='dynamic')
    # private messages
    messages = db.relationship('Message', backref='user', lazy='dynamic')

    def generate_auth_token(self, expiration):
        """Return a signed, expiring auth token encoding this user's id."""
        s = Serializer('auth' + current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Resolve a token to its User, or None if invalid/expired.

        Checks the redis token cache first; on a miss, verifies the
        signature, caches the mapping, and returns the user.
        """
        import app.cache as Cache
        import app.cache.redis_keys as Keys
        token_key = Keys.user_token.format(token)
        data = rd.get(token_key)
        if data is not None:  # fix: identity comparison with None, not !=
            return Cache.get_user(data.decode())
        s = Serializer('auth' + current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
            rd.set(token_key, data['id'], Keys.user_token_expire)
            return Cache.get_user(data['id'])
        except Exception:
            # Bad/expired signature (or malformed payload): treat as
            # unauthenticated. Narrowed from a bare `except:` so that
            # SystemExit/KeyboardInterrupt still propagate.
            return None

    def verify_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    @staticmethod
    def process_json(json_user):
        """Augment a cached user dict with request-specific fields.

        Adds `followed_by_me` (relative to g.user when present) and
        converts the timestamps to HTTP date strings.
        """
        import app.cache as Cache
        id = json_user['id']
        t = Cache.is_user_followed_by(id, g.user.id) if\
                hasattr(g, 'user') else False
        json_user['followed_by_me'] = t
        json_user['last_seen'] = to_http_date(json_user['last_seen'])
        json_user['member_since'] = to_http_date(json_user['member_since'])
        return json_user

    @logfuncall
    def to_json(self, cache=False):
        """Serialize the user.

        With cache=True, returns a flat dict (no nested values, no
        request-dependent fields) suitable for storage in a redis hash;
        otherwise the dict is post-processed via process_json().
        """
        image_server = current_app.config['IMAGE_SERVER']
        # NOTE: keep json_user without nested dict in order to
        # perfectly store it to redis hash data type
        json_user = {
            'id': self.id,
            'username': self.username,
            'avatar': image_server + self.avatar,
            'self_intro': self.self_intro,
            'gender': self.gender,
            'member_since': self.member_since.timestamp(),
            'last_seen': self.last_seen.timestamp(),
            'groups_enrolled': self.group_memberships.count(),
            'followed': self.followed.count(),
            'followers': self.followers.count(),
        }
        if not cache:
            return User.process_json(json_user)
        # `followed_by_me` is g.user relevant, which
        # is set in app.cache.users
        return json_user

    def __repr__(self):
        return '<User: %r>' % self.username

    @property
    def password(self):
        # Write-only attribute: plaintext is never stored or readable.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
Beispiel #53
0
class ThirdPartyUser(DefaultTableMixin, UUIDMixin, WriteTrackingMixin):
    """Links a local User account to an external (CAS/SAML) identity and
    syncs profile attributes from the third-party login parameters.
    """
    __tablename__ = 'third_party_user'

    # table columns
    third_party_type = db.Column(EnumType(ThirdPartyType), nullable=False)
    unique_identifier = db.Column(db.String(191), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"), nullable=False)
    # JSON-serialized attribute payload from the third-party provider
    _params = db.Column(db.Text)

    # relationships
    # user via User Model
    user_uuid = association_proxy('user', 'uuid')

    # hybrid and other functions

    @property
    def params(self):
        """Deserialized provider attributes, or None when unset."""
        return json.loads(self._params) if self._params else None

    @params.setter
    def params(self, params):
        self._params = json.dumps(params) if params else None

    @property
    def global_unique_identifier(self):
        """The provider attribute configured as the globally unique user id.

        Returns the first element when the provider supplies a list, or
        None when unconfigured/absent.
        """
        if self.params:
            global_unique_identifier_attribute = None
            if self.third_party_type == ThirdPartyType.cas:
                global_unique_identifier_attribute = current_app.config.get('CAS_GLOBAL_UNIQUE_IDENTIFIER_FIELD')
            elif self.third_party_type == ThirdPartyType.saml:
                global_unique_identifier_attribute = current_app.config.get('SAML_GLOBAL_UNIQUE_IDENTIFIER_FIELD')

            if global_unique_identifier_attribute and global_unique_identifier_attribute in self.params:
                global_unique_identifier = self.params.get(global_unique_identifier_attribute)
                if isinstance(global_unique_identifier, list):
                    global_unique_identifier = global_unique_identifier[0] if len(global_unique_identifier) > 0 else None
                return global_unique_identifier

        return None

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()

    __table_args__ = (
        # prevent duplicate third-party identity records
        db.UniqueConstraint('third_party_type', 'unique_identifier', name='_unique_third_party_type_and_unique_identifier'),
        DefaultTableMixin.default_table_args
    )

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch by uuid or abort 404 with a third-party-user message."""
        # fix: avoid a mutable default argument; normalize None to []
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Third Party User Unavailable"
        if not message:
            message = "Sorry, this third party user was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    def generate_or_link_user_account(self):
        """Attach an existing User (matched by global unique identifier) or
        create a fresh one populated from the provider attributes.
        """
        from . import SystemRole, User

        if not self.user:
            # check if global_unique_identifier user already exists
            if self.global_unique_identifier:
                self.user = User.query \
                    .filter_by(global_unique_identifier=self.global_unique_identifier) \
                    .one_or_none()

            if not self.user:
                self.user = User(
                    username=None,
                    password=None,
                    system_role=self._get_system_role(),
                    global_unique_identifier=self.global_unique_identifier
                )
                self._sync_name()
                self._sync_email()
                if self.user.system_role == SystemRole.student:
                    self._sync_student_number()

                # instructors can have their display names set to their full name by default
                if self.user.system_role != SystemRole.student and self.user.fullname is not None:
                    self.user.displayname = self.user.fullname
                else:
                    self.user.displayname = display_name_generator(self.user.system_role.value)

    def update_user_profile(self):
        """Re-sync student profile fields that students may not self-edit."""
        if self.user and self.user.system_role == SystemRole.student and self.params:
            # overwrite first/last name if student not allowed to change it
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_NAME'):
                self._sync_name()

            # overwrite email if student not allowed to change it
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_EMAIL'):
                self._sync_email()

            # overwrite student number if student not allowed to change it
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_STUDENT_NUMBER'):
                self._sync_student_number()

    def _sync_name(self):
        """Copy first/last name from provider params onto self.user."""
        if self.params:
            firstname_attribute = lastname_attribute = None
            if self.third_party_type == ThirdPartyType.cas:
                firstname_attribute = current_app.config.get('CAS_ATTRIBUTE_FIRST_NAME')
                lastname_attribute = current_app.config.get('CAS_ATTRIBUTE_LAST_NAME')
            elif self.third_party_type == ThirdPartyType.saml:
                firstname_attribute = current_app.config.get('SAML_ATTRIBUTE_FIRST_NAME')
                lastname_attribute = current_app.config.get('SAML_ATTRIBUTE_LAST_NAME')

            if firstname_attribute and firstname_attribute in self.params:
                first_name = self.params.get(firstname_attribute)
                if isinstance(first_name, list):
                    # providers may deliver multi-valued attributes; take the first
                    first_name = first_name[0] if len(first_name) > 0 else None
                self.user.firstname = first_name

            if lastname_attribute and lastname_attribute in self.params:
                last_name = self.params.get(lastname_attribute)
                if isinstance(last_name, list):
                    last_name = last_name[0] if len(last_name) > 0 else None
                self.user.lastname = last_name

    def _sync_email(self):
        """Copy email from provider params onto self.user."""
        if self.params:
            email_attribute = None
            if self.third_party_type == ThirdPartyType.cas:
                email_attribute = current_app.config.get('CAS_ATTRIBUTE_EMAIL')
            elif self.third_party_type == ThirdPartyType.saml:
                email_attribute = current_app.config.get('SAML_ATTRIBUTE_EMAIL')

            if email_attribute and email_attribute in self.params:
                email = self.params.get(email_attribute)
                if isinstance(email, list):
                    email = email[0] if len(email) > 0 else None
                self.user.email = email

    def _sync_student_number(self):
        """Copy student number from provider params onto self.user."""
        if self.params:
            student_number_attribute = None
            if self.third_party_type == ThirdPartyType.cas:
                student_number_attribute = current_app.config.get('CAS_ATTRIBUTE_STUDENT_NUMBER')
            elif self.third_party_type == ThirdPartyType.saml:
                student_number_attribute = current_app.config.get('SAML_ATTRIBUTE_STUDENT_NUMBER')

            if student_number_attribute and student_number_attribute in self.params:
                student_number = self.params.get(student_number_attribute)
                if isinstance(student_number, list):
                    student_number = student_number[0] if len(student_number) > 0 else None
                self.user.student_number = student_number

    def _get_system_role(self):
        """Map the provider's role attribute to a SystemRole.

        Returns SystemRole.instructor when any provider role matches the
        configured instructor role values, otherwise SystemRole.student.
        """
        from . import SystemRole

        if self.params:
            user_roles_attribute = instructor_role_values = None
            if self.third_party_type == ThirdPartyType.cas:
                user_roles_attribute = current_app.config.get('CAS_ATTRIBUTE_USER_ROLE')
                instructor_role_values = list(current_app.config.get('CAS_INSTRUCTOR_ROLE_VALUES'))
            if self.third_party_type == ThirdPartyType.saml:
                user_roles_attribute = current_app.config.get('SAML_ATTRIBUTE_USER_ROLE')
                instructor_role_values = list(current_app.config.get('SAML_INSTRUCTOR_ROLE_VALUES'))

            if user_roles_attribute and instructor_role_values and user_roles_attribute in self.params:
                user_roles = self.params.get(user_roles_attribute)
                if not isinstance(user_roles, list):
                    user_roles = [user_roles]

                for user_role in user_roles:
                    if user_role in instructor_role_values:
                        return SystemRole.instructor

        return SystemRole.student

    def upgrade_system_role(self):
        """Promote a student account to instructor when the provider says so.

        Never downgrades an existing role.
        """
        if self.user and self.params:
            # fix: compute the role once instead of calling _get_system_role()
            # twice (the original also used the always-truthy enum member as a
            # guard, which was a no-op)
            system_role = self._get_system_role()
            if self.user.system_role == SystemRole.student and system_role == SystemRole.instructor:
                self.user.system_role = system_role

            db.session.commit()
class RenderedTaskInstanceFields(Base):
    """Save Rendered Template Fields"""

    __tablename__ = "rendered_task_instance_fields"

    # Composite key mirrors TaskInstance's key (dag, task, run, map index).
    dag_id = Column(StringID(), primary_key=True)
    task_id = Column(StringID(), primary_key=True)
    run_id = Column(StringID(), primary_key=True)
    # -1 denotes an unmapped task instance
    map_index = Column(Integer, primary_key=True, server_default=text('-1'))
    rendered_fields = Column(sqlalchemy_jsonfield.JSONField(json=json),
                             nullable=False)
    k8s_pod_yaml = Column(sqlalchemy_jsonfield.JSONField(json=json),
                          nullable=True)

    __table_args__ = (
        PrimaryKeyConstraint(
            "dag_id",
            "task_id",
            "run_id",
            "map_index",
            name='rendered_task_instance_fields_pkey',
            mssql_clustered=True,
        ),
        # rows are deleted automatically when the owning TaskInstance goes away
        ForeignKeyConstraint(
            [dag_id, task_id, run_id, map_index],
            [
                "task_instance.dag_id",
                "task_instance.task_id",
                "task_instance.run_id",
                "task_instance.map_index",
            ],
            name='rtif_ti_fkey',
            ondelete="CASCADE",
        ),
    )
    task_instance = relationship(
        "TaskInstance",
        lazy='joined',
        back_populates="rendered_task_instance_fields",
    )

    # We don't need a DB level FK here, as we already have that to TI (which has one to DR) but by defining
    # the relationship we can more easily find the execution date for these rows
    dag_run = relationship(
        "DagRun",
        primaryjoin="""and_(
            RenderedTaskInstanceFields.dag_id == foreign(DagRun.dag_id),
            RenderedTaskInstanceFields.run_id == foreign(DagRun.run_id),
        )""",
        viewonly=True,
    )

    execution_date = association_proxy("dag_run", "execution_date")

    def __init__(self, ti: TaskInstance, render_templates=True):
        """Capture the rendered template fields of *ti*.

        :param ti: the task instance whose rendered fields are recorded
        :param render_templates: when True, render ti's templates first
        """
        self.dag_id = ti.dag_id
        self.task_id = ti.task_id
        self.run_id = ti.run_id
        self.map_index = ti.map_index
        self.ti = ti
        if render_templates:
            ti.render_templates()
        self.task = ti.task
        # only capture the pod yaml when running inside a k8s executor pod
        if os.environ.get("AIRFLOW_IS_K8S_EXECUTOR_POD", None):
            self.k8s_pod_yaml = ti.render_k8s_pod_yaml()
        self.rendered_fields = {
            field: serialize_template_field(getattr(self.task, field))
            for field in self.task.template_fields
        }

        # mask secrets before anything is persisted
        self._redact()

    def __repr__(self):
        prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.task_id} {self.run_id}"
        if self.map_index != -1:
            prefix += f" map_index={self.map_index}"
        return prefix + '>'

    def _redact(self):
        """Mask sensitive values in rendered fields and the pod yaml in place."""
        from airflow.utils.log.secrets_masker import redact

        if self.k8s_pod_yaml:
            self.k8s_pod_yaml = redact(self.k8s_pod_yaml)

        for field, rendered in self.rendered_fields.items():
            self.rendered_fields[field] = redact(rendered, field)

    @classmethod
    @provide_session
    def get_templated_fields(cls,
                             ti: TaskInstance,
                             session: Session = NEW_SESSION) -> Optional[dict]:
        """
        Get templated field for a TaskInstance from the RenderedTaskInstanceFields
        table.

        :param ti: Task Instance
        :param session: SqlAlchemy Session
        :return: Rendered Templated TI field
        """
        result = (session.query(cls.rendered_fields).filter(
            cls.dag_id == ti.dag_id,
            cls.task_id == ti.task_id,
            cls.run_id == ti.run_id,
            cls.map_index == ti.map_index,
        ).one_or_none())

        if result:
            rendered_fields = result.rendered_fields
            return rendered_fields
        else:
            return None

    @classmethod
    @provide_session
    def get_k8s_pod_yaml(cls,
                         ti: TaskInstance,
                         session: Session = NEW_SESSION) -> Optional[dict]:
        """
        Get rendered Kubernetes Pod Yaml for a TaskInstance from the RenderedTaskInstanceFields
        table.

        :param ti: Task Instance
        :param session: SqlAlchemy Session
        :return: Kubernetes Pod Yaml
        """
        result = (session.query(cls.k8s_pod_yaml).filter(
            cls.dag_id == ti.dag_id,
            cls.task_id == ti.task_id,
            cls.run_id == ti.run_id,
            cls.map_index == ti.map_index,
        ).one_or_none())
        return result.k8s_pod_yaml if result else None

    # NOTE(review): sibling methods default session to NEW_SESSION; this one
    # uses None — confirm whether that inconsistency is intentional.
    @provide_session
    def write(self, session: Session = None):
        """Write instance to database

        :param session: SqlAlchemy Session
        """
        # merge (not add) so re-runs overwrite the existing row for this key
        session.merge(self)

    @classmethod
    @provide_session
    def delete_old_records(
        cls,
        task_id: str,
        dag_id: str,
        # NOTE: default is evaluated once at import time from airflow.cfg
        num_to_keep=conf.getint("core",
                                "max_num_rendered_ti_fields_per_task",
                                fallback=0),
        session: Session = None,
    ):
        """
        Keep only Last X (num_to_keep) number of records for a task by deleting others.

        In the case of data for a mapped task either all of the rows or none of the rows will be deleted, so
        we don't end up with partial data for a set of mapped Task Instances left in the database.

        :param task_id: Task ID
        :param dag_id: Dag ID
        :param num_to_keep: Number of Records to keep
        :param session: SqlAlchemy Session
        """
        from airflow.models.dagrun import DagRun

        # 0 (the config fallback) disables cleanup entirely
        if num_to_keep <= 0:
            return

        tis_to_keep_query = (session.query(
            cls.dag_id, cls.task_id, cls.run_id).filter(
                cls.dag_id == dag_id,
                cls.task_id == task_id).join(cls.dag_run).distinct().order_by(
                    DagRun.execution_date.desc()).limit(num_to_keep))

        # The delete strategy is dialect-specific because MySQL cannot use a
        # LIMIT-ed subquery inside NOT IN (see helper below).
        if session.bind.dialect.name in ["postgresql", "sqlite"]:
            # Fetch Top X records given dag_id & task_id ordered by Execution Date
            subq1 = tis_to_keep_query.subquery()
            excluded = session.query(subq1.c.dag_id, subq1.c.task_id,
                                     subq1.c.run_id)
            session.query(cls).filter(
                cls.dag_id == dag_id,
                cls.task_id == task_id,
                tuple_(cls.dag_id, cls.task_id, cls.run_id).notin_(excluded),
            ).delete(synchronize_session=False)
        elif session.bind.dialect.name in ["mysql"]:
            cls._remove_old_rendered_ti_fields_mysql(dag_id, session, task_id,
                                                     tis_to_keep_query)
        else:
            # Fetch Top X records given dag_id & task_id ordered by Execution Date
            tis_to_keep = tis_to_keep_query.all()

            # fallback for other dialects: delete everything not matching one
            # of the kept (dag_id, task_id, run_id) triples
            filter_tis = [
                not_(
                    and_(
                        cls.dag_id == ti.dag_id,
                        cls.task_id == ti.task_id,
                        cls.run_id == ti.run_id,
                    )) for ti in tis_to_keep
            ]

            session.query(cls).filter(
                and_(*filter_tis)).delete(synchronize_session=False)

        session.flush()

    @classmethod
    @retry_db_transaction
    def _remove_old_rendered_ti_fields_mysql(cls, dag_id, session, task_id,
                                             tis_to_keep_query):
        """MySQL-specific variant of the cleanup in delete_old_records."""
        # Fetch Top X records given dag_id & task_id ordered by Execution Date
        subq1 = tis_to_keep_query.subquery('subq1')
        # Second Subquery
        # Workaround for MySQL Limitation (https://stackoverflow.com/a/19344141/5691525)
        # Limitation: This version of MySQL does not yet support
        # LIMIT & IN/ALL/ANY/SOME subquery
        subq2 = session.query(subq1.c.dag_id, subq1.c.task_id,
                              subq1.c.run_id).subquery('subq2')
        # This query might deadlock occasionally and it should be retried if fails (see decorator)
        session.query(cls).filter(
            cls.dag_id == dag_id,
            cls.task_id == task_id,
            tuple_(cls.dag_id, cls.task_id, cls.run_id).notin_(subq2),
        ).delete(synchronize_session=False)
Beispiel #55
0
class Person(Base):
    """Stores both account login details and personal information.
    """
    __tablename__ = 'person'

    id = sa.Column(sa.types.Integer, primary_key=True)

    email_address = sa.Column(sa.types.Text, nullable=False, unique=True)
    password_hash = sa.Column(sa.types.String(64))
    password_salt = sa.Column(sa.types.String(64))

    # creation timestamp of the registration
    creation_timestamp = sa.Column(sa.types.DateTime,
                                   nullable=False,
                                   default=sa.func.current_timestamp())
    last_modification_timestamp = sa.Column(
        sa.types.DateTime,
        nullable=False,
        default=sa.func.current_timestamp(),
        onupdate=sa.func.current_timestamp())
    # opaque token used in account-confirmation URLs; see _update_url_hash()
    url_hash = sa.Column(sa.types.String(64), nullable=False, index=True)

    # flag that the account has been activated by the user
    # (responded to their confirmation email)
    activated = sa.Column(sa.types.Boolean, nullable=False, default=False)

    # other personal details
    # the lengths of the fields are chosen arbitrarily
    firstname = sa.Column(sa.types.Text)
    lastname = sa.Column(sa.types.Text)
    fullname = sa.orm.column_property(firstname + " " + lastname)
    address1 = sa.Column(sa.types.Text)
    address2 = sa.Column(sa.types.Text)
    city = sa.Column(sa.types.Text)
    state = sa.Column(sa.types.Text)
    postcode = sa.Column(sa.types.Text)
    country = sa.Column(sa.types.Text)
    company = sa.Column(sa.types.Text)
    phone = sa.Column(sa.types.Text)
    mobile = sa.Column(sa.types.Text)

    url = sa.Column(sa.types.Text)

    # Proposal bits
    experience = sa.Column(sa.types.Text)
    bio = sa.Column(sa.types.Text)

    # fix: default was the string 'False', which is truthy and therefore
    # effectively defaulted the column to True
    badge_printed = sa.Column(sa.types.Boolean, default=False)
    i_agree = sa.Column(sa.types.Boolean, nullable=False, default=False)

    def _create_social_network_map(network, account_name):
        """Constructs PersonSocialNetworkMaps from the SocialNetwork and the
          account_name (used as the association-proxy creator)."""
        return PersonSocialNetworkMap(social_network=network,
                                      account_name=account_name)

    # relations
    roles = sa.orm.relation(Role,
                            secondary=person_role_map,
                            backref='people',
                            order_by=Role.name)
    by_social_network = sa.orm.relation(
        PersonSocialNetworkMap,
        collection_class=attribute_mapped_collection('social_network'),
        cascade="all, delete-orphan",
        backref='person')
    social_networks = association_proxy('by_social_network',
                                        'account_name',
                                        creator=_create_social_network_map)
    special_registration = sa.orm.relation(SpecialRegistration,
                                           backref='person')

    def _get_proposal_offers(self):
        """Return this person's proposals whose status contains 'Offered'."""
        from proposal import Proposal, ProposalStatus, person_proposal_map
        return Session.query(Proposal).join(person_proposal_map).join(
            Person).join(ProposalStatus).filter(Person.id == self.id).filter(
                ProposalStatus.name.like('%Offered%')).all()

    proposal_offers = property(_get_proposal_offers)

    def __init__(self, **kwargs):
        # remove the args that should never be set via creation
        super(Person, self).__init__(**kwargs)

        self.creation_timestamp = datetime.datetime.now()
        self.activated = False
        self.badge_printed = False

        # url_hash should never be modifiable by the caller directly
        self._update_url_hash()

    def gen_password(self, value):
        """Hash `value` with the site-wide and per-user salts; returns hex."""
        if not self.password_salt:
            salt = hashlib.new('sha256')
            salt.update(os.urandom(32))
            self.password_salt = salt.hexdigest()

        salt = lca_info['password_salt'] + self.password_salt
        # FIXME: switch back to PBKDF2 once Python 2.7.8 is in Ubuntu LTS (16.04)
        #dk = hashlib.pbkdf2_hmac('sha256', value, salt, lca_info['password_iterations'])
        #return binascii.hexlify(dk)
        h = hashlib.new('sha256')
        h.update(value + salt)
        return h.hexdigest()

    def _set_password(self, value):
        if value is not None:
            self.password_hash = self.gen_password(value)

    def _get_password(self):
        return self.password_hash

    # write-only at the plaintext level; reading returns the stored hash
    password = property(_get_password, _set_password)

    def check_password(self, value):
        """Check the given password is equal to the stored one"""
        return self.password_hash == self.gen_password(value)

    def is_professional(self):
        """We treat speakers, miniconf orgs, Little Blue sponsors and
           professionals as professionals."""
        if self.is_speaker() or self.is_miniconf_org():
            return True
        for invoice in self.invoices:
            if invoice.is_paid and not invoice.is_void:
                for item in invoice.items:
                    if (item.description.find('Professional') > -1
                            or item.description.find('Little Blue') > -1):
                        return True
        return False

    def is_speaker(self):
        """True if this person has an accepted non-Miniconf proposal."""
        # Check if they have the 'copresenter' role, this means they are not a 'real' speaker
        if self.has_role("copresenter"):
            return False
        return reduce(
            lambda a, b: a or (b.accepted and b.type.name != 'Miniconf'),
            self.proposals, False) or False
        # note: the "or False" at the end converts a None into a False

    def is_miniconf_org(self):
        """True if this person has an accepted Miniconf proposal."""
        return reduce(
            lambda a, b: a or (b.accepted and b.type.name == 'Miniconf'),
            self.proposals, False) or False
        # note: the "or False" at the end converts a None into a False

    def has_role(self, name):
        """Case-insensitive test for a role by name."""
        name = name.lower()
        for role in self.roles:
            if role.name.lower() == name:
                return True
        return False

    def is_volunteer(self):
        """Return the volunteer's accepted flag, or False when undecided."""
        if self.volunteer and self.volunteer.accepted is not None:
            return self.volunteer.accepted
        return False

    def is_from_common_country(self):
        # People registering from these countries will not require extra verification
        common_countries = [
            'australia', 'new zealand', 'united states', 'canada', 'germany',
            'france', 'spain', 'italy', 'switzerland', 'austria',
            'united kingdom', 'ireland', 'japan', 'norway', 'denmark',
            'sweden', 'finland', 'iceland', 'belgium', 'brazil', 'mexico',
            'argentina', 'chile', 'columbia', 'estonia', 'greece', 'hong kong',
            'israel', 'luxembourg', 'monaco', 'netherlands', 'portugal',
            'south africa'
        ]

        return bool(self.country
                    and self.country.strip().lower() in common_countries)

    def _update_url_hash(self):
        """Update the stored URL hash for this person.

        Call this when an element of the URL hash has changed
        (i.e. either the email address or timestamp)
        """
        nonce = random.randrange(0, 2**30)
        magic = "%s&%s&%s" % (self.email_address, self.creation_timestamp,
                              nonce)
        self.url_hash = self.gen_password(magic)

    def valid_invoice(self):
        """Return the first non-void, non-manual invoice, or None."""
        for invoice in self.invoices:
            if not invoice.is_void and not invoice.manual:
                return invoice
        return None

    def has_valid_invoice(self):
        """True if any invoice is not void."""
        for invoice in self.invoices:
            if not invoice.is_void:
                return True
        return False

    def has_paid_ticket(self):
        """True if any paid, non-void invoice contains a Ticket item."""
        for invoice in self.invoices:
            if invoice.is_paid and not invoice.is_void:
                for item in invoice.items:
                    if item.product is not None and item.product.category.name == 'Ticket':
                        return True
        return False

    def ticket_type(self):
        """Return the ticket description (sans the word 'Ticket'), or None."""
        for invoice in self.invoices:
            if not invoice.is_void:
                for item in invoice.items:
                    if item.product is not None and item.product.category.name == 'Ticket':
                        # Strip off any mention of "Ticket".
                        # fix: renamed local so the `str` builtin is no longer shadowed
                        desc = item.description
                        desc = desc.replace('Ticket - ', '')
                        desc = desc.replace(' Ticket', '')
                        return desc

    def paid(self):
        """True when there is at least one non-void invoice and all of them are paid."""
        status = False
        for invoice in self.invoices:
            if not invoice.is_void:
                if invoice.is_paid:
                    status = True
                else:
                    return False
        return status

    def fetch_social_networks(self):
        """Populate self.social_network with an entry for every known network,
        using '' for networks this person has no account on."""
        self.social_network = dict()

        # NOTE(review): iterating the association-proxy dict yields
        # SocialNetwork keys; assumes each has a .name — confirm.
        for sn in self.social_networks:
            self.social_network[sn.name] = self.social_networks[sn]

        for sn in SocialNetwork.find_all():
            if sn.name not in self.social_network:
                self.social_network[sn.name] = ''

    def __repr__(self):
        return '<Person id="%s" email="%s">' % (self.id, self.email_address)

    @classmethod
    def find_by_email(cls, email, abort_404=False):
        result = Session.query(Person).filter_by(
            email_address=email.lower()).first()
        if result is None and abort_404:
            abort(404, "No such person object")
        return result

    @classmethod
    def find_by_id(cls, id, abort_404=True):
        result = Session.query(Person).filter_by(id=id).first()
        if result is None and abort_404:
            abort(404, "No such person object")
        return result

    @classmethod
    def find_all(cls):
        return Session.query(Person).order_by(Person.id).all()

    @classmethod
    def find_by_url_hash(cls, url_hash, abort_404=True):
        result = Session.query(Person).filter_by(url_hash=url_hash).first()
        if result is None and abort_404:
            abort(404, "No such person object")
        return result

    def avatar_url(self):
        """Libravatar URL for this person's email (https, 'mystery man' fallback)."""
        return libravatar_url(email=self.email_address,
                              https=True,
                              default='mm')

    @classmethod
    def find_review_summary(cls):
        from review import Review
        return Review.stats_query().join(cls).add_entity(cls).group_by(cls)
Beispiel #56
0
class Genus(db.Base, db.Serializable):
    """
    :Table name: genus

    :Columns:
        *genus*:
            The name of the genus.  In addition to standard generic
            names any additional hybrid flags or genera should included here.

        *qualifier*:
            Designates the botanical status of the genus.

            Possible values:
                * s. lat.: aggregrate genus (sensu lato)

                * s. str.: segregate genus (sensu stricto)

        *author*:
            The name or abbreviation of the author who published this genus.

    :Properties:
        *family*:
            The family of the genus.

        *synonyms*:
            The list of genera who are synonymous with this genus.  If
            a genus is listed as a synonym of this genus then this
            genus should be considered the current and valid name for
            the synonym.

    :Contraints:
        The combination of genus, author, qualifier
        and family_id must be unique.
    """
    __tablename__ = 'genus'
    __table_args__ = (UniqueConstraint('genus', 'author', 'qualifier',
                                       'family_id'), {})
    __mapper_args__ = {'order_by': ['genus', 'author']}

    rank = 'genus'
    link_keys = ['accepted']

    @property
    def cites(self):
        '''the cites status of this taxon, or None

        Falls back to the family's cites status when this genus has no
        note whose category is CITES.
        '''

        cites_notes = [
            i.note for i in self.notes
            if i.category and i.category.upper() == 'CITES'
        ]
        if not cites_notes:
            return self.family.cites
        return cites_notes[0]

    # columns
    genus = Column(String(64), nullable=False, index=True)

    # use '' instead of None so that the constraints will work properly
    author = Column(Unicode(255), default=u'')

    @validates('genus', 'author')
    def validate_stripping(self, key, value):
        """Strip surrounding whitespace from genus/author before storing."""
        if value is None:
            return None
        return value.strip()

    # NOTE(review): the class docstring lists 's. str.' (trailing dot) but
    # the enum value here is 's. str' -- confirm which form existing data
    # uses before changing either.
    qualifier = Column(types.Enum(values=['s. lat.', 's. str', u'']),
                       default=u'')

    family_id = Column(Integer, ForeignKey('family.id'), nullable=False)

    # relations
    # `species` relation is defined outside of `Genus` class definition
    synonyms = association_proxy('_synonyms', 'synonym')
    _synonyms = relation('GenusSynonym',
                         primaryjoin='Genus.id==GenusSynonym.genus_id',
                         cascade='all, delete-orphan',
                         uselist=True,
                         backref='genus')

    # this is a dummy relation, it is only here to make cascading work
    # correctly and to ensure that all synonyms related to this genus
    # get deleted if this genus gets deleted
    __syn = relation('GenusSynonym',
                     primaryjoin='Genus.id==GenusSynonym.synonym_id',
                     cascade='all, delete-orphan',
                     uselist=True)

    @property
    def accepted(self):
        'Name that should be used if name of self should be rejected'
        session = object_session(self)
        syn = session.query(GenusSynonym).filter(
            GenusSynonym.synonym_id == self.id).first()
        accepted = syn and syn.genus
        return accepted

    @accepted.setter
    def accepted(self, value):
        'Name that should be used if name of self should be rejected'
        assert isinstance(value, self.__class__)
        if self in value.synonyms:
            return
        # remove any previous `accepted` link
        # NOTE(review): this opens a fresh session and commits the delete
        # immediately, independent of the caller's transaction -- confirm
        # this is intended.
        session = db.Session()
        session.query(GenusSynonym).filter(
            GenusSynonym.synonym_id == self.id).delete()
        session.commit()
        value.synonyms.append(self)

    def __repr__(self):
        return Genus.str(self)

    @staticmethod
    def str(genus, author=False):
        """Return a display string for *genus*, optionally with author.

        Empty/None components are skipped; the author is XML-escaped.
        """
        # TODO: the genus should be italicized for markup
        if genus.genus is None:
            return repr(genus)
        elif not author or genus.author is None:
            return ' '.join([
                s for s in [genus.genus, genus.qualifier]
                if s not in ('', None)
            ])
        else:
            return ' '.join([
                s for s in [
                    genus.genus, genus.qualifier,
                    xml.sax.saxutils.escape(genus.author)
                ] if s not in ('', None)
            ])

    def has_accessions(self):
        '''true if genus is linked to at least one accession
        '''

        return False

    def as_dict(self, recurse=True):
        """Serialize as a generic taxon dict; includes the accepted name
        (non-recursively) when *recurse* is True."""
        result = db.Serializable.as_dict(self)
        del result['genus']
        del result['qualifier']
        result['object'] = 'taxon'
        result['rank'] = 'genus'
        result['epithet'] = self.genus
        result['ht-rank'] = 'familia'
        result['ht-epithet'] = self.family.family
        if recurse and self.accepted is not None:
            result['accepted'] = self.accepted.as_dict(recurse=False)
        return result

    @classmethod
    def retrieve(cls, session, keys):
        """Return the single Genus matching *keys*, or None.

        First tries the epithet alone; if that fails (no result, multiple
        results, or missing key) and an author is given, retries with
        epithet + author.
        """
        try:
            return session.query(cls).filter(
                cls.genus == keys['epithet']).one()
        except Exception:
            # was a bare `except:` -- narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed; `one()` raises
            # NoResultFound/MultipleResultsFound and a missing 'epithet'
            # key raises KeyError, all of which still land here.
            if 'author' not in keys:
                return None
        try:
            return session.query(cls).filter(
                cls.genus == keys['epithet'],
                cls.author == keys['author']).one()
        except Exception:
            return None

    @classmethod
    def correct_field_names(cls, keys):
        """Rename exchange-format keys to internal column names, in place."""
        for internal, exchange in [('genus', 'epithet'),
                                   ('family', 'ht-epithet')]:
            if exchange in keys:
                keys[internal] = keys[exchange]
                del keys[exchange]

    @classmethod
    def compute_serializable_fields(cls, session, keys):
        """Resolve the family object from 'ht-epithet'; raises
        error.NoResultException when it cannot be found or created."""
        from family import Family
        result = {'family': None}
        ## retrieve family object
        if keys.get('ht-epithet'):
            result['family'] = Family.retrieve_or_create(
                session, {'epithet': keys['ht-epithet']}, create=True)
        if result['family'] is None:
            raise error.NoResultException()
        return result
Beispiel #57
0
class Message(MailSyncBase, HasRevisions, HasPublicID):
    """An email message (or draft) in a thread within a namespace.

    Header metadata is stored as JSON columns, the body as a compacted
    blob, and MIME parts/attachments via related Part/Block rows.
    """

    @property
    def API_OBJECT_NAME(self):
        # Drafts are exposed as a distinct API object type.
        return 'message' if not self.is_draft else 'draft'

    namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
                          index=True,
                          nullable=False)
    namespace = relationship('Namespace', load_on_pending=True)

    # Do delete messages if their associated thread is deleted.
    thread_id = Column(Integer,
                       ForeignKey('thread.id', ondelete='CASCADE'),
                       nullable=False)
    thread = relationship('Thread',
                          backref=backref('messages',
                                          order_by='Message.received_date',
                                          passive_deletes=True,
                                          cascade='all, delete-orphan'))

    # Address headers stored as JSON; default to empty lists where the
    # column is non-nullable.
    from_addr = Column(JSON, nullable=False, default=lambda: [])
    sender_addr = Column(JSON, nullable=True)
    reply_to = Column(JSON, nullable=True, default=lambda: [])
    to_addr = Column(JSON, nullable=False, default=lambda: [])
    cc_addr = Column(JSON, nullable=False, default=lambda: [])
    bcc_addr = Column(JSON, nullable=False, default=lambda: [])
    in_reply_to = Column(JSON, nullable=True)
    # From: http://tools.ietf.org/html/rfc4130, section 5.3.3,
    # max message_id_header is 998 characters
    message_id_header = Column(String(998), nullable=True)
    # There is no hard limit on subject limit in the spec, but 255 is common.
    subject = Column(String(255), nullable=True, default='')
    received_date = Column(DateTime, nullable=False, index=True)
    size = Column(Integer, nullable=False)
    data_sha256 = Column(String(255), nullable=True)

    is_read = Column(Boolean, server_default=false(), nullable=False)
    is_starred = Column(Boolean, server_default=false(), nullable=False)

    # For drafts (both Inbox-created and otherwise)
    is_draft = Column(Boolean, server_default=false(), nullable=False)
    is_sent = Column(Boolean, server_default=false(), nullable=False)

    # REPURPOSED
    state = Column(
        Enum('draft', 'sending', 'sending failed', 'sent', 'actions_pending',
             'actions_committed'))

    @property
    def categories_changes(self):
        # True while category changes are pending commit (see setter).
        return self.state == 'actions_pending'

    @categories_changes.setter
    def categories_changes(self, has_changes):
        if has_changes is True:
            self.state = 'actions_pending'
        else:
            self.state = 'actions_committed'

    # Compressed body blob; see the `body` property for encode/decode.
    _compacted_body = Column(LONGBLOB, nullable=True)
    snippet = Column(String(191), nullable=False)
    SNIPPET_LENGTH = 191

    # A reference to the block holding the full contents of the message
    full_body_id = Column(ForeignKey('block.id', name='full_body_id_fk'),
                          nullable=True)
    full_body = relationship('Block', cascade='all, delete')

    # this might be a mail-parsing bug, or just a message from a bad client
    decode_error = Column(Boolean,
                          server_default=false(),
                          nullable=False,
                          index=True)

    # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
    references = Column(JSON, nullable=True)

    # Only used for drafts.
    version = Column(Integer, nullable=False, server_default='0')

    # only on messages from Gmail (TODO: use different table)
    #
    # X-GM-MSGID is guaranteed unique across an account but not globally
    # across all Gmail.
    #
    # Messages between different accounts *may* have the same X-GM-MSGID,
    # but it's unlikely.
    #
    # (Gmail info from
    # http://mailman13.u.washington.edu/pipermail/imap-protocol/
    # 2014-July/002290.html.)
    g_msgid = Column(BigInteger, nullable=True, index=True, unique=False)
    g_thrid = Column(BigInteger, nullable=True, index=True, unique=False)

    # The uid as set in the X-INBOX-ID header of a sent message we create
    inbox_uid = Column(String(64), nullable=True, index=True)

    def regenerate_inbox_uid(self):
        """
        The value of inbox_uid is simply the draft public_id and version,
        concatenated. Because the inbox_uid identifies the draft on the remote
        provider, we regenerate it on each draft revision so that we can delete
        the old draft and add the new one on the remote."""

        from inbox.sendmail.message import generate_message_id_header
        self.inbox_uid = '{}-{}'.format(self.public_id, self.version)
        self.message_id_header = generate_message_id_header(self.inbox_uid)

    # Proxy through MessageCategory association rows; appending a Category
    # creates the association via the creator lambda.
    categories = association_proxy(
        'messagecategories',
        'category',
        creator=lambda category: MessageCategory(category=category))

    # FOR INBOX-CREATED MESSAGES:

    is_created = Column(Boolean, server_default=false(), nullable=False)

    # Whether this draft is a reply to an existing thread.
    is_reply = Column(Boolean)

    reply_to_message_id = Column(Integer,
                                 ForeignKey('message.id'),
                                 nullable=True)
    reply_to_message = relationship('Message', uselist=False)

    def mark_for_deletion(self):
        """
        Mark this message to be deleted by an asynchronous delete
        handler.

        """
        self.deleted_at = datetime.datetime.utcnow()

    @validates('subject')
    def sanitize_subject(self, key, value):
        """Normalize the subject before storage: trim to the column's 255
        chars and strip null bytes."""
        # Trim overlong subjects, and remove null bytes. The latter can result
        # when, for example, UTF-8 text decoded from an RFC2047-encoded header
        # contains null bytes.
        if value is None:
            return
        if len(value) > 255:
            value = value[:255]
        value = value.replace('\0', '')
        return value

    @classmethod
    def create_from_synced(cls, account, mid, folder_name, received_date,
                           body_string):
        """
        Parses message data and writes out db metadata and MIME blocks.

        Returns the new Message, which links to the new Part and Block objects
        through relationships. All new objects are uncommitted.

        Threads are not computed here; you gotta do that separately.

        Parameters
        ----------
        mid : int
            The account backend-specific message identifier; it's only used for
            logging errors.

        raw_message : str
            The full message including headers (encoded).

        """
        _rqd = [account, mid, folder_name, body_string]
        if not all([v is not None for v in _rqd]):
            raise ValueError(
                'Required keyword arguments: account, mid, folder_name, '
                'body_string')
        # stop trickle-down bugs
        assert account.namespace is not None
        assert not isinstance(body_string, unicode)

        msg = Message()

        # Always keep the raw message around in a Block, even if parsing
        # fails below.
        from inbox.models.block import Block
        body_block = Block()
        body_block.namespace_id = account.namespace.id
        body_block.data = body_string
        body_block.content_type = "text/plain"
        msg.full_body = body_block

        msg.namespace_id = account.namespace.id

        try:
            parsed = mime.from_string(body_string)
            msg._parse_metadata(parsed, body_string, received_date, account.id,
                                folder_name, mid)
        except (mime.DecodingError, AttributeError, RuntimeError,
                TypeError) as e:
            # Parsing failed entirely; record the error and fall through
            # with filler metadata.
            parsed = None
            log.error('Error parsing message metadata',
                      folder_name=folder_name,
                      account_id=account.id,
                      error=e)
            msg._mark_error()

        if parsed is not None:
            plain_parts = []
            html_parts = []
            for mimepart in parsed.walk(
                    with_self=parsed.content_type.is_singlepart()):
                try:
                    if mimepart.content_type.is_multipart():
                        continue  # TODO should we store relations?
                    msg._parse_mimepart(mid, mimepart, account.namespace.id,
                                        html_parts, plain_parts)
                except (mime.DecodingError, AttributeError, RuntimeError,
                        TypeError, binascii.Error, UnicodeDecodeError) as e:
                    log.error('Error parsing message MIME parts',
                              folder_name=folder_name,
                              account_id=account.id,
                              error=e)
                    msg._mark_error()
            msg.calculate_body(html_parts, plain_parts)

            # Occasionally people try to send messages to way too many
            # recipients. In such cases, empty the field and treat as a parsing
            # error so that we don't break the entire sync.
            for field in ('to_addr', 'cc_addr', 'bcc_addr', 'references'):
                value = getattr(msg, field)
                if json_field_too_long(value):
                    log.error('Recipient field too long',
                              field=field,
                              account_id=account.id,
                              folder_name=folder_name,
                              mid=mid)
                    setattr(msg, field, [])
                    msg._mark_error()

        return msg

    def _parse_metadata(self, parsed, body_string, received_date, account_id,
                        folder_name, mid):
        """Populate header-derived columns (addresses, subject, dates,
        references, size, sha256) from the parsed MIME object."""
        mime_version = parsed.headers.get('Mime-Version')
        # sometimes MIME-Version is '1.0 (1.0)', hence the .startswith()
        if mime_version is not None and not mime_version.startswith('1.0'):
            log.warning('Unexpected MIME-Version',
                        account_id=account_id,
                        folder_name=folder_name,
                        mid=mid,
                        mime_version=mime_version)

        self.data_sha256 = sha256(body_string).hexdigest()

        self.subject = parsed.subject
        self.from_addr = parse_mimepart_address_header(parsed, 'From')
        self.sender_addr = parse_mimepart_address_header(parsed, 'Sender')
        self.reply_to = parse_mimepart_address_header(parsed, 'Reply-To')
        self.to_addr = parse_mimepart_address_header(parsed, 'To')
        self.cc_addr = parse_mimepart_address_header(parsed, 'Cc')
        self.bcc_addr = parse_mimepart_address_header(parsed, 'Bcc')

        self.in_reply_to = parsed.headers.get('In-Reply-To')
        self.message_id_header = parsed.headers.get('Message-Id')

        # Fall back to the Date/Received headers when the caller did not
        # supply a received_date.
        self.received_date = received_date if received_date else \
            get_internaldate(parsed.headers.get('Date'),
                                parsed.headers.get('Received'))

        # Custom Inbox header
        self.inbox_uid = parsed.headers.get('X-INBOX-ID')

        # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
        self.references = parse_references(
            parsed.headers.get('References', ''),
            parsed.headers.get('In-Reply-To', ''))

        self.size = len(body_string)  # includes headers text

    def _parse_mimepart(self, mid, mimepart, namespace_id, html_parts,
                        plain_parts):
        """Classify one leaf MIME part: append text/html or text/plain
        bodies to the accumulator lists, save everything else (and inline
        non-body parts) as attachments."""
        disposition, _ = mimepart.content_disposition
        content_id = mimepart.headers.get('Content-Id')
        content_type, params = mimepart.content_type

        filename = mimepart.detected_file_name
        if filename == '':
            filename = None

        is_text = content_type.startswith('text')
        if disposition not in (None, 'inline', 'attachment'):
            log.error('Unknown Content-Disposition',
                      message_public_id=self.public_id,
                      bad_content_disposition=mimepart.content_disposition)
            self._mark_error()
            return

        if disposition == 'attachment':
            self._save_attachment(mimepart, disposition, content_type,
                                  filename, content_id, namespace_id, mid)
            return

        if (disposition == 'inline'
                and not (is_text and filename is None and content_id is None)):
            # Some clients set Content-Disposition: inline on text MIME parts
            # that we really want to treat as part of the text body. Don't
            # treat those as attachments.
            self._save_attachment(mimepart, disposition, content_type,
                                  filename, content_id, namespace_id, mid)
            return

        if is_text:
            if mimepart.body is None:
                return
            # Normalize line endings to '\n' before accumulating.
            normalized_data = mimepart.body.encode('utf-8', 'strict')
            normalized_data = normalized_data.replace('\r\n', '\n'). \
                replace('\r', '\n')
            if content_type == 'text/html':
                html_parts.append(normalized_data)
            elif content_type == 'text/plain':
                plain_parts.append(normalized_data)
            else:
                log.info('Saving other text MIME part as attachment',
                         content_type=content_type,
                         mid=mid)
                self._save_attachment(mimepart, 'attachment', content_type,
                                      filename, content_id, namespace_id, mid)
            return

        # Finally, if we get a non-text MIME part without Content-Disposition,
        # treat it as an attachment.
        self._save_attachment(mimepart, 'attachment', content_type, filename,
                              content_id, namespace_id, mid)

    def _save_attachment(self, mimepart, content_disposition, content_type,
                         filename, content_id, namespace_id, mid):
        """Store a MIME part as a Part/Block pair linked to this message."""
        from inbox.models import Part, Block
        block = Block()
        block.namespace_id = namespace_id
        block.filename = _trim_filename(filename, mid=mid)
        block.content_type = content_type
        part = Part(block=block, message=self)
        if content_id:
            # Truncate to the column size.
            content_id = content_id[:255]
        part.content_id = content_id
        part.content_disposition = content_disposition
        data = mimepart.body or ''
        if isinstance(data, unicode):
            data = data.encode('utf-8', 'strict')
        block.data = data

    def _mark_error(self):
        """
        Mark message as having encountered errors while parsing.

        Message parsing can fail for several reasons. Occasionally iconv will
        fail via maximum recursion depth. EAS messages may be missing Date and
        Received headers. Flanker may fail to handle some out-of-spec messages.

        In this case, we keep what metadata we've managed to parse but also
        mark the message as having failed to parse properly.

        """
        self.decode_error = True
        # fill in required attributes with filler data if could not parse them
        self.size = 0
        if self.received_date is None:
            self.received_date = datetime.datetime.utcnow()
        if self.body is None:
            self.body = ''
        if self.snippet is None:
            self.snippet = ''

    def calculate_body(self, html_parts, plain_parts):
        """Set `body` and `snippet` from the accumulated text parts.

        Prefers HTML; plaintext is converted to HTML. Empty input yields
        empty body/snippet.
        """
        html_body = ''.join(html_parts).decode('utf-8').strip()
        plain_body = '\n'.join(plain_parts).decode('utf-8').strip()
        if html_body:
            self.snippet = self.calculate_html_snippet(html_body)
            self.body = html_body
        elif plain_body:
            self.snippet = self.calculate_plaintext_snippet(plain_body)
            self.body = plaintext2html(plain_body, False)
        else:
            self.body = u''
            self.snippet = u''

    def calculate_html_snippet(self, text):
        """Snippet from HTML: strip tags, then take the plaintext snippet."""
        text = strip_tags(text)
        return self.calculate_plaintext_snippet(text)

    def calculate_plaintext_snippet(self, text):
        """Whitespace-collapsed prefix of at most SNIPPET_LENGTH chars."""
        return ' '.join(text.split())[:self.SNIPPET_LENGTH]

    @property
    def body(self):
        """The message body as unicode, decoded from the compacted blob
        (None when unset)."""
        if self._compacted_body is None:
            return None
        return decode_blob(self._compacted_body).decode('utf-8')

    @body.setter
    def body(self, value):
        if value is None:
            self._compacted_body = None
        else:
            self._compacted_body = encode_blob(value.encode('utf-8'))

    @property
    def participants(self):
        """
        Different messages in the thread may reference the same email
        address with different phrases. We partially deduplicate: if the same
        email address occurs with both empty and nonempty phrase, we don't
        separately return the (empty phrase, address) pair.

        """
        deduped_participants = defaultdict(set)
        chain = []
        if self.from_addr:
            chain.append(self.from_addr)

        if self.to_addr:
            chain.append(self.to_addr)

        if self.cc_addr:
            chain.append(self.cc_addr)

        if self.bcc_addr:
            chain.append(self.bcc_addr)

        for phrase, address in itertools.chain.from_iterable(chain):
            deduped_participants[address].add(phrase.strip())

        p = []
        for address, phrases in deduped_participants.iteritems():
            for phrase in phrases:
                # Emit the empty-phrase pair only when it's the sole one.
                if phrase != '' or len(phrases) == 1:
                    p.append((phrase, address))
        return p

    @property
    def attachments(self):
        """Parts flagged as attachments."""
        return [part for part in self.parts if part.is_attachment]

    @property
    def api_attachment_metadata(self):
        """API-shaped metadata dicts for each attachment part; content_id
        is included with surrounding angle brackets stripped."""
        resp = []
        for part in self.parts:
            if not part.is_attachment:
                continue
            k = {
                'content_type': part.block.content_type,
                'size': part.block.size,
                'filename': part.block.filename,
                'id': part.block.public_id
            }
            content_id = part.content_id
            if content_id:
                if content_id[0] == '<' and content_id[-1] == '>':
                    content_id = content_id[1:-1]
                k['content_id'] = content_id
            resp.append(k)
        return resp

    @property
    def versioned_relationships(self):
        # Relationships whose changes bump the revision (see HasRevisions).
        return ['parts']

    @property
    def propagated_attributes(self):
        # Attributes whose changes propagate (see HasRevisions mixin).
        return ['is_read', 'is_starred', 'messagecategories']

    @property
    def has_attached_events(self):
        """True when any part is a text/calendar block."""
        return 'text/calendar' in [p.block.content_type for p in self.parts]

    @property
    def attached_event_files(self):
        """All text/calendar parts."""
        return [
            part for part in self.parts
            if part.block.content_type == 'text/calendar'
        ]

    @property
    def account(self):
        return self.namespace.account

    def get_header(self, header, mid):
        """Re-parse the stored raw message and return the named header;
        returns None (with a warning) when the message had decode errors."""
        if self.decode_error:
            log.warning('Error getting message header', mid=mid)
            return

        parsed = mime.from_string(self.full_body.data)
        return parsed.headers.get(header)

    @classmethod
    def from_public_id(cls, public_id, namespace_id, db_session):
        """Baked-query lookup by (public_id, namespace_id) with the usual
        eager loads; raises if no unique match."""
        q = bakery(lambda s: s.query(cls))
        q += lambda q: q.filter(
            Message.public_id == bindparam('public_id'), Message.namespace_id
            == bindparam('namespace_id'))
        q += lambda q: q.options(
            joinedload(Message.thread).load_only('discriminator', 'public_id'),
            joinedload(Message.messagecategories).joinedload('category'),
            joinedload(Message.parts).joinedload('block'),
            joinedload(Message.events))
        return q(db_session).params(public_id=public_id,
                                    namespace_id=namespace_id).one()

    @classmethod
    def api_loading_options(cls, expand=False):
        """Loader options restricting columns and eager-loading relations
        for API serialization; `expand` adds the raw header columns."""
        columns = [
            'public_id', 'is_draft', 'from_addr', 'to_addr', 'cc_addr',
            'bcc_addr', 'is_read', 'is_starred', 'received_date', 'is_sent',
            'subject', 'snippet', 'version', 'from_addr', 'to_addr', 'cc_addr',
            'bcc_addr', 'reply_to', '_compacted_body', 'thread_id',
            'namespace_id'
        ]
        if expand:
            columns += ['message_id_header', 'in_reply_to', 'references']
        return (load_only(*columns), subqueryload('parts').joinedload('block'),
                subqueryload('thread').load_only('public_id', 'discriminator'),
                subqueryload('events').load_only('public_id', 'discriminator'),
                subqueryload('messagecategories').joinedload('category'))
Beispiel #58
0
class BallotBookModel(db.Model):
    """A stationary item representing a contiguous, inclusive range of
    ballots (fromBallot .. toBallot) within one election."""
    __tablename__ = 'ballotBook'
    stationaryItemId = db.Column(
        db.Integer,
        db.ForeignKey(StationaryItem.Model.__table__.c.stationaryItemId),
        primary_key=True,
        nullable=False)
    fromBallotStationaryItemId = db.Column(
        db.Integer,
        db.ForeignKey(Ballot.Model.__table__.c.stationaryItemId),
        nullable=False)
    toBallotStationaryItemId = db.Column(
        db.Integer,
        db.ForeignKey(Ballot.Model.__table__.c.stationaryItemId),
        nullable=False)

    stationaryItem = relationship(StationaryItem.Model,
                                  foreign_keys=[stationaryItemId])
    fromBallot = relationship(Ballot.Model,
                              foreign_keys=[fromBallotStationaryItemId])
    toBallot = relationship(Ballot.Model,
                            foreign_keys=[toBallotStationaryItemId])

    # Convenience proxies through the related stationary item / ballots.
    electionId = association_proxy("stationaryItem", "electionId")
    election = association_proxy("stationaryItem", "election")
    fromBallotId = association_proxy("fromBallot", "ballotId")
    toBallotId = association_proxy("toBallot", "ballotId")

    @hybrid_property
    def ballots(self):
        """All ballots of this election whose numeric ballotId falls in
        [fromBallotId, toBallotId]."""
        return Ballot.Model.query.filter(
            and_(
                cast(Ballot.Model.ballotId, Integer) >= cast(
                    self.fromBallotId, Integer),
                cast(Ballot.Model.ballotId, Integer) <= cast(
                    self.toBallotId, Integer))).filter(
                        Ballot.Model.electionId == self.electionId).all()

    @hybrid_property
    def available(self):
        """True when no live (non-deleted) invoice references any ballot
        in this book's range."""
        locked_invoices = db.session.query(Invoice.Model.invoiceId).join(
            InvoiceStationaryItem.Model,
            and_(InvoiceStationaryItem.Model.invoiceId ==
                 Invoice.Model.invoiceId)).join(
                     Ballot.Model,
                     and_(
                         Ballot.Model.stationaryItemId ==
                         InvoiceStationaryItem.Model.stationaryItemId,
                         cast(Ballot.Model.ballotId, Integer) >= cast(
                             self.fromBallotId, Integer),
                         cast(Ballot.Model.ballotId, Integer) <= cast(
                             self.toBallotId, Integer))).filter(
                                 # `== False` is required for SQLAlchemy
                                 # expression compilation; don't "fix" to
                                 # `is False`.  # noqa: E712
                                 Invoice.Model.delete == False).group_by(
                                     Invoice.Model.invoiceId).all()

        return len(locked_invoices) == 0

    def __init__(self, electionId, fromBallotId, toBallotId):
        """Create a ballot book spanning fromBallotId..toBallotId.

        Raises NotFoundException when either endpoint ballot does not
        exist in the given election.  Adds and flushes itself to the
        session.
        """
        fromBallot = Ballot.get_all(ballotId=fromBallotId,
                                    electionId=electionId)
        toBallot = Ballot.get_all(ballotId=toBallotId, electionId=electionId)

        # Bug fix: these comparisons used `is 0` (identity against an int
        # literal), which only worked by CPython small-int interning.
        if len(fromBallot) == 0:
            raise NotFoundException("Ballot not found (ballotId=%s)" %
                                    fromBallotId)
        else:
            fromBallot = fromBallot[0]

        if len(toBallot) == 0:
            raise NotFoundException("Ballot not found (ballotId=%s)" %
                                    toBallotId)
        else:
            toBallot = toBallot[0]

        stationary_item = StationaryItem.create(
            electionId=electionId,
            stationaryItemType=StationaryItemTypeEnum.Ballot)

        super(BallotBookModel, self).__init__(
            fromBallotStationaryItemId=fromBallot.stationaryItemId,
            toBallotStationaryItemId=toBallot.stationaryItemId,
            stationaryItemId=stationary_item.stationaryItemId)

        db.session.add(self)
        db.session.flush()
Beispiel #59
0
class Compound(Base):
    """A chemical compound keyed by InChIKey, with structure columns and
    reference-linked names, syntheses and origins."""
    __tablename__ = 'compound'
    id = Column('compound_id', Integer, primary_key=True)
    insert_date = Column('compound_insert_date',
                         TIMESTAMP,
                         nullable=False,
                         default=func.current_timestamp())
    inchikey = Column('compound_inchikey',
                      CHAR(27),
                      nullable=False,
                      unique=True)
    inchi = Column('compound_inchi', String(4000), nullable=False)
    molecular_formula = Column('compound_molecular_formula', String(255))
    molecular_weight = Column('compound_molecular_weight', Numeric(9, 4))
    accurate_mass = Column('compound_accurate_mass', Numeric(9, 4))
    m_plus_H = Column('compound_m_plus_H', Numeric(9, 4))
    m_plus_Na = Column('compound_m_plus_Na', Numeric(9, 4))
    smiles = Column('compound_smiles', String(2000))
    molblock = Column('compound_molblock', Text)
    cluster_id = Column('compound_cluster_id', Integer)
    node_id = Column('compound_node_id', Integer)
    # Simple relationships
    curation_data = relationship('CurationData', uselist=False)
    db_ids = relationship('ExternalDB')
    # Complex Associations
    # These get associations with references attached as tuple
    names = association_proxy('compound_name', 'name_reference')
    syntheses = association_proxy('compound_synthesis', 'synthesis_reference')
    origins = association_proxy('compound_origin', 'origin_reference')

    def __repr__(self):
        return "<Compound(inchikey='%s')>" % self.inchikey

    # The four hybrid properties below previously duplicated the same two
    # queries; they now share these private helpers.
    def _original_compound_origin(self):
        """The CompoundOrigin row flagged as the original isolation
        reference (raises if absent or ambiguous)."""
        return object_session(self).query(CompoundOrigin)\
            .filter(CompoundOrigin.compound_id == self.id)\
            .filter(CompoundOrigin.original_isolation_reference == 1)\
            .one()

    def _original_compound_name(self):
        """The CompoundName row flagged as the original isolation name
        (raises if absent or ambiguous)."""
        return object_session(self).query(CompoundName)\
            .filter(CompoundName.compound_id == self.id)\
            .filter(CompoundName.original_isolation_name == 1)\
            .one()

    @hybrid_property
    def original_origin_reference(self):
        """Reference attached to the original isolation origin."""
        return self._original_compound_origin().origin_reference

    @hybrid_property
    def original_origin(self):
        """The original isolation origin."""
        return self._original_compound_origin().origin

    @hybrid_property
    def original_name_reference(self):
        """Reference attached to the original isolation name."""
        return self._original_compound_name().name_reference

    @hybrid_property
    def original_name(self):
        """The original isolation name."""
        return self._original_compound_name().name
Beispiel #60
0
class Media(object):
    """
    Media metadata and a collection of related files.

    """
    # Key/value metadata: proxies the '_meta' collection to plain values,
    # creating MediaMeta rows on assignment.
    meta = association_proxy('_meta', 'value', creator=MediaMeta)

    # Class-level query factory bound to the shared DBSession.
    query = DBSession.query_property(MediaQuery)

    # TODO: replace '_thumb_dir' with something more generic, like 'name',
    #       so that its other uses throughout the code make more sense.
    _thumb_dir = 'media'

    def __init__(self):
        """Ensure a freshly-created Media instance always has an Author."""
        if self.author is not None:
            return
        self.author = Author()

    def __repr__(self):
        """Debug representation identifying the media item by its slug."""
        return '<Media: {0!r}>'.format(self.slug)

    @classmethod
    def example(cls, **kwargs):
        """Create, persist (add + flush) and return a sample Media row.

        Keyword arguments override the built-in defaults; a slug is
        derived from the title only when none is supplied.

        :param kwargs: attribute overrides; every key must already be an
            attribute of Media (asserted below).
        :returns: the flushed Media instance.
        """
        instance = Media()  # renamed from 'media' to avoid shadowing the module-level table
        values = dict(
            title=u'Foo Media',
            author=Author(u'Joe', u'*****@*****.**'),
            type=None,
        )
        values.update(kwargs)
        # Only hit get_available_slug (which may query the DB) when the
        # caller did not supply a slug; the old setdefault form evaluated
        # it unconditionally.
        if 'slug' not in values:
            values['slug'] = get_available_slug(Media, values['title'])
        for key, value in values.items():
            assert hasattr(instance, key)
            setattr(instance, key, value)
        DBSession.add(instance)
        DBSession.flush()
        return instance

    def set_tags(self, tags):
        """Set the tags relations of this media, creating them as needed.

        :param tags: A list or comma separated string of tags to use.
        """
        parsed = tags
        if isinstance(parsed, basestring):
            # Comma-separated string -> list of tag names.
            parsed = extract_tags(parsed)
        if isinstance(parsed, list) and parsed:
            # Resolve names to Tag rows, creating missing ones.
            parsed = fetch_and_create_tags(parsed)
        self.tags = parsed or []

    def set_categories(self, cats):
        """Set the related categories of this media.

        :param cats: A list of category IDs to set.
        """
        if not cats:
            self.categories = []
            return
        matched = Category.query.filter(Category.id.in_(cats)).all()
        self.categories = matched or []

    def update_status(self):
        """Ensure the type (audio/video) and encoded flag are properly set.

        Call this after modifying any files belonging to this item.
        """
        previously_encoded = self.encoded
        self.type = self._update_type()
        self.encoded = self._update_encoding()
        # Fire the event only on the not-encoded -> encoded transition.
        if self.encoded and not previously_encoded:
            events.Media.encoding_done(self)

    def _update_type(self):
        """Derive the media type from the attached files.

        Video wins over audio; returns None when neither kind of file
        is present.
        """
        for candidate in (VIDEO, AUDIO):
            if any(f.type == candidate for f in self.files):
                return candidate
        return None

    def _update_encoding(self):
        """Return True when a workable file/player combination exists."""
        # Check the pickiest consumer first: the most common podcasting
        # app has the POOREST format support.
        if self.podcast_id and not pick_podcast_media_file(self):
            return False
        # Then the browser, which has the BEST format support.
        return bool(pick_any_media_file(self))

    @property
    def is_published(self):
        """True when this media is publishable, reviewed, encoded and
        currently inside its publish window.

        Unsaved instances (``id is None``) are never published.
        """
        if self.id is None:
            return False
        if not (self.publishable and self.reviewed and self.encoded):
            return False
        # Evaluate both window bounds against a single timestamp; the old
        # code called datetime.now() twice, so the bounds could disagree.
        now = datetime.now()
        return (self.publish_on is not None and self.publish_on <= now
                and (self.publish_until is None or self.publish_until >= now))

    @property
    def resource(self):
        """A Resource handle ('media', id) wrapping this instance."""
        handle = Resource('media', self.id, media=self)
        return handle

    def increment_views(self):
        """Increment the number of views in the database.

        We avoid concurrency issues by incrementing JUST the views and
        not allowing modified_on to be updated automatically.

        :returns: the incremented view count.
        """
        if self.id is None:
            # Not yet persisted: just bump the in-memory value.
            self.views += 1
            return self.views

        # Server-side UPDATE that touches only the views column, so
        # concurrent requests cannot clobber each other's counts.
        DBSession.execute(media.update()\
            .values(views=media.c.views + 1)\
            .where(media.c.id == self.id))

        # Increment the views by one for the rest of the request,
        # but don't allow the ORM to increment the views too.
        attributes.set_committed_value(self, 'views', self.views + 1)
        return self.views

    def increment_likes(self):
        """Add one like, refresh popularity scores and return the count."""
        self.likes = self.likes + 1
        self.update_popularity()
        return self.likes

    def increment_dislikes(self):
        """Add one dislike, refresh popularity scores and return the count."""
        self.dislikes = self.dislikes + 1
        self.update_popularity()
        return self.dislikes

    def update_popularity(self):
        """Recompute the cached popularity scores from likes/dislikes.

        Unpublished media always carry zero popularity.
        """
        if not self.is_published:
            self.popularity_points = 0
            self.popularity_likes = 0
            self.popularity_dislikes = 0
            return
        published = self.publish_on
        self.popularity_points = calculate_popularity(
            published,
            self.likes - self.dislikes,
        )
        self.popularity_likes = calculate_popularity(
            published,
            self.likes,
        )
        self.popularity_dislikes = calculate_popularity(
            published,
            self.dislikes,
        )

    @validates('description')
    def _validate_description(self, key, value):
        """Keep the plain-text mirror in sync when description changes."""
        # line_break_xhtml is applied twice, mirroring the original code.
        broken = line_break_xhtml(value)
        self.description_plain = line_break_xhtml(broken)
        return value

    @validates('description_plain')
    def _validate_description_plain(self, key, value):
        """Normalize the plain-text field by stripping XHTML markup."""
        stripped = strip_xhtml(value, True)
        return stripped

    def get_uris(self):
        """Aggregate the URIs of every file attached to this media item."""
        return [uri
                for media_file in self.files
                for uri in media_file.get_uris()]