Example 1
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Load the TSIG Keys table
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
    scopes.create()

    # Create the scope and resource columns
    scope_col = Column('scope', scopes, nullable=False, server_default='POOL')
    scope_col.create(tsigkeys_table)

    # Start with nullable=True and populate_default=True, then convert
    # to nullable=False once all rows have been populated with a resource_id
    resource_id_col = Column('resource_id', UUID, default=default_pool_id,
                             nullable=True)
    resource_id_col.create(tsigkeys_table, populate_default=True)

    # Now that we've populated the default pool id in existing rows, MySQL
    # will let us convert this over to nullable=False
    tsigkeys_table.c.resource_id.alter(nullable=False)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
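
A minimal, self-contained sketch of the pattern in Example 1, assuming sqlalchemy-migrate is installed (importing migrate.changeset is what patches create()/drop() onto Column); the TSIG_SCOPES values here are illustrative, not the real Designate constants:

from sqlalchemy import Column, Enum, MetaData, Table
import migrate.changeset  # noqa: F401 -- adds Column.create()/.drop()

TSIG_SCOPES = ['POOL', 'ZONE']  # hypothetical values, for illustration only
meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    # On PostgreSQL, Enum.create() emits CREATE TYPE; MySQL and SQLite
    # instead inline the allowed values into the column definition.
    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
    scopes.create()

    # A server_default lets pre-existing rows satisfy NOT NULL immediately.
    scope_col = Column('scope', scopes, nullable=False, server_default='POOL')
    scope_col.create(tsigkeys_table)
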
Example 2
def _add_visibility_column(meta):
    enum = Enum('private', 'public', 'shared', 'community', metadata=meta,
                name='image_visibility')
    enum.create()
    v_col = Column('visibility', enum, nullable=True, server_default=None)
    op.add_column('images', v_col)
    op.create_index('visibility_image_idx', 'images', ['visibility'])
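
Example 2 mixes a sqlalchemy-migrate style Column with Alembic's op API. A hedged, pure-Alembic sketch of the same step, assuming an existing images table:

import sqlalchemy as sa
from alembic import op


def upgrade():
    enum = sa.Enum('private', 'public', 'shared', 'community',
                   name='image_visibility')
    # checkfirst=True skips CREATE TYPE when the type already exists
    # (only relevant on PostgreSQL; other dialects inline the values).
    enum.create(op.get_bind(), checkfirst=True)
    op.add_column('images', sa.Column('visibility', enum, nullable=True))
    op.create_index('visibility_image_idx', 'images', ['visibility'])
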
Example 3
def downgrade(migrate_engine):
    """Function removes key_pairs type field."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table('key_pairs', meta, autoload=True)
    shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
    enum = Enum(metadata=meta, name='keypair_types')

    if hasattr(key_pairs.c, 'type'):
        key_pairs.c.type.drop()

    if hasattr(shadow_key_pairs.c, 'type'):
        shadow_key_pairs.c.type.drop()

    enum.drop()
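
Example 3 constructs the Enum with a name but no values, which is enough for a drop: DROP TYPE only needs the type's name. The columns must go first, though, since on PostgreSQL DROP TYPE fails while a column still references the type. A checkfirst guard (a sketch, not the original code) additionally tolerates databases where no standalone TYPE object was ever created:

enum = Enum(metadata=meta, name='keypair_types')
enum.drop(checkfirst=True)  # no-op where the type does not exist
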
Example 4
def upgrade():
    enum_types = Enum("BILLING", "SHIPPING", name='address_types')
    enum_types.create(op.get_bind(), checkfirst=False)

    op.create_table('address',
                    Column('id', Integer, primary_key=True),
                    Column('customer_id', Integer, ForeignKey('customer.id')),
                    Column('type', enum_types),
                    Column('line1', String),
                    Column('line2', String),
                    Column('suburb', String),
                    Column('postcode', String),
                    Column('state', String),
                    Column('country', String)
                    )
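
A hedged sketch of the matching downgrade for Example 4, using the same names: the table is dropped first, then the enum type it referenced.

def downgrade():
    op.drop_table('address')
    Enum("BILLING", "SHIPPING", name='address_types').drop(
        op.get_bind(), checkfirst=False)
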
Example 5
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    for table_name in ['instances', 'shadow_instances']:
        enum = Enum('owner', 'admin', name='%s0locked_by' % table_name)
        if migrate_engine.url.get_dialect() is postgresql.dialect:
            # Need to explicitly create Postgres enums during migrations
            enum.create(migrate_engine, checkfirst=False)

        instances = Table(table_name, meta, autoload=True)
        locked_by = Column('locked_by', enum)
        instances.create_column(locked_by)
        instances.update().\
            where(instances.c.locked == True).\
            values(locked_by='admin').execute()
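
A sketch of the reverse of Example 5 under the same assumptions: remove the column from both tables, then drop the per-table enum type that was created explicitly on PostgreSQL.

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    for table_name in ['instances', 'shadow_instances']:
        instances = Table(table_name, meta, autoload=True)
        instances.c.locked_by.drop()
        if migrate_engine.url.get_dialect() is postgresql.dialect:
            enum = Enum('owner', 'admin', name='%s0locked_by' % table_name)
            enum.drop(migrate_engine, checkfirst=False)
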
Example 6
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    keys = Enum(name='key', *ZONE_ATTRIBUTE_KEYS)

    domain_attributes_table = Table(
        'domain_attributes', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('version', Integer(), default=1, nullable=False),
        Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
        Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),

        Column('key', keys),
        Column('value', String(255), nullable=False),
        Column('domain_id', UUID(), nullable=False),

        UniqueConstraint('key', 'value', 'domain_id',
                         name='unique_attributes'),
        ForeignKeyConstraint(['domain_id'], ['domains.id'],
                             ondelete='CASCADE'),

        mysql_engine='INNODB',
        mysql_charset='utf8'
    )

    domains_table = Table('domains', meta, autoload=True)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)
    types.create()

    # Add type and transferred_at to domains
    type_ = Column('type', types, default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)

    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    domain_attributes_table.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
Example 7
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Load the TSIG Keys table
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)
    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)

    # Drop the scope and resource columns
    tsigkeys_table.c.scope.drop()
    tsigkeys_table.c.resource_id.drop()
    scopes.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
Example 8
def upgrade(migrate_engine):
    """Function adds key_pairs type field."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table("key_pairs", meta, autoload=True)
    shadow_key_pairs = Table("shadow_key_pairs", meta, autoload=True)

    enum = Enum("ssh", "x509", metadata=meta, name="keypair_types")
    enum.create()

    keypair_type = Column("type", enum, nullable=False,
                          server_default=keypair.KEYPAIR_TYPE_SSH)

    if hasattr(key_pairs.c, "type"):
        key_pairs.c.type.drop()

    if hasattr(shadow_key_pairs.c, "type"):
        shadow_key_pairs.c.type.drop()

    key_pairs.create_column(keypair_type)
    shadow_key_pairs.create_column(keypair_type.copy())
Example 9
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    status_enum = Enum(name='service_statuses', metadata=meta, *SERVICE_STATES)
    status_enum.create()

    service_status_table = Table('service_statuses', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),

        Column('service_name', String(40), nullable=False),
        Column('hostname', String(255), nullable=False),
        Column('heartbeated_at', DateTime, nullable=True),
        Column('status', status_enum, nullable=False),
        Column('stats', Text, nullable=False),
        Column('capabilities', Text, nullable=False),
    )
    service_status_table.create()
Example 11
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    migrations = Table("migrations", meta, autoload=True)
    shadow_migrations = Table("shadow_migrations", meta, autoload=True)

    enum = Enum("migration", "resize", "live-migration", "evacuation",
                metadata=meta, name="migration_type")
    enum.create()

    migration_type = Column("migration_type", enum, nullable=True)

    if not hasattr(migrations.c, "migration_type"):
        migrations.create_column(migration_type)
    if not hasattr(shadow_migrations.c, "migration_type"):
        shadow_migrations.create_column(migration_type.copy())

    hidden = Column("hidden", Boolean, default=False)
    if not hasattr(migrations.c, "hidden"):
        migrations.create_column(hidden)
    if not hasattr(shadow_migrations.c, "hidden"):
        shadow_migrations.create_column(hidden.copy())
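
Example 11 guards every create_column with a hasattr test, so the migration can be re-run safely. The same idea as a small helper (a sketch, not part of the original script):

def _create_column_if_missing(table, column):
    # Reflected tables expose their columns as attributes of table.c,
    # so a missing attribute means the column does not exist yet.
    if not hasattr(table.c, column.name):
        table.create_column(column)
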
Example 12
def upgrade():
    enum_roles = Enum("SALES", "ACCOUNTS", name='role_types')
    enum_roles.create(op.get_bind(), checkfirst=False)

    op.create_table('contact_roles',
                    Column('id', Integer, primary_key=True),
                    Column('role', enum_roles)
                    )

    # Create 'contact' before the tables whose foreign keys reference
    # contact.id, so the references resolve at DDL time.
    op.create_table('contact',
                    Column('id', Integer, primary_key=True),
                    Column('customer_id', Integer, ForeignKey('customer.id')),
                    Column('firstname', String),
                    Column('lastname', String),
                    Column('email', String),
                    )

    enum_phones = Enum("OFFICE", "MOBILE", "OTHER", name='phone_types')
    enum_phones.create(op.get_bind(), checkfirst=False)

    op.create_table('contact_phones',
                    Column('id', Integer, primary_key=True),
                    Column('contact_id', Integer, ForeignKey('contact.id')),
                    Column('type', enum_phones),
                    Column('number', String)
                    )

    op.create_table('contact_roles_association',
                    Column('contact_id', Integer, ForeignKey('contact.id')),
                    Column('contact_roles_id', Integer,
                           ForeignKey('contact_roles.id'))
                    )
Example 13
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()

    for dom in domains:
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()

    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
Example 14
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    enum = Enum("ssh", "x509", metadata=meta, name="keypair_types")
    enum.create(checkfirst=True)

    keypairs = Table(
        "key_pairs",
        meta,
        Column("created_at", DateTime),
        Column("updated_at", DateTime),
        Column("id", Integer, primary_key=True, nullable=False),
        Column("name", String(255), nullable=False),
        Column("user_id", String(255), nullable=False),
        Column("fingerprint", String(255)),
        Column("public_key", Text()),
        Column("type", enum, nullable=False,
               server_default=keypair.KEYPAIR_TYPE_SSH),
        UniqueConstraint("user_id", "name", name="uniq_key_pairs0user_id0name"),
        mysql_engine="InnoDB",
        mysql_charset="utf8",
    )
    keypairs.create(checkfirst=True)
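
Example 14 passes checkfirst=True to both enum.create() and keypairs.create(), which makes the migration idempotent. Roughly, checkfirst guards the DDL with an existence test along these lines (a simplification of what SQLAlchemy actually emits):

with migrate_engine.connect() as conn:
    if not migrate_engine.dialect.has_table(conn, 'key_pairs'):
        keypairs.create(bind=conn)
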
Example 15
class DeclEnumType(SchemaType, TypeDecorator):

    def __init__(self, enum):
        super(DeclEnumType, self).__init__()
        self.enum = enum
        to_lower = lambda m: "_" + m.group(1).lower()
        self.name = 'ck{}'.format(re.sub('([A-Z])', to_lower, enum.__name__))
        self.impl = Enum(*enum.values(), name=self.name)

    def _set_table(self, table, column):
        self.impl._set_table(table, column)

    def copy(self):
        return DeclEnumType(self.enum)

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        return value.value

    def process_result_value(self, value, dialect):
        if value is None:
            return None
        return self.enum.from_string(value.strip())
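
DeclEnumType assumes an enum class that exposes values() and from_string(), with members carrying a .value attribute. A hypothetical minimal class satisfying that contract, together with its use as a column type:

class ArticleState(object):
    # Illustrative stand-in for the declarative enum class the real code uses.
    class _Symbol(object):
        def __init__(self, value):
            self.value = value

    draft = _Symbol('draft')
    published = _Symbol('published')

    @classmethod
    def values(cls):
        return ('draft', 'published')

    @classmethod
    def from_string(cls, value):
        return getattr(cls, value)


state_column = Column('state', DeclEnumType(ArticleState), nullable=False)
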
Example 16
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Add a domain & record creation status for async backends
    domain_statuses = Enum(name='domain_statuses', metadata=meta,
                           *RESOURCE_STATUSES)
    domain_statuses.create()

    record_statuses = Enum(name='record_statuses', metadata=meta,
                           *RESOURCE_STATUSES)
    record_statuses.create()

    domain_status = Column('status', domain_statuses, nullable=False,
                           server_default='ACTIVE', default='ACTIVE')

    record_status = Column('status', record_statuses, nullable=False,
                           server_default='ACTIVE', default='ACTIVE')

    domain_status.create(domains_table, populate_default=True)
    record_status.create(records_table, populate_default=True)
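
Example 16 sets both default and server_default on each status column: default is applied client-side by SQLAlchemy on INSERT, while server_default becomes the table's DDL DEFAULT clause, which is what lets populate_default=True backfill rows that already exist. Compactly:

status = Column('status', domain_statuses, nullable=False,
                default='ACTIVE',         # applied by SQLAlchemy on INSERT
                server_default='ACTIVE')  # written into the table DDL
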
Example 17
class Article(Base):
    "Represent an article from a feed."
    __tablename__ = 'article'

    id = Column(Integer, primary_key=True)
    entry_id = Column(String)
    link = Column(String)
    link_hash = Column(Binary)
    title = Column(String)
    content = Column(String)
    comments = Column(String)
    lang = Column(String)
    date = Column(UTCDateTime, default=utc_now)
    retrieved_date = Column(UTCDateTime, default=utc_now)

    # integration control
    article_type = Column(Enum(ArticleType), default=None, nullable=True)

    # parsing
    tags = Column(PickleType, default=[])
    vector = Column(TSVECTOR)
    # reasons
    cluster_reason = Column(Enum(ClusterReason), default=None)
    cluster_score = Column(Integer, default=None)
    cluster_tfidf_neighbor_size = Column(Integer, default=None)
    cluster_tfidf_with = Column(Integer, default=None)

    # foreign keys
    user_id = Column(Integer, nullable=False)
    feed_id = Column(Integer, nullable=False)
    category_id = Column(Integer)
    cluster_id = Column(Integer)

    # relationships
    user = relationship('User', back_populates='articles')
    cluster = relationship('Cluster',
                           back_populates='articles',
                           foreign_keys=[cluster_id])
    category = relationship('Category',
                            back_populates='articles',
                            foreign_keys=[category_id])
    feed = relationship('Feed',
                        back_populates='articles',
                        foreign_keys=[feed_id])

    __table_args__ = (
        ForeignKeyConstraint([user_id], ['user.id'], ondelete='CASCADE'),
        ForeignKeyConstraint([feed_id], ['feed.id'], ondelete='CASCADE'),
        ForeignKeyConstraint([category_id], ['category.id'],
                             ondelete='CASCADE'),
        ForeignKeyConstraint([cluster_id], ['cluster.id']),
        Index('ix_article_uid_cluid', user_id, cluster_id),
        Index('ix_article_uid_fid_cluid', user_id, feed_id, cluster_id),
        Index('ix_article_uid_cid_cluid', user_id, category_id, cluster_id),
        Index('ix_article_uid_fid_eid', user_id, feed_id, entry_id),
        Index('ix_article_uid_cid_linkh', user_id, category_id, link_hash),
        Index('ix_article_retrdate', retrieved_date),
    )

    def __repr__(self):
        """Represent an article."""
        return "<Article(feed_id=%s, id=%s)>" % (self.feed_id, self.id)

    # TFIDF vectors
    @property
    def simple_vector(self):
        return get_simple_vector(self.vector)[0]

    @property
    def simple_vector_magnitude(self):
        return get_simple_vector(self.vector)[1]

    def get_tfidf_vector(self,
                         frequencies,
                         corpus_size,
                         will_be_left_member=False):
        vector, size = get_simple_vector(self.vector)
        return TFIDFVector(vector,
                           size,
                           frequencies,
                           corpus_size,
                           will_be_left_member=will_be_left_member)

    @property
    def content_generator(self):
        from jarr.lib.content_generator import get_content_generator
        return get_content_generator(self)
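
Unlike the migration examples, Article hands Enum a Python class (Enum(ArticleType)): SQLAlchemy persists the member names and returns enum members when loading rows. ArticleType itself is defined elsewhere in jarr; a hypothetical shape:

import enum


class ArticleType(enum.Enum):
    # illustrative members only; the real class lives in the jarr codebase
    image = 'image'
    video = 'video'
    embedded = 'embedded'
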
Example 18
class Message(MailSyncBase, HasRevisions, HasPublicID):
    @property
    def API_OBJECT_NAME(self):
        return 'message' if not self.is_draft else 'draft'

    namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
                          index=True,
                          nullable=False)
    namespace = relationship('Namespace', load_on_pending=True)

    # Do delete messages if their associated thread is deleted.
    thread_id = Column(Integer,
                       ForeignKey('thread.id', ondelete='CASCADE'),
                       nullable=False)
    thread = relationship('Thread',
                          backref=backref('messages',
                                          order_by='Message.received_date',
                                          passive_deletes=True,
                                          cascade='all, delete-orphan'))

    from_addr = Column(JSON, nullable=False, default=lambda: [])
    sender_addr = Column(JSON, nullable=True)
    reply_to = Column(JSON, nullable=True, default=lambda: [])
    to_addr = Column(JSON, nullable=False, default=lambda: [])
    cc_addr = Column(JSON, nullable=False, default=lambda: [])
    bcc_addr = Column(JSON, nullable=False, default=lambda: [])
    in_reply_to = Column(JSON, nullable=True)
    # From: http://tools.ietf.org/html/rfc4130, section 5.3.3,
    # max message_id_header is 998 characters
    message_id_header = Column(String(998), nullable=True)
    # There is no hard limit on subject length in the spec, but 255 is common.
    subject = Column(String(255), nullable=True, default='')
    received_date = Column(DateTime, nullable=False, index=True)
    size = Column(Integer, nullable=False)
    data_sha256 = Column(String(255), nullable=True)

    is_read = Column(Boolean, server_default=false(), nullable=False)
    is_starred = Column(Boolean, server_default=false(), nullable=False)

    # For drafts (both Inbox-created and otherwise)
    is_draft = Column(Boolean, server_default=false(), nullable=False)
    is_sent = Column(Boolean, server_default=false(), nullable=False)

    # REPURPOSED
    state = Column(
        Enum('draft', 'sending', 'sending failed', 'sent', 'actions_pending',
             'actions_committed'))

    @property
    def categories_changes(self):
        return self.state == 'actions_pending'

    @categories_changes.setter
    def categories_changes(self, has_changes):
        if has_changes is True:
            self.state = 'actions_pending'
        else:
            self.state = 'actions_committed'

    _compacted_body = Column(LONGBLOB, nullable=True)
    snippet = Column(String(191), nullable=False)
    SNIPPET_LENGTH = 191

    # A reference to the block holding the full contents of the message
    full_body_id = Column(ForeignKey('block.id', name='full_body_id_fk'),
                          nullable=True)
    full_body = relationship('Block', cascade='all, delete')

    # this might be a mail-parsing bug, or just a message from a bad client
    decode_error = Column(Boolean,
                          server_default=false(),
                          nullable=False,
                          index=True)

    # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
    references = Column(JSON, nullable=True)

    # Only used for drafts.
    version = Column(Integer, nullable=False, server_default='0')

    # only on messages from Gmail (TODO: use different table)
    #
    # X-GM-MSGID is guaranteed unique across an account but not globally
    # across all Gmail.
    #
    # Messages between different accounts *may* have the same X-GM-MSGID,
    # but it's unlikely.
    #
    # (Gmail info from
    # http://mailman13.u.washington.edu/pipermail/imap-protocol/
    # 2014-July/002290.html.)
    g_msgid = Column(BigInteger, nullable=True, index=True, unique=False)
    g_thrid = Column(BigInteger, nullable=True, index=True, unique=False)

    # The uid as set in the X-INBOX-ID header of a sent message we create
    inbox_uid = Column(String(64), nullable=True, index=True)

    def regenerate_inbox_uid(self):
        """
        The value of inbox_uid is simply the draft public_id and version,
        concatenated. Because the inbox_uid identifies the draft on the remote
        provider, we regenerate it on each draft revision so that we can delete
        the old draft and add the new one on the remote."""

        from inbox.sendmail.message import generate_message_id_header
        self.inbox_uid = '{}-{}'.format(self.public_id, self.version)
        self.message_id_header = generate_message_id_header(self.inbox_uid)

    categories = association_proxy(
        'messagecategories',
        'category',
        creator=lambda category: MessageCategory(category=category))

    # FOR INBOX-CREATED MESSAGES:

    is_created = Column(Boolean, server_default=false(), nullable=False)

    # Whether this draft is a reply to an existing thread.
    is_reply = Column(Boolean)

    reply_to_message_id = Column(Integer,
                                 ForeignKey('message.id'),
                                 nullable=True)
    reply_to_message = relationship('Message', uselist=False)

    def mark_for_deletion(self):
        """
        Mark this message to be deleted by an asynchronous delete
        handler.

        """
        self.deleted_at = datetime.datetime.utcnow()

    @validates('subject')
    def sanitize_subject(self, key, value):
        # Trim overlong subjects, and remove null bytes. The latter can result
        # when, for example, UTF-8 text decoded from an RFC2047-encoded header
        # contains null bytes.
        if value is None:
            return
        if len(value) > 255:
            value = value[:255]
        value = value.replace('\0', '')
        return value

    @classmethod
    def create_from_synced(cls, account, mid, folder_name, received_date,
                           body_string):
        """
        Parses message data and writes out db metadata and MIME blocks.

        Returns the new Message, which links to the new Part and Block objects
        through relationships. All new objects are uncommitted.

        Threads are not computed here; that must be done separately.

        Parameters
        ----------
        mid : int
            The account backend-specific message identifier; it's only used for
            logging errors.

        raw_message : str
            The full message including headers (encoded).

        """
        _rqd = [account, mid, folder_name, body_string]
        if not all([v is not None for v in _rqd]):
            raise ValueError(
                'Required keyword arguments: account, mid, folder_name, '
                'body_string')
        # stop trickle-down bugs
        assert account.namespace is not None
        assert not isinstance(body_string, unicode)

        msg = Message()

        from inbox.models.block import Block
        body_block = Block()
        body_block.namespace_id = account.namespace.id
        body_block.data = body_string
        body_block.content_type = "text/plain"
        msg.full_body = body_block

        msg.namespace_id = account.namespace.id

        try:
            parsed = mime.from_string(body_string)
            msg._parse_metadata(parsed, body_string, received_date, account.id,
                                folder_name, mid)
        except (mime.DecodingError, AttributeError, RuntimeError,
                TypeError) as e:
            parsed = None
            log.error('Error parsing message metadata',
                      folder_name=folder_name,
                      account_id=account.id,
                      error=e)
            msg._mark_error()

        if parsed is not None:
            plain_parts = []
            html_parts = []
            for mimepart in parsed.walk(
                    with_self=parsed.content_type.is_singlepart()):
                try:
                    if mimepart.content_type.is_multipart():
                        log.warning('multipart sub-part found',
                                    account_id=account.id,
                                    folder_name=folder_name,
                                    mid=mid)
                        continue  # TODO should we store relations?
                    msg._parse_mimepart(mid, mimepart, account.namespace.id,
                                        html_parts, plain_parts)
                except (mime.DecodingError, AttributeError, RuntimeError,
                        TypeError) as e:
                    log.error('Error parsing message MIME parts',
                              folder_name=folder_name,
                              account_id=account.id,
                              error=e)
                    msg._mark_error()
            msg.calculate_body(html_parts, plain_parts)

            # Occasionally people try to send messages to way too many
            # recipients. In such cases, empty the field and treat as a parsing
            # error so that we don't break the entire sync.
            for field in ('to_addr', 'cc_addr', 'bcc_addr', 'references'):
                value = getattr(msg, field)
                if json_field_too_long(value):
                    log.error('Recipient field too long',
                              field=field,
                              account_id=account.id,
                              folder_name=folder_name,
                              mid=mid)
                    setattr(msg, field, [])
                    msg._mark_error()

        return msg

    def _parse_metadata(self, parsed, body_string, received_date, account_id,
                        folder_name, mid):
        mime_version = parsed.headers.get('Mime-Version')
        # sometimes MIME-Version is '1.0 (1.0)', hence the .startswith()
        if mime_version is not None and not mime_version.startswith('1.0'):
            log.warning('Unexpected MIME-Version',
                        account_id=account_id,
                        folder_name=folder_name,
                        mid=mid,
                        mime_version=mime_version)

        self.data_sha256 = sha256(body_string).hexdigest()

        self.subject = parsed.subject
        self.from_addr = parse_mimepart_address_header(parsed, 'From')
        self.sender_addr = parse_mimepart_address_header(parsed, 'Sender')
        self.reply_to = parse_mimepart_address_header(parsed, 'Reply-To')
        self.to_addr = parse_mimepart_address_header(parsed, 'To')
        self.cc_addr = parse_mimepart_address_header(parsed, 'Cc')
        self.bcc_addr = parse_mimepart_address_header(parsed, 'Bcc')

        self.in_reply_to = parsed.headers.get('In-Reply-To')
        self.message_id_header = parsed.headers.get('Message-Id')

        self.received_date = received_date if received_date else \
            get_internaldate(parsed.headers.get('Date'),
                             parsed.headers.get('Received'))

        # Custom Inbox header
        self.inbox_uid = parsed.headers.get('X-INBOX-ID')

        # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
        self.references = parse_references(
            parsed.headers.get('References', ''),
            parsed.headers.get('In-Reply-To', ''))

        self.size = len(body_string)  # includes headers text

    def _parse_mimepart(self, mid, mimepart, namespace_id, html_parts,
                        plain_parts):
        disposition, _ = mimepart.content_disposition
        content_id = mimepart.headers.get('Content-Id')
        content_type, params = mimepart.content_type
        filename = params.get('name')
        is_text = content_type.startswith('text')
        if disposition not in (None, 'inline', 'attachment'):
            log.error('Unknown Content-Disposition',
                      message_public_id=self.public_id,
                      bad_content_disposition=mimepart.content_disposition)
            self._mark_error()
            return

        if disposition == 'attachment':
            self._save_attachment(mimepart, disposition, content_type,
                                  filename, content_id, namespace_id, mid)
            return

        if (disposition == 'inline'
                and not (is_text and filename is None and content_id is None)):
            # Some clients set Content-Disposition: inline on text MIME parts
            # that we really want to treat as part of the text body. Don't
            # treat those as attachments.
            self._save_attachment(mimepart, disposition, content_type,
                                  filename, content_id, namespace_id, mid)
            return

        if is_text:
            if mimepart.body is None:
                return
            normalized_data = mimepart.body.encode('utf-8', 'strict')
            normalized_data = normalized_data.replace('\r\n', '\n'). \
                replace('\r', '\n')
            if content_type == 'text/html':
                html_parts.append(normalized_data)
            elif content_type == 'text/plain':
                plain_parts.append(normalized_data)
            else:
                log.info('Saving other text MIME part as attachment',
                         content_type=content_type,
                         mid=mid)
                self._save_attachment(mimepart, 'attachment', content_type,
                                      filename, content_id, namespace_id, mid)
            return

        # Finally, if we get a non-text MIME part without Content-Disposition,
        # treat it as an attachment.
        self._save_attachment(mimepart, 'attachment', content_type, filename,
                              content_id, namespace_id, mid)

    def _save_attachment(self, mimepart, content_disposition, content_type,
                         filename, content_id, namespace_id, mid):
        from inbox.models import Part, Block
        block = Block()
        block.namespace_id = namespace_id
        block.filename = _trim_filename(filename, mid=mid)
        block.content_type = content_type
        part = Part(block=block, message=self)
        if content_id:
            content_id = content_id[:255]
        part.content_id = content_id
        part.content_disposition = content_disposition
        data = mimepart.body or ''
        if isinstance(data, unicode):
            data = data.encode('utf-8', 'strict')
        block.data = data

    def _mark_error(self):
        """
        Mark message as having encountered errors while parsing.

        Message parsing can fail for several reasons. Occasionally iconv will
        fail via maximum recursion depth. EAS messages may be missing Date and
        Received headers. Flanker may fail to handle some out-of-spec messages.

        In this case, we keep what metadata we've managed to parse but also
        mark the message as having failed to parse properly.

        """
        self.decode_error = True
        # fill in required attributes with filler data if we could not parse them
        self.size = 0
        if self.received_date is None:
            self.received_date = datetime.datetime.utcnow()
        if self.body is None:
            self.body = ''
        if self.snippet is None:
            self.snippet = ''

    def calculate_body(self, html_parts, plain_parts):
        html_body = ''.join(html_parts).decode('utf-8').strip()
        plain_body = '\n'.join(plain_parts).decode('utf-8').strip()
        if html_body:
            self.snippet = self.calculate_html_snippet(html_body)
            self.body = html_body
        elif plain_body:
            self.snippet = self.calculate_plaintext_snippet(plain_body)
            self.body = plaintext2html(plain_body, False)
        else:
            self.body = u''
            self.snippet = u''

    def calculate_html_snippet(self, text):
        text = strip_tags(text)
        return self.calculate_plaintext_snippet(text)

    def calculate_plaintext_snippet(self, text):
        return ' '.join(text.split())[:self.SNIPPET_LENGTH]

    @property
    def body(self):
        if self._compacted_body is None:
            return None
        return decode_blob(self._compacted_body).decode('utf-8')

    @body.setter
    def body(self, value):
        if value is None:
            self._compacted_body = None
        else:
            self._compacted_body = encode_blob(value.encode('utf-8'))

    @property
    def participants(self):
        """
        Different messages in the thread may reference the same email
        address with different phrases. We partially deduplicate: if the same
        email address occurs with both empty and nonempty phrase, we don't
        separately return the (empty phrase, address) pair.

        """
        deduped_participants = defaultdict(set)
        chain = []
        if self.from_addr:
            chain.append(self.from_addr)

        if self.to_addr:
            chain.append(self.to_addr)

        if self.cc_addr:
            chain.append(self.cc_addr)

        if self.bcc_addr:
            chain.append(self.bcc_addr)

        for phrase, address in itertools.chain.from_iterable(chain):
            deduped_participants[address].add(phrase.strip())

        p = []
        for address, phrases in deduped_participants.iteritems():
            for phrase in phrases:
                if phrase != '' or len(phrases) == 1:
                    p.append((phrase, address))
        return p

    @property
    def attachments(self):
        return [part for part in self.parts if part.is_attachment]

    @property
    def api_attachment_metadata(self):
        resp = []
        for part in self.parts:
            if not part.is_attachment:
                continue
            k = {
                'content_type': part.block.content_type,
                'size': part.block.size,
                'filename': part.block.filename,
                'id': part.block.public_id
            }
            content_id = part.content_id
            if content_id:
                if content_id[0] == '<' and content_id[-1] == '>':
                    content_id = content_id[1:-1]
                k['content_id'] = content_id
            resp.append(k)
        return resp

    @property
    def versioned_relationships(self):
        return ['parts']

    @property
    def propagated_attributes(self):
        return ['is_read', 'is_starred', 'messagecategories']

    @property
    def has_attached_events(self):
        return 'text/calendar' in [p.block.content_type for p in self.parts]

    @property
    def attached_event_files(self):
        return [
            part for part in self.parts
            if part.block.content_type == 'text/calendar'
        ]

    @property
    def account(self):
        return self.namespace.account

    def get_header(self, header, mid):
        if self.decode_error:
            log.warning('Error getting message header', mid=mid)
            return

        parsed = mime.from_string(self.full_body.data)
        return parsed.headers.get(header)

    @classmethod
    def from_public_id(cls, public_id, namespace_id, db_session):
        q = bakery(lambda s: s.query(cls))
        q += lambda q: q.filter(
            Message.public_id == bindparam('public_id'), Message.namespace_id
            == bindparam('namespace_id'))
        q += lambda q: q.options(
            joinedload(Message.thread).load_only('discriminator', 'public_id'),
            joinedload(Message.messagecategories).joinedload('category'),
            joinedload(Message.parts).joinedload('block'),
            joinedload(Message.events))
        return q(db_session).params(public_id=public_id,
                                    namespace_id=namespace_id).one()
Example 19
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith("postgresql"):
        migrate_engine.execute(
            "ALTER TYPE domain_statuses RENAME TO resource_statuses;")

        with migrate_engine.connect() as conn:
            conn = conn.execution_options(isolation_level="AUTOCOMMIT")
            conn.execute(
                "ALTER TYPE resource_statuses ADD VALUE 'ERROR' "
                "AFTER 'DELETED'")

    actions = Enum(name='actions', metadata=meta, *ACTIONS)
    actions.create()

    resource_statuses = Enum(name='resource_statuses', metadata=meta,
                             *RESOURCE_STATUSES)

    # Upgrade the domains table.
    domains_table.c.status.alter(
        type=resource_statuses,
        default='PENDING', server_default='PENDING')

    action_column = Column('action', actions,
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(domains_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)
        constraint.create()

    # Upgrade the records table.
    if dialect.startswith("postgresql"):
        sql = "ALTER TABLE records ALTER COLUMN status DROP DEFAULT, " \
              "ALTER COLUMN status TYPE resource_statuses USING " \
              "status::text::resource_statuses, ALTER COLUMN status " \
              "SET DEFAULT 'PENDING';"
        migrate_engine.execute(sql)
        record_statuses = Enum(name='record_statuses', metadata=meta,
                               *RESOURCE_STATUSES)
        record_statuses.drop()
    else:
        records_table.c.status.alter(
            type=resource_statuses,
            default='PENDING', server_default='PENDING')

    action_column = Column('action', actions,
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(records_table)
    serial_column = Column('serial', Integer(), server_default='1',
                           nullable=False)
    serial_column.create(records_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'hash', name='unique_record', table=records_table)
        constraint.create()
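
The PostgreSQL branch of Example 19 switches the connection to AUTOCOMMIT before running ALTER TYPE ... ADD VALUE because, prior to PostgreSQL 12, that statement cannot execute inside a transaction block. The essential shape (a sketch):

with migrate_engine.connect() as conn:
    conn = conn.execution_options(isolation_level='AUTOCOMMIT')
    conn.execute("ALTER TYPE resource_statuses ADD VALUE 'ERROR'")
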
Example 20
class Task(Base):
    """Analysis task queue."""
    __tablename__ = "tasks"

    id = Column(Integer(), primary_key=True)
    target = Column(Text(), nullable=False)
    category = Column(String(255), nullable=False)
    timeout = Column(Integer(), server_default="0", nullable=False)
    priority = Column(Integer(), server_default="1", nullable=False)
    custom = Column(String(255), nullable=True)
    owner = Column(String(64), nullable=True)
    machine = Column(String(255), nullable=True)
    package = Column(String(255), nullable=True)
    tags = relationship("Tag",
                        secondary=tasks_tags,
                        cascade="all, delete",
                        single_parent=True,
                        backref=backref("task", cascade="all"),
                        lazy="subquery")
    options = Column(String(255), nullable=True)
    platform = Column(String(255), nullable=True)
    memory = Column(Boolean, nullable=False, default=False)
    enforce_timeout = Column(Boolean, nullable=False, default=False)
    clock = Column(DateTime(timezone=False),
                   default=datetime.now,
                   nullable=False)
    added_on = Column(DateTime(timezone=False),
                      default=datetime.now,
                      nullable=False)
    started_on = Column(DateTime(timezone=False), nullable=True)
    completed_on = Column(DateTime(timezone=False), nullable=True)
    status = Column(Enum(TASK_PENDING,
                         TASK_RUNNING,
                         TASK_COMPLETED,
                         TASK_REPORTED,
                         TASK_RECOVERED,
                         TASK_FAILED_ANALYSIS,
                         TASK_FAILED_PROCESSING,
                         TASK_FAILED_REPORTING,
                         name="status_type"),
                    server_default=TASK_PENDING,
                    nullable=False)
    sample_id = Column(Integer, ForeignKey("samples.id"), nullable=True)
    sample = relationship("Sample", backref="tasks")
    guest = relationship("Guest",
                         uselist=False,
                         backref="tasks",
                         cascade="save-update, delete")
    errors = relationship("Error",
                          backref="tasks",
                          cascade="save-update, delete")

    def to_dict(self):
        """Converts object to dict.
        @return: dict
        """
        d = {}
        for column in self.__table__.columns:
            value = getattr(self, column.name)
            if isinstance(value, datetime):
                d[column.name] = value.strftime("%Y-%m-%d %H:%M:%S")
            else:
                d[column.name] = value

        # Tags are a relationship, so there is no column to iterate.
        d["tags"] = [tag.name for tag in self.tags]
        return d

    def to_json(self):
        """Converts object to JSON.
        @return: JSON data
        """
        return json.dumps(self.to_dict())

    def __init__(self, target=None):
        self.target = target

    def __repr__(self):
        return "<Task('{0}','{1}')>".format(self.id, self.target)
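
A hypothetical use of the serialization helpers above; to_dict() formats datetime columns and folds the tags relationship in by hand:

task = Task(target='/tmp/sample.exe')
task.clock = task.added_on = datetime.now()
print(task.to_json())
# -> {"id": null, "target": "/tmp/sample.exe", ..., "tags": []}
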
Example 21
class NodeBase(InstanceModelMixin):
    """
    Typed vertex in the service topology.

    Nodes may have zero or more :class:`Relationship` instances to other nodes, together forming
    a many-to-many node graph.

    Usually an instance of a :class:`NodeTemplate`.
    """

    __tablename__ = 'node'

    __private_fields__ = ('type_fk', 'host_fk', 'service_fk',
                          'node_template_fk')

    INITIAL = 'initial'
    CREATING = 'creating'
    CREATED = 'created'
    CONFIGURING = 'configuring'
    CONFIGURED = 'configured'
    STARTING = 'starting'
    STARTED = 'started'
    STOPPING = 'stopping'
    DELETING = 'deleting'
    DELETED = 'deleted'
    ERROR = 'error'

    # Note: 'deleted' isn't actually part of the TOSCA spec, since according to the description of
    # the 'deleting' state: "Node is transitioning from its current state to one where it is deleted
    # and its state is no longer tracked by the instance model." However, we prefer to be able to
    # retrieve information about deleted nodes, so we chose to add this 'deleted' state to enable us
    # to do so.

    STATES = (INITIAL, CREATING, CREATED, CONFIGURING, CONFIGURED, STARTING,
              STARTED, STOPPING, DELETING, DELETED, ERROR)

    _OP_TO_STATE = {
        'create': {
            'transitional': CREATING,
            'finished': CREATED
        },
        'configure': {
            'transitional': CONFIGURING,
            'finished': CONFIGURED
        },
        'start': {
            'transitional': STARTING,
            'finished': STARTED
        },
        'stop': {
            'transitional': STOPPING,
            'finished': CONFIGURED
        },
        'delete': {
            'transitional': DELETING,
            'finished': DELETED
        }
    }

    # region one_to_one relationships

    @declared_attr
    def host(cls):  # pylint: disable=method-hidden
        """
        Node in which we are hosted (can be ``None``).

        Normally the host node is found by following the relationship graph (relationships with
        ``host`` roles) to final nodes (with ``host`` roles).

        :type: :class:`Node`
        """
        return relationship.one_to_one_self(cls, 'host_fk')

    # endregion

    # region one_to_many relationships

    @declared_attr
    def tasks(cls):
        """
        Associated tasks.

        :type: [:class:`Task`]
        """
        return relationship.one_to_many(cls, 'task')

    @declared_attr
    def interfaces(cls):
        """
        Associated interfaces.

        :type: {:obj:`basestring`: :class:`Interface`}
        """
        return relationship.one_to_many(cls, 'interface', dict_key='name')

    @declared_attr
    def properties(cls):
        """
        Associated immutable parameters.

        :type: {:obj:`basestring`: :class:`Property`}
        """
        return relationship.one_to_many(cls, 'property', dict_key='name')

    @declared_attr
    def attributes(cls):
        """
        Associated mutable parameters.

        :type: {:obj:`basestring`: :class:`Attribute`}
        """
        return relationship.one_to_many(cls, 'attribute', dict_key='name')

    @declared_attr
    def artifacts(cls):
        """
        Associated artifacts.

        :type: {:obj:`basestring`: :class:`Artifact`}
        """
        return relationship.one_to_many(cls, 'artifact', dict_key='name')

    @declared_attr
    def capabilities(cls):
        """
        Associated exposed capabilities.

        :type: {:obj:`basestring`: :class:`Capability`}
        """
        return relationship.one_to_many(cls, 'capability', dict_key='name')

    @declared_attr
    def outbound_relationships(cls):
        """
        Relationships to other nodes.

        :type: [:class:`Relationship`]
        """
        return relationship.one_to_many(
            cls,
            'relationship',
            other_fk='source_node_fk',
            back_populates='source_node',
            rel_kwargs=dict(order_by='Relationship.source_position',
                            collection_class=ordering_list('source_position',
                                                           count_from=0)))

    @declared_attr
    def inbound_relationships(cls):
        """
        Relationships from other nodes.

        :type: [:class:`Relationship`]
        """
        return relationship.one_to_many(
            cls,
            'relationship',
            other_fk='target_node_fk',
            back_populates='target_node',
            rel_kwargs=dict(order_by='Relationship.target_position',
                            collection_class=ordering_list('target_position',
                                                           count_from=0)))

    # endregion

    # region many_to_one relationships

    @declared_attr
    def service(cls):
        """
        Containing service.

        :type: :class:`Service`
        """
        return relationship.many_to_one(cls, 'service')

    @declared_attr
    def node_template(cls):
        """
        Source node template (can be ``None``).

        :type: :class:`NodeTemplate`
        """
        return relationship.many_to_one(cls, 'node_template')

    @declared_attr
    def type(cls):
        """
        Node type.

        :type: :class:`Type`
        """
        return relationship.many_to_one(
            cls, 'type', back_populates=relationship.NO_BACK_POP)

    # endregion

    # region association proxies

    @declared_attr
    def service_name(cls):
        return relationship.association_proxy('service',
                                              'name',
                                              type=':obj:`basestring`')

    @declared_attr
    def node_template_name(cls):
        return relationship.association_proxy('node_template',
                                              'name',
                                              type=':obj:`basestring`')

    # endregion

    # region foreign_keys

    @declared_attr
    def type_fk(cls):
        """For Node many-to-one to Type"""
        return relationship.foreign_key('type')

    @declared_attr
    def host_fk(cls):
        """For Node one-to-one to Node"""
        return relationship.foreign_key('node', nullable=True)

    @declared_attr
    def service_fk(cls):
        """For Service one-to-many to Node"""
        return relationship.foreign_key('service')

    @declared_attr
    def node_template_fk(cls):
        """For Node many-to-one to NodeTemplate"""
        return relationship.foreign_key('node_template')

    # endregion

    description = Column(Text,
                         doc="""
    Human-readable description.

    :type: :obj:`basestring`
    """)

    state = Column(Enum(*STATES, name='node_state'),
                   nullable=False,
                   default=INITIAL,
                   doc="""
    TOSCA state.

    :type: :obj:`basestring`
    """)

    version = Column(Integer,
                     default=1,
                     doc="""
    Used by :mod:`aria.storage.instrumentation`.

    :type: :obj:`int`
    """)

    __mapper_args__ = {
        'version_id_col': version
    }  # Enable SQLAlchemy automatic version counting

    @classmethod
    def determine_state(cls, op_name, is_transitional):
        """
        :return: the state the node should be in as a result of running the operation on this node.

        E.g. if we are running tosca.interfaces.node.lifecycle.Standard.create, then
        the resulting state should either 'creating' (if the task just started) or 'created'
        (if the task ended).

        If the operation is not a standard TOSCA lifecycle operation, then we return None.
        """

        state_type = 'transitional' if is_transitional else 'finished'
        try:
            return cls._OP_TO_STATE[op_name][state_type]
        except KeyError:
            return None

    def is_available(self):
        return self.state not in (self.INITIAL, self.DELETED, self.ERROR)

    def get_outbound_relationship_by_name(self, name):
        for the_relationship in self.outbound_relationships:
            if the_relationship.name == name:
                return the_relationship
        return None

    def get_inbound_relationship_by_name(self, name):
        for the_relationship in self.inbound_relationships:
            if the_relationship.name == name:
                return the_relationship
        return None

    @property
    def host_address(self):
        if self.host and self.host.attributes:
            attribute = self.host.attributes.get('ip')
            if attribute is not None:
                return attribute.value
        return None

    @property
    def as_raw(self):
        return collections.OrderedDict(
            (('name', self.name), ('type_name', self.type.name),
             ('properties', formatting.as_raw_dict(self.properties)),
             ('attributes', formatting.as_raw_dict(self.attributes)),
             ('interfaces', formatting.as_raw_list(self.interfaces)),
             ('artifacts', formatting.as_raw_list(self.artifacts)),
             ('capabilities', formatting.as_raw_list(self.capabilities)),
             ('relationships',
              formatting.as_raw_list(self.outbound_relationships))))
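
determine_state() maps a standard TOSCA lifecycle operation to the node state it implies; any other operation name yields None. For example:

NodeBase.determine_state('create', is_transitional=True)   # -> 'creating'
NodeBase.determine_state('stop', is_transitional=False)    # -> 'configured'
NodeBase.determine_state('heal', is_transitional=True)     # -> None
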
Example 22
from sqlalchemy import (create_engine, Integer, String, Date, Enum, Float,
                        ForeignKey, Table, Column as SQAColumn)
from sqlalchemy.orm import sessionmaker, relationship, remote, foreign
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import DataError, IntegrityError
import predict


def Column(*args, **kwargs):
    kwargs.setdefault('nullable', False)
    return SQAColumn(*args, **kwargs)


Base = declarative_base()

win_loss_enum = Enum('win', 'loss', name='win_loss')


class Game(Base):
    __tablename__ = 'game'

    team = Column(String, primary_key=True, index=True)
    opponent = Column(String, primary_key=True, index=True)
    date = Column(Date, primary_key=True, index=True)
    result = Column(win_loss_enum, index=True)
    points = Column(Integer)
    field_goals = Column(Integer)
    field_goal_attempts = Column(Integer)
    three_points = Column(Integer)
    three_point_attempts = Column(Integer)
    free_throws = Column(Integer)
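
The module-level Column wrapper flips SQLAlchemy's nullability default, so every Game column above is NOT NULL unless it says otherwise; the two declarations below are equivalent:

points = Column(Integer)                     # wrapper: nullable=False implied
points = SQAColumn(Integer, nullable=False)  # plain SQLAlchemy equivalent
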
Example 23
class Episode(Base):

    __table__ = Table(
        'episode', Base.metadata, Column('id', Integer, primary_key=True),
        Column('name', String(32), nullable=False),
        Column('basename', String(32), nullable=False),
        Column('status', Enum('act', 'dis'), default='act', nullable=False),
        Column('project_id', Integer, ForeignKey(Project.id), nullable=False),
        Column('shotgun_id', Integer), Column('description', String(255)),
        Index('ix_proj_name', 'project_id', 'name', 'status'),
        Index('ix_sg', 'shotgun_id'),
        UniqueConstraint('project_id', 'name', name='uq_proj_name'),
        UniqueConstraint('project_id', 'basename', name='uq_proj_basename'),
        UniqueConstraint('shotgun_id', name='uq_sg'))
    _sequences = relationship('Sequence',
                              backref='episode',
                              lazy='dynamic',
                              order_by='Sequence.name',
                              cascade="all, delete-orphan")

    @property
    def parent(self):
        '''
        Return Episode parent Project entity.
        '''
        return self.project

    @classmethod
    def find(cls,
             project=None,
             name=None,
             status=None,
             id=None,
             shotgun_id=None):
        '''
        Return Episode instances by query arguments

            Args:
                project     (Project) : parent Project instance.
                name            (str) : Episode name.
                status          (str) : Episode status.
                id         (int/list) : Episode id(s).
                shotgun_id (int/list) : Episode shotgun id(s).

            Returns:
                A list of Episode instances matching find arguments.
        '''
        query = cls.query(project=project,
                          name=name,
                          id=id,
                          status=status,
                          shotgun_id=shotgun_id)
        return query.all()

    @classmethod
    def create(cls, name, project, status=None, shotgun_id=None):
        '''
        Create an Episode instance.

            Args:
                name            (str) : Episode name.
                project     (Project) : parent Project instance.
                status          (str) : Episode status.
                shotgun_id (int/list) : Episode shotgun id(s).

            Returns:
                New Episode instance.
        '''
        cls.assert_isinstance(project, 'Project')

        data = dict(name=name,
                    basename=name,
                    status=status,
                    project_id=project.id,
                    shotgun_id=shotgun_id)

        return super(Episode, cls).create(**data)
Example 24
class Medline(_Base):
    """
    A MEDLINE or PubMed record.

    Attributes:

        pmid
            the record's identifier (PubMed ID)
        status
            the current status of this record (see `Medline.STATES`)
        journal
            the journal name (Medline TA)
        created
            the record's creation date
        completed
            the record's completion date
        revised
            the record's revision date
        modified
            the date the record was last modified in the DB

    Relations:

        sections
            a :class:`list` of the record's text sections
        authors
            a :class:`list` of the record's author names
        identifiers
            a :class:`dict` of the record's alternate IDs using the
            :attr:`AlternateID.key` as dictionary keys
        descriptors
            a :class:`list` of the record's MeSH descriptors
        qualifiers
            a :class:`list` of the record's MeSH qualifiers
        chemicals
            a :class:`list` of the record's chemicals
        databases
            a :class:`list` of the record's external DB references

    Primary Key: ``pmid``
    """

    STATES = frozenset({
        'Completed', 'In-Process', 'PubMed-not-MEDLINE', 'In-Data-Review',
        'Publisher', 'MEDLINE', 'OLDMEDLINE'
    })
    CHILDREN = (
        Section,
        Identifier,
        Database,
        Author,
        Descriptor,
        Qualifier,  # Qualifier last!
    )
    TABLENAMES = [cls.__tablename__ for cls in CHILDREN]
    TABLES = {cls.__tablename__: cls.__table__ for cls in CHILDREN}

    __tablename__ = 'records'

    authors = relation(Author,
                       backref='medline',
                       cascade='all, delete-orphan',
                       order_by=Author.__table__.c.pos)
    chemicals = relation(Chemical,
                         backref='medline',
                         cascade='all, delete-orphan',
                         order_by=Chemical.__table__.c.idx)
    databases = relation(
        Database,
        backref='medline',
        cascade='all, delete-orphan',
    )
    descriptors = relation(Descriptor,
                           backref='medline',
                           cascade='all, delete-orphan',
                           order_by=Descriptor.__table__.c.num)
    identifiers = relation(Identifier,
                           backref='medline',
                           cascade='all, delete-orphan',
                           collection_class=column_mapped_collection(
                               Identifier.namespace))
    keywords = relation(Keyword,
                        backref='medline',
                        cascade='all, delete-orphan',
                        order_by=(Keyword.__table__.c.owner,
                                  Keyword.__table__.c.cnt))
    publication_types = relation(PublicationType,
                                 backref='medline',
                                 cascade='all, delete-orphan')
    qualifiers = relation(Qualifier, backref='medline')
    sections = relation(Section,
                        backref='medline',
                        cascade='all, delete-orphan',
                        order_by=Section.__table__.c.seq)

    pmid = Column(BigInteger,
                  CheckConstraint('pmid > 0'),
                  primary_key=True,
                  autoincrement=False)
    status = Column(Enum(*STATES, name='state'), nullable=False)
    journal = Column(Unicode(length=256),
                     CheckConstraint("journal <> ''"),
                     nullable=False)
    pub_date = Column(Unicode(length=256),
                      CheckConstraint("pub_date <> ''"),
                      nullable=False)
    issue = Column(Unicode(length=256),
                   CheckConstraint("issue <> ''"),
                   nullable=True)
    pagination = Column(Unicode(length=256),
                        CheckConstraint("pagination <> ''"),
                        nullable=True)
    created = Column(Date, nullable=False)
    completed = Column(Date, nullable=True)
    revised = Column(Date, nullable=True)
    modified = Column(Date,
                      default=date.today,
                      onupdate=date.today,
                      nullable=False)

    def __init__(self,
                 pmid: int,
                 status: str,
                 journal: str,
                 pub_date: str,
                 created: date,
                 completed: date = None,
                 revised: date = None,
                 issue: str = None,
                 pagination: str = None):
        assert pmid > 0, pmid
        assert status in Medline.STATES, repr(status)
        assert journal, repr(journal)
        assert pub_date, repr(pub_date)
        assert isinstance(created, date), repr(created)
        assert completed is None or isinstance(completed,
                                               date), repr(completed)
        assert revised is None or isinstance(revised, date), repr(revised)
        assert pagination is None or pagination
        assert issue is None or issue
        self.pmid = pmid
        self.status = status
        self.journal = journal
        self.pub_date = pub_date
        self.issue = issue
        self.pagination = pagination
        self.created = created
        self.completed = completed
        self.revised = revised

    def __str__(self):
        return '{}\n'.format('\t'.join(
            map(str, [
                NULL(self.pmid),
                NULL(self.status),
                NULL(self.journal),
                NULL(self.pub_date),
                NULL(self.issue),
                NULL(self.pagination),
                DATE(self.created),
                DATE(self.completed),
                DATE(self.revised),
                DATE(date.today() if self.modified is None else self.modified)
            ])))

    def __repr__(self):
        return "Medline<{}>".format(self.pmid)

    def __eq__(self, other):
        return isinstance(other, Medline) and \
               self.pmid == other.pmid and \
               self.status == other.status and \
               self.journal == other.journal and \
               self.pub_date == other.pub_date and \
               self.issue == other.issue and \
               self.pagination == other.pagination and \
               self.created == other.created and \
               self.completed == other.completed and \
               self.revised == other.revised

    def citation(self):
        issue = '; {}'.format(self.issue) if self.issue else ""
        pagination = ': {}'.format(self.pagination) if self.pagination else ""
        return "{}{}{}".format(self.pub_date, issue, pagination)

    @classmethod
    def insert(cls, data: dict):
        """
        Insert *data* into all relevant tables.
        """
        target_ins = {tname: cls.TABLES[tname].insert()
                      for tname in cls.TABLENAMES}
        conn = _db.engine.connect()
        transaction = conn.begin()

        try:
            if cls.__tablename__ in data and len(data[cls.__tablename__]):
                conn.execute(cls.__table__.insert(), data[cls.__tablename__])

            for tname in cls.TABLENAMES:
                if tname in data and len(data[tname]):
                    conn.execute(target_ins[tname], data[tname])

            transaction.commit()
        except:
            transaction.rollback()
            raise
        finally:
            conn.close()

    @classmethod
    def select(cls, pmids: list, attributes: iter) -> iter([RowProxy]):
        """
        Return the `pmid` and *attributes*
        for each row as a `sqlalchemy.engine.RowProxy`
        that matches one of the *pmids*.
        """
        if not len(pmids):
            return []

        c = cls.__table__.c
        mapping = {col.key: col for col in c}
        columns = [mapping[name] for name in attributes]
        columns.insert(0, c.pmid)
        query = select(columns, c.pmid.in_(pmids))
        return _fetch_all(query)

    @classmethod
    def selectAll(cls, pmids: list) -> iter([RowProxy]):
        """
        Return all columns
        for each row as a `sqlalchemy.engine.RowProxy`
        that matches one of the *pmids*.
        """
        if not len(pmids):
            return []

        c = cls.__table__.c
        query = select([cls.__table__], c.pmid.in_(pmids))
        return _fetch_all(query)

    @classmethod
    def delete(cls, primary_keys: list):
        """
        Delete records and their dependent entities (authors, identifiers,
        etc.) for the given *primary_keys* (a list of PMIDs).
        """
        if not len(primary_keys):
            return

        t = cls.__table__
        query = t.delete(t.c.pmid.in_(primary_keys))
        conn = _db.engine.connect()
        transaction = conn.begin()

        try:
            conn.execute(query)
            transaction.commit()
        except:
            transaction.rollback()
            raise
        finally:
            conn.close()

    @classmethod
    def existing(cls, pmids: list) -> set:
        "Return the sub- `set` of all *pmids* that exist in the DB."
        if not len(pmids):
            return set()

        c = cls.__table__.c
        query = select([c.pmid], c.pmid.in_(pmids))
        conn = _db.engine.connect()

        try:
            return {row[0] for row in conn.execute(query)}
        finally:
            conn.close()

    @classmethod
    def missing(cls, pmids: list) -> set:
        "Return the sub- `set` of all *pmids* that do not exist in the DB."
        return set(pmids) - Medline.existing(pmids)

    @classmethod
    def modifiedBefore(cls, pmids: list, before: date) -> set:
        """
        Return the sub- `set` of all *pmids* that have been `modified`
        *before* a `datetime.date` in the DB.
        """
        if not len(pmids):
            return set()

        c = cls.__table__.c
        query = select([c.pmid], c.pmid.in_(pmids) & (c.modified < before))
        conn = _db.engine.connect()

        try:
            return {row[0] for row in conn.execute(query)}
        finally:
            conn.close()
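# A hedged usage sketch (the row shapes below are assumptions derived from
# the column definitions above, not part of the original module): `insert`
# takes a dict keyed by table name, each value a list of row dicts, while
# the query helpers operate on lists of PMIDs:
#
#     Medline.insert({
#         'records': [dict(pmid=42, status='MEDLINE', journal='Nature',
#                          pub_date='2020 Jan', created=date.today())],
#     })
#     Medline.existing([42, 43])          # -> {42}, if only 42 was inserted
#     Medline.select([42], ['journal'])   # -> rows of (pmid, journal)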
Example n. 25
class Keyword(_Base, SelectMixin):
    """
    Keywords, external or curated by the NLM.

    Attributes:

        pmid
            the record's identifier (PubMed ID)
        owner
            the entity that provided the keyword
        cnt
            a unique counter for all keywords from a given owner and record
            (starting from 1)
        major
            if the keyword is a major topic of this article
        name
            the keyword itself

    Primary Key: ``(pmid, owner, cnt)``
    """

    __tablename__ = 'keywords'

    OWNERS = frozenset({'NASA', 'PIP', 'KIE', 'NLM', 'NOTNLM', 'HHS'})

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    owner = Column(Enum(*OWNERS, name='owner'), primary_key=True)
    cnt = Column(SmallInteger, CheckConstraint("cnt > 0"), primary_key=True)
    major = Column(Boolean, nullable=False)
    name = Column(UnicodeText, CheckConstraint("name <> ''"), nullable=False)

    def __init__(self,
                 pmid: int,
                 owner: str,
                 cnt: int,
                 name: str,
                 major: bool = False):
        assert pmid > 0, pmid
        assert owner in Keyword.OWNERS, repr(owner)
        assert cnt > 0, cnt
        assert name, repr(name)
        self.pmid = pmid
        self.owner = owner
        self.cnt = cnt
        self.major = major
        self.name = name

    def __str__(self):
        return '{}\t{}\t{}\t{}\t{}\n'.format(NULL(self.pmid), NULL(self.owner),
                                             NULL(self.cnt),
                                             'T' if self.major else 'F',
                                             STRING(self.name))

    def __repr__(self):
        return "Keyword<{}:{}:{}>".format(self.pmid, self.owner, self.cnt)

    def __eq__(self, other):
        return isinstance(other, Keyword) and \
               self.pmid == other.pmid and \
               self.owner == other.owner and \
               self.cnt == other.cnt and \
               self.major == other.major and \
               self.name == other.name
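# A minimal construction sketch with hypothetical values; assuming the
# NULL/STRING helpers pass non-null values through unchanged, a row
# serializes as one tab-separated line:
#
#     kw = Keyword(42, 'NLM', 1, 'genomics', major=True)
#     str(kw)   # -> '42\tNLM\t1\tT\tgenomics\n'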
Example n. 26
def downgrade():
    op.drop_table('address')
    Enum(name="address_types").drop(op.get_bind(), checkfirst=False)
Example n. 27
 Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
 Column('deleted',
        CHAR(32),
        nullable=False,
        default='0',
        server_default='0'),
 Column('deleted_at', DateTime, nullable=True, default=None),
 Column('shard',
        SmallInteger(),
        nullable=False,
        default=lambda ctxt: default_shard(ctxt, 'id')),
 Column('tenant_id', String(36), default=None, nullable=True),
 Column('name', String(255), nullable=False),
 Column('email', String(255), nullable=False),
 Column('description', Unicode(160), nullable=True),
 Column("type", Enum(name='type', *ZONE_TYPES), nullable=False),
 Column('transferred_at', DateTime, default=None),
 Column('ttl', Integer, default=CONF.default_ttl, nullable=False),
 Column('serial', Integer, default=timeutils.utcnow_ts, nullable=False),
 Column('refresh',
        Integer,
        default=CONF.default_soa_refresh,
        nullable=False),
 Column('retry', Integer, default=CONF.default_soa_retry, nullable=False),
 Column('expire', Integer, default=CONF.default_soa_expire, nullable=False),
 Column('minimum',
        Integer,
        default=CONF.default_soa_minimum,
        nullable=False),
 Column('status',
        Enum(name='resource_statuses', *RESOURCE_STATUSES),
Example n. 28
class User(SitemapMixin, db.Model):

    __tablename__ = "users"
    __table_args__ = (
        CheckConstraint("length(username) <= 50",
                        name="users_valid_username_length"),
        CheckConstraint(
            "username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
            name="users_valid_username",
        ),
    )

    __repr__ = make_repr("username")

    username = Column(CIText, nullable=False, unique=True)
    name = Column(String(length=100), nullable=False)
    password = Column(String(length=128), nullable=False)
    password_date = Column(DateTime,
                           nullable=True,
                           server_default=sql.func.now())
    is_active = Column(Boolean, nullable=False, server_default=sql.false())
    is_superuser = Column(Boolean, nullable=False, server_default=sql.false())
    is_moderator = Column(Boolean, nullable=False, server_default=sql.false())
    date_joined = Column(DateTime, server_default=sql.func.now())
    last_login = Column(DateTime,
                        nullable=False,
                        server_default=sql.func.now())
    disabled_for = Column(
        Enum(DisableReason, values_callable=lambda x: [e.value for e in x]),
        nullable=True,
    )
    two_factor_allowed = Column(Boolean,
                                nullable=False,
                                server_default=sql.false())
    totp_secret = Column(Binary(length=20), nullable=True)

    emails = orm.relationship("Email",
                              backref="user",
                              cascade="all, delete-orphan",
                              lazy=False)

    @property
    def primary_email(self):
        primaries = [x for x in self.emails if x.primary]
        if primaries:
            return primaries[0]

    @hybrid_property
    def email(self):
        primary_email = self.primary_email
        return primary_email.email if primary_email else None

    @email.expression
    def email(self):
        return (select([Email.email
                        ]).where((Email.user_id == self.id)
                                 & (Email.primary.is_(True))).as_scalar())

    @property
    def has_two_factor(self):
        # TODO: This is where user.u2f_provisioned et al.
        # will also go.
        return self.two_factor_allowed and self.totp_secret is not None
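# A hedged usage sketch: the hybrid `email` property works both on instances
# (scanning the loaded `emails` collection) and in queries, where the
# expression form emits a correlated scalar subquery against Email:
#
#     user.email                                         # Python-side access
#     session.query(User).filter(User.email == 'a@b.c')  # SQL-side subquery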
Example n. 29
class IBMDedicatedHostDisk(db.Model):
    """
    Model for Dedicated host disk
    """
    ID_KEY = "id"
    NAME_KEY = "name"
    ZONE_KEY = "zone"
    RESOURCE_ID_KEY = "resource_id"
    HREF_KEY = "href"
    SIZE_KEY = "size"
    AVAILABLE_KEY = "available"
    INTERFACE_TYPE_KEY = "interface_type"
    PROVISIONABLE_KEY = "provisionable"
    SUPPORTED_INSTANCE_INTERFACE_TYPES_KEY = "supported_instance_interface_types"
    LIFECYCLE_STATE_KEY = "lifecycle_state"

    __tablename__ = "ibm_dedicated_host_disks"
    # instance_disks is missing; it should be a relationship, but IBMInstance
    # does not currently expose a disks relationship, so it is skipped here
    # resource_type is also omitted; it does not appear to be needed
    id = Column(String(32), primary_key=True)
    name = Column(String(255))
    zone = Column(String(20), nullable=False)
    resource_id = Column(String(64))
    href = Column(Text)
    size = Column(Integer)
    available = Column(Integer)
    interface_type = Column(Enum("nvme"), default="nvme")
    provisionable = Column(Boolean, default=True)
    supported_instance_interface_types = Column(JSON)
    lifecycle_state = Column(Enum("deleting", "failed", "pending", "stable",
                                  "updating", "waiting", "suspended"),
                             default="stable")

    dedicated_host_id = Column(String(32),
                               ForeignKey("ibm_dedicated_hosts.id"))

    def __init__(self,
                 name,
                 zone,
                 resource_id,
                 href,
                 size,
                 available,
                 interface_type,
                 provisionable,
                 supported_instance_interface_types,
                 lifecycle_state=None):
        self.id = str(uuid.uuid4().hex)
        self.name = name
        self.zone = zone
        self.resource_id = resource_id
        self.href = href
        self.size = size
        self.available = available
        self.interface_type = interface_type
        self.provisionable = provisionable
        self.supported_instance_interface_types = supported_instance_interface_types
        self.lifecycle_state = lifecycle_state

    def to_json(self):
        """
        Return a JSON representation of the object
        """
        return {
            self.ID_KEY: self.id,
            self.NAME_KEY: self.name,
            self.ZONE_KEY: self.zone,
            self.RESOURCE_ID_KEY: self.resource_id,
            self.HREF_KEY: self.href,
            self.SIZE_KEY: self.size,
            self.AVAILABLE_KEY: self.available,
            self.INTERFACE_TYPE_KEY: self.interface_type,
            self.PROVISIONABLE_KEY: self.provisionable,
            self.SUPPORTED_INSTANCE_INTERFACE_TYPES_KEY:
            self.supported_instance_interface_types,
            self.LIFECYCLE_STATE_KEY: self.lifecycle_state
        }
Example n. 30
class Article(Model):
    __tablename__ = 'Article'
    id = Column(Integer, primary_key=True)
    DTYPE = Column(String(length=31))
    version = Column(Integer)
    format = Column(Enum('NEWS', 'BEST_PRODUCTS', 'HOW_TO', 'OPINION', 'INTERVIEW', 'POLL',
                         'LIVE_STREAM', 'DEALS', 'HANDS_ON', 'FULL_REVIEW', 'LONG_TERM_REVIEW', 'COMPARISON'))
    language = Column(String(length=2))
    uri_language = Column(String(length=2))
    uri_uri = Column(String(length=255))
    title = Column(String(length=255))
    advertorial = Column(BIT)
    metaDescription = Column(Text)
    metaKeywords = Column(Text)
    metaNewsKeywords = Column(Text)
    categoryIds = Column(Text)
    relatedManufacturerIds = Column(Text)
    mainDevice_id = Column(Integer)
    relatedDeviceIds = Column(Text)
    relatedAppIds = Column(Text)
    relatedAndroidVersions = Column(Text)
    relatedSystemUIs = Column(Text)
    relatedOSs = Column(Text)
    relatedOperatorBrands = Column(Text)
    otherTags = Column(Text)
    otherTagIds = Column(Text)
    relatedForumThreadIds = Column(String(length=255))
    relatedArticleIds = Column(String(length=255))
    referencedGalleryIds = Column(Text)
    commentsAllowed = Column(Boolean)
    author_id = Column(Integer, ForeignKey('User.id'))
    published = Column(Boolean)
    publishingDate = Column(DateTime)
    republishingDate = Column(DateTime)
    storyLabel = Column(String(length=255))
    sourceName = Column(String(length=255))
    sourceURL = Column(String(length=255))
    source2Name = Column(String(length=255))
    source2URL = Column(String(length=255))
    source3Name = Column(String(length=255))
    source3URL = Column(String(length=255))
    translationSource_id = Column(Integer, ForeignKey('Article.id'))

    heroImage_id = Column(Integer, ForeignKey(
        'UserFile.id'))  # foreign keys here
    heroImageAuto = Column(Boolean)
    previewImage_id = Column(Integer, ForeignKey('UserFile.id'))
    previewImageLegacy_id = Column(Integer, ForeignKey('UserFile.id'))
    pros = Column(Text)
    cons = Column(Text)

    createdBy_id = Column(Integer, ForeignKey('User.id'))
    creationDate = Column(DateTime)

    modifiedBy_id = Column(Integer, ForeignKey('User.id'))
    modificationDate = Column(DateTime)

    deleted = Column(Boolean)
    deletionDate = Column(DateTime)
    deletionReason = Column(String(length=255))

    author = relationship('User', foreign_keys=[author_id])
    createdBy = relationship('User', foreign_keys=[createdBy_id])
    modifiedBy = relationship('User', foreign_keys=[modifiedBy_id])

    heroImage = relationship('UserFile', foreign_keys=[heroImage_id])
    previewImage = relationship('UserFile', foreign_keys=[previewImage_id])
    previewImageLegacy = relationship(
        'UserFile', foreign_keys=[previewImageLegacy_id])

    sections = relationship('ArticleSection', back_populates='article')
    comments = relationship('ArticleComment', back_populates='article')
    translationSource = relationship('Article', uselist=False, remote_side=[id], back_populates='translations')
    translations = relationship('Article', foreign_keys=[translationSource_id], back_populates='translationSource')

    def __repr__(self):
        return "<ApitArticle(id='%s', title='%s', publishingDate='%s')>" % (self.id, self.title, self.publishingDate)
Example n. 31
class Media(Base):
    __tablename__ = 'media'
    id = Column(Integer, primary_key=True)
    type = Column(Enum(MyEnum))
    url = Column(String(250))
    post_id = Column(Integer, ForeignKey('post.id'))
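# `MyEnum` is not defined in this snippet; SQLAlchemy's Enum also accepts a
# PEP 435 enum class, so a hypothetical definition (which would need to
# precede the model) could be:

import enum


class MyEnum(enum.Enum):
    image = 'image'
    video = 'video'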
Example n. 32
class Account(MailSyncBase, HasPublicID, HasEmailAddress, HasRunState,
              HasRevisions, UpdatedAtMixin, DeletedAtMixin):
    API_OBJECT_NAME = 'account'

    @property
    def provider(self):
        """
        A constant, unique lowercase identifier for the account provider
        (e.g., 'gmail', 'eas'). Subclasses should override this.

        """
        raise NotImplementedError

    @property
    def verbose_provider(self):
        """
        A detailed identifier for the account provider
        (e.g., 'gmail', 'office365', 'outlook').
        Subclasses may override this.

        """
        return self.provider

    @property
    def category_type(self):
        """
        Whether the account is organized by folders or labels
        ('folder'/ 'label'), depending on the provider.
        Subclasses should override this.

        """
        raise NotImplementedError

    @property
    def auth_handler(self):
        from inbox.auth.base import handler_from_provider
        return handler_from_provider(self.provider)

    @property
    def provider_info(self):
        return provider_info(self.provider)

    @property
    def thread_cls(self):
        from inbox.models.thread import Thread
        return Thread

    # The default phrase used when sending mail from this account.
    name = Column(String(256), nullable=False, server_default='')

    # If True, throttle initial sync to reduce resource load
    throttled = Column(Boolean, server_default=false())

    # if True we sync contacts/events/email
    # NOTE: these columns are meaningless for EAS accounts
    sync_email = Column(Boolean, nullable=False, default=True)
    sync_contacts = Column(Boolean, nullable=False, default=False)
    sync_events = Column(Boolean, nullable=False, default=False)

    last_synced_contacts = Column(DateTime, nullable=True)

    # DEPRECATED
    last_synced_events = Column(DateTime, nullable=True)

    emailed_events_calendar_id = Column(BigInteger,
                                        ForeignKey('calendar.id',
                                                   ondelete='SET NULL',
                                                   use_alter=True,
                                                   name='emailed_events_cal'),
                                        nullable=True)

    _emailed_events_calendar = relationship(
        'Calendar',
        post_update=True,
        foreign_keys=[emailed_events_calendar_id])

    def create_emailed_events_calendar(self):
        if not self._emailed_events_calendar:
            calname = "Emailed events"
            cal = Calendar(namespace=self.namespace,
                           description=calname,
                           uid='inbox',
                           name=calname,
                           read_only=True)
            self._emailed_events_calendar = cal

    @property
    def emailed_events_calendar(self):
        self.create_emailed_events_calendar()
        return self._emailed_events_calendar

    @emailed_events_calendar.setter
    def emailed_events_calendar(self, cal):
        self._emailed_events_calendar = cal

    sync_host = Column(String(255), nullable=True)
    desired_sync_host = Column(String(255), nullable=True)

    # current state of this account
    state = Column(Enum('live', 'down', 'invalid'), nullable=True)

    # Based on account status, should the sync be running?
    # (Note, this is stored via a mixin.)
    # This is set to false if:
    #  - Account credentials are invalid (see mark_invalid())
    #  - External factors no longer require this account to sync
    # The value of this bit should always equal the AND value of all its
    # folders and heartbeats.

    @property
    def sync_enabled(self):
        return self.sync_should_run

    sync_state = Column(Enum('running', 'stopped', 'killed', 'invalid',
                             'connerror'),
                        nullable=True)

    _sync_status = Column(MutableDict.as_mutable(JSON),
                          default={},
                          nullable=True)

    @property
    def sync_status(self):
        d = dict(id=self.id,
                 email=self.email_address,
                 provider=self.provider,
                 is_enabled=self.sync_enabled,
                 state=self.sync_state,
                 sync_host=self.sync_host,
                 desired_sync_host=self.desired_sync_host)
        d.update(self._sync_status or {})

        return d

    @property
    def sync_error(self):
        return self._sync_status.get('sync_error')

    @property
    def initial_sync_start(self):
        if len(self.folders) == 0 or \
           any([f.initial_sync_start is None for f in self.folders]):
            return None
        return min([f.initial_sync_start for f in self.folders])

    @property
    def initial_sync_end(self):
        if len(self.folders) == 0 \
           or any([f.initial_sync_end is None for f in self.folders]):
            return None
        return max([f.initial_sync_end for f in self.folders])

    @property
    def initial_sync_duration(self):
        if not self.initial_sync_start or not self.initial_sync_end:
            return None
        return (self.initial_sync_end - self.initial_sync_start).total_seconds()

    def update_sync_error(self, error=None):
        if error is None:
            self._sync_status['sync_error'] = None
        else:
            error_obj = {
                'message':
                str(error.message)[:3000],
                'exception':
                "".join(traceback.format_exception_only(type(error),
                                                        error))[:500],
                'traceback':
                traceback.format_exc(20)[:3000]
            }

            self._sync_status['sync_error'] = error_obj

    def sync_started(self):
        """
        Record transition to started state. Should be called after the
        sync is actually started, not when the request to start it is made.

        """
        current_time = datetime.utcnow()

        # Never run before (vs restarting stopped/killed)
        if self.sync_state is None and (
                not self._sync_status
                or self._sync_status.get('sync_end_time') is None):
            self._sync_status['original_start_time'] = current_time

        self._sync_status['sync_start_time'] = current_time
        self._sync_status['sync_end_time'] = None
        self._sync_status['sync_error'] = None
        self._sync_status['sync_disabled_reason'] = None
        self._sync_status['sync_disabled_on'] = None
        self._sync_status['sync_disabled_by'] = None

        self.sync_state = 'running'

    def enable_sync(self):
        """ Tell the monitor that this account should be syncing. """
        self.sync_should_run = True

    def disable_sync(self, reason):
        """ Tell the monitor that this account should stop syncing. """
        self.sync_should_run = False
        self._sync_status['sync_disabled_reason'] = reason
        self._sync_status['sync_disabled_on'] = datetime.utcnow()
        self._sync_status['sync_disabled_by'] = os.environ.get(
            'USER', 'unknown')

    def mark_invalid(self, reason='invalid credentials', scope='mail'):
        """
        In the event that the credentials for this account are invalid,
        update the status and sync flag accordingly. Should only be called
        after trying to re-authorize / get new token.

        """
        if scope == 'calendar':
            self.sync_events = False
        elif scope == 'contacts':
            self.sync_contacts = False
        else:
            self.disable_sync(reason)
            self.sync_state = 'invalid'

    def mark_for_deletion(self):
        """
        Mark account for deletion
        """
        self.disable_sync('account deleted')
        self.sync_state = 'stopped'
        # Commit this to prevent race conditions
        inspect(self).session.commit()

    def unmark_for_deletion(self):
        self.enable_sync()
        self._sync_status = {}
        self.sync_state = 'running'
        inspect(self).session.commit()

    def sync_stopped(self, requesting_host):
        """
        Record transition to stopped state. Should be called after the
        sync is actually stopped, not when the request to stop it is made.

        """
        if requesting_host == self.sync_host:
            # Perform a compare-and-swap before updating these values.
            # Only if the host requesting to update the account.sync_* attributes
            # here still owns the account sync (i.e is account.sync_host),
            # the request can proceed.
            self.sync_host = None
            if self.sync_state == 'running':
                self.sync_state = 'stopped'
            self._sync_status['sync_end_time'] = datetime.utcnow()
            return True
        return False

    @classmethod
    def get(cls, id_, session):
        q = bakery(lambda session: session.query(cls))
        q += lambda q: q.filter(cls.id == bindparam('id_'))
        return q(session).params(id_=id_).first()

    @property
    def is_killed(self):
        return self.sync_state == 'killed'

    @property
    def is_running(self):
        return self.sync_state == 'running'

    @property
    def is_marked_for_deletion(self):
        return self.sync_state in ('stopped', 'killed', 'invalid') and \
            self.sync_should_run is False and \
            self._sync_status.get('sync_disabled_reason') == 'account deleted'

    @property
    def should_suppress_transaction_creation(self):
        # Only version if new or the `sync_state` has changed.
        obj_state = inspect(self)
        return not (obj_state.pending
                    or obj_state.attrs.sync_state.history.has_changes())

    @property
    def server_settings(self):
        return None

    def get_raw_message_contents(self, message):
        # Get the raw contents of a message. We do this differently
        # for every backend (Gmail, IMAP, EAS), and the best way
        # to do this across repos is to make it a method of the
        # account class.
        raise NotImplementedError

    discriminator = Column('type', String(16))
    __mapper_args__ = {
        'polymorphic_identity': 'account',
        'polymorphic_on': discriminator
    }
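# A hedged subclassing sketch (hypothetical provider): concrete account types
# override the abstract properties above and declare their own polymorphic
# identity against the `type` discriminator column:
#
#     class GenericIMAPAccount(Account):
#         __mapper_args__ = {'polymorphic_identity': 'genericimap'}
#
#         @property
#         def provider(self):
#             return 'imap'
#
#         @property
#         def category_type(self):
#             return 'folder'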
Example n. 33
def add_election_compound_to_archive(context):
    old_type = Enum('election', 'vote', name='type_of_result')
    new_type = Enum(
        'election', 'election_compound', 'vote', name='type_of_result'
    )
    tmp_type = Enum(
        'election', 'election_compound', 'vote', name='_type_of_result'
    )

    tmp_type.create(context.operations.get_bind(), checkfirst=False)
    context.operations.execute(
        'ALTER TABLE archived_results ALTER COLUMN type '
        'TYPE _type_of_result USING type::text::_type_of_result'
    )

    old_type.drop(context.operations.get_bind(), checkfirst=False)

    new_type.create(context.operations.get_bind(), checkfirst=False)
    context.operations.execute(
        'ALTER TABLE archived_results ALTER COLUMN type '
        'TYPE type_of_result USING type::text::type_of_result'
    )

    tmp_type.drop(context.operations.get_bind(), checkfirst=False)
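# The migration above is the standard PostgreSQL recipe for changing an
# enum's members in place: create a temporary type, cast the column through
# text, drop the old type, recreate it with the new members, cast back, and
# drop the temporary type. A hedged generic helper (assuming an Alembic-style
# `operations` object and that SQLAlchemy's Enum is already imported, as in
# the examples here) might look like:

def swap_enum_members(operations, table, column, name, old_values, new_values):
    bind = operations.get_bind()
    # Temporary type carrying the new member set
    tmp = Enum(*new_values, name='_' + name)
    tmp.create(bind, checkfirst=False)
    operations.execute(
        'ALTER TABLE {t} ALTER COLUMN {c} TYPE _{n} '
        'USING {c}::text::_{n}'.format(t=table, c=column, n=name))
    # Replace the old type with one of the same name but the new members
    Enum(*old_values, name=name).drop(bind, checkfirst=False)
    Enum(*new_values, name=name).create(bind, checkfirst=False)
    operations.execute(
        'ALTER TABLE {t} ALTER COLUMN {c} TYPE {n} '
        'USING {c}::text::{n}'.format(t=table, c=column, n=name))
    tmp.drop(bind, checkfirst=False)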
Example n. 34
#     director = relationship('Director', back_populates='movies', uselist=False)
#
#     def __str__(self):
#         return self.director.name
MovieDirector = Table(
    'movie_director', Base.metadata, Column('id', Integer, primary_key=True),
    Column('movie_id', Integer, ForeignKey('movie.id', ondelete='cascade')),
    Column('celebrity_id', Integer,
           ForeignKey('director.id', ondelete='cascade')))

MovieCelebrity = Table(
    'movie_celebrity', Base.metadata, Column('id', Integer, primary_key=True),
    Column('movie_id', Integer, ForeignKey('movie.id', ondelete='cascade')),
    Column('celebrity_id', Integer,
           ForeignKey('celebrity.id', ondelete='cascade')),
    Column('celebrity_type', Enum('actor', 'director')))

# class MovieArea(Base):
#     __tablename__ = 'movie_area'
#     id = Column(Integer(), primary_key=True)
#     movie_id = Column(Integer(), ForeignKey('movie.id', ondelete='cascade'))
#     area_id = Column(Integer(), ForeignKey('area.id', ondelete='cascade'))
#
#     movie = relationship("Movie", back_populates='areas', uselist=False)
#     area = relationship("Area", back_populates='movies', uselist=False)
#
#     def __str__(self):
#         return self.area.name
MovieArticle = Table(
    'movie_article', Base.metadata, Column('id', Integer, primary_key=True),
    Column('movie_id', Integer, ForeignKey('movie.id', ondelete='cascade')),
Example n. 35
 def __init__(self, enum):
     super(DeclEnumType, self).__init__()
     self.enum = enum
     to_lower = lambda m: "_" + m.group(1).lower()
     self.name = 'ck{}'.format(re.sub('([A-Z])', to_lower, enum.__name__))
     self.impl = Enum(*enum.values(), name=self.name)
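# For illustration, the CamelCase mangling above yields a check-constraint
# style type name; for a hypothetical enum class:
#
#     >>> import re
#     >>> re.sub('([A-Z])', lambda m: '_' + m.group(1).lower(), 'TaskStatus')
#     '_task_status'
#     # so self.name becomes 'ck_task_status'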
Example n. 36
def change_election_type_column(context):
    type_ = Enum('proporz', 'majorz', name='type_of_election')
    context.operations.execute(
        'ALTER TABLE elections ALTER COLUMN type TYPE Text'
    )
    type_.drop(context.operations.get_bind(), checkfirst=False)
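    # Note (PostgreSQL semantics): the column is cast to Text first because
    # PostgreSQL refuses to drop an enum type that is still referenced by a
    # column.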
Example n. 37
# Metadata instance that is used to bind the engine, Object and tables
metadata = MetaData()

# Description of the file table

fileTable = Table('File',
                  metadata,
                  Column('FileID', Integer, primary_key=True),
                  Column('OperationID',
                         Integer,
                         ForeignKey('Operation.OperationID',
                                    ondelete='CASCADE'),
                         nullable=False),
                  Column('Status',
                         Enum('Waiting', 'Done', 'Failed', 'Scheduled'),
                         server_default='Waiting'),
                  Column('LFN', String(255), index=True),
                  Column('PFN', String(255)),
                  Column('ChecksumType',
                         Enum('ADLER32', 'MD5', 'SHA1', ''),
                         server_default=''),
                  Column('Checksum', String(255)),
                  Column('GUID', String(36)),
                  Column('Size', BigInteger),
                  Column('Attempt', Integer),
                  Column('Error', String(255)),
                  mysql_engine='InnoDB')

# Map the File object to the fileTable, with a few special attributes
Example n. 38
def add_municipality_domain(context):
    # Rename the columns
    renames = (
        ('elections', 'total_municipalities', 'total_entities'),
        ('elections', 'counted_municipalities', 'counted_entities'),
        ('election_results', 'municipality_id', 'entity_id'),
        ('ballot_results', 'municipality_id', 'entity_id'),
    )

    for table, old, new in renames:
        if context.has_column(table, old):
            context.operations.alter_column(table, old, new_column_name=new)

    # Add the new domain, see http://stackoverflow.com/a/14845740
    table_names = []
    inspector = Inspector(context.operations_connection)
    if 'elections' in inspector.get_table_names(context.schema):
        table_names.append('elections')
    if 'election_compounds' in inspector.get_table_names(context.schema):
        table_names.append('election_compounds')
    if 'votes' in inspector.get_table_names(context.schema):
        table_names.append('votes')
    if 'archived_results' in inspector.get_table_names(context.schema):
        table_names.append('archived_results')

    old_type = Enum('federation', 'canton', name='domain_of_influence')
    new_type = Enum('federation', 'canton', 'municipality',
                    name='domain_of_influence')
    tmp_type = Enum('federation', 'canton', 'municipality',
                    name='_domain_of_influence')

    tmp_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE _domain_of_influence '
                'USING domain::text::_domain_of_influence'
            ).format(table_name)
        )

    old_type.drop(context.operations.get_bind(), checkfirst=False)

    new_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE domain_of_influence '
                'USING domain::text::domain_of_influence'
            ).format(table_name)
        )

    tmp_type.drop(context.operations.get_bind(), checkfirst=False)
Example n. 39
class IBMDedicatedHost(db.Model):
    """
    Model for Dedicated host
    """
    ID_KEY = "id"
    NAME_KEY = "name"
    STATUS_KEY = "status"
    REGION_KEY = "region"
    ZONE_KEY = "zone"
    RESOURCE_ID_KEY = "resource_id"
    CRN_KEY = "crn"
    HREF_KEY = "href"
    INSTANCE_PLACEMENT_ENABLED_KEY = "instance_placement_enabled"
    LIFECYCLE_STATE_KEY = "lifecycle_state"
    AVAILABLE_MEMORY_KEY = "available_memory"
    MEMORY_KEY = "memory"
    PROVISIONABLE_KEY = "provisionable"
    SOCKET_COUNT_KEY = "socket_count"
    STATE_KEY = "state"
    VCPU_KEY = "vcpu"
    AVAILABLE_VCPU_KEY = "available_vcpu"
    RESOURCE_GROUP_KEY = "resource_group"
    DEDICATED_HOST_GROUP_KEY = "dedicated_host_group"
    DEDICATED_HOST_PROFILE_KEY = "dedicated_host_profile"
    INSTANCES_KEY = "instances"
    DEDICATED_HOST_DISKS_KEY = "dedicated_host_disks"
    SUPPORTED_INSTANCE_PROFILES_KEY = "supported_instance_profiles"

    __tablename__ = "ibm_dedicated_hosts"
    # resource_type missing. Not needed I guess
    id = Column(String(32), primary_key=True)
    name = Column(String(255))
    status = Column(String(50), nullable=False)
    region = Column(String(128), nullable=False)
    zone = Column(String(20), nullable=False)
    resource_id = Column(String(64))
    crn = Column(Text)
    href = Column(Text)
    instance_placement_enabled = Column(Boolean, default=True)
    lifecycle_state = Column(Enum("deleting", "failed", "pending", "stable",
                                  "updating", "waiting", "suspended"),
                             default="stable")
    available_memory = Column(Integer)
    memory = Column(Integer)
    provisionable = Column(Boolean)
    socket_count = Column(Integer)
    state = Column(Enum("available", "degraded", "migrating", "unavailable"))
    vcpu = Column(JSON)
    available_vcpu = Column(JSON)

    cloud_id = Column(String(32), ForeignKey("ibm_clouds.id"), nullable=False)
    resource_group_id = Column(String(32),
                               ForeignKey("ibm_resource_groups.id"))
    dedicated_host_group_id = Column(
        String(32), ForeignKey("ibm_dedicated_host_groups.id"))
    dedicated_host_profile_id = Column(
        String(32),
        ForeignKey("ibm_dedicated_host_profiles.id"),
        nullable=False)

    instances = relationship("IBMInstance",
                             backref="ibm_dedicated_host",
                             cascade="all, delete-orphan",
                             lazy="dynamic")
    dedicated_host_disks = relationship("IBMDedicatedHostDisk",
                                        backref="ibm_dedicated_host",
                                        cascade="all, delete-orphan",
                                        lazy="dynamic")
    supported_instance_profiles = relationship(
        "IBMInstanceProfile",
        secondary=ibm_dh_supported_instance_profiles,
        backref="ibm_dedicated_hosts",
        lazy="dynamic")

    __table_args__ = (UniqueConstraint(
        name, region, cloud_id, name="uix_ibm_dh_name_region_cloudid"), )

    def __init__(self,
                 name=None,
                 status=None,
                 region=None,
                 zone=None,
                 resource_id=None,
                 crn=None,
                 href=None,
                 instance_placement_enabled=True,
                 lifecycle_state=None,
                 available_memory=None,
                 memory=None,
                 provisionable=True,
                 socket_count=None,
                 state=None,
                 vcpu=None,
                 available_vcpu=None,
                 cloud_id=None):
        self.id = str(uuid.uuid4().hex)
        self.name = name
        self.status = status or CREATION_PENDING
        self.region = region
        self.zone = zone
        self.resource_id = resource_id
        self.crn = crn
        self.href = href
        self.instance_placement_enabled = instance_placement_enabled
        self.lifecycle_state = lifecycle_state
        self.available_memory = available_memory
        self.memory = memory
        self.provisionable = provisionable
        self.socket_count = socket_count
        self.state = state
        self.vcpu = vcpu
        self.available_vcpu = available_vcpu
        self.cloud_id = cloud_id

    def to_json(self):
        """
        Return a JSON representation of the object
        """
        return {
            self.ID_KEY:
            self.id,
            self.NAME_KEY:
            self.name,
            self.STATUS_KEY:
            self.status,
            self.REGION_KEY:
            self.region,
            self.ZONE_KEY:
            self.zone,
            self.RESOURCE_ID_KEY:
            self.resource_id,
            self.CRN_KEY:
            self.crn,
            self.INSTANCE_PLACEMENT_ENABLED_KEY:
            self.instance_placement_enabled,
            self.LIFECYCLE_STATE_KEY:
            self.lifecycle_state,
            self.AVAILABLE_MEMORY_KEY:
            self.available_memory,
            self.MEMORY_KEY:
            self.memory,
            self.PROVISIONABLE_KEY:
            self.provisionable,
            self.SOCKET_COUNT_KEY:
            self.socket_count,
            self.STATE_KEY:
            self.state,
            self.VCPU_KEY:
            self.vcpu,
            self.AVAILABLE_VCPU_KEY:
            self.available_vcpu,
            self.RESOURCE_GROUP_KEY: {
                self.ID_KEY: self.resource_group_id
            } if self.resource_group_id else None,
            self.DEDICATED_HOST_GROUP_KEY: {
                self.ID_KEY: self.dedicated_host_group_id,
                self.NAME_KEY: self.ibm_dedicated_host_group.name
            } if self.dedicated_host_group_id else None,
            self.DEDICATED_HOST_PROFILE_KEY: {
                self.ID_KEY: self.dedicated_host_profile_id,
                self.NAME_KEY: self.ibm_dedicated_host_profile.name
            } if self.dedicated_host_profile_id else None,
            self.SUPPORTED_INSTANCE_PROFILES_KEY:
            [sip.to_json() for sip in self.supported_instance_profiles.all()],
            self.INSTANCES_KEY:
            [instance.id for instance in self.instances.all()]
        }

    def to_json_body(self):
        """
        Return a JSON representation of the object according to IBM's CREATE API Call
        """
        json_data = {"profile": {"name": self.ibm_dedicated_host_profile.name}}
        # DO NOT simplify the following expression
        if self.instance_placement_enabled is False:
            json_data[
                "instance_placement_enabled"] = self.instance_placement_enabled

        if self.name:
            json_data["name"] = self.name

        if self.ibm_resource_group:
            json_data["resource_group"] = {
                "id": self.ibm_resource_group.resource_id
            }

        if self.ibm_dedicated_host_group.resource_id:
            json_data["group"] = {
                "id": self.ibm_dedicated_host_group.resource_id
            }
        else:
            json_data["zone"] = {"name": self.zone}
            if self.ibm_dedicated_host_group.name:
                json_data["group"] = {
                    "name": self.ibm_dedicated_host_group.name
                }

            if self.ibm_dedicated_host_group.ibm_resource_group:
                json_data["group"] = json_data.get("group") or {}
                json_data["group"]["resource_group"] = {
                    "id":
                    self.ibm_dedicated_host_group.ibm_resource_group.
                    resource_id
                }

        return json_data

    def update_from_obj(self, updated_obj):
        self.name = updated_obj.name
        self.status = updated_obj.status
        self.region = updated_obj.region
        self.zone = updated_obj.zone
        self.resource_id = updated_obj.resource_id
        self.crn = updated_obj.crn
        self.href = updated_obj.href
        self.instance_placement_enabled = updated_obj.instance_placement_enabled
        self.lifecycle_state = updated_obj.lifecycle_state
        self.available_memory = updated_obj.available_memory
        self.memory = updated_obj.memory
        self.provisionable = updated_obj.provisionable
        self.socket_count = updated_obj.socket_count
        self.state = updated_obj.state
        self.vcpu = updated_obj.vcpu
        self.available_vcpu = updated_obj.available_vcpu

    @classmethod
    def from_ibm_json(cls, json_body):
        """
        Return an object of the class created from the provided JSON body
        """
        return cls(
            name=json_body["name"],
            status="CREATED",
            region=json_body["href"].split("//")[1].split(".")[0],
            zone=json_body["zone"]["name"],
            resource_id=json_body["id"],
            crn=json_body["crn"],
            href=json_body["href"],
            instance_placement_enabled=json_body["instance_placement_enabled"],
            lifecycle_state=json_body["lifecycle_state"],
            available_memory=json_body["available_memory"],
            memory=json_body["memory"],
            provisionable=json_body["provisionable"],
            socket_count=json_body["socket_count"],
            state=json_body["state"],
            vcpu=json_body["vcpu"],
            available_vcpu=json_body["available_vcpu"])
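    # For illustration: the region is derived from the API endpoint host, so
    # for a hypothetical href such as
    # 'https://us-south.iaas.cloud.ibm.com/v1/dedicated_hosts/...', the
    # expression json_body["href"].split("//")[1].split(".")[0] yields
    # 'us-south'.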

    def to_report_json(self):
        return {
            self.ID_KEY: self.id,
            self.NAME_KEY: self.name,
            self.STATUS_KEY:
            "SUCCESS" if self.status == "CREATED" else "PENDING",
            "message": ""
        }
Example n. 40
class Message(MailSyncBase, HasRevisions, HasPublicID):
    API_OBJECT_NAME = 'message'

    # Do delete messages if their associated thread is deleted.
    thread_id = Column(Integer,
                       ForeignKey('thread.id', ondelete='CASCADE'),
                       nullable=False)

    thread = relationship('Thread',
                          backref=backref('messages',
                                          order_by='Message.received_date',
                                          passive_deletes=True,
                                          cascade='all, delete-orphan'))

    namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
                          index=True,
                          nullable=False)
    namespace = relationship('Namespace', lazy='joined', load_on_pending=True)

    from_addr = Column(JSON, nullable=False, default=lambda: [])
    sender_addr = Column(JSON, nullable=True)
    reply_to = Column(JSON, nullable=True)
    to_addr = Column(JSON, nullable=False, default=lambda: [])
    cc_addr = Column(JSON, nullable=False, default=lambda: [])
    bcc_addr = Column(JSON, nullable=False, default=lambda: [])
    in_reply_to = Column(JSON, nullable=True)
    # From: http://tools.ietf.org/html/rfc4130, section 5.3.3,
    # max message_id_header is 998 characters
    message_id_header = Column(String(998), nullable=True)
    # There is no hard limit on subject length in the spec, but 255 is common.
    subject = Column(String(255), nullable=True, default='')
    received_date = Column(DateTime, nullable=False, index=True)
    size = Column(Integer, nullable=False)
    data_sha256 = Column(String(255), nullable=True)

    is_read = Column(Boolean, server_default=false(), nullable=False)

    # For drafts (both Inbox-created and otherwise)
    is_draft = Column(Boolean, server_default=false(), nullable=False)
    is_sent = Column(Boolean, server_default=false(), nullable=False)

    # DEPRECATED
    state = Column(Enum('draft', 'sending', 'sending failed', 'sent'))

    # Most messages are short and include a lot of quoted text. Preprocessing
    # just the relevant part out makes a big difference in how much data we
    # need to send over the wire.
    # Maximum length is determined by typical email size limits (25 MB body +
    # attachments on Gmail), assuming a maximum # of chars determined by
    # 1-byte (ASCII) chars.
    # NOTE: always HTML :)
    sanitized_body = Column(Text(length=26214400), nullable=False)
    snippet = Column(String(191), nullable=False)
    SNIPPET_LENGTH = 191

    # A reference to the block holding the full contents of the message
    full_body_id = Column(ForeignKey('block.id', name='full_body_id_fk'),
                          nullable=True)
    full_body = relationship('Block', cascade='all, delete')

    # this might be a mail-parsing bug, or just a message from a bad client
    decode_error = Column(Boolean, server_default=false(), nullable=False)

    # only on messages from Gmail (TODO: use different table)
    #
    # X-GM-MSGID is guaranteed unique across an account but not globally
    # across all Gmail.
    #
    # Messages between different accounts *may* have the same X-GM-MSGID,
    # but it's unlikely.
    #
    # (Gmail info from
    # http://mailman13.u.washington.edu/pipermail/imap-protocol/
    # 2014-July/002290.html.)
    g_msgid = Column(BigInteger, nullable=True, index=True, unique=False)
    g_thrid = Column(BigInteger, nullable=True, index=True, unique=False)

    # The uid as set in the X-INBOX-ID header of a sent message we create
    inbox_uid = Column(String(64), nullable=True, index=True)

    def regenerate_inbox_uid(self):
        """The value of inbox_uid is simply the draft public_id and version,
        concatenated. Because the inbox_uid identifies the draft on the remote
        provider, we regenerate it on each draft revision so that we can delete
        the old draft and add the new one on the remote."""
        self.inbox_uid = '{}-{}'.format(self.public_id, self.version)

    # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
    references = Column(JSON, nullable=True)

    # Only used for drafts.
    version = Column(Integer, nullable=False, server_default='0')

    def mark_for_deletion(self):
        """Mark this message to be deleted by an asynchronous delete
        handler."""
        self.deleted_at = datetime.datetime.utcnow()

    @validates('subject')
    def sanitize_subject(self, key, value):
        # Trim overlong subjects, and remove null bytes. The latter can result
        # when, for example, UTF-8 text decoded from an RFC2047-encoded header
        # contains null bytes.
        if value is None:
            return
        if len(value) > 255:
            value = value[:255]
        value = value.replace('\0', '')
        return value

    @classmethod
    def create_from_synced(cls, account, mid, folder_name, received_date,
                           body_string):
        """
        Parses message data and writes out db metadata and MIME blocks.

        Returns the new Message, which links to the new Part and Block objects
        through relationships. All new objects are uncommitted.

        Threads are not computed here; that must be done separately.

        Parameters
        ----------
        mid : int
            The account backend-specific message identifier; it's only used for
            logging errors.

        body_string : str
            The full message including headers (encoded).

        """
        _rqd = [account, mid, folder_name, body_string]
        if not all([v is not None for v in _rqd]):
            raise ValueError(
                'Required keyword arguments: account, mid, folder_name, '
                'body_string')
        # stop trickle-down bugs
        assert account.namespace is not None
        assert not isinstance(body_string, unicode)

        msg = Message()

        try:
            from inbox.models.block import Block, Part
            body_block = Block()
            body_block.namespace_id = account.namespace.id
            body_block.data = body_string
            body_block.content_type = "text/plain"
            msg.full_body = body_block

            msg.namespace_id = account.namespace.id
            parsed = mime.from_string(body_string)

            mime_version = parsed.headers.get('Mime-Version')
            # sometimes MIME-Version is '1.0 (1.0)', hence the .startswith()
            if mime_version is not None and not mime_version.startswith('1.0'):
                log.warning('Unexpected MIME-Version',
                            account_id=account.id,
                            folder_name=folder_name,
                            mid=mid,
                            mime_version=mime_version)

            msg.data_sha256 = sha256(body_string).hexdigest()

            msg.subject = parsed.subject
            msg.from_addr = parse_mimepart_address_header(parsed, 'From')
            msg.sender_addr = parse_mimepart_address_header(parsed, 'Sender')
            msg.reply_to = parse_mimepart_address_header(parsed, 'Reply-To')
            msg.to_addr = parse_mimepart_address_header(parsed, 'To')
            msg.cc_addr = parse_mimepart_address_header(parsed, 'Cc')
            msg.bcc_addr = parse_mimepart_address_header(parsed, 'Bcc')

            msg.in_reply_to = parsed.headers.get('In-Reply-To')
            msg.message_id_header = parsed.headers.get('Message-Id')

            msg.received_date = received_date if received_date else \
                get_internaldate(parsed.headers.get('Date'),
                                 parsed.headers.get('Received'))

            # Custom Inbox header
            msg.inbox_uid = parsed.headers.get('X-INBOX-ID')

            # In accordance with JWZ (http://www.jwz.org/doc/threading.html)
            msg.references = parse_references(
                parsed.headers.get('References', ''),
                parsed.headers.get('In-Reply-To', ''))

            msg.size = len(body_string)  # includes headers text

            i = 0  # for walk_index

            # Store all message headers as object with index 0
            block = Block()
            block.namespace_id = account.namespace.id
            block.data = json.dumps(parsed.headers.items())

            headers_part = Part(block=block, message=msg)
            headers_part.walk_index = i

            for mimepart in parsed.walk(
                    with_self=parsed.content_type.is_singlepart()):
                i += 1
                if mimepart.content_type.is_multipart():
                    log.warning('multipart sub-part found',
                                account_id=account.id,
                                folder_name=folder_name,
                                mid=mid)
                    continue  # TODO should we store relations?
                msg._parse_mimepart(mimepart, mid, i, account.namespace.id)
            msg.calculate_sanitized_body()
        except (mime.DecodingError, AttributeError, RuntimeError, TypeError,
                ValueError) as e:
            # Message parsing can fail for several reasons. Occasionally iconv
            # will fail by hitting the maximum recursion depth. EAS messages
            # may be missing Date and Received headers. In such cases, we
            # still keep the metadata and mark the message as broken.
            _log_decode_error(account.id, folder_name, mid, body_string)
            err_filename = _get_errfilename(account.id, folder_name, mid)
            log.error('Message parsing error',
                      folder_name=folder_name,
                      account_id=account.id,
                      err_filename=err_filename,
                      error=e)
            msg._mark_error()

        # Occasionally people try to send messages to way too many
        # recipients. In such cases, empty the field and treat as a parsing
        # error so that we don't break the entire sync.
        for field in ('to_addr', 'cc_addr', 'bcc_addr', 'references'):
            value = getattr(msg, field)
            if json_field_too_long(value):
                _log_decode_error(account.id, folder_name, mid, body_string)
                err_filename = _get_errfilename(account.id, folder_name, mid)
                log.error('Recipient field too long',
                          field=field,
                          account_id=account.id,
                          folder_name=folder_name,
                          mid=mid)
                setattr(msg, field, [])
                msg._mark_error()

        return msg

    def _parse_mimepart(self, mimepart, mid, index, namespace_id):
        """Parse a single MIME part into a Block and Part object linked to this
        message."""
        from inbox.models.block import Block, Part
        disposition, disposition_params = mimepart.content_disposition
        if (disposition is not None
                and disposition not in ['inline', 'attachment']):
            cd = mimepart.content_disposition
            log.error('Unknown Content-Disposition',
                      mid=mid,
                      bad_content_disposition=cd,
                      parsed_content_disposition=disposition)
            self._mark_error()
            return
        block = Block()
        block.namespace_id = namespace_id
        block.content_type = mimepart.content_type.value
        block.filename = _trim_filename(
            mimepart.content_type.params.get('name'), mid)

        new_part = Part(block=block, message=self)
        new_part.walk_index = index

        # TODO maybe also trim other headers?
        if disposition is not None:
            new_part.content_disposition = disposition
            if disposition == 'attachment':
                new_part.block.filename = _trim_filename(
                    disposition_params.get('filename'), mid)

        if mimepart.body is None:
            data_to_write = ''
        elif new_part.block.content_type.startswith('text'):
            data_to_write = mimepart.body.encode('utf-8', 'strict')
            # normalize mac/win/unix newlines
            data_to_write = data_to_write.replace('\r\n', '\n'). \
                replace('\r', '\n')
        else:
            data_to_write = mimepart.body
        if data_to_write is None:
            data_to_write = ''

        new_part.content_id = mimepart.headers.get('Content-Id')

        block.data = data_to_write

    def _mark_error(self):
        self.decode_error = True
        # Fill in required attributes with filler data if we could not parse them.
        self.size = 0
        if self.received_date is None:
            self.received_date = datetime.datetime.utcnow()
        if self.sanitized_body is None:
            self.sanitized_body = ''
        if self.snippet is None:
            self.snippet = ''

    def calculate_sanitized_body(self):
        plain_part, html_part = self.body
        # TODO: also strip signatures.
        if html_part:
            assert '\r' not in html_part, "newlines not normalized"
            self.snippet = self.calculate_html_snippet(html_part)
            self.sanitized_body = html_part
        elif plain_part:
            self.snippet = self.calculate_plaintext_snippet(plain_part)
            self.sanitized_body = plaintext2html(plain_part, False)
        else:
            self.sanitized_body = u''
            self.snippet = u''

    def calculate_html_snippet(self, text):
        text = strip_tags(text)
        return self.calculate_plaintext_snippet(text)

    def calculate_plaintext_snippet(self, text):
        return ' '.join(text.split())[:self.SNIPPET_LENGTH]

    @property
    def body(self):
        """ Returns (plaintext, html) body for the message, decoded. """
        assert self.parts, \
            "Can't calculate body before parts have been parsed"

        plain_data = None
        html_data = None

        for part in self.parts:
            if part.block.content_type == 'text/html':
                html_data = part.block.data.decode('utf-8').strip()
                break
        for part in self.parts:
            if part.block.content_type == 'text/plain':
                plain_data = part.block.data.decode('utf-8').strip()
                break

        return plain_data, html_data

    @property
    def headers(self):
        """ Returns headers for the message, decoded. """
        assert self.parts, \
            "Can't provide headers before parts have been parsed"

        headers = self.parts[0].block.data
        return json.loads(headers)

    @property
    def participants(self):
        """
        Different messages in the thread may reference the same email
        address with different phrases. We partially deduplicate: if the same
        email address occurs with both empty and nonempty phrase, we don't
        separately return the (empty phrase, address) pair.

        """
        deduped_participants = defaultdict(set)
        chain = []
        if self.from_addr:
            chain.append(self.from_addr)

        if self.to_addr:
            chain.append(self.to_addr)

        if self.cc_addr:
            chain.append(self.cc_addr)

        if self.bcc_addr:
            chain.append(self.bcc_addr)

        for phrase, address in itertools.chain.from_iterable(chain):
            deduped_participants[address].add(phrase.strip())

        p = []
        for address, phrases in deduped_participants.iteritems():
            for phrase in phrases:
                if phrase != '' or len(phrases) == 1:
                    p.append((phrase, address))
        return p

    @property
    def folders(self):
        return self.thread.folders

    @property
    def attachments(self):
        return [part for part in self.parts if part.is_attachment]

    @property
    def api_attachment_metadata(self):
        resp = []
        for part in self.parts:
            if not part.is_attachment:
                continue
            k = {
                'content_type': part.block.content_type,
                'size': part.block.size,
                'filename': part.block.filename,
                'id': part.block.public_id
            }
            content_id = part.content_id
            if content_id:
                if content_id[0] == '<' and content_id[-1] == '>':
                    content_id = content_id[1:-1]
                k['content_id'] = content_id
            resp.append(k)
        return resp

    # FOR INBOX-CREATED MESSAGES:

    is_created = Column(Boolean, server_default=false(), nullable=False)

    # Whether this draft is a reply to an existing thread.
    is_reply = Column(Boolean)

    reply_to_message_id = Column(Integer,
                                 ForeignKey('message.id'),
                                 nullable=True)
    reply_to_message = relationship('Message', uselist=False)

    @property
    def versioned_relationships(self):
        return ['parts']

    @property
    def has_attached_events(self):
        return 'text/calendar' in [p.block.content_type for p in self.parts]

    @property
    def attached_event_files(self):
        return [
            part for part in self.parts
            if part.block.content_type == 'text/calendar'
        ]
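
The deduplication rule in `participants` above is easiest to see in isolation. Below is a minimal sketch of the same logic on plain tuples, with made-up addresses (the helper name and data are illustrative, not part of the original model):

from collections import defaultdict

def dedupe_participants(pairs):
    # Same rule as Message.participants: drop an (empty phrase, address)
    # pair when that address also appears with a non-empty phrase.
    deduped = defaultdict(set)
    for phrase, address in pairs:
        deduped[address].add(phrase.strip())
    result = []
    for address, phrases in deduped.items():
        for phrase in phrases:
            if phrase != '' or len(phrases) == 1:
                result.append((phrase, address))
    return result

# ('', 'a@x.com') is dropped because that address also has a named entry;
# 'b@x.com' keeps its empty phrase because it is the only one.
print(dedupe_participants(
    [('Ann', 'a@x.com'), ('', 'a@x.com'), ('', 'b@x.com')]))
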
Esempio n. 41
0
class InventoryIndent(DeclBase, BaseMixin, TimestampMixin):

    title = Column(String(60), unique=False, nullable=True)
    desc = Column(String, unique=False, nullable=True)

    type = Column(Enum('production',
                       'prototype',
                       'testing',
                       'support',
                       'rd',
                       name='indent_type'),
                  nullable=False,
                  default='production',
                  server_default='production')

    status = Column(Enum('active',
                         'pending',
                         'archived',
                         'reversed',
                         name='indent_status'),
                    nullable=False,
                    default='active',
                    server_default='active')

    # Relationships
    # # Documents
    # doc_id = Column(Integer, ForeignKey('DocStoreDocument.id'),
    #                 nullable=True, unique=True)
    # doc = relationship("DocStoreDocument", uselist=False)
    #
    # doc_cobom_id = Column(Integer, ForeignKey('DocStoreDocument.id'),
    #                       nullable=True, unique=True)
    # doc_cobom = relationship("DocStoreDocument", uselist=False)

    # Serial Numbers
    requested_by_id = Column(Integer,
                             ForeignKey('User.id'),
                             nullable=False,
                             unique=False)
    requested_by = relationship("User", backref='indents')

    serialno_id = Column(Integer,
                         ForeignKey('SerialNumber.id'),
                         nullable=False)
    serialno = relationship(
        "SerialNumber",
        uselist=False,
        cascade="all",
        primaryjoin="InventoryIndent.serialno_id == SerialNumber.id")

    auth_parent_id = Column(Integer,
                            ForeignKey('SerialNumber.id'),
                            nullable=True)
    auth_parent = relationship(
        "SerialNumber",
        backref='direct_indents',
        uselist=False,
        primaryjoin="InventoryIndent.auth_parent_id == SerialNumber.id")

    # Raw Data
    cobom_id = None
    cobom = None

    def __repr__(self):
        return "<InventoryIndent DB ({0})>".format(self.sno)
Esempio n. 42
0
class Edition(Base, EditionConstants):
    """A lightly schematized collection of metadata for a work, or an
    edition of a work, or a book, or whatever. If someone thinks of it
    as a "book" with a "title" it can go in here.
    """

    __tablename__ = 'editions'
    id = Column(Integer, primary_key=True)

    data_source_id = Column(Integer, ForeignKey('datasources.id'), index=True)

    MAX_THUMBNAIL_HEIGHT = 300
    MAX_THUMBNAIL_WIDTH = 200

    # A full-sized image no larger than this height can be used as a thumbnail
    # in a pinch.
    MAX_FALLBACK_THUMBNAIL_HEIGHT = 500

    # This Edition is associated with one particular
    # identifier--the one used by its data source to identify
    # it. Through the Equivalency class, it is associated with a
    # (probably huge) number of other identifiers.
    primary_identifier_id = Column(Integer,
                                   ForeignKey('identifiers.id'),
                                   index=True)

    # An Edition may be the presentation edition for a single Work. If it's not
    # a presentation edition for a work, work will be None.
    work = relationship("Work", uselist=False, backref="presentation_edition")

    # An Edition may show up in many CustomListEntries.
    custom_list_entries = relationship("CustomListEntry", backref="edition")

    # An Edition may be the presentation edition for many LicensePools.
    is_presentation_for = relationship("LicensePool",
                                       backref="presentation_edition")

    title = Column(Unicode, index=True)
    sort_title = Column(Unicode, index=True)
    subtitle = Column(Unicode, index=True)
    series = Column(Unicode, index=True)
    series_position = Column(Integer)

    # This is not a foreign key per se; it's a calculated UUID-like
    # identifier for this work based on its title and author, used to
    # group together different editions of the same work.
    permanent_work_id = Column(String(36), index=True)

    # A string depiction of the authors' names.
    author = Column(Unicode, index=True)
    sort_author = Column(Unicode, index=True)

    contributions = relationship("Contribution", backref="edition")

    language = Column(Unicode, index=True)
    publisher = Column(Unicode, index=True)
    imprint = Column(Unicode, index=True)

    # `issued` is the date the ebook edition was sent to the distributor by the publisher,
    # i.e. the date it became available for librarians to buy for their libraries
    issued = Column(Date)
    # `published` is the original publication date of the text.
    # A Project Gutenberg text was likely `published` long before being `issued`.
    published = Column(Date)

    MEDIUM_ENUM = Enum(*EditionConstants.KNOWN_MEDIA, name="medium")

    medium = Column(MEDIUM_ENUM, index=True)

    cover_id = Column(Integer,
                      ForeignKey('resources.id',
                                 use_alter=True,
                                 name='fk_editions_summary_id'),
                      index=True)
    # These two let us avoid actually loading up the cover Resource
    # every time.
    cover_full_url = Column(Unicode)
    cover_thumbnail_url = Column(Unicode)

    # An OPDS entry containing all metadata about this entry that
    # would be relevant to display to a library patron.
    simple_opds_entry = Column(Unicode, default=None)

    # Information kept in here probably won't be used.
    extra = Column(MutableDict.as_mutable(JSON), default={})

    def __repr__(self):
        id_repr = repr(self.primary_identifier).decode("utf8")
        a = (u"Edition %s [%r] (%s/%s/%s)" %
             (self.id, id_repr, self.title, ", ".join(
                 [x.sort_name for x in self.contributors]), self.language))
        return a.encode("utf8")

    @property
    def language_code(self):
        return LanguageCodes.three_to_two.get(self.language, self.language)

    @property
    def contributors(self):
        return set([x.contributor for x in self.contributions])

    @property
    def author_contributors(self):
        """All distinct 'author'-type contributors, with the primary author
        first, other authors sorted by sort name.
        Basically, we're trying to figure out what would go on the
        book cover. The primary author should go first, and be
        followed by non-primary authors in alphabetical order. People
        whose role does not rise to the level of "authorship"
        (e.g. author of afterword) do not show up.
        The list as a whole should contain no duplicates. This might
        happen because someone is erroneously listed twice in the same
        role, someone is listed as both primary author and regular
        author, someone is listed as both author and translator,
        etc. However it happens, your name only shows up once on the
        front of the book.
        """
        seen_authors = set()
        primary_author = None
        other_authors = []
        acceptable_substitutes = defaultdict(list)
        if not self.contributions:
            return []

        # If there is one and only one contributor, return them, no
        # matter what their role is.
        if len(self.contributions) == 1:
            return [self.contributions[0].contributor]

        # There is more than one contributor. Try to pick out the ones
        # that rise to the level of being 'authors'.
        for x in self.contributions:
            if not primary_author and x.role == Contributor.PRIMARY_AUTHOR_ROLE:
                primary_author = x.contributor
            elif x.role in Contributor.AUTHOR_ROLES:
                other_authors.append(x.contributor)
            elif x.role.lower().startswith('author and'):
                other_authors.append(x.contributor)
            elif (x.role in Contributor.AUTHOR_SUBSTITUTE_ROLES
                  or x.role in Contributor.PERFORMER_ROLES):
                l = acceptable_substitutes[x.role]
                if x.contributor not in l:
                    l.append(x.contributor)

        def dedupe(l):
            """If an item shows up multiple times in a list,
            keep only the first occurrence.
            """
            seen = set()
            deduped = []
            for i in l:
                if i in seen:
                    continue
                deduped.append(i)
                seen.add(i)
            return deduped

        if primary_author:
            return dedupe([primary_author] +
                          sorted(other_authors, key=lambda x: x.sort_name))

        if other_authors:
            return dedupe(other_authors)

        for role in (Contributor.AUTHOR_SUBSTITUTE_ROLES +
                     Contributor.PERFORMER_ROLES):
            if role in acceptable_substitutes:
                contributors = acceptable_substitutes[role]
                return dedupe(sorted(contributors, key=lambda x: x.sort_name))
        else:
            # This for-else branch only runs when no substitute role
            # matched above. There are roles, but they're so random that
            # we can't be sure who's the 'author', or so low on the
            # creativity scale (like 'Executive producer') that we just
            # don't want to put them down as 'author'.
            return []

    @classmethod
    def for_foreign_id(cls,
                       _db,
                       data_source,
                       foreign_id_type,
                       foreign_id,
                       create_if_not_exists=True):
        """Find the Edition representing the given data source's view of
        the work that it primarily identifies by foreign ID.
        e.g. for_foreign_id(_db, DataSource.OVERDRIVE,
                            Identifier.OVERDRIVE_ID, uuid)
        finds the Edition for Overdrive's view of a book identified
        by Overdrive UUID.
        This:
        for_foreign_id(_db, DataSource.OVERDRIVE, Identifier.ISBN, isbn)
        will probably return nothing, because although Overdrive knows
        that books have ISBNs, it doesn't use ISBN as a primary
        identifier.
        """
        # Look up the data source if necessary.
        if isinstance(data_source, basestring):
            data_source = DataSource.lookup(_db, data_source)

        identifier, ignore = Identifier.for_foreign_id(_db, foreign_id_type,
                                                       foreign_id)

        # Combine the two to get/create an Edition.
        if create_if_not_exists:
            f = get_one_or_create
            kwargs = dict()
        else:
            f = get_one
            kwargs = dict()
        r = f(_db,
              Edition,
              data_source=data_source,
              primary_identifier=identifier,
              **kwargs)
        return r

    @property
    def license_pools(self):
        """The LicensePools that provide access to the book described
        by this Edition.
        """
        _db = Session.object_session(self)
        return _db.query(LicensePool).filter(
            LicensePool.data_source == self.data_source,
            LicensePool.identifier == self.primary_identifier).all()

    def equivalent_identifiers(self, type=None, policy=None):
        """All Identifiers equivalent to this
        Edition's primary identifier, according to the given
        PresentationCalculationPolicy
        """
        _db = Session.object_session(self)
        identifier_id_subquery = Identifier.recursively_equivalent_identifier_ids_query(
            self.primary_identifier.id, policy=policy)
        q = _db.query(Identifier).filter(
            Identifier.id.in_(identifier_id_subquery))
        if type:
            if isinstance(type, list):
                q = q.filter(Identifier.type.in_(type))
            else:
                q = q.filter(Identifier.type == type)
        return q.all()

    def equivalent_editions(self, policy=None):
        """All Editions whose primary ID is equivalent to this Edition's
        primary ID, according to the given PresentationCalculationPolicy.
        """
        _db = Session.object_session(self)
        identifier_id_subquery = Identifier.recursively_equivalent_identifier_ids_query(
            self.primary_identifier.id, policy=policy)
        return _db.query(Edition).filter(
            Edition.primary_identifier_id.in_(identifier_id_subquery))

    @classmethod
    def missing_coverage_from(cls,
                              _db,
                              edition_data_sources,
                              coverage_data_source,
                              operation=None):
        """Find Editions from `edition_data_source` whose primary
        identifiers have no CoverageRecord from
        `coverage_data_source`.
        e.g.
         gutenberg = DataSource.lookup(_db, DataSource.GUTENBERG)
         oclc_classify = DataSource.lookup(_db, DataSource.OCLC)
         missing_coverage_from(_db, gutenberg, oclc_classify)
        will find Editions that came from Project Gutenberg and
        have never been used as input to the OCLC Classify web
        service.
        """
        if isinstance(edition_data_sources, DataSource):
            edition_data_sources = [edition_data_sources]
        edition_data_source_ids = [x.id for x in edition_data_sources]
        join_clause = (
            (Edition.primary_identifier_id == CoverageRecord.identifier_id) &
            (CoverageRecord.data_source_id == coverage_data_source.id) &
            (CoverageRecord.operation == operation))

        q = _db.query(Edition).outerjoin(CoverageRecord, join_clause)
        if edition_data_source_ids:
            q = q.filter(Edition.data_source_id.in_(edition_data_source_ids))
        q2 = q.filter(CoverageRecord.id == None)
        return q2

    @classmethod
    def sort_by_priority(cls, editions, license_source=None):
        """Return all Editions that describe the Identifier associated with
        this LicensePool, in the order they should be used to create a
        presentation Edition for the LicensePool.
        """
        def sort_key(edition):
            """Return a numeric ordering of this edition."""
            source = edition.data_source
            if not source:
                # This shouldn't happen. Give this edition the
                # lowest priority.
                return -100

            if source == license_source:
                # This Edition contains information from the same data
                # source as the LicensePool itself. Put it below any
                # Edition from one of the data sources in
                # PRESENTATION_EDITION_PRIORITY, but above all other
                # Editions.
                return -1

            elif source.name == DataSourceConstants.METADATA_WRANGLER:
                # The metadata wrangler is slightly less trustworthy
                # than the license source, for everything except cover
                # image (which is handled by
                # Representation.quality_as_thumbnail_image)
                return -1.5

            if source.name in DataSourceConstants.PRESENTATION_EDITION_PRIORITY:
                return DataSourceConstants.PRESENTATION_EDITION_PRIORITY.index(
                    source.name)
            else:
                return -2

        return sorted(editions, key=sort_key)

    @classmethod
    def _content(cls, content, is_html=False):
        """Represent content that might be plain-text or HTML.
        e.g. a book's summary.
        """
        if not content:
            return None
        if is_html:
            type = "html"
        else:
            type = "text"
        return dict(type=type, value=content)

    def set_cover(self, resource):
        old_cover = self.cover
        old_cover_full_url = self.cover_full_url
        self.cover = resource
        self.cover_full_url = resource.representation.public_url

        # TODO: In theory there could be multiple scaled-down
        # versions of this representation and we need some way of
        # choosing between them. Right now we just pick the first one
        # that works.
        if (resource.representation.image_height
                and resource.representation.image_height <=
                self.MAX_THUMBNAIL_HEIGHT):
            # This image doesn't need a thumbnail.
            self.cover_thumbnail_url = resource.representation.public_url
        else:
            # Use the best available thumbnail for this image.
            best_thumbnail = resource.representation.best_thumbnail
            if best_thumbnail:
                self.cover_thumbnail_url = best_thumbnail.public_url
        if (not self.cover_thumbnail_url
                and resource.representation.image_height
                and resource.representation.image_height <=
                self.MAX_FALLBACK_THUMBNAIL_HEIGHT):
            # The full-sized image is too large to be a thumbnail, but it's
            # not huge, and there is no other thumbnail, so use it.
            self.cover_thumbnail_url = resource.representation.public_url
        if old_cover != self.cover or old_cover_full_url != self.cover_full_url:
            logging.debug("Setting cover for %s/%s: full=%s thumb=%s",
                          self.primary_identifier.type,
                          self.primary_identifier.identifier,
                          self.cover_full_url, self.cover_thumbnail_url)

    def add_contributor(self,
                        name,
                        roles,
                        aliases=None,
                        lc=None,
                        viaf=None,
                        **kwargs):
        """Assign a contributor to this Edition."""
        _db = Session.object_session(self)
        if isinstance(roles, basestring):
            roles = [roles]

        # First find or create the Contributor.
        if isinstance(name, Contributor):
            contributor = name
        else:
            contributor, was_new = Contributor.lookup(_db, name, lc, viaf,
                                                      aliases)
            if isinstance(contributor, list):
                # Contributor was looked up/created by name,
                # which returns a list.
                contributor = contributor[0]

        # Then add their Contributions.
        for role in roles:
            contribution, was_new = get_one_or_create(_db,
                                                      Contribution,
                                                      edition=self,
                                                      contributor=contributor,
                                                      role=role)
        return contributor

    def similarity_to(self, other_record):
        """How likely is it that this record describes the same book as the
        given record?
        1 indicates very strong similarity, 0 indicates no similarity
        at all.
        For now we just compare the sets of words used in the titles
        and the authors' names. This should be good enough for most
        cases given that there is usually some preexisting reason to
        suppose that the two records are related (e.g. OCLC said
        they were).
        Most of the Editions are from OCLC Classify, and we expect
        to get some of them wrong (e.g. when a single OCLC work is a
        compilation of several novels by the same author). That's okay
        because those Editions aren't backed by
        LicensePools. They're purely informative. We will have some
        bad information in our database, but the clear-cut cases
        should outnumber the fuzzy cases, so we should still group
        the Editions that really matter--the ones backed by
        LicensePools--together correctly.
        TODO: apply much more lenient terms if the two Editions are
        identified by the same ISBN or other unique identifier.
        """
        if other_record == self:
            # A record is always identical to itself.
            return 1

        if other_record.language == self.language:
            # The books are in the same language. Hooray!
            language_factor = 1
        else:
            if other_record.language and self.language:
                # Each record specifies a different set of languages. This
                # is an immediate disqualification.
                return 0
            else:
                # One record specifies a language and one does not. This
                # is a little tricky. We're going to apply a penalty, but
                # since the majority of records we're getting from OCLC are in
                # English, the penalty will be less if one of the
                # languages is English. It's more likely that an unlabeled
                # record is in English than that it's in some other language.
                if self.language == 'eng' or other_record.language == 'eng':
                    language_factor = 0.80
                else:
                    language_factor = 0.50

        title_quotient = MetadataSimilarity.title_similarity(
            self.title, other_record.title)

        author_quotient = MetadataSimilarity.author_similarity(
            self.author_contributors, other_record.author_contributors)
        if author_quotient == 0:
            # The two works have no authors in common. Immediate
            # disqualification.
            return 0

        # We weight title more heavily because it's much more likely
        # that one author wrote two different books than that two
        # books with the same title have different authors.
        return language_factor * ((title_quotient * 0.80) +
                                  (author_quotient * 0.20))

    def apply_similarity_threshold(self, candidates, threshold=0.5):
        """Yield the Editions from the given list that are similar
        enough to this one.
        """
        for candidate in candidates:
            if self == candidate:
                yield candidate
            else:
                similarity = self.similarity_to(candidate)
                if similarity >= threshold:
                    yield candidate

    def best_cover_within_distance(self, distance, rel=None, policy=None):
        _db = Session.object_session(self)
        identifier_ids = [self.primary_identifier.id]

        if distance > 0:
            if policy is None:
                new_policy = PresentationCalculationPolicy()
            else:
                new_policy = PresentationCalculationPolicy(
                    equivalent_identifier_levels=distance,
                    equivalent_identifier_cutoff=policy.
                    equivalent_identifier_cutoff,
                    equivalent_identifier_threshold=policy.
                    equivalent_identifier_threshold,
                )

            identifier_ids_dict = Identifier.recursively_equivalent_identifier_ids(
                _db, identifier_ids, policy=new_policy)
            identifier_ids += identifier_ids_dict[self.primary_identifier.id]

        return Identifier.best_cover_for(_db, identifier_ids, rel=rel)

    @property
    def title_for_permanent_work_id(self):
        title = self.title
        if self.subtitle:
            title += (": " + self.subtitle)
        return title

    @property
    def author_for_permanent_work_id(self):
        authors = self.author_contributors
        if authors:
            # Use the sort name of the primary author.
            author = authors[0].sort_name
        else:
            # This may be an Edition that represents an item on a best-seller list
            # or something like that. In this case it wouldn't have any Contributor
            # objects, just an author string. Use that.
            author = self.sort_author or self.author
        return author

    def calculate_permanent_work_id(self, debug=False):
        title = self.title_for_permanent_work_id
        medium = self.medium_for_permanent_work_id.get(self.medium, None)
        if not title or not medium:
            # If a book has no title or medium, it has no permanent work ID.
            self.permanent_work_id = None
            return

        author = self.author_for_permanent_work_id

        w = WorkIDCalculator
        norm_title = w.normalize_title(title)
        norm_author = w.normalize_author(author)

        old_id = self.permanent_work_id
        self.permanent_work_id = self.calculate_permanent_work_id_for_title_and_author(
            title, author, medium)
        args = ("Permanent work ID for %d: %s/%s -> %s/%s/%s -> %s (was %s)",
                self.id, title, author, norm_title, norm_author, medium,
                self.permanent_work_id, old_id)
        if debug:
            logging.debug(*args)
        elif old_id != self.permanent_work_id:
            logging.info(*args)

    @classmethod
    def calculate_permanent_work_id_for_title_and_author(
            cls, title, author, medium):
        w = WorkIDCalculator
        norm_title = w.normalize_title(title)
        norm_author = w.normalize_author(author)

        return WorkIDCalculator.permanent_id(norm_title, norm_author, medium)

    UNKNOWN_AUTHOR = u"[Unknown]"

    def calculate_presentation(self, policy=None):
        """Make sure the presentation of this Edition is up-to-date."""
        _db = Session.object_session(self)
        changed = False
        if policy is None:
            policy = PresentationCalculationPolicy()

        # Gather information up front that will be used to determine
        # whether this method actually did anything.
        old_author = self.author
        old_sort_author = self.sort_author
        old_sort_title = self.sort_title
        old_work_id = self.permanent_work_id
        old_cover = self.cover
        old_cover_full_url = self.cover_full_url
        old_cover_thumbnail_url = self.cover_thumbnail_url

        if policy.set_edition_metadata:
            self.author, self.sort_author = self.calculate_author()
            self.sort_title = TitleProcessor.sort_title_for(self.title)
            self.calculate_permanent_work_id()
            CoverageRecord.add_for(
                self,
                data_source=self.data_source,
                operation=CoverageRecord.SET_EDITION_METADATA_OPERATION)

        if policy.choose_cover:
            self.choose_cover(policy=policy)

        if (self.author != old_author or self.sort_author != old_sort_author
                or self.sort_title != old_sort_title
                or self.permanent_work_id != old_work_id
                or self.cover != old_cover
                or self.cover_full_url != old_cover_full_url
                or self.cover_thumbnail_url != old_cover_thumbnail_url):
            changed = True

        # Now that everything's calculated, log it.
        if policy.verbose:
            if changed:
                changed_status = "changed"
                level = logging.info
            else:
                changed_status = "unchanged"
                level = logging.debug

            msg = u"Presentation %s for Edition %s (by %s, pub=%s, ident=%s/%s, pwid=%s, language=%s, cover=%r)"
            args = [
                changed_status, self.title, self.author, self.publisher,
                self.primary_identifier.type,
                self.primary_identifier.identifier, self.permanent_work_id,
                self.language
            ]
            if self.cover and self.cover.representation:
                args.append(self.cover.representation.public_url)
            else:
                args.append(None)
            level(msg, *args)
        return changed

    def calculate_author(self):
        """Turn the list of Contributors into string values for .author
        and .sort_author.
        """

        sort_names = []
        display_names = []
        for author in self.author_contributors:
            default_family = default_display = None
            if (author.sort_name and not author.display_name) or not author.family_name:
                default_family, default_display = author.default_names()
            display_name = author.display_name or default_display or author.sort_name
            family_name = author.family_name or default_family or author.sort_name
            display_names.append([family_name, display_name])
            sort_names.append(author.sort_name)
        if display_names:
            author = ", ".join([x[1] for x in sorted(display_names)])
        else:
            author = self.UNKNOWN_AUTHOR
        if sort_names:
            sort_author = " ; ".join(sorted(sort_names))
        else:
            sort_author = self.UNKNOWN_AUTHOR
        return author, sort_author

    def choose_cover(self, policy=None):
        """Try to find a cover that can be used for this Edition."""
        self.cover_full_url = None
        self.cover_thumbnail_url = None
        for distance in (0, 5):
            # If there's a cover directly associated with the
            # Edition's primary ID, use it. Otherwise, find the
            # best cover associated with any related identifier.
            best_cover, covers = self.best_cover_within_distance(
                distance=distance, policy=policy)

            if best_cover:
                if not best_cover.representation:
                    logging.warn(
                        "Best cover for %r has no representation!",
                        self.primary_identifier,
                    )
                else:
                    rep = best_cover.representation
                    if not rep.thumbnails:
                        logging.warn(
                            "Best cover for %r (%s) was never thumbnailed!",
                            self.primary_identifier, rep.public_url)
                self.set_cover(best_cover)
                break
        else:
            # No cover has been found. If the Edition currently references
            # a cover, it has since been rejected or otherwise removed.
            # Cover details need to be removed.
            cover_info = [self.cover, self.cover_full_url]
            if any(cover_info):
                self.cover = None
                self.cover_full_url = None

        if not self.cover_thumbnail_url:
            # The process we went through above did not result in the
            # setting of a thumbnail cover.
            #
            # It's possible there's a thumbnail even when there's no
            # full-sized cover, or when the full-sized cover and
            # thumbnail are different Resources on the same
            # Identifier. Try to find a thumbnail the same way we'd
            # look for a cover.
            for distance in (0, 5):
                best_thumbnail, thumbnails = self.best_cover_within_distance(
                    distance=distance,
                    policy=policy,
                    rel=LinkRelations.THUMBNAIL_IMAGE,
                )
                if best_thumbnail:
                    if not best_thumbnail.representation:
                        logging.warn(
                            "Best thumbnail for %r has no representation!",
                            self.primary_identifier,
                        )
                    else:
                        rep = best_thumbnail.representation
                        if rep:
                            self.cover_thumbnail_url = rep.public_url
                        break
            else:
                # No thumbnail was found. If the Edition references a thumbnail,
                # it needs to be removed.
                if self.cover_thumbnail_url:
                    self.cover_thumbnail_url = None

        # Whether or not we succeeded in setting the cover,
        # record the fact that we tried.
        CoverageRecord.add_for(self,
                               data_source=self.data_source,
                               operation=CoverageRecord.CHOOSE_COVER_OPERATION)
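
The scoring in `similarity_to` above reduces to plain arithmetic: a language factor of 1, 0.8, 0.5, or an outright 0, multiplied by a title/author blend weighted 80/20. A standalone sketch of just that formula (the quotient inputs stand in for MetadataSimilarity's output):

def similarity_score(language_factor, title_quotient, author_quotient):
    # No author overlap disqualifies the pair outright.
    if author_quotient == 0:
        return 0
    # Title dominates: one author writing two books is more common than
    # two authors sharing a title.
    return language_factor * (title_quotient * 0.80 +
                              author_quotient * 0.20)

# Same language, strong title match, weak author match:
print(similarity_score(1, 0.9, 0.3))     # 0.78
# One record unlabeled, but one side is English:
print(similarity_score(0.80, 0.9, 0.3))  # 0.624
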
Esempio n. 43
0
class Authority(Base):
    authority = Column(Enum(AuthEnum), primary_key=True)
    app_id = Column(String(36), primary_key=True)

    app = relationship("ClientApp", back_populates="authorities")
Esempio n. 44
0
class Chip(Base):
    __tablename__ = 'chip'
    
    id = Column(Integer, primary_key=True, autoincrement=True)
    indice = Column(
        Integer,
        nullable=False,
        default=0,
        doc='''The library number for the chip.  Some chips do not have
        an indice because they are not officially obtainable in the
        games.'''
    )
    indice_game = Column(
        Integer,
        nullable=True,
        default=0,
        doc='''The in-game number for the chip.  Same rules as
        `Chip.indice`.'''
    )
    game = Column(
        Enum(Game),
        nullable=False,
        doc='''The game where the chip is from.'''
    )
    version = Column(
        Enum(Version),
        nullable=True,
        doc='''Post MMBN2, each MMBN game had version-exclusive chips,
        so we are accounting for those.  If the chip is not
        version-exclusive, this may be left blank.'''
    )
    name = Column(
        Unicode(50),
        nullable=False,
        doc='The in-game name for the chip.'
    )
    name_jp = Column(
        Unicode(50),
        nullable=True,
        doc='The in-game name for the chip (Japanese).'
    )
    codes = relationship(
        lambda: ChipCode,
        cascade='save-update, merge, delete'
    )
    classification = Column(
        Enum(Classification),
        nullable=True,
        doc='''The in-game classification of a chip (standard, mega, etc.)
        Valid chip types can be found in the Classifications Enum.'''
    )
    effects = relationship(
        lambda: ChipEffects,
        cascade='save-update, merge, delete'
    )
    element = Column(
        Enum(Element),
        nullable=False,
        doc='''The element for the chip, can be any element found in Elements
        enum.'''
    )
    size = Column(
        Integer,
        nullable=True,
        default=0,
        doc='The size (in MB) of the chip, from 1-99'
    )
    description = Column(
        UnicodeText(255, convert_unicode=True),
        nullable=False,
        doc='The in-game description for the chip.'
    )
    summary = Column(
        UnicodeText(convert_unicode=True),
        nullable=True,
        doc='A more detailed explanation of what the battlechip does.'
    )
    rarity = Column(Integer,
        nullable=False,
        default=1,
        doc='''The amount of stars (rarity) of a chip, from 1-5.'''
    )
    damage_min = Column(
        Integer,
        nullable=False,
        default=0,
        doc='The amount of damage the chip deals by itself. (Minimum)'
    )
    damage_max = Column(
        Integer,
        nullable=False,
        default=0,
        doc='The amount of damage the chip deals by itself. (Maximum)'
    )
    recovery = Column(
        Integer,
        nullable=False,
        default=0,
        doc='If a recovery chip, the amount of HP recovered.'
    )
    
    __table_args__ = (
        UniqueConstraint('name', 'game', name='chip'),
        Index('chip_index', 'indice', 'classification')
    )
    
    def __init__(self, indice=None, indice_game=None, game=None, version=None,
        name=None, name_jp=None, classification='', element=None, size=None,
        description=None, summary=None, rarity=None, damage_min=None,
        damage_max=None, recovery=None
    ):
        self.indice = indice
        self.indice_game = indice_game
        self.game = game
        self.version = version
        self.name = name
        self.name_jp = name_jp
        self.classification = classification
        self.element = element
        self.size = size
        self.description = description
        self.summary = summary
        self.rarity = rarity
        self.damage_min = damage_min
        self.damage_max = damage_max
        self.recovery = recovery
        
    def __repr__(self):
        return '<Chip: #%s - %s - %s>' % (self.indice, self.name, self.game)

    def codes_iter(self):
        return [code.code for code in self.codes]
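
The `__table_args__` tuple above attaches a composite unique constraint on (name, game) plus a secondary index. A minimal sketch of how that constraint behaves, with the columns simplified and an in-memory SQLite engine (not the original schema):

from sqlalchemy import (create_engine, Column, Integer, Unicode,
                        UniqueConstraint)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Chip(Base):
    __tablename__ = 'chip'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(50), nullable=False)
    game = Column(Integer, nullable=False)
    # The same name may repeat as long as the game differs.
    __table_args__ = (UniqueConstraint('name', 'game', name='chip'),)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Chip(name=u'Cannon', game=1))
session.add(Chip(name=u'Cannon', game=2))  # fine: different game
session.commit()
session.add(Chip(name=u'Cannon', game=1))  # duplicate (name, game)
try:
    session.commit()
except IntegrityError:
    session.rollback()
    print('duplicate (name, game) rejected')
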
Esempio n. 45
0
from __future__ import absolute_import

import datetime
import enum
from decimal import Decimal
from typing import List, Optional, Tuple

from sqlalchemy import (Column, Date, Enum, ForeignKey, Integer, String, Table,
                        func, select)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import column_property, composite, mapper, relationship

PetKind = Enum("cat", "dog", name="pet_kind")


class HairKind(enum.Enum):
    LONG = 'long'
    SHORT = 'short'


Base = declarative_base()

association_table = Table(
    "association",
    Base.metadata,
    Column("pet_id", Integer, ForeignKey("pets.id")),
    Column("reporter_id", Integer, ForeignKey("reporters.id")),
)
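
The bare `association_table` above is the standard many-to-many wiring: it carries only the two foreign keys, and `relationship(secondary=...)` joins through it. A hedged sketch of the two models it presumably links (Pet and Reporter are assumptions inferred from the column names):

from sqlalchemy import Column, Integer, String, ForeignKey, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

association_table = Table(
    "association", Base.metadata,
    Column("pet_id", Integer, ForeignKey("pets.id")),
    Column("reporter_id", Integer, ForeignKey("reporters.id")),
)

class Pet(Base):
    __tablename__ = "pets"
    id = Column(Integer, primary_key=True)
    name = Column(String(30))

class Reporter(Base):
    __tablename__ = "reporters"
    id = Column(Integer, primary_key=True)
    # secondary= routes the join through the association table, giving
    # a many-to-many without an explicitly mapped link class.
    pets = relationship(Pet, secondary=association_table)
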

Esempio n. 46
0
class Election(DbBase):
    @staticmethod
    def get_election_by_id(
        db_session: Session,
        id: str,
        query_modifier: Callable[[Query], Query] = lambda x: x,
    ) -> Optional[Election]:
        return query_modifier(
            db_session.query(Election)).filter_by(id=id).first()

    @staticmethod
    def get_active_election_by_active_id(
        db_session: Session,
        active_id: str,
        query_modifier: Callable[[Query], Query] = lambda x: x,
    ) -> Optional[Election]:
        return (query_modifier(db_session.query(Election)).filter_by(
            active_id=active_id, election_completed_at=None).first())

    @staticmethod
    def get_rankings_by_user_for_election(
            db_session: Session, election_id: str) -> Dict[str, List[str]]:
        aliased_ranking_table = aliased(Ranking, name="aliased_ranking")

        user_rankings = (db_session.query(
            func.group_concat(aliased_ranking_table.business_id)).filter(
                aliased_ranking_table.user_id == Ranking.user_id,
                aliased_ranking_table.election_id == election_id,
            ).order_by(aliased_ranking_table.rank.asc()).correlate(
                Ranking).as_scalar())
        rankings_for_election = (db_session.query(
            Ranking.user_id,
            user_rankings).filter(Ranking.election_id == election_id).group_by(
                Ranking.user_id).all())

        return {
            user_id: rank_string.split(",")
            for user_id, rank_string in rankings_for_election
        }

    @staticmethod
    def delete_election_results_for_election(db_session: Session,
                                             election_id: str):
        db_session.query(Round).filter_by(election_id=election_id).delete()

    __tablename__ = "election"

    id: str = Column(String(length=36), primary_key=True)
    active_id: str = Column(String(length=ACTIVE_ID_LENGTH))
    election_status: ElectionStatus = Column(
        Enum(ElectionStatus),
        default=ElectionStatus.IN_CREATION,
        nullable=False)
    election_completed_at: datetime = Column(DateTime)
    election_creator_id: str = Column(String(length=36),
                                      ForeignKey(BasicUser.id),
                                      nullable=False)

    election_creator = relationship("BasicUser", uselist=False)
    candidates: List[Candidate] = relationship("Candidate")

    __table_args__ = (Index("active_id", active_id, election_completed_at), )
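
The correlated `group_concat` subquery in `get_rankings_by_user_for_election` yields one comma-joined string of business ids per user, ordered by rank, and the dict comprehension splits it back into lists. The post-processing step in isolation, with made-up row data:

# Shape of the rows the query returns: (user_id, "id,id,id").
rankings_for_election = [
    ("user-1", "biz-9,biz-2,biz-5"),
    ("user-2", "biz-5,biz-9"),
]
rankings = {
    user_id: rank_string.split(",")
    for user_id, rank_string in rankings_for_election
}
print(rankings["user-1"])  # ['biz-9', 'biz-2', 'biz-5']
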
Esempio n. 47
0
class User(Base, UserMixin):
    __tablename__ = 'users'
    id = Column(Integer(), primary_key=True)
    email = Column(String(128), unique=True)
    name = Column(String(12), unique=True)
    password = Column(String(500), nullable=False)
    verify_token = Column(String(200))
    token_expire = Column(Integer())
    active = Column(Boolean())
    created_at = Column(DateTime())
    updated_at = Column(DateTime())
    login_ip = Column(String(15))
    status = Column(Enum(u'正常', u'被举报'))  # 'normal', 'reported'
    reputation = Column(Integer, nullable=False, default=0)

    comments = relationship('Comment', back_populates='user')
    downloads = relationship('Download', back_populates='user')
    articles = relationship('Article', back_populates='user')
    feedbacks = relationship('FeedBack', back_populates='user')
    praises = relationship('Praise', back_populates='user')
    send_messages = relationship('Message', back_populates='sender')
    roles = relationship('Role',
                         secondary=roles_users,
                         backref=backref('users', lazy='dynamic'))
    favorite_movies = relationship(Movie, secondary=MovieFavorite)

    bts = relationship('Bt', back_populates='user')

    def is_active(self):
        return self.active

    def __str__(self):
        return self.name

    def id_attribute(self):
        return self.id

    def messages(self):
        res = Message.query.filter(
            and_(Message.reciver_id == self.id, Message.readed == 0))
        return res

    def all_messages(self):
        res = Message.query.filter(Message.reciver_id == self.id)
        return res

    def send_active_mail(self):
        with app.app_context():
            print 'send active mail to ' + self.email
            email = self.email
            username = self.name
            # Assuming `md5` here is hashlib.md5: take the hexdigest so
            # the token can be base64-encoded below.
            active_token = md5(email + str(self.token_expire) +
                               app.config['SECRET_KEY']).hexdigest()
            msg = MailMessage(u"DOTA电影天堂-欢迎-请验证邮箱地址",
                              sender=u"DOTA电影天堂用户注册<*****@*****.**>",
                              recipients=[email])
            token = base64.b64encode(active_token)
            domain = app.config['SITE_DOMAIN']
            url = domain + '/signup_active?user='******'&token=' + token
            msg.html = '<h1 style="text-align:center"><a href="'+domain\
            +'" target="_blank"><img src="'+domain+'/static/img/logo.png"></h1><p><a href="'+url+'">'+url+'</a></p>'

            thread = Thread(target=send_async_email, args=[app, mail, msg])
            thread.start()
            return True
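
The activation token above is an md5 digest over the email, the expiry timestamp, and the app secret, base64-encoded into the URL. A sketch of generating and checking such a token with hashlib (a simplification under the assumption that the original `md5` helper behaves like hashlib.md5):

import base64
import hashlib

SECRET_KEY = 'not-the-real-secret'  # stand-in for app.config['SECRET_KEY']

def make_token(email, token_expire):
    digest = hashlib.md5(
        (email + str(token_expire) + SECRET_KEY).encode('utf-8')
    ).hexdigest()
    return base64.b64encode(digest.encode('ascii')).decode('ascii')

def check_token(email, token_expire, token):
    # Recompute and compare; whether token_expire is still in the
    # future is checked separately against the stored column.
    return make_token(email, token_expire) == token

token = make_token('user@example.com', 1700000000)
print(check_token('user@example.com', 1700000000, token))  # True
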
Esempio n. 48
0
class Instance(Base):

    __table__ = Table('instance', Base.metadata,

                      Column('id', Integer, primary_key=True),
                      Column('name', String(64), nullable=False),
                      Column('status', Enum('dis', 'act'), default='act', nullable=False),
                      Column('project_id', Integer, ForeignKey(Project.id), nullable=False),
                      Column('sequence_id', Integer, ForeignKey(Sequence.id)),
                      Column('shot_id', Integer, ForeignKey(Shot.id)),
                      Column('asset_id', Integer, ForeignKey(Asset.id), nullable=False),
                      Column('shotgun_id', Integer),
                      Column('description', String(255)),
                      Column('created', DateTime(timezone=True), server_default=func.now()),
                      Column('updated', DateTime(timezone=True), onupdate=func.now()),

                      Index('ix_proj_seq_name', 'project_id', 'sequence_id', 'name'),
                      Index('ix_proj_shot_name', 'project_id', 'shot_id', 'name'),
                      Index('ix_shot_asset', 'asset_id', 'shot_id'),
                      Index('ix_sg', 'shotgun_id'),

                      UniqueConstraint('sequence_id', 'name', name='uq_seq_name'),
                      UniqueConstraint('shot_id', 'name', name='uq_shot_name'),
                      UniqueConstraint('shotgun_id', name='uq_sg')
                      )

    _publishgroups = relationship('PublishGroup', backref='instance', lazy='dynamic',
                                  order_by='PublishGroup.id', cascade="all, delete-orphan")

    @property
    def parent(self):
        '''
        Return the Instance's parent.
        The parent can be a Sequence, Shot or Asset entity.
        '''
        if self.shot_id:
            return self.shot
        elif self.sequence_id:
            return self.sequence
        elif self.asset_id:
            return self.asset

    @hybrid_property
    def fullname(self):
        '''
        Return the Instance fullname string, e.g.
        {sequence.name}_{shot.name}_{instance.name}: the parent shot's
        fullname plus this instance's name.
        '''
        return '{}_{}'.format(self.shot.fullname, self.name)

    @classmethod
    def add_instance(cls, entity, asset, name):
        '''
        Add a new Instance for entity.

            Args:
                entity (Sequence|Shot) : Sequence or Shot parent.
                asset          (Asset) : Asset to instance.
                name             (str) : New instance name.
        '''
        cls.assert_isinstance(entity, ('Sequence', 'Shot'))
        cls.assert_isinstance(asset, 'Asset')
        assert isinstance(name, string_types), 'name arg must be a string. Given {!r}'.format(name)

        # Raise InstanceNameExists if an instance name already exists.
        for inst in [i for i in entity._instances if i.name == name]:
            raise InstanceNameExists('{!r} already have an instance named {!r} with status {!r}'
                                     .format(entity, name, inst.status))

        return Instance.create(project=entity.project, entity=entity, name=name, asset=asset)

    @classmethod
    def find(cls, project=None, entity=None, name=None, asset=None, status=None,
             id=None, shotgun_id=None):
        '''
        Return Instance instances matching the query arguments.

            Args:
                project      (Project) : parent Project instance.
                entity (Sequence|Shot) : parent Sequence or Shot instance.
                asset          (Asset) : Asset instance.
                name             (str) : Instance name.
                status           (str) : Instance status.
                id          (int/list) : Instance id(s).
                shotgun_id  (int/list) : Instance shotgun id(s).

            Returns:
                A list of Instance instances matching find arguments.
        '''
        query = cls.query(project=project, name=name, id=id, status=status, shotgun_id=shotgun_id)

        if entity:
            if entity.cls_name() == 'Sequence':
                field = cls.sequence_id
            elif entity.cls_name() == 'Shot':
                field = cls.shot_id
            else:
                raise TypeError('entity arg must be a Sequence or Shot class. Given {!r}'
                                .format(type(entity)))

            if isinstance(entity, (list, tuple)):
                query = query.filter(field.in_([e.id for e in entity]))
            else:
                query = query.filter(field == entity.id)

        if asset:
            cls.assert_isinstance(asset, 'Asset')
            query = query.filter(cls.asset_id == asset.id)

        return query.all()

    @classmethod
    def create(cls, name, project, entity, asset, status=None, shotgun_id=None):
        '''
        Create an Instance.

            Args:
                name             (str) : Instance name.
                project      (Project) : parent Project instance.
                entity (Sequence|Shot) : parent Sequence or Shot instance.
                asset          (Asset) : Asset to instance.
                status           (str) : Instance status.
                shotgun_id  (int/list) : Instance shotgun id(s).

            Returns:
                New Instance instance.

        '''
        cls.assert_isinstance(project, 'Project')
        cls.assert_isinstance(asset, 'Asset')

        (sequence, shot) = (None, None)
        if not isinstance(entity, Base):
            raise TypeError('entity arg must be an Entity class. Given {!r}'
                            .format(type(entity)))
        elif entity.cls_name() == 'Sequence':
            sequence = entity
        elif entity.cls_name() == 'Shot':
            shot = entity
        else:
            raise TypeError('entity arg must be a Sequence or Shot. Given {!r}'
                            .format(type(entity)))

        data = dict(name=name,
                    status=status,
                    project_id=project.id,
                    asset_id=asset.id,
                    sequence_id=getattr(sequence, 'id', None),
                    shot_id=getattr(shot, 'id', None),
                    shotgun_id=shotgun_id)

        return super(Instance, cls).create(**data)
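
A minimal usage sketch of the Instance API above; project, shot, and asset are hypothetical, pre-existing objects, and an active session is assumed:

# Create an instance of an asset in a shot; raises InstanceNameExists
# if the shot already has an instance with that name.
inst = Instance.instance(entity=shot, asset=asset, name='hero1')

# Query instances back by parent entity and/or asset.
matches = Instance.find(project=project, entity=shot, asset=asset)
assert inst in matches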
Example no. 49
def add_region_domain(context):
    # Add the new domain, see http://stackoverflow.com/a/14845740
    table_names = []
    inspector = Inspector(context.operations_connection)
    if 'elections' in inspector.get_table_names(context.schema):
        table_names.append('elections')
    if 'election_compounds' in inspector.get_table_names(context.schema):
        table_names.append('election_compounds')
    if 'votes' in inspector.get_table_names(context.schema):
        table_names.append('votes')
    if 'archived_results' in inspector.get_table_names(context.schema):
        table_names.append('archived_results')

    old_type = Enum('federation', 'canton', 'municipality',
                    name='domain_of_influence')
    new_type = Enum('federation', 'region', 'canton', 'municipality',
                    name='domain_of_influence')
    tmp_type = Enum('federation', 'region', 'canton', 'municipality',
                    name='_domain_of_influence')

    tmp_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE _domain_of_influence '
                'USING domain::text::_domain_of_influence'
            ).format(table_name)
        )

    old_type.drop(context.operations.get_bind(), checkfirst=False)

    new_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE domain_of_influence '
                'USING domain::text::domain_of_influence'
            ).format(table_name)
        )

    tmp_type.drop(context.operations.get_bind(), checkfirst=False)
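
A sketch of the inverse migration under the same three-step pattern (a hypothetical remove_region_domain; it assumes no row still holds the 'region' value, otherwise the final cast fails):

def remove_region_domain(context):
    # Same table discovery as in add_region_domain above.
    inspector = Inspector(context.operations_connection)
    table_names = [
        name for name in ('elections', 'election_compounds', 'votes',
                          'archived_results')
        if name in inspector.get_table_names(context.schema)
    ]

    old_type = Enum('federation', 'region', 'canton', 'municipality',
                    name='domain_of_influence')
    new_type = Enum('federation', 'canton', 'municipality',
                    name='domain_of_influence')
    tmp_type = Enum('federation', 'canton', 'municipality',
                    name='_domain_of_influence')

    # 1. Cast to a temp type, 2. swap the real type, 3. cast back, 4. clean up.
    tmp_type.create(context.operations.get_bind(), checkfirst=False)
    for table_name in table_names:
        context.operations.execute(
            'ALTER TABLE {} ALTER COLUMN domain TYPE _domain_of_influence '
            'USING domain::text::_domain_of_influence'.format(table_name)
        )

    old_type.drop(context.operations.get_bind(), checkfirst=False)
    new_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            'ALTER TABLE {} ALTER COLUMN domain TYPE domain_of_influence '
            'USING domain::text::domain_of_influence'.format(table_name)
        )

    tmp_type.drop(context.operations.get_bind(), checkfirst=False)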
Example no. 50
class File(db.Model):

    __tablename__ = "release_files"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
            CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
            Index(
                "release_files_single_sdist",
                "release_id",
                "packagetype",
                unique=True,
                postgresql_where=((cls.packagetype == "sdist")
                                  & (cls.allow_multiple_sdist == False)  # noqa
                                  ),
            ),
            Index("release_files_release_id_idx", "release_id"),
        )

    release_id = Column(
        ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    python_version = Column(Text)
    requires_python = Column(Text)
    packagetype = Column(
        Enum(
            "bdist_dmg",
            "bdist_dumb",
            "bdist_egg",
            "bdist_msi",
            "bdist_rpm",
            "bdist_wheel",
            "bdist_wininst",
            "sdist",
        ))
    comment_text = Column(Text)
    filename = Column(Text, unique=True)
    path = Column(Text, unique=True, nullable=False)
    size = Column(Integer)
    has_signature = Column(Boolean)
    md5_digest = Column(Text, unique=True, nullable=False)
    sha256_digest = Column(CIText, unique=True, nullable=False)
    blake2_256_digest = Column(CIText, unique=True, nullable=False)
    upload_time = Column(DateTime(timezone=False), server_default=func.now())
    uploaded_via = Column(Text)

    # We need this column to allow us to handle the currently existing "double"
    # sdists that exist in our database. Eventually we should try to get rid
    # of all of them and then remove this column.
    allow_multiple_sdist = Column(Boolean,
                                  nullable=False,
                                  server_default=sql.false())

    @hybrid_property
    def pgp_path(self):
        return self.path + ".asc"

    @pgp_path.expression  # type: ignore
    def pgp_path(self):
        return func.concat(self.path, ".asc")

    @validates("requires_python")
    def validates_requires_python(self, *args, **kwargs):
        raise RuntimeError("Cannot set File.requires_python")
Example no. 51
class Event(MailSyncBase, HasRevisions, HasPublicID):
    """Data for events."""
    API_OBJECT_NAME = 'event'

    namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
                          nullable=False)

    namespace = relationship(Namespace, load_on_pending=True)

    calendar_id = Column(ForeignKey(Calendar.id, ondelete='CASCADE'),
                         nullable=False)
    # Note that we configure a delete cascade, rather than
    # passive_deletes=True, in order to ensure that delete revisions are
    # created for events if their parent calendar is deleted.
    calendar = relationship(Calendar,
                            backref=backref('events', cascade='delete'),
                            load_on_pending=True)

    # A server-provided unique ID.
    uid = Column(String(767, collation='ascii_general_ci'), nullable=False)

    # DEPRECATED
    # TODO(emfree): remove
    provider_name = Column(String(64), nullable=False, default='DEPRECATED')
    source = Column('source', Enum('local', 'remote'), default='local')

    raw_data = Column(Text, nullable=False)

    title = Column(String(TITLE_MAX_LEN), nullable=True)
    # The database column is named differently for legacy reasons.
    owner = Column('owner2', String(OWNER_MAX_LEN), nullable=True)

    description = Column('_description', LONGTEXT, nullable=True)
    location = Column(String(LOCATION_MAX_LEN), nullable=True)
    busy = Column(Boolean, nullable=False, default=True)
    read_only = Column(Boolean, nullable=False)
    reminders = Column(String(REMINDER_MAX_LEN), nullable=True)
    recurrence = Column(Text, nullable=True)
    start = Column(FlexibleDateTime, nullable=False)
    end = Column(FlexibleDateTime, nullable=True)
    all_day = Column(Boolean, nullable=False)
    is_owner = Column(Boolean, nullable=False, default=True)
    last_modified = Column(FlexibleDateTime, nullable=True)
    status = Column('status', Enum(*EVENT_STATUSES),
                    server_default='confirmed')

    # This column is only used for events that are synced from iCalendar
    # files.
    message_id = Column(ForeignKey(Message.id, ondelete='CASCADE'),
                        nullable=True)

    message = relationship(Message,
                           backref=backref('events',
                                           order_by='Event.last_modified',
                                           cascade='all, delete-orphan'))

    __table_args__ = (Index('ix_event_ns_uid_calendar_id',
                            'namespace_id', 'uid', 'calendar_id'),)

    participants = Column(MutableList.as_mutable(BigJSON), default=[],
                          nullable=True)

    # This is only used by the iCalendar invite code. The sequence number
    # stores the version number of the invite.
    sequence_number = Column(Integer, nullable=True)

    discriminator = Column('type', String(30))
    __mapper_args__ = {'polymorphic_on': discriminator,
                       'polymorphic_identity': 'event'}

    @validates('reminders', 'recurrence', 'owner', 'location', 'title',
               'raw_data')
    def validate_length(self, key, value):
        max_len = _LENGTHS[key]
        return value if value is None else value[:max_len]

    @property
    def when(self):
        if self.all_day:
            # Dates are stored as DateTimes so transform to dates here.
            start = arrow.get(self.start).to('utc').date()
            end = arrow.get(self.end).to('utc').date()
            return Date(start) if start == end else DateSpan(start, end)
        else:
            start = self.start
            end = self.end
            return Time(start) if start == end else TimeSpan(start, end)

    @when.setter
    def when(self, when):
        if 'time' in when:
            self.start = self.end = time_parse(when['time'])
            self.all_day = False
        elif 'start_time' in when:
            self.start = time_parse(when['start_time'])
            self.end = time_parse(when['end_time'])
            self.all_day = False
        elif 'date' in when:
            self.start = self.end = date_parse(when['date'])
            self.all_day = True
        elif 'start_date' in when:
            self.start = date_parse(when['start_date'])
            self.end = date_parse(when['end_date'])
            self.all_day = True

    def _merge_participant_attributes(self, left, right):
        """Merge right into left. Right takes precedence unless it's null."""
        for attribute in right.keys():
            # Special cases:
            if right[attribute] is None:
                continue
            elif right[attribute] == '':
                continue
            elif right['status'] == 'noreply':
                continue
            else:
                left[attribute] = right[attribute]

        return left

    def _partial_participants_merge(self, event):
        """Merge the participants from event into self.participants.
        event always takes precedence over self, except if
        a participant in self isn't in event.

        This method is only called by the ical merging code because
        iCalendar attendance updates are partial: an RSVP reply often
        only contains the status of the person that RSVPs.
        It would be very wrong to call this method to merge, say, Google
        Events participants because they handle the merging themselves.
        """

        # We have to jump through some hoops because a participant may
        # not have an email or may not have a name, so we build a hash
        # where we can find both. Also note that we store names in the
        # hash only if the email is None.
        self_hash = {}
        for participant in self.participants:
            email = participant.get('email')
            name = participant.get('name')
            if email is not None:
                self_hash[email] = participant
            elif name is not None:
                # We have a name without an email.
                self_hash[name] = participant

        for participant in event.participants:
            email = participant.get('email')
            name = participant.get('name')

            # This is the tricky part -- we only want one entry per
            # participant. If the participant has an email we key on that,
            # merging into any entry we already have; otherwise we fall back
            # to keying on the name. This works in practice because we're
            # merging updates to an event, and iCalendar updates always carry
            # an email address.
            # - karim
            if email is not None:
                if email in self_hash:
                    self_hash[email] = self._merge_participant_attributes(
                        self_hash[email], participant)
                else:
                    self_hash[email] = participant
            elif name is not None:
                if name in self_hash:
                    self_hash[name] = self._merge_participant_attributes(
                        self_hash[name], participant)
                else:
                    self_hash[name] = participant

        # Wrap in list() so the result is a list under Python 3 as well.
        return list(self_hash.values())
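
    # Worked example of the precedence rules in _merge_participant_attributes
    # (dicts are illustrative):
    #
    #   left  = {'email': 'a@example.com', 'name': 'Ada', 'status': 'noreply'}
    #   right = {'email': 'a@example.com', 'name': None,  'status': 'yes'}
    #
    # Merging right into left keeps 'Ada' (right's name is None) and takes
    # status 'yes', giving:
    #   {'email': 'a@example.com', 'name': 'Ada', 'status': 'yes'}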

    def update(self, event):
        if event.namespace is not None and event.namespace.id is not None:
            self.namespace_id = event.namespace.id

        if event.calendar is not None and event.calendar.id is not None:
            self.calendar_id = event.calendar.id

        if event.provider_name is not None:
            self.provider_name = event.provider_name

        self.uid = event.uid
        self.raw_data = event.raw_data
        self.title = event.title
        self.description = event.description
        self.location = event.location
        self.start = event.start
        self.end = event.end
        self.all_day = event.all_day
        self.owner = event.owner
        self.is_owner = event.is_owner
        self.read_only = event.read_only
        self.participants = event.participants
        self.busy = event.busy
        self.reminders = event.reminders
        self.recurrence = event.recurrence
        self.last_modified = event.last_modified
        self.message = event.message
        self.status = event.status

        if event.sequence_number is not None:
            self.sequence_number = event.sequence_number

    @property
    def recurring(self):
        if self.recurrence:
            try:
                r = ast.literal_eval(self.recurrence)
                if isinstance(r, str):
                    r = [r]
                return r
            # literal_eval raises SyntaxError for malformed input as well.
            except (ValueError, SyntaxError):
                log.warn('Invalid RRULE entry for event', event_id=self.id)
                return []
        return []

    @property
    def is_recurring(self):
        return self.recurrence is not None

    @property
    def length(self):
        return self.when.delta

    @property
    def cancelled(self):
        return self.status == 'cancelled'

    @cancelled.setter
    def cancelled(self, is_cancelled):
        if is_cancelled:
            self.status = 'cancelled'
        else:
            self.status = 'confirmed'

    def __new__(cls, *args, **kwargs):
        # __new__ is implicitly a static method, so no @classmethod is
        # needed. Decide whether to instantiate a RecurringEvent/Override
        # based on the kwargs we get.
        cls_ = cls
        recurrence = kwargs.get('recurrence')
        master_event_uid = kwargs.get('master_event_uid')
        if recurrence and master_event_uid:
            raise ValueError("Event can't have both recurrence and master UID")
        if recurrence:
            cls_ = RecurringEvent
        if master_event_uid:
            cls_ = RecurringEventOverride
        # object.__new__ takes no extra arguments; __init__ receives them.
        return object.__new__(cls_)

    def __init__(self, **kwargs):
        # Allow arguments for all subclasses to be passed to the main
        # constructor. Iterate over a copy of the keys so deletion during
        # the loop is safe.
        for k in list(kwargs):
            if not hasattr(type(self), k):
                del kwargs[k]
        super(Event, self).__init__(**kwargs)
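
Because __new__ inspects the constructor kwargs, a plain Event(...) call dispatches to the right subclass transparently; a sketch (only the dispatch-relevant kwargs are shown, and a real Event also needs its non-nullable columns):

ev = Event(recurrence="['RRULE:FREQ=DAILY']")
assert isinstance(ev, RecurringEvent)

ov = Event(master_event_uid='abc123')
assert isinstance(ov, RecurringEventOverride)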
Example no. 52
class IBMDedicatedHostProfile(db.Model):
    """
    Model for Dedicated host profile
    """
    ID_KEY = "id"
    NAME_KEY = "name"
    REGION_KEY = "region"
    HREF_KEY = "href"
    FAMILY_KEY = "family"
    CLASS_KEY = "class"
    SOCKET_COUNT_KEY = "socket_count"
    MEMORY_KEY = "memory"
    VCPU_ARCH_KEY = "vcpu_architecture"
    VCPU_COUNT_KEY = "vcpu_count"
    DISKS_KEY = "disks"
    DEDICATED_HOSTS_KEY = "dedicated_hosts"
    SUPPORTED_INSTANCE_PROFILES_KEY = "supported_instance_profiles"

    __tablename__ = "ibm_dedicated_host_profiles"
    id = Column(String(32), primary_key=True)
    name = Column(String(255), nullable=False)
    region = Column(String(128), nullable=False)
    href = Column(Text)
    family = Column(Enum("balanced", "memory", "compute"))
    class_ = Column('class', String(20))
    socket_count = Column(JSON)
    memory = Column(JSON)  # Play around with properties for this
    vcpu_architecture = Column(JSON)  # Play around with properties for this
    vcpu_count = Column(JSON)  # Play around with properties for this
    disks = Column(JSON)  # Play around with properties for this

    cloud_id = Column(String(32), ForeignKey("ibm_clouds.id"), nullable=False)

    dedicated_hosts = relationship("IBMDedicatedHost",
                                   backref="ibm_dedicated_host_profile",
                                   cascade="all, delete-orphan",
                                   lazy="dynamic")
    supported_instance_profiles = relationship(
        "IBMInstanceProfile",
        secondary=ibm_dh_profile_supported_instance_profiles,
        backref="ibm_dedicated_host_profiles",
        lazy="dynamic")

    __table_args__ = (UniqueConstraint(
        name, region, cloud_id,
        name="uix_ibm_dh_profile_name_region_cloudid"), )

    def __init__(self,
                 name,
                 region,
                 href=None,
                 family=None,
                 class_=None,
                 socket_count=None,
                 memory=None,
                 vcpu_architecture=None,
                 vcpu_count=None,
                 disks=None,
                 cloud_id=None):
        self.id = str(uuid.uuid4().hex)
        self.name = name
        self.region = region
        self.href = href
        self.family = family
        self.class_ = class_
        self.socket_count = socket_count
        self.memory = memory
        self.vcpu_architecture = vcpu_architecture
        self.vcpu_count = vcpu_count
        self.disks = disks
        self.cloud_id = cloud_id

    @classmethod
    def from_ibm_json(cls, json_body):
        """
        Return an object of the class created from the provided JSON body
        """
        return cls(name=json_body["name"],
                   region=json_body["href"].split("//")[1].split(".")[0],
                   href=json_body["href"],
                   family=json_body["family"],
                   class_=json_body["class"],
                   socket_count=json_body["socket_count"],
                   memory=json_body["memory"],
                   vcpu_architecture=json_body["vcpu_architecture"],
                   vcpu_count=json_body["vcpu_count"],
                   disks=json_body["disks"])

    def update_from_obj(self, updated_obj,
                        updated_supported_instance_profiles_list):
        """
        Update an existing object of the class from an updated one
        """
        from doosra.models import IBMInstanceProfile

        assert isinstance(updated_obj, IBMDedicatedHostProfile)
        self.name = updated_obj.name
        self.region = updated_obj.region
        self.href = updated_obj.href
        self.family = updated_obj.family
        self.class_ = updated_obj.class_
        self.memory = updated_obj.memory
        self.socket_count = updated_obj.socket_count
        self.vcpu_architecture = updated_obj.vcpu_architecture
        self.vcpu_count = updated_obj.vcpu_count
        self.disks = updated_obj.disks

        updated_sip_name_obj_dict = {}
        for updated_sip in updated_supported_instance_profiles_list:
            updated_sip_name_obj_dict[updated_sip.name] = updated_sip

        sip_names_to_remove_relation_with = []
        for db_sip in self.supported_instance_profiles.all():
            if db_sip.name not in updated_sip_name_obj_dict:
                sip_names_to_remove_relation_with.append(db_sip.name)

        if sip_names_to_remove_relation_with:
            to_detach = db.session.query(IBMInstanceProfile).filter(
                IBMInstanceProfile.cloud_id == self.cloud_id,
                IBMInstanceProfile.name.in_(
                    sip_names_to_remove_relation_with)).all()
            for instance_profile in to_detach:
                instance_profile.ibm_dedicated_host_profiles.remove(self)
                db.session.commit()

        db_sip_names = [
            db_sip.name for db_sip in self.supported_instance_profiles.all()
        ]
        for updated_sip_name, updated_sip_obj in updated_sip_name_obj_dict.items():
            if updated_sip_name not in db_sip_names:
                self.supported_instance_profiles.append(updated_sip_obj)
                db.session.commit()

        db.session.commit()

    def to_json(self):
        """
        Return a JSON representation of the object
        """
        return {
            self.ID_KEY: self.id,
            self.NAME_KEY: self.name,
            self.REGION_KEY: self.region,
            # self.HREF_KEY: self.href,
            self.FAMILY_KEY: self.family,
            self.CLASS_KEY: self.class_,
            self.SOCKET_COUNT_KEY: self.socket_count,
            self.MEMORY_KEY: self.memory,
            self.VCPU_ARCH_KEY: self.vcpu_architecture,
            self.VCPU_COUNT_KEY: self.vcpu_count,
            self.DISKS_KEY: self.disks,
            self.DEDICATED_HOSTS_KEY: [
                {self.ID_KEY: dedicated_host.id}
                for dedicated_host in self.dedicated_hosts.all()
            ],
            self.SUPPORTED_INSTANCE_PROFILES_KEY: [
                sip.to_json() for sip in self.supported_instance_profiles.all()
            ],
        }