def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    tasks_table = Table('tasks', meta, autoload=True)
    task_info_table = Table('task_info', meta, autoload=True)

    for col_name in TASKS_MIGRATE_COLUMNS:
        column = Column(col_name, Text())
        column.create(tasks_table)

    task_info_records = task_info_table.select().execute().fetchall()

    for task_info in task_info_records:
        values = {
            'input': task_info.input,
            'result': task_info.result,
            'message': task_info.message
        }

        tasks_table\
            .update(values=values)\
            .where(tasks_table.c.id == task_info.task_id)\
            .execute()

    drop_tables([task_info_table])
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn(_LW("Domain '%s' needs to be synchronised") %
                         record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Get the default pool_id from the config file
    default_pool_id = cfg.CONF['service:central'].default_pool_id

    # Create the pool_id column
    pool_id_column = Column('pool_id',
                            UUID(),
                            default=default_pool_id,
                            nullable=True)
    pool_id_column.create(domains_table, populate_default=True)

    # Alter the table to drop default value after populating it
    domains_table.c.pool_id.alter(default=None)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', 'deleted',
                                      name='unique_domain_name',
                                      table=domains_table)
        constraint.create()
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    hash_column = Column('hash', String(32), nullable=False, unique=True)
    hash_column.drop(records_table)
def upgrade(migrate_engine):
    LOG.info(_LI("Adding boolean column delayed_notify to table 'zones'"))
    meta.bind = migrate_engine
    zones_table = Table('zones', meta, autoload=True)
    col = Column('delayed_notify', Boolean(), default=False)
    col.create(zones_table)
    index = Index('delayed_notify', zones_table.c.delayed_notify)
    index.create(migrate_engine)
Example #6
    def _ensure_columns(self, row):
        columns = set(row.keys()) - set(self.table.columns.keys())
        columns = map(validate_columnname, columns)
        for column in columns:
            _type = self._guess_type(column, row[column])
            log.debug("Creating column: %s (%s) on %r" %
                      (column, _type, self.table.name))
            col = Column(column, _type)
            col.create(self.table, connection=self.bind)
Example #7
File: table.py Project: aklaver/dataset
    def create_column(self, name, type):
        """
        Explicitly create a new column ``name`` of a specified type.
        ``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
        ::

            table.create_column('created_at', sqlalchemy.DateTime)
        """
        self._check_dropped()
        with self.database.lock:
            if name not in self.table.columns.keys():
                col = Column(name, type)
                col.create(self.table,
                           connection=self.database.engine)
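
A hedged usage sketch of the method above, assuming the surrounding dataset-style API (dataset.connect, db['table'], Table.insert) that the aklaver/dataset project exposes:

import sqlalchemy
import dataset  # assumption: the Table class above comes from the `dataset` package

db = dataset.connect('sqlite:///:memory:')
table = db['events']
table.insert({'title': 'first event'})

# Explicitly add a typed column instead of letting insert() guess the type.
table.create_column('created_at', sqlalchemy.DateTime)
print(table.columns)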
Example #8
    def __init__(self, *args, **kwargs):
        """ Responsible for:
        * Filter out type-specific kwargs and init Type using these.
        * Filter out `_schema_class` kwargs and init `_schema_class`.
        * Filter out column-specific kwargs and init column using them.
        * If `args` are provided, that means column proxy is being created.
          In this case Type does not need to be created.
        """
        if not hasattr(self, '_kwargs_backup'):
            self._kwargs_backup = kwargs.copy()

        type_args, type_kw, cleaned_kw = self.process_type_args(kwargs)
        if not args:
            schema_item, cleaned_kw = self._generate_schema_item(cleaned_kw)
        column_kw = self.process_column_args(cleaned_kw)
        # Column proxy is created by declarative extension
        if args:
            column_kw['name'], column_kw['type_'], schema_item = args
        # Column init when defining a schema
        else:
            column_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw)
            if 'type_' not in kwargs:
                self._init_kwargs = self._kwargs_backup.copy()
        column_args = (schema_item,)
        return Column.__init__(self, *column_args, **column_kw)
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    service_images = Table('service_images', meta, autoload=True)
    tenantc = Column('tenant_id', String(255))
    tenantc.create(service_images)
    azc = Column('availability_zone', String(255))
    azc.create(service_images)
Example #10
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
    scopes.create()

    # Create the scope and resource columns
    scope_col = Column('scope', scopes, nullable=False, server_default='POOL')
    scope_col.create(tsigkeys_table)

    # Get the default pool_id from the config file
    default_pool_id = cfg.CONF['service:central'].default_pool_id

    # Start with nullable=True and populate_default=True, then convert
    # to nullable=False once all rows have been populated with a resource_id
    resource_id_col = Column('resource_id', UUID, default=default_pool_id,
                             nullable=True)
    resource_id_col.create(tsigkeys_table, populate_default=True)

    # Now that we've populated the default pool id in existing rows, MySQL
    # will let us convert this over to nullable=False
    tsigkeys_table.c.resource_id.alter(nullable=False)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
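
The comments above spell out a common two-step sqlalchemy-migrate pattern: create the column as nullable with populate_default=True so existing rows get a value, then tighten it to NOT NULL. A minimal, self-contained sketch of that pattern (the table and column names here are made up for illustration):

from sqlalchemy import MetaData, Table, Column, String


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    things_table = Table('things', meta, autoload=True)

    # Step 1: add the column as nullable so existing rows don't violate it,
    # and let populate_default fill it in for those rows.
    status_col = Column('status', String(16), default='ACTIVE', nullable=True)
    status_col.create(things_table, populate_default=True)

    # Step 2: once every row has a value, tighten the constraint.
    things_table.c.status.alter(nullable=False)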
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    service_flavors = Table('service_flavors', meta, autoload=True)
    
    conn = migrate_engine.connect()
    trans = conn.begin()
    try:
        delete = service_flavors.delete()\
                                .where(service_flavors.c.service_name=='database')
        conn.execute(delete)
        trans.commit()
    except:
        trans.rollback()
        raise
    
    ramc = Column('ram', Integer())
    ramc.create(service_flavors)
    vcpusc = Column('vcpus', Integer())
    vcpusc.create(service_flavors)
    
    conn = migrate_engine.connect()
    trans = conn.begin()
    try:
        for flavor in SERVICE_FLAVORS:
            insert = service_flavors.insert().values(
                id=flavor['id'],
                service_name="database",
                flavor_name=flavor['flavor_name'],
                flavor_id=flavor['flavor_id'],
                deleted=0,
                ram=flavor['ram'],
                vcpus=flavor['vcpus'],
                created_at=datetime.datetime.now(),
                updated_at=datetime.datetime.now())
            conn.execute(insert)
        trans.commit()
    except:
        trans.rollback()
        raise
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()

        for record in records:
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
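
For reference, a hedged sketch of what a _build_hash helper like the one called above might look like, following the "<rs_id>:<data>:<priority>" layout mentioned in the comment; the md5 choice is an assumption that matches the 32-character 'hash' column used elsewhere in these migrations:

import hashlib


def _build_hash(recordset_id, data, priority):
    md5 = hashlib.md5()
    md5.update(('%s:%s:%s' % (recordset_id, data, priority)).encode('utf-8'))
    return md5.hexdigest()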
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zone_tasks_table = Table('zone_tasks', meta, autoload=True)

    dialect = migrate_engine.url.get_dialect().name

    if dialect.startswith("postgresql"):
        with migrate_engine.connect() as conn:
            conn.execution_options(isolation_level="AUTOCOMMIT")
            conn.execute(
                "ALTER TYPE task_types ADD VALUE 'EXPORT' "
                "AFTER 'IMPORT'")
            conn.close()

    zone_tasks_table.c.task_type.alter(type=Enum(name='task_type',
                                            *TASK_TYPES))

    location = Column('location', String(160), nullable=True)
    location.create(zone_tasks_table)
Example #14
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    keys = Enum(name='key', *ZONE_ATTRIBUTE_KEYS)

    domain_attributes_table = Table(
        'domain_attributes', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('version', Integer(), default=1, nullable=False),
        Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
        Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),

        Column('key', keys),
        Column('value', String(255), nullable=False),
        Column('domain_id', UUID(), nullable=False),

        UniqueConstraint('key', 'value', 'domain_id',
                         name='unique_attributes'),
        ForeignKeyConstraint(['domain_id'], ['domains.id'],
                             ondelete='CASCADE'),

        mysql_engine='INNODB',
        mysql_charset='utf8'
    )

    domains_table = Table('domains', meta, autoload=True)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)
    types.create()

    # Add type and transferred_at to domains
    type_ = Column('type', types, default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)

    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    domain_attributes_table.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
Example #15
class Prize(Base):
    u"""优惠码表"""
    __tablename__ = "prize"
    id = Column(Integer, primary_key=True, autoincrement=True)
    coupon = Column(Unicode(255), doc=u"优惠码的信息")
    account = Column(Integer, doc=u"优惠码金额")
    user_id = Column(Integer, doc=u"分配给某些用户")
    share = Column(Integer, doc=u"是否分享,默认为0,没有分享")
    probability = Column(Integer, doc=u"用户概率控制,3表示废弃优惠码")

    @classmethod
    def get_random_prize(cls, connection):
        stmt = connection.query(Prize).\
            filter(Prize.user_id.is_(None)).\
            filter(Prize.probability == 1).\
            order_by(func.rand()).limit(1).scalar()
        if stmt is None:
            stmt = connection.query(Prize).\
                filter(Prize.user_id.is_(None)).\
                order_by(func.rand()).limit(1).scalar()
        return stmt

    @classmethod
    def get_random_prize_not_coupon(cls, connection):
        return connection.query(Prize).\
            filter(Prize.account.in_((20, 35, 50, 90))).limit(1).scalar()

    @classmethod
    def get_user_prize(cls, connection, user_id):
        return connection.query(Prize).filter(
            Prize.user_id == user_id).scalar()

    @classmethod
    def set_prize_user(cls, connection, prize_id, user_id):
        connection.query(Prize).\
            filter(Prize.id == prize_id).update(
            {
                Prize.user_id: user_id
            }
        )
        connection.commit()

    @classmethod
    def get_share_prize(cls, connection, user_id):
        prize = connection.query(Prize).filter(Prize.user_id == user_id).\
            filter(Prize.share == 0).scalar()
        if prize:
            account = prize.account
            connection.query(Prize).filter(Prize.id == prize.id).update({
                Prize.user_id:
                None,
                Prize.probability:
                3
            })
            connection.commit()
            new_prize = connection.query(Prize).\
                filter(Prize.account == account + 10).\
                filter(Prize.user_id.is_(None)).limit(1).scalar()
            connection.query(Prize).filter(Prize.id == new_prize.id).update({
                Prize.user_id:
                user_id,
                Prize.share:
                1
            })
            connection.commit()
            return True
        return False
Example #16
class ECDKG(db.Base):
    __tablename__ = 'ecdkg'

    node_id = Column(types.Integer, ForeignKey('ecdkg_node.id'))
    node = relationship('ECDKGNode', back_populates='protocol_instances')

    decryption_condition = Column(types.String(32), index=True)
    phase = Column(types.Enum(ECDKGPhase), nullable=False, default=ECDKGPhase.uninitialized)
    threshold = Column(types.Integer)
    encryption_key = Column(db.CurvePoint)
    decryption_key = Column(db.PrivateValue)
    participants = relationship('ECDKGParticipant', back_populates='ecdkg')

    secret_poly1 = Column(db.Polynomial)
    secret_poly2 = Column(db.Polynomial)
    verification_points = Column(db.CurvePointTuple)
    encryption_key_vector = Column(db.CurvePointTuple)

    __table_args__ = (UniqueConstraint('node_id', 'decryption_condition'),)

    def init(self):
        for addr in networking.channels.keys():
            self.get_or_create_participant_by_address(addr)

        # everyone should agree on the same set of participants
        self.threshold = math.ceil(THRESHOLD_FACTOR * (len(self.participants)+1))

        spoly1 = random_polynomial(self.threshold)
        spoly2 = random_polynomial(self.threshold)

        self.secret_poly1 = spoly1
        self.secret_poly2 = spoly2

        self.encryption_key_vector = tuple(secp256k1.multiply(secp256k1.G, coeff) for coeff in self.secret_poly1)

        self.verification_points = tuple(
            secp256k1.add(secp256k1.multiply(secp256k1.G, a), secp256k1.multiply(G2, b))
            for a, b in zip(spoly1, spoly2)
        )

        self.phase = ECDKGPhase.key_distribution

    def process_advance_to_phase(self, target_phase: ECDKGPhase):
        if self.phase < target_phase:
            self.phase = target_phase
            db.Session.commit()

    def process_secret_shares(self, sender_address: int, secret_shares: (int, int), signature: 'rsv triplet'):
        own_address = self.node.address
        participant = self.get_participant_by_address(sender_address)
        share1, share2 = secret_shares

        msg_bytes = (
            b'SECRETSHARES' +
            self.decryption_condition.encode() +
            util.address_to_bytes(own_address) +
            util.private_value_to_bytes(share1) +
            util.private_value_to_bytes(share2)
        )

        recovered_address = util.address_from_message_and_signature(msg_bytes, signature)

        if sender_address != recovered_address:
            raise ValueError(
                'sender address {:040x} does not match recovered address {:040x}'
                .format(sender_address, recovered_address)
            )

        if participant.secret_share1 is None and participant.secret_share2 is None:
            participant.secret_share1 = share1
            participant.secret_share2 = share2
            participant.shares_signature = signature

            db.Session.commit()

            if participant.verification_points is not None:
                self.process_secret_share_verification(sender_address)

        elif participant.secret_share1 != share1 or participant.secret_share2 != share2:
            participant.get_or_create_complaint_by_complainer_address(own_address)
            raise ValueError(
                '{:040x} sent shares for {} which do not match: {} != {}'
                .format(
                    sender_address,
                    self.decryption_condition,
                    (participant.secret_share1, participant.secret_share2),
                    (share1, share2),
                )
            )

    def process_verification_points(self, sender_address: int, verification_points: tuple, signature: 'rsv triplet'):
        own_address = self.node.address
        participant = self.get_participant_by_address(sender_address)

        msg_bytes = (
            b'VERIFICATIONPOINTS' +
            self.decryption_condition.encode() +
            util.curve_point_tuple_to_bytes(verification_points)
        )

        recovered_address = util.address_from_message_and_signature(msg_bytes, signature)

        if sender_address != recovered_address:
            raise ValueError(
                'sender address {:040x} does not match recovered address {:040x}'
                .format(sender_address, recovered_address)
            )

        if participant.verification_points is None:
            participant.verification_points = verification_points
            participant.verification_points_signature = signature

            db.Session.commit()

            if participant.secret_share1 is not None and participant.secret_share2 is not None:
                self.process_secret_share_verification(sender_address)

        elif participant.verification_points != verification_points:
            participant.get_or_create_complaint_by_complainer_address(own_address)
            raise ValueError(
                '{:040x} sent verification points for {} which do not match: {} != {}'
                .format(
                    sender_address,
                    self.decryption_condition,
                    participant.verification_points,
                    verification_points,
                )
            )

    def process_secret_share_verification(self, address: int):
        own_address = self.node.address
        participant = self.get_participant_by_address(address)

        share1 = participant.secret_share1
        share2 = participant.secret_share2

        vlhs = secp256k1.add(secp256k1.multiply(secp256k1.G, share1),
                             secp256k1.multiply(G2, share2))
        vrhs = functools.reduce(
            secp256k1.add,
            (secp256k1.multiply(ps, pow(own_address, k, secp256k1.N))
                for k, ps in enumerate(participant.verification_points)))

        if vlhs == vrhs:
            return

        participant.get_or_create_complaint_by_complainer_address(own_address)

    def process_encryption_key_vector(self,
                                      sender_address: int,
                                      encryption_key_vector: tuple,
                                      signature: 'rsv triplet'):
        own_address = self.node.address
        participant = self.get_participant_by_address(sender_address)

        msg_bytes = (
            b'ENCRYPTIONKEYPART' +
            self.decryption_condition.encode() +
            util.curve_point_tuple_to_bytes(encryption_key_vector)
        )

        recovered_address = util.address_from_message_and_signature(msg_bytes, signature)

        if sender_address != recovered_address:
            raise ValueError(
                'sender address {:040x} does not match recovered address {:040x}'
                .format(sender_address, recovered_address)
            )

        if participant.encryption_key_vector is None:
            lhs = secp256k1.multiply(secp256k1.G, participant.secret_share1)
            rhs = functools.reduce(
                secp256k1.add,
                (secp256k1.multiply(ps, pow(own_address, k, secp256k1.N))
                    for k, ps in enumerate(encryption_key_vector)))
            if lhs != rhs:
                participant.get_or_create_complaint_by_complainer_address(own_address)
                raise ValueError(
                    '{:040x} sent enc key vector which does not match previously sent secret share'
                    .format(sender_address)
                )

            participant.encryption_key_vector = encryption_key_vector
            participant.encryption_key_vector_signature = signature

            if all(p.encryption_key_vector is not None for p in self.participants):
                self.encryption_key = functools.reduce(
                    secp256k1.add,
                    (p.encryption_key_vector[0] for p in self.participants),
                    self.encryption_key_vector[0]
                )

            db.Session.commit()
        elif participant.encryption_key_vector != encryption_key_vector:
            participant.get_or_create_complaint_by_complainer_address(own_address)
            raise ValueError(
                '{:040x} sent encryption key part for {} which do not match: {} != {}'
                .format(
                    sender_address,
                    self.decryption_condition,
                    participant.encryption_key_vector,
                    encryption_key_vector,
                )
            )

    def process_decryption_key_part(self,
                                    sender_address: int,
                                    decryption_key_part: int,
                                    signature: 'rsv triplet'):
        own_address = self.node.address
        participant = self.get_participant_by_address(sender_address)

        msg_bytes = (
            b'DECRYPTIONKEYPART' +
            self.decryption_condition.encode() +
            util.private_value_to_bytes(decryption_key_part)
        )

        recovered_address = util.address_from_message_and_signature(msg_bytes, signature)

        if sender_address != recovered_address:
            raise ValueError(
                'sender address {:040x} does not match recovered address {:040x}'
                .format(sender_address, recovered_address)
            )

        if participant.decryption_key_part is None:
            if secp256k1.multiply(secp256k1.G, decryption_key_part) != participant.encryption_key_vector[0]:
                participant.get_or_create_complaint_by_complainer_address(own_address)
                raise ValueError(
                    '{:040x} sent dec key part which does not match previously sent enc key vector'
                    .format(sender_address)
                )

            participant.decryption_key_part = decryption_key_part
            participant.decryption_key_part_signature = signature

            if all(p.decryption_key_part is not None for p in self.participants):
                self.decryption_key = (
                    sum(p.decryption_key_part for p in self.participants) +
                    self.secret_poly1[0]
                ) % secp256k1.N

            db.Session.commit()
        elif participant.decryption_key_part != decryption_key_part:
            participant.get_or_create_complaint_by_complainer_address(own_address)
            raise ValueError(
                '{:040x} sent decryption key part for {} which do not match: {} != {}'
                .format(
                    sender_address,
                    self.decryption_condition,
                    participant.decryption_key_part,
                    decryption_key_part,
                )
            )

    async def run_until_phase(self, target_phase: ECDKGPhase):
        while self.phase < target_phase:
            logging.info('handling {} phase...'.format(self.phase.name))
            await getattr(self, 'handle_{}_phase'.format(self.phase.name))()

    async def handle_key_distribution_phase(self):
        signed_secret_shares = await networking.broadcast_jsonrpc_call_on_all_channels(
            'get_signed_secret_shares', self.decryption_condition)

        for participant in self.participants:
            address = participant.eth_address

            if address not in signed_secret_shares:
                logging.warning('missing shares from address {:040x}'.format(address))
                continue

            try:
                self.process_secret_shares(address, *signed_secret_shares[address])
            except Exception as e:
                logging.warning(
                    'exception occurred while processing secret shares from {:040x}: {}'
                    .format(address, e)
                )

        logging.info('set all secret shares')
        signed_verification_points = await networking.broadcast_jsonrpc_call_on_all_channels(
            'get_signed_verification_points', self.decryption_condition)

        for participant in self.participants:
            address = participant.eth_address

            if address not in signed_verification_points:
                logging.warning('missing verification points from address {:040x}'.format(address))
                continue

            try:
                self.process_verification_points(address, *signed_verification_points[address])
            except Exception as e:
                logging.warning(
                    'exception occurred while processing verification points from {:040x}: {}'
                    .format(address, e)
                )

        self.process_advance_to_phase(ECDKGPhase.key_verification)

    async def handle_key_verification_phase(self):
        self.process_advance_to_phase(ECDKGPhase.key_check)

    async def handle_key_check_phase(self):
        complaints = await networking.broadcast_jsonrpc_call_on_all_channels(
            'get_complaints', self.decryption_condition)

        for participant in self.participants:
            complainer_address = participant.eth_address

            if complainer_address not in complaints:
                logging.warning('missing complaints from address {:040x}'.format(complainer_address))
                continue

            # TODO: Add complaints and collect responses to complaints

        self.process_advance_to_phase(ECDKGPhase.key_generation)

    async def handle_key_generation_phase(self):
        signed_encryption_key_vectors = await networking.broadcast_jsonrpc_call_on_all_channels(
            'get_signed_encryption_key_vector', self.decryption_condition)

        for participant in self.participants:
            address = participant.eth_address

            if address not in signed_encryption_key_vectors:
                # TODO: this is supposed to be broadcast... maybe try getting it from other nodes instead?
                logging.warning('missing encryption key part from address {:040x}'.format(address))
                continue

            try:
                self.process_encryption_key_vector(address, *signed_encryption_key_vectors[address])
            except Exception as e:
                logging.warning(
                    'exception occurred while processing encryption key part from {:040x}: {}'
                    .format(address, e)
                )

        self.process_advance_to_phase(ECDKGPhase.key_publication)

    async def handle_key_publication_phase(self):
        await util.decryption_condition_satisfied(self.decryption_condition)

        signed_decryption_key_parts = await networking.broadcast_jsonrpc_call_on_all_channels(
            'get_signed_decryption_key_part', self.decryption_condition)

        for p in self.participants:
            address = p.eth_address

            if address not in signed_decryption_key_parts:
                # TODO: switch to interpolation of secret shares if waiting doesn't work
                logging.warning('missing decryption key part from address {:040x}'.format(address))
                continue

            try:
                self.process_decryption_key_part(address, *signed_decryption_key_parts[address])
            except Exception as e:
                logging.warning(
                    'exception occurred while processing decryption key part from {:040x}: {}'
                    .format(address, e)
                )

        self.process_advance_to_phase(ECDKGPhase.complete)

    def get_participant_by_address(self, address: int) -> 'ECDKGParticipant':
        participant = (
            db.Session
            .query(ECDKGParticipant)
            .filter(ECDKGParticipant.ecdkg_id == self.id,
                    ECDKGParticipant.eth_address == address)
            .scalar()
        )

        if participant is None:
            raise ValueError('could not find participant with address {:040x}'.format(address))

        return participant

    def get_or_create_participant_by_address(self, address: int) -> 'ECDKGParticipant':
        try:
            return self.get_participant_by_address(address)
        except ValueError:
            participant = ECDKGParticipant(ecdkg_id=self.id, eth_address=address)
            db.Session.add(participant)
            db.Session.commit()
            return participant

    def get_signed_secret_shares(self, address: int) -> ((int, int), 'rsv triplet'):
        private_key = self.node.private_key

        secret_shares = (eval_polynomial(self.secret_poly1, address),
                         eval_polynomial(self.secret_poly2, address))

        msg_bytes = (
            b'SECRETSHARES' +
            self.decryption_condition.encode() +
            util.address_to_bytes(address) +
            util.private_value_to_bytes(secret_shares[0]) +
            util.private_value_to_bytes(secret_shares[1])
        )

        signature = util.sign_with_key(msg_bytes, private_key)

        return (secret_shares, signature)

    def get_signed_verification_points(self) -> (tuple, 'rsv triplet'):
        private_key = self.node.private_key

        msg_bytes = (
            b'VERIFICATIONPOINTS' +
            self.decryption_condition.encode() +
            util.curve_point_tuple_to_bytes(self.verification_points)
        )

        signature = util.sign_with_key(msg_bytes, private_key)

        return (self.verification_points, signature)

    def get_signed_encryption_key_vector(self) -> ((int, int), 'rsv triplet'):
        private_key = self.node.private_key

        msg_bytes = (
            b'ENCRYPTIONKEYPART' +
            self.decryption_condition.encode() +
            util.curve_point_tuple_to_bytes(self.encryption_key_vector)
        )

        signature = util.sign_with_key(msg_bytes, private_key)

        return (self.encryption_key_vector, signature)

    def get_signed_decryption_key_part(self) -> (int, 'rsv triplet'):
        private_key = self.node.private_key

        msg_bytes = (
            b'DECRYPTIONKEYPART' +
            self.decryption_condition.encode() +
            util.private_value_to_bytes(self.secret_poly1[0])
        )

        signature = util.sign_with_key(msg_bytes, private_key)

        return (self.secret_poly1[0], signature)

    def get_complaints_by(self, address: int) -> dict:
        return (
            db.Session
            .query(ECDKGComplaint)
            .filter(  # ECDKGComplaint.participant.ecdkg_id == self.id,
                    ECDKGComplaint.complainer_address == address)
            .all()
        )

    def to_state_message(self) -> dict:
        own_address = self.node.address

        msg = {'address': '{:040x}'.format(own_address)}

        for attr in ('decryption_condition', 'phase', 'threshold'):
            val = getattr(self, attr)
            if val is not None:
                msg[attr] = val

        msg['participants'] = {'{:040x}'.format(p.eth_address): p.to_state_message() for p in self.participants}

        for attr in ('encryption_key',):
            val = getattr(self, attr)
            if val is not None:
                msg[attr] = '{0[0]:064x}{0[1]:064x}'.format(val)

        for attr in ('verification_points', 'encryption_key_vector'):
            val = getattr(self, attr)
            if val is not None:
                msg[attr] = tuple('{0[0]:064x}{0[1]:064x}'.format(pt) for pt in val)

        return msg
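
A minimal sketch of how the phase machinery above might be driven, assuming an ECDKG row has already been created, init() has run, and the networking channels are up; the event-loop wiring is illustrative only:

import asyncio


async def run_distributed_key_generation(ecdkg):
    # Advances through key_distribution -> key_verification -> key_check ->
    # key_generation -> key_publication -> complete, awaiting the
    # handle_*_phase coroutines defined on the class above.
    await ecdkg.run_until_phase(ECDKGPhase.complete)
    return ecdkg.encryption_key, ecdkg.decryption_key

# asyncio.get_event_loop().run_until_complete(run_distributed_key_generation(ecdkg))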
Example #17
class Contest(Base):
    """Class to store a contest (which is a single day of a
    programming competition). Not to be used directly (import it from
    SQLAlchemyAll).

    """
    __tablename__ = 'contests'
    __table_args__ = (
        CheckConstraint("token_initial <= token_max"),
        )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True)

    # Short name of the contest, and longer description. Both human
    # readable.
    name = Column(
        String,
        nullable=False)
    description = Column(
        String,
        nullable=False)

    # Follows the enforcement of tokens for any person, for all the
    # tasks. These enforcements add up to the ones defined task-wise.

    # token_initial is the initial number of tokens available, or None
    # to disable tokens completely.
    token_initial = Column(
        Integer,
        CheckConstraint("token_initial >= 0"),
        nullable=True)
    # token_max is the maximum number available at any given time, or None
    # not to enforce this limitation.
    token_max = Column(
        Integer,
        CheckConstraint("token_max > 0"),
        nullable=True)
    # token_total is the maximum number that can be used in the whole
    # contest, or None not to enforce this limitation.
    token_total = Column(
        Integer,
        CheckConstraint("token_total > 0"),
        nullable=True)
    # token_min_interval is the minimum interval in seconds between
    # two uses of a token (set it to 0 to not enforce any limitation).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False)
    # Every token_gen_time from the beginning of the contest we generate
    # token_gen_number tokens. If _gen_number is 0 no tokens will be
    # generated, if _gen_number is > 0 and _gen_time is 0 tokens will be
    # infinite. In case of infinite tokens, the values of _initial, _max
    # and _total will be ignored (except when token_initial is None).
    token_gen_time = Column(
        Interval,
        CheckConstraint("token_gen_time >= '0 seconds'"),
        nullable=False)
    token_gen_number = Column(
        Integer,
        CheckConstraint("token_gen_number >= 0"),
        nullable=False)

    # Beginning and ending of the contest, unix times.
    start = Column(
        DateTime,
        nullable=True)
    stop = Column(
        DateTime,
        nullable=True)

    # Timezone for the contest. All timestamps in CWS will be shown
    # using the timezone associated to the logged-in user or (if it's
    # None or an invalid string) the timezone associated to the
    # contest or (if it's None or an invalid string) the local
    # timezone of the server. This value has to be a string like
    # "Europe/Rome", "Australia/Sydney", "America/New_York", etc.
    timezone = Column(
        String,
        nullable=True)

    # Max contest time for each user in seconds.
    per_user_time = Column(
        Interval,
        nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # during the whole contest or None to not enforce this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(
        Integer,
        CheckConstraint("max_user_test_number > 0"),
        nullable=True)

    # Minimum interval between two submissions or user_tests, or None to
    # not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # tasks (list of Task objects)
    # announcements (list of Announcement objects)
    # users (list of User objects)

    # Moreover, we have the following methods.
    # get_submissions (defined in SQLAlchemyAll)
    # get_user_tests (defined in SQLAlchemyAll)

    def __init__(self, name, description, tasks, users,
                 token_initial=None, token_max=None, token_total=None,
                 token_min_interval=timedelta(),
                 token_gen_time=timedelta(), token_gen_number=0,
                 start=None, stop=None, timezone=None, per_user_time=None,
                 max_submission_number=None, max_user_test_number=None,
                 min_submission_interval=None, min_user_test_interval=None,
                 announcements=None):
        self.name = name
        self.description = description
        self.tasks = tasks
        self.users = users
        self.token_initial = token_initial
        self.token_max = token_max
        self.token_total = token_total
        self.token_min_interval = token_min_interval
        self.token_gen_time = token_gen_time
        self.token_gen_number = token_gen_number
        self.start = start
        self.stop = stop
        self.timezone = timezone
        self.per_user_time = per_user_time
        self.max_submission_number = max_submission_number
        self.max_user_test_number = max_user_test_number
        self.min_submission_interval = min_submission_interval
        self.min_user_test_interval = min_user_test_interval
        self.announcements = announcements if announcements is not None else []

    def export_to_dict(self, skip_submissions=False, skip_user_tests=False):
        """Return object data as a dictionary.

        """
        return {'name':               self.name,
                'description':        self.description,
                'tasks':              [task.export_to_dict()
                                       for task in self.tasks],
                'users':              [user.export_to_dict(skip_submissions,
                                                           skip_user_tests)
                                       for user in self.users],
                'token_initial':      self.token_initial,
                'token_max':          self.token_max,
                'token_total':        self.token_total,
                'token_min_interval': self.token_min_interval.total_seconds(),
                'token_gen_time':     self.token_gen_time.total_seconds(),
                'token_gen_number':   self.token_gen_number,
                'start':
                    make_timestamp(self.start)
                    if self.start is not None else None,
                'stop':
                    make_timestamp(self.stop)
                    if self.stop is not None else None,
                'timezone':           self.timezone,
                'per_user_time':
                    self.per_user_time.total_seconds()
                    if self.per_user_time is not None else None,
                'max_submission_number': self.max_submission_number,
                'max_user_test_number': self.max_user_test_number,
                'min_submission_interval':
                    self.min_submission_interval.total_seconds()
                    if self.min_submission_interval is not None else None,
                'min_user_test_interval':
                    self.min_user_test_interval.total_seconds()
                    if self.min_user_test_interval is not None else None,
                'announcements':      [announcement.export_to_dict()
                                       for announcement in self.announcements],
                }

    # FIXME - Use SQL syntax
    def get_task(self, task_name):
        """Return the first task in the contest with the given name.

        task_name (string): the name of the task we are interested in.
        return (Task): the corresponding task object, or KeyError.

        """
        for task in self.tasks:
            if task.name == task_name:
                return task
        raise KeyError("Task not found")

    # FIXME - Use SQL syntax
    def get_task_index(self, task_name):
        """Return the index of the first task in the contest with the
        given name.

        task_name (string): the name of the task we are interested in.
        return (int): the index of the corresponding task, or
                      KeyError.

        """
        for idx, task in enumerate(self.tasks):
            if task.name == task_name:
                return idx
        raise KeyError("Task not found")

    # FIXME - Use SQL syntax
    def get_user(self, username):
        """Return the first user in the contest with the given name.

        username (string): the name of the user we are interested in.
        return (User): the corresponding user object, or KeyError.

        """
        for user in self.users:
            if user.username == username:
                return user
        raise KeyError("User not found")

    def enumerate_files(self, skip_submissions=False,
                        skip_user_tests=False, light=False):
        """Enumerate all the files (by digest) referenced by the
        contest.

        return (set): a set of strings, the digests of the file
                      referenced in the contest.

        """
        # Here we cannot use yield, because we want to detect
        # duplicates
        files = set()
        for task in self.tasks:

            # Enumerate statements
            for _file in task.statements.values():
                files.add(_file.digest)

            # Enumerate attachments
            for _file in task.attachments.values():
                files.add(_file.digest)

            # Enumerate managers
            for _file in task.managers.values():
                files.add(_file.digest)

            # Enumerate testcases
            if not light:
                for testcase in task.testcases:
                    files.add(testcase.input)
                    files.add(testcase.output)

        if not skip_submissions:
            for submission in self.get_submissions():

                # Enumerate files
                for _file in submission.files.values():
                    files.add(_file.digest)

                # Enumerate executables
                if not light:
                    for _file in submission.executables.values():
                        files.add(_file.digest)

        if not skip_user_tests:
            for user_test in self.get_user_tests():

                files.add(user_test.input)

                if not light and user_test.output is not None:
                    files.add(user_test.output)

                # Enumerate files
                for _file in user_test.files.values():
                    files.add(_file.digest)

                # Enumerate managers
                for _file in user_test.managers.values():
                    files.add(_file.digest)

                # Enumerate executables
                if not light:
                    for _file in user_test.executables.values():
                        files.add(_file.digest)

        return files

    def phase(self, timestamp):
        """Return: -1 if contest isn't started yet at time timestamp,
                    0 if the contest is active at time timestamp,
                    1 if the contest has ended.

        timestamp (int): the time we are interested in.
        return (int): contest phase as above.

        """
        if self.start is not None and self.start > timestamp:
            return -1
        if self.stop is None or self.stop > timestamp:
            return 0
        return 1

    @staticmethod
    def _tokens_available(token_timestamps, token_initial,
                          token_max, token_total, token_min_interval,
                          token_gen_time, token_gen_number,
                          start, timestamp):
        """Do exactly the same computation stated in tokens_available,
        but ensuring only a single set of token_* directive.
        Basically, tokens_available call this twice for contest-wise
        and task-wise parameters and then assemble the result.

        token_timestamps (list): list of timestamps of used tokens.
        token_* (int): the parameters we want to enforce.
        start (int): the time from which we start accumulating tokens.
        timestamp (int): the time relative to which make the
                         calculation (has to be greater than or equal
                         to all elements of token_timestamps).
        return (tuple): same as tokens_available.

        """
        # If token_initial is None, it means that the admin disabled
        # tokens usage, hence no tokens.
        if token_initial is None:
            return (0, None, None)

        # expiration is the timestamp at which all min_intervals for
        # the tokens played up to now have expired (i.e. the first
        # time at which we can play another token). If no tokens have
        # been played so far, this time is the start of the contest.
        expiration = token_timestamps[-1] + token_min_interval \
                     if len(token_timestamps) > 0 else start

        # If we have infinite tokens we don't need to simulate
        # anything, since nothing gets consumed or generated. We can
        # return immediately.
        if token_gen_number > 0 and token_gen_time == timedelta():
            return (-1, None, expiration if expiration > timestamp else None)

        # If we already played the total number allowed, we don't have
        # anything left.
        played_tokens = len(token_timestamps)
        if token_total is not None and played_tokens >= token_total:
            return (0, None, None)

        # If we're in the case "generate 0 tokens every 0 seconds" we
        # set the _gen_time to a non-zero value, to ease calculations.
        if token_gen_time == timedelta():
            token_gen_time = timedelta(seconds=1)

        # avail is the current number of available tokens. We are
        # going to rebuild all the history to know how many of them we
        # have now.
        # We start with the initial number (it's already capped to max
        # by the DB). token_initial can be ignored after this.
        avail = token_initial

        def generate_tokens(prev_time, next_time):
            """Compute how many tokens have been generated between the
            two timestamps.

            prev_time (int): timestamp of begin of interval.
            next_time (int): timestamp of end of interval.
            return (int): number of tokens generated.

            """
            # How many generation times we passed from start to
            # the previous considered time?
            before_prev = int((prev_time - start).total_seconds()
                              / token_gen_time.total_seconds())
            # And from start to the current considered time?
            before_next = int((next_time - start).total_seconds()
                              / token_gen_time.total_seconds())
            # So...
            return token_gen_number * (before_next - before_prev)

        # Previous time we considered
        prev_token = start

        # Simulating!
        for token in token_timestamps:
            # Increment the number of tokens because of generation.
            avail += generate_tokens(prev_token, token)
            if token_max is not None:
                avail = min(avail, token_max)

            # Play the token.
            avail -= 1

            prev_token = token

        avail += generate_tokens(prev_token, timestamp)
        if token_max is not None:
            avail = min(avail, token_max)

        # Compute the time in which the next token will be generated.
        next_gen_time = None
        if token_gen_number > 0 and (token_max is None or avail < token_max):
            next_gen_time = start + token_gen_time * \
                            int((timestamp - start).total_seconds() /
                                token_gen_time.total_seconds() + 1)

        # If we have more tokens than how many we are allowed to play,
        # cap it, and note that no more will be generated.
        if token_total is not None:
            if avail >= token_total - played_tokens:
                avail = token_total - played_tokens
                next_gen_time = None

        return (avail,
                next_gen_time,
                expiration if expiration > timestamp else None)

    def tokens_available(self, username, task_name, timestamp=None):
        """Return three pieces of data:

        [0] the number of available tokens for the user to play on the
            task (independently from the fact that (s)he can play it
            right now or not due to a min_interval waiting for
            expiration); -1 means infinite tokens;

        [1] the next time in which a token will be generated (or
            None); from the user perspective, i.e.: if the user will
            do nothing, [1] is the first time in which his number of
            available tokens will be greater than [0];

        [2] the time when the min_interval will expire, or None

        In particular, let r be the return value of this method. We can
        sketch the code in the following way:

        if r[0] > 0 or r[0] == -1:
            we have tokens
            if r[2] is None:
                we can play a token
            else:
                we must wait till r[2] to play a token
            if r[1] is not None:
                next one will be generated at r[1]
            else:
                no other tokens will be generated (max/total reached ?)
        else:
            we don't have tokens right now
            if r[1] is not None:
                next one will be generated at r[1]
                if r[2] is not None and r[2] > r[1]:
                    but we must wait also till r[2] to play it
            else:
                no other tokens will be generated (max/total reached ?)

        Note also that this method assumes that all played tokens were
        regularly played, and that there are no tokens played in the
        future. Also, if r[0] == 0 and r[1] is None, then r[2] should
        be ignored.

        username (string): the username of the user.
        task_name (string): the name of the task.
        timestamp (int): the time relative to which making the
                         calculation
        return ((int, int, int)): see description above; the latter two
                                  are timestamps, or None.

        """
        if timestamp is None:
            timestamp = make_datetime()

        user = self.get_user(username)
        task = self.get_task(task_name)

        # Take the list of the tokens already played (sorted by time).
        tokens = user.get_tokens()
        token_timestamps_contest = sorted([token.timestamp
                                           for token in tokens])
        token_timestamps_task = sorted([
            token.timestamp for token in tokens
            if token.submission.task.name == task_name])

        # If the contest is USACO-style (i.e., the time for each user
        # starts when he/she logs in for the first time), then we start
        # accumulating tokens from the user starting time; otherwise,
        # from the start of the contest.
        start = self.start
        if self.per_user_time is not None:
            start = user.starting_time

        # Compute separately for contest-wise and task-wise.
        res_contest = Contest._tokens_available(
            token_timestamps_contest, self.token_initial,
            self.token_max, self.token_total, self.token_min_interval,
            self.token_gen_time, self.token_gen_number,
            start, timestamp)
        res_task = Contest._tokens_available(
            token_timestamps_task, task.token_initial,
            task.token_max, task.token_total, task.token_min_interval,
            task.token_gen_time, task.token_gen_number,
            start, timestamp)

        # Merge the results.

        # First, the "expiration".
        if res_contest[2] is None:
            expiration = res_task[2]
        elif res_task[2] is None:
            expiration = res_contest[2]
        else:
            expiration = max(res_task[2], res_contest[2])

        # Then, check if both are infinite
        if res_contest[0] == -1 and res_task[0] == -1:
            res = (-1, None, expiration)
        # Else, "combine" them appropriately.
        else:
            # Having infinite contest tokens, in this situation, is the
            # same as having a finite number that is strictly greater
            # than the task tokens. The same holds the other way, too.
            if res_contest[0] == -1:
                res_contest = (res_task[0] + 1, None, None)
            if res_task[0] == -1:
                res_task = (res_contest[0] + 1, None, None)

            # About next token generation time: we need to see when the
            # *minimum* between res_contest[0] and res_task[0] is
            # increased by one, so if there is an actual minimum we
            # need to consider only the next generation time for it.
            # Otherwise, if they are equal, we need both to generate an
            # additional token and we store the maximum between the two
            # next times of generation.
            if res_contest[0] < res_task[0]:
                # We have more task-tokens than contest-tokens.
                # We just need a contest-token to be generated.
                res = (res_contest[0], res_contest[1], expiration)
            elif res_task[0] < res_contest[0]:
                # We have more contest-tokens than task-tokens.
                # We just need a task-token to be generated.
                res = (res_task[0], res_task[1], expiration)
            else:
                # Darn, we need both!
                if res_contest[1] is None or res_task[1] is None:
                    res = (res_task[0], None, expiration)
                else:
                    res = (res_task[0], max(res_contest[1], res_task[1]),
                           expiration)

        return res
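
A small worked example of the accumulation arithmetic that generate_tokens performs inside _tokens_available (all values below are made up for illustration):

from datetime import datetime, timedelta

# Assumptions: token_initial=1, token_max=3, token_gen_number=2 tokens
# generated every token_gen_time=30 minutes, one token played 45 minutes in.
start = datetime(2012, 1, 1, 10, 0)
gen_time = timedelta(minutes=30)
gen_number = 2
played = [start + timedelta(minutes=45)]
now = start + timedelta(minutes=70)


def generated_between(prev_time, next_time):
    # Same counting trick as generate_tokens: how many generation boundaries
    # fall between the two instants, measured from the contest start.
    before_prev = int((prev_time - start).total_seconds() /
                      gen_time.total_seconds())
    before_next = int((next_time - start).total_seconds() /
                      gen_time.total_seconds())
    return gen_number * (before_next - before_prev)

avail = 1                                                    # token_initial
avail = min(avail + generated_between(start, played[0]), 3)  # +2 at 10:30, capped at 3
avail -= 1                                                   # token played at 10:45
avail = min(avail + generated_between(played[0], now), 3)    # +2 at 11:00, capped at 3
assert avail == 3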
Example #18
from sqlalchemy import MetaData, Table, Column, Integer, Unicode, ForeignKey
from sqlalchemy.orm import scoped_session, sessionmaker

from hiku.graph import Edge, Link
from hiku.engine import Engine
from hiku.sources.sql import db_fields, db_link
from hiku.readers.simple import read
from hiku.executors.threads import ThreadsExecutor

from .base import TestCase


metadata = MetaData()
session = scoped_session(sessionmaker())

foo_table = Table(
    'foo',
    metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('name', Unicode),
    Column('count', Integer),
    Column('bar_id', ForeignKey('bar.id')),
)

bar_table = Table(
    'bar',
    metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('name', Unicode),
    Column('type', Integer),
)


def foo_list():
Example #19
def create_column(engine, table, name, type):
    with lock:
        if name not in table.columns.keys():
            col = Column(name, type)
            col.create(table, connection=engine)
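A minimal usage sketch for the helper above (hypothetical engine URL and table names; it assumes sqlalchemy-migrate is installed, since Column.create() comes from its changeset extension, and that the module defining create_column also defines the `lock` it uses):

from sqlalchemy import MetaData, String, Table, create_engine

engine = create_engine('sqlite:///example.db')
meta = MetaData(bind=engine)
users = Table('users', meta, autoload=True)  # reflect an existing table

# Adds a 'nickname' column only if the table does not already have one.
create_column(engine, users, 'nickname', String(64))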
Example #20
class ECDKGParticipant(db.Base):
    __tablename__ = 'ecdkg_participant'

    ecdkg_id = Column(types.Integer, ForeignKey('ecdkg.id'))
    ecdkg = relationship('ECDKG', back_populates='participants')
    eth_address = Column(db.EthAddress, index=True)

    encryption_key_vector = Column(db.CurvePointTuple)
    encryption_key_vector_signature = Column(db.Signature)

    decryption_key_part = Column(db.PrivateValue)
    decryption_key_part_signature = Column(db.Signature)

    verification_points = Column(db.CurvePointTuple)
    verification_points_signature = Column(db.Signature)

    secret_share1 = Column(db.PrivateValue)
    secret_share2 = Column(db.PrivateValue)
    shares_signature = Column(db.Signature)

    complaints = relationship('ECDKGComplaint', back_populates='participant')

    __table_args__ = (UniqueConstraint('ecdkg_id', 'eth_address'),)

    def get_or_create_complaint_by_complainer_address(self, address: int) -> 'ECDKGComplaint':
        complaint = (
            db.Session
            .query(ECDKGComplaint)
            .filter(ECDKGComplaint.participant_id == self.id,
                    ECDKGComplaint.complainer_address == address)
            .scalar()
        )

        if complaint is None:
            complaint = ECDKGComplaint(participant_id=self.id, complainer_address=address)
            db.Session.add(complaint)
            db.Session.commit()

        return complaint

    def to_state_message(self, address: int = None) -> dict:
        msg = {}

        for attr in ('verification_points', 'encryption_key_vector'):
            val = getattr(self, attr)
            if val is not None:
                msg[attr] = tuple('{0[0]:064x}{0[1]:064x}'.format(pt) for pt in val)

        return msg
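A small illustration of the point serialisation used in to_state_message above (made-up coordinates): each curve point (x, y) is rendered as two zero-padded 64-digit hex strings concatenated into one 128-character value.

pt = (0x1234, 0xabcd)
encoded = '{0[0]:064x}{0[1]:064x}'.format(pt)
assert len(encoded) == 128
assert encoded[:64] == '%064x' % pt[0]
assert encoded[64:] == '%064x' % pt[1]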
Example #21
class InheritSecurity:
    """Mixin for objects with a parent relation and security inheritance."""

    inherit_security = Column(
        Boolean, default=True, nullable=False, info={"auditable": False}
    )
Example #22
File: work.py  Project: mmarquar/cliche
class Work(Nameable):
    """Creative work(s) that could be a single work like a film, or
    a series of works such as a comic book series and a television series.
    """

    #: (:class:`int`) The primary key integer.
    id = Column(Integer, ForeignKey(Nameable.id), primary_key=True)

    #: (:class:`str`) Work media type.
    media_type = Column(String, nullable=False)

    #: (:class:`datetime.date`) The publication date.
    published_at = Column(Date)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`WorkGenre`\ s that the work has.
    work_genres = relationship('WorkGenre',
                               cascade='delete, merge, save-update',
                               collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`Genre`\ s that the work falls into.
    genres = relationship(Genre, secondary='work_genres', collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`Credit`\ s that the work has.
    credits = relationship(Credit,
                           cascade='delete, merge, save-update',
                           collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`WorkFranchise`\ s that the work has.
    work_franchises = relationship('WorkFranchise',
                                   cascade='delete, merge, save-update',
                                   collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`Franchise`\ s that the work belongs to.
    franchises = relationship(Franchise,
                              secondary='work_franchises',
                              collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`Character`\ s that appeared in the work.
    characters = relationship(Character,
                              secondary='work_characters',
                              collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`WorkTrope`.
    work_tropes = relationship(lambda: WorkTrope, collection_class=set)

    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`Trope`.
    tropes = relationship(Trope, secondary='work_tropes', collection_class=set)

    #: (:class:`datetime.datetime`) The date and time on which
    #: the record was created.
    created_at = Column(DateTime(timezone=True),
                        nullable=False,
                        default=now(),
                        index=True)

    __tablename__ = 'works'
    __repr_columns__ = [id]
    __mapper_args__ = {
        'polymorphic_identity': 'works',
    }
Example #23
def _add_complex_type(cls, props, table, k, v):
    p = getattr(v.Attributes, 'store_as', None)
    col_args, col_kwargs = sanitize_args(v.Attributes.sqla_column_args)
    _sp_attrs_to_sqla_constraints(cls, v, col_kwargs)

    if isinstance(p, c_table):
        if _is_array(v):
            child_cust = v
            if issubclass(v, Array):
                child_cust, = v._type_info.values()

            child = child_cust
            if child_cust.__orig__ is not None:
                child = child_cust.__orig__

            if p.multi != False:  # many to many
                _gen_array_m2m(cls, props, k, child, p)

            elif issubclass(child, SimpleModel):  # one to many simple type
                _gen_array_simple(cls, props, k, child_cust, p)

            else:  # one to many complex type
                _gen_array_o2m(cls, props, k, child, child_cust, p)

        else:
            # v has the Attribute values we need whereas real_v is what the
            # user instantiates (thus what sqlalchemy needs)
            if v.__orig__ is None:  # vanilla class
                real_v = v
            else:  # customized class
                real_v = v.__orig__

            assert not getattr(
                p, 'multi', False), ('Storing a single element-type using a '
                                     'relation table is pointless.')

            assert p.right is None, "'right' is ignored in a one-to-one " \
                                    "relationship"

            col = _get_col_o2o(cls, k, v, p.left)
            p.left = col.name

            if col.name in table.c:
                col = table.c[col.name]
            else:
                table.append_column(col)
            rel = relationship(real_v,
                               uselist=False,
                               cascade=p.cascade,
                               foreign_keys=[col],
                               backref=p.backref,
                               lazy=p.lazy)

            _gen_index_info(table, col, k, v)

            props[k] = rel
            props[col.name] = col

    elif isinstance(p, c_xml):
        if k in table.c:
            col = table.c[k]
        else:
            t = PGObjectXml(v, p.root_tag, p.no_ns)
            col = Column(k, t, *col_args, **col_kwargs)

        props[k] = col
        if k not in table.c:
            table.append_column(col)

    elif isinstance(p, c_json):
        if k in table.c:
            col = table.c[k]
        else:
            t = PGObjectJson(v,
                             ignore_wrappers=p.ignore_wrappers,
                             complex_as=p.complex_as)
            col = Column(k, t, *col_args, **col_kwargs)

        props[k] = col
        if k not in table.c:
            table.append_column(col)

    elif isinstance(p, c_msgpack):
        raise NotImplementedError(c_msgpack)

    elif p is None:
        pass

    else:
        raise ValueError(p)
Example #24
def format_server_default(compiler, default):
    return compiler.get_column_default_string(
        Column("x", Integer, server_default=default))
Example #25
def _gen_array_simple(cls, props, k, child_cust, p):
    table_name = cls.Attributes.table_name
    metadata = cls.Attributes.sqla_metadata

    # get left (fk) column info
    _gen_col = _get_col_o2m(cls, p.left)
    col_info = next(_gen_col)  # gets the column name
    # FIXME: Add support for multi-column primary keys.
    p.left, child_left_col_type = col_info[0]
    child_left_col_name = p.left

    # get right(data) column info
    child_right_col_type = get_sqlalchemy_type(child_cust)
    child_right_col_name = p.right  # this is the data column
    if child_right_col_name is None:
        child_right_col_name = k

    # get table name
    child_table_name = child_cust.Attributes.table_name
    if child_table_name is None:
        child_table_name = '_'.join([table_name, k])

    if child_table_name in metadata.tables:
        child_t = metadata.tables[child_table_name]

        # if we have the table, we sure have the right column (data column)
        assert child_right_col_type.__class__ is \
               child_t.c[child_right_col_name].type.__class__, "%s.%s: %r != %r" % \
                   (cls, child_right_col_name, child_right_col_type.__class__,
                               child_t.c[child_right_col_name].type.__class__)

        # Table exists but our own foreign key doesn't.
        if child_left_col_name in child_t.c:
            assert child_left_col_type.__class__ is \
                child_t.c[child_left_col_name].type.__class__, "%r != %r" % \
                   (child_left_col_type.__class__,
                               child_t.c[child_left_col_name].type.__class__)
        else:
            child_left_col = next(_gen_col)
            _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)
            child_t.append_column(child_left_col)

    else:
        # table does not exist, generate table
        child_right_col = Column(child_right_col_name, child_right_col_type)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_right_col)

        child_left_col = next(_gen_col)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)

        child_t = Table(
            child_table_name,
            metadata,
            Column('id', sqlalchemy.Integer, primary_key=True),
            child_left_col,
            child_right_col,
        )
        _gen_index_info(child_t, child_right_col, child_right_col_name,
                        child_cust)

    # generate a temporary class for the association proxy; the join below
    # produces a CamelCase class name from the underscore table name.
    cls_name = ''.join(x.capitalize() or '_'
                       for x in child_table_name.split('_'))

    def _i(self, *args):
        setattr(self, child_right_col_name, args[0])

    cls_ = type("_" + cls_name, (object, ), {'__init__': _i})
    own_mapper(cls_)(cls_, child_t)
    props["_" + k] = relationship(cls_)

    # generate association proxy
    setattr(cls, k, association_proxy("_" + k, child_right_col_name))
Example #26
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column, Index, MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy import utils as db_utils
from trove.openstack.common import log as logging

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')

meta = MetaData()

clusters = Table(
    'clusters',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('name', String(255), nullable=False),
    Column('task_id', Integer(), nullable=False),
    Column('tenant_id', String(36), nullable=False),
    Column("datastore_version_id",
           String(36),
           ForeignKey('datastore_versions.id'),
           nullable=False),
    Column('deleted', Boolean()),
    Column('deleted_at', DateTime()),
    Index("clusters_tenant_id", "tenant_id"),
    Index("clusters_deleted", "deleted"),
)
Example #27
class NameMixin(BaseMixin):

    name = Column(Unicode, nullable=False)

    __repr_attr__ = 'id', 'name',
Example #28
File: db.py  Project: wfaye/neican
from contacts.contact import Contact
from sqlalchemy import MetaData
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import mapper
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.schema import Table, Column, Sequence
from sqlalchemy.types import Integer, String

metadata = MetaData()
engine = create_engine('postgresql+psycopg2://openpg:@localhost/testdb', echo=True)
Session = sessionmaker(bind=engine)

contact = Table('contacts', metadata,
                Column('id', Integer, Sequence('contacts_id_seq'),
                       primary_key=True),
                Column('name', String(50)),
                Column('email', String(100)),
                Column('tel', String(20)))

mapper(Contact, contact)

session = Session()

class proxy_db():
    def __init__(self):
        self.session = Session()

    def getContactList(self):
        contactlist = self.session.query(Contact.name).all()
        return contactlist
Example #29
class Note(Base):
    __tablename__ = 'notes'

    id = Column(Integer, Sequence('notes_id_seq'), primary_key=True)
    title = Column(String)
    content = Column(String, nullable=False)
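A minimal usage sketch for the model above (hypothetical SQLite URL; assumes the declarative Base the Note class is mapped against):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///notes.db')
Base.metadata.create_all(engine)  # creates the 'notes' table
Session = sessionmaker(bind=engine)
session = Session()

session.add(Note(title='todo', content='write the next migration'))
session.commit()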
Example #30
class AddCPU(CRUDModel):
    __tablename__ = 'cpu'
    __table_args__ = {'sqlite_autoincrement': True}
    name = Column(String, nullable=False, index=False)
    cpu = Column(String, nullable=False, index=True)
Example #31
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

meta = MetaData()

datastore_configuration_parameters = Table(
    'datastore_configuration_parameters', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(128), primary_key=True, nullable=False),
    Column('datastore_version_id',
           String(36),
           ForeignKey("datastore_versions.id"),
           primary_key=True,
           nullable=False),
    Column('restart_required', Boolean(), nullable=False, default=False),
    Column('max_size', String(40)), Column('min_size', String(40)),
    Column('data_type', String(128), nullable=False),
    Column('deleted', Boolean()), Column('deleted_at', DateTime()),
    UniqueConstraint(
        'datastore_version_id',
        'name',
        name='UQ_datastore_configuration_parameters_datastore_version_id_name')
)
Example #32
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

meta = MetaData()

configurations = Table(
    'configurations',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(64), nullable=False),
    Column('description', String(256)),
    Column('tenant_id', String(36), nullable=False),
    Column('datastore_version_id', String(36), nullable=False),
    Column('deleted', Boolean(), nullable=False, default=False),
    Column('deleted_at', DateTime()),
)

configuration_parameters = Table(
    'configuration_parameters',
    meta,
    Column('configuration_id',
           String(36),
           ForeignKey("configurations.id"),
           nullable=False,
Example #33
class Blob(Model):
    """Model for storing large file content.

    Files are stored on-disk, named after their uuid. Repository is
    located in instance folder/data/files.
    """

    __tablename__ = "blob"

    id = Column(Integer(), primary_key=True, autoincrement=True)
    uuid = Column(UUID(), unique=True, nullable=False, default=uuid.uuid4)
    meta = Column(JSONDict(), nullable=False, default=dict)

    def __init__(self, value=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.uuid is None:
            self.uuid = uuid.uuid4()

        if self.meta is None:
            self.meta = {}

        if value is not None:
            self.value = value

    @property
    def file(self) -> Optional[Path]:
        """Return :class:`pathlib.Path` object used for storing value."""
        from abilian.services.repository import \
            session_repository as repository

        return repository.get(self, self.uuid)

    @property
    def size(self) -> int:
        """Return size in bytes of value."""
        f = self.file
        return f.stat().st_size if f is not None else 0

    @property
    def value(self) -> bytes:
        """Binary value content."""
        v = self.file
        return v.open("rb").read() if v is not None else v

    @value.setter
    def value(self, value: bytes):
        """Store binary content to applications's repository and update
        `self.meta['md5']`.

        :param:content: bytes, or any object with a `read()` method
        :param:encoding: encoding to use when content is Unicode
        """
        from abilian.services.repository import \
            session_repository as repository

        repository.set(self, self.uuid, value)
        self.meta["md5"] = str(hashlib.md5(self.value).hexdigest())

        if hasattr(value, "filename"):
            filename = value.filename
            if isinstance(filename, bytes):
                filename = filename.decode("utf-8")
            self.meta["filename"] = filename

        if hasattr(value, "content_type"):
            self.meta["mimetype"] = value.content_type

    @value.deleter
    def value(self) -> None:
        """Remove value from repository."""
        from abilian.services.repository import \
            session_repository as repository

        repository.delete(self, self.uuid)

    @property
    def md5(self) -> str:
        """Return md5 from meta, or compute it if absent."""
        md5 = self.meta.get("md5")
        if md5 is None:
            md5 = str(hashlib.md5(self.value).hexdigest())

        return md5

    def __bool__(self) -> bool:
        """A blob is considered falsy if it has no file."""
        return self.file is not None and self.file.exists()

    # Py3k compat
    __nonzero__ = __bool__
Example #34
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    records_table = Table('records', meta, autoload=True)

    # We need to autoload the domains table for the FK to succeed.
    Table('domains', meta, autoload=True)

    # Prepare an empty dict to cache (domain_id, name, type) tuples to
    # RRSet id's
    cache = {}

    # Create the recordsets_table table
    recordsets_table.create()

    # NOTE(kiall): Since we need a unique UUID for each recordset, and need
    #              to maintain cross DB compatibility, we're stuck doing this
    #              in code rather than an
    #              INSERT INTO recordsets_table SELECT (..) FROM records;
    results = select(
        columns=[
            records_table.c.tenant_id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type,
            func.min(records_table.c.ttl).label('ttl'),
            func.min(records_table.c.created_at).label('created_at'),
            func.max(records_table.c.updated_at).label('updated_at')
        ],
        group_by=[
            records_table.c.tenant_id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type
        ]
    ).execute()

    for result in results:
        # Create the new RecordSet and remember its id
        pk = recordsets_table.insert().execute(
            tenant_id=result.tenant_id,
            domain_id=result.domain_id,
            name=result.name,
            type=result.type,
            ttl=result.ttl,
            created_at=result.created_at,
            updated_at=result.updated_at
        ).inserted_primary_key[0]

        # Cache the ID for later
        cache_key = "%s.%s.%s" % (result.domain_id, result.name, result.type)
        cache[cache_key] = pk

    # Add the recordset column to the records table
    record_recordset_id = Column('recordset_id', UUID,
                                 default=None,
                                 nullable=True)
    record_recordset_id.create(records_table, populate_default=True)

    # Fetch all the records
    # TODO(kiall): Batch this..
    results = select(
        columns=[
            records_table.c.id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type,
            records_table.c.data,
            records_table.c.priority
        ]
    ).execute()

    # Update each result with the appropriate recordset_id, and refresh
    # the hash column to reflect the removal of several fields.
    for result in results:
        cache_key = "%s.%s.%s" % (result.domain_id, result.name,
                                  result.type)

        recordset_id = cache[cache_key]
        new_hash = _build_hash(recordset_id, result)

        records_table.update()\
            .where(records_table.c.id == result.id)\
            .values(recordset_id=cache[cache_key], hash=new_hash)\
            .execute()

    # Now that the records.recordset_id field is populated, let's ensure the
    # column is not nullable and is a FK to the recordsets table.
    records_table.c.recordset_id.alter(nullable=False)
    ForeignKeyConstraint(columns=[records_table.c.recordset_id],
                         refcolumns=[recordsets_table.c.id],
                         ondelete='CASCADE',
                         name='fkey_records_recordset_id').create()

    # Finally, drop the now-defunct columns from the records table
    records_table.c.name.drop()
    records_table.c.type.drop()
    records_table.c.ttl.drop()
Example #35
 def test_lots_of_types(self, engine, connection):
     # Presto doesn't have raw CREATE TABLE support, so we only test hive
     # take type list from sqlalchemy.types
     types = [
         'INT',
         'CHAR',
         'VARCHAR',
         'NCHAR',
         'TEXT',
         'Text',
         'FLOAT',
         'NUMERIC',
         'DECIMAL',
         'TIMESTAMP',
         'DATETIME',
         'CLOB',
         'BLOB',
         'BOOLEAN',
         'SMALLINT',
         'DATE',
         'TIME',
         'String',
         'Integer',
         'SmallInteger',
         'Numeric',
         'Float',
         'DateTime',
         'Date',
         'Time',
         'Binary',
         'Boolean',
         'Unicode',
         'UnicodeText',
     ]
     cols = []
     for i, t in enumerate(types):
         cols.append(Column(str(i), getattr(sqlalchemy.types, t)))
     cols.append(Column('hive_date', HiveDate))
     cols.append(Column('hive_decimal', HiveDecimal))
     cols.append(Column('hive_timestamp', HiveTimestamp))
     table = Table('test_table',
                   MetaData(bind=engine),
                   *cols,
                   schema='pyhive_test_database')
     table.drop(checkfirst=True)
     table.create()
     connection.execute('SET mapred.job.tracker=local')
     connection.execute('USE pyhive_test_database')
     big_number = 10**10 - 1
     connection.execute(
         """
     INSERT OVERWRITE TABLE test_table
     SELECT
         1, "a", "a", "a", "a", "a", 0.1,
         0.1, 0.1, 0, 0, "a", "a",
         false, 1, 0, 0,
         "a", 1, 1,
         0.1, 0.1, 0, 0, 0, "a",
         false, "a", "a",
         0, %d, 123 + 2000
     FROM default.one_row
     """, big_number)
     row = connection.execute(table.select()).fetchone()
     self.assertEqual(row.hive_date, datetime.date(1970, 1, 1))
     self.assertEqual(row.hive_decimal, decimal.Decimal(big_number))
     self.assertEqual(row.hive_timestamp,
                      datetime.datetime(1970, 1, 1, 0, 0, 2, 123000))
     table.drop()
Example #36
class Creatable(object):
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
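A minimal sketch of how such a mixin is typically combined with a declarative model (hypothetical Base and Article model; declarative mixins copy the Column onto each mapped subclass):

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Article(Base, Creatable):
    __tablename__ = 'article'

    id = Column(Integer, primary_key=True)
    title = Column(String(200), nullable=False)

# Article.created_at is filled in automatically when a row is inserted.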
Example #37
class Item(Base):
    __tablename__ = 'item'

    item_id = Column(Integer, primary_key=True, autoincrement=False)
    location = Column(Geography('POINT', spatial_index=True), nullable=False)
    enwiki = Column(String, index=True)
    entity = Column(postgresql.JSON)
    categories = Column(postgresql.ARRAY(String))
    old_tags = Column(postgresql.ARRAY(String))
    qid = column_property('Q' + cast(item_id, String))
    ewkt = column_property(func.ST_AsEWKT(location), deferred=True)
    query_label = Column(String, index=True)
    # extract = Column(String)
    extract_names = Column(postgresql.ARRAY(String))

    db_tags = relationship('ItemTag',
                           collection_class=set,
                           cascade='save-update, merge, delete, delete-orphan',
                           backref='item')

    tags = association_proxy('db_tags', 'tag_or_key')

    isa = relationship('IsA', secondary='item_isa')
    wiki_extracts = relationship(
        'Extract',
        collection_class=attribute_mapped_collection('site'),
        cascade='save-update, merge, delete, delete-orphan',
        backref='item')
    extracts = association_proxy('wiki_extracts', 'extract')

    @property
    def extract(self):
        return self.extracts.get('enwiki')

    @extract.setter
    def extract(self, value):
        self.extracts['enwiki'] = value

    @property
    def labels(self):
        if not self.entity:
            return None

        return {
            l['language']: l['value']
            for l in self.entity['labels'].values()
        }

    def label(self, lang='en'):
        if not self.entity:
            return self.enwiki or self.query_label or None

        labels = self.entity['labels']
        if lang in labels:
            return labels[lang]['value']
        elif lang != 'en' and 'en' in labels:
            return labels['en']['value']
        elif labels:
            return list(labels.values())[0]['value']

    def label_best_language(self, languages):
        if not languages:
            return self.label()
        labels = self.entity['labels']
        for lang in languages:
            code = lang if isinstance(lang,
                                      str) else lang.wikimedia_language_code
            if code in labels:
                return labels[code]['value']
        return self.label()

    def languages(self):
        entity = self.entity
        labels = {lang for lang in entity['labels'].keys() if '-' not in lang}
        sitelinks = {
            i[:-4]
            for i in entity['sitelinks'].keys() if i.endswith('wiki')
        }

        return labels | sitelinks

    def more_endings_from_isa(self):
        endings = set()
        langs = self.languages()
        # avoid trimming "cottage", it produces too many mismatches
        skip_isa = {
            5783996,  # cottage
        }
        for isa in self.isa:
            if isa.item_id in skip_isa or not isa.entity or 'missing' in isa.entity:
                continue
            for lang, label in isa.entity.get('labels', {}).items():
                if lang in langs:
                    endings.add(label['value'])
        return endings

    @classmethod
    def get_by_qid(cls, qid):
        if (qid and len(qid) > 1 and qid[0].upper() == 'Q'
                and qid[1:].isdigit()):
            return cls.query.get(qid[1:])

    def label_and_qid(self, lang='en'):
        label = self.label(lang=lang)
        if label:
            return '{label} ({item.qid})'.format(label=label, item=self)
        else:
            return self.qid

    @property
    def wikidata_uri(self):
        return 'https://www.wikidata.org/wiki/Q{}'.format(self.item_id)

    def get_lat_lon(self):
        return session.query(func.ST_Y(self.location),
                             func.ST_X(self.location)).one()

    def get_osm_url(self, zoom=18):
        lat, lon = self.get_lat_lon()
        return f'https://www.openstreetmap.org/#map={zoom}/{lat}/{lon}'

    def get_extra_tags(self):
        tags = set()
        for qid in self.instanceof():
            for tag in wikidata.extra_keys.get(qid, []):
                if tag:
                    tags.add(tag[4:])

        return tags

    @property
    def ref_keys(self):
        return {f'ref:nrhp={v}' for v in (self.ref_nrhp() or [])}

    def disused_tags(self):
        tags = set()
        prefixes = ('disused', 'was', 'abandoned', 'demolished', 'destroyed',
                    'ruins', 'historic')
        for i in self.tags:
            if i == 'amenity':  # too generic
                continue
            key = i.split('=')[0] if '=' in i else i
            if key in disused_prefix_key:
                tags |= {prefix + ':' + i for prefix in prefixes}
        return tags

    def calculate_tags(self, ignore_tags=None):
        ignore_tags = set(ignore_tags or [])

        # Ignore some overly generic tags from Wikidata objects:
        # facility (Q13226383)            - osm tag: amenity
        # geographic location (Q2221906)  - osm tag: location
        # artificial entity (Q16686448)   - osm tag: man_made

        ignore_tags.update({'amenity', 'location', 'man_made'})

        instanceof = self.instanceof()

        tags = (self.get_extra_tags() | set(self.tags)) - ignore_tags
        if matcher.could_be_building(tags, instanceof):
            tags.add('building')
            if any(n.lower().endswith(' church') for n in self.names().keys()):
                tags.update({'amenity=place_of_worship', 'building=church'})

        tags |= self.ref_keys | self.disused_tags()
        tags -= ignore_tags
        return tags

    def instanceof(self):
        if self.entity and 'claims' not in self.entity:
            subject = f'missing claims: {self.qid}'
            body = f'''
Wikidata entity is missing claims

https://www.wikidata.org/wiki/{self.qid}
'''
            mail.send_mail(subject, body)

        if not self.entity or 'claims' not in self.entity:
            return []

        return [
            i['mainsnak']['datavalue']['value']['id']
            for i in self.entity['claims'].get('P31', [])
            if 'datavalue' in i['mainsnak']
        ]

    def identifiers(self):
        ret = set()
        for v in self.get_item_identifiers().values():
            ret.update(v)
        return ret

    def identifier_values(self):
        ret = defaultdict(set)
        for osm_key, wikidata_values in self.get_item_identifiers().items():
            for values, label in wikidata_values:
                ret[osm_key].update(values)
        return ret

    def get_item_identifiers(self):
        if not self.entity:
            return {}

        property_map = [
            ('P238', ['iata'], 'IATA airport code'),
            ('P239', ['icao'], 'ICAO airport code'),
            ('P240', ['faa', 'ref'], 'FAA airport code'),
            # ('P281', ['addr:postcode', 'postal_code'], 'postal code'),
            ('P296', ['ref', 'ref:train', 'railway:ref'], 'station code'),
            ('P300', ['ISO3166-2'], 'ISO 3166-2 code'),
            ('P649', ['ref:nrhp'], 'NRHP reference number'),
            ('P722', ['uic_ref'], 'UIC station code'),
            ('P782', ['ref'], 'LAU (local administrative unit)'),
            ('P836', ['ref:gss'], 'UK Government Statistical Service code'),
            ('P856', ['website', 'contact:website', 'url'], 'website'),
            ('P882', ['nist:fips_code'], 'FIPS 6-4 (US counties)'),
            ('P883', ['state_code', 'ref',
                      'nist:fips_code'], 'FIPS 5-2 (code for US states)'),
            # A UIC id can be a IBNR, but not every IBNR is an UIC id
            ('P954', ['uic_ref'], 'IBNR ID'),
            ('P1216', ['HE_ref'], 'National Heritage List for England number'),
            ('P2253', ['ref:edubase'], 'EDUBase URN'),
            ('P2815', ['esr:user', 'ref', 'ref:train'], 'ESR station code'),
            ('P3425', ['ref', 'ref:SIC'], 'Natura 2000 site ID'),
            ('P3562', ['seamark:light:reference'], 'Admiralty number'),
            ('P4755', ['ref', 'ref:train', 'ref:crs',
                       'crs'], 'UK railway station code'),
            ('P4803', ['ref', 'ref:train'], 'Amtrak station code'),
        ]

        tags = defaultdict(list)
        for claim, osm_keys, label in property_map:
            values = [
                i['mainsnak']['datavalue']['value']
                for i in self.entity['claims'].get(claim, [])
                if 'datavalue' in i['mainsnak']
            ]
            if not values:
                continue
            if claim == 'P782':
                values += [
                    m.group(1) for m in (re_lau_code.match(v) for v in values)
                    if m
                ]
            for osm_key in osm_keys:
                tags[osm_key].append((tuple(values), label))
        return tags

    def ref_nrhp(self):
        if self.entity:
            return [
                i['mainsnak']['datavalue']['value']
                for i in self.entity['claims'].get('P649', [])
            ]
        else:
            return []

    def names(self):
        d = wikidata.names_from_entity(self.entity) or defaultdict(list)
        for name in self.extract_names or []:
            d[name].append(('extract', 'enwiki'))
        return dict(d) or None

    def refresh_extract_names(self):
        self.extract_names = wikipedia.html_names(self.extract)

    def get_oql(self):
        lat, lon = session.query(func.ST_Y(self.location),
                                 func.ST_X(self.location)).one()
        union = []
        for tag in self.tags:
            osm_filter = 'around:1000,{:f},{:f}'.format(lat, lon)
            union += oql_from_tag(tag, False, osm_filter)
        return union

    def coords(self):
        return session.query(func.ST_Y(self.location),
                             func.ST_X(self.location)).one()

    def image_filenames(self):
        return [
            i['mainsnak']['datavalue']['value']
            for i in self.entity['claims'].get('P18', [])
        ]

    def defunct_cats(self):
        words = {
            'demolish', 'disestablishment', 'defunct', 'abandon', 'mothballed',
            'decommission', 'former', 'dismantled', 'disused', 'disassembled',
            'abandoned', 'disband', 'scrapped', 'unused', 'closed',
            'condemned', 'redundant'
        }

        exclude = {
            'Defunct baseball venues in the United States',
            'Defunct National Football League venues',
            'Enclosed roller coasters', 'Former civil parishes in England',
            'Capitals of former nations',
            'Former state capitals in the United States'
        }

        found = []
        for item_cat in self.categories or []:
            if item_cat in exclude:
                continue
            lc_item_cat = item_cat.lower()
            found += [item_cat for i in words if i in lc_item_cat]
        return found

    def get_claim(self, pid):
        return [
            i['mainsnak']['datavalue']['value']
            for i in self.entity['claims'].get(pid, [])
        ]

    @property
    def criteria(self):
        return {('Tag:' if '=' in t else 'Key:') + t for t in self.tags or []}

    @property
    def category_map(self):
        if self.categories:
            return matcher.categories_to_tags_map(self.categories)

    def sitelinks(self):
        if self.entity:
            return self.entity.get('sitelinks')

    def is_hamlet(self):
        return ('Q5084' in self.instanceof() or any(
            cat.startswith('Hamlets ') for cat in self.categories or []))

    def is_farm_house(self):
        return 'Q489357' in self.instanceof()

    def is_mountain_range(self):
        return 'Q46831' in self.instanceof()

    def is_farmhouse(self):
        return 'Q489357' in self.instanceof()

    def is_proposed(self):
        '''is this item a proposed building or structure?'''

        cats = self.categories or []
        if any(cat.startswith('Disused ') for cat in cats):
            # disused stations that might be reopened could be in OSM
            return False
        if any(cat.startswith('Proposed ') for cat in cats):
            return True
        # proposed building or structure (Q811683)
        return 'Q811683' in (self.instanceof() or [])

    def is_a_historic_district(self):
        cats = self.categories or []
        return (('Q15243209' in (self.instanceof() or [])
                 or any(cat.startswith('Historic district') for cat in cats))
                and not any(
                    cat.startswith('Historic district contributing properties')
                    or cat.startswith('Churches ') or cat.startswith('Towers ')
                    or cat.startswith('Educational institutions ')
                    or cat.startswith('Schools ') or cat.startswith('Houses ')
                    or cat.startswith('Historic house ')
                    or cat.startswith('Museums ') or ' buildings ' in cat
                    or cat.startswith('Buildings and structures ')
                    for cat in cats))

    def is_a_station(self):
        stations = {
            'Q55488',  # railway station
            'Q928830',  # metro station
            'Q4663385',  # former railway station
        }
        if set(self.instanceof()) & stations:
            return True

        cats = {
            'railway stations', 'railroad stations', 'train stations',
            'metro stations', 'subway stations'
        }

        return any(
            any(cat in item_cat.lower() for cat in cats)
            for item_cat in (self.categories or []))

    def is_a_stadium(self):
        isa = {
            'Q483110',  # stadium
            'Q641226',  # arena
            'Q1076486',  # sports venue
        }
        if set(self.instanceof()) & isa:
            return True

        cats = {
            'football venues', 'ice rinks', 'stadiums', 'velodromes',
            'cycling venues', 'grounds'
        }

        return any(
            any(cat in item_cat.lower() for cat in cats)
            for item_cat in (self.categories or []))

    def is_a_school(self):
        return 'amenity=school' in self.tags

    def skip_item_during_match(self):
        ''' cebwiki and svwiki contain lots of poor quality stubs
        best to skip items that are only cebwiki or cebwiki + svwiki
        '''
        if self.is_proposed():  # skip proposed building or structure
            return True
        if not self.entity:
            return False
        sitelinks = self.entity.get('sitelinks')
        if not sitelinks:
            return False
        sites = set(sitelinks.keys())
        return sites == {'cebwiki'} or sites == {'cebwiki', 'svwiki'}

    def get_names(self):
        item = self.entity
        if not item:
            return

        names = defaultdict(list)
        skip_lang = {'ar', 'arc', 'pl'}
        # only include aliases if there are less than 6 other names
        if len(item.get('sitelinks', {})) < 6 and len(item['labels']) < 6:
            for k, v in item.get('aliases', {}).items():
                if k in skip_lang:
                    continue
                if len(v) > 3:
                    continue
                for name in v:
                    names[name].append(('alias', k))
        for k, v in item['labels'].items():
            if k in skip_lang:
                continue
            names[v].append(('label', k))
        for k, v in item.get('sitelinks', {}).items():
            if k + 'wiki' in skip_lang:
                continue
            names[v].append(('sitelink', k))
        return names

    def first_paragraph(self, languages=None):
        if languages is None:
            return self.first_paragraph_language('enwiki')
        for lang in languages:
            extract = self.first_paragraph_language(lang.site_name)
            if extract:
                return extract

    def first_paragraph_language(self, lang):
        extract = self.extracts.get(lang)
        if not extract:
            return

        empty_p_span = '<p><span></span></p>'
        text = extract.strip()
        if text.startswith(empty_p_span):
            text = text[len(empty_p_span):].strip()

        close_tag = '</p>'
        first_end_p_tag = text.find(close_tag)
        if first_end_p_tag == -1:
            # FIXME: e-mail admin
            return text

        return text[:first_end_p_tag + len(close_tag)]

    def place_names(self):
        names = set()
        for place in self.places:
            if not isinstance(place.address, list):
                continue
            names.update({
                i['name']
                for i in place.address if i['type'] != 'country_code'
            })
        start = 'Isle of '
        trimmed = {
            utils.drop_start(n, start)
            for n in names if n.startswith(start)
        }
        return names | trimmed

    def set_country_code(self):
        for place in self.places:
            if place.country_code:
                g.country_code = place.country_code
                return

    @property
    def is_nhle(self):
        '''Is this a National Heritage List for England item?'''
        return self.entity and 'P1216' in self.entity.get('claims', {})
Example #38
class SecurityAudit(db.Model):
    """Logs changes on security."""

    GRANT = "GRANT"
    REVOKE = "REVOKE"
    SET_INHERIT = "SET_INHERIT"
    UNSET_INHERIT = "UNSET_INHERIT"

    __tablename__ = "securityaudit"
    __table_args__ = (
        # constraint: either an inherit/no-inherit op on an object (and then no
        #             user and no group), or a grant/revoke on a user XOR a group.
        CheckConstraint(
            "(op IN ('{grant}', '{revoke}') "
            " AND object_id IS NOT NULL"
            " AND user_id IS NULL "
            " AND group_id IS NULL "
            " AND (CAST(anonymous AS INTEGER) = 0)"
            ")"
            " OR "
            "(op NOT IN ('{grant}', '{revoke}')"
            " AND "
            " (((CAST(anonymous AS INTEGER) = 1) "
            "   AND user_id IS NULL AND group_id IS NULL)"
            "  OR "
            "  ((CAST(anonymous AS INTEGER) = 0) "
            "   AND ((user_id IS NOT NULL AND group_id IS NULL)"
            "  OR "
            "  (user_id IS NULL AND group_id IS NOT NULL)))"
            "))".format(grant=SET_INHERIT, revoke=UNSET_INHERIT),
            name="securityaudit_ck_user_xor_group",
        ),
    )

    id = Column(Integer, primary_key=True)
    happened_at = Column(DateTime, default=datetime.utcnow, index=True)
    op = Column(
        Enum(GRANT, REVOKE, SET_INHERIT, UNSET_INHERIT, name="securityaudit_enum_op")
    )
    role = Column(RoleType)

    manager_id = Column(Integer, ForeignKey(User.id))
    manager = relationship(User, foreign_keys=manager_id)
    anonymous = Column("anonymous", Boolean, nullable=True, default=False)
    user_id = Column(Integer, ForeignKey("user.id"))
    user = relationship("User", lazy="joined", foreign_keys=user_id)
    group_id = Column(Integer, ForeignKey("group.id"))
    group = relationship("Group", lazy="joined")

    _fk_object_id = Column(Integer, ForeignKey(Entity.id, ondelete="SET NULL"))
    object = relationship(Entity, lazy="select")

    object_id = Column(Integer)
    object_type = Column(String(1000))
    object_name = Column(UnicodeText)

    query: BaseQuery
Example #39
class User(Base, UserMixin):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String)
    password = Column(String)
    name = Column(String)
    email = Column(String)
    active = Column(Boolean, default=True)
    sign_up = Column(DateTime, default=now_utc())
    is_admin = Column(Boolean, default=False)
    description = Column(Text)
    img = Column(String)
    languages = Column(postgresql.ARRAY(String))
    single = Column(String)
    multi = Column(String)
    units = Column(String)
    wikipedia_tag = Column(Boolean, default=False)

    def is_active(self):
        return self.active
Example #40
 def __init__(self):
     Column.__init__(self, 'foo', Integer)
Example #41
class ItemCandidate(Base):
    __tablename__ = 'item_candidate'

    item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True)
    osm_id = Column(BigInteger, primary_key=True)
    osm_type = Column(osm_type_enum, primary_key=True)
    name = Column(String)
    dist = Column(Float)
    tags = Column(postgresql.JSON)
    planet_table = Column(String)
    src_id = Column(BigInteger)
    geom = Column(Geography(srid=4326, spatial_index=True))
    geojson = column_property(func.ST_AsGeoJSON(geom), deferred=True)
    identifier_match = Column(Boolean)
    address_match = Column(Boolean)
    name_match = Column(postgresql.JSON)

    #    __table_args__ = (
    #        ForeignKeyConstraint(
    #            ['osm_type', 'osm_id'],
    #            ['osm_candidate.osm_type', 'osm_candidate.osm_id']
    #        ),
    #    )

    item = relationship(
        'Item',
        backref=backref('candidates',
                        lazy='dynamic',
                        cascade='save-update, merge, delete, delete-orphan'))
    # candidate = relationship(OsmCandidate)

    #     @property
    #     def name(self):
    #         return self.candidate.name
    #
    #     @property
    #     def tags(self):
    #         return self.candidate.tags
    #
    @property
    def key(self):
        return f'Q{self.item_id}-{self.osm_type:s}-{self.osm_id:d}'

    def get_match(self):
        endings = matcher.get_ending_from_criteria(self.tags)
        wikidata_names = self.item.names()
        return match.check_for_match(self.tags, wikidata_names, endings)

    def get_all_matches(self):
        endings = matcher.get_ending_from_criteria(self.item.tags)
        wikidata_names = self.item.names()
        m = match.get_all_matches(self.tags, wikidata_names, endings)
        return m

    def languages(self):
        return {key[5:] for key in self.tags.keys() if key.startswith('name:')}

    def matching_tags(self):
        tags = []

        for tag_or_key in self.item.tags:
            if '=' not in tag_or_key and tag_or_key in self.tags:
                tags.append(tag_or_key)
                continue
            key, _, value = tag_or_key.partition('=')
            if self.tags.get(key) == value:
                tags.append(tag_or_key)
                continue

        return tags

    def update(self, candidate):
        for k, v in candidate.items():
            if k in {'osm_id', 'osm_type'}:
                continue
            setattr(self, k, v)

    @property
    def wikidata_tag(self):
        return self.tags.get('wikidata') or None

    def label_best_language(self, languages):
        if not languages:
            return self.label

        for key in 'bridge:name', 'tunnel:name', 'lock_name':
            if key in self.tags:
                return self.tags[key]

        names = {
            k[5:]: v
            for k, v in self.tags.items() if k.startswith('name:')
        }
        if 'name' in self.tags:
            top_lang = g.default_languages[0]['code']
            if top_lang not in names:
                names[top_lang] = self.tags['name']

        for lang in languages:
            key = lang if isinstance(lang, str) else lang.iso_639_1
            if key in names:
                return names[key]

        return self.label

    @property
    def label(self):
        for key in 'bridge:name', 'tunnel:name', 'lock_name':
            if key in self.tags:
                return self.tags[key]

        if 'name' in self.tags:
            name = self.tags['name']
            if 'addr:housename' in self.tags:
                return f'{name} (house name: {self.tags["addr:housename"]})'
            else:
                return name

        if 'name:en' in self.tags:
            return self.tags['name:en']
        for k, v in self.tags.items():
            if k.startswith('name:'):
                return v
        for k, v in self.tags.items():
            if 'name' in k:
                return v

        if all(tag in self.tags
               for tag in ('addr:housenumber', 'addr:street')):
            housenumber = self.tags['addr:housenumber']
            street = self.tags['addr:street']
            return f'{housenumber} {street}'

        return f'{self.osm_type}/{self.osm_id}'

    @property
    def url(self):
        return f'{osm_api_base}/{self.osm_type}/{self.osm_id}'

    def name_match_count(self, osm_key):
        if not self.name_match:
            return

        match_count = 0
        for match_type, wikidata_name, source in self.name_match[osm_key]:
            match_count += len(source)
        return match_count

    def set_match_detail(self):
        keys = ['identifier', 'address', 'name']
        if any(getattr(self, key + '_match') is not None for key in keys):
            return False  # no need

        endings = matcher.get_ending_from_criteria(self.tags)
        endings |= self.item.more_endings_from_isa()

        names = self.item.names()
        identifiers = self.item.get_item_identifiers()
        self.address_match = match.check_name_matches_address(self.tags, names)
        self.name_match = match.check_for_match(self.tags, names, endings)
        self.identifier_match = match.check_identifier(self.tags, identifiers)
        return True

    def display_distance(self):
        if has_app_context() and g.user.is_authenticated and g.user.units:
            units = g.user.units
        else:
            units = 'local'  # default

        if units == 'local':
            country_code = (getattr(g, 'country_code', None)
                            if has_app_context() else None)
            units = country_units.get(country_code, 'km_and_metres')

        return utils.display_distance(units, self.dist)

    def checkbox_ticked(self):
        max_dist = 500
        if any(tag == 'place' or (
                tag != 'place=farm' and tag.startswith('place='))
               for tag in self.matching_tags()):
            max_dist = 2000
        elif self.item.is_nhle:
            max_dist = 100
        return ((not self.dist or self.dist < max_dist
                 and 'designation=civil_parish' not in self.matching_tags())
                or self.item.candidates.count() > 1)

    def new_wikipedia_tag(self, languages):
        sitelinks = {
            code[:-4]: link['title']
            for code, link in self.item.sitelinks().items()
            if code.endswith('wiki')
        }

        for lang in languages:
            code = lang if isinstance(lang,
                                      str) else lang.wikimedia_language_code
            if code in sitelinks:
                return (code, sitelinks[code])
        return (None, None)
Example #42
class Workflow(Base):
    """
    A collection of Stages and Tasks encoded as a DAG
    """
    __tablename__ = 'workflow'

    id = Column(Integer, primary_key=True)
    name = Column(VARCHAR(200), unique=True, nullable=False)
    successful = Column(Boolean, nullable=False)
    created_on = Column(DateTime)
    started_on = Column(DateTime)
    finished_on = Column(DateTime)
    max_cores = Column(Integer)
    primary_log_path = Column(String(255))
    _log = None

    info = Column(MutableDict.as_mutable(JSONEncodedDict))
    _status = Column(Enum_ColumnType(WorkflowStatus),
                     default=WorkflowStatus.no_attempt)
    stages = relationship("Stage",
                          cascade="all, merge, delete-orphan",
                          order_by="Stage.number",
                          passive_deletes=True,
                          backref='workflow')

    exclude_from_dict = ['info']
    dont_garbage_collect = None
    termination_signal = None

    @declared_attr
    def status(cls):
        def get_status(self):
            return self._status

        def set_status(self, value):
            if self._status != value:
                self._status = value
                signal_workflow_status_change.send(self)

        return synonym('_status', descriptor=property(get_status, set_status))

    @validates('name')
    def validate_name(self, key, name):
        assert re.match(r"^[\w-]+$", name), 'Invalid workflow name, characters are limited to letters, numbers, ' \
                                            'hyphens and underscores'
        return name

    @orm.reconstructor
    def constructor(self):
        self.__init__(manual_instantiation=False)

    def __init__(self, manual_instantiation=True, *args, **kwargs):
        # FIXME provide the cosmos_app instance?

        if manual_instantiation:
            raise TypeError(
                'Do not instantiate an Workflow manually.  Use the Cosmos.start method.'
            )
        super(Workflow, self).__init__(*args, **kwargs)
        # assert self.output_dir is not None, 'output_dir cannot be None'
        if self.info is None:
            # mutable dict column defaults to None
            self.info = dict()
        self.jobmanager = None
        if not self.created_on:
            self.created_on = datetime.datetime.now()
        self.dont_garbage_collect = []

    @property
    def log(self):
        if self._log is None:
            self._log = get_logger('%s' % self, self.primary_log_path)
        return self._log

    def make_output_dirs(self):
        """
        Create directory paths of all output files
        """
        dirs = set()

        for task in self.tasks:
            for out_name, v in task.output_map.iteritems():
                dirname = lambda p: p if out_name.endswith(
                    'dir') or p is None else os.path.dirname(p)

                if isinstance(v, (tuple, list)):
                    dirs.update(map(dirname, v))
                elif isinstance(v, dict):
                    raise NotImplementedError()
                else:
                    dirs.add(dirname(v))

        for d in dirs:
            if d is not None and '://' not in d:
                mkdir(d)

    def add_task(self,
                 func,
                 params=None,
                 parents=None,
                 stage_name=None,
                 uid=None,
                 drm=None,
                 queue=None,
                 must_succeed=True,
                 time_req=None,
                 core_req=None,
                 mem_req=None,
                 max_attempts=None,
                 noop=False):
        """
        Adds a new Task to the Workflow.  If the Task already exists (and was successful), return the successful Task stored in the database

        :param callable func: A function which returns a string which will get converted to a shell script to be executed.  `func` will not get called until
          all of its dependencies have completed.
        :param dict params: Parameters to `func`.  Must be jsonable so that it can be stored in the database.  Any Dependency objects will get resolved into
            a string, and the Dependency.task will be added to this Task's parents.
        :param list[Tasks] parents: A list of dependent Tasks.
        :param str uid: A unique identifier for this Task, primarily used for skipping  previously successful Tasks.
            If a Task with this stage_name and uid already exists in the database (and was successful), the
            database version will be returned and a new one will not be created.
        :param str stage_name: The name of the Stage to add this Task to.  Defaults to `func.__name__`.
        :param str drm: The drm to use for this Task (example 'local', 'ge' or 'drmaa:lsf').  Defaults to the `default_drm` parameter of :meth:`Cosmos.start`
        :param queue: The name of a queue to submit to; defaults to the `default_queue` parameter of :meth:`Cosmos.start`
        :param bool must_succeed: Default True.  If False, the Workflow will not fail if this Task does not succeed.  Dependent Jobs will not be executed.
        :param bool time_req: The time requirement; will set the Task.time_req attribute which is intended to be used by :func:`get_submit_args` to request resources.
        :param int core_req: Number of cpus required for this Task.  Can also be set in the `params` dict or the default value of the Task function signature, but this value takes precedence.
            Warning!  In future versions, this will be the only way to set it.
        :param int mem_req: Number of MB of RAM required for this Task.   Can also be set in the `params` dict or the default value of the Task function signature, but this value takes predence.
            Warning!  In future versions, this will be the only way to set it.
        :param int max_attempts: The maximum number of times to retry a failed job.  Defaults to the `default_max_attempts` parameter of :meth:`Cosmos.start`
        :rtype: cosmos.api.Task
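
        Example (a minimal, illustrative sketch; ``workflow`` is assumed to come
        from :meth:`Cosmos.start`, and ``say`` is a hypothetical task function,
        not part of this module)::

            def say(text, out_txt):
                return 'echo %s > %s' % (text, out_txt)

            task = workflow.add_task(
                func=say,
                params=dict(text='hello', out_txt='out/hello.txt'),
                uid='hello',
                core_req=1,
                mem_req=1024)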
        """
        from cosmos.models.Stage import Stage
        from cosmos import recursive_resolve_dependency

        # parents
        if parents is None:
            parents = []
        elif isinstance(parents, Task):
            parents = [parents]
        else:
            parents = list(parents)

        # params
        if params is None:
            params = dict()
        for k, v in params.iteritems():
            # decompose `Dependency` objects to values and parents
            new_val, parent_tasks = recursive_resolve_dependency(v)

            params[k] = new_val
            parents.extend(parent_tasks - set(parents))

        # uid
        if uid is None:
            raise AssertionError('uid parameter must be specified')
            # Fix me assert params are all JSONable
            # uid = str(params)
        else:
            assert isinstance(uid, basestring), 'uid must be a string'

        if stage_name is None:
            stage_name = str(func.__name__)

        # Get the right Stage
        stage = only_one((s for s in self.stages if s.name == stage_name),
                         None)
        if stage is None:
            stage = Stage(workflow=self,
                          name=stage_name,
                          status=StageStatus.no_attempt)
            self.session.add(stage)

        # Check if task is already in stage
        task = stage.get_task(uid, None)

        if task is not None:
            # if task is already in stage, but unsuccessful, raise an error (duplicate params) since unsuccessful tasks
            # were already removed on workflow load
            if task.successful:
                # If the user manually edited the dag and this is a resume, parents might need to be re-added
                task.parents.extend(set(parents).difference(set(task.parents)))

                for p in parents:
                    if p.stage not in stage.parents:
                        stage.parents.append(p.stage)

                return task
            else:
                # TODO check for duplicate params here?  would be a lot faster at Workflow.run
                raise ValueError(
                    'Duplicate uid, you have added a Task to Stage %s with the uid (unique identifier) `%s` twice.  '
                    'Task uids must be unique within the same Stage.' %
                    (stage_name, uid))
        else:
            # Create Task
            sig = funcsigs.signature(func)

            def params_or_signature_default_or(name, default):
                if name in params:
                    return params[name]
                if name in sig.parameters:
                    param_default = sig.parameters[name].default
                    if param_default is funcsigs._empty:
                        return default
                    else:
                        return param_default
                return default

            input_map = dict()
            output_map = dict()

            for keyword, param in sig.parameters.iteritems():
                if keyword.startswith('in_'):
                    v = params.get(keyword, param.default)
                    assert v != funcsigs._empty, 'parameter %s for %s is required' % (
                        param, func)
                    input_map[keyword] = v
                elif keyword.startswith('out_'):
                    v = params.get(keyword, param.default)
                    assert v != funcsigs._empty, 'parameter %s for %s is required' % (
                        param, func)
                    output_map[keyword] = v

            task = Task(stage=stage,
                        params=params,
                        parents=parents,
                        input_map=input_map,
                        output_map=output_map,
                        uid=uid,
                        drm=drm or self.cosmos_app.default_drm,
                        queue=queue or self.cosmos_app.default_queue,
                        must_succeed=must_succeed,
                        core_req=core_req if core_req is not None else
                        params_or_signature_default_or('core_req', 1),
                        mem_req=mem_req if mem_req is not None else
                        params_or_signature_default_or('mem_req', None),
                        time_req=time_req,
                        successful=False,
                        max_attempts=max_attempts if max_attempts is not None
                        else self.cosmos_app.default_max_attempts,
                        attempt=1,
                        NOOP=noop)

            task.cmd_fxn = func

        # Add Stage Dependencies
        for p in parents:
            if p.stage not in stage.parents:
                stage.parents.append(p.stage)

        self.dont_garbage_collect.append(task)

        return task

    def run(self,
            max_cores=None,
            dry=False,
            set_successful=True,
            cmd_wrapper=signature.default_cmd_fxn_wrapper,
            log_out_dir_func=default_task_log_output_dir):
        """
        Runs this Workflow's DAG

        :param int max_cores: The maximum number of cores to use at once.  A value of None indicates no maximum.
        :param callable log_out_dir_func: A function that returns a Task's logging directory (must be unique).
             It receives one parameter: the Task instance.
             By default a Task's log output is stored in log/stage_name/task_id.
             See _default_task_log_output_dir for more info.
        :param callable cmd_wrapper: A decorator which will be applied to every Task's cmd_fxn.
        :param bool dry: If True, do not actually run any jobs.
        :param bool set_successful: Sets this workflow as successful if all tasks finish without a failure.  You might set this to False if you intend to add and
            run more tasks in this workflow later.

        Returns True if all tasks in the workflow ran successfully, False otherwise.
        If `dry` is True, returns None.
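
        Example (an illustrative sketch; assumes ``workflow`` was created via
        :meth:`Cosmos.start` and Tasks were added with :meth:`add_task`)::

            success = workflow.run(max_cores=8)
            if not success:
                failed_task = workflow.get_first_failed_task()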
        """
        assert os.path.exists(os.getcwd(
        )), 'current working dir does not exist! %s' % os.getcwd()

        assert hasattr(
            self, 'cosmos_app'
        ), 'Workflow was not initialized using the Cosmos.start method'
        assert hasattr(log_out_dir_func,
                       '__call__'), 'log_out_dir_func must be a function'
        assert self.session, 'Workflow must be part of a sqlalchemy session'

        session = self.session
        self.log.info('Preparing to run %s using DRM `%s`, cwd is `%s`' %
                      (self, self.cosmos_app.default_drm, os.getcwd()))
        self.log.info('Running as %s@%s, pid %s' %
                      (getpass.getuser(), os.uname()[1], os.getpid()))

        self.max_cores = max_cores

        from ..job.JobManager import JobManager

        if self.jobmanager is None:
            self.jobmanager = JobManager(
                get_submit_args=self.cosmos_app.get_submit_args,
                cmd_wrapper=cmd_wrapper,
                log_out_dir_func=log_out_dir_func)

        self.status = WorkflowStatus.running
        self.successful = False

        if self.started_on is None:
            self.started_on = datetime.datetime.now()

        task_graph = self.task_graph()
        stage_graph = self.stage_graph()

        assert len(set(self.stages)) == len(
            self.stages), 'duplicate stage name detected: %s' % (next(
                duplicates(self.stages)))

        # renumber stages
        stage_graph_no_cycles = nx.DiGraph()
        stage_graph_no_cycles.add_nodes_from(stage_graph.nodes())
        stage_graph_no_cycles.add_edges_from(stage_graph.edges())
        for cycle in nx.simple_cycles(stage_graph):
            stage_graph_no_cycles.remove_edge(cycle[-1], cycle[0])
        for i, s in enumerate(topological_sort(stage_graph_no_cycles)):
            s.number = i + 1
            if s.status != StageStatus.successful:
                s.status = StageStatus.no_attempt

        # Make sure everything is in the sqlalchemy session
        session.add(self)
        successful = filter(lambda t: t.successful, task_graph.nodes())

        # print stages
        for s in sorted(self.stages, key=lambda s: s.number):
            self.log.info('%s %s' % (s, s.status))

        # Create Task Queue
        task_queue = _copy_graph(task_graph)
        self.log.info('Skipping %s successful tasks...' % len(successful))
        task_queue.remove_nodes_from(successful)

        handle_exits(self)

        if self.max_cores is not None:
            self.log.info('Ensuring there are enough cores...')
            # make sure we've got enough cores
            for t in task_queue:
                assert int(
                    t.core_req
                ) <= self.max_cores, '%s requires more cpus (%s) than `max_cores` (%s)' % (
                    t, t.core_req, self.max_cores)

        # Run this thing!
        self.log.info('Committing to SQL db...')
        session.commit()
        if not dry:
            _run(self, session, task_queue)

            # set status
            if self.status == WorkflowStatus.failed_but_running:
                self.status = WorkflowStatus.failed
                # set stage status to failed
                for s in self.stages:
                    if s.status == StageStatus.running_but_failed:
                        s.status = StageStatus.failed
                session.commit()
                return False
            elif self.status == WorkflowStatus.running:
                if set_successful:
                    self.status = WorkflowStatus.successful
                session.commit()
                return True
            else:
                self.log.warning('%s exited with status "%s"', self,
                                 self.status)
                session.commit()
                return False
        else:
            self.log.info('Workflow dry run is complete')
            return None

    def terminate(self, due_to_failure=True):
        self.log.warning('Terminating %s!' % self)
        if self.jobmanager:
            self.log.info(
                'Processing finished tasks and terminating %s running tasks' %
                len(self.jobmanager.running_tasks))
            _process_finished_tasks(self.jobmanager)
            self.jobmanager.terminate()

        if due_to_failure:
            self.status = WorkflowStatus.failed
        else:
            self.status = WorkflowStatus.killed

        self.session.commit()

    @property
    def tasks(self):
        return [t for s in self.stages for t in s.tasks]
        # return session.query(Task).join(Stage).filter(Stage.workflow == ex).all()

    def stage_graph(self):
        """
        :return: (networkx.DiGraph) a DAG of the stages
        """
        g = nx.DiGraph()
        g.add_nodes_from(self.stages)
        g.add_edges_from((s, c) for s in self.stages for c in s.children if c)
        return g

    def task_graph(self):
        """
        :return: (networkx.DiGraph) a DAG of the tasks
        """
        g = nx.DiGraph()
        g.add_nodes_from(self.tasks)
        g.add_edges_from([(t, c) for t in self.tasks for c in t.children])
        return g

    def get_stage(self, name_or_id):
        if isinstance(name_or_id, int):
            f = lambda s: s.id == name_or_id
        else:
            f = lambda s: s.name == name_or_id

        for stage in self.stages:
            if f(stage):
                return stage

        raise ValueError('Stage with name or id %s does not exist' % name_or_id)

    @property
    def url(self):
        return url_for('cosmos.workflow', name=self.name)

    def __repr__(self):
        return '<Workflow[%s] %s>' % (self.id or '', self.name)

    def __unicode__(self):
        return self.__repr__()

    def delete(self, delete_files=False):
        """
        :param delete_files: (bool) If True, delete :attr:`output_dir` directory and all contents on the filesystem
        """
        if hasattr(self, 'log'):
            self.log.info('Deleting %s, delete_files=%s' %
                          (self, delete_files))
            for h in self.log.handlers:
                h.flush()
                h.close()
                self.log.removeHandler(h)

        if delete_files:
            raise NotImplementedError(
                'This should delete all Task.output_files')

        print >> sys.stderr, '%s Deleting from SQL...' % self
        self.session.delete(self)
        self.session.commit()
        print >> sys.stderr, '%s Deleted' % self

    def get_first_failed_task(self, key=lambda t: t.finished_on):
        """
        Return the first failed Task (chronologically).

        If no Task failed, return None.
        """
        for t in sorted([t for t in self.tasks if key(t) is not None],
                        key=key):
            if t.exit_status:
                return t
        return None
Example #43
class TagOrKey(Base):
    __tablename__ = 'tag_or_key'

    name = Column(String, primary_key=True)
    count_all = Column(Integer)
Example #44
class Blob(Base, BaseMixin):
    """Encapsulates a large binary file.
    
      Instances must have a unique ``self.name``, which has a maximum length
      of 64 characters.
      
      The binary data is stored in ``self.value``, either directly by assigning
      a bytestring, by passing a file-like object to ``self.update()``, or by
      downloading a file from a url using ``self.update_from_url()``. The
      downloaded file can optionally be unzipped if compressed using gzip.
      
      When reading the data, ``self.value`` is a read-only buffer. A convenience
      ``self.get_as_named_tempfile()`` method is provided as an easy way to get
      the value as a readable and writable file that can optionally be closed
      so the data is available from the filesystem.
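
      Example (illustrative only; the url and filenames are assumptions)::

          blob = Blob.factory(u'report.csv.gz')
          blob.update_from_url('https://example.com/report.csv.gz',
                               should_unzip=True)
          tmp = blob.get_as_named_tempfile(should_close=True)
          print tmp.name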
    """

    __tablename__ = 'blobs'

    name = Column(Unicode(64), nullable=False, unique=True)
    value = Column(LargeBinary, nullable=False)

    @classmethod
    def factory(cls, name, file_like_object=None):
        """Create and return."""

        instance = cls()
        instance.update(name, file_like_object=file_like_object)
        return instance

    def update(self, name, file_like_object=None):
        """Update properties, reading the ``file_like_object`` into
          ``self.value`` if provided.
        """

        self.name = name
        if file_like_object is not None:
            self.value = file_like_object.read()

    def update_from_url(self,
                        url,
                        should_unzip=False,
                        requests=None,
                        gzip_cls=None,
                        io_cls=None):
        """Update ``self.value`` to be the contents of the file downloaded
          from the ``url`` provided.
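
          The ``requests``, ``gzip_cls`` and ``io_cls`` keyword arguments default
          to the real ``requests`` library, ``GzipFile`` and ``StringIO``; they
          allow stubs to be injected (e.g. in tests).  A minimal sketch (the url
          is illustrative)::

              blob.update_from_url('https://example.com/data.csv.gz',
                                   should_unzip=True)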
        """

        # Compose.
        if requests is None:
            requests = requests_lib
        if gzip_cls is None:
            gzip_cls = GzipFile
        if io_cls is None:
            io_cls = StringIO

        # Download the file, raising an exception if the download fails
        # after retrying once.
        attempts = 0
        max_attempts = 2
        while True:
            attempts += 1
            r = requests.get(url)
            if r.status_code == requests.codes.ok:
                break
            if attempts < max_attempts:
                continue
            r.raise_for_status()

        # If necessary, unzip using the gzip library.
        if should_unzip:
            self.value = gzip_cls(fileobj=io_cls(r.content)).read()
        else:  # Read the response into ``self.value``.
            self.value = r.content

    def get_as_named_tempfile(self,
                              should_close=False,
                              named_tempfile_cls=None):
        """Read ``self.value`` into and return a named temporary file."""

        # Compose.
        if named_tempfile_cls is None:
            named_tempfile_cls = NamedTemporaryFile

        # Prepare the temp file.
        f = named_tempfile_cls(delete=False)

        # Read self.value into it.
        if self.value is not None:
            f.write(self.value)

        # Close the file so it's readable from the filename.
        if should_close:
            f.close()

        # Return the file.
        return f

    def __json__(self):
        return {'name': self.name}
Example #45
class Category(Base):
    __tablename__ = 'category'

    name = Column(String, primary_key=True)
    page_count = Column(Integer)
Example #46
from sqlalchemy.sql import text

from sqlalchemy.schema import (MetaData, Table, Column, CheckConstraint,
                               ForeignKeyConstraint)
from sqlalchemy.types import (
    Integer,
    UnicodeText,
    BigInteger,
    Boolean,
)

metadata = MetaData()

roles = Table('roles',
              metadata,
              Column('role_name', UnicodeText, primary_key=True),
              Column('role_password', UnicodeText, nullable=False),
              Column('role_email', UnicodeText, nullable=False),
              Column('role_phone', UnicodeText),
              Column('is_active',
                     Boolean,
                     nullable=False,
                     server_default=text('True')),
              Column('is_admin',
                     Boolean,
                     nullable=False,
                     server_default=text('False')),
              schema="application")

groups = Table('groups',
               metadata,
Example #47
def define_images_table(meta):
    images = Table('images',
                   meta,
                   Column('id', Integer(), primary_key=True, nullable=False),
                   Column('name', String(255)),
                   Column('type', String(30)),
                   Column('size', Integer()),
                   Column('status', String(30), nullable=False),
                   Column('is_public',
                          Boolean(),
                          nullable=False,
                          default=False,
                          index=True),
                   Column('location', Text()),
                   Column('created_at', DateTime(), nullable=False),
                   Column('updated_at', DateTime()),
                   Column('deleted_at', DateTime()),
                   Column('deleted',
                          Boolean(),
                          nullable=False,
                          default=False,
                          index=True),
                   mysql_engine='InnoDB',
                   extend_existing=True)

    return images
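

# A minimal, hypothetical usage sketch for the helper above.  It assumes the
# sqlalchemy imports elided from this snippet (Table, Column, Integer, String,
# Boolean, Text, DateTime) are in scope, and it uses an in-memory SQLite engine
# purely for illustration (the mysql_engine option is ignored by SQLite).
if __name__ == '__main__':
    from sqlalchemy import MetaData, create_engine

    engine = create_engine('sqlite://')
    meta = MetaData()
    images = define_images_table(meta)
    images.create(bind=engine, checkfirst=True)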