Esempio n. 1
0
class AWS(Base):
    """AWS (Automatic Weather Station) weather observation record.

    One row per station observation; field comments translated from Korean.
    """

    __tablename__ = 'aws'

    id = Column(Integer, nullable=False, primary_key=True)

    # Station / region name
    name = Column(String, nullable=False)

    # Station altitude
    height = Column(Integer, nullable=False)

    # Whether it is currently raining
    is_raining = Column(Boolean)

    # Rainfall over the last 15 minutes
    rain15 = Column(Float)

    # Rainfall over the last 60 minutes
    rain60 = Column(Float)

    # Rainfall over the last 3 hours
    rain3h = Column(Float)

    # Rainfall over the last 6 hours
    rain6h = Column(Float)

    # Rainfall over the last 12 hours
    rain12h = Column(Float)

    # Rainfall for the day
    rainday = Column(Float)

    # Air temperature
    temperature = Column(Float)

    # Wind direction (presumably the 1-minute reading -- confirm with source feed)
    wind_direction1 = Column(String)

    # Wind speed (presumably the 1-minute reading -- confirm with source feed)
    wind_speed1 = Column(Float)

    # Wind direction (presumably the 10-minute average -- confirm with source feed)
    wind_direction10 = Column(String)

    # Wind speed (presumably the 10-minute average -- confirm with source feed)
    wind_speed10 = Column(Float)

    # Relative humidity
    humidity = Column(Integer)

    # Sea-level pressure
    pressure = Column(Float)

    # Location
    location = Column(String)

    # Observation time.
    # NOTE(review): presumably injects an 'observed' datetime column into this
    # class namespace -- confirm against the insert_datetime_field helper.
    insert_datetime_field('observed', locals(), False)
Esempio n. 2
0
 class Model(Base):
     """Minimal declarative model; column names are given explicitly."""
     __tablename__ = 'model'
     # Surrogate primary key.
     id = Column('id', Integer, primary_key=True)
     name = Column('name', String(50))
Esempio n. 3
0
 class Model2(Base):
     """Minimal declarative model with only a surrogate primary key."""
     __tablename__ = 'model2'
     id = Column('id', Integer, primary_key=True)
Esempio n. 4
0
 class Test(self.declarative_base):
     """Model bound to a per-instance declarative base.

     NOTE(review): the capitalized table name 'Oops' is unusual -- presumably
     deliberate for a naming-convention test; confirm.
     """
     __tablename__ = 'Oops'
     id = Column(Integer, primary_key=True)
Esempio n. 5
0
 class Foobar(Bar):
     """Joined-table inheritance subclass of ``Bar``.

     The primary key doubles as a foreign key to the parent row, and the
     polymorphic identity selects this subclass on load.
     """
     __tablename__ = 'Foobar'
     __mapper_args__ = {'polymorphic_identity': 'foobar'}
     id = Column(Integer, ForeignKey('Bar.id'), primary_key=True)
Esempio n. 6
0
from designate.sqlalchemy.types import UUID


# Shared MetaData object that the table below is attached to.
# (A redundant second ``meta = MetaData()`` assignment was removed: it
# silently discarded the first instance and served no purpose.)
meta = MetaData()

# Allowed keys for a zone attribute row (values of the 'key' enum below).
ZONE_ATTRIBUTE_KEYS = ('master',)

# Supported zone types.
ZONE_TYPES = ('PRIMARY', 'SECONDARY')


domain_attributes = Table(
    'domain_attributes', meta,
    # Primary key plus the usual bookkeeping columns.
    Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
    Column('version', Integer(), default=1, nullable=False),
    Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
    Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),

    Column('key', Enum(name='key', *ZONE_ATTRIBUTE_KEYS)),
    Column('value', String(255), nullable=False),
    Column('domain_id', UUID(), nullable=False),

    # A given (key, value) pair may appear at most once per domain.
    UniqueConstraint('key', 'value', 'domain_id', name='unique_attributes'),
    # Attributes disappear together with their owning domain.
    ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),

    mysql_engine='INNODB',
    mysql_charset='utf8'
)
Esempio n. 7
0
            "SELECT typarray FROM pg_type WHERE typname = 'citext'"))
    oids = tuple(row[0] for row in results)
    array_type = psycopg2.extensions.new_array_type(oids, 'citext[]',
                                                    psycopg2.STRING)
    psycopg2.extensions.register_type(array_type, None)


if __name__ == '__main__':
    # Smoke test: create a table with a CIText column against a local
    # PostgreSQL database and define a trivial row-holder class for it.
    from sqlalchemy import create_engine, MetaData, Integer
    from sqlalchemy.schema import Column, Table
    import sqlalchemy.orm as orm

    engine = create_engine('postgresql://localhost/test_db')
    meta = MetaData()

    test_table = Table('test', meta, Column('id', Integer(), primary_key=True),
                       Column('txt', CIText()))

    conn = engine.connect()

    # NOTE(review): the implicit-bind style (``meta.bind`` plus argument-less
    # ``drop_all``/``create_all``) was removed in SQLAlchemy 2.0 -- confirm
    # the pinned SQLAlchemy version still supports it.
    meta.bind = conn
    meta.drop_all()
    meta.create_all()

    class TestObj(object):
        """Plain holder for one row of the ``test`` table."""

        def __init__(self, id_, txt):
            self.id = id_
            self.txt = txt

        def __repr__(self):
            return "TestObj(%r, %r)" % (self.id, self.txt)
Esempio n. 8
0
class DbNode(Base):
    """Class to store nodes using SQLA backend."""

    __tablename__ = 'db_dbnode'

    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True)
    node_type = Column(String(255), index=True)
    process_type = Column(String(255), index=True)
    label = Column(
        String(255), index=True, nullable=True,
        default='')  # Does it make sense to be nullable and have a default?
    description = Column(Text(), nullable=True, default='')
    ctime = Column(DateTime(timezone=True), default=timezone.now)
    mtime = Column(DateTime(timezone=True),
                   default=timezone.now,
                   onupdate=timezone.now)
    attributes = Column(JSONB)
    extras = Column(JSONB)

    dbcomputer_id = Column(Integer,
                           ForeignKey('db_dbcomputer.id',
                                      deferrable=True,
                                      initially='DEFERRED',
                                      ondelete='RESTRICT'),
                           nullable=True)

    # Same ondelete behaviour as dbcomputer_id. The casing was normalized to
    # 'RESTRICT' for consistency (SQL keywords are case-insensitive, so the
    # emitted DDL behaviour is unchanged).
    user_id = Column(Integer,
                     ForeignKey('db_dbuser.id',
                                deferrable=True,
                                initially='DEFERRED',
                                ondelete='RESTRICT'),
                     nullable=False)

    # pylint: disable=fixme
    # TODO SP: The 'passive_deletes=all' argument here means that SQLAlchemy
    # won't take care of automatic deleting in the DbLink table. This still
    # isn't exactly the same behaviour than with Django. The solution to
    # this is probably a ON DELETE inside the DB. On removing node with id=x,
    # we would remove all link with x as an output.

    dbcomputer = relationship('DbComputer',
                              backref=backref('dbnodes',
                                              passive_deletes='all',
                                              cascade='merge'))

    # Owning user.
    user = relationship('DbUser',
                        backref=backref(
                            'dbnodes',
                            passive_deletes='all',
                            cascade='merge',
                        ))

    # Outputs via the db_dblink association table; the backref exposes the
    # symmetric ``inputs_q`` dynamic relationship.
    outputs_q = relationship('DbNode',
                             secondary='db_dblink',
                             primaryjoin='DbNode.id == DbLink.input_id',
                             secondaryjoin='DbNode.id == DbLink.output_id',
                             backref=backref('inputs_q',
                                             passive_deletes=True,
                                             lazy='dynamic'),
                             lazy='dynamic',
                             passive_deletes=True)

    def __init__(self, *args, **kwargs):
        """Add three additional attributes to the base class: mtime, attributes and extras."""
        super().__init__(*args, **kwargs)
        # The behavior of an unstored Node instance should be that all its attributes should be initialized in
        # accordance with the defaults specified on the colums, i.e. if a default is specified for the `uuid` column,
        # then an unstored `DbNode` instance should have a default value for the `uuid` attribute. The exception here
        # is the `mtime`, that we do not want to be set upon instantiation, but only upon storing. However, in
        # SqlAlchemy a default *has* to be defined if one wants to get that value upon storing. But since defining a
        # default on the column in combination with the hack in `aiida.backend.SqlAlchemy.models.__init__` to force all
        # defaults to be populated upon instantiation, we have to unset the `mtime` attribute here manually.
        #
        # The only time that we allow mtime not to be null is when we explicitly pass mtime as a kwarg. This covers
        # the case that a node is constructed based on some very predefined data like when we create nodes at the
        # AiiDA import functions.
        if 'mtime' not in kwargs:
            self.mtime = None

        # Ensure the JSONB fields are usable dicts even before storing.
        if self.attributes is None:
            self.attributes = {}

        if self.extras is None:
            self.extras = {}

    @property
    def outputs(self):
        """Return all output nodes as a list (materializes the dynamic query)."""
        return self.outputs_q.all()

    @property
    def inputs(self):
        """Return all input nodes as a list (materializes the dynamic query)."""
        return self.inputs_q.all()  # pylint: disable=no-member

    def get_simple_name(self, invalid_result=None):
        """
        Return a string with the last part of the type name.

        If the type is empty, use 'Node'.
        If the type is invalid, return the content of the input variable
        ``invalid_result``.

        :param invalid_result: The value to be returned if the node type is
            not recognized.
        """
        thistype = self.node_type
        # Fix for base class
        if thistype == '':
            thistype = 'node.Node.'
        # A valid type name always ends with a dot.
        if not thistype.endswith('.'):
            return invalid_result
        thistype = thistype[:-1]  # Strip final dot
        return thistype.rpartition('.')[2]

    @property
    def pk(self):
        """Alias for the primary key ``id``."""
        return self.id

    def __str__(self):
        """Get string object out of DbNode object."""
        simplename = self.get_simple_name(invalid_result='Unknown')
        # node pk + type (+ label when present)
        if self.label:
            return f'{simplename} node [{self.pk}]: {self.label}'
        return f'{simplename} node [{self.pk}]'
Esempio n. 9
0
File: contest.py Progetto: raoz/cms
class Contest(Base):
    """Class to store a contest (which is a single day of a
    programming competition).

    """
    __tablename__ = 'contests'
    # Sanity constraints on the contest/analysis time windows and token caps.
    __table_args__ = (
        CheckConstraint("start <= stop"),
        CheckConstraint("stop <= analysis_start"),
        CheckConstraint("analysis_start <= analysis_stop"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True)

    # Short name of the contest.
    name = Column(
        Unicode,
        CodenameConstraint("name"),
        nullable=False,
        unique=True)
    # Description of the contest (human readable).
    description = Column(
        Unicode,
        nullable=False)

    # The list of language codes of the localizations that contestants
    # are allowed to use (empty means all).
    allowed_localizations = Column(
        ARRAY(String),
        nullable=False,
        default=[])

    # The list of names of languages allowed in the contest.
    languages = Column(
        ARRAY(String),
        nullable=False,
        default=["C11 / gcc", "C++11 / g++", "Pascal / fpc"])

    # Whether contestants allowed to download their submissions.
    submissions_download_allowed = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether the user question is enabled.
    allow_questions = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether the user test interface is enabled.
    allow_user_tests = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to prevent hidden participations to log in.
    block_hidden_participations = Column(
        Boolean,
        nullable=False,
        default=False)

    # Whether to allow username/password authentication
    allow_password_authentication = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to enforce that the IP address of the request matches
    # the IP address or subnet specified for the participation (if
    # present).
    ip_restriction = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to automatically log in users connecting from an IP
    # address specified in the ip field of a participation to this
    # contest.
    ip_autologin = Column(
        Boolean,
        nullable=False,
        default=False)

    # The parameters that control contest-tokens follow. Note that
    # their effect during the contest depends on the interaction with
    # the parameters that control task-tokens, defined on each Task.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(
        Enum(TOKEN_MODE_DISABLED, TOKEN_MODE_FINITE, TOKEN_MODE_INFINITE,
             name="token_mode"),
        nullable=False,
        default="infinite")

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on all tasks).
    token_max_number = Column(
        Integer,
        CheckConstraint("token_max_number > 0"),
        nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on any task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(
        Integer,
        CheckConstraint("token_gen_initial >= 0"),
        nullable=False,
        default=2)
    token_gen_number = Column(
        Integer,
        CheckConstraint("token_gen_number >= 0"),
        nullable=False,
        default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(
        Integer,
        CheckConstraint("token_gen_max > 0"),
        nullable=True)

    # Beginning and ending of the contest.
    start = Column(
        DateTime,
        nullable=False,
        default=datetime(2000, 1, 1))
    stop = Column(
        DateTime,
        nullable=False,
        default=datetime(2030, 1, 1))

    # Beginning and ending of the contest analysis mode.
    analysis_enabled = Column(
        Boolean,
        nullable=False,
        default=False)
    analysis_start = Column(
        DateTime,
        nullable=False,
        default=datetime(2030, 1, 1))
    analysis_stop = Column(
        DateTime,
        nullable=False,
        default=datetime(2030, 1, 1))

    # Timezone for the contest. All timestamps in CWS will be shown
    # using the timezone associated to the logged-in user or (if it's
    # None or an invalid string) the timezone associated to the
    # contest or (if it's None or an invalid string) the local
    # timezone of the server. This value has to be a string like
    # "Europe/Rome", "Australia/Sydney", "America/New_York", etc.
    timezone = Column(
        Unicode,
        nullable=True)

    # Max contest time for each user in seconds.
    per_user_time = Column(
        Interval,
        CheckConstraint("per_user_time >= '0 seconds'"),
        nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # during the whole contest or None to not enforce this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(
        Integer,
        CheckConstraint("max_user_test_number > 0"),
        nullable=True)

    # Minimum interval between two submissions or user_tests, or None to
    # not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this contest will be rounded to this number of
    # decimal places.
    score_precision = Column(
        Integer,
        CheckConstraint("score_precision >= 0"),
        nullable=False,
        default=0)

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    tasks = relationship(
        "Task",
        collection_class=ordering_list("num"),
        order_by="[Task.num]",
        cascade="all",
        passive_deletes=True,
        back_populates="contest")

    announcements = relationship(
        "Announcement",
        order_by="[Announcement.timestamp]",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="contest")

    participations = relationship(
        "Participation",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="contest")

    def phase(self, timestamp):
        """Return: -1 if contest isn't started yet at time timestamp,
                    0 if the contest is active at time timestamp,
                    1 if the contest has ended but analysis mode
                      hasn't started yet
                    2 if the contest has ended and analysis mode is active
                    3 if the contest has ended and analysis mode is disabled or
                      has ended

        timestamp (datetime): the time we are interested in.
        return (int): contest phase as above.

        """
        if timestamp < self.start:
            return -1
        if timestamp <= self.stop:
            return 0
        if self.analysis_enabled:
            if timestamp < self.analysis_start:
                return 1
            elif timestamp <= self.analysis_stop:
                return 2
        return 3
Esempio n. 10
0
class EventPart(Base):
    """
    An ``Event`` can be divided into logical ``EventPart``s related to time or location.
    Examples:
        - An ``Event`` can span several days.
        - An ``Event`` can span several locations, each requiring differently priced tickets.
    """

    __tablename__ = 'tickee_eventparts'

    # Columns

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String)
    # Reference into the l10n translation store for the description text.
    description_ref = Column(Integer)
    starts_on = Column(DateTime)
    ends_on = Column(DateTime)
    event_id = Column(Integer, ForeignKey('tickee_events.id'))
    venue_id = Column(Integer, ForeignKey('tickee_venues.id'))

    # Relations

    venue = orm.relationship('Venue', backref='event_parts')
    # Association objects linking this eventpart to its ticket types.
    tickettypes = orm.relationship("TicketTypeEventPartAssociation", backref="eventpart")
    event = orm.relationship('Event', backref='parts')

    # Constructor

    def __init__(self, starts_on=None, ends_on=None, venue_id=None):
        """
        Construct a new ``EventPart`` object.

        :param starts_on: datetime at which this part begins.
        :param ends_on: datetime at which this part ends.
        :param venue_id: id of the ``Venue`` hosting this part.
        """
        self.starts_on = starts_on
        # BUGFIX: this previously assigned ``starts_on``, silently discarding
        # the caller-supplied end time (the ``#ends_on`` remnant showed the
        # original intent).
        self.ends_on = ends_on
        self.venue_id = venue_id
        # Allocate a localisation record so the description can be translated.
        self.description_ref = l10n.create_text_localisation().reference_id

    # Description

    def set_description(self, text, lang='en'):
        """
        Sets the description for a specific language
        """
        l10n.set_translation(self.description_ref, text, lang)

    def get_description(self, lang='en'):
        """
        Returns the description in the correct language.
        """
        return l10n.get_translation(self.description_ref, lang)

    # Methods

    def get_name(self):
        """Return this part's own name, falling back to the event's name."""
        if self.name is not None:
            return self.name
        else:
            return self.event.get_name()

    def get_tickets(self):
        """
        Get the tickets of all the ticket types connected to this event.
        """
        # BUGFIX: previously iterated the non-existent ``self.ticket_types``
        # attribute; the relation is ``tickettypes`` and holds association
        # objects, so the ticket type is reached via ``.tickettype``.
        tickets = []
        for association in self.tickettypes:
            tickets += association.tickettype.get_tickets()
        return tickets

    def get_ticket_types(self, include_inactive=False,
                               include_if_sales_finished=False):
        """
        Get the ticket types of the eventpart.

        :param include_inactive: also include inactive ticket types.
        :param include_if_sales_finished: also include types whose sales ended.
        :return: list of ticket types.
        """
        tickettypes = [assoc.tickettype for assoc in self.tickettypes]

        # remove ticket types with finished sales
        if not include_if_sales_finished:
            now = datetime.datetime.utcnow()
            tickettypes = [tt for tt in tickettypes
                           if tt.sales_end is None or tt.sales_end >= now]

        # remove inactive ticket types
        if not include_inactive:
            tickettypes = [tt for tt in tickettypes if tt.is_active]

        return tickettypes

    def get_availability(self):
        """
        Derives the availability status from all the ``TicketType``s for this ``EventPart``.
        """
        currently = states.SOLD

        for ticket_type in self.get_ticket_types(include_inactive=False,
                                                 include_if_sales_finished=True):
            # Only improve if not currently available
            if currently == states.AVAILABLE:
                return currently
            # Improve if better than SOLD.
            # NOTE(review): the condition reads the ``availability`` attribute
            # but the value stored comes from ``get_availability()`` -- confirm
            # these are intentionally different.
            if ticket_type.availability != states.SOLD:
                currently = ticket_type.get_availability()
        return currently
Esempio n. 11
0
class Project(db.Model, DomainObject):
    '''A microtasking Project to which Tasks are associated.
    '''

    __tablename__ = 'project'

    #: ID of the project
    id = Column(Integer, primary_key=True)
    #: UTC timestamp when the project is created
    created = Column(Text, default=make_timestamp)
    #: UTC timestamp when the project is updated (or any of its relationships)
    updated = Column(Text, default=make_timestamp, onupdate=make_timestamp)
    #: Project name
    name = Column(Unicode(length=255), unique=True, nullable=False)
    #: Project slug for the URL
    short_name = Column(Unicode(length=255), unique=True, nullable=False)
    #: Project description
    description = Column(Unicode(length=255), nullable=False)
    #: Project long description
    long_description = Column(UnicodeText)
    #: Project webhook
    webhook = Column(Text)
    #: If the project allows anonymous contributions
    allow_anonymous_contributors = Column(Boolean, default=True)
    long_tasks = Column(Integer, default=0)
    #: If the project is hidden
    hidden = Column(Integer, default=0)
    # If the project is featured
    featured = Column(Boolean, nullable=False, default=False)
    # If the project owner has been emailed
    contacted = Column(Boolean, nullable=False, default=False)
    #: Project owner_id
    owner_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    time_estimate = Column(Integer, default=0)
    time_limit = Column(Integer, default=0)
    # NOTE(review): default 60.0 looks like a percentage despite the
    # '_frac' name -- confirm intended units.
    calibration_frac = Column(Float, default=60.0)
    bolt_course_id = Column(Integer, default=0)
    #: Project Category
    category_id = Column(Integer, ForeignKey('category.id'), nullable=False)
    #: Project info field formatted as JSON
    info = Column(JSONEncodedDict, default=dict)

    tasks = relationship(Task,
                         cascade='all, delete, delete-orphan',
                         backref='project')
    task_runs = relationship(TaskRun,
                             backref='project',
                             cascade='all, delete-orphan',
                             order_by='TaskRun.finish_time.desc()')
    category = relationship(Category)
    blogposts = relationship(Blogpost,
                             cascade='all, delete-orphan',
                             backref='project')

    def needs_password(self):
        """Return True when a password hash is stored for this project."""
        return self.get_passwd_hash() is not None

    def get_passwd_hash(self):
        """Return the signed password blob from the info dict, or None."""
        return self.info.get('passwd_hash')

    def get_passwd(self):
        """Return the plaintext password recovered from the signed blob."""
        if self.needs_password():
            return signer.loads(self.get_passwd_hash())
        return None

    def set_password(self, password):
        """Store a signed password; clear it (and return False) if too short.

        NOTE(review): ``len(password) > 1`` rejects 1-character passwords --
        confirm the intended minimum length.
        """
        if len(password) > 1:
            self.info['passwd_hash'] = signer.dumps(password)
            return True
        self.info['passwd_hash'] = None
        return False

    def check_password(self, password):
        """Return True iff a password is set and matches ``password``."""
        if self.needs_password():
            return self.get_passwd() == password
        return False

    def has_autoimporter(self):
        """Return True when an autoimporter config is stored."""
        return self.get_autoimporter() is not None

    def get_autoimporter(self):
        """Return the autoimporter config from the info dict, or None."""
        return self.info.get('autoimporter')

    def set_autoimporter(self, new=None):
        """Store (or reset with None) the autoimporter config."""
        self.info['autoimporter'] = new

    def delete_autoimporter(self):
        """Remove the autoimporter config; raises KeyError if absent."""
        del self.info['autoimporter']
Esempio n. 12
0
class Formular(CRUDModel):
    """Simple person form record (Czech field names)."""
    __tablename__ = 'formular'
    # Force SQLite to use a monotonically increasing AUTOINCREMENT rowid.
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True)
    # First name ("jmeno"); explicitly not indexed.
    jmeno = Column(String, nullable=False, index=False)
    # Surname ("prijmeni"); indexed for lookups.
    prijmeni = Column(String, nullable=False, index=True)
Esempio n. 13
0
class ContextNumbers(Base):
    """An extension number range (numberbeg..numberend) owned by a context.

    ``start``/``end``/``did_length`` are hybrid aliases over the raw columns.
    """

    __tablename__ = 'contextnumbers'

    context = Column(String(39), primary_key=True)
    type = Column(Enum('user',
                       'group',
                       'queue',
                       'meetme',
                       'incall',
                       name='contextnumbers_type',
                       metadata=Base.metadata),
                  primary_key=True)
    # First and last number of the range; empty numberend means a
    # single-number range (see ``end`` below).
    numberbeg = Column(String(16), server_default='', primary_key=True)
    numberend = Column(String(16), server_default='', primary_key=True)
    # Number of trailing digits considered when matching DIDs (0 = all).
    didlength = Column(Integer, nullable=False, server_default='0')

    @hybrid_property
    def start(self):
        # Alias for numberbeg.
        return self.numberbeg

    @start.setter
    def start(self, value):
        self.numberbeg = value

    @hybrid_property
    def end(self):
        # An empty numberend collapses the range to its start.
        if self.numberend == '':
            return self.numberbeg
        return self.numberend

    @end.expression
    def end(cls):
        # SQL-side equivalent of the Python getter above.
        return case([(cls.numberend == '', cls.numberbeg)],
                    else_=cls.numberend)

    @end.setter
    def end(self, value):
        self.numberend = value

    @hybrid_property
    def did_length(self):
        # Alias for didlength.
        return self.didlength

    @did_length.setter
    def did_length(self, value):
        self.didlength = value

    def in_range(self, exten):
        """Return True when ``exten`` falls inside this number range.

        Only the last ``did_length`` digits of the range bounds are compared
        (all digits when did_length is 0).
        """
        exten = int(exten)
        start = self._convert_limit(self.start)
        end = self._convert_limit(self.end)

        if start == end and exten == start:
            return True
        elif start <= exten <= end:
            return True
        return False

    def _convert_limit(self, limit):
        # Keep only the did_length trailing digits; limit[-0:] is the whole
        # string, so did_length == 0 means "compare all digits".
        return int(limit[-self.did_length:])
Esempio n. 14
0
def create_translation_table(_table_name,
                             foreign_class,
                             relation_name,
                             language_class,
                             relation_lazy='select',
                             **kwargs):
    """Creates a table that represents some kind of data attached to the given
    foreign class, but translated across several languages.  Returns the new
    table's mapped class.  It won't be declarative, but it will have a
    `__table__` attribute so you can retrieve the Table object.

    `foreign_class` must have a `__singlename__`, currently only used to create
    the name of the foreign key column.

    Also supports the notion of a default language, which is attached to the
    session.  This is English by default, for historical and practical reasons.

    Usage looks like this:

        class Foo(Base): ...

        create_translation_table('foo_bars', Foo, 'bars',
            name = Column(...),
        )

        # Now you can do the following:
        foo.name
        foo.name_map['en']
        foo.foo_bars['en']

        foo.name_map['en'] = "new name"
        del foo.name_map['en']

        q.options(joinedload(Foo.bars_local))
        q.options(joinedload(Foo.bars))

    The following properties are added to the passed class:

    - `(relation_name)`, a relation to the new table.  It uses a dict-based
      collection class, where the keys are language identifiers and the values
      are rows in the created tables.
    - `(relation_name)_local`, a relation to the row in the new table that
      matches the current default language.
    - `(relation_name)_table`, the class created by this function.

    Note that these are distinct relations.  Even though the former necessarily
    includes the latter, SQLAlchemy doesn't treat them as linked; loading one
    will not load the other.  Modifying both within the same transaction has
    undefined behavior.

    For each column provided, the following additional attributes are added to
    Foo:

    - `(column)_map`, an association proxy onto `foo_bars`.
    - `(column)`, an association proxy onto `foo_bars_local`.

    Pardon the naming disparity, but the grammar suffers otherwise.

    Modifying these directly is not likely to be a good idea.

    For Markdown-formatted columns, `(column)_map` and `(column)` will give
    Markdown objects.
    """
    # n.b.: language_class only exists for the sake of tests, which sometimes
    # want to create tables entirely separate from the pokedex metadata

    foreign_key_name = foreign_class.__singlename__ + '_id'

    Translations = type(
        _table_name, (object, ), {
            '_language_identifier':
            association_proxy('local_language', 'identifier'),
            'relation_name':
            relation_name,
            '__tablename__':
            _table_name,
        })

    # Create the table object
    table = Table(
        _table_name,
        foreign_class.__table__.metadata,
        Column(foreign_key_name,
               Integer,
               ForeignKey(foreign_class.id),
               primary_key=True,
               nullable=False,
               doc=u"ID of the %s these texts relate to" %
               foreign_class.__singlename__),
        Column('local_language_id',
               Integer,
               ForeignKey(language_class.id),
               primary_key=True,
               nullable=False,
               doc=u"Language these texts are in"),
    )
    Translations.__table__ = table

    # Add ye columns
    # Column objects have a _creation_order attribute in ascending order; use
    # this to get the (unordered) kwargs sorted correctly
    kwitems = list(kwargs.items())
    kwitems.sort(key=lambda kv: kv[1]._creation_order)
    for name, column in kwitems:
        column.name = name
        table.append_column(column)

    # Construct ye mapper
    mapper(Translations,
           table,
           properties={
               'foreign_id':
               synonym(foreign_key_name),
               'local_language':
               relationship(
                   language_class,
                   primaryjoin=table.c.local_language_id == language_class.id,
                   innerjoin=True),
           })

    # Add full-table relations to the original class
    # Foo.bars_table
    setattr(foreign_class, relation_name + '_table', Translations)
    # Foo.bars
    setattr(
        foreign_class, relation_name,
        relationship(
            Translations,
            primaryjoin=foreign_class.id == Translations.foreign_id,
            collection_class=attribute_mapped_collection('local_language'),
        ))
    # Foo.bars_local
    # This is a bit clever; it uses bindparam() to make the join clause
    # modifiable on the fly.  db sessions know the current language and
    # populate the bindparam.
    # The 'dummy' value is to trick SQLA; without it, SQLA thinks this
    # bindparam is just its own auto-generated clause and everything gets
    # confused.
    local_relation_name = relation_name + '_local'
    setattr(
        foreign_class, local_relation_name,
        relationship(
            Translations,
            primaryjoin=and_(
                Translations.foreign_id == foreign_class.id,
                Translations.local_language_id == bindparam(
                    '_default_language_id',
                    value='dummy',
                    type_=Integer,
                    required=True),
            ),
            foreign_keys=[
                Translations.foreign_id, Translations.local_language_id
            ],
            uselist=False,
            lazy=relation_lazy,
        ))

    # Add per-column proxies to the original class
    for name, column in kwitems:
        getset_factory = None
        string_getter = column.info.get('string_getter')
        if string_getter:
            getset_factory = _getset_factory_factory(column.name,
                                                     string_getter)

        # Class.(column) -- accessor for the default language's value
        setattr(
            foreign_class, name,
            LocalAssociationProxy(local_relation_name,
                                  name,
                                  getset_factory=getset_factory))

        # Class.(column)_map -- accessor for the language dict
        # Need a custom creator since Translations doesn't have an init, and
        # these are passed as *args anyway.
        # BUGFIX: bind ``name`` as a default argument.  Closures in Python
        # late-bind free variables, so without this every column's creator
        # would write to whichever ``name`` the loop ended on.
        def creator(language, value, name=name):
            row = Translations()
            row.local_language = language
            setattr(row, name, value)
            return row

        setattr(
            foreign_class, name + '_map',
            association_proxy(relation_name,
                              name,
                              creator=creator,
                              getset_factory=getset_factory))

    # Add to the list of translation classes
    foreign_class.translation_classes.append(Translations)

    # Done
    return Translations
Esempio n. 15
0
class Submission(Base):
    """Class to store a submission.

    Links a Participation (who submitted) to a Task (what was
    submitted) at a given timestamp; per-dataset evaluation outcomes
    live in the related SubmissionResult rows.

    """
    __tablename__ = 'submissions'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # User and Contest, thus Participation (id and object) that did the
    # submission.
    participation_id = Column(Integer,
                              ForeignKey(Participation.id,
                                         onupdate="CASCADE",
                                         ondelete="CASCADE"),
                              nullable=False,
                              index=True)
    participation = relationship(Participation,
                                 backref=backref("submissions",
                                                 cascade="all, delete-orphan",
                                                 passive_deletes=True))

    # Task (id and object) of the submission.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    task = relationship(Task,
                        backref=backref("submissions",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Time of the submission.
    timestamp = Column(DateTime, nullable=False)

    # Language of submission, or None if not applicable.
    language = Column(String, nullable=True)

    # Comment from the administrator on the submission.
    comment = Column(Unicode, nullable=False, default="")

    # If false, submission will not be considered in contestant's score.
    official = Column(
        Boolean,
        nullable=False,
        default=True,
    )

    @property
    def short_comment(self):
        """The first line of the comment (text before the first newline)."""
        return self.comment.split("\n", 1)[0]

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # files (dict of File objects indexed by filename)
    # token (Token object or None)
    # results (list of SubmissionResult objects)

    def get_result(self, dataset=None):
        """Return the result associated to a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult|None): the submission result
            associated to this submission and the given dataset, if it
            exists in the database, otherwise None.

        """
        if dataset is not None:
            # Use IDs to avoid triggering a lazy-load query.
            assert self.task_id == dataset.task_id
            dataset_id = dataset.id
        else:
            dataset_id = self.task.active_dataset_id

        return SubmissionResult.get_from_id((self.id, dataset_id),
                                            self.sa_session)

    def get_result_or_create(self, dataset=None):
        """Return and, if necessary, create the result for a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult): the submission result associated to
            this submission and the given dataset; if it does not
            exist, a new one is created.

        """
        if dataset is None:
            dataset = self.task.active_dataset

        submission_result = self.get_result(dataset)

        if submission_result is None:
            # NOTE(review): the new result is not added to the session
            # here -- presumably persisted through the submission
            # relationship cascade; confirm caller expectations.
            submission_result = SubmissionResult(submission=self,
                                                 dataset=dataset)

        return submission_result

    def tokened(self):
        """Return if the user played a token against the submission.

        return (bool): True if tokened, False otherwise.

        """
        return self.token is not None
Esempio n. 16
0
class Project(db.Model, DomainObject):
    '''A microtasking Project to which Tasks are associated.
    '''

    __tablename__ = 'project'

    #: ID of the project
    id = Column(Integer, primary_key=True)
    #: UTC timestamp when the project is created
    created = Column(Text, default=make_timestamp)
    #: UTC timestamp when the project is updated (or any of its relationships)
    updated = Column(Text, default=make_timestamp, onupdate=make_timestamp)
    #: Project name
    name = Column(Unicode(length=255), unique=True, nullable=False)
    #: Project slug for the URL
    short_name = Column(Unicode(length=255), unique=True, nullable=False)
    #: Project description
    description = Column(Unicode(length=255), nullable=False)
    #: Project long description
    long_description = Column(UnicodeText)
    #: Project webhook
    webhook = Column(Text)
    #: If the project allows anonymous contributors
    allow_anonymous_contributors = Column(Boolean, default=True)
    #: If the project is published
    published = Column(Boolean, nullable=False, default=False)
    #: If the project is public
    public = Column(Boolean, nullable=False, default=False)
    # If the project is featured
    featured = Column(Boolean, nullable=False, default=False)
    # Secret key for project
    secret_key = Column(Text, default=make_uuid)
    # Zip download
    zip_download = Column(Boolean, default=True)
    # If the project owner has been emailed
    contacted = Column(Boolean, nullable=False, default=False)
    #: Project owner_id
    owner_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    #: Project Category
    category_id = Column(Integer, ForeignKey('category.id'), nullable=False)
    #: Project info field formatted as JSON.
    # Pass the ``dict`` callable (not ``dict()``) so SQLAlchemy builds a
    # fresh dict per row instead of sharing one mutable instance that was
    # created once at class-definition time.
    info = Column(MutableDict.as_mutable(JSONB), default=dict)

    tasks = relationship(Task,
                         cascade='all, delete, delete-orphan',
                         backref='project')
    task_runs = relationship(TaskRun,
                             backref='project',
                             cascade='all, delete-orphan',
                             order_by='TaskRun.finish_time.desc()')
    category = relationship(Category)
    blogposts = relationship(Blogpost,
                             cascade='all, delete-orphan',
                             backref='project')
    #: IDs of co-owners; ``list`` callable for a fresh list per row (see info).
    owners_ids = Column(MutableList.as_mutable(ARRAY(Integer)), default=list)

    def needs_password(self):
        """Return True if the project is password-protected."""
        return self.get_passwd_hash() is not None

    def get_passwd_hash(self):
        """Return the stored password hash (or None) from the info dict."""
        return self.info.get('passwd_hash')

    def get_passwd(self):
        """Return the clear password recovered from the signed hash, or None."""
        if self.needs_password():
            return signer.loads(self.get_passwd_hash())
        return None

    def set_password(self, password):
        """Store a signed password; clear it for too-short passwords.

        Returns True if the password was set, False if it was cleared.
        NOTE(review): the ``> 1`` threshold rejects one-character
        passwords as well as empty ones -- confirm this is intended.
        """
        if len(password) > 1:
            self.info['passwd_hash'] = signer.dumps(password)
            return True
        self.info['passwd_hash'] = None
        return False

    def check_password(self, password):
        """Return True if *password* matches the stored one."""
        if self.needs_password():
            return self.get_passwd() == password
        return False

    def has_autoimporter(self):
        """Return True if an autoimporter is configured."""
        return self.get_autoimporter() is not None

    def get_autoimporter(self):
        """Return the autoimporter config from the info dict, or None."""
        return self.info.get('autoimporter')

    def set_autoimporter(self, new=None):
        """Set (or clear, with None) the autoimporter config."""
        self.info['autoimporter'] = new

    def delete_autoimporter(self):
        """Remove the autoimporter config; raises KeyError if absent."""
        del self.info['autoimporter']

    def has_presenter(self):
        """Return True if a task presenter is available (or disabled globally)."""
        if current_app.config.get('DISABLE_TASK_PRESENTER') is True:
            return True
        else:
            return self.info.get('task_presenter') not in ('', None)

    @classmethod
    def public_attributes(cls):
        """Return a list of public attributes."""
        return [
            'id', 'description', 'info', 'n_tasks', 'n_volunteers', 'name',
            'overall_progress', 'short_name', 'created', 'category_id',
            'long_description', 'last_activity', 'last_activity_raw',
            'n_task_runs', 'n_results', 'owner', 'updated', 'featured',
            'owner_id', 'n_completed_tasks', 'n_blogposts', 'owners_ids'
        ]

    @classmethod
    def public_info_keys(cls):
        """Return a list of public info keys."""
        default = [
            'container', 'thumbnail', 'thumbnail_url', 'task_presenter',
            'tutorial', 'sched'
        ]
        extra = current_app.config.get('PROJECT_INFO_PUBLIC_FIELDS')
        if extra:
            return list(set(default).union(set(extra)))
        else:
            return default
Esempio n. 17
0
class Evaluation(Base):
    """Class to store information about the outcome of the evaluation
    of a submission against one testcase.

    """
    __tablename__ = 'evaluations'
    # Composite FK ties each evaluation to the SubmissionResult for the
    # same (submission, dataset) pair; one row per testcase is enforced
    # by the unique constraint.
    __table_args__ = (
        ForeignKeyConstraint(
            ('submission_id', 'dataset_id'),
            (SubmissionResult.submission_id, SubmissionResult.dataset_id),
            onupdate="CASCADE",
            ondelete="CASCADE"),
        UniqueConstraint('submission_id', 'dataset_id', 'testcase_id'),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Submission (id and object) owning the evaluation.
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           nullable=False,
                           index=True)
    submission = relationship(Submission, viewonly=True)

    # Dataset (id and object) owning the evaluation.
    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    dataset = relationship(Dataset, viewonly=True)

    # SubmissionResult owning the evaluation.
    submission_result = relationship(SubmissionResult,
                                     backref=backref(
                                         'evaluations',
                                         cascade="all, delete-orphan",
                                         passive_deletes=True))

    # Testcase (id and object) this evaluation was performed on.
    testcase_id = Column(Integer,
                         ForeignKey(Testcase.id,
                                    onupdate="CASCADE",
                                    ondelete="CASCADE"),
                         nullable=False,
                         index=True)
    testcase = relationship(Testcase)

    # String containing the outcome of the evaluation (usually 1.0,
    # ...) not necessary the points awarded, that will be computed by
    # the score type.
    outcome = Column(Unicode, nullable=True)

    # String containing output from the grader (usually "Correct",
    # "Time limit", ...).
    text = Column(String, nullable=True)

    # Evaluation's time and wall-clock time, in seconds.
    execution_time = Column(Float, nullable=True)
    execution_wall_clock_time = Column(Float, nullable=True)

    # Memory used by the evaluation, in bytes.
    execution_memory = Column(Integer, nullable=True)

    # Worker shard and sandbox where the evaluation was performed.
    evaluation_shard = Column(Integer, nullable=True)
    evaluation_sandbox = Column(Unicode, nullable=True)

    @property
    def codename(self):
        """Return the codename of the testcase (triggers a lazy load)."""
        return self.testcase.codename
Esempio n. 18
0
class User(db.Model, DomainObject, UserMixin):
    '''A registered user of the PyBossa system'''

    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    #: UTC timestamp of the user when it's created.
    created = Column(Text, default=make_timestamp)
    #: E-mail address; unique login/contact identifier.
    email_addr = Column(Unicode(length=254), unique=True, nullable=False)
    #: Name of the user (this is used as the nickname).
    name = Column(Unicode(length=254), unique=True, nullable=False)
    #: Fullname of the user.
    fullname = Column(Unicode(length=500), nullable=False)
    #: Language used by the user in the PyBossa server.
    locale = Column(Unicode(length=254), default=u'en', nullable=False)
    #: API key for programmatic access (UUID string).
    api_key = Column(String(length=36), default=make_uuid, unique=True)
    # NOTE(review): unique=True on a password hash looks unusual -- it
    # forbids two users from sharing a hash; confirm this is intended.
    passwd_hash = Column(Unicode(length=254), unique=True)
    admin = Column(Boolean, default=False)
    pro = Column(Boolean, default=False)
    privacy_mode = Column(Boolean, default=True, nullable=False)
    category = Column(Integer)
    flags = Column(Integer)
    # External identity-provider IDs (one per OAuth backend).
    twitter_user_id = Column(BigInteger, unique=True)
    facebook_user_id = Column(BigInteger, unique=True)
    google_user_id = Column(String, unique=True)
    ckan_api = Column(String, unique=True)
    newsletter_prompted = Column(Boolean, default=False)
    valid_email = Column(Boolean, default=False)
    confirmation_email_sent = Column(Boolean, default=False)
    #: Free-form JSON info dict (``dict`` callable gives a fresh dict per row).
    info = Column(JSONEncodedDict, default=dict)

    ## Relationships
    task_runs = relationship(TaskRun, backref='user')
    apps = relationship(App, backref='owner')
    blogposts = relationship(Blogpost, backref='owner')

    def get_id(self):
        '''id for login system. equates to name'''
        return self.name

    def set_password(self, password):
        '''Hash *password* and store it in passwd_hash.'''
        self.passwd_hash = signer.generate_password_hash(password)

    def check_password(self, password):
        '''Return True if *password* matches the stored hash.'''
        # OAuth users do not have a password
        if self.passwd_hash:
            return signer.check_password_hash(self.passwd_hash, password)
        return False
Esempio n. 19
0
class DbComputer(Base):
    """SQLAlchemy model for a computer in AiiDA's ``db_dbcomputer`` table.

    Stores connection (transport/scheduler) configuration plus a free-form
    JSON ``metadata`` payload, mapped as ``_metadata`` to avoid clashing
    with SQLAlchemy's own ``metadata`` attribute.
    """
    __tablename__ = "db_dbcomputer"

    id = Column(Integer, primary_key=True)

    uuid = Column(UUID(as_uuid=True), default=uuid_func)
    name = Column(String(255), unique=True, nullable=False)
    hostname = Column(String(255))

    description = Column(Text, nullable=True)
    enabled = Column(Boolean)

    transport_type = Column(String(255))
    scheduler_type = Column(String(255))

    transport_params = Column(JSONB)
    # Mapped under attribute _metadata because 'metadata' is reserved by
    # SQLAlchemy's declarative base.
    _metadata = Column('metadata', JSONB)

    def __init__(self, *args, **kwargs):
        # Set defaults first so kwargs handled by the parent constructor
        # can still override them.
        self.enabled = True
        self._metadata = {}
        self.transport_params = {}
        # TODO SP: it's supposed to be nullable, but there is a NOT NULL
        # constraint inside the DB.
        self.description= ""

        super(DbComputer, self).__init__(*args, **kwargs)

    @classmethod
    def get_dbcomputer(cls, computer):
        """
        Return a DbComputer from its name (or from another Computer or DbComputer instance)
        """

        from aiida.orm.computer import Computer
        # NOTE(review): `basestring` is Python 2 only -- this module
        # appears to target Python 2; confirm before porting.
        if isinstance(computer, basestring):
            try:
                dbcomputer = cls.session.query(cls).filter(cls.name==computer).one()
            except NoResultFound:
                raise NotExistent("No computer found in the table of computers with "
                                  "the given name '{}'".format(computer))
            except MultipleResultsFound:
                raise DbContentError("There is more than one computer with name '{}', "
                                     "pass a Computer instance".format(computer))
        elif isinstance(computer, int):
            try:
                dbcomputer = cls.session.query(cls).filter(cls.id==computer).one()
            except NoResultFound:
                raise NotExistent("No computer found in the table of computers with "
                                  "the given id '{}'".format(computer))
        elif isinstance(computer, DbComputer):
            if computer.id is None:
                raise ValueError("The computer instance you are passing has not been stored yet")
            dbcomputer = computer
        elif isinstance(computer, Computer):
            if computer.dbcomputer.id is None:
                raise ValueError("The computer instance you are passing has not been stored yet")
            dbcomputer = computer.dbcomputer
        else:
            raise TypeError("Pass either a computer name, a DbComputer SQLAlchemy instance, a Computer id or a Computer object")
        return dbcomputer

    def get_aiida_class(self):
        """Wrap this row in the higher-level aiida.orm Computer class."""
        from aiida.orm.computer import Computer
        return Computer(dbcomputer=self)

    def get_workdir(self):
        """Return the 'workdir' entry of the metadata, raising if missing."""
        try:
            return self._metadata['workdir']
        except KeyError:
            raise ConfigurationError('No workdir found for DbComputer {} '.format(
                self.name))

    @property
    def pk(self):
        """Alias of ``id`` (Django-style primary-key name)."""
        return self.id

    def __str__(self):
        if self.enabled:
            return "{} ({})".format(self.name, self.hostname)
        else:
            return "{} ({}) [DISABLED]".format(self.name, self.hostname)
Esempio n. 20
0
class PostMapped(Base, Post):
    '''
    Provides the mapping for Post.
    '''
    __tablename__ = 'post'
    __table_args__ = dict(mysql_engine='InnoDB', mysql_charset='utf8')

    Id = Column('id', INTEGER(unsigned=True), primary_key=True)
    Uuid = Column('uuid', String(32))
    # Exposes the related PostTypeMapped.Key through the 'type' relationship.
    Type = association_proxy('type', 'Key')
    Creator = Column('fk_creator_id', ForeignKey(UserMapped.Id, ondelete='RESTRICT'), nullable=False)
    Author = Column('fk_author_id', ForeignKey(CollaboratorMapped.Id, ondelete='RESTRICT'))
    Feed = Column('fk_feed_id', ForeignKey(SourceMapped.Id, ondelete='RESTRICT'))
    Meta = Column('meta', TEXT)
    ContentPlain = Column('content_plain', TEXT)
    Content = Column('content', TEXT)
    CreatedOn = Column('created_on', DateTime, nullable=False)
    PublishedOn = Column('published_on', DateTime)
    WasPublished = Column('was_published', Boolean)
    UpdatedOn = Column('updated_on', DateTime)
    DeletedOn = Column('deleted_on', DateTime)
    @hybrid_property
    def IsModified(self):
        """Instance-level: True once the post has an UpdatedOn timestamp."""
        return self.UpdatedOn is not None
    @hybrid_property
    def IsPublished(self):
        """Instance-level: True once the post has a PublishedOn timestamp."""
        return self.PublishedOn is not None
    @hybrid_property
    def AuthorName(self):
        """Author display name, falling back to the creator's name."""
        if self.Author is None:
            if self.creator is None: return None
            else: return self.creator.Name
        return self.author.Name

    # Non REST model attributes --------------------------------------
    typeId = Column('fk_type_id', ForeignKey(PostTypeMapped.id, ondelete='RESTRICT'), nullable=False)
    type = relationship(PostTypeMapped, uselist=False, lazy='joined')
    author = relationship(CollaboratorMapped, uselist=False, lazy='joined')
    creator = relationship(UserMapped, uselist=False, lazy='joined')

    # Expression for hybrid ------------------------------------
    # NOTE(review): stacking @classmethod above @X.expression is unusual;
    # plain @X.expression normally suffices -- confirm it works under the
    # project's SQLAlchemy version.
    @classmethod
    @IsModified.expression
    def _IsModified(cls):
        return case([(cls.UpdatedOn != None, True)], else_=False)
    @classmethod
    @IsPublished.expression
    def _IsPublished(cls):
        return case([(cls.PublishedOn != None, True)], else_=False)
    @classmethod
    @AuthorName.expression
    def _AuthorName(cls):
        return case([(cls.Author == None, UserMapped.Name)], else_=CollaboratorMapped.Name)
Esempio n. 21
0
 def testPrimaryKey4(self):
     """A table with a lone integer PK yields ('id',) as the datum id fields."""
     pk_table = Table('Test', self.metadata,
                      Column('id', Integer, primary_key=True))
     datum_factory = newSADatum(self.metadata)
     self.SADatum = datum_factory
     datum = datum_factory('Test')
     self.assertEqual(('id', ), datum._tableau_id_fields)
Esempio n. 22
0
def _created_column() -> Column:
    # Column objects belong to exactly one Table, so each caller gets a
    # fresh "created" timestamp column defaulting to the insert time.
    return Column("created",
                  TIMESTAMP,
                  nullable=False,
                  server_default=func.now())


def _updated_column() -> Column:
    # Fresh "updated" column per table; the server default is
    # MySQL-specific and refreshes the timestamp on every UPDATE.
    return Column(
        "updated",
        TIMESTAMP,
        nullable=False,
        server_default=text(
            "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
    )


def register_tables(metadata: MetaData) -> ModelMap:
    """Define the post/comment/reaction tables on *metadata*.

    Returns a ModelMap of ModelHelper wrappers, one per table, each
    carrying the Table, the name of its author column, and the engine
    bound to *metadata*.

    NOTE(review): ``metadata.bind`` is deprecated in SQLAlchemy 1.4 and
    removed in 2.0 -- confirm the project pins an older version.
    """
    return ModelMap(
        post=ModelHelper(
            table=Table(
                "post",
                metadata,
                Column("id", Integer, primary_key=True, autoincrement=True),
                Column("title", String(150), nullable=False),
                Column("author_id", String(32), nullable=False, index=True),
                Column("content", Text),
                _created_column(),
                _updated_column(),
                mysql_engine="InnoDB",
                mysql_charset="utf8mb4",
            ),
            author_key="author_id",
            engine=metadata.bind,
        ),
        comment=ModelHelper(
            table=Table(
                "comment",
                metadata,
                Column("id", Integer, primary_key=True, autoincrement=True),
                # Deleting a post drops its comments at the DB level.
                Column(
                    "post_id",
                    Integer,
                    ForeignKey("post.id", ondelete="CASCADE"),
                    nullable=False,
                    index=True,
                ),
                Column("author_id", String(32), nullable=False),
                Column("content", Text),
                _created_column(),
                _updated_column(),
                mysql_engine="InnoDB",
                mysql_charset="utf8mb4",
            ),
            author_key="author_id",
            engine=metadata.bind,
        ),
        reaction=ModelHelper(
            table=Table(
                "reaction",
                metadata,
                Column("id", Integer, primary_key=True, autoincrement=True),
                # Deleting a comment drops its reactions at the DB level.
                Column(
                    "comment_id",
                    Integer,
                    ForeignKey("comment.id", ondelete="CASCADE"),
                    nullable=False,
                    index=True,
                ),
                Column("author_id", String(32), nullable=False),
                Column("reaction_type", Enum(ReactionType), nullable=False),
                _updated_column(),
                # At most one reaction per author per comment.
                UniqueConstraint("comment_id", "author_id"),
                mysql_engine="InnoDB",
                mysql_charset="utf8mb4",
            ),
            author_key="author_id",
            engine=metadata.bind,
        ),
    )
Esempio n. 23
0
 class Bar(self.declarative_base):
     """Polymorphic parent mapped to table 'Bar'.

     Subclasses discriminate on the ``type`` column (``polymorphic_on``);
     ``foo_id`` references a row of table 'Foo'.
     """
     __tablename__ = 'Bar'
     id = Column(Integer, primary_key=True)
     type = Column(String)
     foo_id = Column(Integer, ForeignKey('Foo.id'))
     __mapper_args__ = {'polymorphic_on': type}
Esempio n. 24
0
def _gen_array_simple(cls, props, k, child_cust, p):
    """Map the array-of-simple-type member *k* of *cls* to a child table.

    Ensures a child table exists holding a foreign-key column back to
    *cls* and a data column for the values, maps an ad-hoc class onto
    it, and exposes the values on *cls* through an association proxy.

    NOTE(review): _get_col_o2m is a generator -- the first next() yields
    (name, type) info and a later next() yields the actual fk Column;
    confirm against its definition.
    """
    table_name = cls.Attributes.table_name
    metadata = cls.Attributes.sqla_metadata

    # get left (fk) column info
    _gen_col = _get_col_o2m(cls,
                            p.left,
                            ondelete=p.fk_left_ondelete,
                            onupdate=p.fk_left_onupdate,
                            deferrable=p.fk_left_deferrable,
                            initially=p.fk_left_initially)
    col_info = next(_gen_col)  # gets the column name
    # FIXME: Add support for multi-column primary keys.
    p.left, child_left_col_type = col_info[0]
    child_left_col_name = p.left

    # get right(data) column info
    child_right_col_type = _get_sqlalchemy_type(child_cust)
    child_right_col_name = p.right  # this is the data column
    if child_right_col_name is None:
        child_right_col_name = k

    # get table name; default is "<parent_table>_<member>"
    child_table_name = child_cust.Attributes.table_name
    if child_table_name is None:
        child_table_name = '_'.join([table_name, k])

    if child_table_name in metadata.tables:
        child_t = metadata.tables[child_table_name]

        # if we have the table, make sure have the right column (data column)
        assert child_right_col_type.__class__ is \
               child_t.c[child_right_col_name].type.__class__, "%s.%s: %r != %r" % \
                   (cls, child_right_col_name, child_right_col_type.__class__,
                               child_t.c[child_right_col_name].type.__class__)

        if child_left_col_name in child_t.c:
            # NOTE(review): this compares child_left_col_type with `is`
            # against a class object, unlike the `.__class__` comparison
            # above -- confirm child_left_col_type holds a class here.
            assert child_left_col_type is \
                child_t.c[child_left_col_name].type.__class__, "%r != %r" % \
                   (child_left_col_type,
                               child_t.c[child_left_col_name].type.__class__)
        else:
            # Table exists but our own foreign key doesn't.
            child_left_col = next(_gen_col)
            _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)
            child_t.append_column(child_left_col)

    else:
        # table does not exist, generate table
        child_right_col = Column(child_right_col_name, child_right_col_type)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_right_col)

        child_left_col = next(_gen_col)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)

        child_t = Table(
            child_table_name,
            metadata,
            Column('id', sqlalchemy.Integer, primary_key=True),
            child_left_col,
            child_right_col,
        )
        _gen_index_info(child_t, child_right_col, child_right_col_name,
                        child_cust)

    # generate temporary class for association proxy
    cls_name = ''.join(x.capitalize() or '_'
                       for x in child_table_name.split('_'))

    # generates camelcase class name.

    def _i(self, *args):
        # Initializer for the ad-hoc mapped class: store the single
        # positional value into the data column.
        setattr(self, child_right_col_name, args[0])

    cls_ = type("_" + cls_name, (object, ), {'__init__': _i})
    mapper(cls_, child_t)
    props["_" + k] = relationship(cls_)

    # generate association proxy
    setattr(cls, k, association_proxy("_" + k, child_right_col_name))
Esempio n. 25
0
 class Test(self.declarative_base):
     """Minimal mapped class: integer PK plus one string column."""
     __tablename__ = 'Test'
     id = Column(Integer, primary_key=True)
     field = Column(String)
Esempio n. 26
0
class Workflow(Base):
    """
    A collection of Stages and Tasks encoded as a DAG.
    """
    __tablename__ = 'workflow'

    id = Column(Integer, primary_key=True)
    name = Column(VARCHAR(200), unique=True, nullable=False)
    successful = Column(Boolean, nullable=False)
    created_on = Column(DateTime)
    started_on = Column(DateTime)
    finished_on = Column(DateTime)
    max_cores = Column(Integer)
    primary_log_path = Column(String(255))
    # Lazily created logger instance; see the `log` property.
    _log = None

    # Free-form JSON-serialized metadata, tracked for in-place mutation.
    info = Column(MutableDict.as_mutable(JSONEncodedDict))
    _status = Column(Enum_ColumnType(WorkflowStatus, length=255),
                     default=WorkflowStatus.no_attempt)
    stages = relationship("Stage",
                          cascade="all, merge, delete-orphan",
                          order_by="Stage.number",
                          passive_deletes=True,
                          backref='workflow')

    # Attributes excluded when serializing this object to a dict.
    exclude_from_dict = ['info']
    # Tasks appended here so Python's GC cannot collect them mid-run.
    dont_garbage_collect = None
    termination_signal = None

    @declared_attr
    def status(cls):
        """Synonym for ``_status`` whose setter fires
        ``signal_workflow_status_change`` whenever the value actually changes.
        """
        def get_status(self):
            return self._status

        def set_status(self, value):
            if self._status != value:
                self._status = value
                signal_workflow_status_change.send(self)

        return synonym('_status', descriptor=property(get_status, set_status))

    @validates('name')
    def validate_name(self, key, name):
        """Restrict workflow names to letters, numbers, hyphens, underscores."""
        assert re.match(r"^[\w-]+$", name), 'Invalid workflow name, characters are limited to letters, numbers, ' \
                                            'hyphens and underscores'
        return name

    @orm.reconstructor
    def constructor(self):
        """Re-run ``__init__`` when the object is loaded from the database."""
        self.__init__(manual_instantiation=False)

    def __init__(self, manual_instantiation=True, *args, **kwargs):
        """Do not call directly; use the Cosmos.start method instead."""
        # FIXME provide the cosmos_app instance?

        if manual_instantiation:
            raise TypeError(
                'Do not instantiate an Workflow manually.  Use the Cosmos.start method.'
            )
        super(Workflow, self).__init__(*args, **kwargs)
        # assert self.output_dir is not None, 'output_dir cannot be None'
        if self.info is None:
            # mutable dict column defaults to None
            self.info = dict()
        self.jobmanager = None
        if not self.created_on:
            self.created_on = datetime.datetime.now()
        self.dont_garbage_collect = []

    @property
    def log(self):
        """Logger for this workflow, created lazily on first access."""
        if self._log is None:
            self._log = get_logger('%s' % self, self.primary_log_path)
        return self._log

    def make_output_dirs(self):
        """
        Create directory paths of all output files
        """
        dirs = set()

        for task in self.tasks:
            for out_name, v in task.output_map.iteritems():
                # Values whose key ends in 'dir' are already directories;
                # anything else is a file whose parent dir we need.
                dirname = lambda p: p if out_name.endswith(
                    'dir') or p is None else os.path.dirname(p)

                if isinstance(v, (tuple, list)):
                    dirs.update(map(dirname, v))
                elif isinstance(v, dict):
                    # BUGFIX: was `raise NotImplemented()` -- NotImplemented is
                    # a non-callable singleton, so that line raised TypeError
                    # instead of the intended exception.
                    raise NotImplementedError()
                else:
                    dirs.add(dirname(v))

        for d in dirs:
            if d is not None and '://' not in d:
                # Skip URLs (s3://, gs://, ...); only make local dirs.
                mkdir(d)

    def add_task(self,
                 func,
                 params=None,
                 parents=None,
                 stage_name=None,
                 uid=None,
                 drm=None,
                 queue=None,
                 must_succeed=True,
                 time_req=None,
                 core_req=None,
                 mem_req=None,
                 max_attempts=None,
                 noop=False,
                 job_class=None,
                 drm_options=None):
        """
        Adds a new Task to the Workflow.  If the Task already exists (and was successful), return the successful Task stored in the database

        :param callable func: A function which returns a string which will get converted to a shell script to be executed.  `func` will not get called until
          all of its dependencies have completed.
        :param dict params: Parameters to `func`.  Must be jsonable so that it can be stored in the database.  Any Dependency objects will get resolved into
            a string, and the Dependency.task will be added to this Task's parents.
        :param list[Tasks] parents: A list of dependent Tasks.
        :param str uid: A unique identifier for this Task, primarily used for skipping  previously successful Tasks.
            If a Task with this stage_name and uid already exists in the database (and was successful), the
            database version will be returned and a new one will not be created.
        :param str stage_name: The name of the Stage to add this Task to.  Defaults to `func.__name__`.
        :param str drm: The drm to use for this Task (example 'local', 'ge' or 'drmaa:lsf').  Defaults to the `default_drm` parameter of :meth:`Cosmos.start`
        :param job_class: The name of a job_class to submit to; defaults to the `default_job_class` parameter of :meth:`Cosmos.start`
        :param queue: The name of a queue to submit to; defaults to the `default_queue` parameter of :meth:`Cosmos.start`
        :param bool must_succeed: Default True.  If False, the Workflow will not fail if this Task does not succeed.  Dependent Jobs will not be executed.
        :param bool time_req: The time requirement; will set the Task.time_req attribute which is intended to be used by :func:`get_submit_args` to request resources.
        :param int core_req: Number of cpus required for this Task.  Can also be set in the `params` dict or the default value of the Task function signature, but this value takes precedence.
            Warning!  In future versions, this will be the only way to set it.
        :param int mem_req: Number of MB of RAM required for this Task.   Can also be set in the `params` dict or the default value of the Task function signature, but this value takes predence.
            Warning!  In future versions, this will be the only way to set it.
        :param int max_attempts: The maximum number of times to retry a failed job.  Defaults to the `default_max_attempts` parameter of :meth:`Cosmos.start`
        :rtype: cosmos.api.Task
        """
        # Avoid cyclical import dependencies
        from cosmos.job.drm.DRM_Base import DRM
        from cosmos.models.Stage import Stage
        from cosmos import recursive_resolve_dependency

        # parents
        if parents is None:
            parents = []
        elif isinstance(parents, Task):
            parents = [parents]
        else:
            parents = list(parents)

        # params
        if params is None:
            params = dict()
        for k, v in params.iteritems():
            # decompose `Dependency` objects to values and parents
            new_val, parent_tasks = recursive_resolve_dependency(v)

            params[k] = new_val
            parents.extend(parent_tasks - set(parents))

        # uid
        if uid is None:
            # BUGFIX: was Python-2-only statement syntax
            # `raise AssertionError, '...'`; the call form below is valid in
            # both Python 2 and 3.
            raise AssertionError('uid parameter must be specified')
            # Fix me assert params are all JSONable
            # uid = str(params)
        else:
            assert isinstance(uid, basestring), 'uid must be a string'

        if stage_name is None:
            stage_name = str(func.__name__)

        # Get the right Stage
        stage = only_one((s for s in self.stages if s.name == stage_name),
                         None)
        if stage is None:
            stage = Stage(workflow=self,
                          name=stage_name,
                          status=StageStatus.no_attempt)
            self.session.add(stage)

        # Check if task is already in stage
        task = stage.get_task(uid, None)

        if task is not None:
            # if task is already in stage, but unsuccessful, raise an error (duplicate params) since unsuccessful tasks
            # were already removed on workflow load
            if task.successful:
                # If the user manually edited the dag and this a resume, parents might need to be-readded
                task.parents.extend(set(parents).difference(set(task.parents)))

                for p in parents:
                    if p.stage not in stage.parents:
                        stage.parents.append(p.stage)

                return task
            else:
                # TODO check for duplicate params here?  would be a lot faster at Workflow.run
                raise ValueError(
                    'Duplicate uid, you have added a Task to Stage %s with the uid (unique identifier) `%s` twice.  '
                    'Task uids must be unique within the same Stage.' %
                    (stage_name, uid))
        else:
            # Create Task
            sig = funcsigs.signature(func)

            def params_or_signature_default_or(name, default):
                # Resolution order: explicit params dict, then the function
                # signature's default, then the supplied fallback.
                if name in params:
                    return params[name]
                if name in sig.parameters:
                    param_default = sig.parameters[name].default
                    if param_default is funcsigs._empty:
                        return default
                    else:
                        return param_default
                return default

            input_map = dict()
            output_map = dict()

            # Parameters named in_* / out_* define the Task's input/output
            # files; they are required unless a default exists.
            for keyword, param in sig.parameters.iteritems():
                if keyword.startswith('in_'):
                    v = params.get(keyword, param.default)
                    assert v != funcsigs._empty, 'parameter %s for %s is required' % (
                        param, func)
                    input_map[keyword] = v
                elif keyword.startswith('out_'):
                    v = params.get(keyword, param.default)
                    assert v != funcsigs._empty, 'parameter %s for %s is required' % (
                        param, func)
                    output_map[keyword] = v

            task = Task(
                stage=stage,
                params=params,
                parents=parents,
                input_map=input_map,
                output_map=output_map,
                uid=uid,
                drm=drm if drm is not None else self.cosmos_app.default_drm,
                job_class=job_class if job_class is not None else
                self.cosmos_app.default_job_class,
                queue=queue
                if queue is not None else self.cosmos_app.default_queue,
                must_succeed=must_succeed,
                core_req=core_req if core_req is not None else
                params_or_signature_default_or('core_req', 1),
                mem_req=mem_req if mem_req is not None else
                params_or_signature_default_or('mem_req', None),
                time_req=time_req
                if time_req is not None else self.cosmos_app.default_time_req,
                successful=False,
                max_attempts=max_attempts if max_attempts is not None else
                self.cosmos_app.default_max_attempts,
                attempt=1,
                NOOP=noop)

            task.cmd_fxn = func

            task.drm_options = drm_options if drm_options is not None else self.cosmos_app.default_drm_options
            DRM.validate_drm_options(task.drm, task.drm_options)

        # Add Stage Dependencies
        for p in parents:
            if p.stage not in stage.parents:
                stage.parents.append(p.stage)

        self.dont_garbage_collect.append(task)

        return task

    def run(self,
            max_cores=None,
            dry=False,
            set_successful=True,
            cmd_wrapper=signature.default_cmd_fxn_wrapper,
            log_out_dir_func=default_task_log_output_dir):
        """
        Runs this Workflow's DAG

        :param int max_cores: The maximum number of cores to use at once.  A value of None indicates no maximum.
        :param callable log_out_dir_func: A function that returns a Task's logging directory (must be unique).
             It receives one parameter: the Task instance.
             By default a Task's log output is stored in log/stage_name/task_id.
             See _default_task_log_output_dir for more info.
        :param callable cmd_wrapper: A decorator which will be applied to every Task's cmd_fxn.
        :param bool dry: If True, do not actually run any jobs.
        :param bool set_successful: Sets this workflow as successful if all tasks finish without a failure.  You might set this to False if you intend to add and
            run more tasks in this workflow later.

        Returns True if all tasks in the workflow ran successfully, False otherwise.
        If dry is specified, returns None.
        """
        try:
            assert os.path.exists(os.getcwd(
            )), 'current working dir does not exist! %s' % os.getcwd()

            assert hasattr(
                self, 'cosmos_app'
            ), 'Workflow was not initialized using the Workflow.start method'
            assert hasattr(log_out_dir_func,
                           '__call__'), 'log_out_dir_func must be a function'
            assert self.session, 'Workflow must be part of a sqlalchemy session'

            session = self.session
            self.log.info('Preparing to run %s using DRM `%s`, cwd is `%s`' %
                          (self, self.cosmos_app.default_drm, os.getcwd()))
            self.log.info('Running as %s@%s, pid %s' %
                          (getpass.getuser(), os.uname()[1], os.getpid()))

            self.max_cores = max_cores

            from ..job.JobManager import JobManager

            if self.jobmanager is None:
                self.jobmanager = JobManager(
                    get_submit_args=self.cosmos_app.get_submit_args,
                    cmd_wrapper=cmd_wrapper,
                    log_out_dir_func=log_out_dir_func)

            self.status = WorkflowStatus.running
            self.successful = False

            if self.started_on is None:
                self.started_on = datetime.datetime.now()

            task_graph = self.task_graph()
            stage_graph = self.stage_graph()

            assert len(set(self.stages)) == len(
                self.stages), 'duplicate stage name detected: %s' % (next(
                    duplicates(self.stages)))

            # renumber stages; break cycles first so a topological sort exists
            stage_graph_no_cycles = nx.DiGraph()
            stage_graph_no_cycles.add_nodes_from(stage_graph.nodes())
            stage_graph_no_cycles.add_edges_from(stage_graph.edges())
            for cycle in nx.simple_cycles(stage_graph):
                stage_graph_no_cycles.remove_edge(cycle[-1], cycle[0])
            for i, s in enumerate(topological_sort(stage_graph_no_cycles)):
                s.number = i + 1
                if s.status != StageStatus.successful:
                    s.status = StageStatus.no_attempt

            # Make sure everything is in the sqlalchemy session
            session.add(self)
            successful = filter(lambda t: t.successful, task_graph.nodes())

            # print stages
            for s in sorted(self.stages, key=lambda s: s.number):
                self.log.info('%s %s' % (s, s.status))

            # Create Task Queue
            task_queue = _copy_graph(task_graph)
            self.log.info('Skipping %s successful tasks...' % len(successful))
            task_queue.remove_nodes_from(successful)

            handle_exits(self)

            if self.max_cores is not None:
                self.log.info('Ensuring there are enough cores...')
                # make sure we've got enough cores
                for t in task_queue:
                    assert int(
                        t.core_req
                    ) <= self.max_cores, '%s requires more cpus (%s) than `max_cores` (%s)' % (
                        t, t.core_req, self.max_cores)

            # Run this thing!
            self.log.info('Committing to SQL db...')
            session.commit()
            if not dry:
                _run(self, session, task_queue)

                # set status
                if self.status == WorkflowStatus.failed_but_running:
                    self.status = WorkflowStatus.failed
                    # set stage status to failed
                    for s in self.stages:
                        if s.status == StageStatus.running_but_failed:
                            s.status = StageStatus.failed
                    session.commit()
                    return False
                elif self.status == WorkflowStatus.running:
                    if set_successful:
                        self.status = WorkflowStatus.successful
                    session.commit()
                    return True
                else:
                    self.log.warning('%s exited with status "%s"', self,
                                     self.status)
                    session.commit()
                    return False
            else:
                self.log.info('Workflow dry run is complete')
                return None
        except Exception as ex:
            self.log.fatal(ex, exc_info=True)
            raise

    def terminate(self, due_to_failure=True):
        """Terminate all running tasks and mark the workflow failed or killed.

        :param bool due_to_failure: If True, final status is `failed`;
            otherwise `killed`.
        """
        self.log.warning('Terminating %s!' % self)
        if self.jobmanager:
            self.log.info(
                'Processing finished tasks and terminating {num_running_tasks} running tasks'
                .format(num_running_tasks=len(
                    self.jobmanager.running_tasks), ))
            _process_finished_tasks(self.jobmanager)
            self.jobmanager.terminate()

        if due_to_failure:
            self.status = WorkflowStatus.failed
        else:
            self.status = WorkflowStatus.killed

        self.session.commit()

    def cleanup(self):
        """Clean up any dead tasks held by the job manager."""
        if self.jobmanager:
            self.log.info('Cleaning up {num_dead_tasks} dead tasks'.format(
                num_dead_tasks=len(self.jobmanager.dead_tasks), ))
            self.jobmanager.cleanup()

    @property
    def tasks(self):
        """All Tasks of all Stages of this workflow, as a flat list."""
        return [t for s in self.stages for t in s.tasks]
        # return session.query(Task).join(Stage).filter(Stage.workflow == ex).all()

    def stage_graph(self):
        """
        :return: (networkx.DiGraph) a DAG of the stages
        """
        g = nx.DiGraph()
        g.add_nodes_from(self.stages)
        g.add_edges_from((s, c) for s in self.stages for c in s.children if c)
        return g

    def task_graph(self):
        """
        :return: (networkx.DiGraph) a DAG of the tasks
        """
        g = nx.DiGraph()
        g.add_nodes_from(self.tasks)
        g.add_edges_from([(t, c) for t in self.tasks for c in t.children])
        return g

    def get_stage(self, name_or_id):
        """Return the Stage with the given name (str) or id (int).

        :raises ValueError: if no such Stage exists.
        """
        if isinstance(name_or_id, int):
            f = lambda s: s.id == name_or_id
        else:
            f = lambda s: s.name == name_or_id

        for stage in self.stages:
            if f(stage):
                return stage

        raise ValueError('Stage with name %s does not exist' % name_or_id)

    @property
    def url(self):
        """URL of this workflow in the cosmos web interface."""
        return url_for('cosmos.workflow', name=self.name)

    def __repr__(self):
        return '<Workflow[%s] %s>' % (self.id or '', self.name)

    def __unicode__(self):
        return self.__repr__()

    def delete(self, delete_files=False):
        """
        :param delete_files: (bool) If True, delete :attr:`output_dir` directory and all contents on the filesystem
        """
        if hasattr(self, 'log'):
            self.log.info('Deleting %s, delete_files=%s' %
                          (self, delete_files))
            # Flush and detach handlers so log files are not left open.
            for h in self.log.handlers:
                h.flush()
                h.close()
                self.log.removeHandler(h)

        if delete_files:
            raise NotImplementedError(
                'This should delete all Task.output_files')

        print >> sys.stderr, '%s Deleting from SQL...' % self
        self.session.delete(self)
        self.session.commit()
        print >> sys.stderr, '%s Deleted' % self

    def get_first_failed_task(self, key=lambda t: t.finished_on):
        """
        Return the first failed Task (chronologically).

        If no Task failed, return None.
        """
        for t in sorted([t for t in self.tasks if key(t) is not None],
                        key=key):
            if t.exit_status:
                return t
        return None
Esempio n. 27
0
 # Declarative model with a many-to-one reference to Model2 through the
 # 'model2_id' foreign-key column.
 class Model1(Base):
     __tablename__ = 'model1'
     # Surrogate integer primary key.
     id = Column(Integer, primary_key=True)
     # Foreign key to model2.id (nullable by default).
     model2_id = Column(Integer, ForeignKey('model2.id'))
     # ORM relationship resolving the foreign key to a Model2 instance.
     model2 = relationship("Model2")
Esempio n. 28
0
class SubmissionResult(Base):
    """Store the evaluation results of one submission on one dataset.

    """
    # Lifecycle statuses of a submission result. Note that COMPILING and
    # EVALUATING do not guarantee that compilation or evaluation will
    # actually be scheduled: the dataset may not be marked for evaluation,
    # or the maximum number of tries may already be exhausted. When no
    # submission result row exists for a (submission, dataset) pair, its
    # status is implicitly COMPILING.
    COMPILING = 1
    COMPILATION_FAILED = 2
    EVALUATING = 3
    SCORING = 4
    SCORED = 5

    __tablename__ = 'submission_results'
    __table_args__ = (UniqueConstraint('submission_id', 'dataset_id'), )

    # Composite primary key: (submission_id, dataset_id).
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           primary_key=True)
    submission = relationship(Submission,
                              backref=backref("results",
                                              cascade="all, delete-orphan",
                                              passive_deletes=True))

    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        primary_key=True)
    dataset = relationship(Dataset)

    # --- Result fields proper. ---

    # Compilation outcome: None = not yet compiled, "ok" = compiled and
    # ready to evaluate, "fail" = compilation rejected.
    compilation_outcome = Column(String, nullable=True)

    # Sandbox output captured during compilation.
    compilation_text = Column(String, nullable=True)

    # How many compilation attempts have failed so far.
    compilation_tries = Column(Integer, nullable=False, default=0)

    # Raw compiler stdout and stderr.
    compilation_stdout = Column(Unicode, nullable=True)
    compilation_stderr = Column(Unicode, nullable=True)

    # Resource usage of the compilation step.
    compilation_time = Column(Float, nullable=True)
    compilation_wall_clock_time = Column(Float, nullable=True)
    compilation_memory = Column(Integer, nullable=True)

    # Which worker shard / sandbox performed the compilation.
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(Unicode, nullable=True)

    # Evaluation outcome: None = not yet evaluated, "ok" = evaluated.
    # At any time this should be equal to evaluations != [].
    evaluation_outcome = Column(String, nullable=True)

    # How many evaluation attempts have failed so far.
    evaluation_tries = Column(Integer, nullable=False, default=0)

    # Score computed by ScoringService; None means not yet scored.
    score = Column(Float, nullable=True)

    # JSON-encoded details passed to ScoreType.get_html_details to build
    # the HTML snippet shown on AWS (and on CWS when a token was used),
    # e.g. per-testcase and per-subtask results.
    score_details = Column(String, nullable=True)

    # Same as the two fields above, restricted to what the user may see
    # without having used a token on this submission.
    public_score = Column(Float, nullable=True)
    public_score_details = Column(String, nullable=True)

    # JSON-encoded list of strings rendered as one row in the RWS
    # submissions table.
    ranking_score_details = Column(String, nullable=True)

    # Fields added automatically by SQLAlchemy backrefs:
    # executables (dict of Executable objects indexed by filename)
    # evaluations (list of Evaluation objects)

    def get_status(self):
        """Return the current lifecycle status of this object.

        """
        if not self.compiled():
            return SubmissionResult.COMPILING
        if self.compilation_failed():
            return SubmissionResult.COMPILATION_FAILED
        if not self.evaluated():
            return SubmissionResult.EVALUATING
        if not self.scored():
            return SubmissionResult.SCORING
        return SubmissionResult.SCORED

    def get_evaluation(self, testcase):
        """Return the Evaluation of this SR on the given Testcase, if any

        testcase (Testcase): the testcase the returned evaluation will
            belong to.

        return (Evaluation|None): the (only!) evaluation of this
            submission result on the given testcase, or None if there
            isn't any.

        """
        # Compare IDs so we do not trigger a lazy-load query.
        assert self.dataset_id == testcase.dataset_id

        # XXX If self.evaluations is already loaded we can walk over it
        # and spare a query.
        # (We could use .one() and avoid a LIMIT but we would need to
        # catch a NoResultFound exception.)
        query = self.sa_session.query(Evaluation)
        query = query.filter(Evaluation.submission_result == self)
        query = query.filter(Evaluation.testcase == testcase)
        return query.first()

    def compiled(self):
        """Tell whether this submission result has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    @staticmethod
    def filter_compiled():
        """Return a filtering expression for compiled submission results.

        """
        return SubmissionResult.compilation_outcome != None  # noqa

    def compilation_failed(self):
        """Tell whether this submission result failed to compile.

        return (bool): True if the compilation failed (i.e. the user's
            source has a problem), False if not yet compiled or if the
            compilation succeeded.

        """
        return self.compilation_outcome == "fail"

    @staticmethod
    def filter_compilation_failed():
        """Return a filtering expression for submission results failing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "fail"

    def compilation_succeeded(self):
        """Tell whether this submission result compiled successfully.

        return (bool): True if the compilation produced an executable,
            False if not yet compiled or if the compilation failed.

        """
        return self.compilation_outcome == "ok"

    @staticmethod
    def filter_compilation_succeeded():
        """Return a filtering expression for submission results passing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "ok"

    def evaluated(self):
        """Tell whether this submission result has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None

    @staticmethod
    def filter_evaluated():
        """Return a filtering expression for evaluated submission results.

        """
        return SubmissionResult.evaluation_outcome != None  # noqa

    def needs_scoring(self):
        """Tell whether this submission result still needs to be scored.

        return (bool): True if in need of scoring, False otherwise.

        """
        if self.scored():
            return False
        return self.compilation_failed() or self.evaluated()

    def scored(self):
        """Tell whether this submission result has been fully scored.

        return (bool): True if scored, False otherwise.

        """
        score_fields = ("score", "score_details", "public_score",
                        "public_score_details", "ranking_score_details")
        return all(getattr(self, field) is not None
                   for field in score_fields)

    @staticmethod
    def filter_scored():
        """Return a filtering expression for scored submission results.

        """
        return ((SubmissionResult.score != None)
                & (SubmissionResult.score_details != None)
                & (SubmissionResult.public_score != None)
                & (SubmissionResult.public_score_details != None)
                & (SubmissionResult.ranking_score_details != None))  # noqa

    def invalidate_compilation(self):
        """Blank all compilation and evaluation outcomes, and the score.

        """
        self.invalidate_evaluation()
        for attr in ("compilation_outcome",
                     "compilation_text",
                     "compilation_time",
                     "compilation_wall_clock_time",
                     "compilation_memory",
                     "compilation_shard",
                     "compilation_sandbox"):
            setattr(self, attr, None)
        self.compilation_tries = 0
        self.executables = {}

    def invalidate_evaluation(self):
        """Blank the evaluation outcomes and the score.

        """
        self.invalidate_score()
        self.evaluation_outcome = None
        self.evaluation_tries = 0
        self.evaluations = []

    def invalidate_score(self):
        """Blank the score.

        """
        for attr in ("score", "score_details", "public_score",
                     "public_score_details", "ranking_score_details"):
            setattr(self, attr, None)

    def set_compilation_outcome(self, success):
        """Record the compilation outcome.

        success (bool): if the compilation was successful.

        """
        if success:
            self.compilation_outcome = "ok"
        else:
            self.compilation_outcome = "fail"

    def set_evaluation_outcome(self):
        """Record the evaluation outcome (always ok now).

        """
        self.evaluation_outcome = "ok"
Esempio n. 29
0
class Submission(Base):
    """Class to store a submission. Not to be used directly (import it
    from SQLAlchemyAll).

    """
    __tablename__ = 'submissions'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # User (id and object) that did the submission.
    user_id = Column(Integer,
                     ForeignKey(User.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    user = relationship(User,
                        backref=backref("submissions",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Task (id and object) of the submission.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    task = relationship(Task,
                        backref=backref("submissions",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Time of the submission.
    timestamp = Column(DateTime, nullable=False)

    # Language of submission, or None if not applicable.
    language = Column(String, nullable=True)

    # Compilation outcome (can be None = yet to compile, "ok" =
    # compilation successful and we can evaluate, "fail" =
    # compilation unsuccessful, throw it away).
    compilation_outcome = Column(String, nullable=True)

    # String containing output from the sandbox, and the compiler
    # stdout and stderr.
    compilation_text = Column(String, nullable=True)

    # Number of attempts of compilation.
    compilation_tries = Column(Integer, nullable=False)

    # Worker shard and sandbox where the compilation was performed.
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(String, nullable=True)

    # Evaluation outcome (can be None = yet to evaluate, "ok" =
    # evaluation successful). At any time, this should be equal to
    # evaluations != [].
    evaluation_outcome = Column(String, nullable=True)

    # Number of attempts of evaluation.
    evaluation_tries = Column(Integer, nullable=False)

    # Score as computed by ScoreService. Null means not yet scored.
    score = Column(Float, nullable=True)

    # Score details. It is a string containing *simple* HTML code that
    # AWS (and CWS if the user used a token) uses to display the
    # details of the submission. For example, results for each
    # testcases, subtask, etc.
    score_details = Column(String, nullable=True)

    # The same as the last two fields, but from the point of view of
    # the user (when he/she did not play a token).
    public_score = Column(Float, nullable=True)
    public_score_details = Column(String, nullable=True)

    # Ranking score details. It is a list of strings that are going to
    # be shown in a single row in the table of submission in RWS. JSON
    # encoded.
    ranking_score_details = Column(String, nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # files (dict of File objects indexed by filename)
    # executables (dict of Executable objects indexed by filename)
    # evaluations (list of Evaluation objects, one for testcase)
    # token (Token object or None)

    LANGUAGES = ["c", "cpp", "pas"]
    # Map from source-file extension to canonical language name.
    LANGUAGES_MAP = {
        ".c": "c",
        ".cpp": "cpp",
        ".cc": "cpp",
        # Fixed: this entry had been corrupted to a placeholder
        # string; ".pas" must map to the "pas" language listed in
        # LANGUAGES, consistently with ".c" -> "c" above.
        ".pas": "pas",
    }

    def __init__(self,
                 user,
                 task,
                 timestamp,
                 files,
                 language=None,
                 compilation_outcome=None,
                 compilation_text=None,
                 compilation_tries=0,
                 executables=None,
                 evaluation_outcome=None,
                 evaluation_tries=0,
                 evaluations=None,
                 token=None,
                 compilation_shard=None,
                 compilation_sandbox=None):
        """Initialize the submission.

        user (User): the user that did the submission.
        task (Task): the task the submission is for.
        timestamp (datetime): time of the submission.
        files (dict): File objects indexed by filename.

        The remaining arguments mirror the columns documented above;
        mutable defaults (executables, evaluations) are created fresh
        per instance.

        """
        self.user = user
        self.task = task
        self.timestamp = timestamp
        self.files = files
        self.language = language
        self.compilation_outcome = compilation_outcome
        self.executables = executables if executables is not None else {}
        self.compilation_text = compilation_text
        self.evaluation_outcome = evaluation_outcome
        self.evaluations = evaluations if evaluations is not None else []
        self.compilation_tries = compilation_tries
        self.evaluation_tries = evaluation_tries
        self.token = token
        self.compilation_shard = compilation_shard
        self.compilation_sandbox = compilation_sandbox

    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task':
            self.task.name,
            'timestamp':
            make_timestamp(self.timestamp),
            'files':
            [_file.export_to_dict() for _file in self.files.itervalues()],
            'language':
            self.language,
            'compilation_outcome':
            self.compilation_outcome,
            'compilation_tries':
            self.compilation_tries,
            'compilation_text':
            self.compilation_text,
            'compilation_shard':
            self.compilation_shard,
            'compilation_sandbox':
            self.compilation_sandbox,
            'executables': [
                executable.export_to_dict()
                for executable in self.executables.itervalues()
            ],
            'evaluation_outcome':
            self.evaluation_outcome,
            'evaluations':
            [evaluation.export_to_dict() for evaluation in self.evaluations],
            'evaluation_tries':
            self.evaluation_tries,
            # None when no token was played; replaced below otherwise.
            'token':
            self.token
        }
        if self.token is not None:
            res['token'] = self.token.export_to_dict()
        return res

    @classmethod
    def import_from_dict(cls, data, tasks_by_name):
        """Build the object using data from a dictionary.

        data (dict): exported submission data (see export_to_dict).
        tasks_by_name (dict): Task objects indexed by task name, used
            to resolve the 'task' reference.

        return (Submission): the rebuilt submission (user left None;
            the caller is expected to attach it).

        """
        data['files'] = [
            File.import_from_dict(file_data) for file_data in data['files']
        ]
        data['files'] = dict([(_file.filename, _file)
                              for _file in data['files']])
        data['executables'] = [
            Executable.import_from_dict(executable_data)
            for executable_data in data['executables']
        ]
        data['executables'] = dict([(executable.filename, executable)
                                    for executable in data['executables']])
        data['evaluations'] = [
            Evaluation.import_from_dict(eval_data)
            for eval_data in data['evaluations']
        ]
        if data['token'] is not None:
            data['token'] = Token.import_from_dict(data['token'])
        data['task'] = tasks_by_name[data['task']]
        data['user'] = None
        data['timestamp'] = make_datetime(data['timestamp'])
        return cls(**data)

    def tokened(self):
        """Return if the user played a token against the submission.

        return (bool): True if tokened, False otherwise.

        """
        return self.token is not None

    def compiled(self):
        """Return if the submission has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    def evaluated(self):
        """Return if the submission has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None

    def scored(self):
        """Return if the submission has been scored.

        return (bool): True if scored, False otherwise.

        """
        return self.score is not None

    def invalidate_compilation(self):
        """Blank all compilation and evaluation outcomes, and the score.

        """
        self.invalidate_evaluation()
        self.compilation_outcome = None
        self.compilation_text = None
        self.compilation_tries = 0
        # Also forget where the compilation happened, so a stale
        # shard/sandbox reference does not survive the invalidation
        # (consistent with SubmissionResult.invalidate_compilation).
        self.compilation_shard = None
        self.compilation_sandbox = None
        self.executables = {}

    def invalidate_evaluation(self):
        """Blank the evaluation outcomes and the score.

        """
        self.invalidate_score()
        self.evaluation_outcome = None
        self.evaluations = []
        self.evaluation_tries = 0

    def invalidate_score(self):
        """Blank the score.

        """
        self.score = None
        self.score_details = None
        self.public_score = None
        self.public_score_details = None
        # Fixed: ranking_score_details was previously left untouched,
        # so stale ranking data could survive a re-score.
        self.ranking_score_details = None

    def play_token(self, timestamp=None):
        """Tell the submission that a token has been used.

        timestamp: the time the token has been played (forwarded to
            Token; presumably a datetime — docstring previously said
            int, TODO confirm against Token).

        """
        self.token = Token(timestamp=timestamp)
Esempio n. 30
0
class TaskRun(db.Model, DomainObject):
    '''A run of a given task by a specific user.
    '''
    __tablename__ = 'task_run'

    #: ID of the TaskRun
    id = Column(Integer, primary_key=True)
    #: UTC timestamp for when TaskRun is delivered to user.
    created = Column(Text, default=make_timestamp)
    #: Project.id of the project associated with this TaskRun.
    project_id = Column(Integer, ForeignKey('project.id'), nullable=False)
    #: Task.id of the task associated with this TaskRun.
    task_id = Column(Integer, ForeignKey('task.id', ondelete='CASCADE'),
                     nullable=False)
    #: User.id of the user contributing the TaskRun (only if authenticated)
    user_id = Column(Integer, ForeignKey('user.id'))
    #: User.ip of the user contributing the TaskRun (only if anonymous)
    user_ip = Column(Text)
    #: UTC timestamp for when TaskRun is saved to DB.
    finish_time = Column(Text, default=make_timestamp)
    timeout = Column(Integer)
    calibration = Column(Integer)
    #: External User ID
    external_uid = Column(Text)
    #: Value of the answer.
    info = Column(JSONB)
    '''General writable field that should be used by clients to record results\