Example #1
def create_view(metadata, molecule_design_pool_tbl, stock_sample_tbl, sample_tbl, container_tbl):
    """
    stock_info_view factory.
    """
    mdp = molecule_design_pool_tbl
    ss = stock_sample_tbl
    c = container_tbl
    s = sample_tbl
    stock = (
        select(
            [
                (
                    literal("mdp")
                    + cast(mdp.c.molecule_design_set_id, String)
                    + literal("c")
                    + cast(coalesce(ss.c.concentration * 1e6, 0), String)
                ).label("stock_info_id"),
                mdp.c.molecule_design_set_id,
                # We need to set the label explicitly here because
                # mdp.c.molecule_type_id is really mdp.c.molecule_type.
                mdp.c.molecule_type_id.label("molecule_type_id"),
                # pylint: disable=E1101
                coalesce(ss.c.concentration, 0).label("concentration"),
                coalesce(func.count(c.c.container_id), 0).label("total_tubes"),
                coalesce(func.sum(s.c.volume), 0).label("total_volume"),
                coalesce(func.min(s.c.volume), 0).label("minimum_volume"),
                coalesce(func.max(s.c.volume), 0).label("maximum_volume")
                # pylint: enable=E1101
            ],
            from_obj=mdp.outerjoin(ss, ss.c.molecule_design_set_id == mdp.c.molecule_design_set_id)
            .outerjoin(s, s.c.sample_id == ss.c.sample_id)
            .outerjoin(c, and_(c.c.container_id == s.c.container_id, c.c.item_status == _STOCK_CONTAINER_ITEM_STATUS)),
        )
        .group_by(mdp.c.molecule_design_set_id, ss.c.concentration)
        .alias("ssi")
    )
    fkey_mds = ForeignKey(mdp.c.molecule_design_set_id)
    fkey_mds.parent = stock.c.molecule_design_set_id
    stock.c.molecule_design_set_id.foreign_keys.add(fkey_mds)
    fkey_mt = ForeignKey(mdp.c.molecule_type_id)
    fkey_mt.parent = stock.c.molecule_type_id
    stock.c.molecule_type_id.foreign_keys.add(fkey_mt)
    return view_factory(VIEW_NAME, metadata, stock)
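The manual ForeignKey attachment above is what lets the ORM join the view's columns back to the source tables, since a derived selectable carries no foreign key metadata of its own. Below is a minimal, self-contained sketch of the same trick on generic tables (the table and column names are illustrative, not part of the project); it uses the legacy select([...]) call style to match the code above.
from sqlalchemy import Column, Integer, MetaData, Table, select
from sqlalchemy.schema import ForeignKey

demo_metadata = MetaData()
parent = Table('parent', demo_metadata,
               Column('id', Integer, primary_key=True))
child = Table('child', demo_metadata,
              Column('id', Integer, primary_key=True),
              Column('parent_id', Integer))

# The alias's columns are proxy Column objects without foreign keys, so we
# attach one by hand, exactly as done for the stock view columns above.
derived = select([child.c.id, child.c.parent_id]).alias('child_view')
fkey = ForeignKey(parent.c.id)          # the referenced column
fkey.parent = derived.c.parent_id       # the referencing (proxy) column
derived.c.parent_id.foreign_keys.add(fkey)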
Example #2
def create_view(metadata, molecule_tbl, single_supplier_molecule_design_tbl,
                supplier_molecule_design_tbl):
    """
    molecule_type_modification_view factory.
    """
    m = molecule_tbl
    ssmd = single_supplier_molecule_design_tbl
    smd = supplier_molecule_design_tbl
    msmd = select(
        [m.c.molecule_id,
         smd.c.supplier_molecule_design_id],
        from_obj=m
        .join(ssmd,
              ssmd.c.molecule_design_id == m.c.molecule_design_id)
        .join(smd,
              and_(smd.c.supplier_molecule_design_id ==
                   ssmd.c.supplier_molecule_design_id,
                   smd.c.supplier_id == m.c.supplier_id,
                   smd.c.is_current)),
    )
    fkey_m = ForeignKey(m.c.molecule_id)
    fkey_m.parent = msmd.c.molecule_id
    msmd.c.molecule_id.foreign_keys.add(fkey_m)
    return view_factory(VIEW_NAME, metadata, msmd)
Example #3
from sqlalchemy import Column, Table, Unicode
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.schema import ForeignKey

from . import metadata
from . import now_func
from ...utils import GUIDFactory
from .utc_dt import UTCDateTime


files = Table(
    'files',
    metadata,
    Column('guid', Unicode(64), primary_key=True, default=GUIDFactory('FL')),
    Column('session_guid', Unicode(64), ForeignKey(
        'sessions.guid',
        ondelete='CASCADE',
        onupdate='CASCADE',
    ), nullable=False, index=True),
    # name of file
    Column('filename', Unicode, nullable=False),
    # mime type of file
    Column('mime_type', Unicode, nullable=False),
    # TODO: save in amazon S3 instead?
    # file content
    Column('content', BYTEA, nullable=False),
    # AES 256 encryption IV
    Column('aes_iv', BYTEA),
    # TODO: add a hash column for querying files?
    Column('created_at', UTCDateTime, default=now_func),
    Column(
        'updated_at',
        # The snippet is truncated here in the source listing; a plausible
        # completion mirrors created_at and closes the table definition.
        UTCDateTime,
        default=now_func,
        onupdate=now_func,
    ),
)
Example #4
class Condition(ModelBase):
    """
    Holds information attached to particular run.
    Such as start comments, end comments, statistics, etc...
    :see ConditionType
    """

    __tablename__ = 'conditions'
    id = Column(Integer, primary_key=True)

    text_value = Column(Text, nullable=True, default=None)
    int_value = Column(Integer, nullable=False, default=0)
    float_value = Column(Float, nullable=False, default=0.0)
    bool_value = Column(Boolean, nullable=False, default=False)
    time_value = Column(DateTime, nullable=True, default=None)

    run_number = Column(Integer, ForeignKey('runs.number'))
    run = relationship("Run", back_populates="conditions")

    condition_type_id = Column('condition_type_id', Integer,
                               ForeignKey('condition_types.id'))
    type = relationship("ConditionType", back_populates="values")
    """:type: ConditionType"""

    created = Column(DateTime, default=datetime.datetime.now)

    @hybrid_property
    def name(self):
        return self.type.name

    @name.expression
    def name(cls):
        # The expression must be assigned to the same attribute name so that
        # querying on Condition.name maps to ConditionType.name.
        return ConditionType.name

    @hybrid_property
    def value_type(self):
        return self.type.value_type

    @hybrid_property
    def value(self):
        """ Gets value of the corrected type """
        field_type = self.type.value_type
        if field_type == ConditionType.INT_FIELD:
            return self.int_value
        if field_type == ConditionType.STRING_FIELD \
                or field_type == ConditionType.JSON_FIELD \
                or field_type == ConditionType.BLOB_FIELD:
            return self.text_value
        if field_type == ConditionType.FLOAT_FIELD:
            return self.float_value
        if field_type == ConditionType.BOOL_FIELD:
            return self.bool_value
        if field_type == ConditionType.TIME_FIELD:
            return self.time_value
        return self.text_value

    @value.setter
    def value(self, val):
        """ Gets value of the corrected type """
        field_type = self.type.value_type
        if field_type == ConditionType.INT_FIELD:
            self.int_value = val
        elif field_type == ConditionType.STRING_FIELD \
                or field_type == ConditionType.JSON_FIELD \
                or field_type == ConditionType.BLOB_FIELD:
            self.text_value = val
        elif field_type == ConditionType.FLOAT_FIELD:
            self.float_value = val
        elif field_type == ConditionType.BOOL_FIELD:
            self.bool_value = val
        elif field_type == ConditionType.TIME_FIELD:
            self.time_value = val
        else:
            raise ValueError(
                "Unknown field type! field_type='{}'".format(field_type))

    def __repr__(self):
        return "<Condition id='{}', run_number='{}', value={}>".format(
            self.id, self.run_number, self.value)
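A minimal usage sketch of the value dispatch above (the ConditionType constructor arguments are assumed from how its attributes are referenced here; no database session is needed):
ct = ConditionType(name='events_rate', value_type=ConditionType.FLOAT_FIELD)
cnd = Condition(type=ct, run_number=1)
cnd.value = 2.5              # routed to cnd.float_value by the setter above
assert cnd.float_value == 2.5
assert cnd.value == 2.5      # the getter reads the matching column back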
Example #5
class IndieGame(MagModel, ReviewMixin):
    studio_id = Column(UUID, ForeignKey('indie_studio.id'))
    title = Column(UnicodeText)
    brief_description = Column(UnicodeText)  # 140 max
    genres = Column(MultiChoice(c.MIVS_INDIE_GENRE_OPTS))
    platforms = Column(MultiChoice(c.MIVS_INDIE_PLATFORM_OPTS))
    platforms_text = Column(UnicodeText)
    description = Column(UnicodeText)  # 500 max
    how_to_play = Column(UnicodeText)  # 1000 max
    link_to_video = Column(UnicodeText)
    link_to_game = Column(UnicodeText)
    password_to_game = Column(UnicodeText)
    code_type = Column(Choice(c.MIVS_CODE_TYPE_OPTS), default=c.NO_CODE)
    code_instructions = Column(UnicodeText)
    build_status = Column(Choice(c.MIVS_BUILD_STATUS_OPTS),
                          default=c.PRE_ALPHA)
    build_notes = Column(UnicodeText)  # 500 max
    shown_events = Column(UnicodeText)
    video_submitted = Column(Boolean, default=False)
    submitted = Column(Boolean, default=False)
    agreed_liability = Column(Boolean, default=False)
    agreed_showtimes = Column(Boolean, default=False)
    agreed_reminder1 = Column(Boolean, default=False)
    agreed_reminder2 = Column(Boolean, default=False)
    alumni_years = Column(MultiChoice(c.PREV_MIVS_YEAR_OPTS))
    alumni_update = Column(UnicodeText)

    link_to_promo_video = Column(UnicodeText)
    link_to_webpage = Column(UnicodeText)
    twitter = Column(UnicodeText)
    facebook = Column(UnicodeText)
    other_social_media = Column(UnicodeText)

    tournament_at_event = Column(Boolean, default=False)
    tournament_prizes = Column(UnicodeText)
    has_multiplayer = Column(Boolean, default=False)
    player_count = Column(UnicodeText)

    # Length in minutes
    multiplayer_game_length = Column(Integer, nullable=True)
    leaderboard_challenge = Column(Boolean, default=False)

    status = Column(Choice(c.MIVS_GAME_STATUS_OPTS),
                    default=c.NEW,
                    admin_only=True)
    judge_notes = Column(UnicodeText, admin_only=True)
    registered = Column(UTCDateTime, server_default=utcnow())
    waitlisted = Column(UTCDateTime, nullable=True)
    accepted = Column(UTCDateTime, nullable=True)

    codes = relationship('IndieGameCode', backref='game')
    reviews = relationship('IndieGameReview', backref='game')
    images = relationship('IndieGameImage',
                          backref='game',
                          order_by='IndieGameImage.id')

    email_model_name = 'game'

    @presave_adjustment
    def accepted_time(self):
        if self.status == c.ACCEPTED and not self.accepted:
            self.accepted = datetime.now(UTC)

    @presave_adjustment
    def waitlisted_time(self):
        if self.status == c.WAITLISTED and not self.waitlisted:
            self.waitlisted = datetime.now(UTC)

    @property
    def email(self):
        return self.studio.email

    @property
    def reviews_to_email(self):
        return [review for review in self.reviews if review.send_to_studio]

    @property
    def video_href(self):
        return make_url(self.link_to_video)

    @property
    def href(self):
        return make_url(self.link_to_game)

    @property
    def screenshots(self):
        return [img for img in self.images if img.is_screenshot]

    @property
    def best_screenshots(self):
        return [
            img for img in self.images
            if img.is_screenshot and img.use_in_promo
        ]

    def best_screenshot_downloads(self, count=2):
        all_images = reversed(
            sorted(self.images,
                   key=lambda img: (img.is_screenshot and img.use_in_promo,
                                    img.is_screenshot, img.use_in_promo)))

        screenshots = []
        for i, screenshot in enumerate(all_images):
            if os.path.exists(screenshot.filepath):
                screenshots.append(screenshot)
                if len(screenshots) >= count:
                    break
        return screenshots

    def best_screenshot_download_filenames(self, count=2):
        nonchars = re.compile(r'[\W]+')
        best_screenshots = self.best_screenshot_downloads(count)
        screenshots = []
        for i, screenshot in enumerate(best_screenshots):
            if os.path.exists(screenshot.filepath):
                name = '_'.join([s for s in self.title.lower().split() if s])
                name = nonchars.sub('', name)
                filename = '{}_{}.{}'.format(name,
                                             len(screenshots) + 1,
                                             screenshot.extension.lower())
                screenshots.append(filename)
                if len(screenshots) >= count:
                    break
        return screenshots + ([''] * (count - len(screenshots)))

    @property
    def promo_image(self):
        return next(
            iter([img for img in self.images if not img.is_screenshot]), None)

    @property
    def missing_steps(self):
        steps = []
        if not self.link_to_game:
            steps.append(
                'You have not yet included a link to where the judges can '
                'access your game')
        if self.code_type != c.NO_CODE and self.link_to_game:
            if not self.codes:
                steps.append(
                    'You have not yet attached any codes to this game for '
                    'our judges to use')
            elif not self.unlimited_code \
                    and len(self.codes) < c.MIVS_CODES_REQUIRED:
                steps.append(
                    'You have not attached the {} codes you must provide '
                    'for our judges'.format(c.MIVS_CODES_REQUIRED))
        if not self.agreed_showtimes:
            steps.append(
                'You must agree to the showtimes detailed on the game form')
        if not self.agreed_liability:
            steps.append(
                'You must check the box that agrees to our liability waiver')

        return steps

    @property
    def video_broken(self):
        for r in self.reviews:
            if r.video_status == c.BAD_LINK:
                return True

    @property
    def unlimited_code(self):
        for code in self.codes:
            if code.unlimited_use:
                return code

    @property
    def video_submittable(self):
        return bool(self.link_to_video)

    @property
    def submittable(self):
        return not self.missing_steps

    @property
    def scores(self):
        return [r.game_score for r in self.reviews if r.game_score]

    @property
    def score_sum(self):
        return sum(self.scores, 0)

    @property
    def average_score(self):
        return (self.score_sum / len(self.scores)) if self.scores else 0

    @property
    def has_issues(self):
        return any(r.has_issues for r in self.reviews)

    @property
    def confirmed(self):
        return self.status == c.ACCEPTED \
            and self.studio \
            and self.studio.group_id
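As a side note, the filename construction in best_screenshot_download_filenames boils down to this string handling (the title and extension below are illustrative values):
import re
nonchars = re.compile(r'[\W]+')
name = '_'.join(s for s in 'Super Cool: The Game'.lower().split() if s)
name = nonchars.sub('', name)             # -> 'super_cool_the_game'
print('{}_{}.{}'.format(name, 1, 'png'))  # -> 'super_cool_the_game_1.png'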
Example #6
from sqlalchemy.schema import Column, ForeignKey, Table, UniqueConstraint
from sqlalchemy.types import Boolean, DateTime, Integer, LargeBinary, \
    UnicodeText

from abilian.core import sqlalchemy as sa_types
from abilian.core.util import fqcn

from .base import SEARCHABLE, SYSTEM, IdMixin, Indexable, TimestampedMixin, db

__all__ = ["User", "Group", "Principal", "ClearPasswordStrategy"]

# Tables for many-to-many relationships
following = Table(
    "following",
    db.Model.metadata,
    Column("follower_id", Integer, ForeignKey("user.id")),
    Column("followee_id", Integer, ForeignKey("user.id")),
    UniqueConstraint("follower_id", "followee_id"),
)

membership = Table(
    "membership",
    db.Model.metadata,
    Column(
        "user_id",
        Integer,
        ForeignKey("user.id", onupdate="CASCADE", ondelete="CASCADE"),
    ),
    Column(
        "group_id",
        Integer,
        # The snippet is truncated here in the source listing; a plausible
        # completion mirrors the user_id column above.
        ForeignKey("group.id", onupdate="CASCADE", ondelete="CASCADE"),
    ),
)
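The `following` table above is a self-referential many-to-many association. Here is a generic, self-contained sketch of how such a table is usually wired into a user model (illustrative names, not abilian's actual mapping):
from sqlalchemy import Column, ForeignKey, Integer, Table, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

DemoBase = declarative_base()

following_demo = Table(
    'following_demo', DemoBase.metadata,
    Column('follower_id', Integer, ForeignKey('user_demo.id')),
    Column('followee_id', Integer, ForeignKey('user_demo.id')),
    UniqueConstraint('follower_id', 'followee_id'),
)

class UserDemo(DemoBase):
    __tablename__ = 'user_demo'
    id = Column(Integer, primary_key=True)
    # Users this user follows; the reverse collection is exposed as .followers.
    followees = relationship(
        'UserDemo',
        secondary=following_demo,
        primaryjoin=id == following_demo.c.follower_id,
        secondaryjoin=id == following_demo.c.followee_id,
        backref='followers',
    )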
Example #7
class Keyword(_Base, SelectMixin):
    """
    Keywords, external or curated by the NLM.

    Attributes:

        pmid
            the record's identifier (PubMed ID)
        owner
            the entity that provided the keyword
        cnt
            a unique counter for all keywords from a given owner and record
            (starting from 1)
        major
            if the keyword is a major topic of this article
        name
            the keyword itself

    Primary Key: ``(pmid, owner, cnt)``
    """

    __tablename__ = 'keywords'

    OWNERS = frozenset({'NASA', 'PIP', 'KIE', 'NLM', 'NOTNLM', 'HHS'})

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    owner = Column(Enum(*OWNERS, name='owner'), primary_key=True)
    cnt = Column(SmallInteger, CheckConstraint("cnt > 0"), primary_key=True)
    major = Column(Boolean, nullable=False)
    name = Column(UnicodeText, CheckConstraint("name <> ''"), nullable=False)

    def __init__(self,
                 pmid: int,
                 owner: str,
                 cnt: int,
                 name: str,
                 major: bool = False):
        assert pmid > 0, pmid
        assert owner in Keyword.OWNERS, repr(owner)
        assert cnt > 0, cnt
        assert name, repr(name)
        self.pmid = pmid
        self.owner = owner
        self.cnt = cnt
        self.major = major
        self.name = name

    def __str__(self):
        return '{}\t{}\t{}\t{}\t{}\n'.format(NULL(self.pmid), NULL(self.owner),
                                             NULL(self.cnt),
                                             'T' if self.major else 'F',
                                             STRING(self.name))

    def __repr__(self):
        return "Keyword<{}:{}:{}>".format(self.pmid, self.owner, self.cnt)

    def __eq__(self, other):
        return isinstance(other, Keyword) and \
               self.pmid == other.pmid and \
               self.owner == other.owner and \
               self.cnt == other.cnt and \
               self.major == other.major and \
               self.name == other.name
Example #8
class Author(_Base, SelectMixin):
    """
    Author names for a PubMed record.

    Attributes:

        pmid
            the record this author belongs to
        pos
            the order/position of the name in the PubMed record (starting from 1)
        name
            an author's last name or the collective's name (never empty)
        initials
            an author's initials *
        forename
            the expansion of the initials known to PubMed *
        suffix
            an author name's suffix *

    * empty string if explicitly non-existent, NULL if unknown

    Primary Key: ``(pmid, pos)``
    """

    __tablename__ = 'authors'

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    pos = Column(SmallInteger, CheckConstraint("pos > 0"), primary_key=True)
    name = Column(UnicodeText, CheckConstraint("name <> ''"), nullable=False)
    initials = Column(Unicode(length=128), nullable=True)
    forename = Column(Unicode(length=256), nullable=True)
    suffix = Column(Unicode(length=128), nullable=True)

    def __init__(self,
                 pmid: int,
                 pos: int,
                 name: str,
                 initials: str = None,
                 forename: str = None,
                 suffix: str = None):
        assert pmid > 0, pmid
        assert pos > 0, pos
        assert name, repr(name)
        self.pmid = pmid
        self.pos = pos
        self.name = name
        self.initials = initials
        self.forename = forename
        self.suffix = suffix

    def __str__(self):
        return "{}\t{}\t{}\t{}\t{}\t{}\n".format(NULL(self.pmid),
                                                 NULL(self.pos),
                                                 STRING(self.name),
                                                 NULL(self.initials),
                                                 NULL(self.forename),
                                                 NULL(self.suffix))

    def __repr__(self):
        return "Author<{}:{}>".format(self.pmid, self.pos)

    def __eq__(self, other):
        return isinstance(other, Author) and \
               self.pmid == other.pmid and \
               self.pos == other.pos and \
               self.name == other.name and \
               self.initials == other.initials and \
               self.forename == other.forename and \
               self.suffix == other.suffix

    def fullName(self) -> str:
        """
        Return the full name of this author
        (using forename or initials, last, and suffix).
        """
        name = []
        if self.forename:
            name.append(self.forename)
        elif self.initials:
            name.append(self.initials)
        name.append(self.name)
        if self.suffix:
            name.append(self.suffix)
        return ' '.join(name)

    def shortName(self) -> str:
        "Return the short name of this author (using initials and last)."
        name = []
        if self.initials:
            name.append(self.initials)
        elif self.forename:
            name.append(''.join([n[0] for n in self.forename.split()]))
        name.append(self.name)
        return ' '.join(name)
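A quick illustration of the name helpers above (plain attribute access, no database needed; the sample values are made up):
author = Author(pmid=12345678, pos=1, name='Curie', initials='M',
                forename='Marie')
print(author.fullName())    # -> 'Marie Curie'
print(author.shortName())   # -> 'M Curie'
print(repr(author))         # -> 'Author<12345678:1>'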
Example #9
class Submission(Base):
    """Class to store a submission.

    """
    __tablename__ = 'submissions'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # User and Contest, thus Participation (id and object) that did the
    # submission.
    participation_id = Column(Integer,
                              ForeignKey(Participation.id,
                                         onupdate="CASCADE",
                                         ondelete="CASCADE"),
                              nullable=False,
                              index=True)
    participation = relationship(Participation, back_populates="submissions")

    # Task (id and object) of the submission.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    task = relationship(Task, back_populates="submissions")

    # Time of the submission.
    timestamp = Column(DateTime, nullable=False)

    # Language of submission, or None if not applicable.
    language = Column(String, nullable=True)

    # Comment from the administrator on the submission.
    comment = Column(Unicode, nullable=False, default="")

    # If false, submission will not be considered in contestant's score.
    official = Column(
        Boolean,
        nullable=False,
        default=True,
    )

    @property
    def short_comment(self):
        """The first line of the comment."""
        return self.comment.split("\n", 1)[0]

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    files = relationship(
        "File",
        collection_class=attribute_mapped_collection("filename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="submission")

    token = relationship("Token",
                         uselist=False,
                         cascade="all, delete-orphan",
                         passive_deletes=True,
                         back_populates="submission")

    results = relationship("SubmissionResult",
                           cascade="all, delete-orphan",
                           passive_deletes=True,
                           back_populates="submission")

    def get_result(self, dataset=None):
        """Return the result associated to a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult|None): the submission result
            associated to this submission and the given dataset, if it
            exists in the database, otherwise None.

        """
        if dataset is not None:
            # Use IDs to avoid triggering a lazy-load query.
            assert self.task_id == dataset.task_id
            dataset_id = dataset.id
        else:
            dataset_id = self.task.active_dataset_id

        return SubmissionResult.get_from_id((self.id, dataset_id),
                                            self.sa_session)

    def get_result_or_create(self, dataset=None):
        """Return and, if necessary, create the result for a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult): the submission result associated to
            this submission and the given dataset; if it does not
            exist, a new one is created.

        """
        if dataset is None:
            dataset = self.task.active_dataset

        submission_result = self.get_result(dataset)

        if submission_result is None:
            submission_result = SubmissionResult(submission=self,
                                                 dataset=dataset)

        return submission_result

    def tokened(self):
        """Return if the user played a token against the submission.

        return (bool): True if tokened, False otherwise.

        """
        return self.token is not None
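A hedged usage sketch of the result helpers above (the id is hypothetical and `session` is assumed to be an open SQLAlchemy Session bound to this schema):
submission = session.query(Submission).get(42)       # hypothetical id
result = submission.get_result_or_create()           # uses the task's active dataset
session.add(result)                                  # no-op if the result already existed
print(submission.tokened(), result.submission is submission)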
Example #10
class SubmissionResult(Base):
    """Class to store the evaluation results of a submission.

    """
    # Possible statuses of a submission result. COMPILING and
    # EVALUATING do not necessarily imply we are going to schedule
    # compilation and evaluation for these submission results: for
    # example, they might be for datasets not scheduled for
    # evaluation, or they might have passed the maximum number of
    # tries. If a submission result does not exist for a pair
    # (submission, dataset), its status can be implicitly assumed to
    # be COMPILING.
    COMPILING = 1
    COMPILATION_FAILED = 2
    EVALUATING = 3
    SCORING = 4
    SCORED = 5

    __tablename__ = 'submission_results'
    __table_args__ = (UniqueConstraint('submission_id', 'dataset_id'), )

    # Primary key is (submission_id, dataset_id).
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           primary_key=True)
    submission = relationship(Submission, back_populates="results")

    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        primary_key=True)
    dataset = relationship(Dataset)

    # Now below follow the actual result fields.

    # Compilation outcome (can be None = yet to compile, "ok" =
    # compilation successful and we can evaluate, "fail" =
    # compilation unsuccessful, throw it away).
    compilation_outcome = Column(Enum("ok", "fail",
                                      name="compilation_outcome"),
                                 nullable=True)

    # The output from the sandbox (to allow localization, the first item
    # of the list is a format string, possibly containing some "%s",
    # that will be filled in using the remaining items of the list).
    compilation_text = Column(ARRAY(String), nullable=False, default=[])

    # Number of failures during compilation.
    compilation_tries = Column(Integer, nullable=False, default=0)

    # The compiler stdout and stderr.
    compilation_stdout = Column(Unicode, nullable=True)
    compilation_stderr = Column(Unicode, nullable=True)

    # Other information about the compilation.
    compilation_time = Column(Float, nullable=True)
    compilation_wall_clock_time = Column(Float, nullable=True)
    compilation_memory = Column(BigInteger, nullable=True)

    # Worker shard and sandbox where the compilation was performed.
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(Unicode, nullable=True)

    # Evaluation outcome (can be None = yet to evaluate, "ok" =
    # evaluation successful). At any time, this should be equal to
    # evaluations != [].
    evaluation_outcome = Column(Enum("ok", name="evaluation_outcome"),
                                nullable=True)

    # Number of failures during evaluation.
    evaluation_tries = Column(Integer, nullable=False, default=0)

    # Score as computed by ScoringService. Null means not yet scored.
    score = Column(Float, nullable=True)

    # Score details. It's a JSON-like structure containing information
    # that is given to ScoreType.get_html_details to generate an HTML
    # snippet that is shown on AWS and, if the user used a token, on
    # CWS to display the details of the submission.
    # For example, results for each testcases, subtask, etc.
    score_details = Column(JSONB, nullable=True)

    # The same as the last two fields, but only showing information
    # visible to the user (assuming they did not use a token on this
    # submission).
    public_score = Column(Float, nullable=True)
    public_score_details = Column(JSONB, nullable=True)

    # Subtask scores as computed by ScoringService.
    # Null means not yet scored.
    # This is essential when score mode is SCORE_MODE_MAX_SUBTASK.
    subtask_scores = Column(ARRAY(Float), nullable=True)
    public_subtask_scores = Column(ARRAY(Float), nullable=True)

    # Ranking score details. It is a list of strings that are going to
    # be shown in a single row in the table of submission in RWS.
    ranking_score_details = Column(ARRAY(String), nullable=True)

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    executables = relationship(
        "Executable",
        collection_class=attribute_mapped_collection("filename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="submission_result")

    evaluations = relationship("Evaluation",
                               cascade="all, delete-orphan",
                               passive_deletes=True,
                               back_populates="submission_result")

    def get_status(self):
        """Return the status of this object.

        """
        if not self.compiled():
            return SubmissionResult.COMPILING
        elif self.compilation_failed():
            return SubmissionResult.COMPILATION_FAILED
        elif not self.evaluated():
            return SubmissionResult.EVALUATING
        elif not self.scored():
            return SubmissionResult.SCORING
        else:
            return SubmissionResult.SCORED

    def get_evaluation(self, testcase):
        """Return the Evaluation of this SR on the given Testcase, if any

        testcase (Testcase): the testcase the returned evaluation will
            belong to.

        return (Evaluation|None): the (only!) evaluation of this
            submission result on the given testcase, or None if there
            isn't any.

        """
        # Use IDs to avoid triggering a lazy-load query.
        assert self.dataset_id == testcase.dataset_id

        # XXX If self.evaluations is already loaded we can walk over it
        # and spare a query.
        # (We could use .one() and avoid a LIMIT but we would need to
        # catch a NoResultFound exception.)
        return self.sa_session.query(Evaluation)\
            .filter(Evaluation.submission_result == self)\
            .filter(Evaluation.testcase == testcase)\
            .first()

    def get_max_evaluation_resources(self):
        """Return the maximum time and memory used by this result

        return (float|None, int|None): max used time in seconds and
            memory in bytes, or None if data is incomplete or
            unavailable.

        """
        t, m = None, None
        if self.evaluated() and self.evaluations:
            for ev in self.evaluations:
                if ev.execution_time is not None \
                        and (t is None or t < ev.execution_time):
                    t = ev.execution_time
                if ev.execution_memory is not None \
                        and (m is None or m < ev.execution_memory):
                    m = ev.execution_memory
        return (t, m)

    def compiled(self):
        """Return whether the submission result has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    @staticmethod
    def filter_compiled():
        """Return a filtering expression for compiled submission results.

        """
        return SubmissionResult.compilation_outcome.isnot(None)

    def compilation_failed(self):
        """Return whether the submission result did not compile.

        return (bool): True if the compilation failed (in the sense
            that there is a problem in the user's source), False if
            not yet compiled or compilation was successful.

        """
        return self.compilation_outcome == "fail"

    @staticmethod
    def filter_compilation_failed():
        """Return a filtering expression for submission results failing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "fail"

    def compilation_succeeded(self):
        """Return whether the submission compiled.

        return (bool): True if the compilation succeeded (in the sense
            that an executable was created), False if not yet compiled
            or compilation was unsuccessful.

        """
        return self.compilation_outcome == "ok"

    @staticmethod
    def filter_compilation_succeeded():
        """Return a filtering expression for submission results passing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "ok"

    def evaluated(self):
        """Return whether the submission result has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None

    @staticmethod
    def filter_evaluated():
        """Return a filtering lambda for evaluated submission results.

        """
        return SubmissionResult.evaluation_outcome.isnot(None)

    def needs_scoring(self):
        """Return whether the submission result needs to be scored.

        return (bool): True if in need of scoring, False otherwise.

        """
        return (self.compilation_failed() or self.evaluated()) and \
            not self.scored()

    def scored(self):
        """Return whether the submission result has been scored.

        return (bool): True if scored, False otherwise.

        """
        return all(
            getattr(self, k) is not None for k in [
                "score", "subtask_scores", "public_score",
                "public_subtask_scores", "ranking_score_details"
            ])

    @staticmethod
    def filter_scored():
        """Return a filtering lambda for scored submission results.

        """
        return ((SubmissionResult.score.isnot(None))
                & (SubmissionResult.score_details.isnot(None))
                & (SubmissionResult.public_score.isnot(None))
                & (SubmissionResult.public_score_details.isnot(None))
                & (SubmissionResult.ranking_score_details.isnot(None)))

    def invalidate_compilation(self):
        """Blank all compilation and evaluation outcomes, and the score.

        """
        self.invalidate_evaluation()
        self.compilation_outcome = None
        self.compilation_text = []
        self.compilation_tries = 0
        self.compilation_time = None
        self.compilation_wall_clock_time = None
        self.compilation_memory = None
        self.compilation_shard = None
        self.compilation_sandbox = None
        self.executables = {}

    def invalidate_evaluation(self):
        """Blank the evaluation outcome, all evaluations, and the score.

        """
        self.invalidate_evaluation_result()
        self.evaluations = []

    def invalidate_evaluation_result(self):
        """Blank the evaluation outcome and the score.

        """
        self.invalidate_score()
        self.evaluation_outcome = None
        self.evaluation_tries = 0

    def invalidate_score(self):
        """Blank the score.

        """
        self.score = None
        self.score_details = None
        self.public_score = None
        self.public_score_details = None
        self.ranking_score_details = None

    def set_compilation_outcome(self, success):
        """Set the compilation outcome based on the success.

        success (bool): if the compilation was successful.

        """
        self.compilation_outcome = "ok" if success else "fail"

    def set_evaluation_outcome(self):
        """Set the evaluation outcome (always ok now).

        """
        self.evaluation_outcome = "ok"
Example #11
class InstalledComponent(componentsBase):
    """
  This class defines the schema of the InstalledComponents table in the
  InstalledComponentsDB database
  """

    __tablename__ = 'InstalledComponents'
    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}

    componentID = Column('ComponentID',
                         Integer,
                         ForeignKey('Components.ComponentID'),
                         primary_key=True)
    hostID = Column('HostID',
                    Integer,
                    ForeignKey('Hosts.HostID'),
                    primary_key=True)
    instance = Column('Instance', String(64), primary_key=True)
    installationTime = Column('InstallationTime', DateTime, primary_key=True)
    unInstallationTime = Column('UnInstallationTime', DateTime)
    installedBy = Column('InstalledBy', String(32))
    unInstalledBy = Column('UnInstalledBy', String(32))
    installationComponent = relationship('Component',
                                         backref='installationList')

    def __init__(self,
                 instance=null(),
                 installationTime=null(),
                 unInstallationTime=null(),
                 installedBy=null(),
                 unInstalledBy=null()):
        self.instance = instance
        self.installationTime = installationTime
        self.unInstallationTime = unInstallationTime
        self.installedBy = installedBy
        self.unInstalledBy = unInstalledBy

    def fromDict(self, dictionary):
        """
    Fill the fields of the InstalledComponent object from a dictionary
    The dictionary may contain the keys: ComponentID, HostID, Instance,
    InstallationTime, UnInstallationTime
    """

        self.componentID = dictionary.get('ComponentID', self.componentID)
        self.hostID = dictionary.get('HostID', self.hostID)
        self.instance = dictionary.get('Instance', self.instance)
        self.installationTime = dictionary.get('InstallationTime',
                                               self.installationTime)
        self.unInstallationTime = dictionary.get('UnInstallationTime',
                                                 self.unInstallationTime)
        self.installedBy = dictionary.get('InstalledBy', self.installedBy)
        self.unInstalledBy = dictionary.get('UnInstalledBy',
                                            self.unInstalledBy)

        return S_OK('Successfully read from dictionary')

    def toDict(self, includeComponents=False, includeHosts=False):
        """
    Return the object as a dictionary
    If includeComponents is True, information about which Components where
    installed is included
    If includeHosts is True, information about the Hosts where the
    installations are is included
    """

        dictionary = {
            'Instance': self.instance,
            'InstallationTime': self.installationTime,
            'UnInstallationTime': self.unInstallationTime,
            'InstalledBy': self.installedBy,
            'UnInstalledBy': self.unInstalledBy
        }

        if includeComponents:
            dictionary['Component'] = self.installationComponent.toDict(
            )['Value']
        else:
            dictionary['ComponentID'] = self.componentID

        if includeHosts:
            dictionary['Host'] = self.installationHost.toDict()['Value']
        else:
            dictionary['HostID'] = self.hostID

        return S_OK(dictionary)
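A brief usage sketch of fromDict/toDict above (the values are hypothetical; the result payload is read from the 'Value' key, just as the methods above already do):
component = InstalledComponent()
component.fromDict({'Instance': 'Production', 'InstalledBy': 'admin'})
result = component.toDict()
print(result['Value']['Instance'])       # -> 'Production'
print(result['Value']['InstalledBy'])    # -> 'admin'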
Example #12
class Task(Base):
    """Class to store a task.

    """
    __tablename__ = 'tasks'
    __table_args__ = (
        UniqueConstraint('contest_id', 'num'),
        UniqueConstraint('contest_id', 'name'),
        ForeignKeyConstraint(
            ("id", "active_dataset_id"),
            ("datasets.task_id", "datasets.id"),
            onupdate="SET NULL",
            ondelete="SET NULL",
            # Use an ALTER query to set this foreign key after
            # both tables have been CREATEd, to avoid circular
            # dependencies.
            use_alter=True,
            name="fk_active_dataset_id"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True,
        # Needed to enable autoincrement on integer primary keys that
        # are referenced by a foreign key defined on this table.
        autoincrement='ignore_fk')

    # Number of the task for sorting.
    num = Column(Integer, nullable=False)

    # Contest (id and object) owning the task.
    contest_id = Column(Integer,
                        ForeignKey(Contest.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    contest = relationship(Contest,
                           backref=backref(
                               'tasks',
                               collection_class=ordering_list('num'),
                               order_by=[num],
                               cascade="all, delete-orphan",
                               passive_deletes=True))

    # Short name and long human readable title of the task.
    name = Column(Unicode, nullable=False)
    title = Column(Unicode, nullable=False)

    # A JSON-encoded list of strings: the language codes of the
    # statements that will be highlighted to all users for this task.
    primary_statements = Column(String, nullable=False, default="[]")

    # The parameters that control task-tokens follow. Note that their
    # effect during the contest depends on the interaction with the
    # parameters that control contest-tokens, defined on the Contest.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(Enum("disabled",
                             "finite",
                             "infinite",
                             name="token_mode"),
                        nullable=False,
                        default="disabled")

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on this tasks).
    token_max_number = Column(Integer,
                              CheckConstraint("token_max_number > 0"),
                              nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on this task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(Integer,
                               CheckConstraint("token_gen_initial >= 0"),
                               nullable=False,
                               default=2)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False,
                              default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(Integer,
                           CheckConstraint("token_gen_max > 0"),
                           nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # on this task during the whole contest or None to not enforce
    # this limitation.
    max_submission_number = Column(
        Integer, CheckConstraint("max_submission_number > 0"), nullable=True)
    max_user_test_number = Column(Integer,
                                  CheckConstraint("max_user_test_number > 0"),
                                  nullable=True)

    # Minimum interval between two submissions or user_tests for this
    # task, or None to not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this task will be rounded to this number of
    # decimal places.
    score_precision = Column(Integer,
                             CheckConstraint("score_precision >= 0"),
                             nullable=False,
                             default=0)

    # Active Dataset (id and object) currently being used for scoring.
    # The ForeignKeyConstraint for this column is set at table-level.
    active_dataset_id = Column(Integer, nullable=True)
    active_dataset = relationship(
        'Dataset',
        foreign_keys=[active_dataset_id],
        # XXX In SQLAlchemy 0.8 we could remove this:
        primaryjoin='Task.active_dataset_id == Dataset.id',
        # Use an UPDATE query *after* an INSERT query (and *before* a
        # DELETE query) to set (and unset) the column associated to
        # this relationship.
        post_update=True)
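Below is a generic, self-contained sketch (illustrative names, not CMS code) of the circular-reference pattern used for active_dataset above: the parent row points at one of its own children, so the foreign key is declared with use_alter and the relationship uses post_update. The composite key of the real schema is simplified to a single column here.
from sqlalchemy import Column, ForeignKey, ForeignKeyConstraint, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

CircularBase = declarative_base()

class DemoTask(CircularBase):
    __tablename__ = 'demo_tasks'
    __table_args__ = (
        # Added with ALTER after both tables exist, to break the CREATE cycle.
        ForeignKeyConstraint(['active_dataset_id'], ['demo_datasets.id'],
                             use_alter=True, name='fk_demo_active_dataset'),
    )
    id = Column(Integer, primary_key=True)
    active_dataset_id = Column(Integer, nullable=True)
    active_dataset = relationship(
        'DemoDataset',
        foreign_keys=[active_dataset_id],
        primaryjoin='DemoTask.active_dataset_id == DemoDataset.id',
        # Set via a separate UPDATE after the INSERTs, breaking the flush cycle.
        post_update=True)

class DemoDataset(CircularBase):
    __tablename__ = 'demo_datasets'
    id = Column(Integer, primary_key=True)
    task_id = Column(Integer,
                     ForeignKey('demo_tasks.id', onupdate='CASCADE',
                                ondelete='CASCADE'),
                     nullable=False)
    task = relationship('DemoTask', foreign_keys=[task_id],
                        backref='datasets')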
Example #13
class Dataset(Base):
    """Class to store the information about a data set.

    """
    __tablename__ = 'datasets'
    __table_args__ = (
        UniqueConstraint('task_id', 'description'),
        # Useless, in theory, because 'id' is already unique. Yet, we
        # need this because it's a target of a foreign key.
        UniqueConstraint('id', 'task_id'),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Task (id and object) owning the dataset.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False)
    task = relationship(
        Task,
        foreign_keys=[task_id],
        # XXX In SQLAlchemy 0.8 we could remove this:
        primaryjoin='Task.id == Dataset.task_id',
        backref=backref('datasets',
                        cascade="all, delete-orphan",
                        passive_deletes=True))

    # A human-readable text describing the dataset.
    description = Column(Unicode, nullable=False)

    # Whether this dataset will be automatically judged by ES and SS
    # "in background", together with the active dataset of each task.
    autojudge = Column(Boolean, nullable=False, default=False)

    # Time and memory limits for every testcase.
    time_limit = Column(Float, nullable=True)
    memory_limit = Column(Integer, nullable=True)

    # Name of the TaskType child class suited for the task.
    task_type = Column(String, nullable=False)

    # Parameters for the task type class, JSON encoded.
    task_type_parameters = Column(String, nullable=False)

    # Name of the ScoreType child class suited for the task.
    score_type = Column(String, nullable=False)

    # Parameters for the score type class, JSON encoded.
    score_type_parameters = Column(String, nullable=False)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # managers (dict of Manager objects indexed by filename)
    # testcases (dict of Testcase objects indexed by codename)

    @property
    def active(self):
        """Shorthand for detecting if the dataset is active.

        return (bool): True if this dataset is the active one for its
            task.

        """
        return self is self.task.active_dataset
Example #14
from sqlalchemy import Column, ForeignKey, Index, Integer, Table

from flexget import db_schema, plugin
from flexget.db_schema import UpgradeImpossible
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.log import log_once
from flexget.utils.imdb import ImdbSearch, ImdbParser, extract_id, make_url
from flexget.utils.database import with_session

SCHEMA_VER = 8

Base = db_schema.versioned_base('imdb_lookup', SCHEMA_VER)

# association tables
genres_table = Table('imdb_movie_genres', Base.metadata,
                     Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
                     Column('genre_id', Integer, ForeignKey('imdb_genres.id')),
                     Index('ix_imdb_movie_genres', 'movie_id', 'genre_id'))
Base.register_table(genres_table)

actors_table = Table('imdb_movie_actors', Base.metadata,
                     Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
                     Column('actor_id', Integer, ForeignKey('imdb_actors.id')),
                     Index('ix_imdb_movie_actors', 'movie_id', 'actor_id'))
Base.register_table(actors_table)

directors_table = Table(
    'imdb_movie_directors', Base.metadata,
    Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
    Column('director_id', Integer, ForeignKey('imdb_directors.id')),
    Index('ix_imdb_movie_directors', 'movie_id', 'director_id'))
Example #15
class Task(Base):
    """Class to store a task. Not to be used directly (import it from
    SQLAlchemyAll).

    """
    __tablename__ = 'tasks'
    __table_args__ = (
        UniqueConstraint('contest_id', 'num', name='cst_task_contest_id_num'),
        UniqueConstraint('contest_id', 'name',
                         name='cst_task_contest_id_name'),
        CheckConstraint("token_initial <= token_max"),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Number of the task for sorting.
    num = Column(Integer, nullable=False)

    # Contest (id and object) owning the task.
    contest_id = Column(Integer,
                        ForeignKey(Contest.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    contest = relationship(Contest,
                           backref=backref(
                               'tasks',
                               collection_class=ordering_list('num'),
                               order_by=[num],
                               cascade="all, delete-orphan",
                               passive_deletes=True))

    # Short name and long human readable title of the task.
    name = Column(String, nullable=False)
    title = Column(String, nullable=False)

    # A JSON-encoded list of strings: the language codes of the
    # statements that will be highlighted to all users for this task.
    primary_statements = Column(String, nullable=False)

    # Time and memory limits for every testcase.
    time_limit = Column(Float, nullable=True)
    memory_limit = Column(Integer, nullable=True)

    # Name of the TaskType child class suited for the task.
    task_type = Column(String, nullable=False)

    # Parameters for the task type class, JSON encoded.
    task_type_parameters = Column(String, nullable=False)

    # Name of the ScoreType child class suited for the task.
    score_type = Column(String, nullable=False)

    # Parameters for the scorer class, JSON encoded.
    score_parameters = Column(String, nullable=False)

    # Parameters that define the token behaviour. See Contest.py for
    # details. The only difference is that these parameters influence
    # the contest on a task-by-task basis. To play a token on a given
    # task, a user must satisfy the conditions of both the contest and
    # the task.
    token_initial = Column(Integer,
                           CheckConstraint("token_initial >= 0"),
                           nullable=True)
    token_max = Column(Integer,
                       CheckConstraint("token_max > 0"),
                       nullable=True)
    token_total = Column(Integer,
                         CheckConstraint("token_total > 0"),
                         nullable=True)
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False)
    token_gen_time = Column(Interval,
                            CheckConstraint("token_gen_time >= '0 seconds'"),
                            nullable=False)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False)

    # Maximum number of submissions or user_tests allowed for each user
    # on this task during the whole contest or None to not enforce
    # this limitation.
    max_submission_number = Column(
        Integer, CheckConstraint("max_submission_number > 0"), nullable=True)
    max_user_test_number = Column(Integer,
                                  CheckConstraint("max_user_test_number > 0"),
                                  nullable=True)

    # Minimum interval between two submissions or user_tests for this
    # task, or None to not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # submission_format (list of SubmissionFormatElement objects)
    # testcases (list of Testcase objects)
    # attachments (dict of Attachment objects indexed by filename)
    # managers (dict of Manager objects indexed by filename)
    # statements (dict of Statement objects indexed by language code)
    # submissions (list of Submission objects)
    # user_tests (list of UserTest objects)

    # This object (independent from SQLAlchemy) is the instance of the
    # ScoreType class with the given parameters, taking care of
    # building the scores of the submissions.
    scorer = None

    def __init__(self,
                 name,
                 title,
                 statements,
                 attachments,
                 time_limit,
                 memory_limit,
                 primary_statements,
                 task_type,
                 task_type_parameters,
                 submission_format,
                 managers,
                 score_type,
                 score_parameters,
                 testcases,
                 token_initial=None,
                 token_max=None,
                 token_total=None,
                 token_min_interval=timedelta(),
                 token_gen_time=timedelta(),
                 token_gen_number=0,
                 max_submission_number=None,
                 max_user_test_number=None,
                 min_submission_interval=None,
                 min_user_test_interval=None,
                 contest=None,
                 num=0):
        for filename, attachment in attachments.iteritems():
            attachment.filename = filename
        for filename, manager in managers.iteritems():
            manager.filename = filename
        for language, statement in statements.iteritems():
            statement.language = language

        self.num = num
        self.name = name
        self.title = title
        self.statements = statements
        self.attachments = attachments
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.primary_statements = primary_statements if primary_statements is not None else "[]"
        self.task_type = task_type
        self.task_type_parameters = task_type_parameters
        self.submission_format = submission_format
        self.managers = managers
        self.score_type = score_type
        self.score_parameters = score_parameters
        self.testcases = testcases
        self.token_initial = token_initial
        self.token_max = token_max
        self.token_total = token_total
        self.token_min_interval = token_min_interval
        self.token_gen_time = token_gen_time
        self.token_gen_number = token_gen_number
        self.max_submission_number = max_submission_number
        self.max_user_test_number = max_user_test_number
        self.min_submission_interval = min_submission_interval
        self.min_user_test_interval = min_user_test_interval
        self.contest = contest

    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {
            'name':
            self.name,
            'title':
            self.title,
            'num':
            self.num,
            'statements': [
                statement.export_to_dict()
                for statement in self.statements.itervalues()
            ],
            'attachments': [
                attachment.export_to_dict()
                for attachment in self.attachments.itervalues()
            ],
            'time_limit':
            self.time_limit,
            'memory_limit':
            self.memory_limit,
            'primary_statements':
            self.primary_statements,
            'task_type':
            self.task_type,
            'task_type_parameters':
            self.task_type_parameters,
            'submission_format':
            [element.export_to_dict() for element in self.submission_format],
            'managers': [
                manager.export_to_dict()
                for manager in self.managers.itervalues()
            ],
            'score_type':
            self.score_type,
            'score_parameters':
            self.score_parameters,
            'testcases':
            [testcase.export_to_dict() for testcase in self.testcases],
            'token_initial':
            self.token_initial,
            'token_max':
            self.token_max,
            'token_total':
            self.token_total,
            'token_min_interval':
            self.token_min_interval.total_seconds(),
            'token_gen_time':
            self.token_gen_time.total_seconds(),
            'token_gen_number':
            self.token_gen_number,
            'max_submission_number':
            self.max_submission_number,
            'max_user_test_number':
            self.max_user_test_number,
            'min_submission_interval':
            self.min_submission_interval.total_seconds()
            if self.min_submission_interval is not None else None,
            'min_user_test_interval':
            self.min_user_test_interval.total_seconds()
            if self.min_user_test_interval is not None else None,
        }

    @classmethod
    def import_from_dict(cls, data):
        """Build the object using data from a dictionary.

        """
        data['attachments'] = [
            Attachment.import_from_dict(attch_data)
            for attch_data in data['attachments']
        ]
        data['attachments'] = dict([(attachment.filename, attachment)
                                    for attachment in data['attachments']])
        data['submission_format'] = [
            SubmissionFormatElement.import_from_dict(sfe_data)
            for sfe_data in data['submission_format']
        ]
        data['managers'] = [
            Manager.import_from_dict(manager_data)
            for manager_data in data['managers']
        ]
        data['managers'] = dict([(manager.filename, manager)
                                 for manager in data['managers']])
        data['testcases'] = [
            Testcase.import_from_dict(testcase_data)
            for testcase_data in data['testcases']
        ]
        data['statements'] = [
            Statement.import_from_dict(statement_data)
            for statement_data in data['statements']
        ]
        data['statements'] = dict([(statement.language, statement)
                                   for statement in data['statements']])
        if 'token_min_interval' in data:
            data['token_min_interval'] = \
                timedelta(seconds=data['token_min_interval'])
        if 'token_gen_time' in data:
            data['token_gen_time'] = timedelta(seconds=data['token_gen_time'])
        if 'min_submission_interval' in data and \
                data['min_submission_interval'] is not None:
            data['min_submission_interval'] = \
                timedelta(seconds=data['min_submission_interval'])
        if 'min_user_test_interval' in data and \
                data['min_user_test_interval'] is not None:
            data['min_user_test_interval'] = \
                timedelta(seconds=data['min_user_test_interval'])
        return cls(**data)
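# --- Added usage sketch (not part of the original example; assumes the class
# above is CMS's Task) ---
# export_to_dict() flattens the nested statement/attachment/manager/testcase
# objects into lists of plain dicts, and import_from_dict() rebuilds those
# objects before calling the constructor, so the two methods form a round trip.
def _task_roundtrip(task):
    """Hedged sketch: serialize a Task and rebuild an equivalent one."""
    exported = task.export_to_dict()          # JSON-serializable values only
    clone = Task.import_from_dict(exported)   # nested dicts become objects again
    assert clone.name == task.name
    return clone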
Example no. 16
    def _s_customize(cls, **kwargs):
        """This function duplicates and customizes the class it belongs to. The
        original class remains unchanged.

        Not meant to be overridden.
        """
        def _log_debug(s, *args):
            logger.debug("\t%s: %s" % (cls.get_type_name(), s), *args)

        cls_dict = odict({
            '__module__': cls.__module__,
            '__doc__': cls.__doc__
        })

        if getattr(cls, '__orig__', None) is None:
            cls_dict['__orig__'] = cls
        else:
            cls_dict['__orig__'] = cls.__orig__

        class Attributes(cls.Attributes):
            _explicit_type_name = False

        if cls.Attributes.translations is None:
            Attributes.translations = {}
        if cls.Attributes.sqla_column_args is None:
            Attributes.sqla_column_args = (), {}
        else:
            Attributes.sqla_column_args = deepcopy(
                cls.Attributes.sqla_column_args)

        cls_dict['Attributes'] = Attributes

        # properties get reset every time a new class is defined. So we need
        # to reinitialize them explicitly.
        for k in ('nillable', '_xml_cloth', '_xml_root_cloth', '_html_cloth',
                  '_html_root_cloth'):
            v = getattr(cls.Attributes, k)
            if v is not None:
                setattr(Attributes, k, v)

        class Annotations(cls.Annotations):
            pass

        cls_dict['Annotations'] = Annotations

        # get protocol attrs
        prot = kwargs.get('protocol', None)
        if prot is None:
            prot = kwargs.get('prot', None)
        if prot is None:
            prot = kwargs.get('p', None)
        if prot is not None and len(prot.type_attrs) > 0:
            # if there is a class customization from protocol, do it

            type_attrs = prot.type_attrs.copy()
            type_attrs.update(kwargs)
            _log_debug("kwargs %r => %r from prot typeattr %r", kwargs,
                       type_attrs, prot.type_attrs)
            kwargs = type_attrs

        for k, v in kwargs.items():
            if k.startswith('_'):
                _log_debug("ignoring '%s' because of leading underscore", k)
                continue

            if k in ('protocol', 'prot', 'p'):
                Attributes.prot = v
                _log_debug("setting prot=%r", v)

            elif k in ('voa', 'validate_on_assignment'):
                Attributes.validate_on_assignment = v
                _log_debug("setting voa=%r", v)

            elif k in ('parser', 'cast'):
                setattr(Attributes, k, staticmethod(v))
                _log_debug("setting %s=%r", k, v)

            elif k in ("doc", "appinfo"):
                setattr(Annotations, k, v)
                _log_debug("setting Annotations.%s=%r", k, v)

            elif k in ('primary_key', 'pk'):
                setattr(Attributes, 'primary_key', v)
                Attributes.sqla_column_args[-1]['primary_key'] = v
                _log_debug("setting primary_key=%r", v)

            elif k in ('protocol_attrs', 'prot_attrs', 'pa'):
                setattr(Attributes, 'prot_attrs', _decode_pa_dict(v))
                _log_debug("setting prot_attrs=%r", v)

            elif k in ('foreign_key', 'fk'):
                from sqlalchemy.schema import ForeignKey
                t, d = Attributes.sqla_column_args
                fkt = (ForeignKey(v), )
                new_v = (t + fkt, d)
                Attributes.sqla_column_args = new_v
                _log_debug("setting sqla_column_args=%r", new_v)

            elif k in ('autoincrement', 'onupdate', 'server_default'):
                Attributes.sqla_column_args[-1][k] = v
                _log_debug("adding %s=%r to Attributes.sqla_column_args", k, v)

            elif k == 'values_dict':
                assert 'values' not in v, "`values` and `values_dict` can't " \
                                          "be specified at the same time"

                if not isinstance(v, dict):
                    # our odict has one nasty implicit behaviour: setitem on
                    # int keys is treated as array indexes, not dict keys. so
                    # dicts with int indexes can't work with odict. so we use
                    # the one from stdlib
                    v = OrderedDict(v)

                Attributes.values = v.keys()
                Attributes.values_dict = v
                _log_debug("setting values=%r, values_dict=%r",
                           Attributes.values, Attributes.values_dict)

            elif k == 'exc_table':
                Attributes.exc_table = v
                Attributes.exc_db = v
                _log_debug("setting exc_table=%r, exc_db=%r", v, v)

            elif k == 'max_occurs' and v in ('unbounded', 'inf', float('inf')):
                new_v = decimal.Decimal('inf')
                setattr(Attributes, k, new_v)
                _log_debug("setting max_occurs=%r", new_v)

            elif k == 'type_name':
                Attributes._explicit_type_name = True
                _log_debug("setting _explicit_type_name=True because "
                           "we have 'type_name'")

            else:
                setattr(Attributes, k, v)
                _log_debug("setting %s=%r", k, v)

        return (cls.__name__, (cls, ), cls_dict)
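    # --- Added note (not part of the original snippet) ---
    # Calling a spyne model class with keyword arguments routes through
    # _s_customize(), so the kwargs above surface as Attributes overrides.
    # A hedged sketch of how that looks to callers (assuming spyne's public
    # primitives; names below are illustrative, not taken from this snippet):
    #
    #     from spyne.model.primitive import Integer, Unicode
    #     Name = Unicode(64, nullable=False)   # plain Attributes override
    #     RowId = Integer(pk=True)             # sqla_column_args: primary_key=True
    #     OwnerId = Integer(fk='owners.id')    # sqla_column_args: ForeignKey('owners.id')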
Example no. 17
class Identifier(_Base, SelectMixin):
    """
    All known unique IDs for a PubMed record.

    Attributes:

        pmid
            the record this alternate ID belongs to
        namespace
            the type of ID (doi, pii, pmc, pubmed, etc.)
        value
            the actual ID string

    Primary Key: ``(pmid, namespace)``
    """

    __tablename__ = 'identifiers'

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    namespace = Column(Unicode(length=32),
                       CheckConstraint("namespace <> ''"),
                       primary_key=True)
    value = Column(Unicode(length=256),
                   CheckConstraint("value <> ''"),
                   nullable=False)

    def __init__(self, pmid: int, namespace: str, value: str):
        assert pmid > 0, pmid
        assert namespace
        assert value
        self.pmid = pmid
        self.namespace = namespace
        self.value = value

    def __str__(self):
        return '{}\t{}\t{}\n'.format(NULL(self.pmid), NULL(self.namespace),
                                     STRING(self.value))

    def __repr__(self):
        return "Identifier<{}:{}>".format(self.pmid, self.namespace)

    def __eq__(self, other):
        return isinstance(other, Identifier) and \
               self.pmid == other.pmid and \
               self.namespace == other.namespace and \
               self.value == other.value

    @classmethod
    def pmid2doi(cls, pmid: int) -> str:
        "Convert a PMID to a DOI (or ``None`` if no mapping is found)."
        c = cls.__table__.c
        query = select([c.value], (c.namespace == 'doi') & (c.pmid == pmid))
        return _fetch_first(query)

    @classmethod
    def doi2pmid(cls, doi: str) -> int:
        "Convert a DOI to a PMID (or ``None`` if no mapping is found)."
        c = cls.__table__.c
        query = select([c.pmid], (c.namespace == 'doi') & (c.value == doi))
        return _fetch_first(query)

    @classmethod
    def mapDois2Pmids(cls, dois: list) -> dict:
        """
        Return a mapping :class:`dict` for a list of DOIs to their PMIDs
        (or an empty :class:`dict` if no mapping is found).

        If for a given DOI no mapping exists, it is not included in the
        returned dictionary.
        """
        if not len(dois):
            return {}

        c = cls.__table__.c
        query = select([c.value, c.pmid],
                       (c.namespace == 'doi') & c.value.in_(dois))
        mappings = _fetch_all(query)
        return dict(mappings) if mappings is not None else {}

    @classmethod
    def mapPmids2Dois(cls, pmids: list) -> dict:
        """
        Return a mapping :class:`dict` for a list of PMIDs to their DOIs
        (or an empty :class:`dict` if no mapping is found).

        If for a given PMID no mapping exists, it is not included in the
        returned dictionary.
        """
        if not len(pmids):
            return {}

        t = cls.__table__
        query = select([t.c.pmid, t.c.value],
                       (t.c.namespace == 'doi') & t.c.pmid.in_(pmids))
        mappings = _fetch_all(query)
        return dict(mappings) if mappings is not None else {}
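# --- Added usage sketch (not part of the original example) ---
# The classmethods above build plain SELECTs and run them through the module's
# _fetch_first/_fetch_all helpers, so a bound engine/session is assumed; the
# identifiers used below are placeholders.
def _doi_lookup_demo():
    """Hedged sketch of the single and bulk DOI/PMID lookups."""
    doi = Identifier.pmid2doi(23456789)             # DOI string or None
    pmid = Identifier.doi2pmid('10.1000/example')   # PMID or None
    # Bulk mapping: DOIs without a known PMID are simply absent from the dict.
    return doi, pmid, Identifier.mapDois2Pmids(['10.1000/example'])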
Example no. 18
def create_translation_table(_table_name, foreign_class, relation_name,
    language_class, relation_lazy='select', **kwargs):
    """Creates a table that represents some kind of data attached to the given
    foreign class, but translated across several languages.  Returns the new
    table's mapped class.  It won't be declarative, but it will have a
    `__table__` attribute so you can retrieve the Table object.

    `foreign_class` must have a `__singlename__`, currently only used to create
    the name of the foreign key column.

    Also supports the notion of a default language, which is attached to the
    session.  This is English by default, for historical and practical reasons.

    Usage looks like this:

        class Foo(Base): ...

        create_translation_table('foo_bars', Foo, 'bars',
            name = Column(...),
        )

        # Now you can do the following:
        foo.name
        foo.name_map['en']
        foo.foo_bars['en']

        foo.name_map['en'] = "new name"
        del foo.name_map['en']

        q.options(joinedload(Foo.bars_local))
        q.options(joinedload(Foo.bars))

    The following properties are added to the passed class:

    - `(relation_name)`, a relation to the new table.  It uses a dict-based
      collection class, where the keys are language identifiers and the values
      are rows in the created tables.
    - `(relation_name)_local`, a relation to the row in the new table that
      matches the current default language.
    - `(relation_name)_table`, the class created by this function.

    Note that these are distinct relations.  Even though the former necessarily
    includes the latter, SQLAlchemy doesn't treat them as linked; loading one
    will not load the other.  Modifying both within the same transaction has
    undefined behavior.

    For each column provided, the following additional attributes are added to
    Foo:

    - `(column)_map`, an association proxy onto `foo_bars`.
    - `(column)`, an association proxy onto `foo_bars_local`.

    Pardon the naming disparity, but the grammar suffers otherwise.

    Modifying these directly is not likely to be a good idea.

    For Markdown-formatted columns, `(column)_map` and `(column)` will give
    Markdown objects.
    """
    # n.b.: language_class only exists for the sake of tests, which sometimes
    # want to create tables entirely separate from the pokedex metadata

    foreign_key_name = foreign_class.__singlename__ + '_id'

    Translations = type(_table_name, (object,), {
        '_language_identifier': association_proxy('local_language', 'identifier'),
        'relation_name': relation_name,
        '__tablename__': _table_name,
    })

    # Create the table object
    table = Table(_table_name, foreign_class.__table__.metadata,
        Column(foreign_key_name, Integer, ForeignKey(foreign_class.id),
            primary_key=True, nullable=False,
            info=dict(description="ID of the %s these texts relate to" % foreign_class.__singlename__)),
        Column('local_language_id', Integer, ForeignKey(language_class.id),
            primary_key=True, nullable=False,
            info=dict(description="Language these texts are in")),
    )
    Translations.__table__ = table

    # Add ye columns
    # Column objects have a _creation_order attribute in ascending order; use
    # this to get the (unordered) kwargs sorted correctly
    kwitems = kwargs.items()
    kwitems.sort(key=lambda kv: kv[1]._creation_order)
    for name, column in kwitems:
        column.name = name
        table.append_column(column)

    # Construct ye mapper
    mapper(Translations, table, properties={
        'foreign_id': synonym(foreign_key_name),
        'local_language': relationship(language_class,
            primaryjoin=table.c.local_language_id == language_class.id,
            innerjoin=True),
    })

    # Add full-table relations to the original class
    # Foo.bars_table
    setattr(foreign_class, relation_name + '_table', Translations)
    # Foo.bars
    setattr(foreign_class, relation_name, relationship(Translations,
        primaryjoin=foreign_class.id == Translations.foreign_id,
        collection_class=attribute_mapped_collection('local_language'),
    ))
    # Foo.bars_local
    # This is a bit clever; it uses bindparam() to make the join clause
    # modifiable on the fly.  db sessions know the current language and
    # populate the bindparam.
    # The 'dummy' value is to trick SQLA; without it, SQLA thinks this
    # bindparam is just its own auto-generated clause and everything gets
    # f****d up.
    local_relation_name = relation_name + '_local'
    setattr(foreign_class, local_relation_name, relationship(Translations,
        primaryjoin=and_(
            Translations.foreign_id == foreign_class.id,
            Translations.local_language_id == bindparam('_default_language_id',
                value='dummy', type_=Integer, required=True),
        ),
        foreign_keys=[Translations.foreign_id, Translations.local_language_id],
        uselist=False,
        #innerjoin=True,
        lazy=relation_lazy,
    ))

    # Add per-column proxies to the original class
    for name, column in kwitems:
        getset_factory = None
        string_getter = column.info.get('string_getter')
        if string_getter:
            getset_factory = _getset_factory_factory(
                column.name, string_getter)

        # Class.(column) -- accessor for the default language's value
        setattr(foreign_class, name,
            LocalAssociationProxy(local_relation_name, name,
                    getset_factory=getset_factory))

        # Class.(column)_map -- accessor for the language dict
        # Need a custom creator since Translations doesn't have an init, and
        # these are passed as *args anyway
        def creator(language, value, name=name):  # bind the per-column name now (avoids late-binding on the loop variable)
            row = Translations()
            row.local_language = language
            setattr(row, name, value)
            return row
        setattr(foreign_class, name + '_map',
            association_proxy(relation_name, name, creator=creator,
                    getset_factory=getset_factory))

    # Add to the list of translation classes
    foreign_class.translation_classes.append(Translations)

    # Done
    return Translations
Example no. 19
class Qualifier(_Base, SelectMixin):
    """
    One of a MeSH descriptor's qualifiers for a record.

    Attributes:

        pmid
            the record this qualifier name belongs to
        num
            the descriptor order in the record (starting from 1)
        sub
            the qualifier order within the descriptor (starting from 1)
        major
            ``True`` if major, ``False`` if minor
        name
            the qualifier (name)

    Primary Key: ``(pmid, num, sub)``
    """

    __tablename__ = 'qualifiers'
    __table_args__ = (ForeignKeyConstraint(
        ('pmid', 'num'), ('descriptors.pmid', 'descriptors.num'),
        ondelete="CASCADE"), )

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    num = Column(SmallInteger, primary_key=True)
    sub = Column(SmallInteger, CheckConstraint("sub > 0"), primary_key=True)
    major = Column(Boolean, nullable=False)
    name = Column(UnicodeText, CheckConstraint("name <> ''"), nullable=False)

    def __init__(self,
                 pmid: int,
                 num: int,
                 sub: int,
                 name: str,
                 major: bool = False):
        assert pmid > 0, pmid
        assert num > 0, num
        assert sub > 0, sub
        assert name, repr(name)
        self.pmid = pmid
        self.num = num
        self.sub = sub
        self.major = major
        self.name = name

    def __str__(self):
        return '{}\t{}\t{}\t{}\t{}\n'.format(
            NULL(self.pmid),
            NULL(self.num),
            NULL(self.sub),
            'T' if self.major else 'F',
            STRING(self.name),
        )

    def __repr__(self):
        return "Qualifier<{}:{}:{}>".format(self.pmid, self.num, self.sub)

    def __eq__(self, other):
        return isinstance(other, Qualifier) and \
               self.pmid == other.pmid and \
               self.num == other.num and \
               self.sub == other.sub and \
               self.name == other.name and \
               self.major == other.major
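# --- Added usage sketch (not part of the original example) ---
# Qualifier rows are plain value objects validated in __init__; __str__ renders
# the tab-separated dump line (pmid, num, sub, 'T'/'F', name) used for loading.
def _qualifier_demo():
    """Hedged sketch: build a qualifier and render its dump line."""
    q = Qualifier(pmid=23456789, num=1, sub=1, name='metabolism', major=True)
    return str(q)   # exact escaping depends on the NULL/STRING helpers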
Example no. 20
def foreignkey(name):
    return Column(Integer, ForeignKey('{}.id'.format(name)))
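# --- Added usage sketch (not part of the original snippet) ---
# foreignkey('department') expands to Column(Integer, ForeignKey('department.id'));
# under SQLAlchemy's declarative extension the attribute name becomes the column
# name. A self-contained sketch with its own throwaway declarative base:
from sqlalchemy.ext.declarative import declarative_base

_DemoBase = declarative_base()


class _Employee(_DemoBase):
    __tablename__ = 'employees_demo'
    id = Column(Integer, primary_key=True)
    department_id = foreignkey('department')   # -> references department.id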
Example no. 21
class Section(_Base, SelectMixin):
    """
    The text sections of the records.

    Attributes:

        pmid
            the record's identifier (PubMed ID)
        seq
            the sequence of sections in the record (starting from 1)
        name
            the name of the section (Title, Abstract, Vernacular, Copyright, ...)
        label
            section label as defined by the publisher (if any)
        content
            the text content of this section

    Primary Key: ``(pmid, seq)``
    """
    __tablename__ = 'sections'

    pmid = Column(BigInteger,
                  ForeignKey('records', ondelete="CASCADE"),
                  primary_key=True)
    seq = Column(SmallInteger, CheckConstraint("seq > 0"), primary_key=True)
    name = Column(Unicode(length=64),
                  CheckConstraint("name <> ''"),
                  nullable=False)
    label = Column(Unicode(length=256),
                   CheckConstraint("label <> ''"),
                   nullable=True)
    content = Column(UnicodeText,
                     CheckConstraint("content <> ''"),
                     nullable=False)

    def __init__(self,
                 pmid: int,
                 seq: int,
                 name: str,
                 content: str,
                 label: str = None):
        assert pmid > 0, pmid
        assert seq > 0, seq
        assert name, repr(name)
        assert content, repr(content)
        assert label is None or label, repr(label)
        self.pmid = pmid
        self.seq = seq
        self.name = name
        self.label = label
        self.content = content

    def __str__(self):
        return '{}\t{}\t{}\t{}\t{}\n'.format(NULL(self.pmid), NULL(self.seq),
                                             NULL(self.name), NULL(self.label),
                                             STRING(self.content))

    def __repr__(self):
        return "Section<{}:{}>".format(self.pmid, self.seq)

    def __eq__(self, other):
        return isinstance(other, Section) and \
               self.pmid == other.pmid and \
               self.seq == other.seq and \
               self.name == other.name and \
               self.label == other.label and \
               self.content == other.content
Example no. 22
        if entry.get('tvrage_id', eval_lazy=lazy):
            ids['tvrage'] = entry['tvrage_id']
        if ids:
            break
    return ids


class TraktGenre(Base):
    __tablename__ = 'trakt_genres'

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Unicode)


show_genres_table = Table('trakt_show_genres', Base.metadata,
                          Column('show_id', Integer, ForeignKey('trakt_shows.id')),
                          Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(show_genres_table)

movie_genres_table = Table('trakt_movie_genres', Base.metadata,
                           Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
                           Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(movie_genres_table)
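# --- Added sketch (TraktShow/TraktMovie are defined elsewhere in this plugin;
# the commented class below only illustrates how the association tables above
# are typically wired in, it is not the plugin's actual definition) ---
#
#     class TraktShow(Base):
#         __tablename__ = 'trakt_shows'
#         id = Column(Integer, primary_key=True)
#         title = Column(Unicode)
#         genres = relationship(TraktGenre, secondary=show_genres_table,
#                               backref='shows')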


def get_db_genres(genres, session):
    """Takes a list of genres as strings, returns the database instances for them."""
    db_genres = []
    for genre in genres:
        genre = genre.replace('-', ' ')
        db_genre = session.query(TraktGenre).filter(TraktGenre.name == genre).first()
Example no. 23
class Stage(Base):
    __tablename__ = 'stage'
    __table_args__ = (UniqueConstraint('workflow_id', 'name', name='_uc_workflow_name'),)

    id = Column(Integer, primary_key=True)
    number = Column(Integer)
    name = Column(String(255))
    started_on = Column(DateTime)
    finished_on = Column(DateTime)
    workflow_id = Column(ForeignKey('workflow.id', ondelete="CASCADE"), nullable=False, index=True)
    # relationship_type = Column(Enum34_ColumnType(RelationshipType))
    successful = Column(Boolean, nullable=False, default=False)
    _status = Column(Enum34_ColumnType(StageStatus), default=StageStatus.no_attempt)
    parents = relationship("Stage",
                           secondary=StageEdge.__table__,
                           primaryjoin=id == StageEdge.parent_id,
                           secondaryjoin=id == StageEdge.child_id,
                           backref="children",
                           passive_deletes=True,
                           cascade="save-update, merge, delete",
                           )
    tasks = relationship("Task", backref="stage", cascade="all, merge, delete-orphan", passive_deletes=True)

    @declared_attr
    def status(cls):
        def get_status(self):
            return self._status

        def set_status(self, value):
            if self._status != value:
                self._status = value
                signal_stage_status_change.send(self)

        return synonym('_status', descriptor=property(get_status, set_status))

    def __init__(self, *args, **kwargs):
        super(Stage, self).__init__(*args, **kwargs)

        if not re.match(r'^[a-zA-Z0-9_\.-]+$', self.name):
            raise Exception('invalid stage name %s' % self.name)

    def __iter__(self):
        for t in self.tasks:
            yield t

    def __getitem__(self, key):
        return self.tasks[key]

    @property
    def tasksq(self):
        from .Task import Task

        return self.session.query(Task)

    #
    # def num_tasks(self):
    #     return self.tasksq.count()

    def num_successful_tasks(self):
        # return self.tasksq.filter_by(stage=self, successful=True).count()
        return len(filter(lambda t: t.successful, self.tasks))

    def num_failed_tasks(self):
        # return self.tasksq.filter_by(stage=self, status=TaskStatus.failed).count()
        return len(filter(lambda t: t.status == TaskStatus.failed, self.tasks))

    @property
    def url(self):
        return url_for('cosmos.stage', workflow_name=self.workflow.name, stage_name=self.name)

    @property
    def log(self):
        return self.workflow.log

    def delete(self, delete_files=False, delete_descendants=False):
        """
        Deletes this stage
        :param delete_files: Delete all files (will be slow if there are a lot of files)
        :param delete_descendants: Also delete all descendants of this stage
        :return: None
        """
        if delete_descendants:
            self.log.info('Deleting all descendants of %s' % self)
            for stage in reversed(list(self.descendants())):
                stage.delete(delete_files)

        self.log.info('Deleting %s. delete_files=%s' % (self, delete_files))
        if delete_files:
            for t in self.tasks:
                t.delete(delete_files=True)
        self.session.delete(self)
        self.session.commit()

    def filter_tasks(self, **filter_by):
        return (t for t in self.tasks if all(t.params.get(k, None) == v for k, v in filter_by.items()))

    def get_task(self, uid, default='ERROR@#$'):
        # params = {k: v for k, v in params.items() if
        #         any(isinstance(v, t) for t in ACCEPTABLE_TAG_TYPES)}  # These are the only params that actually get saved to the DB
        for task in self.tasks:
            if task.uid == uid:
                return task

        if default == 'ERROR@#$':
            raise KeyError('Task with uid %s does not exist' % uid)
        else:
            return default

    # def get_task(self, **filter_by):
    #     tasks = self.filter_tasks(**filter_by)
    #     assert len(tasks) > 0, 'no task found with params %s' % filter_by
    #     assert len(tasks) == 1, 'more than one task with params %s' % filter_by
    #     return tasks[0]

    def percent_successful(self):
        return round(float(self.num_successful_tasks()) / (float(len(self.tasks)) or 1) * 100, 2)

    def percent_failed(self):
        return round(float(self.num_failed_tasks()) / (float(len(self.tasks)) or 1) * 100, 2)

    def percent_running(self):
        return round(float(len([t for t in self.tasks if t.status == TaskStatus.submitted])) / (
            float(len(self.tasks)) or 1) * 100, 2)

    def descendants(self, include_self=False):
        """
        :return: (list) all stages that descend from this stage in the stage_graph
        """
        # return set(it.chain(*breadth_first_search.bfs_successors(self.ex.stage_graph(), self).values()))
        x = nx.descendants(self.workflow.stage_graph(), self)
        if include_self:
            return sorted({self}.union(x), key=lambda stage: stage.number)
        else:
            return x

    @property
    def label(self):
        return '{0} ({1}/{2})'.format(self.name, self.num_successful_tasks(), len(self.tasks))

    def __repr__(self):
        return '<Stage[%s] %s>' % (self.id or '', self.name)
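# --- Added usage sketch (not part of the original example) ---
# Stages act as ordered containers of tasks; the percent_* helpers and
# filter_tasks/get_task work purely on the in-memory task list. The 'chrom'
# param and 'some_uid' below are placeholders.
def _stage_summary(stage):
    """Hedged sketch: summarize a stage and look tasks up two ways."""
    summary = '%s: %.1f%% ok, %.1f%% failed' % (
        stage.label, stage.percent_successful(), stage.percent_failed())
    first_chr1 = next(stage.filter_tasks(chrom='chr1'), None)   # by params
    by_uid = stage.get_task('some_uid', default=None)           # by uid
    return summary, first_chr1, by_uid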
Example no. 24
class SubmissionResult(Base):
    """Class to store the evaluation results of a submission.

    """
    # Possible statuses of a submission result. COMPILING and
    # EVALUATING do not necessarily imply we are going to schedule
    # compilation and evaluation for these submission results: for
    # example, they might be for datasets not scheduled for
    # evaluation, or they might have passed the maximum number of
    # tries. If a submission result does not exist for a pair
    # (submission, dataset), its status can be implicitly assumed to
    # be COMPILING.
    COMPILING = 1
    COMPILATION_FAILED = 2
    EVALUATING = 3
    SCORING = 4
    SCORED = 5

    __tablename__ = 'submission_results'
    __table_args__ = (UniqueConstraint('submission_id', 'dataset_id'), )

    # Primary key is (submission_id, dataset_id).
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           primary_key=True)
    submission = relationship(Submission,
                              backref=backref("results",
                                              cascade="all, delete-orphan",
                                              passive_deletes=True))

    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        primary_key=True)
    dataset = relationship(Dataset)

    # Now below follow the actual result fields.

    # Compilation outcome (can be None = yet to compile, "ok" =
    # compilation successful and we can evaluate, "fail" =
    # compilation unsuccessful, throw it away).
    compilation_outcome = Column(String, nullable=True)

    # String containing output from the sandbox.
    compilation_text = Column(String, nullable=True)

    # Number of failures during compilation.
    compilation_tries = Column(Integer, nullable=False, default=0)

    # The compiler stdout and stderr.
    compilation_stdout = Column(Unicode, nullable=True)
    compilation_stderr = Column(Unicode, nullable=True)

    # Other information about the compilation.
    compilation_time = Column(Float, nullable=True)
    compilation_wall_clock_time = Column(Float, nullable=True)
    compilation_memory = Column(Integer, nullable=True)

    # Worker shard and sandbox where the compilation was performed.
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(Unicode, nullable=True)

    # Evaluation outcome (can be None = yet to evaluate, "ok" =
    # evaluation successful). At any time, this should be equal to
    # evaluations != [].
    evaluation_outcome = Column(String, nullable=True)

    # Number of failures during evaluation.
    evaluation_tries = Column(Integer, nullable=False, default=0)

    # Score as computed by ScoringService. Null means not yet scored.
    score = Column(Float, nullable=True)

    # Score details. It's a JSON-encoded string containing information
    # that is given to ScoreType.get_html_details to generate an HTML
    # snippet that is shown on AWS and, if the user used a token, on
    # CWS to display the details of the submission.
    # For example, results for each testcases, subtask, etc.
    score_details = Column(String, nullable=True)

    # The same as the last two fields, but from the point of view of
    # the user (when he/she did not play a token).
    public_score = Column(Float, nullable=True)
    public_score_details = Column(String, nullable=True)

    # Ranking score details. It is a list of strings that are going to
    # be shown in a single row in the table of submission in RWS. JSON
    # encoded.
    ranking_score_details = Column(String, nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # executables (dict of Executable objects indexed by filename)
    # evaluations (list of Evaluation objects)

    def get_status(self):
        """Return the status of this object.

        """
        if not self.compiled():
            return SubmissionResult.COMPILING
        elif self.compilation_failed():
            return SubmissionResult.COMPILATION_FAILED
        elif not self.evaluated():
            return SubmissionResult.EVALUATING
        elif not self.scored():
            return SubmissionResult.SCORING
        else:
            return SubmissionResult.SCORED

    def get_evaluation(self, testcase):
        """Return the Evaluation of this SR on the given Testcase, if any

        testcase (Testcase): the testcase the returned evaluation will
            belong to.

        return (Evaluation|None): the (only!) evaluation of this
            submission result on the given testcase, or None if there
            isn't any.

        """
        # Use IDs to avoid triggering a lazy-load query.
        assert self.dataset_id == testcase.dataset_id

        # XXX If self.evaluations is already loaded we can walk over it
        # and spare a query.
        # (We could use .one() and avoid a LIMIT but we would need to
        # catch a NoResultFound exception.)
        return self.sa_session.query(Evaluation)\
            .filter(Evaluation.submission_result == self)\
            .filter(Evaluation.testcase == testcase)\
            .first()

    def compiled(self):
        """Return whether the submission result has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    @staticmethod
    def filter_compiled():
        """Return a filtering expression for compiled submission results.

        """
        return SubmissionResult.compilation_outcome != None  # noqa

    def compilation_failed(self):
        """Return whether the submission result did not compile.

        return (bool): True if the compilation failed (in the sense
            that there is a problem in the user's source), False if
            not yet compiled or compilation was successful.

        """
        return self.compilation_outcome == "fail"

    @staticmethod
    def filter_compilation_failed():
        """Return a filtering expression for submission results failing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "fail"

    def compilation_succeeded(self):
        """Return whether the submission compiled.

        return (bool): True if the compilation succeeded (in the sense
            that an executable was created), False if not yet compiled
            or compilation was unsuccessful.

        """
        return self.compilation_outcome == "ok"

    @staticmethod
    def filter_compilation_succeeded():
        """Return a filtering expression for submission results passing
        compilation.

        """
        return SubmissionResult.compilation_outcome == "ok"

    def evaluated(self):
        """Return whether the submission result has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None

    @staticmethod
    def filter_evaluated():
        """Return a filtering lambda for evaluated submission results.

        """
        return SubmissionResult.evaluation_outcome != None  # noqa

    def needs_scoring(self):
        """Return whether the submission result needs to be scored.

        return (bool): True if in need of scoring, False otherwise.

        """
        return (self.compilation_failed() or self.evaluated()) and \
            not self.scored()

    def scored(self):
        """Return whether the submission result has been scored.

        return (bool): True if scored, False otherwise.

        """
        return all(
            getattr(self, k) is not None for k in [
                "score", "score_details", "public_score",
                "public_score_details", "ranking_score_details"
            ])

    @staticmethod
    def filter_scored():
        """Return a filtering lambda for scored submission results.

        """
        return ((SubmissionResult.score != None)
                & (SubmissionResult.score_details != None)
                & (SubmissionResult.public_score != None)
                & (SubmissionResult.public_score_details != None)
                & (SubmissionResult.ranking_score_details != None))  # noqa

    def invalidate_compilation(self):
        """Blank all compilation and evaluation outcomes, and the score.

        """
        self.invalidate_evaluation()
        self.compilation_outcome = None
        self.compilation_text = None
        self.compilation_tries = 0
        self.compilation_time = None
        self.compilation_wall_clock_time = None
        self.compilation_memory = None
        self.compilation_shard = None
        self.compilation_sandbox = None
        self.executables = {}

    def invalidate_evaluation(self):
        """Blank the evaluation outcomes and the score.

        """
        self.invalidate_score()
        self.evaluation_outcome = None
        self.evaluation_tries = 0
        self.evaluations = []

    def invalidate_score(self):
        """Blank the score.

        """
        self.score = None
        self.score_details = None
        self.public_score = None
        self.public_score_details = None
        self.ranking_score_details = None

    def set_compilation_outcome(self, success):
        """Set the compilation outcome based on the success.

        success (bool): if the compilation was successful.

        """
        self.compilation_outcome = "ok" if success else "fail"

    def set_evaluation_outcome(self):
        """Set the evaluation outcome (always ok now).

        """
        self.evaluation_outcome = "ok"
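# --- Added usage sketch (not part of the original example) ---
# The filter_* staticmethods return SQLAlchemy expressions, so they compose
# directly inside queries ('session' is assumed to be an open Session).
def _results_pending_scoring(session):
    """Hedged sketch: results that are evaluated but not yet fully scored."""
    return session.query(SubmissionResult)\
        .filter(SubmissionResult.filter_evaluated())\
        .filter(~SubmissionResult.filter_scored())\
        .all()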
Example no. 25
RCDB_MAX_RUN = 18446744073709551615  # 2**64 - 1


class ModelBase(Base):
    __abstract__ = True

    @property
    def log_id(self):
        """returns id suitable for log. Which is tablename_id"""
        return self.__tablename__ + "_" + str(self.id)


_files_have_runs_association = Table(
    'files_have_runs', Base.metadata,
    Column('files_id', Integer, ForeignKey('files.id')),
    Column('run_number', Integer, ForeignKey('runs.number')))


# --------------------------------------------
# class RUN
# --------------------------------------------
class Run(ModelBase):
    """
    Represents data for run

    Attributes:
        Run.number (int): The run number

    """
    __tablename__ = 'runs'
Example no. 26
class Submission(Base):
    """Class to store a submission.

    """
    __tablename__ = 'submissions'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # User and Contest, thus Participation (id and object) that did the
    # submission.
    participation_id = Column(Integer,
                              ForeignKey(Participation.id,
                                         onupdate="CASCADE",
                                         ondelete="CASCADE"),
                              nullable=False,
                              index=True)
    participation = relationship(Participation,
                                 backref=backref("submissions",
                                                 cascade="all, delete-orphan",
                                                 passive_deletes=True))

    # Task (id and object) of the submission.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    task = relationship(Task,
                        backref=backref("submissions",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Time of the submission.
    timestamp = Column(DateTime, nullable=False)

    # Language of submission, or None if not applicable.
    language = Column(String, nullable=True)

    # Comment from the administrator on the submission.
    comment = Column(Unicode, nullable=False, default="")

    @property
    def short_comment(self):
        """The first line of the comment."""
        return self.comment.split("\n", 1)[0]

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # files (dict of File objects indexed by filename)
    # token (Token object or None)
    # results (list of SubmissionResult objects)

    def get_result(self, dataset=None):
        """Return the result associated to a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult|None): the submission result
            associated to this submission and the given dataset, if it
            exists in the database, otherwise None.

        """
        if dataset is not None:
            # Use IDs to avoid triggering a lazy-load query.
            assert self.task_id == dataset.task_id
            dataset_id = dataset.id
        else:
            dataset_id = self.task.active_dataset_id

        return SubmissionResult.get_from_id((self.id, dataset_id),
                                            self.sa_session)

    def get_result_or_create(self, dataset=None):
        """Return and, if necessary, create the result for a dataset.

        dataset (Dataset|None): the dataset for which the caller wants
            the submission result; if None, the active one is used.

        return (SubmissionResult): the submission result associated to
            this submission and the given dataset; if it does not
            exist, a new one is created.

        """
        if dataset is None:
            dataset = self.task.active_dataset

        submission_result = self.get_result(dataset)

        if submission_result is None:
            submission_result = SubmissionResult(submission=self,
                                                 dataset=dataset)

        return submission_result

    def tokened(self):
        """Return if the user played a token against the submission.

        return (bool): True if tokened, False otherwise.

        """
        return self.token is not None
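# --- Added usage sketch (not part of the original example) ---
# get_result() looks the row up by its (submission, dataset) primary key;
# get_result_or_create() falls back to a blank result for the active dataset.
# 'submission' is assumed to be a persistent Submission instance.
def _submission_status(submission, dataset=None):
    """Hedged sketch: status of a submission on a dataset (or the active one)."""
    result = submission.get_result_or_create(dataset)
    return result.get_status()   # one of SubmissionResult.COMPILING ... SCORED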
Example no. 27
class Airport(base):
    __tablename__ = 'airports'

    iata = Column(String(3), primary_key=True)
    city = Column(Integer, ForeignKey("cities.city_id"))
    geom = Column(Geometry(geometry_type='POINT', srid=4674))
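# --- Added usage sketch (assumption: the Geometry type comes from GeoAlchemy2) ---
# Point values can be passed as WKT with an SRID matching the column definition;
# the IATA code and city id below are placeholders.
def _make_airport():
    """Hedged sketch: build an Airport row with a WKT point geometry."""
    from geoalchemy2.elements import WKTElement
    return Airport(iata='GRU', city=1,
                   geom=WKTElement('POINT(-46.473 -23.435)', srid=4674))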
Example no. 28
class Evaluation(Base):
    """Class to store information about the outcome of the evaluation
    of a submission against one testcase.

    """
    __tablename__ = 'evaluations'
    __table_args__ = (
        ForeignKeyConstraint(
            ('submission_id', 'dataset_id'),
            (SubmissionResult.submission_id, SubmissionResult.dataset_id),
            onupdate="CASCADE",
            ondelete="CASCADE"),
        UniqueConstraint('submission_id', 'dataset_id', 'testcase_id'),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Submission (id and object) owning the evaluation.
    submission_id = Column(Integer,
                           ForeignKey(Submission.id,
                                      onupdate="CASCADE",
                                      ondelete="CASCADE"),
                           nullable=False,
                           index=True)
    submission = relationship(Submission, viewonly=True)

    # Dataset (id and object) owning the evaluation.
    dataset_id = Column(Integer,
                        ForeignKey(Dataset.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    dataset = relationship(Dataset, viewonly=True)

    # SubmissionResult owning the evaluation.
    submission_result = relationship(SubmissionResult,
                                     backref=backref(
                                         'evaluations',
                                         cascade="all, delete-orphan",
                                         passive_deletes=True))

    # Testcase (id and object) this evaluation was performed on.
    testcase_id = Column(Integer,
                         ForeignKey(Testcase.id,
                                    onupdate="CASCADE",
                                    ondelete="CASCADE"),
                         nullable=False,
                         index=True)
    testcase = relationship(Testcase)

    # String containing the outcome of the evaluation (usually 1.0,
    # ...), not necessarily the points awarded, which will be computed
    # by the score type.
    outcome = Column(Unicode, nullable=True)

    # String containing output from the grader (usually "Correct",
    # "Time limit", ...).
    text = Column(String, nullable=True)

    # Evaluation's time and wall-clock time, in seconds.
    execution_time = Column(Float, nullable=True)
    execution_wall_clock_time = Column(Float, nullable=True)

    # Memory used by the evaluation, in bytes.
    execution_memory = Column(Integer, nullable=True)

    # Worker shard and sandbox where the evaluation was performed.
    evaluation_shard = Column(Integer, nullable=True)
    evaluation_sandbox = Column(Unicode, nullable=True)

    @property
    def codename(self):
        """Return the codename of the testcase."""
        return self.testcase.codename
Example no. 29
class UserTest(Base):
    """Class to store a test requested by a user. Not to be used
    directly (import it from SQLAlchemyAll).

    """
    __tablename__ = 'user_tests'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # User (id and object) that requested the test.
    user_id = Column(Integer,
                     ForeignKey(User.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    user = relationship(User,
                        backref=backref("user_tests",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Task (id and object) of the test.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    task = relationship(Task,
                        backref=backref("user_tests",
                                        cascade="all, delete-orphan",
                                        passive_deletes=True))

    # Time of the request.
    timestamp = Column(DateTime, nullable=False)

    # Language of test, or None if not applicable.
    language = Column(String, nullable=True)

    # Input (provided by the user) and output files' digests for this
    # test
    input = Column(String, nullable=False)
    output = Column(String, nullable=True)

    # Compilation outcome (can be None = yet to compile, "ok" =
    # compilation successful and we can evaluate, "fail" =
    # compilation unsuccessful, throw it away).
    compilation_outcome = Column(String, nullable=True)

    # String containing output from the sandbox, and the compiler
    # stdout and stderr.
    compilation_text = Column(String, nullable=True)

    # Number of attempts of compilation.
    compilation_tries = Column(Integer, nullable=False)

    # Worker shard and sandbox where the compilation was performed
    compilation_shard = Column(Integer, nullable=True)
    compilation_sandbox = Column(String, nullable=True)

    # Evaluation outcome (can be None = yet to evaluate, "ok" =
    # evaluation successful).
    evaluation_outcome = Column(String, nullable=True)
    evaluation_text = Column(String, nullable=True)

    # Number of attempts of evaluation.
    evaluation_tries = Column(Integer, nullable=False)

    # Worker shard and sandbox where the evaluation was performed
    evaluation_shard = Column(Integer, nullable=True)
    evaluation_sandbox = Column(String, nullable=True)

    # Other information about the execution
    memory_used = Column(Integer, nullable=True)
    execution_time = Column(Float, nullable=True)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # files (dict of UserTestFile objects indexed by filename)
    # executables (dict of UserTestExecutable objects indexed by filename)
    # managers (dict of UserTestManager objects indexed by filename)

    def __init__(self,
                 user,
                 task,
                 timestamp,
                 files,
                 managers,
                 input,
                 output=None,
                 language=None,
                 compilation_outcome=None,
                 compilation_text=None,
                 compilation_tries=0,
                 executables=None,
                 compilation_shard=None,
                 compilation_sandbox=None,
                 evaluation_outcome=None,
                 evaluation_text=None,
                 evaluation_tries=0,
                 evaluation_shard=None,
                 evaluation_sandbox=None,
                 memory_used=None,
                 execution_time=None):
        self.user = user
        self.task = task
        self.timestamp = timestamp
        self.files = files
        self.managers = managers
        self.input = input
        self.output = output
        self.language = language
        self.compilation_outcome = compilation_outcome
        self.compilation_text = compilation_text
        self.compilation_tries = compilation_tries
        self.executables = executables if executables is not None else {}
        self.compilation_shard = compilation_shard
        self.compilation_sandbox = compilation_sandbox
        self.evaluation_outcome = evaluation_outcome
        self.evaluation_text = evaluation_text
        self.evaluation_tries = evaluation_tries
        self.evaluation_shard = evaluation_shard
        self.evaluation_sandbox = evaluation_sandbox
        self.memory_used = memory_used
        self.execution_time = execution_time

    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task':
            self.task.name,
            'timestamp':
            make_timestamp(self.timestamp),
            'files':
            [_file.export_to_dict() for _file in self.files.itervalues()],
            'managers': [
                manager.export_to_dict()
                for manager in self.managers.itervalues()
            ],
            'input':
            self.input,
            'output':
            self.output,
            'language':
            self.language,
            'compilation_outcome':
            self.compilation_outcome,
            'compilation_tries':
            self.compilation_tries,
            'compilation_text':
            self.compilation_text,
            'compilation_shard':
            self.compilation_shard,
            'compilation_sandbox':
            self.compilation_sandbox,
            'executables': [
                executable.export_to_dict()
                for executable in self.executables.itervalues()
            ],
            'evaluation_outcome':
            self.evaluation_outcome,
            'evaluation_text':
            self.evaluation_text,
            'evaluation_tries':
            self.evaluation_tries,
            'evaluation_shard':
            self.evaluation_shard,
            'evaluation_sandbox':
            self.evaluation_sandbox,
            'memory_used':
            self.memory_used,
            'execution_time':
            self.execution_time,
        }
        return res

    @classmethod
    def import_from_dict(cls, data, tasks_by_name):
        """Build the object using data from a dictionary.

        """
        data['files'] = [
            UserTestFile.import_from_dict(file_data)
            for file_data in data['files']
        ]
        data['files'] = dict([(_file.filename, _file)
                              for _file in data['files']])
        data['executables'] = [
            UserTestExecutable.import_from_dict(executable_data)
            for executable_data in data['executables']
        ]
        data['executables'] = dict([(executable.filename, executable)
                                    for executable in data['executables']])
        data['managers'] = [
            UserTestManager.import_from_dict(manager_data)
            for manager_data in data['managers']
        ]
        data['managers'] = dict([(manager.filename, manager)
                                 for manager in data['managers']])
        data['task'] = tasks_by_name[data['task']]
        data['user'] = None
        data['timestamp'] = make_datetime(data['timestamp'])
        return cls(**data)

    def compiled(self):
        """Return if the user test has been compiled.

        return (bool): True if compiled, False otherwise.

        """
        return self.compilation_outcome is not None

    def evaluated(self):
        """Return if the user test has been evaluated.

        return (bool): True if evaluated, False otherwise.

        """
        return self.evaluation_outcome is not None
Example no. 30
class Child(CRUDModel):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))
    jmeno = Column(String, nullable=False, index=True)
Example no. 31
        for table in tables:
            session.execute(table.delete())
        table_add_column('rottentomatoes_actors', 'rt_id', String, session)
        ver = 1
    if ver == 1:
        table = table_schema('rottentomatoes_search_results', session)
        session.execute(sql.delete(table, table.c.movie_id == None))
        ver = 2
    return ver


# association tables
genres_table = Table(
    'rottentomatoes_movie_genres',
    Base.metadata,
    Column('movie_id', Integer, ForeignKey('rottentomatoes_movies.id')),
    Column('genre_id', Integer, ForeignKey('rottentomatoes_genres.id')),
    Index('ix_rottentomatoes_movie_genres', 'movie_id', 'genre_id'),
)
Base.register_table(genres_table)

actors_table = Table(
    'rottentomatoes_movie_actors',
    Base.metadata,
    Column('movie_id', Integer, ForeignKey('rottentomatoes_movies.id')),
    Column('actor_id', Integer, ForeignKey('rottentomatoes_actors.id')),
    Index('ix_rottentomatoes_movie_actors', 'movie_id', 'actor_id'),
)
Base.register_table(actors_table)

directors_table = Table(
    'rottentomatoes_movie_directors',
    Base.metadata,
    # Completed following the pattern of the two tables above; the snippet is
    # cut off here, so the table and column names below are assumptions.
    Column('movie_id', Integer, ForeignKey('rottentomatoes_movies.id')),
    Column('director_id', Integer, ForeignKey('rottentomatoes_directors.id')),
    Index('ix_rottentomatoes_movie_directors', 'movie_id', 'director_id'),
)
Base.register_table(directors_table)
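These association tables are normally consumed through relationship(secondary=...). A hedged sketch, assuming movie/genre/actor models mapped to the referenced rottentomatoes_* tables are defined elsewhere in the plugin (the class names here are assumptions):

from sqlalchemy.orm import relationship

class RottenTomatoesMovie(Base):  # assumed model name; the real plugin defines its own
    __tablename__ = 'rottentomatoes_movies'
    id = Column(Integer, primary_key=True)
    genres = relationship('RottenTomatoesGenre', secondary=genres_table)
    actors = relationship('RottenTomatoesActor', secondary=actors_table)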
Exemplo n.º 32
0
class UserProfile(meta.Base, BaseMixin, BaseDictMixin):
    """
    Profile information is added to the 'users' table.
    """
    PALETTE_DEFAULT_NAME = 'palette'
    PALETTE_DEFAULT_FRIENDLY_NAME = 'Palette Server Admin'
    PALETTE_DEFAULT_PASSWORD = '******'

    __tablename__ = 'users'
    userid = Column(BigInteger, unique=True, nullable=False,
                    autoincrement=True, primary_key=True)
    envid = Column(BigInteger, ForeignKey("environment.envid"), nullable=False)
    active = Column(Boolean, default=True)
    name = Column(String, unique=True, nullable=False)
    friendly_name = Column(String)
    email = Column(String)
    email_level = Column(Integer, default=1)
    phone = Column(String)
    hashed_password = Column(String)
    salt = Column(String)
    roleid = Column(BigInteger, ForeignKey("roles.roleid"), default=0)
    system_user_id = Column(Integer, unique=True)
    login_at = Column(DateTime)
    licensing_role_id = Column(Integer)
    user_admin_level = Column(Integer)
    system_admin_level = Column(Integer)
    publisher = Column(Boolean)
    system_created_at = Column(DateTime)
    timestamp = Column(DateTime)  # last active time (in Palette)
    modification_time = Column(DateTime,
                               server_default=func.now(),
                               onupdate=func.current_timestamp())

    role = relationship("Role")

    def __unicode__(self):
        if self.friendly_name:
            return unicode(self.friendly_name)
        return unicode(self.name)

    def __str__(self):
        return unicode(self).encode('utf-8')

    def display_name(self):
        return unicode(self)

    def display_role(self):
        # pylint: disable=no-member
        if self.publisher:
            if self.roleid == Role.NO_ADMIN:
                return u'Publisher'
            return u'Publisher & ' + self.role.name
        else:
            return self.role.name

    @classmethod
    def get(cls, envid, userid):
        filters = {'envid': envid, 'userid': userid}
        return cls.get_unique_by_keys(filters, default=None)

    @classmethod
    def get_by_system_user_id(cls, envid, system_user_id):
        filters = {'envid': envid, 'system_user_id': system_user_id}
        return cls.get_unique_by_keys(filters, default=None)

    @classmethod
    def get_by_name(cls, envid, name):
        try:
            query = meta.Session.query(UserProfile).\
                    filter(UserProfile.envid == envid).\
                    filter(func.lower(UserProfile.name) == name.lower())
            entry = query.one()
        except NoResultFound:
            entry = None
        return entry

    @classmethod
    def verify(cls, envid, name, password):
        entry = cls.get_by_name(envid, name)
        if not entry:
            return False
        return entry.hashed_password == tableau_hash(password, entry.salt)

    defaults = [{
        'userid': 0,
        'envid': 1,
        'name': PALETTE_DEFAULT_NAME,
        'friendly_name': PALETTE_DEFAULT_FRIENDLY_NAME,
        'email_level': 1,
        'email': None,
        'salt': '',
        'roleid': 3,  # SUPER_ADMIN
        'system_user_id': 0
    }]

    @classmethod
    def user_count(cls, envid):
        return meta.Session.query(UserProfile).\
               filter(UserProfile.envid == envid).\
               count()

    @classmethod
    def update_timestamp(cls, entry):
        entry.timestamp = datetime.datetime.utcnow()
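
A minimal usage sketch for the lookup and verification helpers above; meta.Session and the default environment (envid=1) and role rows are assumed to have been set up by the surrounding application, and 'palette'/'secret' are placeholder credentials:

profile = UserProfile.get_by_name(1, 'palette')            # case-insensitive name lookup
if profile is not None and UserProfile.verify(1, profile.name, 'secret'):
    UserProfile.update_timestamp(profile)                  # record last activity in Palette
    meta.Session.commit()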