class Task(Base):
    """Class to store a task.

    A task belongs to (at most) one contest and owns its statements,
    attachments, datasets, submissions and user tests.

    """
    __tablename__ = 'tasks'
    __table_args__ = (
        UniqueConstraint('contest_id', 'num'),
        UniqueConstraint('contest_id', 'name'),
        ForeignKeyConstraint(
            ("id", "active_dataset_id"),
            ("datasets.task_id", "datasets.id"),
            onupdate="SET NULL", ondelete="SET NULL",
            # Use an ALTER query to set this foreign key after
            # both tables have been CREATEd, to avoid circular
            # dependencies.
            use_alter=True,
            name="fk_active_dataset_id"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True,
        # Needed to enable autoincrement on integer primary keys that
        # are referenced by a foreign key defined on this table.
        autoincrement='ignore_fk')

    # Number of the task for sorting.
    num = Column(Integer, nullable=True)

    # Contest (id and object) owning the task.
    contest_id = Column(Integer,
                        ForeignKey(Contest.id,
                                   onupdate="CASCADE",
                                   ondelete="CASCADE"),
                        nullable=True,
                        index=True)
    contest = relationship(Contest, back_populates="tasks")

    # Short name and long human readable title of the task.
    name = Column(Codename, nullable=False, unique=True)
    title = Column(Unicode, nullable=False)

    # The names of the files that the contestant needs to submit (with
    # language-specific extensions replaced by "%l").
    submission_format = Column(FilenameSchemaArray, nullable=False,
                               default=[])

    # The language codes of the statements that will be highlighted to
    # all users for this task.
    primary_statements = Column(ARRAY(String), nullable=False, default=[])

    # The parameters that control task-tokens follow. Note that their
    # effect during the contest depends on the interaction with the
    # parameters that control contest-tokens, defined on the Contest.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(Enum(TOKEN_MODE_DISABLED, TOKEN_MODE_FINITE,
                             TOKEN_MODE_INFINITE,
                             name="token_mode"),
                        nullable=False,
                        default=TOKEN_MODE_DISABLED)

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on this tasks).
    token_max_number = Column(Integer,
                              CheckConstraint("token_max_number > 0"),
                              nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on this task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(Integer,
                               CheckConstraint("token_gen_initial >= 0"),
                               nullable=False,
                               default=2)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False,
                              default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(Integer,
                           CheckConstraint("token_gen_max > 0"),
                           nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # on this task during the whole contest or None to not enforce
    # this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(Integer,
                                  CheckConstraint("max_user_test_number > 0"),
                                  nullable=True)

    # Minimum interval between two submissions or user_tests for this
    # task, or None to not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # What information users can see about the evaluations of their
    # submissions. Offering full information might help some users to
    # reverse engineer task data.
    feedback_level = Column(Enum(FEEDBACK_LEVEL_FULL,
                                 FEEDBACK_LEVEL_RESTRICTED,
                                 name="feedback_level"),
                            nullable=False,
                            default=FEEDBACK_LEVEL_RESTRICTED)

    # The scores for this task will be rounded to this number of
    # decimal places.
    score_precision = Column(Integer,
                             CheckConstraint("score_precision >= 0"),
                             nullable=False,
                             default=0)

    # Score mode for the task.
    score_mode = Column(Enum(SCORE_MODE_MAX_TOKENED_LAST, SCORE_MODE_MAX,
                             SCORE_MODE_MAX_SUBTASK,
                             name="score_mode"),
                        nullable=False,
                        default=SCORE_MODE_MAX_TOKENED_LAST)

    # Active Dataset (id and object) currently being used for scoring.
    # The ForeignKeyConstraint for this column is set at table-level.
    active_dataset_id = Column(Integer, nullable=True)
    active_dataset = relationship(
        'Dataset',
        foreign_keys=[active_dataset_id],
        # Use an UPDATE query *after* an INSERT query (and *before* a
        # DELETE query) to set (and unset) the column associated to
        # this relationship.
        post_update=True)

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    statements = relationship(
        "Statement",
        collection_class=attribute_mapped_collection("language"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")

    attachments = relationship(
        "Attachment",
        collection_class=attribute_mapped_collection("filename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")

    spoilers = relationship(
        "Spoiler",
        collection_class=attribute_mapped_collection("filename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")

    datasets = relationship(
        "Dataset",
        # Due to active_dataset_id, SQLAlchemy cannot unambiguously
        # figure out by itself which foreign key to use.
        foreign_keys="[Dataset.task_id]",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")

    submissions = relationship(
        "Submission",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")

    user_tests = relationship(
        "UserTest",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="task")
class Contest(Base):
    """Class to store a contest (which is a single day of a
    programming competition).

    """
    __tablename__ = 'contests'
    __table_args__ = (
        CheckConstraint("start <= stop"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True)

    # Short name of the contest.
    name = Column(
        Unicode,
        nullable=False,
        unique=True)

    # Description of the contest (human readable).
    description = Column(
        Unicode,
        nullable=False)

    # The list of language codes of the localizations that contestants
    # are allowed to use (empty means all).
    allowed_localizations = Column(
        RepeatedUnicode(),
        nullable=False,
        default=[])

    # The list of names of languages allowed in the contest.
    languages = Column(
        RepeatedUnicode(),
        nullable=False,
        default=["C11 / gcc", "C++11 / g++", "Pascal / fpc"])

    # Whether contestants are allowed to download their submissions.
    submissions_download_allowed = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether the user question is enabled.
    allow_questions = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether the user test interface is enabled.
    allow_user_tests = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to prevent hidden participations to log in.
    block_hidden_participations = Column(
        Boolean,
        nullable=False,
        default=False)

    # Whether to allow username/password authentication.
    allow_password_authentication = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to enforce that the IP address of the request matches
    # the IP address or subnet specified for the participation (if
    # present).
    ip_restriction = Column(
        Boolean,
        nullable=False,
        default=True)

    # Whether to automatically log in users connecting from an IP
    # address specified in the ip field of a participation to this
    # contest.
    ip_autologin = Column(
        Boolean,
        nullable=False,
        default=False)

    # The parameters that control contest-tokens follow. Note that
    # their effect during the contest depends on the interaction with
    # the parameters that control task-tokens, defined on each Task.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(
        Enum("disabled", "finite", "infinite", name="token_mode"),
        nullable=False,
        default="infinite")

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on all tasks).
    token_max_number = Column(
        Integer,
        CheckConstraint("token_max_number > 0"),
        nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on any task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(
        Integer,
        CheckConstraint("token_gen_initial >= 0"),
        nullable=False,
        default=2)
    token_gen_number = Column(
        Integer,
        CheckConstraint("token_gen_number >= 0"),
        nullable=False,
        default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(
        Integer,
        CheckConstraint("token_gen_max > 0"),
        nullable=True)

    # Beginning and ending of the contest.
    start = Column(
        DateTime,
        nullable=False,
        default=datetime(2000, 1, 1))
    stop = Column(
        DateTime,
        nullable=False,
        default=datetime(2100, 1, 1))

    # Timezone for the contest. All timestamps in CWS will be shown
    # using the timezone associated to the logged-in user or (if it's
    # None or an invalid string) the timezone associated to the
    # contest or (if it's None or an invalid string) the local
    # timezone of the server. This value has to be a string like
    # "Europe/Rome", "Australia/Sydney", "America/New_York", etc.
    timezone = Column(
        Unicode,
        nullable=True)

    # Max contest time for each user in seconds.
    per_user_time = Column(
        Interval,
        CheckConstraint("per_user_time >= '0 seconds'"),
        nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # during the whole contest or None to not enforce this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(
        Integer,
        CheckConstraint("max_user_test_number > 0"),
        nullable=True)

    # Minimum interval between two submissions or user_tests, or None to
    # not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this contest will be rounded to this number of
    # decimal places.
    score_precision = Column(
        Integer,
        CheckConstraint("score_precision >= 0"),
        nullable=False,
        default=0)

    # Follows the description of the fields automatically added by
    # SQLAlchemy.
    # tasks (list of Task objects)
    # announcements (list of Announcement objects)
    # participations (list of Participation objects)

    # Moreover, we have the following methods.
    # get_submissions (defined in __init__.py)
    # get_submission_results (defined in __init__.py)
    # get_user_tests (defined in __init__.py)
    # get_user_test_results (defined in __init__.py)

    # FIXME - Use SQL syntax
    def get_task(self, task_name):
        """Return the first task in the contest with the given name.

        task_name (string): the name of the task we are interested in.

        return (Task): the corresponding task object.

        raise (KeyError): if no tasks with the given name are found.

        """
        for task in self.tasks:
            if task.name == task_name:
                return task
        raise KeyError("Task not found")

    # FIXME - Use SQL syntax
    def get_task_index(self, task_name):
        """Return the index of the first task in the contest with the
        given name.

        task_name (string): the name of the task we are interested in.

        return (int): the index of the corresponding task.

        raise (KeyError): if no tasks with the given name are found.

        """
        for idx, task in enumerate(self.tasks):
            if task.name == task_name:
                return idx
        raise KeyError("Task not found")

    def enumerate_files(self, skip_submissions=False, skip_user_tests=False,
                        skip_generated=False):
        """Enumerate all the files (by digest) referenced by the
        contest.

        return (set): a set of strings, the digests of the file
            referenced in the contest.

        """
        # Here we cannot use yield, because we want to detect
        # duplicates.
        files = set()
        for task in self.tasks:
            # Enumerate statements.
            for file_ in task.statements.values():
                files.add(file_.digest)

            # Enumerate attachments.
            for file_ in task.attachments.values():
                files.add(file_.digest)

            # Enumerate managers.
            for dataset in task.datasets:
                for file_ in dataset.managers.values():
                    files.add(file_.digest)

            # Enumerate testcases.
            for dataset in task.datasets:
                for testcase in dataset.testcases.values():
                    files.add(testcase.input)
                    files.add(testcase.output)

        if not skip_submissions:
            for submission in self.get_submissions():
                # Enumerate files.
                for file_ in submission.files.values():
                    files.add(file_.digest)

                # Enumerate executables.
                if not skip_generated:
                    for sr in submission.results:
                        for file_ in sr.executables.values():
                            files.add(file_.digest)

        if not skip_user_tests:
            for user_test in self.get_user_tests():
                files.add(user_test.input)

                if not skip_generated:
                    for ur in user_test.results:
                        if ur.output is not None:
                            files.add(ur.output)

                # Enumerate files.
                for file_ in user_test.files.values():
                    files.add(file_.digest)

                # Enumerate managers.
                for file_ in user_test.managers.values():
                    files.add(file_.digest)

                # Enumerate executables.
                if not skip_generated:
                    for ur in user_test.results:
                        for file_ in ur.executables.values():
                            files.add(file_.digest)

        return files

    def phase(self, timestamp):
        """Return: -1 if contest isn't started yet at time timestamp,
                    0 if the contest is active at time timestamp,
                    1 if the contest has ended.

        timestamp (datetime): the time we are interested in.

        return (int): contest phase as above.

        """
        if self.start is not None and self.start > timestamp:
            return -1
        if self.stop is None or self.stop > timestamp:
            return 0
        return 1

    @staticmethod
    def _tokens_available(token_timestamps, token_mode,
                          token_max_number, token_min_interval,
                          token_gen_initial, token_gen_number,
                          token_gen_interval, token_gen_max, start,
                          timestamp):
        """Do exactly the same computation stated in tokens_available,
        but ensuring only a single set of token_* directive.
        Basically, tokens_available call this twice for contest-wise
        and task-wise parameters and then assemble the result.

        token_timestamps ([datetime]): list of timestamps of used
            tokens, sorted in chronological order.
        token_* (int): the parameters we want to enforce.
        start (datetime): the time from which we start accumulating
            tokens.
        timestamp (datetime): the time relative to which make the
            calculation (has to be greater than or equal to all
            elements of token_timestamps).

        return ((int, datetime|None, datetime|None)): same as
            tokens_available.

        """
        # If tokens are disabled there are no tokens available.
        if token_mode == "disabled":
            return (0, None, None)

        # If tokens are infinite there are always tokens available.
        if token_mode == "infinite":
            return (-1, None, None)

        # expiration is the timestamp at which all min_intervals for
        # the tokens played up to now have expired (i.e. the first
        # time at which we can play another token). If no tokens have
        # been played so far, this time is the start of the contest.
        expiration = \
            token_timestamps[-1] + token_min_interval \
            if len(token_timestamps) > 0 else start

        # If we already played the total number allowed, we don't have
        # anything left.
        played_tokens = len(token_timestamps)
        if token_max_number is not None and played_tokens >= token_max_number:
            return (0, None, None)

        # avail is the current number of available tokens. We are
        # going to rebuild all the history to know how many of them we
        # have now.
        # We start with the initial number (it's already capped to max
        # by the DB). token_gen_initial can be ignored after this.
        avail = token_gen_initial

        def generate_tokens(prev_time, next_time):
            """Compute how many tokens have been generated between the
            two timestamps.

            prev_time (datetime): timestamp of begin of interval.
            next_time (datetime): timestamp of end of interval.

            return (int): number of tokens generated.

            """
            # How many generation times we passed from start to
            # the previous considered time?
            before_prev = int((prev_time - start).total_seconds()
                              / token_gen_interval.total_seconds())
            # And from start to the current considered time?
            before_next = int((next_time - start).total_seconds()
                              / token_gen_interval.total_seconds())
            # So...
            return token_gen_number * (before_next - before_prev)

        # Previous time we considered
        prev_token = start

        # Simulating!
        for token in token_timestamps:
            # Increment the number of tokens because of generation.
            avail += generate_tokens(prev_token, token)
            if token_gen_max is not None:
                avail = min(avail, token_gen_max)

            # Play the token.
            avail -= 1

            prev_token = token

        avail += generate_tokens(prev_token, timestamp)
        if token_gen_max is not None:
            avail = min(avail, token_gen_max)

        # Compute the time in which the next token will be generated.
        next_gen_time = None
        if token_gen_number > 0 and \
                (token_gen_max is None or avail < token_gen_max):
            next_gen_time = \
                start + token_gen_interval * \
                int((timestamp - start).total_seconds() /
                    token_gen_interval.total_seconds() + 1)

        # If we have more tokens than how many we are allowed to play,
        # cap it, and note that no more will be generated.
        if token_max_number is not None:
            if avail >= token_max_number - played_tokens:
                avail = token_max_number - played_tokens
                next_gen_time = None

        return (avail,
                next_gen_time,
                expiration if expiration > timestamp else None)

    def tokens_available(self, participation, task, timestamp=None):
        """Return three pieces of data:

        [0] the number of available tokens for the user to play on the
            task (independently from the fact that (s)he can play it
            right now or not due to a min_interval waiting for
            expiration); -1 means infinite tokens;

        [1] the next time in which a token will be generated (or
            None); from the user perspective, i.e.: if the user will
            do nothing, [1] is the first time in which his number of
            available tokens will be greater than [0];

        [2] the time when the min_interval will expire, or None

        In particular, let r the return value of this method. We can
        sketch the code in the following way.:

        if r[0] > 0 or r[0] == -1:
            we have tokens
            if r[2] is None:
                we can play a token
            else:
                we must wait till r[2] to play a token
            if r[1] is not None:
                next one will be generated at r[1]
            else:
                no other tokens will be generated (max/total reached ?)
        else:
            we don't have tokens right now
            if r[1] is not None:
                next one will be generated at r[1]
                if r[2] is not None and r[2] > r[1]:
                    but we must wait also till r[2] to play it
            else:
                no other tokens will be generated (max/total reached ?)

        Note also that this method assumes that all played tokens were
        regularly played, and that there are no tokens played in the
        future. Also, if r[0] == 0 and r[1] is None, then r[2] should
        be ignored.

        participation (Participation): the participation.
        task (Task): the task.
        timestamp (datetime|None): the time relative to which making
            the calculation, or None to use now.

        return ((int, datetime|None, datetime|None)): see description
            above.

        """
        if timestamp is None:
            timestamp = make_datetime()

        # Take the list of the tokens already played (sorted by time).
        tokens = participation.get_tokens()
        token_timestamps_contest = sorted([token.timestamp
                                           for token in tokens])
        token_timestamps_task = sorted([
            token.timestamp for token in tokens
            if token.submission.task.name == task.name])

        # If the contest is USACO-style (i.e., the time for each user
        # start when he/she logs in for the first time), then we start
        # accumulating tokens from the user starting time; otherwise,
        # from the start of the contest.
        start = self.start
        if self.per_user_time is not None:
            start = participation.starting_time

        # Compute separately for contest-wise and task-wise.
        res_contest = Contest._tokens_available(
            token_timestamps_contest, self.token_mode,
            self.token_max_number, self.token_min_interval,
            self.token_gen_initial, self.token_gen_number,
            self.token_gen_interval, self.token_gen_max, start, timestamp)
        res_task = Contest._tokens_available(
            token_timestamps_task, task.token_mode,
            task.token_max_number, task.token_min_interval,
            task.token_gen_initial, task.token_gen_number,
            task.token_gen_interval, task.token_gen_max, start, timestamp)

        # Merge the results.

        # First, the "expiration".
        if res_contest[2] is None:
            expiration = res_task[2]
        elif res_task[2] is None:
            expiration = res_contest[2]
        else:
            expiration = max(res_task[2], res_contest[2])

        # Then, check if both are infinite.
        if res_contest[0] == -1 and res_task[0] == -1:
            res = (-1, None, expiration)
        # Else, "combine" them appropriately.
        else:
            # Having infinite contest tokens, in this situation, is the
            # same as having a finite number that is strictly greater
            # than the task tokens. The same holds the other way, too.
            if res_contest[0] == -1:
                res_contest = (res_task[0] + 1, None, None)
            if res_task[0] == -1:
                res_task = (res_contest[0] + 1, None, None)

            # About next token generation time: we need to see when the
            # *minimum* between res_contest[0] and res_task[0] is
            # increased by one, so if there is an actual minimum we
            # need to consider only the next generation time for it.
            # Otherwise, if they are equal, we need both to generate an
            # additional token and we store the maximum between the two
            # next times of generation.
            if res_contest[0] < res_task[0]:
                # We have more task-tokens than contest-tokens.
                # We just need a contest-token to be generated.
                res = (res_contest[0], res_contest[1], expiration)
            elif res_task[0] < res_contest[0]:
                # We have more contest-tokens than task-tokens.
                # We just need a task-token to be generated.
                res = (res_task[0], res_task[1], expiration)
            else:
                # Darn, we need both!
                if res_contest[1] is None or res_task[1] is None:
                    res = (res_task[0], None, expiration)
                else:
                    res = (res_task[0],
                           max(res_contest[1], res_task[1]),
                           expiration)

        return res
class Dataset(Base):
    """Class to store the information about a data set.

    A dataset bundles a task's testcases, managers, limits and the
    task/score type configuration used to judge submissions.

    """
    __tablename__ = 'datasets'
    __table_args__ = (
        UniqueConstraint('task_id', 'description'),
        # Useless, in theory, because 'id' is already unique. Yet, we
        # need this because it's a target of a foreign key.
        UniqueConstraint('id', 'task_id'),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Task (id and object) owning the dataset.
    task_id = Column(Integer,
                     ForeignKey(Task.id,
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     nullable=False)
    task = relationship(Task,
                        foreign_keys=[task_id],
                        back_populates="datasets")

    # A human-readable text describing the dataset.
    description = Column(Unicode, nullable=False)

    # Whether this dataset will be automatically judged by ES and SS
    # "in background", together with the active dataset of each task.
    autojudge = Column(Boolean, nullable=False, default=False)

    # Time and memory limits (in seconds and bytes) for every testcase.
    time_limit = Column(Float,
                        CheckConstraint("time_limit > 0"),
                        nullable=True)
    # The memory limit must be a whole number of mebibytes (1048576 B).
    memory_limit = Column(BigInteger,
                          CheckConstraint("memory_limit > 0"),
                          CheckConstraint("MOD(memory_limit, 1048576) = 0"),
                          nullable=True)

    # Name of the TaskType child class suited for the task.
    task_type = Column(String, nullable=False)

    # Parameters for the task type class.
    task_type_parameters = Column(JSONB, nullable=False)

    # Name of the ScoreType child class suited for the task.
    score_type = Column(String, nullable=False)

    # Parameters for the score type class.
    score_type_parameters = Column(JSONB, nullable=False)

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    managers = relationship(
        "Manager",
        collection_class=attribute_mapped_collection("filename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="dataset")

    testcases = relationship(
        "Testcase",
        collection_class=attribute_mapped_collection("codename"),
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="dataset")

    @property
    def active(self):
        """Shorthand for detecting if the dataset is active.

        return (bool): True if this dataset is the active one for its
            task.

        """
        return self is self.task.active_dataset

    @property
    def task_type_object(self):
        # Lazily build (and cache) the TaskType object corresponding
        # to the current task_type / task_type_parameters, refreshing
        # the cache whenever either of them changes.
        if not hasattr(self, "_cached_task_type_object") \
                or self.task_type != self._cached_task_type \
                or (self.task_type_parameters
                        != self._cached_task_type_parameters):
            # Import late to avoid a circular dependency.
            from cms.grading.tasktypes import get_task_type
            # This can raise.
            self._cached_task_type_object = get_task_type(
                self.task_type, self.task_type_parameters)
            # If an exception is raised these updates don't take place:
            # that way, next time this property is accessed, we get a
            # cache miss again and the same exception is raised again.
            self._cached_task_type = self.task_type
            # Deep-copy so later in-place edits of the parameters are
            # detected as a cache miss by the comparison above.
            self._cached_task_type_parameters = \
                copy.deepcopy(self.task_type_parameters)
        return self._cached_task_type_object

    @property
    def score_type_object(self):
        # Lazily build (and cache) the ScoreType object; the cache is
        # also invalidated when the set of public testcases changes,
        # since score types depend on it.
        public_testcases = {k: tc.public
                            for k, tc in self.testcases.items()}
        if not hasattr(self, "_cached_score_type_object") \
                or self.score_type != self._cached_score_type \
                or (self.score_type_parameters
                        != self._cached_score_type_parameters) \
                or public_testcases != self._cached_public_testcases:
            # Import late to avoid a circular dependency.
            from cms.grading.scoretypes import get_score_type
            # This can raise.
            self._cached_score_type_object = get_score_type(
                self.score_type, self.score_type_parameters,
                public_testcases)
            # If an exception is raised these updates don't take place:
            # that way, next time this property is accessed, we get a
            # cache miss again and the same exception is raised again.
            self._cached_score_type = self.score_type
            # Deep-copy so later in-place edits of the parameters are
            # detected as a cache miss by the comparison above.
            self._cached_score_type_parameters = \
                copy.deepcopy(self.score_type_parameters)
            self._cached_public_testcases = public_testcases
        return self._cached_score_type_object

    def clone_from(self, old_dataset, clone_managers=True,
                   clone_testcases=True, clone_results=False):
        """Overwrite the data with that in dataset.

        old_dataset (Dataset): original dataset to copy from.
        clone_managers (bool): copy dataset managers.
        clone_testcases (bool): copy dataset testcases.
        clone_results (bool): copy submission results (will also copy
            managers and testcases).

        """
        new_testcases = dict()
        if clone_testcases or clone_results:
            for old_t in old_dataset.testcases.values():
                new_t = old_t.clone()
                new_t.dataset = self
                # Keep a codename -> clone map so cloned evaluations
                # below can be re-linked to the cloned testcases.
                new_testcases[new_t.codename] = new_t

        if clone_managers or clone_results:
            for old_m in old_dataset.managers.values():
                new_m = old_m.clone()
                new_m.dataset = self

        # TODO: why is this needed?
        self.sa_session.flush()

        if clone_results:
            old_results = self.get_submission_results(old_dataset)

            for old_sr in old_results:
                # Create the submission result.
                new_sr = old_sr.clone()
                new_sr.submission = old_sr.submission
                new_sr.dataset = self

                # Create executables.
                for old_e in old_sr.executables.values():
                    new_e = old_e.clone()
                    new_e.submission_result = new_sr

                # Create evaluations.
                for old_e in old_sr.evaluations:
                    new_e = old_e.clone()
                    new_e.submission_result = new_sr
                    new_e.testcase = new_testcases[old_e.codename]

        self.sa_session.flush()
nullable=False, server_default=text('False')), schema="application") groups = Table('groups', metadata, Column('group_name', UnicodeText, nullable=False, primary_key=True), Column('group_description', UnicodeText), Column('group_kind', UnicodeText, nullable=False, primary_key=True), CheckConstraint("group_kind IN ('instance', 'role')"), schema="application") instances = Table('instances', metadata, Column('agent_address', UnicodeText, nullable=False, primary_key=True), Column('agent_port', Integer, nullable=False, primary_key=True), Column('agent_key', UnicodeText), Column('hostname', UnicodeText, nullable=False), Column('cpu', Integer),
class FuncKeyDestGroupMember(Base):
    # Class to store a function key whose destination is a group
    # membership action (join/leave/toggle a group).

    # Identifier of this destination type in the func_key tables.
    DESTINATION_TYPE_ID = 13

    __tablename__ = 'func_key_dest_groupmember'
    __table_args__ = (
        PrimaryKeyConstraint('func_key_id', 'destination_type_id'),
        ForeignKeyConstraint(['func_key_id', 'destination_type_id'],
                             ['func_key.id',
                              'func_key.destination_type_id']),
        UniqueConstraint('group_id', 'extension_id'),
        # Rows of this table must all carry this destination type.
        CheckConstraint(
            'destination_type_id = {}'.format(DESTINATION_TYPE_ID)),
    )

    func_key_id = Column(Integer)
    destination_type_id = Column(
        Integer, server_default="{}".format(DESTINATION_TYPE_ID))
    group_id = Column(Integer,
                      ForeignKey('groupfeatures.id'),
                      nullable=False)
    extension_id = Column(Integer,
                          ForeignKey('extensions.id'),
                          nullable=False)

    # Discriminator-style label for this destination kind.
    type = 'groupmember'

    func_key = relationship(FuncKey,
                            cascade='all,delete-orphan',
                            single_parent=True)
    group = relationship(GroupFeatures)
    extension = relationship(Extension, viewonly=True)
    # Proxy to the related extension's 'typeval' attribute.
    extension_typeval = association_proxy(
        'extension', 'typeval',
        # Only to keep value persistent in the instance
        creator=lambda _typeval: Extension(type='extenfeatures',
                                           typeval=_typeval))

    def to_tuple(self):
        # Canonical (key, value) representation of this destination.
        return (
            ('action', self.action),
            ('group_id', self.group_id),
        )

    @hybrid_property
    def action(self):
        # Map the stored extension typeval to the short action name;
        # unknown typevals are returned unchanged.
        ACTIONS = {'groupmemberjoin': 'join',
                   'groupmemberleave': 'leave',
                   'groupmembertoggle': 'toggle'}
        return ACTIONS.get(self.extension_typeval, self.extension_typeval)

    @action.expression
    def action(cls):
        # SQL-side expression: exposes the raw typeval.
        return cls.extension_typeval  # only used to pass test

    @action.setter
    def action(self, value):
        # Inverse mapping of the getter: short action name back to the
        # stored typeval; unknown values are stored unchanged.
        TYPEVALS = {'join': 'groupmemberjoin',
                    'leave': 'groupmemberleave',
                    'toggle': 'groupmembertoggle'}
        self.extension_typeval = TYPEVALS.get(value, value)
class Contest(Base):
    """Class to store a contest (which is a single day of a
    programming competition).

    """
    __tablename__ = 'contests'
    __table_args__ = (
        CheckConstraint("start <= stop"),
        CheckConstraint("stop <= analysis_start"),
        CheckConstraint("analysis_start <= analysis_stop"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Short name of the contest.
    name = Column(Unicode, CodenameConstraint("name"),
                  nullable=False, unique=True)

    # Description of the contest (human readable).
    description = Column(Unicode, nullable=False)

    # The list of language codes of the localizations that contestants
    # are allowed to use (empty means all).
    allowed_localizations = Column(ARRAY(String), nullable=False, default=[])

    # The list of names of languages allowed in the contest.
    languages = Column(ARRAY(String), nullable=False,
                       default=["C11 / gcc", "C++11 / g++", "Pascal / fpc"])

    # Whether contestants allowed to download their submissions.
    submissions_download_allowed = Column(Boolean, nullable=False,
                                          default=True)

    # Whether the user question is enabled.
    allow_questions = Column(Boolean, nullable=False, default=True)

    # Whether the user test interface is enabled.
    allow_user_tests = Column(Boolean, nullable=False, default=True)

    # Whether to prevent hidden participations to log in.
    block_hidden_participations = Column(Boolean, nullable=False,
                                         default=False)

    # Whether to allow username/password authentication
    allow_password_authentication = Column(Boolean, nullable=False,
                                           default=True)

    # Whether to enforce that the IP address of the request matches
    # the IP address or subnet specified for the participation (if
    # present).
    ip_restriction = Column(Boolean, nullable=False, default=True)

    # Whether to automatically log in users connecting from an IP
    # address specified in the ip field of a participation to this
    # contest.
    ip_autologin = Column(Boolean, nullable=False, default=False)

    # The parameters that control contest-tokens follow. Note that
    # their effect during the contest depends on the interaction with
    # the parameters that control task-tokens, defined on each Task.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(Enum(TOKEN_MODE_DISABLED, TOKEN_MODE_FINITE,
                             TOKEN_MODE_INFINITE, name="token_mode"),
                        nullable=False,
                        # Use the named constant rather than the bare
                        # string "infinite", consistent with the Enum
                        # declaration above and the Task model.
                        default=TOKEN_MODE_INFINITE)

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on all tasks).
    token_max_number = Column(Integer,
                              CheckConstraint("token_max_number > 0"),
                              nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on any task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(Integer,
                               CheckConstraint("token_gen_initial >= 0"),
                               nullable=False,
                               default=2)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False,
                              default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(Integer,
                           CheckConstraint("token_gen_max > 0"),
                           nullable=True)

    # Beginning and ending of the contest.
    start = Column(DateTime, nullable=False, default=datetime(2000, 1, 1))
    stop = Column(DateTime, nullable=False, default=datetime(2030, 1, 1))

    # Beginning and ending of the contest analysis mode.
    analysis_enabled = Column(Boolean, nullable=False, default=False)
    analysis_start = Column(DateTime, nullable=False,
                            default=datetime(2030, 1, 1))
    analysis_stop = Column(DateTime, nullable=False,
                           default=datetime(2030, 1, 1))

    # Timezone for the contest. All timestamps in CWS will be shown
    # using the timezone associated to the logged-in user or (if it's
    # None or an invalid string) the timezone associated to the
    # contest or (if it's None or an invalid string) the local
    # timezone of the server. This value has to be a string like
    # "Europe/Rome", "Australia/Sydney", "America/New_York", etc.
    timezone = Column(Unicode, nullable=True)

    # Max contest time for each user in seconds.
    per_user_time = Column(Interval,
                           CheckConstraint("per_user_time >= '0 seconds'"),
                           nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # during the whole contest or None to not enforce this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(Integer,
                                  CheckConstraint("max_user_test_number > 0"),
                                  nullable=True)

    # Minimum interval between two submissions or user_tests, or None to
    # not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this contest will be rounded to this number of
    # decimal places.
    score_precision = Column(Integer,
                             CheckConstraint("score_precision >= 0"),
                             nullable=False,
                             default=0)

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    tasks = relationship("Task",
                         collection_class=ordering_list("num"),
                         order_by="[Task.num]",
                         cascade="all",
                         passive_deletes=True,
                         back_populates="contest")

    announcements = relationship("Announcement",
                                 order_by="[Announcement.timestamp]",
                                 cascade="all, delete-orphan",
                                 passive_deletes=True,
                                 back_populates="contest")

    participations = relationship("Participation",
                                  cascade="all, delete-orphan",
                                  passive_deletes=True,
                                  back_populates="contest")

    # Moreover, we have the following methods.
    # get_submissions (defined in __init__.py)
    # get_submission_results (defined in __init__.py)
    # get_user_tests (defined in __init__.py)
    # get_user_test_results (defined in __init__.py)

    def enumerate_files(self, skip_submissions=False, skip_user_tests=False,
                        skip_generated=False):
        """Enumerate all the files (by digest) referenced by the
        contest.

        return (set): a set of strings, the digests of the file
            referenced in the contest.

        """
        # Here we cannot use yield, because we want to detect
        # duplicates
        files = set()
        for task in self.tasks:
            # Enumerate statements
            for file_ in itervalues(task.statements):
                files.add(file_.digest)

            # Enumerate attachments
            for file_ in itervalues(task.attachments):
                files.add(file_.digest)

            # Enumerate managers
            for dataset in task.datasets:
                for file_ in itervalues(dataset.managers):
                    files.add(file_.digest)

            # Enumerate testcases
            for dataset in task.datasets:
                for testcase in itervalues(dataset.testcases):
                    files.add(testcase.input)
                    files.add(testcase.output)

        if not skip_submissions:
            for submission in self.get_submissions():
                # Enumerate files
                for file_ in itervalues(submission.files):
                    files.add(file_.digest)

                # Enumerate executables
                if not skip_generated:
                    for sr in submission.results:
                        for file_ in itervalues(sr.executables):
                            files.add(file_.digest)

        if not skip_user_tests:
            for user_test in self.get_user_tests():
                files.add(user_test.input)

                if not skip_generated:
                    for ur in user_test.results:
                        if ur.output is not None:
                            files.add(ur.output)

                # Enumerate files
                for file_ in itervalues(user_test.files):
                    files.add(file_.digest)

                # Enumerate managers
                for file_ in itervalues(user_test.managers):
                    files.add(file_.digest)

                # Enumerate executables
                if not skip_generated:
                    for ur in user_test.results:
                        for file_ in itervalues(ur.executables):
                            files.add(file_.digest)

        return files

    def phase(self, timestamp):
        """Return: -1 if contest isn't started yet at time timestamp,
                    0 if the contest is active at time timestamp,
                    1 if the contest has ended but analysis mode
                      hasn't started yet
                    2 if the contest has ended and analysis mode is active
                    3 if the contest has ended and analysis mode is
                      disabled or has ended

        timestamp (datetime): the time we are interested in.

        return (int): contest phase as above.

        """
        if timestamp < self.start:
            return -1
        if timestamp <= self.stop:
            return 0
        if self.analysis_enabled:
            if timestamp < self.analysis_start:
                return 1
            elif timestamp <= self.analysis_stop:
                return 2
        return 3
def test_check_constraint_naming_convention(self, sqltext, expected):
    """Check that the naming-convention helper derives the expected
    constraint name for a CHECK constraint bound to a bare table."""
    constraint = CheckConstraint(sqltext)
    account_table = Table('account', MetaData())
    generated = check_constraint_naming_convention(constraint, account_table)
    assert generated == expected
class Task(Base):
    """Class to store a task.

    """
    __tablename__ = 'tasks'
    __table_args__ = (
        UniqueConstraint('contest_id', 'num'),
        UniqueConstraint('contest_id', 'name'),
        ForeignKeyConstraint(
            ("id", "active_dataset_id"),
            ("datasets.task_id", "datasets.id"),
            onupdate="SET NULL", ondelete="SET NULL",
            # Use an ALTER query to set this foreign key after
            # both tables have been CREATEd, to avoid circular
            # dependencies.
            use_alter=True,
            name="fk_active_dataset_id"),
        CheckConstraint("token_gen_initial <= token_gen_max"),
    )

    # Auto increment primary key.
    id = Column(
        Integer,
        primary_key=True,
        # Needed to enable autoincrement on integer primary keys that
        # are referenced by a foreign key defined on this table.
        autoincrement='ignore_fk')

    # Number of the task for sorting.
    num = Column(Integer, nullable=True)

    # Contest (id and object) owning the task.
    contest_id = Column(Integer,
                        ForeignKey(Contest.id,
                                   onupdate="CASCADE", ondelete="CASCADE"),
                        nullable=True,
                        index=True)
    # Use back_populates to pair with Contest.tasks: Contest already
    # declares the "tasks" relationship (with ordering_list, cascade and
    # passive_deletes), so creating it again here via backref would make
    # SQLAlchemy raise a duplicate-property error at mapper configuration.
    contest = relationship(Contest, back_populates="tasks")

    # Short name and long human readable title of the task.
    name = Column(Unicode, CodenameConstraint("name"),
                  nullable=False, unique=True)
    title = Column(Unicode, nullable=False)

    # The language codes of the statements that will be highlighted to
    # all users for this task.
    primary_statements = Column(ARRAY(String), nullable=False, default=[])

    # The parameters that control task-tokens follow. Note that their
    # effect during the contest depends on the interaction with the
    # parameters that control contest-tokens, defined on the Contest.

    # The "kind" of token rules that will be active during the contest.
    # - disabled: The user will never be able to use any token.
    # - finite: The user has a finite amount of tokens and can choose
    #   when to use them, subject to some limitations. Tokens may not
    #   be all available at start, but given periodically during the
    #   contest instead.
    # - infinite: The user will always be able to use a token.
    token_mode = Column(Enum(TOKEN_MODE_DISABLED, TOKEN_MODE_FINITE,
                             TOKEN_MODE_INFINITE, name="token_mode"),
                        nullable=False,
                        # Use the named constant rather than the bare
                        # string "disabled", consistent with the Enum
                        # declaration above.
                        default=TOKEN_MODE_DISABLED)

    # The maximum number of tokens a contestant is allowed to use
    # during the whole contest (on this tasks).
    token_max_number = Column(Integer,
                              CheckConstraint("token_max_number > 0"),
                              nullable=True)

    # The minimum interval between two successive uses of tokens for
    # the same user (on this task).
    token_min_interval = Column(
        Interval,
        CheckConstraint("token_min_interval >= '0 seconds'"),
        nullable=False,
        default=timedelta())

    # The parameters that control generation (if mode is "finite"):
    # the user starts with "initial" tokens and receives "number" more
    # every "interval", but their total number is capped to "max".
    token_gen_initial = Column(Integer,
                               CheckConstraint("token_gen_initial >= 0"),
                               nullable=False,
                               default=2)
    token_gen_number = Column(Integer,
                              CheckConstraint("token_gen_number >= 0"),
                              nullable=False,
                              default=2)
    token_gen_interval = Column(
        Interval,
        CheckConstraint("token_gen_interval > '0 seconds'"),
        nullable=False,
        default=timedelta(minutes=30))
    token_gen_max = Column(Integer,
                           CheckConstraint("token_gen_max > 0"),
                           nullable=True)

    # Maximum number of submissions or user_tests allowed for each user
    # on this task during the whole contest or None to not enforce
    # this limitation.
    max_submission_number = Column(
        Integer,
        CheckConstraint("max_submission_number > 0"),
        nullable=True)
    max_user_test_number = Column(Integer,
                                  CheckConstraint("max_user_test_number > 0"),
                                  nullable=True)

    # Minimum interval between two submissions or user_tests for this
    # task, or None to not enforce this limitation.
    min_submission_interval = Column(
        Interval,
        CheckConstraint("min_submission_interval > '0 seconds'"),
        nullable=True)
    min_user_test_interval = Column(
        Interval,
        CheckConstraint("min_user_test_interval > '0 seconds'"),
        nullable=True)

    # The scores for this task will be rounded to this number of
    # decimal places.
    score_precision = Column(Integer,
                             CheckConstraint("score_precision >= 0"),
                             nullable=False,
                             default=0)

    # Score mode for the task.
    score_mode = Column(Enum(SCORE_MODE_MAX_TOKENED_LAST, SCORE_MODE_MAX,
                             name="score_mode"),
                        nullable=False,
                        default=SCORE_MODE_MAX_TOKENED_LAST)

    # Active Dataset (id and object) currently being used for scoring.
    # The ForeignKeyConstraint for this column is set at table-level.
    active_dataset_id = Column(Integer, nullable=True)
    active_dataset = relationship(
        'Dataset',
        foreign_keys=[active_dataset_id],
        # Use an UPDATE query *after* an INSERT query (and *before* a
        # DELETE query) to set (and unset) the column associated to
        # this relationship.
        post_update=True)
class Group(Base):
    """Class to store a group of users (for timing, etc.).

    """
    __tablename__ = 'group'
    __table_args__ = (
        UniqueConstraint('contest_id', 'name'),
        CheckConstraint("start <= stop"),
        CheckConstraint("stop <= analysis_start"),
        CheckConstraint("analysis_start <= analysis_stop"),
    )

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # Name of the group (unique within its contest, per __table_args__).
    name = Column(Unicode, nullable=False)

    # Beginning and ending of the contest.
    start = Column(DateTime, nullable=False, default=datetime(2000, 1, 1))
    stop = Column(DateTime, nullable=False, default=datetime(2100, 1, 1))

    # Beginning and ending of the contest analysis mode.
    analysis_enabled = Column(Boolean, nullable=False, default=False)
    analysis_start = Column(DateTime, nullable=False,
                            default=datetime(2100, 1, 1))
    analysis_stop = Column(DateTime, nullable=False,
                           default=datetime(2100, 1, 1))

    # Max contest time for each user in seconds.
    per_user_time = Column(Interval,
                           CheckConstraint("per_user_time >= '0 seconds'"),
                           nullable=True)

    # Contest (id and object) to which this user group belongs.
    contest_id = Column(
        Integer,
        ForeignKey(Contest.id, onupdate="CASCADE", ondelete="CASCADE"),
        # NOTE(review): nullable=False is deliberately commented out here,
        # so contest_id may be NULL — confirm whether that is intended.
        # nullable=False,
        index=True)
    contest = relationship(Contest,
                           backref=backref('groups',
                                           cascade="all, delete-orphan",
                                           passive_deletes=True),
                           primaryjoin="Contest.id==Group.contest_id")

    def phase(self, timestamp):
        """Return: -1 if contest isn't started yet at time timestamp,
                    0 if the contest is active at time timestamp,
                    1 if the contest has ended but analysis mode
                      hasn't started yet
                    2 if the contest has ended and analysis mode is active
                    3 if the contest has ended and analysis mode is
                      disabled or has ended

        timestamp (datetime): the time we are iterested in.

        return (int): contest phase as above.

        """
        if timestamp < self.start:
            return -1
        if timestamp <= self.stop:
            return 0
        if self.analysis_enabled:
            if timestamp < self.analysis_start:
                return 1
            elif timestamp <= self.analysis_stop:
                return 2
        return 3
class Participation(Base):
    """Class to store a single participation of a user in a contest.

    """
    __tablename__ = 'participations'

    # Auto increment primary key.
    id = Column(Integer, primary_key=True)

    # If the IP lock is enabled the user can log into CWS only if their
    # requests come from an IP address that belongs to any of these
    # subnetworks. An empty list prevents the user from logging in,
    # None disables the IP lock for the user.
    ip = Column(CastingArray(CIDR), nullable=True)

    # Starting time: for contests where every user has at most x hours
    # of the y > x hours totally available, this is the time the user
    # decided to start their time-frame.
    starting_time = Column(DateTime, nullable=True)

    # A shift in the time interval during which the user is allowed to
    # submit.
    delay_time = Column(Interval,
                        CheckConstraint("delay_time >= '0 seconds'"),
                        nullable=False,
                        default=timedelta())

    # An extra amount of time allocated for this user.
    extra_time = Column(Interval,
                        CheckConstraint("extra_time >= '0 seconds'"),
                        nullable=False,
                        default=timedelta())

    # Contest-specific password. If this password is not null then the
    # traditional user.password field will be "replaced" by this field's
    # value (only for this participation).
    password = Column(Unicode, nullable=True)

    # A hidden participation (e.g. does not appear in public rankings), can
    # also be used for debugging purposes.
    hidden = Column(Boolean, nullable=False, default=False)

    # An unofficial participation (e.g. can be hidden in public rankings).
    unofficial = Column(Boolean, nullable=False, default=False)

    # An unrestricted participation (e.g. contest time,
    # maximum number of submissions, minimum interval between submissions,
    # maximum number of user tests, minimum interval between user tests),
    # can also be used for debugging purposes.
    unrestricted = Column(Boolean, nullable=False, default=False)

    # Contest (id and object) to which the user is participating.
    contest_id = Column(Integer,
                        ForeignKey(Contest.id,
                                   onupdate="CASCADE", ondelete="CASCADE"),
                        nullable=False,
                        index=True)
    contest = relationship(Contest, back_populates="participations")

    # User (id and object) which is participating.
    user_id = Column(Integer,
                     ForeignKey(User.id,
                                onupdate="CASCADE", ondelete="CASCADE"),
                     nullable=False,
                     index=True)
    user = relationship(User, back_populates="participations")

    # A user may participate in a given contest at most once.
    __table_args__ = (UniqueConstraint('contest_id', 'user_id'), )

    # Group this user belongs to
    group_id = Column(Integer,
                      ForeignKey(Group.id,
                                 onupdate="CASCADE", ondelete="CASCADE"),
                      nullable=False,
                      index=True)
    group = relationship(Group,
                         backref=backref("participations",
                                         cascade="all, delete-orphan",
                                         passive_deletes=True))

    # Team (id and object) that the user is representing with this
    # participation.
    team_id = Column(Integer,
                     ForeignKey(Team.id,
                                onupdate="CASCADE", ondelete="RESTRICT"),
                     nullable=True)
    team = relationship(Team, back_populates="participations")

    # These one-to-many relationships are the reversed directions of
    # the ones defined in the "child" classes using foreign keys.

    messages = relationship("Message",
                            order_by="[Message.timestamp]",
                            cascade="all, delete-orphan",
                            passive_deletes=True,
                            back_populates="participation")

    questions = relationship(
        "Question",
        order_by="[Question.question_timestamp, Question.reply_timestamp]",
        cascade="all, delete-orphan",
        passive_deletes=True,
        back_populates="participation")

    submissions = relationship("Submission",
                               cascade="all, delete-orphan",
                               passive_deletes=True,
                               back_populates="participation")

    user_tests = relationship("UserTest",
                              cascade="all, delete-orphan",
                              passive_deletes=True,
                              back_populates="participation")

    printjobs = relationship("PrintJob",
                             cascade="all, delete-orphan",
                             passive_deletes=True,
                             back_populates="participation")