Example #1
class JobTagRequirement(db.Model, UtilityMixins):
    """
    Model representing a dependency of a job on a tag

    If a job has a tag requirement, it will only run on agents that have that
    tag.
    """
    __tablename__ = config.get("table_job_tag_req")
    __table_args__ = (UniqueConstraint("tag_id", "job_id"), )

    id = id_column()

    tag_id = db.Column(db.Integer,
                       db.ForeignKey("%s.id" % config.get("table_tag")),
                       nullable=False,
                       doc="Reference to the required tag")

    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % config.get("table_job")),
                       nullable=False,
                       doc="Foreign key to :class:`Job.id`")

    negate = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If true, an agent that has this tag can not work on this job")

    job = db.relationship("Job",
                          backref=db.backref("tag_requirements",
                                             lazy="dynamic",
                                             cascade="all, delete-orphan"))

    tag = db.relationship("Tag")
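A minimal usage sketch, assuming an active application context and existing `job` and `tag` rows (the variable names are illustrative, not part of the model above):

# Prevent `job` from being assigned to agents carrying `tag`.
requirement = JobTagRequirement(job=job, tag=tag, negate=True)
db.session.add(requirement)
# The UniqueConstraint("tag_id", "job_id") means a second requirement for
# the same job/tag pair would fail at commit time.
db.session.commit()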
Example #2
class TaskLog(db.Model, UtilityMixins, ReprMixin):
    """Table which represents a single task log entry"""
    __tablename__ = config.get("table_task_log")
    __table_args__ = (UniqueConstraint("identifier"),)

    id = id_column(db.Integer)

    identifier = db.Column(
        db.String(255),
        nullable=False,
        doc="The identifier for this log")

    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        nullable=True,
        doc="The agent this log was created on")

    created_on = db.Column(
        db.DateTime,
        default=datetime.utcnow,
        doc="The time when this log was created")

    #
    # Relationships
    #
    agent = db.relationship(
        "Agent",
        backref=db.backref("task_logs", lazy="dynamic"),
        doc="Relationship between an :class:`TaskLog`"
            "and the :class:`pyfarm.models.Agent` it was "
            "created on")

    task_associations = db.relationship(
        TaskTaskLogAssociation, backref="log",
        doc="Relationship between tasks and their logs."
    )

    def num_queued_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=None).count()

    def num_running_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.RUNNING).count()

    def num_failed_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.FAILED).count()

    def num_done_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.DONE).count()
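The four helpers above all run the same association query with a different state filter. A short sketch of reading them, assuming an application context and an existing log row (the identifier value is illustrative):

log = TaskLog.query.filter_by(identifier="example-log").first()
if log is not None:
    # Counts are computed per state through TaskTaskLogAssociation.
    print(log.num_queued_tasks(), log.num_running_tasks(),
          log.num_failed_tasks(), log.num_done_tasks())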
Example #3
class JobGroup(db.Model, UtilityMixins):
    """
    Used to group jobs together for better presentation in the UI
    """
    __tablename__ = config.get("table_job_group")

    id = id_column(IDTypeWork)

    title = db.Column(
        db.String(config.get("max_jobgroup_name_length")),
        nullable=False,
        doc="The title of the job group's name")

    main_jobtype_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type")),
        nullable=False,
        doc="ID of the jobtype of the main job in this "
            "group. Purely for display and filtering.")

    user_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_user")),
        doc="The id of the user who owns these jobs")

    #
    # Relationships
    #
    main_jobtype = db.relationship(
        "JobType",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The jobtype of the main job in this group")

    user = db.relationship(
        "User",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The user who owns these jobs")
Example #4
class JobTypeSoftwareRequirement(db.Model, UtilityMixins):
    """
    Model representing a dependency of a job on a software tag, with optional
    version constraints
    """
    __tablename__ = config.get("table_job_type_software_req")
    __table_args__ = (UniqueConstraint("software_id", "jobtype_version_id"), )

    software_id = db.Column(db.Integer,
                            db.ForeignKey("%s.id" %
                                          config.get("table_software")),
                            primary_key=True,
                            doc="Reference to the required software")

    jobtype_version_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type_version")),
        primary_key=True,
        doc="Foreign key to :class:`JobTypeVersion.id`")

    min_version_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        nullable=True,
        doc="Reference to the minimum required version")

    max_version_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        nullable=True,
        doc="Reference to the maximum required version")

    #
    # Relationships
    #
    jobtype_version = db.relationship("JobTypeVersion",
                                      backref=db.backref(
                                          "software_requirements",
                                          lazy="dynamic",
                                          cascade="all, delete-orphan"))

    software = db.relationship("Software")

    min_version = db.relationship("SoftwareVersion",
                                  foreign_keys=[min_version_id])

    max_version = db.relationship("SoftwareVersion",
                                  foreign_keys=[max_version_id])
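A hedged sketch of declaring a version-constrained requirement, assuming `jobtype_version`, `software`, and `version_row` rows already exist in the session (names are illustrative):

# Require the software at `version_row` or newer; leaving max_version unset
# places no upper bound on the accepted version.
requirement = JobTypeSoftwareRequirement(
    jobtype_version=jobtype_version,
    software=software,
    min_version=version_row)
db.session.add(requirement)
db.session.commit()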
Example #5
class PathMap(db.Model, ReprMixin, UtilityMixins):
    """
    Defines a table which is used for cross-platform
    file path mappings.
    """
    __tablename__ = config.get("table_path_map")

    id = id_column(db.Integer)

    path_linux = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on linux platforms")

    path_windows = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on Windows platforms")

    path_osx = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on Mac OS X platforms")

    tag_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_tag")),
        nullable=True,
        doc="The tag an agent needs to have for this path map "
            "to apply to it. "
            "If this is NULL, this path map applies to all "
            "agents, but is overridden by applying path maps "
            "that do specify a tag.")

    #
    # Relationships
    #
    tag = db.relationship(
        "Tag",
        backref=db.backref("path_maps", lazy="dynamic"),
        doc="Relationship attribute for the tag this path map "
            "applies to.")
Example #6
class JobNotifiedUser(db.Model):
    """
    Defines the table containing users to be notified of certain
    events pertaining to jobs.
    """
    __tablename__ = config.get("table_job_notified_users")

    user_id = db.Column(db.Integer,
                        db.ForeignKey("%s.id" % config.get("table_user")),
                        primary_key=True,
                        doc="The id of the user to be notified")

    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % config.get("table_job")),
                       primary_key=True,
                       doc="The id of the associated job")

    on_success = db.Column(
        db.Boolean,
        nullable=False,
        default=True,
        doc="True if a user should be notified on successful "
        "completion of a job")

    on_failure = db.Column(
        db.Boolean,
        nullable=False,
        default=True,
        doc="True if a user should be notified of a job's failure")

    on_deletion = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="True if a user should be notified on deletion of "
        "a job")

    user = db.relationship("User",
                           backref=db.backref("subscribed_jobs",
                                              lazy="dynamic"))
Example #7
class TaskTaskLogAssociation(db.Model):
    """Stores an association between the task table and a task log"""
    __tablename__ = config.get("table_task_log_assoc")
    __table_args__ = (
        PrimaryKeyConstraint("task_log_id", "task_id", "attempt"),)

    task_log_id = db.Column(
        db.Integer,
        db.ForeignKey(
            "%s.id" % config.get("table_task_log"), ondelete="CASCADE"),
        doc="The ID of the task log")

    task_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_task"), ondelete="CASCADE"),
        doc="The ID of the job a task log is associated with")

    attempt = db.Column(
        db.Integer,
        autoincrement=False,
        doc="The attempt number for the given task log")

    state = db.Column(
        WorkStateEnum,
        nullable=True,
        doc="The state of the work being performed")

    #
    # Relationships
    #
    task = db.relationship(
        "Task",
        backref=db.backref(
            "log_associations",
            lazy="dynamic",
            passive_deletes=True))
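Because the primary key is (task_log_id, task_id, attempt), each retry of a task gets its own association row. A hedged sketch, assuming existing `task` and `task_log` rows:

association = TaskTaskLogAssociation(
    task=task, log=task_log,
    attempt=task.attempts,  # tie the log to the current attempt number
    state=None)             # a NULL state counts as queued (see num_queued_tasks)
db.session.add(association)
db.session.commit()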
Example #8
class Software(db.Model, UtilityMixins):
    """
    Model to represent a versioned piece of software that can be present on an
    agent and may be depended on by a job and/or jobtype through the appropriate
    SoftwareRequirement table
    """
    __tablename__ = config.get("table_software")
    __table_args__ = (UniqueConstraint("software"), )

    id = id_column()

    software = db.Column(db.String(config.get("max_tag_length")),
                         nullable=False,
                         doc="The name of the software")

    #
    # Relationships
    #
    versions = db.relationship("SoftwareVersion",
                               backref=db.backref("software"),
                               lazy="dynamic",
                               order_by="asc(SoftwareVersion.rank)",
                               cascade="all, delete-orphan",
                               doc="All known versions of this software")
Example #9
class Task(db.Model, ValidatePriorityMixin, WorkStateChangedMixin,
           UtilityMixins, ReprMixin):
    """
    Defines a task which is a child of a :class:`Job`.  This table represents
    rows which contain the individual work unit(s) for a job.
    """
    __tablename__ = TABLE_TASK
    STATE_ENUM = WorkState
    STATE_DEFAULT = STATE_ENUM.QUEUED
    REPR_COLUMNS = ("id", "state", "frame", "project")
    REPR_CONVERT_COLUMN = {"state": partial(repr_enum, enum=STATE_ENUM)}

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(STATE_DEFAULT, "job.priority")
    project_id = db.Column(db.Integer,
                           db.ForeignKey("%s.id" % TABLE_PROJECT),
                           doc="stores the project id")
    agent_id = db.Column(IDTypeAgent,
                         db.ForeignKey("%s.id" % TABLE_AGENT),
                         doc="Foreign key which stores :attr:`Job.id`")
    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % TABLE_JOB),
                       doc="Foreign key which stores :attr:`Job.id`")
    hidden = db.Column(db.Boolean,
                       default=False,
                       doc=dedent("""
                       hides the task from queue and web ui"""))
    attempts = db.Column(db.Integer,
                         doc=dedent("""
                         The number of attempts which have been made on this
                         task. This value is auto incremented when
                         :attr:`state` changes to a value synonymous with a
                         running state."""))
    frame = db.Column(db.Float,
                      nullable=False,
                      doc=dedent("""
                      The frame the :class:`Task` will be executing."""))

    # relationships
    parents = db.relationship("Task",
                              secondary=TaskDependencies,
                              primaryjoin=id == TaskDependencies.c.parent_id,
                              secondaryjoin=id == TaskDependencies.c.child_id,
                              backref=db.backref("children", lazy="dynamic"))
    project = db.relationship("Project",
                              backref=db.backref("tasks", lazy="dynamic"),
                              doc=dedent("""
                              relationship attribute which retrieves the
                              associated project for the task"""))
    job = db.relationship("Job",
                          backref=db.backref("tasks", lazy="dynamic"),
                          doc=dedent("""
                          relationship attribute which retrieves the
                          associated job for this task"""))

    @staticmethod
    def agentChangedEvent(target, new_value, old_value, initiator):
        """set the state to ASSIGN whenever the agent is changed"""
        if new_value is not None:
            target.state = target.STATE_ENUM.ASSIGN
Example #10
class User(db.Model, UserMixin, ReprMixin):
    """
    Stores information about a user including the roles they belong to
    """
    __tablename__ = TABLE_USERS_USER
    REPR_COLUMNS = ("id", "username")

    id = db.Column(db.Integer, primary_key=True, nullable=False)

    active = db.Column(db.Boolean, default=True,
                       doc=dedent("""
                       Enables or disables a particular user across the
                       entire system"""))

    username = db.Column(
        db.String(MAX_USERNAME_LENGTH), unique=True, nullable=False,
        doc="The username used to login.")

    password = db.Column(db.String(SHA256_ASCII_LENGTH),
                         doc="The password used to login")

    email = db.Column(db.String(MAX_EMAILADDR_LENGTH), unique=True,
                      doc=dedent("""
                      Contact email for registration and possible
                      notifications"""))

    expiration = db.Column(db.DateTime,
                           doc=dedent("""
                           User expiration.  If this value is set then the user
                           will no longer be able to access PyFarm past the
                           expiration."""))

    onetime_code = db.Column(db.String(SHA256_ASCII_LENGTH),
                             doc=dedent("""
                             SHA256 one time use code which can be used for
                             unique urls such as for password resets."""))

    last_login = db.Column(db.DateTime,
                           doc=dedent("""
                           The last date that this user was logged in."""))

    roles = db.relationship("Role", secondary=UserRoles,
                            backref=db.backref("users", lazy="dynamic"))

    projects = db.relationship("Project",
                               secondary=UserProjects,
                               backref=db.backref("users", lazy="dynamic"),
                               lazy="dynamic",
                               doc="The project or projects this user is "
                                   "associated with.  By default a user "
                                   "which is not associated with any projects "
                                   "will be a member of all projects.")

    @classmethod
    def create(cls, username, password, email=None, roles=None):
        # create the list of roles to add
        if roles is None:
            roles = []

        elif isinstance(roles, STRING_TYPES):
            roles = [roles]

        # create the user with the proper initial values
        user = cls(
            username=username,
            password=cls.hash_password(password),
            email=email)
        user.roles.extend(map(Role.create, roles))

        # commit and return
        db.session.add(user)
        db.session.commit()
        return user

    @classmethod
    def get(cls, id_or_username):
        """Get a user model either by id or by the user's username"""
        try:
            id_or_username = int(id_or_username)
        except ValueError:
            pass

        if isinstance(id_or_username, int):
            return cls.query.filter_by(id=id_or_username).first()
        elif isinstance(id_or_username, STRING_TYPES):
            return cls.query.filter_by(username=id_or_username).first()
        else:
            raise TypeError("string or integer required for User.get()")

    @classmethod
    def hash_password(cls, value):
        value = app.secret_key + value

        if PY3:
            value = value.encode("utf-8")

        return sha256(value).hexdigest()

    def get_auth_token(self):
        return login_serializer.dumps([str(self.id), self.password])

    def get_id(self):
        return self.id

    def check_password(self, password):
        """checks the password provided against the stored password"""
        assert isinstance(password, STRING_TYPES)
        return self.hash_password(password) == self.password

    def is_active(self):
        """returns true if the user and the roles it belongs to are active"""
        logger.debug("%(self)s.is_active()" % locals())
        now = datetime.now()

        # user is not active
        if not self.active:
            return False

        # user has expired
        if self.expiration is not None and now > self.expiration:
            return False

        # TODO: there's probably some way to cache this information
        return all(role.is_active() for role in self.roles)

    def has_roles(self, allowed=None, required=None):
        """checks the provided arguments against the roles assigned"""
        if not allowed and not required:
            return True

        allowed = split_and_extend(allowed)
        required = split_and_extend(required)

        logger.debug(
            "%(self)s.has_roles(allowed=%(allowed)s, required=%(required)s)"
            % locals())

        if allowed:
            # Ask the database if the user has any of the allowed roles.  For
            # smaller numbers of roles this is very slightly slower with
            # SQLite in :memory: but is a good amount faster over the network
            # or with large role sets.
            return bool(
                User.query.filter(
                    User.roles.any(
                        Role.name.in_(allowed))
                ).filter(User.id == self.id).count())

        if required:
            # Ask the database for all roles matching ``required``.  In order
            # for this to return True, the number of entries found must
            # be equal to len(required).
            count = Role.query.filter(
                Role.name.in_(required)).filter(User.id == self.id).count()
            return count == len(required)
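A short sketch of the two query strategies above in use, assuming the user and its roles already exist (role names are illustrative):

user = User.get("operator1")
if user is not None and user.has_roles(allowed=["admin", "operator"]):
    pass  # the user holds at least one of the allowed roles
if user is not None and user.has_roles(required=["admin", "auditor"]):
    pass  # the user holds every one of the required roles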
Example #11
class Agent(db.Model, ValidatePriorityMixin, UtilityMixins, ReprMixin):
    """
    Stores information about an agent including its network address,
    state, allocation configuration, etc.

    .. note::
        This table enforces two forms of uniqueness.  The :attr:`id` column
        must be unique and the combination of these columns must also be
        unique to limit the frequency of duplicate data:

            * :attr:`hostname`
            * :attr:`ip`
            * :attr:`port`

    """
    __tablename__ = TABLE_AGENT
    __table_args__ = (UniqueConstraint("hostname", "ip", "port"), )
    STATE_DEFAULT = "online"
    REPR_COLUMNS = (
        "id", "hostname", "state", "ip", "remote_ip", "port", "cpus",
        "ram", "free_ram")
    REPR_CONVERT_COLUMN = {
        "ip": repr_ip,
        "remote_ip": repr_ip,
        "state": repr}
    MIN_PORT = read_env_int("PYFARM_AGENT_MIN_PORT", 1024)
    MAX_PORT = read_env_int("PYFARM_AGENT_MAX_PORT", 65535)
    MIN_CPUS = read_env_int("PYFARM_AGENT_MIN_CPUS", 1)
    MAX_CPUS = read_env_int("PYFARM_AGENT_MAX_CPUS", 256)
    MIN_RAM = read_env_int("PYFARM_AGENT_MIN_RAM", 16)
    MAX_RAM = read_env_int("PYFARM_AGENT_MAX_RAM", 262144)

    # quick check of the configured data
    assert MIN_PORT >= 1, "$PYFARM_AGENT_MIN_PORT must be > 0"
    assert MAX_PORT >= 1, "$PYFARM_AGENT_MAX_PORT must be > 0"
    assert MAX_PORT >= MIN_PORT, "MIN_PORT must be <= MAX_PORT"
    assert MIN_CPUS >= 1, "$PYFARM_AGENT_MIN_CPUS must be > 0"
    assert MAX_CPUS >= 1, "$PYFARM_AGENT_MAX_CPUS must be > 0"
    assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
    assert MIN_RAM >= 1, "$PYFARM_AGENT_MIN_RAM must be > 0"
    assert MAX_RAM >= 1, "$PYFARM_AGENT_MAX_RAM must be > 0"
    assert MAX_RAM >= MIN_RAM, "MIN_RAM must be <= MAX_RAM"

    id = id_column(IDTypeAgent)

    # basic host attribute information
    hostname = db.Column(db.String(MAX_HOSTNAME_LENGTH), nullable=False,
                         doc=dedent("""
                         The hostname we should use to talk to this host.
                         Preferably this value will be the fully qualified
                         name instead of the base hostname alone."""))
    ip = db.Column(IPv4Address, nullable=True,
                   doc="The IPv4 network address this host resides on")
    remote_ip = db.Column(IPv4Address, nullable=True,
                          doc="the remote address which came in with the "
                              "request")
    use_address = db.Column(UseAgentAddressEnum, nullable=False,
                            default="remote",
                            doc="The address we should use when communicating "
                                "with the agent")
    ram = db.Column(db.Integer, nullable=False,
                    doc="The amount of ram installed on the agent in megabytes")
    free_ram = db.Column(db.Integer, nullable=False,
                         doc="The amount of ram which was last considered free")
    cpus = db.Column(db.Integer, nullable=False,
                     doc="The number of cpus installed on the agent")
    port = db.Column(db.Integer, nullable=False,
                     doc="The port the agent is currently running on")
    time_offset = db.Column(db.Integer, nullable=False, default=0,
                            doc="the offset in seconds the agent is from "
                                "an official time server")

    # host state
    state = db.Column(AgentStateEnum, default=AgentState.ONLINE,
                      nullable=False,
                      doc=dedent("""
                      Stores the current state of the host.  This value can be
                      changed either by a master telling the host to do
                      something with a task or from the host via REST api."""))

    # Max allocation of the two primary resources which `1.0` is 100%
    # allocation.  For `cpu_allocation` 100% allocation typically means
    # one task per cpu.
    ram_allocation = db.Column(db.Float,
                               default=read_env_number(
                                   "PYFARM_AGENT_RAM_ALLOCATION", .8),
                               doc=dedent("""
                               The amount of ram the agent is allowed to
                               allocate towards work.  A value of 1.0 would
                               mean to let the agent use all of the memory
                               installed on the system when assigning work."""))

    cpu_allocation = db.Column(db.Float,
                               default=read_env_number(
                                   "PYFARM_AGENT_CPU_ALLOCATION", 1.0),
                               doc=dedent("""
                               The total amount of cpu space an agent is
                               allowed to process work in.  A value of 1.0
                               would mean an agent can handle as much work
                               as the system could handle given the
                               requirements of a task.  For example if an agent
                               has 8 cpus, cpu_allocation is .5, and a task
                               requires 4 cpus then only that task will run
                               on the system."""))

    # relationships
    tasks = db.relationship("Task", backref="agent", lazy="dynamic",
                            doc=dedent("""
                            Relationship between an :class:`Agent`
                            and any :class:`pyfarm.models.Task`
                            objects"""))
    tags = db.relationship("Tag", secondary=AgentTagAssociation,
                            backref=db.backref("agents", lazy="dynamic"),
                            lazy="dynamic",
                            doc="Tags associated with this agent")
    software = db.relationship("Software",
                               secondary=AgentSoftwareAssociation,
                               backref=db.backref("agents", lazy="dynamic"),
                               lazy="dynamic",
                               doc="software this agent has installed or is "
                                   "configured for")
    projects = db.relationship("Project",
                               secondary=AgentProjects,
                               backref=db.backref("agents", lazy="dynamic"),
                               lazy="dynamic",
                               doc="The project or projects this agent is "
                                   "associated with.  By default an agent "
                                   "which is not associated with any projects "
                                   "will be a member of all projects.")

    @classmethod
    def validate_hostname(cls, key, value):
        """
        Ensures that the hostname provided by `value` matches a regular
        expression that expresses what a valid hostname is.
        """
        # ensure hostname does not contain characters we can't use
        if not REGEX_HOSTNAME.match(value):
            raise ValueError("%s is not valid for %s" % (value, key))

        return value

    @classmethod
    def validate_resource(cls, key, value):
        """
        Ensure the `value` provided for `key` is within an expected range as
        specified in `agent.yml`
        """
        min_value = getattr(cls, "MIN_%s" % key.upper())
        max_value = getattr(cls, "MAX_%s" % key.upper())

        # check the provided input
        if min_value > value or value > max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value

    @classmethod
    def validate_ip_address(cls, key, value):
        """
        Ensures the :attr:`ip` address is valid.  This checks to ensure
        that the value provided is:

            * not a hostmask
            * not link local (:rfc:`3927`)
            * not used for multicast (:rfc:`1112`)
            * not a netmask (:rfc:`4632`)
            * not reserved (:rfc:`6052`)
            * a private address (:rfc:`1918`)
        """
        if not value:
            return

        try:
            ip = netaddr.IPAddress(value)

        except (AddrFormatError, ValueError) as e:
            raise ValueError(
                "%s is not a valid address format: %s" % (value, e))

        if not app.config.get("DEV_ALLOW_ANY_AGENT_ADDRESS", False):
            if PYFARM_REQUIRE_PRIVATE_IP and not ip.is_private():
                raise ValueError("%s is not a private ip address" % value)

            if not app.config.get("DEV_ALLOW_ANY_AGENT_ADDRESS", False) and \
                not all([
                    not ip.is_hostmask(), not ip.is_link_local(),
                    not ip.is_loopback(), not ip.is_multicast(),
                    not ip.is_netmask(), not ip.is_reserved()
                ]):
                raise ValueError("%s is not a usable ip address" % value)

        return value

    @validates("ip")
    def validate_address_column(self, key, value):
        """validates the ip column"""
        return self.validate_ip_address(key, value)

    @validates("hostname")
    def validate_hostname_column(self, key, value):
        """validates the hostname column"""
        return self.validate_hostname(key, value)

    @validates("ram", "cpus", "port")
    def validate_resource_column(self, key, value):
        """validates the ram, cpus, and port columns"""
        return self.validate_resource(key, value)

    def serialize_column(self, column):
        """serializes a single column, typically used by a dictionary mixin"""
        if isinstance(column, IPAddress):
            return str(column)
        return column
Example #12
class Agent(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
            UtilityMixins, ReprMixin):
    """
    Stores information about an agent including its network address,
    state, allocation configuration, etc.

    .. note::
        This table enforces two forms of uniqueness.  The :attr:`id` column
        must be unique and the combination of these columns must also be
        unique to limit the frequency of duplicate data:

            * :attr:`hostname`
            * :attr:`port`
            * :attr:`id`

    """
    __tablename__ = config.get("table_agent")
    __table_args__ = (UniqueConstraint("hostname", "port", "id"), )
    STATE_ENUM = AgentState
    STATE_DEFAULT = "online"
    REPR_COLUMNS = (
        "id", "hostname", "port", "state", "remote_ip",
        "cpus", "ram", "free_ram")
    REPR_CONVERT_COLUMN = {"remote_ip": repr_ip}
    URL_TEMPLATE = config.get("agent_api_url_template")

    MIN_PORT = config.get("agent_min_port")
    MAX_PORT = config.get("agent_max_port")
    MIN_CPUS = config.get("agent_min_cpus")
    MAX_CPUS = config.get("agent_max_cpus")
    MIN_RAM = config.get("agent_min_ram")
    MAX_RAM = config.get("agent_max_ram")

    # quick check of the configured data
    assert MIN_PORT >= 1, "`agent_min_port` must be > 0"
    assert MAX_PORT >= 1, "`agent_max_port` must be > 0"
    assert MAX_PORT >= MIN_PORT, "MIN_PORT must be <= MAX_PORT"
    assert MIN_CPUS >= 1, "`agent_min_cpus` must be > 0"
    assert MAX_CPUS >= 1, "`agent_max_cpus` must be > 0"
    assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
    assert MIN_RAM >= 1, "`agent_min_ram` must be > 0"
    assert MAX_RAM >= 1, "`agent_max_ram` must be > 0"
    assert MAX_RAM >= MIN_RAM, "`agent_min_ram` must be <= `agent_max_ram`"

    id = id_column(IDTypeAgent, default=uuid.uuid4, autoincrement=False)

    # basic host attribute information
    hostname = db.Column(
        db.String(config.get("max_hostname_length")),
        nullable=False,
        doc="The hostname we should use to talk to this host. "
            "Preferably this value will be the fully qualified "
            "name instead of the base hostname alone.")

    notes = db.Column(
        db.Text,
        default="",
        doc="Free form notes about this agent")

    remote_ip = db.Column(
        IPv4Address, nullable=True,
        doc="the remote address which came in with the request")

    use_address = db.Column(
        UseAgentAddressEnum,
        nullable=False, default=UseAgentAddress.REMOTE,
        doc="The address we should use when communicating with the agent")

    # TODO Make non-nullable later
    os_class = db.Column(
        OperatingSystemEnum,
        doc="The type of operating system running on the "
            "agent; 'linux', 'windows', or 'mac'.")

    os_fullname = db.Column(
        db.String(config.get("max_osname_length")),
        doc="The full human-readable name of the agent's OS, as returned "
            "by platform.platform()")

    ram = db.Column(
        db.Integer,
        nullable=False,
        doc="The amount of ram installed on the agent in megabytes")

    free_ram = db.Column(
        db.Integer,
        nullable=False,
        doc="The amount of ram which was last considered free")

    cpus = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of logical CPU cores installed on the agent")

    cpu_name = db.Column(
        db.String(config.get("max_cpuname_length")),
        doc="The make and model of CPUs in this agents")

    port = db.Column(
        db.Integer,
        nullable=False,
        doc="The port the agent is currently running on")

    time_offset = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The offset in seconds the agent is from an official time server")

    version = db.Column(
        db.String(16),
        nullable=True,
        doc="The pyfarm version number this agent is running.")

    upgrade_to = db.Column(
        db.String(16),
        nullable=True,
        doc="The version this agent should upgrade to.")

    restart_requested = db.Column(
        db.Boolean,
        default=False, nullable=False,
        doc="If True, the agent will be restarted")

    # host state
    state = db.Column(
        AgentStateEnum,
        default=AgentState.ONLINE, nullable=False,
        doc="Stores the current state of the host.  This value can be "
            "changed either by a master telling the host to do "
            "something with a task or from the host via REST api.")

    last_heard_from = db.Column(
        db.DateTime,
        default=datetime.utcnow,
        doc="Time we last had contact with this agent")

    last_success_on = db.Column(
        db.DateTime,
        nullable=True,
        doc="The last time this agent has set a task to `done`")

    last_polled = db.Column(
        db.DateTime,
        doc="Time we last tried to contact the agent")

    # Max allocation of the two primary resources which `1.0` is 100%
    # allocation.  For `cpu_allocation` 100% allocation typically means
    # one task per cpu.
    ram_allocation = db.Column(
        db.Float,
        default=config.get("agent_ram_allocation"),
        doc="The amount of ram the agent is allowed to allocate "
            "towards work.  A value of 1.0 would mean to let the "
            "agent use all of the memory installed on the system "
            "when assigning work.")

    cpu_allocation = db.Column(
        db.Float,
        default=config.get("agent_cpu_allocation"),
        doc="The total amount of cpu space an agent is allowed to "
            "process work in.  A value of 1.0 would mean an agent "
            "can handle as much work as the system could handle "
            "given the requirements of a task.  For example if "
            "an agent has 8 cpus, cpu_allocation is .5, and a "
            "task requires 4 cpus then only that task will "
            "run on the system.")

    #
    # Relationships
    #

    tasks = db.relationship(
        "Task",
        backref="agent", lazy="dynamic",
        doc="Relationship between an :class:`Agent` and any "
            ":class:`pyfarm.models.Task` objects")

    tags = db.relationship(
        "Tag",
        secondary=AgentTagAssociation,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="Tags associated with this agent")

    software_versions = db.relationship(
        "SoftwareVersion",
        secondary=AgentSoftwareVersionAssociation,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="software this agent has installed or is configured for")

    mac_addresses = db.relationship(
        "AgentMacAddress", backref="agent",
        lazy="dynamic",
        doc="The MAC addresses this agent has",
        cascade="save-update, merge, delete, delete-orphan")

    gpus = db.relationship(
        "GPU",
        secondary=GPUInAgent,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="The graphics cards that are installed in this agent")

    disks = db.relationship(
        "AgentDisk",
        backref=db.backref("agent"),
        lazy="dynamic",
        doc="The known disks available to this agent",
        cascade="save-update, merge, delete, delete-orphan")

    failed_tasks = db.relationship(
        "Task",
        secondary=FailedTaskInAgent,
        backref=db.backref("failed_in_agents", lazy="dynamic"),
        lazy="dynamic",
        doc="The tasks this agents failed to execute")

    def is_offline(self):
        return self.state == AgentState.OFFLINE

    def is_disabled(self):
        return self.state == AgentState.DISABLED

    def get_supported_types(self):
        try:
            return self.support_jobtype_versions
        except AttributeError:
            jobtype_versions_query = JobTypeVersion.query.filter(
                JobTypeVersion.jobs.any(
                    or_(Job.state == None, Job.state == WorkState.RUNNING)))

            self.support_jobtype_versions = []
            for jobtype_version in jobtype_versions_query:
                if self.satisfies_jobtype_requirements(jobtype_version):
                    self.support_jobtype_versions.append(jobtype_version.id)

            return self.support_jobtype_versions

    def satisfies_jobtype_requirements(self, jobtype_version):
        requirements_to_satisfy = list(jobtype_version.software_requirements)

        for software_version in self.software_versions:
            for requirement in list(requirements_to_satisfy):
                if (software_version.software == requirement.software and
                    (requirement.min_version == None or
                    requirement.min_version.rank <= software_version.rank) and
                    (requirement.max_version == None or
                    requirement.max_version.rank >= software_version.rank)):
                    requirements_to_satisfy.remove(requirement)

        return len(requirements_to_satisfy) == 0

    def satisfies_job_requirements(self, job):
        if not self.satisfies_jobtype_requirements(job.jobtype_version):
            return False

        if self.cpus < job.cpus:
            return False

        if self.free_ram < job.ram:
            return False

        for tag_requirement in job.tag_requirements:
            if (not tag_requirement.negate and
                tag_requirement.tag not in self.tags):
                return False
            if (tag_requirement.negate and
                tag_requirement.tag in self.tags):
                return False

        return True

    @classmethod
    def validate_hostname(cls, key, value):
        """
        Ensures that the hostname provided by `value` matches a regular
        expression that expresses what a valid hostname is.
        """
        # ensure hostname does not contain characters we can't use
        if not REGEX_HOSTNAME.match(value):
            raise ValueError("%s is not valid for %s" % (value, key))

        return value

    @classmethod
    def validate_resource(cls, key, value):
        """
        Ensure the ``value`` provided for ``key`` is within an expected
        range.  This classmethod retrieves the min and max values from
        the :class:`Agent` class directly using:

            >>> min_value = getattr(Agent, "MIN_%s" % key.upper())
            >>> max_value = getattr(Agent, "MAX_%s" % key.upper())
        """
        min_value = getattr(cls, "MIN_%s" % key.upper())
        max_value = getattr(cls, "MAX_%s" % key.upper())

        # check the provided input
        if not min_value <= value <= max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value

    @classmethod
    def validate_ipv4_address(cls, _, value):
        """
        Ensures the :attr:`ip` address is valid.  This checks to ensure
        that the value provided is:

            * not a hostmask
            * not link local (:rfc:`3927`)
            * not used for multicast (:rfc:`1112`)
            * not a netmask (:rfc:`4632`)
            * not reserved (:rfc:`6052`)
            * a private address (:rfc:`1918`)
        """
        if value is None:
            return value

        try:
            address = IPAddress(value)

        except (AddrFormatError, ValueError) as e:
            raise ValueError(
                "%s is not a valid address format: %s" % (value, e))

        if ALLOW_AGENT_LOOPBACK:
            loopback = lambda: False
        else:
            loopback = address.is_loopback

        if any([address.is_hostmask(), address.is_link_local(),
                loopback(), address.is_multicast(),
                address.is_netmask(), address.is_reserved()]):
            raise ValueError("%s is not a valid address type" % value)

        return value

    def api_url(self):
        """
        Returns the base url which should be used to access the api
        of this specific agent.

        :except ValueError:
            Raised if this function is called while the agent's
            :attr:`use_address` column is set to ``PASSIVE``
        """
        if self.use_address == UseAgentAddress.REMOTE:
            return self.URL_TEMPLATE.format(
                host=self.remote_ip,
                port=self.port
            )

        elif self.use_address == UseAgentAddress.HOSTNAME:
            return self.URL_TEMPLATE.format(
                host=self.hostname,
                port=self.port
            )

        else:
            raise ValueError(
                "Cannot construct an agent API url using mode %r "
                "`use_address`" % self.use_address)

    @validates("hostname")
    def validate_hostname_column(self, key, value):
        """Validates the hostname column"""
        return self.validate_hostname(key, value)

    @validates("ram", "cpus", "port")
    def validate_numeric_column(self, key, value):
        """
        Validates several numerical columns.  Columns such as ram, cpus
        and port are validated with this method.
        """
        return self.validate_resource(key, value)

    @validates("remote_ip")
    def validate_remote_ip(self, key, value):
        """Validates the remote_ip column"""
        return self.validate_ipv4_address(key, value)
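A hedged sketch of how the requirement checks above could be combined to pick candidate agents for a job, assuming an application context and an existing `job` row:

candidates = [
    agent for agent in Agent.query.filter_by(state=AgentState.ONLINE)
    if agent.satisfies_job_requirements(job)]
for agent in candidates:
    # api_url() raises ValueError when use_address is set to PASSIVE.
    print(agent.hostname, agent.api_url())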
Example #13
class JobTypeVersion(db.Model, UtilityMixins, ReprMixin):
    """
    Defines a specific jobtype version.
    """
    __tablename__ = config.get("table_job_type_version")
    __table_args__ = (UniqueConstraint("jobtype_id", "version"), )

    REPR_COLUMNS = ("id", "jobtype_id", "version")

    id = id_column(IDTypeWork)

    jobtype_id = db.Column(IDTypeWork,
                           db.ForeignKey("%s.id" %
                                         config.get("table_job_type")),
                           nullable=False,
                           doc="The jobtype this version belongs to")

    version = db.Column(db.Integer, nullable=False, doc="The version number")

    max_batch = db.Column(
        db.Integer,
        default=config.get("job_type_max_batch"),
        doc="When the queue runs, this is the maximum number of tasks "
        "that the queue can select to assign to a single"
        "agent.  If left empty, no maximum applies")

    batch_contiguous = db.Column(
        db.Boolean,
        default=config.get("job_type_batch_contiguous"),
        doc="If True then the queue will be forced to batch"
        "numerically contiguous tasks only for this job type.  "
        "For example if True it would batch frames 1, 2, 3, 4 "
        "together but not 2, 4, 6, 8.  If this column is False "
        "however the queue will batch non-contiguous tasks too.")

    no_automatic_start_time = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If set, we will not automatically set `time_started_on` "
        "for the tasks in jobs of this type when they are set "
        "to `running`.")

    supports_tiling = db.Column(
        db.Boolean,
        default=False,
        doc="Whether or not the jobtype supports tiling, i.e. splitting single "
        "frames into regions and then rendering those independently from "
        "each other.")

    classname = db.Column(
        db.String(config.get("job_type_max_class_name_length")),
        nullable=True,
        doc="The name of the job class contained within the file being "
        "loaded.  This field may be null but when it's not provided "
        "job type name will be used instead.")

    code = db.Column(db.UnicodeText,
                     nullable=False,
                     doc="The source code of the job type")

    #
    # Relationships
    #
    jobtype = db.relationship("JobType",
                              backref=db.backref("versions",
                                                 lazy="dynamic",
                                                 cascade="all, delete-orphan"),
                              doc="Relationship between this version and the "
                              ":class:`JobType` it belongs to"
                              "")

    jobs = db.relationship("Job",
                           backref="jobtype_version",
                           lazy="dynamic",
                           doc="Relationship between this jobtype version and "
                           ":class:`.Job` objects.")

    @validates("max_batch")
    def validate_max_batch(self, key, value):
        if isinstance(value, int) and value < 1:
            raise ValueError("max_batch must be greater than or equal to 1")

        return value

    @validates("version")
    def validate_version(self, key, value):
        if isinstance(value, int) and value < 1:
            raise ValueError("version must be greater than or equal to 1")

        return value
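A minimal sketch of publishing a new version of a jobtype; the `validate_version` hook above rejects anything below 1. Assumes `jobtype` and `source_code` already exist (illustrative names):

latest = jobtype.versions.order_by(JobTypeVersion.version.desc()).first()
new_version = JobTypeVersion(
    jobtype=jobtype,
    version=(latest.version + 1) if latest is not None else 1,
    code=source_code)
db.session.add(new_version)
db.session.commit()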
Example #14
class Task(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
           UtilityMixins, ReprMixin):
    """
    Defines a task which is a child of a :class:`Job`.  This table represents
    rows which contain the individual work unit(s) for a job.
    """
    __tablename__ = config.get("table_task")
    STATE_ENUM = list(WorkState) + [None]
    STATE_DEFAULT = None
    REPR_COLUMNS = ("id", "state", "frame", "project")
    REPR_CONVERT_COLUMN = {"state": partial(repr_enum, enum=STATE_ENUM)}

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(STATE_DEFAULT, "job.priority")

    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        doc="Foreign key which stores :attr:`Job.id`")

    job_id = db.Column(
        IDTypeWork, db.ForeignKey("%s.id" % config.get("table_job")),
        nullable=False,
        doc="Foreign key which stores :attr:`Job.id`")

    hidden = db.Column(
        db.Boolean, default=False,
        doc="When True this hides the task from queue and web ui")

    attempts = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The number of attempts which have been made on this "
            "task. This value is auto incremented when "
            "``state`` changes to a value synonymous with a "
            "running state.")

    failures = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The number of times this task has failed. This value "
            "is auto incremented when :attr:`state` changes to a "
            "value synonymous with a failed state.")

    frame = db.Column(
        db.Numeric(10, 4),
        nullable=False,
        doc="The frame this :class:`Task` will be executing.")

    tile = db.Column(
        db.Integer,
        nullable=True,
        doc="When using tiled rendering, the number of the tile this task "
            "refers to. The jobtype will have to translate that into an "
            "actual image region. This will be NULL if the job doesn't use "
            "tiled rendering.")

    last_error = db.Column(
        db.UnicodeText,
        nullable=True,
        doc="This column may be set when an error is "
            "present.  The agent typically sets this "
            "column when the job type either can't or "
            "won't run a given task.  This column will "
            "be cleared whenever the task's state is "
            "returned to a non-error state.")

    sent_to_agent = db.Column(
        db.Boolean,
        default=False, nullable=False,
        doc="Whether this task was already sent to the assigned agent")

    progress = db.Column(
        db.Float, default=0.0,
        doc="The progress for this task, as a value between "
            "0.0 and 1.0. Used purely for display purposes.")

    #
    # Relationships
    #
    job = db.relationship(
        "Job",
        backref=db.backref("tasks", lazy="dynamic"),
        doc="relationship attribute which retrieves the "
            "associated job for this task")

    def running(self):
        return self.state == WorkState.RUNNING

    def failed(self):
        return self.state == WorkState.FAILED

    @staticmethod
    def increment_attempts(target, new_value, old_value, initiator):
        if new_value is not None and new_value != old_value:
            target.attempts += 1

    @staticmethod
    def log_assign_change(target, new_value, old_value, initiator):
        logger.debug("Agent change for task %s: old %s new: %s",
                     target.id, old_value, new_value)

    @staticmethod
    def update_failures(target, new_value, old_value, initiator):
        if new_value == WorkState.FAILED and new_value != old_value:
            target.failures += 1
            if target not in target.agent.failed_tasks:
                target.agent.failed_tasks.append(target)

    @staticmethod
    def set_progress_on_success(target, new_value, old_value, initiator):
        if new_value == WorkState.DONE:
            target.progress = 1.0

    @staticmethod
    def update_agent_on_success(target, new_value, old_value, initiator):
        if new_value == WorkState.DONE:
            agent = target.agent
            if agent:
                agent.last_success_on = datetime.utcnow()
                db.session.add(agent)

    @staticmethod
    def reset_agent_if_failed_and_retry(
            target, new_value, old_value, initiator):
        # There's nothing else we should do here if
        # we don't have a parent job.  This can happen if you're
        # testing or a job is disconnected from a task.
        if target.job is None:
            return new_value

        if (new_value == WorkState.FAILED and
            target.failures <= target.job.requeue):
            logger.info("Failed task %s will be retried", target.id)
            target.agent_id = None
            return None
        else:
            return new_value

    @staticmethod
    def clear_error_state(target, new_value, old_value, initiator):
        """
        Sets ``last_error`` column to ``None`` if the task's state is 'done'
        """
        if new_value == WorkState.DONE and target.last_error is not None:
            target.last_error = None

    @staticmethod
    def set_times(target, new_value, old_value, initiator):
        """update the datetime objects depending on the new value"""

        if (new_value == _WorkState.RUNNING and
            (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or
             target.time_started == None)):
            if not target.job.jobtype_version.no_automatic_start_time:
                target.time_started = datetime.utcnow()
                target.time_finished = None

        elif (new_value in (_WorkState.DONE, _WorkState.FAILED) and
              not target.time_finished):
            target.time_finished = datetime.utcnow()

    @staticmethod
    def reset_finished_time(target, new_value, old_value, initiator):
        if (target.state not in (_WorkState.DONE, _WorkState.FAILED) or
            new_value is None):
            target.time_finished = None
        elif new_value is not None:
            if target.time_finished is not None:
                target.time_finished = max(target.time_finished,
                                           new_value)
            else:
                target.time_finished = max(new_value,
                                           datetime.utcnow())
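The static methods above are written with the SQLAlchemy attribute-event signature (target, value, oldvalue, initiator). One possible wiring is shown below purely as an assumption, since the actual hookup is not part of this excerpt:

from sqlalchemy import event

event.listen(Task.state, "set", Task.set_times)
event.listen(Task.state, "set", Task.clear_error_state)
event.listen(Task.agent_id, "set", Task.log_assign_change)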
Example #15
class Job(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
          WorkStateChangedMixin, ReprMixin, UtilityMixins):
    """
    Defines the attributes and environment for a job.  Individual commands
    are kept track of by :class:`Task`
    """
    __tablename__ = config.get("table_job")
    REPR_COLUMNS = ("id", "state", "project")
    REPR_CONVERT_COLUMN = {"state": repr}
    STATE_ENUM = list(WorkState) + [None]

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(None, "job.priority")

    jobtype_version_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type_version")),
        nullable=False,
        doc="The foreign key which stores :class:`JobTypeVersion.id`")

    job_queue_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_queue")),
        nullable=True,
        doc="The foreign key which stores :class:`JobQueue.id`")

    job_group_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_group")),
        nullable=True,
        doc="The foreign key which stores:class:`JobGroup.id`")

    user_id = db.Column(db.Integer,
                        db.ForeignKey("%s.id" % config.get("table_user")),
                        doc="The id of the user who owns this job")

    minimum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will try to assign at least this number "
        "of agents to this job as long as it can use them, "
        "before any other considerations.")

    maximum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will never assign more than this number"
        "of agents to this job.")

    weight = db.Column(
        db.Integer,
        nullable=False,
        default=config.get("queue_default_weight"),
        doc="The weight of this job. The scheduler will distribute "
        "available agents between jobs and job queues in the "
        "same queue in proportion to their weights.")

    title = db.Column(db.String(config.get("jobtitle_max_length")),
                      nullable=False,
                      doc="The title of this job")

    notes = db.Column(
        db.Text,
        default="",
        doc="Notes that are provided on submission or added after "
        "the fact. This column is only provided for human "
        "consumption, is not scanned, indexed, or used when "
        "searching")

    output_link = db.Column(
        db.Text,
        nullable=True,
        doc="An optional link to a URI where this job's output can "
        "be viewed.")

    # task data
    by = db.Column(db.Numeric(10, 4),
                   default=1,
                   doc="The number of frames to count by between `start` and "
                   "`end`.  This column may also sometimes be referred to "
                   "as 'step' by other software.")

    num_tiles = db.Column(
        db.Integer,
        nullable=True,
        doc="How many regions to split frames into for rendering.")

    batch = db.Column(
        db.Integer,
        default=config.get("job_default_batch"),
        doc="Number of tasks to run on a single agent at once. Depending "
        "on the capabilities of the software being run this will "
        "either cause a single process to execute on the agent "
        "or multiple processes one after the other.")

    requeue = db.Column(db.Integer,
                        default=config.get("job_requeue_default"),
                        doc="Number of times to requeue failed tasks "
                        ""
                        ".. csv-table:: **Special Values**"
                        "   :header: Value, Result"
                        "   :widths: 10, 50"
                        ""
                        "   0, never requeue failed tasks"
                        "  -1, requeue failed tasks indefinitely")

    cpus = db.Column(
        db.Integer,
        default=config.get("job_default_cpus"),
        doc="Number of cpus or threads each task should consume on"
        "each agent.  Depending on the job type being executed "
        "this may result in additional cpu consumption, longer "
        "wait times in the queue (2 cpus means 2 'fewer' cpus on "
        "an agent), or all of the above."
        ""
        ".. csv-table:: **Special Values**"
        "   :header: Value, Result"
        "   :widths: 10, 50"
        ""
        "   0, minimum number of cpu resources not required "
        "   -1, agent cpu is exclusive for a task from this job")

    ram = db.Column(
        db.Integer,
        default=config.get("job_default_ram"),
        doc="Amount of ram a task from this job will require to be "
        "free in order to run.  A task exceeding this value will "
        "not result in any special behavior."
        ""
        ".. csv-table:: **Special Values**"
        "    :header: Value, Result"
        "    :widths: 10, 50"
        ""
        "0, minimum amount of free ram not required"
        "-1, agent ram is exclusive for a task from this job")

    ram_warning = db.Column(
        db.Integer,
        nullable=True,
        doc="Amount of ram used by a task before a warning raised. "
        "A task exceeding this value will not  cause any work "
        "stopping behavior.")

    ram_max = db.Column(
        db.Integer,
        nullable=True,
        doc="Maximum amount of ram a task is allowed to consume on "
        "an agent."
        ""
        ".. warning:: "
        "   If set, the task will be **terminated** if the ram in "
        "   use by the process exceeds this value.")

    hidden = db.Column(
        db.Boolean,
        default=False,
        nullable=False,
        doc="If True, keep the job hidden from the queue and web "
        "ui.  This is typically set to True if you either want "
        "to save a job for later viewing or if the jobs data "
        "is being populated in a deferred manner.")

    environ = db.Column(
        JSONDict,
        doc="Dictionary containing information about the environment "
        "in which the job will execute. "
        ""
        ".. note::"
        "    Changes made directly to this object are **not** "
        "    applied to the session.")

    data = db.Column(JSONDict,
                     doc="Json blob containing additional data for a job "
                     ""
                     ".. note:: "
                     "   Changes made directly to this object are **not** "
                     "   applied to the session.")

    to_be_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If true, the master will stop all running tasks for "
        "this job and then delete it.")

    completion_notify_sent = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="Whether or not the finish notification mail has already "
        "been sent out.")

    autodelete_time = db.Column(
        db.Integer,
        nullable=True,
        default=None,
        doc="If not None, this job will be automatically deleted this "
        "number of seconds after it finishes.")

    #
    # Relationships
    #

    queue = db.relationship("JobQueue",
                            backref=db.backref("jobs", lazy="dynamic"),
                            doc="The queue for this job")

    group = db.relationship("JobGroup",
                            backref=db.backref("jobs", lazy="dynamic"),
                            doc="The job group this job belongs to")

    user = db.relationship("User",
                           backref=db.backref("jobs", lazy="dynamic"),
                           doc="The owner of this job")

    # self-referential many-to-many relationship
    parents = db.relationship("Job",
                              secondary=JobDependency,
                              primaryjoin=id == JobDependency.c.childid,
                              secondaryjoin=id == JobDependency.c.parentid,
                              backref="children")

    notified_users = db.relationship("JobNotifiedUser",
                                     lazy="dynamic",
                                     backref=db.backref("job"),
                                     cascade="all,delete")

    tasks_queued = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == None) & "
        "(Task.job_id == Job.id)",
        doc="Relationship between this job and any :class:`Task` "
        "objects which are queued.")

    tasks_running = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.RUNNING,
        doc="Relationship between this job and any :class:`Task` "
        "objects which are running.")

    tasks_done = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.DONE,
        doc="Relationship between this job and any :class:`Task` objects "
        "which are done.")

    tasks_failed = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.FAILED,
        doc="Relationship between this job and any :class:`Task` objects "
        "which have failed.")

    # resource relationships
    tags = db.relationship(
        "Tag",
        backref="jobs",
        lazy="dynamic",
        secondary=JobTagAssociation,
        doc="Relationship between this job and :class:`.Tag` objects")

    def paused(self):
        return self.state == WorkState.PAUSED

    def update_state(self):
        # Import here instead of at the top of the file to avoid a circular
        # import
        from pyfarm.scheduler.tasks import send_job_completion_mail
        from pyfarm.models.agent import Agent

        num_active_tasks = db.session.query(Task).\
            filter(Task.job == self,
                   or_(Task.state == None, and_(
                            Task.state != WorkState.DONE,
                            Task.state != WorkState.FAILED))).count()
        if num_active_tasks == 0:
            num_failed_tasks = db.session.query(Task).filter(
                Task.job == self, Task.state == WorkState.FAILED).count()
            if num_failed_tasks == 0:
                if self.state != _WorkState.DONE:
                    logger.info(
                        "Job %r (id %s): state transition %r -> 'done'",
                        self.title, self.id, self.state)
                    self.state = WorkState.DONE
                    send_job_completion_mail.apply_async(args=[self.id, True],
                                                         countdown=5)
            else:
                if self.state != _WorkState.FAILED:
                    logger.info(
                        "Job %r (id %s): state transition %r -> "
                        "'failed'", self.title, self.id, self.state)
                    self.state = WorkState.FAILED
                    send_job_completion_mail.apply_async(args=[self.id, False],
                                                         countdown=5)
            db.session.add(self)
        elif self.state != _WorkState.PAUSED:
            num_running_tasks = db.session.query(Task).\
                filter(Task.job == self,
                       Task.agent_id != None,
                       Task.agent.has(and_(Agent.state != AgentState.OFFLINE,
                                           Agent.state != AgentState.DISABLED)),
                       or_(
                            Task.state == WorkState.RUNNING,
                            Task.state == None)).count()
            if num_running_tasks == 0:
                logger.debug(
                    "No running tasks in job %s (id %s), setting it "
                    "to queued", self.title, self.id)
                self.state = None
                db.session.add(self)
            elif self.state != _WorkState.RUNNING:
                self.state = WorkState.RUNNING

    # Methods used by the scheduler
    def num_assigned_agents(self):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        # Optimization: Blindly assume that we have no agents assigned if not
        # running
        if self.state != _WorkState.RUNNING:
            return 0

        try:
            return self.assigned_agents_count
        except AttributeError:
            self.assigned_agents_count = db.session.query(
                distinct(Task.agent_id)).filter(
                    Task.job == self,
                    Task.agent_id != None,
                    or_(Task.state == None,
                        Task.state == WorkState.RUNNING),
                    Task.agent.has(
                        and_(Agent.state != AgentState.OFFLINE,
                             Agent.state != AgentState.DISABLED))).count()

            return self.assigned_agents_count

    def clear_assigned_counts(self):
        try:
            del self.assigned_agents_count
        except AttributeError:
            pass
        if self.queue:
            self.queue.clear_assigned_counts()

    def can_use_more_agents(self):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        unassigned_tasks = Task.query.filter(
            Task.job == self,
            or_(Task.state == None,
                ~Task.state.in_([WorkState.DONE, WorkState.FAILED])),
            or_(
                Task.agent == None,
                Task.agent.has(
                    Agent.state.in_([AgentState.OFFLINE,
                                     AgentState.DISABLED])))).count()

        return unassigned_tasks > 0

    def get_batch(self, agent):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        tasks_query = Task.query.filter(
            Task.job == self,
            ~Task.failed_in_agents.any(id=agent.id),
            or_(Task.state == None,
                ~Task.state.in_([WorkState.DONE, WorkState.FAILED])),
            or_(Task.agent == None,
                Task.agent.has(Agent.state.in_(
                    [AgentState.OFFLINE, AgentState.DISABLED])))).\
                        order_by("frame asc, tile asc")

        batch = []
        for task in tasks_query:
            if (len(batch) < self.batch and
                    len(batch) < (self.jobtype_version.max_batch or maxsize) and
                    (not self.jobtype_version.batch_contiguous or
                     len(batch) == 0 or
                     batch[-1].frame + self.by == task.frame)):
                batch.append(task)

        return batch

    def alter_frame_range(self, start, end, by):
        # We have to import this down here instead of at the top to break a
        # circular dependency between the modules
        from pyfarm.scheduler.tasks import delete_task

        if end < start:
            raise ValueError("`end` must be greater than or equal to `start`")

        self.by = by

        required_frames = []
        current_frame = start
        while current_frame <= end:
            required_frames.append(current_frame)
            current_frame += by

        existing_tasks = Task.query.filter_by(job=self).all()
        frames_to_create = required_frames
        num_created = 0
        for task in existing_tasks:
            if task.frame not in required_frames:
                delete_task.delay(task.id)
            else:
                frames_to_create.remove(task.frame)

        for frame in frames_to_create:
            if self.num_tiles:
                for tile in range_(self.num_tiles - 1):
                    num_created += 1
                    task = Task()
                    task.job = self
                    task.frame = frame
                    task.tile = tile
                    task.priority = self.priority
                    db.session.add(task)
            else:
                num_created += 1
                task = Task()
                task.job = self
                task.frame = frame
                task.priority = self.priority
                db.session.add(task)

        if frames_to_create:
            if self.state != WorkState.RUNNING:
                self.state = None

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(num_new=num_created,
                                              job_queue_id=self.job_queue_id)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.add(task_event_count)

    def rerun(self):
        """
        Makes this job rerun all its task.  Tasks that are currently running are
        left untouched.
        """
        num_restarted = 0
        for task in self.tasks:
            if task.state != _WorkState.RUNNING and task.state is not None:
                task.state = None
                task.agent = None
                task.failures = 0
                db.session.add(task)
                num_restarted += 1

        self.completion_notify_sent = False
        self.update_state()
        db.session.add(self)

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(job_queue_id=self.job_queue_id,
                                              num_restarted=num_restarted)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.add(task_event_count)
            db.session.commit()

        for child in self.children:
            child.rerun()

    def rerun_failed(self):
        """
        Makes this job rerun all its failed tasks.  Tasks that are done or are
        currently running are left untouched
        """
        num_restarted = 0
        for task in self.tasks:
            if task.state == _WorkState.FAILED:
                task.state = None
                task.agent = None
                task.failures = 0
                db.session.add(task)
                num_restarted += 1

        self.completion_notify_sent = False
        self.update_state()
        db.session.add(self)

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(job_queue_id=self.job_queue_id,
                                              num_restarted=num_restarted)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.add(task_event_count)
            db.session.commit()

        for child in self.children:
            child.rerun_failed()

    @validates("ram", "cpus")
    def validate_resource(self, key, value):
        """
        Validation that ensures that the value provided for either
        :attr:`.ram` or :attr:`.cpus` is a valid value with a given range
        """
        assert isinstance(value, int), "%s must be an integer" % key
        min_value = config.get("agent_min_%s" % key)
        max_value = config.get("agent_max_%s" % key)

        # check the provided input
        if min_value > value or value > max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value

    @validates("progress")
    def validate_progress(self, key, value):
        if value < 0.0 or value > 1.0:
            raise ValueError("Progress must be between 0.0 and 1.0")

        return value
Example #16
class Job(db.Model, ValidatePriorityMixin, WorkStateChangedMixin, ReprMixin):
    """
    Defines the attributes and environment for a job.  Individual commands
    are kept track of by |Task|
    """
    __tablename__ = TABLE_JOB
    REPR_COLUMNS = ("id", "state", "project")
    REPR_CONVERT_COLUMN = {
        "state": repr}
    MIN_CPUS = read_env_int("PYFARM_QUEUE_MIN_CPUS", 1)
    MAX_CPUS = read_env_int("PYFARM_QUEUE_MAX_CPUS", 256)
    MIN_RAM = read_env_int("PYFARM_QUEUE_MIN_RAM", 16)
    MAX_RAM = read_env_int("PYFARM_QUEUE_MAX_RAM", 262144)
    SPECIAL_RAM = read_env("PYFARM_AGENT_SPECIAL_RAM", [0], eval_literal=True)
    SPECIAL_CPUS = read_env("PYFARM_AGENT_SPECIAL_CPUS", [0], eval_literal=True)

    # quick check of the configured data
    assert MIN_CPUS >= 1, "$PYFARM_QUEUE_MIN_CPUS must be > 0"
    assert MAX_CPUS >= 1, "$PYFARM_QUEUE_MAX_CPUS must be > 0"
    assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
    assert MIN_RAM >= 1, "$PYFARM_QUEUE_MIN_RAM must be > 0"
    assert MAX_RAM >= 1, "$PYFARM_QUEUE_MAX_RAM must be > 0"
    assert MAX_RAM >= MIN_RAM, "MIN_RAM must be <= MAX_RAM"


    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(WorkState.QUEUED, "job.priority")
    project_id = db.Column(db.Integer, db.ForeignKey("%s.id" % TABLE_PROJECT),
                           doc="stores the project id")
    job_type_id = db.Column(db.Integer, db.ForeignKey("%s.id" % TABLE_JOB_TYPE),
                            nullable=False,
                            doc=dedent("""
                            The foreign key which stores :class:`JobType.id`"""))
    user = db.Column(db.String(MAX_USERNAME_LENGTH),
                     doc=dedent("""
                     The user this job should execute as.  The agent
                     process will have to be running as root on platforms
                     that support setting the user id.

                     .. note::
                        The length of this field is limited by the
                        configuration value `job.max_username_length`

                     .. warning::
                        this may not behave as expected on all platforms
                        (Windows in particular)"""))
    notes = db.Column(db.Text, default="",
                      doc=dedent("""
                      Notes that are provided on submission or added after
                      the fact. This column is only provided for human
                      consumption; it is not scanned, indexed, or used when
                      searching"""))

    # task data
    cmd = db.Column(db.String(MAX_COMMAND_LENGTH),
                    doc=dedent("""
                    The platform independent command to run. Each agent will
                    resolve this value for itself when the task begins so a
                    command like `ping` will work on any platform it's
                    assigned to.  The full command could be provided here,
                    but then the job must be tagged using
                    :class:`.JobSoftware` to limit which agent(s) it will
                    run on."""))
    start = db.Column(db.Float,
                      doc=dedent("""
                      The first frame of the job to run.  This value may
                      be a float so subframes can be processed."""))
    end = db.Column(db.Float,
                      doc=dedent("""
                      The last frame of the job to run.  This value may
                      be a float so subframes can be processed."""))
    by = db.Column(db.Float, default=1,
                   doc=dedent("""
                   The number of frames to count by between `start` and
                   `end`.  This column may also sometimes be referred to
                   as 'step' by other software."""))
    batch = db.Column(db.Integer,
                      default=read_env_int("PYFARM_QUEUE_DEFAULT_BATCH", 1),
                      doc=dedent("""
                      Number of tasks to run on a single agent at once.
                      Depending on the capabilities of the software being run
                      this will either cause a single process to execute on
                      the agent or multiple processes one after the other.

                      **configured by**: `job.batch`"""))
    requeue = db.Column(db.Integer,
                        default=read_env_int("PYFARM_QUEUE_DEFAULT_REQUEUE", 3),
                        doc=dedent("""
                        Number of times to requeue failed tasks

                        .. csv-table:: **Special Values**
                            :header: Value, Result
                            :widths: 10, 50

                            0, never requeue failed tasks
                            -1, requeue failed tasks indefinitely

                        **configured by**: `job.requeue`"""))
    cpus = db.Column(db.Integer,
                     default=read_env_int("PYFARM_QUEUE_DEFAULT_CPUS", 1),
                     doc=dedent("""
                     Number of cpus or threads each task should consume on
                     each agent.  Depending on the job type being executed
                     this may result in additional cpu consumption, longer
                     wait times in the queue (2 cpus means 2 'fewer' cpus on
                     an agent), or all of the above.

                     .. csv-table:: **Special Values**
                        :header: Value, Result
                        :widths: 10, 50

                        0, minimum number of cpu resources not required
                        -1, agent cpu is exclusive for a task from this job

                     **configured by**: `job.cpus`"""))
    ram = db.Column(db.Integer,
                    default=read_env_int("PYFARM_QUEUE_DEFAULT_RAM", 32),
                    doc=dedent("""
                    Amount of ram a task from this job will require to be
                    free in order to run.  A task exceeding this value will
                    not result in any special behavior.

                    .. csv-table:: **Special Values**
                        :header: Value, Result
                        :widths: 10, 50

                        0, minimum amount of free ram not required
                        -1, agent ram is exclusive for a task from this job

                    **configured by**: `job.ram`"""))
    ram_warning = db.Column(db.Integer, default=-1,
                            doc=dedent("""
                            Amount of ram used by a task before a warning
                            is raised.  A task exceeding this value will not
                            cause any work stopping behavior.

                            .. csv-table:: **Special Values**
                                :header: Value, Result
                                :widths: 10, 50

                                -1, not set"""))
    ram_max = db.Column(db.Integer, default=-1,
                        doc=dedent("""
                        Maximum amount of ram a task is allowed to consume on
                        an agent.

                        .. warning::
                            The task will be **terminated** if the ram in use
                            by the process exceeds this value.

                        .. csv-table:: **Special Values**
                            :header: Value, Result
                            :widths: 10, 50

                            -1, not set
                        """))
    attempts = db.Column(db.Integer,
                         doc=dedent("""
                         The number of attempts which have been made on this
                         task. This value is auto-incremented when
                         :attr:`state` changes to a value synonymous with a
                         running state."""))
    hidden = db.Column(db.Boolean, default=False, nullable=False,
                       doc=dedent("""
                       If True, keep the job hidden from the queue and web
                       ui.  This is typically set to True if you either want
                       to save a job for later viewing or if the job's data
                       is being populated in a deferred manner."""))
    environ = db.Column(JSONDict,
                        doc=dedent("""
                        Dictionary containing information about the environment
                        in which the job will execute.

                        .. note::
                            Changes made directly to this object are **not**
                            applied to the session."""))
    args = db.Column(JSONList,
                     doc=dedent("""
                     List containing the command line arguments.

                     .. note::
                        Changes made directly to this object are **not**
                        applied to the session."""))
    data = db.Column(JSONDict,
                     doc=dedent("""
                     JSON blob containing additional data for a job

                     .. note::
                        Changes made directly to this object are **not**
                        applied to the session."""))

    project = db.relationship("Project",
                              backref=db.backref("jobs", lazy="dynamic"),
                              doc=dedent("""
                              relationship attribute which retrieves the
                              associated project for the job"""))

    # self-referential many-to-many relationship
    parents = db.relationship("Job",
                              secondary=JobDependencies,
                              primaryjoin=id == JobDependencies.c.parentid,
                              secondaryjoin=id == JobDependencies.c.childid,
                              backref="children")

    tasks_done = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.DONE,
        doc=dedent("""
        Relationship between this job and any |Task| objects which are
        done."""))

    tasks_failed = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.FAILED,
        doc=dedent("""
        Relationship between this job and any |Task| objects which have
        failed."""))

    tasks_queued = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.QUEUED,
        doc=dedent("""
        Relationship between this job and any |Task| objects which
        are queued."""))

    # resource relationships
    tags = db.relationship("Tag", backref="jobs", lazy="dynamic",
                           secondary=JobTagAssociation,
                           doc=dedent("""
                           Relationship between this job and
                           :class:`.Tag` objects"""))
    software = db.relationship("Software",
                               secondary=JobSoftwareDependency,
                               backref=db.backref("jobs", lazy="dynamic"),
                               lazy="dynamic",
                               doc="software needed by this job")

    @validates("ram", "cpus")
    def validate_resource(self, key, value):
        """
        Validation that ensures that the value provided for either
        :attr:`.ram` or :attr:`.cpus` is a valid value with a given range
        """
        key_upper = key.upper()
        special = getattr(self, "SPECIAL_%s" % key_upper)

        if value is None or value in special:
            return value

        min_value = getattr(self, "MIN_%s" % key_upper)
        max_value = getattr(self, "MAX_%s" % key_upper)

        # quick sanity check of the incoming config
        assert isinstance(min_value, int), "db.min_%s must be an integer" % key
        assert isinstance(max_value, int), "db.max_%s must be an integer" % key
        assert min_value >= 1, "db.min_%s must be > 0" % key
        assert max_value >= 1, "db.max_%s must be > 0" % key

        # check the provided input
        if min_value > value or value > max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value
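
The range check in validate_resource can also be exercised outside SQLAlchemy.
A minimal sketch, assuming the same rules; check_resource and its default
special values are illustrative only (the model reads SPECIAL_RAM/SPECIAL_CPUS
from the environment, defaulting to [0]):

def check_resource(key, value, min_value, max_value, special=(0,)):
    # Same rule as Job.validate_resource: None and "special" values bypass
    # the range check, everything else must fall in [min_value, max_value].
    if value is None or value in special:
        return value
    if not min_value <= value <= max_value:
        raise ValueError("value for `%s` must be between %s and %s"
                         % (key, min_value, max_value))
    return value

# check_resource("cpus", 4, 1, 256)     -> 4
# check_resource("ram", 0, 16, 262144)  -> 0  (special value, range check skipped)
# check_resource("cpus", 0, 1, 256, special=())  -> raises ValueError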
Example #17
class JobQueue(db.Model, UtilityMixins, ReprMixin):
    """
    Stores information about a job queue. Used for flexible, configurable
    distribution of computing capacity to jobs.
    """
    __tablename__ = config.get("table_job_queue")
    __table_args__ = (UniqueConstraint("parent_jobqueue_id", "name"), )

    REPR_COLUMNS = ("id", "name")

    id = id_column(IDTypeWork)

    parent_jobqueue_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_queue")),
        nullable=True,
        doc="The parent queue of this queue. If NULL, this is a top "
        "level queue.")

    name = db.Column(db.String(config.get("max_queue_name_length")),
                     nullable=False,
                     doc="The name of the job queue")

    minimum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will try to assign at least this number of "
        "agents to jobs in or below this queue as long as it "
        "can use them, before any other considerations.")

    maximum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will never assign more than this number of "
        "agents to jobs in or below this queue.")

    priority = db.Column(
        db.Integer,
        nullable=False,
        default=config.get("queue_default_priority"),
        doc="The priority of this job queue. The scheduler will not "
        "assign any nodes to other job queues or jobs with the "
        "same parent and a lower priority as long as this one "
        "can still use nodes. The minimum_agents column takes "
        "precedence over this.")

    weight = db.Column(db.Integer,
                       nullable=False,
                       default=config.get("queue_default_weight"),
                       doc="The weight of this job queue. The scheduler will "
                       "distribute available agents between jobs and job "
                       "queues in the same queue in proportion to their "
                       "weights.")

    fullpath = db.Column(db.String(config.get("max_queue_path_length")),
                         doc="The path of this jobqueue.  This column is a "
                         "database denormalization.  It is technically "
                         "redundant, but faster to access than recursively "
                         "querying all parent queues.  If set to NULL, the "
                         "path must be computed by recursively querying "
                         "the parent queues.")

    #
    # Relationship
    #
    parent = db.relationship("JobQueue",
                             remote_side=[id],
                             backref=db.backref("children", lazy="dynamic"),
                             doc="Relationship between this queue its parent")

    def path(self):
        # Import here instead of at the top to break circular dependency
        from pyfarm.scheduler.tasks import cache_jobqueue_path

        if self.fullpath:
            return self.fullpath
        else:
            cache_jobqueue_path.delay(self.id)
            path = "/%s" % (self.name or "")
            if self.parent:
                return self.parent.path() + path
            else:
                return path

    def child_queues_sorted(self):
        """
        Return child queues sorted by number of currently assigned agents with
        priority as a secondary sort key.
        """
        queues = list(self.children)
        return sorted(queues,
                      key=lambda x: (x.num_assigned_agents(), x.priority),
                      reverse=True)

    def child_jobs(self, filters):
        # Import down here instead of at the top to avoid circular import
        from pyfarm.models.job import Job

        jobs_query = Job.query

        if self.id:
            jobs_query = jobs_query.filter_by(queue=self)

        wanted_states = []
        if filters["state_paused"]:
            wanted_states.append(WorkState.PAUSED)
        if filters["state_running"]:
            wanted_states.append(WorkState.RUNNING)
        if filters["state_done"]:
            wanted_states.append(WorkState.DONE)
        if filters["state_failed"]:
            wanted_states.append(WorkState.FAILED)
        if filters["state_queued"]:
            jobs_query = jobs_query.filter(
                or_(Job.state == None, Job.state.in_(wanted_states)))
        else:
            jobs_query = jobs_query.filter(Job.state.in_(wanted_states))

        return sorted(jobs_query.all(),
                      key=lambda x: x.num_assigned_agents(),
                      reverse=True)

    def num_assigned_agents(self):
        try:
            return self.assigned_agents_count
        except AttributeError:
            # Import down here instead of at the top to avoid circular import
            from pyfarm.models.task import Task
            from pyfarm.models.job import Job

            self.assigned_agents_count = 0
            for queue in self.children:
                self.assigned_agents_count += queue.num_assigned_agents()
            self.assigned_agents_count +=\
                db.session.query(distinct(Task.agent_id)).\
                    filter(Task.job.has(Job.queue == self),
                           Task.agent_id != None,
                           Task.agent.has(
                               and_(Agent.state != AgentState.OFFLINE,
                                    Agent.state != AgentState.DISABLED)),
                           or_(Task.state == None,
                               Task.state == WorkState.RUNNING)).count()

            return self.assigned_agents_count

    def clear_assigned_counts(self):
        try:
            del self.assigned_agents_count
        except AttributeError:
            pass
        if self.parent:
            self.parent.clear_assigned_counts()

    def get_job_for_agent(self, agent, unwanted_job_ids=None):
        # Import down here instead of at the top to avoid circular import
        from pyfarm.models.job import Job

        supported_types = agent.get_supported_types()
        if not supported_types:
            return None

        available_ram = agent.ram if USE_TOTAL_RAM else agent.free_ram
        child_jobs = Job.query.filter(
            or_(Job.state == WorkState.RUNNING, Job.state == None),
            Job.job_queue_id == self.id, ~Job.parents.any(
                or_(Job.state == None, Job.state != WorkState.DONE)),
            Job.jobtype_version_id.in_(supported_types),
            Job.ram <= available_ram).all()
        child_jobs = [
            x for x in child_jobs if agent.satisfies_job_requirements(x)
        ]
        if unwanted_job_ids:
            child_jobs = [
                x for x in child_jobs if x.id not in unwanted_job_ids
            ]
        child_queues = JobQueue.query.filter(
            JobQueue.parent_jobqueue_id == self.id).all()

        # Before anything else, enforce minimums
        for job in child_jobs:
            if job.state == _WorkState.RUNNING:
                if (job.num_assigned_agents() < (job.minimum_agents or 0)
                        and job.num_assigned_agents() <
                    (job.maximum_agents or maxsize)
                        and job.can_use_more_agents()):
                    return job
            elif job.minimum_agents and job.minimum_agents > 0:
                return job

        for queue in child_queues:
            if (queue.num_assigned_agents() < (queue.minimum_agents or 0)
                    and queue.num_assigned_agents() <
                (queue.maximum_agents or maxsize)):
                job = queue.get_job_for_agent(agent, unwanted_job_ids)
                if job:
                    return job

        objects_by_priority = {}

        for queue in child_queues:
            if queue.priority in objects_by_priority:
                objects_by_priority[queue.priority] += [queue]
            else:
                objects_by_priority[queue.priority] = [queue]

        for job in child_jobs:
            if job.priority in objects_by_priority:
                objects_by_priority[job.priority] += [job]
            else:
                objects_by_priority[job.priority] = [job]

        available_priorities = sorted(objects_by_priority.keys(), reverse=True)

        # Work through the priorities in descending order
        for priority in available_priorities:
            objects = objects_by_priority[priority]
            active_objects = [
                x for x in objects
                if (type(x) != Job or x.state == _WorkState.RUNNING)
            ]
            weight_sum = reduce(lambda a, b: a + b.weight, active_objects, 0)
            total_assigned = reduce(lambda a, b: a + b.num_assigned_agents(),
                                    objects, 0)
            objects.sort(key=lambda x: (
                ((float(x.num_assigned_agents()) / total_assigned)
                 if total_assigned else 0) /
                ((float(x.weight) / weight_sum)
                 if weight_sum and x.weight else 1)))

            selected_job = None
            for item in objects:
                if isinstance(item, Job):
                    if item.state == _WorkState.RUNNING:
                        if (item.can_use_more_agents()
                                and item.num_assigned_agents() <
                            (item.maximum_agents or maxsize)):
                            if PREFER_RUNNING_JOBS:
                                return item
                            elif (selected_job is None
                                  or selected_job.time_submitted >
                                  item.time_submitted):
                                selected_job = item
                    elif (selected_job is None or
                          selected_job.time_submitted > item.time_submitted):
                        # If this job is not running yet, remember it, but keep
                        # looking for already running or queued but older jobs
                        selected_job = item
                if isinstance(item, JobQueue):
                    if (item.num_assigned_agents() <
                        (item.maximum_agents or maxsize)):
                        job = item.get_job_for_agent(agent, unwanted_job_ids)
                        if job:
                            return job
            if selected_job:
                return selected_job

        return None

    @staticmethod
    def top_level_unique_check(mapper, connection, target):
        if target.parent_jobqueue_id is None:
            count = JobQueue.query.filter_by(parent_jobqueue_id=None,
                                             name=target.name).count()
            if count > 0:
                raise ValueError("Cannot have two jobqueues named %r at the "
                                 "top level" % target.name)