Example #1
class JobTagRequirement(db.Model, UtilityMixins):
    """
    Model representing a dependency of a job on a tag

    If a job has a tag requirement, it will only run on agents that have that
    tag.
    """
    __tablename__ = config.get("table_job_tag_req")
    __table_args__ = (UniqueConstraint("tag_id", "job_id"), )

    id = id_column()

    tag_id = db.Column(db.Integer,
                       db.ForeignKey("%s.id" % config.get("table_tag")),
                       nullable=False,
                       doc="Reference to the required tag")

    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % config.get("table_job")),
                       nullable=False,
                       doc="Foreign key to :class:`Job.id`")

    negate = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If true, an agent that has this tag can not work on this job")

    job = db.relationship("Job",
                          backref=db.backref("tag_requirements",
                                             lazy="dynamic",
                                             cascade="all, delete-orphan"))

    tag = db.relationship("Tag")
Example #2
class JobTypeSoftwareRequirement(db.Model, UtilityMixins):
    """
    Model representing a dependency of a job on a software tag, with optional
    version constraints
    """
    __tablename__ = config.get("table_job_type_software_req")
    __table_args__ = (UniqueConstraint("software_id", "jobtype_version_id"), )

    software_id = db.Column(db.Integer,
                            db.ForeignKey("%s.id" %
                                          config.get("table_software")),
                            primary_key=True,
                            doc="Reference to the required software")

    jobtype_version_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type_version")),
        primary_key=True,
        doc="Foreign key to :class:`JobTypeVersion.id`")

    min_version_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        nullable=True,
        doc="Reference to the minimum required version")

    max_version_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        nullable=True,
        doc="Reference to the maximum required version")

    #
    # Relationships
    #
    jobtype_version = db.relationship("JobTypeVersion",
                                      backref=db.backref(
                                          "software_requirements",
                                          lazy="dynamic",
                                          cascade="all, delete-orphan"))

    software = db.relationship("Software")

    min_version = db.relationship("SoftwareVersion",
                                  foreign_keys=[min_version_id])

    max_version = db.relationship("SoftwareVersion",
                                  foreign_keys=[max_version_id])
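
min_version and max_version both point at SoftwareVersion, so each relationship() above names its foreign key explicitly; without foreign_keys, SQLAlchemy cannot tell which of the two columns joins which relationship. A stripped-down standalone sketch of the same disambiguation (plain SQLAlchemy, simplified columns):

# Standalone sketch: two foreign keys to the same table, disambiguated with
# the foreign_keys argument as min_version/max_version are above.
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class SoftwareVersion(Base):
    __tablename__ = "software_version"
    id = Column(Integer, primary_key=True)
    rank = Column(Integer, nullable=False)

class SoftwareRequirement(Base):
    __tablename__ = "software_requirement"
    id = Column(Integer, primary_key=True)
    min_version_id = Column(Integer, ForeignKey("software_version.id"))
    max_version_id = Column(Integer, ForeignKey("software_version.id"))

    # Without foreign_keys=... SQLAlchemy raises AmbiguousForeignKeysError,
    # since both columns reference software_version.id.
    min_version = relationship("SoftwareVersion", foreign_keys=[min_version_id])
    max_version = relationship("SoftwareVersion", foreign_keys=[max_version_id])

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

low, high = SoftwareVersion(rank=1), SoftwareVersion(rank=2)
requirement = SoftwareRequirement(min_version=low, max_version=high)
session.add(requirement)
session.commit()
print(requirement.min_version.rank, requirement.max_version.rank)  # -> 1 2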
Example #3
class AgentMacAddress(db.Model):
    __tablename__ = config.get("table_agent_mac_address")
    __table_args__ = (UniqueConstraint("agent_id", "mac_address"), )

    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True, nullable=False)
    mac_address = db.Column(
        MACAddress,
        primary_key=True, nullable=False, autoincrement=False)
Example #4
class TaskLog(db.Model, UtilityMixins, ReprMixin):
    """Table which represents a single task log entry"""
    __tablename__ = config.get("table_task_log")
    __table_args__ = (UniqueConstraint("identifier"),)

    id = id_column(db.Integer)

    identifier = db.Column(
        db.String(255),
        nullable=False,
        doc="The identifier for this log")

    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        nullable=True,
        doc="The agent this log was created on")

    created_on = db.Column(
        db.DateTime,
        default=datetime.utcnow,
        doc="The time when this log was created")

    #
    # Relationships
    #
    agent = db.relationship(
        "Agent",
        backref=db.backref("task_logs", lazy="dynamic"),
        doc="Relationship between an :class:`TaskLog`"
            "and the :class:`pyfarm.models.Agent` it was "
            "created on")

    task_associations = db.relationship(
        TaskTaskLogAssociation, backref="log",
        doc="Relationship between tasks and their logs."
    )

    def num_queued_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=None).count()

    def num_running_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.RUNNING).count()

    def num_failed_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.FAILED).count()

    def num_done_tasks(self):
        return TaskTaskLogAssociation.query.filter_by(
            log=self, state=WorkState.DONE).count()
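
Each num_*_tasks helper above issues its own COUNT query. When all of the figures are wanted at once, a single grouped query over the association table returns the same information in one round trip. A hedged sketch, assuming the TaskTaskLogAssociation model above is importable and an open session is at hand; task_counts_by_state is a hypothetical helper, not part of PyFarm:

# Hedged sketch: collapse the four per-state COUNT queries into one grouped
# query.  Assumes the TaskTaskLogAssociation model defined above; the helper
# name is illustrative only.
from sqlalchemy import func

def task_counts_by_state(session, log):
    """Return {state: count} for one log; the None key covers queued tasks."""
    rows = (
        session.query(TaskTaskLogAssociation.state, func.count())
        .filter(TaskTaskLogAssociation.task_log_id == log.id)
        .group_by(TaskTaskLogAssociation.state)
        .all())
    return dict(rows)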
Example #5
class JobNotifiedUser(db.Model):
    """
    Defines the table containing users to be notified of certain
    events pertaining to jobs.
    """
    __tablename__ = config.get("table_job_notified_users")

    user_id = db.Column(db.Integer,
                        db.ForeignKey("%s.id" % config.get("table_user")),
                        primary_key=True,
                        doc="The id of the user to be notified")

    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % config.get("table_job")),
                       primary_key=True,
                       doc="The id of the associated job")

    on_success = db.Column(
        db.Boolean,
        nullable=False,
        default=True,
        doc="True if a user should be notified on successful "
        "completion of a job")

    on_failure = db.Column(
        db.Boolean,
        nullable=False,
        default=True,
        doc="True if a user should be notified of a job's failure")

    on_deletion = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="True if a user should be notified on deletion of "
        "a job")

    user = db.relationship("User",
                           backref=db.backref("subscribed_jobs",
                                              lazy="dynamic"))
Example #6
class SoftwareVersion(db.Model, UtilityMixins):
    """
    Model to represent a version for a given software
    """
    __tablename__ = config.get("table_software_version")
    __table_args__ = (UniqueConstraint("software_id", "version"),
                      UniqueConstraint("software_id", "rank"))

    id = id_column()

    software_id = db.Column(db.Integer,
                            db.ForeignKey("%s.id" %
                                          config.get("table_software")),
                            nullable=False,
                            doc="The software this version belongs to")

    version = db.Column(
        db.String(config.get("max_tag_length")),
        default="any",
        nullable=False,
        doc="The version of the software.  This value does not "
        "follow any special formatting rules because the "
        "format depends on the 3rd party.")

    rank = db.Column(
        db.Integer,
        nullable=False,
        doc="The rank of this version relative to other versions of "
        "the same software. Used to determine whether a version "
        "is higher or lower than another.")

    default = db.Column(db.Boolean,
                        default=False,
                        nullable=False,
                        doc="If true, this software version will be registered"
                        "on new nodes by default.")

    discovery_code = db.Column(
        db.UnicodeText,
        nullable=True,
        doc="Python code to discover if this software version is installed "
        "on a node")

    discovery_function_name = db.Column(
        db.String(config.get("max_discovery_function_name_length")),
        nullable=True,
        doc="The name of a function in `discovery_code` to call when "
        "checking for the presence of this software version on an agent.\n"
        "The function should return either a boolean (true if present, "
        "false if not) or a tuple of a boolean and a dict of named "
        "parameters describing this installation.")
Example #7
class JobGroup(db.Model, UtilityMixins):
    """
    Used to group jobs together for better presentation in the UI
    """
    __tablename__ = config.get("table_job_group")

    id = id_column(IDTypeWork)

    title = db.Column(
        db.String(config.get("max_jobgroup_name_length")),
        nullable=False,
        doc="The title of the job group's name")

    main_jobtype_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type")),
        nullable=False,
        doc="ID of the jobtype of the main job in this "
            "group. Purely for display and filtering.")

    user_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_user")),
        doc="The id of the user who owns these jobs")

    #
    # Relationships
    #
    main_jobtype = db.relationship(
        "JobType",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The jobtype of the main job in this group")

    user = db.relationship(
        "User",
        backref=db.backref("jobgroups", lazy="dynamic"),
        doc="The user who owns these jobs")
Example #8
class TaskTaskLogAssociation(db.Model):
    """Stores an association between the task table and a task log"""
    __tablename__ = config.get("table_task_log_assoc")
    __table_args__ = (
        PrimaryKeyConstraint("task_log_id", "task_id", "attempt"),)

    task_log_id = db.Column(
        db.Integer,
        db.ForeignKey(
            "%s.id" % config.get("table_task_log"), ondelete="CASCADE"),
        doc="The ID of the task log")

    task_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_task"), ondelete="CASCADE"),
        doc="The ID of the job a task log is associated with")

    attempt = db.Column(
        db.Integer,
        autoincrement=False,
        doc="The attempt number for the given task log")

    state = db.Column(
        WorkStateEnum,
        nullable=True,
        doc="The state of the work being performed")

    #
    # Relationships
    #
    task = db.relationship(
        "Task",
        backref=db.backref(
            "log_associations",
            lazy="dynamic",
            passive_deletes=True))
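
The association above pairs ondelete="CASCADE" on its foreign keys with passive_deletes=True on the log_associations backref, so row cleanup is left to the database rather than the ORM. A standalone sketch of that combination, assuming plain SQLAlchemy and in-memory SQLite with foreign-key enforcement switched on (not PyFarm code):

# Standalone sketch of the ondelete="CASCADE" + passive_deletes=True pairing:
# the database removes association rows when the parent task row is deleted,
# and the ORM does not load the collection just to clean it up.
from sqlalchemy import Column, ForeignKey, Integer, create_engine, event
from sqlalchemy.orm import backref, declarative_base, relationship, sessionmaker

Base = declarative_base()

class Task(Base):
    __tablename__ = "task"
    id = Column(Integer, primary_key=True)

class TaskLogAssociation(Base):
    __tablename__ = "task_log_assoc"
    id = Column(Integer, primary_key=True)
    task_id = Column(Integer, ForeignKey("task.id", ondelete="CASCADE"))
    task = relationship(
        "Task", backref=backref("log_associations", passive_deletes=True))

engine = create_engine("sqlite://")

@event.listens_for(engine, "connect")
def _enable_sqlite_fks(dbapi_connection, connection_record):
    # SQLite only enforces foreign keys when this pragma is switched on.
    dbapi_connection.execute("pragma foreign_keys=on")

Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

task = Task()
task.log_associations = [TaskLogAssociation(), TaskLogAssociation()]
session.add(task)
session.commit()
session.expunge_all()

# Deleting the parent does not load log_associations; SQLite cascades instead.
session.delete(session.query(Task).first())
session.commit()
print(session.query(TaskLogAssociation).count())  # -> 0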
Example #9
class PathMap(db.Model, ReprMixin, UtilityMixins):
    """
    Defines a table which is used for cross-platform
    file path mappings.
    """
    __tablename__ = config.get("table_path_map")

    id = id_column(db.Integer)

    path_linux = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on linux platforms")

    path_windows = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on Windows platforms")

    path_osx = db.Column(
        db.String(config.get("max_path_length")),
        nullable=False,
        doc="The path on Mac OS X platforms")

    tag_id = db.Column(
        db.Integer,
        db.ForeignKey("%s.id" % config.get("table_tag")),
        nullable=True,
        doc="The tag an agent needs to have for this path map "
            "to apply to it. "
            "If this is NULL, this path map applies to all "
            "agents, but is overridden by applying path maps "
            "that do specify a tag.")

    #
    # Relationships
    #
    tag = db.relationship(
        "Tag",
        backref=db.backref("path_maps", lazy="dynamic"),
        doc="Relationship attribute for the tag this path map "
            "applies to.")
Example #10
try:
    # pylint: disable=undefined-variable
    range_ = xrange
except NameError:
    range_ = range

__all__ = ("Job", )

logger = getLogger("models.job")

JobTagAssociation = db.Table(
    config.get("table_job_tag_assoc"), db.metadata,
    db.Column("job_id",
              IDTypeWork,
              db.ForeignKey("%s.id" % config.get("table_job")),
              primary_key=True,
              doc="The id of the job associated with this task"),
    db.Column("tag_id",
              db.Integer,
              db.ForeignKey("%s.id" % config.get("table_tag")),
              primary_key=True,
              doc="The id of the tag being associated with the job"))

JobDependency = db.Table(
    config.get("table_job_dependency"), db.metadata,
    db.Column("parentid",
              IDTypeWork,
              db.ForeignKey("%s.id" % config.get("table_job")),
              primary_key=True,
              doc="The parent job id of the job dependency"),
Example #11
class Task(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
           UtilityMixins, ReprMixin):
    """
    Defines a task which is a child of a :class:`Job`.  This table represents
    rows which contain the individual work unit(s) for a job.
    """
    __tablename__ = config.get("table_task")
    STATE_ENUM = list(WorkState) + [None]
    STATE_DEFAULT = None
    REPR_COLUMNS = ("id", "state", "frame", "project")
    REPR_CONVERT_COLUMN = {"state": partial(repr_enum, enum=STATE_ENUM)}

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(STATE_DEFAULT, "job.priority")

    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        doc="Foreign key which stores :attr:`Job.id`")

    job_id = db.Column(
        IDTypeWork, db.ForeignKey("%s.id" % config.get("table_job")),
        nullable=False,
        doc="Foreign key which stores :attr:`Job.id`")

    hidden = db.Column(
        db.Boolean, default=False,
        doc="When True this hides the task from queue and web ui")

    attempts = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The number of attempts which have been made on this "
            "task. This value is auto incremented when "
            "``state`` changes to a value synonymous with a "
            "running state.")

    failures = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The number of times this task has failed. This value "
            "is auto incremented when :attr:`state` changes to a "
            "value synonymous with a failed state.")

    frame = db.Column(
        db.Numeric(10, 4),
        nullable=False,
        doc="The frame this :class:`Task` will be executing.")

    tile = db.Column(
        db.Integer,
        nullable=True,
        doc="When using tiled rendering, the number of the tile this task "
            "refers to. The jobtype will have to translate that into an "
            "actual image region. This will be NULL if the job doesn't use "
            "tiled rendering.")

    last_error = db.Column(
        db.UnicodeText,
        nullable=True,
        doc="This column may be set when an error is "
            "present.  The agent typically sets this "
            "column when the job type either can't or "
            "won't run a given task.  This column will "
            "be cleared whenever the task's state is "
            "returned to a non-error state.")

    sent_to_agent = db.Column(
        db.Boolean,
        default=False, nullable=False,
        doc="Whether this task was already sent to the assigned agent")

    progress = db.Column(
        db.Float, default=0.0,
        doc="The progress for this task, as a value between "
            "0.0 and 1.0. Used purely for display purposes.")

    #
    # Relationships
    #
    job = db.relationship(
        "Job",
        backref=db.backref("tasks", lazy="dynamic"),
        doc="relationship attribute which retrieves the "
            "associated job for this task")

    def running(self):
        return self.state == WorkState.RUNNING

    def failed(self):
        return self.state == WorkState.FAILED

    @staticmethod
    def increment_attempts(target, new_value, old_value, initiator):
        if new_value is not None and new_value != old_value:
            target.attempts += 1

    @staticmethod
    def log_assign_change(target, new_value, old_value, initiator):
        logger.debug("Agent change for task %s: old %s new: %s",
                     target.id, old_value, new_value)

    @staticmethod
    def update_failures(target, new_value, old_value, initiator):
        if new_value == WorkState.FAILED and new_value != old_value:
            target.failures += 1
            if target not in target.agent.failed_tasks:
                target.agent.failed_tasks.append(target)

    @staticmethod
    def set_progress_on_success(target, new_value, old_value, initiator):
        if new_value == WorkState.DONE:
            target.progress = 1.0

    @staticmethod
    def update_agent_on_success(target, new_value, old_value, initiator):
        if new_value == WorkState.DONE:
            agent = target.agent
            if agent:
                agent.last_success_on = datetime.utcnow()
                db.session.add(agent)

    @staticmethod
    def reset_agent_if_failed_and_retry(
            target, new_value, old_value, initiator):
        # There's nothing else we should do here if
        # we don't have a parent job.  This can happen if you're
        # testing or a job is disconnected from a task.
        if target.job is None:
            return new_value

        if (new_value == WorkState.FAILED and
            target.failures <= target.job.requeue):
            logger.info("Failed task %s will be retried", target.id)
            target.agent_id = None
            return None
        else:
            return new_value

    @staticmethod
    def clear_error_state(target, new_value, old_value, initiator):
        """
        Sets ``last_error`` column to ``None`` if the task's state is 'done'
        """
        if new_value == WorkState.DONE and target.last_error is not None:
            target.last_error = None

    @staticmethod
    def set_times(target, new_value, old_value, initiator):
        """update the datetime objects depending on the new value"""

        if (new_value == _WorkState.RUNNING and
            (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or
             target.time_started == None)):
            if not target.job.jobtype_version.no_automatic_start_time:
                target.time_started = datetime.utcnow()
                target.time_finished = None

        elif (new_value in (_WorkState.DONE, _WorkState.FAILED) and
              not target.time_finished):
            target.time_finished = datetime.utcnow()

    @staticmethod
    def reset_finished_time(target, new_value, old_value, initiator):
        if (target.state not in (_WorkState.DONE, _WorkState.FAILED) or
            new_value is None):
            target.time_finished = None
        elif new_value is not None:
            if target.time_finished is not None:
                target.time_finished = max(target.time_finished,
                                           new_value)
            else:
                target.time_finished = max(new_value,
                                           datetime.utcnow())
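
The static methods above all use the (target, new_value, old_value, initiator) signature of SQLAlchemy's attribute "set" event, so they are presumably attached with event.listen elsewhere in the package. A hedged sketch of that wiring, assuming the Task class above is importable; it shows the mechanism only, and the columns each handler actually binds to may differ:

# Hedged sketch: attaching the handlers above to attribute "set" events.  The
# real registrations live elsewhere in PyFarm; this only shows the mechanism.
from sqlalchemy import event

event.listen(Task.agent_id, "set", Task.log_assign_change)
event.listen(Task.state, "set", Task.update_failures)
event.listen(Task.state, "set", Task.set_progress_on_success)
event.listen(Task.state, "set", Task.update_agent_on_success)
event.listen(Task.state, "set", Task.clear_error_state)
event.listen(Task.state, "set", Task.set_times)

# reset_agent_if_failed_and_retry returns the (possibly replaced) new value,
# so it must be registered with retval=True for that return value to be used.
event.listen(Task.state, "set", Task.reset_agent_if_failed_and_retry,
             retval=True)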
Example #12
class Task(db.Model, ValidatePriorityMixin, WorkStateChangedMixin,
           UtilityMixins, ReprMixin):
    """
    Defines a task which is a child of a :class:`Job`.  This table represents
    rows which contain the individual work unit(s) for a job.
    """
    __tablename__ = TABLE_TASK
    STATE_ENUM = WorkState
    STATE_DEFAULT = STATE_ENUM.QUEUED
    REPR_COLUMNS = ("id", "state", "frame", "project")
    REPR_CONVERT_COLUMN = {"state": partial(repr_enum, enum=STATE_ENUM)}

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(STATE_DEFAULT, "job.priority")
    project_id = db.Column(db.Integer,
                           db.ForeignKey("%s.id" % TABLE_PROJECT),
                           doc="stores the project id")
    agent_id = db.Column(IDTypeAgent,
                         db.ForeignKey("%s.id" % TABLE_AGENT),
                         doc="Foreign key which stores :attr:`Job.id`")
    job_id = db.Column(IDTypeWork,
                       db.ForeignKey("%s.id" % TABLE_JOB),
                       doc="Foreign key which stores :attr:`Job.id`")
    hidden = db.Column(db.Boolean,
                       default=False,
                       doc=dedent("""
                       hides the task from queue and web ui"""))
    attempts = db.Column(db.Integer,
                         doc=dedent("""
                         The number of attempts which have been made on this
                         task. This value is auto incremented when
                         :attr:`state` changes to a value synonymous with a
                         running state."""))
    frame = db.Column(db.Float,
                      nullable=False,
                      doc=dedent("""
                      The frame the :class:`Task` will be executing."""))

    # relationships
    parents = db.relationship("Task",
                              secondary=TaskDependencies,
                              primaryjoin=id == TaskDependencies.c.parent_id,
                              secondaryjoin=id == TaskDependencies.c.child_id,
                              backref=db.backref("children", lazy="dynamic"))
    project = db.relationship("Project",
                              backref=db.backref("tasks", lazy="dynamic"),
                              doc=dedent("""
                              relationship attribute which retrieves the
                              associated project for the task"""))
    job = db.relationship("Job",
                          backref=db.backref("tasks", lazy="dynamic"),
                          doc=dedent("""
                          relationship attribute which retrieves the
                          associated job for this task"""))

    @staticmethod
    def agentChangedEvent(target, new_value, old_value, initiator):
        """set the state to ASSIGN whenever the agent is changed"""
        if new_value is not None:
            target.state = target.STATE_ENUM.ASSIGN
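
parents/children above is a self-referential many-to-many through the TaskDependencies table, where primaryjoin and secondaryjoin decide which column of the association row the local task occupies. A standalone sketch of the same pattern with plain SQLAlchemy and simplified names:

# Standalone sketch of a self-referential many-to-many through an association
# table, mirroring the parents/children relationship above.
from sqlalchemy import Column, ForeignKey, Integer, Table, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

task_dependencies = Table(
    "task_dependencies", Base.metadata,
    Column("parent_id", Integer, ForeignKey("task.id"), primary_key=True),
    Column("child_id", Integer, ForeignKey("task.id"), primary_key=True))

class Task(Base):
    __tablename__ = "task"
    id = Column(Integer, primary_key=True)
    # primaryjoin ties the local row to the association table; secondaryjoin
    # ties the association row to the related task on the other side.
    parents = relationship(
        "Task",
        secondary=task_dependencies,
        primaryjoin=id == task_dependencies.c.parent_id,
        secondaryjoin=id == task_dependencies.c.child_id,
        backref="children")

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

parent, child = Task(), Task()
child.parents.append(parent)
session.add_all([parent, child])
session.commit()
print(parent in child.parents, child in parent.children)  # -> True True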
Example #13
from pyfarm.core.enums import WorkState
from pyfarm.master.application import db
from pyfarm.models.core.types import IDTypeAgent, IDTypeWork
from pyfarm.models.core.functions import work_columns, repr_enum
from pyfarm.models.core.cfg import (TABLE_JOB, TABLE_TASK, TABLE_AGENT,
                                    TABLE_TASK_DEPENDENCIES, TABLE_PROJECT)
from pyfarm.models.core.mixins import (ValidatePriorityMixin,
                                       WorkStateChangedMixin, UtilityMixins,
                                       ReprMixin)

TaskDependencies = db.Table(
    TABLE_TASK_DEPENDENCIES, db.metadata,
    db.Column("parent_id",
              IDTypeWork,
              db.ForeignKey("%s.id" % TABLE_TASK),
              primary_key=True),
    db.Column("child_id",
              IDTypeWork,
              db.ForeignKey("%s.id" % TABLE_TASK),
              primary_key=True))


class Task(db.Model, ValidatePriorityMixin, WorkStateChangedMixin,
           UtilityMixins, ReprMixin):
    """
    Defines a task which is a child of a :class:`Job`.  This table represents
    rows which contain the individual work unit(s) for a job.
    """
    __tablename__ = TABLE_TASK
    STATE_ENUM = WorkState
Example #14
from pyfarm.core.enums import STRING_TYPES, PY3
from pyfarm.master.application import app, db, login_serializer
from pyfarm.models.core.mixins import ReprMixin
from pyfarm.models.core.functions import split_and_extend
from pyfarm.models.core.cfg import (
    TABLE_USERS_USER, TABLE_USERS_ROLE, TABLE_USERS_USER_ROLES,
    MAX_USERNAME_LENGTH, SHA256_ASCII_LENGTH, MAX_EMAILADDR_LENGTH,
    MAX_ROLE_LENGTH, TABLE_USERS_PROJECTS, TABLE_PROJECT)

logger = getLogger("models.users")

# roles the user is a member of
UserRoles = db.Table(
    TABLE_USERS_USER_ROLES,
    db.Column("user_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_USERS_USER)),
    db.Column("role_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_USERS_ROLE)))

# projects the user is a member of
UserProjects = db.Table(
    TABLE_USERS_PROJECTS,
    db.Column("user_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_USERS_USER), primary_key=True),
    db.Column("project_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_PROJECT), primary_key=True))


class User(db.Model, UserMixin, ReprMixin):
    """
    Stores information about a user including the roles they belong to
Example #15
class Job(db.Model, ValidatePriorityMixin, WorkStateChangedMixin, ReprMixin):
    """
    Defines the attributes and environment for a job.  Individual commands
    are kept track of by |Task|
    """
    __tablename__ = TABLE_JOB
    REPR_COLUMNS = ("id", "state", "project")
    REPR_CONVERT_COLUMN = {
        "state": repr}
    MIN_CPUS = read_env_int("PYFARM_QUEUE_MIN_CPUS", 1)
    MAX_CPUS = read_env_int("PYFARM_QUEUE_MAX_CPUS", 256)
    MIN_RAM = read_env_int("PYFARM_QUEUE_MIN_RAM", 16)
    MAX_RAM = read_env_int("PYFARM_QUEUE_MAX_RAM", 262144)
    SPECIAL_RAM = read_env("PYFARM_AGENT_SPECIAL_RAM", [0], eval_literal=True)
    SPECIAL_CPUS = read_env("PYFARM_AGENT_SPECIAL_CPUS", [0], eval_literal=True)

    # quick check of the configured data
    assert MIN_CPUS >= 1, "$PYFARM_QUEUE_MIN_CPUS must be > 0"
    assert MAX_CPUS >= 1, "$PYFARM_QUEUE_MAX_CPUS must be > 0"
    assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
    assert MIN_RAM >= 1, "$PYFARM_QUEUE_MIN_RAM must be > 0"
    assert MAX_RAM >= 1, "$PYFARM_QUEUE_MAX_RAM must be > 0"
    assert MAX_RAM >= MIN_RAM, "MIN_RAM must be <= MAX_RAM"


    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(WorkState.QUEUED, "job.priority")
    project_id = db.Column(db.Integer, db.ForeignKey("%s.id" % TABLE_PROJECT),
                           doc="stores the project id")
    job_type_id = db.Column(db.Integer, db.ForeignKey("%s.id" % TABLE_JOB_TYPE),
                            nullable=False,
                            doc=dedent("""
                            The foreign key which stores :class:`JobType.id`"""))
    user = db.Column(db.String(MAX_USERNAME_LENGTH),
                     doc=dedent("""
                     The user this job should execute as.  The agent
                     process will have to be running as root on platforms
                     that support setting the user id.

                     .. note::
                        The length of this field is limited by the
                        configuration value `job.max_username_length`

                     .. warning::
                        this may not behave as expected on all platforms
                        (windows in particular)"""))
    notes = db.Column(db.Text, default="",
                      doc=dedent("""
                      Notes that are provided on submission or added after
                      the fact. This column is only provided for human
                      consumption, is not scanned, indexed, or used when
                      searching"""))

    # task data
    cmd = db.Column(db.String(MAX_COMMAND_LENGTH),
                    doc=dedent("""
                    The platform independent command to run. Each agent will
                    resolve this value for itself when the task begins so a
                    command like `ping` will work on any platform it's
                    assigned to.  The full command could be provided here,
                    but then the job must be tagged using
                    :class:`.JobSoftware` to limit which agent(s) it will
                    run on."""))
    start = db.Column(db.Float,
                      doc=dedent("""
                      The first frame of the job to run.  This value may
                      be a float so subframes can be processed."""))
    end = db.Column(db.Float,
                      doc=dedent("""
                      The last frame of the job to run.  This value may
                      be a float so subframes can be processed."""))
    by = db.Column(db.Float, default=1,
                   doc=dedent("""
                   The number of frames to count by between `start` and
                   `end`.  This column may also sometimes be referred to
                   as 'step' by other software."""))
    batch = db.Column(db.Integer,
                      default=read_env_int("PYFARM_QUEUE_DEFAULT_BATCH", 1),
                      doc=dedent("""
                      Number of tasks to run on a single agent at once.
                      Depending on the capabilities of the software being run
                      this will either cause a single process to execute on
                      the agent or multiple processes one after the other.

                      **configured by**: `job.batch`"""))
    requeue = db.Column(db.Integer,
                        default=read_env_int("PYFARM_QUEUE_DEFAULT_REQUEUE", 3),
                        doc=dedent("""
                        Number of times to requeue failed tasks

                        .. csv-table:: **Special Values**
                            :header: Value, Result
                            :widths: 10, 50

                            0, never requeue failed tasks
                            -1, requeue failed tasks indefinitely

                        **configured by**: `job.requeue`"""))
    cpus = db.Column(db.Integer,
                     default=read_env_int("PYFARM_QUEUE_DEFAULT_CPUS", 1),
                     doc=dedent("""
                     Number of cpus or threads each task should consume on
                     each agent.  Depending on the job type being executed
                     this may result in additional cpu consumption, longer
                     wait times in the queue (2 cpus means 2 'fewer' cpus on
                     an agent), or all of the above.

                     .. csv-table:: **Special Values**
                        :header: Value, Result
                        :widths: 10, 50

                        0, minimum number of cpu resources not required
                        -1, agent cpu is exclusive for a task from this job

                     **configured by**: `job.cpus`"""))
    ram = db.Column(db.Integer,
                    default=read_env_int("PYFARM_QUEUE_DEFAULT_RAM", 32),
                    doc=dedent("""
                    Amount of ram a task from this job will require to be
                    free in order to run.  A task exceeding this value will
                    not result in any special behavior.

                    .. csv-table:: **Special Values**
                        :header: Value, Result
                        :widths: 10, 50

                        0, minimum amount of free ram not required
                        -1, agent ram is exclusive for a task from this job

                    **configured by**: `job.ram`"""))
    ram_warning = db.Column(db.Integer, default=-1,
                            doc=dedent("""
                            Amount of ram used by a task before a warning
                            is raised.  A task exceeding this value will not
                            cause any work stopping behavior.

                            .. csv-table:: **Special Values**
                                :header: Value, Result
                                :widths: 10, 50

                                -1, not set"""))
    ram_max = db.Column(db.Integer, default=-1,
                        doc=dedent("""
                        Maximum amount of ram a task is allowed to consume on
                        an agent.

                        .. warning::
                            The task will be **terminated** if the ram in use
                            by the process exceeds this value.

                        .. csv-table:: **Special Values**
                            :header: Value, Result
                            :widths: 10, 50

                            -1, not set
                        """))
    attempts = db.Column(db.Integer,
                         doc=dedent("""
                         The number of attempts which have been made on this
                         task. This value is auto incremented when
                         :attr:`state` changes to a value synonymous with a
                         running state."""))
    hidden = db.Column(db.Boolean, default=False, nullable=False,
                       doc=dedent("""
                       If True, keep the job hidden from the queue and web
                       ui.  This is typically set to True if you either want
                       to save a job for later viewing or if the job's data
                       is being populated in a deferred manner."""))
    environ = db.Column(JSONDict,
                        doc=dedent("""
                        Dictionary containing information about the environment
                        in which the job will execute.

                        .. note::
                            Changes made directly to this object are **not**
                            applied to the session."""))
    args = db.Column(JSONList,
                     doc=dedent("""
                     List containing the command line arguments.

                     .. note::
                        Changes made directly to this object are **not**
                        applied to the session."""))
    data = db.Column(JSONDict,
                     doc=dedent("""
                     Json blob containing additional data for a job

                     .. note::
                        Changes made directly to this object are **not**
                        applied to the session."""))

    project = db.relationship("Project",
                              backref=db.backref("jobs", lazy="dynamic"),
                              doc=dedent("""
                              relationship attribute which retrieves the
                              associated project for the job"""))

    # self-referential many-to-many relationship
    parents = db.relationship("Job",
                              secondary=JobDependencies,
                              primaryjoin=id == JobDependencies.c.parentid,
                              secondaryjoin=id == JobDependencies.c.childid,
                              backref="children")

    tasks_done = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.DONE,
        doc=dedent("""
        Relationship between this job and any |Task| objects which are
        done."""))

    tasks_failed = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.FAILED,
        doc=dedent("""
        Relationship between this job and any |Task| objects which have
        failed."""))

    tasks_queued = db.relationship("Task", lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
                    "(Task.job_id == Job.id)" % DBWorkState.QUEUED,
        doc=dedent("""
        Relationship between this job and any |Task| objects which
        are queued."""))

    # resource relationships
    tags = db.relationship("Tag", backref="jobs", lazy="dynamic",
                           secondary=JobTagAssociation,
                           doc=dedent("""
                           Relationship between this job and
                           :class:`.Tag` objects"""))
    software = db.relationship("Software",
                               secondary=JobSoftwareDependency,
                               backref=db.backref("jobs", lazy="dynamic"),
                               lazy="dynamic",
                               doc="software needed by this job")

    @validates("ram", "cpus")
    def validate_resource(self, key, value):
        """
        Validation that ensures that the value provided for either
        :attr:`.ram` or :attr:`.cpus` is a valid value with a given range
        """
        key_upper = key.upper()
        special = getattr(self, "SPECIAL_%s" % key_upper)

        if value is None or value in special:
            return value

        min_value = getattr(self, "MIN_%s" % key_upper)
        max_value = getattr(self, "MAX_%s" % key_upper)

        # quick sanity check of the incoming config
        assert isinstance(min_value, int), "db.min_%s must be an integer" % key
        assert isinstance(max_value, int), "db.max_%s must be an integer" % key
        assert min_value >= 1, "db.min_%s must be > 0" % key
        assert max_value >= 1, "db.max_%s must be > 0" % key

        # check the provided input
        if min_value > value or value > max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value
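
validate_resource runs through SQLAlchemy's @validates hook, so an out-of-range assignment fails at attribute-set time rather than at flush. A standalone sketch of the same hook, with hard-coded bounds standing in for the PYFARM_QUEUE_* configuration read above:

# Standalone sketch of the @validates pattern used by Job.validate_resource,
# with fixed bounds instead of the environment-driven configuration.
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base, validates

Base = declarative_base()

class Job(Base):
    __tablename__ = "job"
    id = Column(Integer, primary_key=True)
    cpus = Column(Integer)
    ram = Column(Integer)

    @validates("ram", "cpus")
    def validate_resource(self, key, value):
        bounds = {"cpus": (1, 256), "ram": (16, 262144)}
        min_value, max_value = bounds[key]
        if value is not None and not min_value <= value <= max_value:
            raise ValueError("value for `%s` must be between %s and %s"
                             % (key, min_value, max_value))
        return value

job = Job()
job.cpus = 8            # accepted
try:
    job.ram = 0         # rejected: below the configured minimum
except ValueError as error:
    print(error)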
Example #16
    AgentStateEnum)
from pyfarm.models.core.cfg import (
    TABLE_AGENT, TABLE_SOFTWARE, TABLE_TAG, TABLE_AGENT_TAG_ASSOC,
    MAX_HOSTNAME_LENGTH, MAX_TAG_LENGTH, TABLE_AGENT_SOFTWARE_ASSOC,
    TABLE_PROJECT_AGENTS, TABLE_PROJECT)

PYFARM_REQUIRE_PRIVATE_IP = read_env_bool("PYFARM_REQUIRE_PRIVATE_IP", False)
REGEX_HOSTNAME = re.compile("^(?!-)[A-Z\d-]{1,63}(?<!-)"
                            "(\.(?!-)[A-Z\d-]{1,63}(?<!-))*\.?$"
                            , re.IGNORECASE)


AgentSoftwareAssociation = db.Table(
    TABLE_AGENT_SOFTWARE_ASSOC, db.metadata,
    db.Column("agent_id", IDTypeAgent,
              db.ForeignKey("%s.id" % TABLE_AGENT), primary_key=True),
    db.Column("software_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_SOFTWARE), primary_key=True))


AgentTagAssociation = db.Table(
    TABLE_AGENT_TAG_ASSOC, db.metadata,
    db.Column("agent_id", IDTypeAgent,
              db.ForeignKey("%s.id" % TABLE_AGENT), primary_key=True),
    db.Column("tag_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_TAG), primary_key=True))


AgentProjects = db.Table(
    TABLE_PROJECT_AGENTS, db.metadata,
    db.Column("agent_id", IDTypeAgent,
Example #17
class Job(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
          WorkStateChangedMixin, ReprMixin, UtilityMixins):
    """
    Defines the attributes and environment for a job.  Individual commands
    are kept track of by :class:`Task`
    """
    __tablename__ = config.get("table_job")
    REPR_COLUMNS = ("id", "state", "project")
    REPR_CONVERT_COLUMN = {"state": repr}
    STATE_ENUM = list(WorkState) + [None]

    # shared work columns
    id, state, priority, time_submitted, time_started, time_finished = \
        work_columns(None, "job.priority")

    jobtype_version_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_type_version")),
        nullable=False,
        doc="The foreign key which stores :class:`JobTypeVersion.id`")

    job_queue_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_queue")),
        nullable=True,
        doc="The foreign key which stores :class:`JobQueue.id`")

    job_group_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_group")),
        nullable=True,
        doc="The foreign key which stores:class:`JobGroup.id`")

    user_id = db.Column(db.Integer,
                        db.ForeignKey("%s.id" % config.get("table_user")),
                        doc="The id of the user who owns this job")

    minimum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will try to assign at least this number "
        "of agents to this job as long as it can use them, "
        "before any other considerations.")

    maximum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will never assign more than this number"
        "of agents to this job.")

    weight = db.Column(
        db.Integer,
        nullable=False,
        default=config.get("queue_default_weight"),
        doc="The weight of this job. The scheduler will distribute "
        "available agents between jobs and job queues in the "
        "same queue in proportion to their weights.")

    title = db.Column(db.String(config.get("jobtitle_max_length")),
                      nullable=False,
                      doc="The title of this job")

    notes = db.Column(
        db.Text,
        default="",
        doc="Notes that are provided on submission or added after "
        "the fact. This column is only provided for human "
        "consumption, is not scanned, indexed, or used when "
        "searching")

    output_link = db.Column(
        db.Text,
        nullable=True,
        doc="An optional link to a URI where this job's output can "
        "be viewed.")

    # task data
    by = db.Column(db.Numeric(10, 4),
                   default=1,
                   doc="The number of frames to count by between `start` and "
                   "`end`.  This column may also sometimes be referred to "
                   "as 'step' by other software.")

    num_tiles = db.Column(
        db.Integer,
        nullable=True,
        doc="How many regions to split frames into for rendering.")

    batch = db.Column(
        db.Integer,
        default=config.get("job_default_batch"),
        doc="Number of tasks to run on a single agent at once. Depending "
        "on the capabilities of the software being run this will "
        "either cause a single process to execute on the agent "
        "or multiple processes one after the other.")

    requeue = db.Column(db.Integer,
                        default=config.get("job_requeue_default"),
                        doc="Number of times to requeue failed tasks "
                        ""
                        ".. csv-table:: **Special Values**"
                        "   :header: Value, Result"
                        "   :widths: 10, 50"
                        ""
                        "   0, never requeue failed tasks"
                        "  -1, requeue failed tasks indefinitely")

    cpus = db.Column(
        db.Integer,
        default=config.get("job_default_cpus"),
        doc="Number of cpus or threads each task should consume on"
        "each agent.  Depending on the job type being executed "
        "this may result in additional cpu consumption, longer "
        "wait times in the queue (2 cpus means 2 'fewer' cpus on "
        "an agent), or all of the above."
        ""
        ".. csv-table:: **Special Values**"
        "   :header: Value, Result"
        "   :widths: 10, 50"
        ""
        "   0, minimum number of cpu resources not required "
        "   -1, agent cpu is exclusive for a task from this job")

    ram = db.Column(
        db.Integer,
        default=config.get("job_default_ram"),
        doc="Amount of ram a task from this job will require to be "
        "free in order to run.  A task exceeding this value will "
        "not result in any special behavior."
        ""
        ".. csv-table:: **Special Values**"
        "    :header: Value, Result"
        "    :widths: 10, 50"
        ""
        "0, minimum amount of free ram not required"
        "-1, agent ram is exclusive for a task from this job")

    ram_warning = db.Column(
        db.Integer,
        nullable=True,
        doc="Amount of ram used by a task before a warning raised. "
        "A task exceeding this value will not  cause any work "
        "stopping behavior.")

    ram_max = db.Column(
        db.Integer,
        nullable=True,
        doc="Maximum amount of ram a task is allowed to consume on "
        "an agent."
        ""
        ".. warning:: "
        "   If set, the task will be **terminated** if the ram in "
        "   use by the process exceeds this value.")

    hidden = db.Column(
        db.Boolean,
        default=False,
        nullable=False,
        doc="If True, keep the job hidden from the queue and web "
        "ui.  This is typically set to True if you either want "
        "to save a job for later viewing or if the jobs data "
        "is being populated in a deferred manner.")

    environ = db.Column(
        JSONDict,
        doc="Dictionary containing information about the environment "
        "in which the job will execute. "
        ""
        ".. note::"
        "    Changes made directly to this object are **not** "
        "    applied to the session.")

    data = db.Column(JSONDict,
                     doc="Json blob containing additional data for a job "
                     ""
                     ".. note:: "
                     "   Changes made directly to this object are **not** "
                     "   applied to the session.")

    to_be_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If true, the master will stop all running tasks for "
        "this job and then delete it.")

    completion_notify_sent = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="Whether or not the finish notification mail has already "
        "been sent out.")

    autodelete_time = db.Column(
        db.Integer,
        nullable=True,
        default=None,
        doc="If not None, this job will be automatically deleted this "
        "number of seconds after it finishes.")

    #
    # Relationships
    #

    queue = db.relationship("JobQueue",
                            backref=db.backref("jobs", lazy="dynamic"),
                            doc="The queue for this job")

    group = db.relationship("JobGroup",
                            backref=db.backref("jobs", lazy="dynamic"),
                            doc="The job group this job belongs to")

    user = db.relationship("User",
                           backref=db.backref("jobs", lazy="dynamic"),
                           doc="The owner of this job")

    # self-referential many-to-many relationship
    parents = db.relationship("Job",
                              secondary=JobDependency,
                              primaryjoin=id == JobDependency.c.childid,
                              secondaryjoin=id == JobDependency.c.parentid,
                              backref="children")

    notified_users = db.relationship("JobNotifiedUser",
                                     lazy="dynamic",
                                     backref=db.backref("job"),
                                     cascade="all,delete")

    tasks_queued = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == None) & "
        "(Task.job_id == Job.id)",
        doc="Relationship between this job and any :class:`Task` "
        "objects which are queued.")

    tasks_running = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.RUNNING,
        doc="Relationship between this job and any :class:`Task` "
        "objects which are running.")

    tasks_done = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.DONE,
        doc="Relationship between this job and any :class:`Task` objects "
        "which are done.")

    tasks_failed = db.relationship(
        "Task",
        lazy="dynamic",
        primaryjoin="(Task.state == %s) & "
        "(Task.job_id == Job.id)" % DBWorkState.FAILED,
        doc="Relationship between this job and any :class:`Task` objects "
        "which have failed.")

    # resource relationships
    tags = db.relationship(
        "Tag",
        backref="jobs",
        lazy="dynamic",
        secondary=JobTagAssociation,
        doc="Relationship between this job and :class:`.Tag` objects")

    def paused(self):
        return self.state == WorkState.PAUSED

    def update_state(self):
        # Import here instead of at the top of the file to avoid a circular
        # import
        from pyfarm.scheduler.tasks import send_job_completion_mail
        from pyfarm.models.agent import Agent

        num_active_tasks = db.session.query(Task).\
            filter(Task.job == self,
                   or_(Task.state == None, and_(
                            Task.state != WorkState.DONE,
                            Task.state != WorkState.FAILED))).count()
        if num_active_tasks == 0:
            num_failed_tasks = db.session.query(Task).filter(
                Task.job == self, Task.state == WorkState.FAILED).count()
            if num_failed_tasks == 0:
                if self.state != _WorkState.DONE:
                    logger.info(
                        "Job %r (id %s): state transition %r -> 'done'",
                        self.title, self.id, self.state)
                    self.state = WorkState.DONE
                    send_job_completion_mail.apply_async(args=[self.id, True],
                                                         countdown=5)
            else:
                if self.state != _WorkState.FAILED:
                    logger.info(
                        "Job %r (id %s): state transition %r -> "
                        "'failed'", self.title, self.id, self.state)
                    self.state = WorkState.FAILED
                    send_job_completion_mail.apply_async(args=[self.id, False],
                                                         countdown=5)
            db.session.add(self)
        elif self.state != _WorkState.PAUSED:
            num_running_tasks = db.session.query(Task).\
                filter(Task.job == self,
                       Task.agent_id != None,
                       Task.agent.has(and_(Agent.state != AgentState.OFFLINE,
                                           Agent.state != AgentState.DISABLED)),
                       or_(
                            Task.state == WorkState.RUNNING,
                            Task.state == None)).count()
            if num_running_tasks == 0:
                logger.debug(
                    "No running tasks in job %s (id %s), setting it "
                    "to queued", self.title, self.id)
                self.state = None
                db.session.add(self)
            elif self.state != _WorkState.RUNNING:
                self.state = WorkState.RUNNING

    # Methods used by the scheduler
    def num_assigned_agents(self):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        # Optimization: Blindly assume that we have no agents assigned if not
        # running
        if self.state != _WorkState.RUNNING:
            return 0

        try:
            return self.assigned_agents_count
        except AttributeError:
            self.assigned_agents_count =\
                db.session.query(distinct(Task.agent_id)).\
                    filter(Task.job == self,
                           Task.agent_id != None,
                           or_(Task.state == None,
                               Task.state == WorkState.RUNNING),
                           Task.agent.has(
                               and_(Agent.state != AgentState.OFFLINE,
                                    Agent.state != AgentState.DISABLED)))\
                                        .count()

            return self.assigned_agents_count

    def clear_assigned_counts(self):
        try:
            del self.assigned_agents_count
        except AttributeError:
            pass
        if self.queue:
            self.queue.clear_assigned_counts()

    def can_use_more_agents(self):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        unassigned_tasks = Task.query.filter(
            Task.job == self,
            or_(Task.state == None,
                ~Task.state.in_([WorkState.DONE, WorkState.FAILED])),
            or_(
                Task.agent == None,
                Task.agent.has(
                    Agent.state.in_([AgentState.OFFLINE,
                                     AgentState.DISABLED])))).count()

        return unassigned_tasks > 0

    def get_batch(self, agent):
        # Import here instead of at the top of the file to avoid circular import
        from pyfarm.models.agent import Agent

        tasks_query = Task.query.filter(
            Task.job == self,
            ~Task.failed_in_agents.any(id=agent.id),
            or_(Task.state == None,
                ~Task.state.in_([WorkState.DONE, WorkState.FAILED])),
            or_(Task.agent == None,
                Task.agent.has(Agent.state.in_(
                    [AgentState.OFFLINE, AgentState.DISABLED])))).\
                        order_by("frame asc, tile asc")

        batch = []
        for task in tasks_query:
            if (len(batch) < self.batch and len(batch) <
                (self.jobtype_version.max_batch or maxsize)
                    and (not self.jobtype_version.batch_contiguous or
                         (len(batch) == 0
                          or batch[-1].frame + self.by == task.frame))):
                batch.append(task)

        return batch

    def alter_frame_range(self, start, end, by):
        # We have to import this down here instead of at the top to break a
        # circular dependency between the modules
        from pyfarm.scheduler.tasks import delete_task

        if end < start:
            raise ValueError("`end` must be greater than or equal to `start`")

        self.by = by

        required_frames = []
        current_frame = start
        while current_frame <= end:
            required_frames.append(current_frame)
            current_frame += by

        existing_tasks = Task.query.filter_by(job=self).all()
        frames_to_create = required_frames
        num_created = 0
        for task in existing_tasks:
            if task.frame not in required_frames:
                delete_task.delay(task.id)
            else:
                frames_to_create.remove(task.frame)

        for frame in frames_to_create:
            if self.num_tiles:
                for tile in range_(self.num_tiles - 1):
                    num_created += 1
                    task = Task()
                    task.job = self
                    task.frame = frame
                    task.tile = tile
                    task.priority = self.priority
                    db.session.add(task)
            else:
                num_created += 1
                task = Task()
                task.job = self
                task.frame = frame
                task.priority = self.priority
                db.session.add(task)

        if frames_to_create:
            if self.state != WorkState.RUNNING:
                self.state = None

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(num_new=num_created,
                                              job_queue_id=self.job_queue_id)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.add(task_event_count)

    def rerun(self):
        """
        Makes this job rerun all its tasks.  Tasks that are currently running are
        left untouched.
        """
        num_restarted = 0
        for task in self.tasks:
            if task.state != _WorkState.RUNNING and task.state is not None:
                task.state = None
                task.agent = None
                task.failures = 0
                db.session.add(task)
                num_restarted += 1

        self.completion_notify_sent = False
        self.update_state()
        db.session.add(self)

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(job_queue_id=self.job_queue_id,
                                              num_restarted=num_restarted)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.add(task_event_count)
            db.session.commit()

        for child in self.children:
            child.rerun()

    def rerun_failed(self):
        """
        Makes this job rerun all its failed tasks.  Tasks that are done or are
        currently running are left untouched
        """
        num_restarted = 0
        for task in self.tasks:
            if task.state == _WorkState.FAILED:
                task.state = None
                task.agent = None
                task.failures = 0
                db.session.add(task)
                num_restarted += 1

        self.completion_notify_sent = False
        self.update_state()
        db.session.add(self)

        if config.get("enable_statistics"):
            task_event_count = TaskEventCount(job_queue_id=self.job_queue_id,
                                              num_restarted=num_restarted)
            task_event_count.time_start = datetime.utcnow()
            task_event_count.time_end = datetime.utcnow()
            db.session.commit()

        for child in self.children:
            child.rerun_failed()

    @validates("ram", "cpus")
    def validate_resource(self, key, value):
        """
        Validation that ensures that the value provided for either
        :attr:`.ram` or :attr:`.cpus` is a valid value with a given range
        """
        assert isinstance(value, int), "%s must be an integer" % key
        min_value = config.get("agent_min_%s" % key)
        max_value = config.get("agent_max_%s" % key)

        # check the provided input
        if min_value > value or value > max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)

        return value
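
    # Illustration with hypothetical limits: if agent_min_cpus is 1 and
    # agent_max_cpus is 256 in the configuration, assigning job.cpus = 0 or
    # job.cpus = 512 raises ValueError, while job.cpus = 16 is returned
    # unchanged.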

    @validates("progress")
    def validate_progress(self, key, value):
        if value < 0.0 or value > 1.0:
            raise ValueError("Progress must be between 0.0 and 1.0")

        return value
Example #18
from pyfarm.models.job import Job


__all__ = ("Agent", )

ALLOW_AGENT_LOOPBACK = config.get("allow_agents_from_loopback")
REGEX_HOSTNAME = re.compile(r"^(?!-)[A-Z\d-]{1,63}(?<!-)"
                            r"(\.(?!-)[A-Z\d-]{1,63}(?<!-))*\.?$",
                            re.IGNORECASE)


AgentSoftwareVersionAssociation = db.Table(
    config.get("table_agent_software_version_assoc"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "software_version_id", db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        primary_key=True))


AgentTagAssociation = db.Table(
    config.get("table_agent_tag_assoc"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "tag_id", db.Integer,
        db.ForeignKey("%s.id" % config.get("table_tag")),
        primary_key=True))
Example #19
class JobTypeVersion(db.Model, UtilityMixins, ReprMixin):
    """
    Defines a specific jobtype version.
    """
    __tablename__ = config.get("table_job_type_version")
    __table_args__ = (UniqueConstraint("jobtype_id", "version"), )

    REPR_COLUMNS = ("id", "jobtype_id", "version")

    id = id_column(IDTypeWork)

    jobtype_id = db.Column(IDTypeWork,
                           db.ForeignKey("%s.id" %
                                         config.get("table_job_type")),
                           nullable=False,
                           doc="The jobtype this version belongs to")

    version = db.Column(db.Integer, nullable=False, doc="The version number")

    max_batch = db.Column(
        db.Integer,
        default=config.get("job_type_max_batch"),
        doc="When the queue runs, this is the maximum number of tasks "
        "that the queue can select to assign to a single"
        "agent.  If left empty, no maximum applies")

    batch_contiguous = db.Column(
        db.Boolean,
        default=config.get("job_type_batch_contiguous"),
        doc="If True then the queue will be forced to batch"
        "numerically contiguous tasks only for this job type.  "
        "For example if True it would batch frames 1, 2, 3, 4 "
        "together but not 2, 4, 6, 8.  If this column is False "
        "however the queue will batch non-contiguous tasks too.")

    no_automatic_start_time = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
        doc="If set, we will not automatically set `time_started_on` "
        "for the tasks in jobs of this type when they are set "
        "to `running`.")

    supports_tiling = db.Column(
        db.Boolean,
        default=False,
        doc="Whether or not the jobtype supports tiling, i.e. splitting single "
        "frames into regions and then rendering those independently from "
        "each other.")

    classname = db.Column(
        db.String(config.get("job_type_max_class_name_length")),
        nullable=True,
        doc="The name of the job class contained within the file being "
        "loaded.  This field may be null but when it's not provided "
        "job type name will be used instead.")

    code = db.Column(db.UnicodeText,
                     nullable=False,
                     doc="The source code of the job type")

    #
    # Relationships
    #
    jobtype = db.relationship("JobType",
                              backref=db.backref("versions",
                                                 lazy="dynamic",
                                                 cascade="all, delete-orphan"),
                              doc="Relationship between this version and the "
                              ":class:`JobType` it belongs to"
                              "")

    jobs = db.relationship("Job",
                           backref="jobtype_version",
                           lazy="dynamic",
                           doc="Relationship between this jobtype version and "
                           ":class:`.Job` objects.")

    @validates("max_batch")
    def validate_max_batch(self, key, value):
        if isinstance(value, int) and value < 1:
            raise ValueError("max_batch must be greater than or equal to 1")

        return value

    @validates("version")
    def validate_version(self, key, value):
        if isinstance(value, int) and value < 1:
            raise ValueError("version must be greater than or equal to 1")

        return value
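
A short sketch of how the validators above behave; SQLAlchemy runs @validates
hooks on attribute assignment, so invalid values are rejected before anything
reaches the database (the instance and values here are illustrative):

# Illustrative only.
version = JobTypeVersion()
version.version = 1         # accepted by validate_version()
version.max_batch = 50      # accepted by validate_max_batch()
try:
    version.version = 0     # rejected: must be >= 1
except ValueError as error:
    print(error)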
Example #20
from pyfarm.core.enums import STRING_TYPES, PY3
from pyfarm.master.application import app, db, login_serializer
from pyfarm.master.config import config
from pyfarm.models.core.mixins import ReprMixin
from pyfarm.models.core.functions import split_and_extend

__all__ = ("User", "Role")

SHA256_ASCII_LENGTH = 64  # static length of a sha256 string

# roles the user is a member of
UserRole = db.Table(
    config.get("table_user_role"),
    db.Column("user_id",
              db.Integer,
              db.ForeignKey("%s.id" % config.get("table_user")),
              doc="The id of the associated user"),
    db.Column("role_id",
              db.Integer,
              db.ForeignKey("%s.id" % config.get("table_role")),
              doc="The id of the associated role"))


class User(db.Model, UserMixin, ReprMixin):
    """
    Stores information about a user including the roles they belong to
    """
    __tablename__ = config.get("table_user")
    REPR_COLUMNS = ("id", "username")

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    __tablename__ = "%s_state_change_test" % config.get("table_prefix")
    id = db.Column(Integer, primary_key=True, autoincrement=True)
    state = db.Column(WorkStateEnum)
    attempts = db.Column(Integer, nullable=False, default=0)
    time_started = db.Column(DateTime)
    time_finished = db.Column(DateTime)


event.listen(WorkStateChangedModel.state, "set",
             WorkStateChangedModel.state_changed)
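# With this listener in place, SQLAlchemy calls
# WorkStateChangedModel.state_changed() every time the state attribute is
# assigned on an instance.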

MixinModelRelation1 = db.Table(
    "%s_mixin_rel_test1" % config.get("table_prefix"), db.metadata,
    db.Column("mixin_id",
              db.Integer,
              db.ForeignKey("%s.id" % "%s_mixin_test" %
                            config.get("table_prefix")),
              primary_key=True))

MixinModelRelation2 = db.Table(
    "%s_mixin_rel_test2" % config.get("table_prefix"), db.metadata,
    db.Column("mixin_id",
              db.Integer,
              db.ForeignKey("%s.id" % "%s_mixin_test" %
                            config.get("table_prefix")),
              primary_key=True))


class MixinModel(db.Model, UtilityMixins):
    __tablename__ = "%s_mixin_test" % config.get("table_prefix")
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    a = db.Column(db.Integer)
Example #22
from pyfarm.master.application import db
from pyfarm.models.core.functions import work_columns
from pyfarm.models.core.types import id_column, JSONDict, JSONList, IDTypeWork
from pyfarm.models.core.cfg import (
    TABLE_JOB, TABLE_JOB_SOFTWARE_DEP, TABLE_JOB_TYPE, TABLE_TAG,
    TABLE_JOB_TAG_ASSOC, MAX_COMMAND_LENGTH, MAX_TAG_LENGTH, MAX_USERNAME_LENGTH,
    TABLE_SOFTWARE, TABLE_JOB_DEPENDENCIES, TABLE_PROJECT)
from pyfarm.models.core.mixins import (
    ValidatePriorityMixin, WorkStateChangedMixin, ReprMixin)
from pyfarm.models.jobtype import JobType  # required for a relationship


JobSoftwareDependency = db.Table(
    TABLE_JOB_SOFTWARE_DEP, db.metadata,
    db.Column("job_id", IDTypeWork,
              db.ForeignKey("%s.id" % TABLE_JOB), primary_key=True),
    db.Column("software_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_SOFTWARE), primary_key=True))


JobTagAssociation = db.Table(
    TABLE_JOB_TAG_ASSOC, db.metadata,
    db.Column("job_id", IDTypeWork,
              db.ForeignKey("%s.id" % TABLE_JOB), primary_key=True),
    db.Column("tag_id", db.Integer,
              db.ForeignKey("%s.id" % TABLE_TAG), primary_key=True))


JobDependencies = db.Table(
    TABLE_JOB_DEPENDENCIES, db.metadata,
    db.Column("parentid", IDTypeWork,
Example #23
class JobQueue(db.Model, UtilityMixins, ReprMixin):
    """
    Stores information about a job queue. Used for flexible, configurable
    distribution of computing capacity to jobs.
    """
    __tablename__ = config.get("table_job_queue")
    __table_args__ = (UniqueConstraint("parent_jobqueue_id", "name"), )

    REPR_COLUMNS = ("id", "name")

    id = id_column(IDTypeWork)

    parent_jobqueue_id = db.Column(
        IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_job_queue")),
        nullable=True,
        doc="The parent queue of this queue. If NULL, this is a top "
        "level queue.")

    name = db.Column(db.String(config.get("max_queue_name_length")),
                     nullable=False,
                     doc="The name of the job queue")

    minimum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will try to assign at least this number of "
        "agents to jobs in or below this queue as long as it "
        "can use them, before any other considerations.")

    maximum_agents = db.Column(
        db.Integer,
        nullable=True,
        doc="The scheduler will never assign more than this number of "
        "agents to jobs in or below this queue.")

    priority = db.Column(
        db.Integer,
        nullable=False,
        default=config.get("queue_default_priority"),
        doc="The priority of this job queue. The scheduler will not "
        "assign any nodes to other job queues or jobs with the "
        "same parent and a lower priority as long as this one "
        "can still use nodes. The minimum_agents column takes "
        "precedence over this.")

    weight = db.Column(db.Integer,
                       nullable=False,
                       default=config.get("queue_default_weight"),
                       doc="The weight of this job queue. The scheduler will "
                       "distribute available agents between jobs and job "
                       "queues in the same queue in proportion to their "
                       "weights.")

    fullpath = db.Column(db.String(config.get("max_queue_path_length")),
                         doc="The path of this jobqueue.  This column is a "
                         "database denormalization.  It is technically "
                         "redundant, but faster to access than recursively "
                         "querying all parent queues.  If set to NULL, the "
                         "path must be computed by recursively querying "
                         "the parent queues.")

    #
    # Relationship
    #
    parent = db.relationship("JobQueue",
                             remote_side=[id],
                             backref=db.backref("children", lazy="dynamic"),
                             doc="Relationship between this queue its parent")

    def path(self):
        # Import here instead of at the top to break circular dependency
        from pyfarm.scheduler.tasks import cache_jobqueue_path

        if self.fullpath:
            return self.fullpath
        else:
            cache_jobqueue_path.delay(self.id)
            path = "/%s" % (self.name or "")
            if self.parent:
                return self.parent.path() + path
            else:
                return path
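
    # Example of the cached value (queue names here are hypothetical): a queue
    # "characters" under a top-level queue "render" has the path
    # "/render/characters".  Until the deferred cache_jobqueue_path task has
    # filled in fullpath, path() computes it by walking the parent queues.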

    def child_queues_sorted(self):
        """
        Return child queues sorted by number of currently assigned agents with
        priority as a secondary sort key.
        """
        queues = [x for x in self.children]
        return sorted(queues,
                      key=lambda x: x.num_assigned_agents(),
                      reverse=True)

    def child_jobs(self, filters):
        # Import down here instead of at the top to avoid circular import
        from pyfarm.models.job import Job

        jobs_query = Job.query

        if self.id:
            jobs_query = jobs_query.filter_by(queue=self)

        wanted_states = []
        if filters["state_paused"]:
            wanted_states.append(WorkState.PAUSED)
        if filters["state_running"]:
            wanted_states.append(WorkState.RUNNING)
        if filters["state_done"]:
            wanted_states.append(WorkState.DONE)
        if filters["state_failed"]:
            wanted_states.append(WorkState.FAILED)
        if filters["state_queued"]:
            jobs_query = jobs_query.filter(
                or_(Job.state == None, Job.state.in_(wanted_states)))
        else:
            jobs_query = jobs_query.filter(Job.state.in_(wanted_states))

        return sorted(jobs_query.all(),
                      key=lambda x: x.num_assigned_agents(),
                      reverse=True)

    def num_assigned_agents(self):
        try:
            return self.assigned_agents_count
        except AttributeError:
            # Import down here instead of at the top to avoid circular import
            from pyfarm.models.task import Task
            from pyfarm.models.job import Job

            self.assigned_agents_count = 0
            for queue in self.children:
                self.assigned_agents_count += queue.num_assigned_agents()
            self.assigned_agents_count +=\
                db.session.query(distinct(Task.agent_id)).\
                    filter(Task.job.has(Job.queue == self),
                           Task.agent_id != None,
                           Task.agent.has(
                               and_(Agent.state != AgentState.OFFLINE,
                                    Agent.state != AgentState.DISABLED)),
                           or_(Task.state == None,
                               Task.state == WorkState.RUNNING)).count()

            return self.assigned_agents_count

    def clear_assigned_counts(self):
        try:
            del self.assigned_agents_count
        except AttributeError:
            pass
        if self.parent:
            self.parent.clear_assigned_counts()

    def get_job_for_agent(self, agent, unwanted_job_ids=None):
        # Import down here instead of at the top to avoid circular import
        from pyfarm.models.job import Job

        supported_types = agent.get_supported_types()
        if not supported_types:
            return None

        available_ram = agent.ram if USE_TOTAL_RAM else agent.free_ram
        child_jobs = Job.query.filter(
            or_(Job.state == WorkState.RUNNING, Job.state == None),
            Job.job_queue_id == self.id, ~Job.parents.any(
                or_(Job.state == None, Job.state != WorkState.DONE)),
            Job.jobtype_version_id.in_(supported_types),
            Job.ram <= available_ram).all()
        child_jobs = [
            x for x in child_jobs if agent.satisfies_job_requirements(x)
        ]
        if unwanted_job_ids:
            child_jobs = [
                x for x in child_jobs if x.id not in unwanted_job_ids
            ]
        child_queues = JobQueue.query.filter(
            JobQueue.parent_jobqueue_id == self.id).all()

        # Before anything else, enforce minimums
        for job in child_jobs:
            if job.state == _WorkState.RUNNING:
                if (job.num_assigned_agents() < (job.minimum_agents or 0)
                        and job.num_assigned_agents() <
                    (job.maximum_agents or maxsize)
                        and job.can_use_more_agents()):
                    return job
            elif job.minimum_agents and job.minimum_agents > 0:
                return job

        for queue in child_queues:
            if (queue.num_assigned_agents() < (queue.minimum_agents or 0)
                    and queue.num_assigned_agents() <
                (queue.maximum_agents or maxsize)):
                job = queue.get_job_for_agent(agent, unwanted_job_ids)
                if job:
                    return job

        objects_by_priority = {}

        for queue in child_queues:
            if queue.priority in objects_by_priority:
                objects_by_priority[queue.priority] += [queue]
            else:
                objects_by_priority[queue.priority] = [queue]

        for job in child_jobs:
            if job.priority in objects_by_priority:
                objects_by_priority[job.priority] += [job]
            else:
                objects_by_priority[job.priority] = [job]

        available_priorities = sorted(objects_by_priority.keys(), reverse=True)

        # Work through the priorities in descending order
        for priority in available_priorities:
            objects = objects_by_priority[priority]
            active_objects = [
                x for x in objects
                if (type(x) != Job or x.state == _WorkState.RUNNING)
            ]
            weight_sum = reduce(lambda a, b: a + b.weight, active_objects, 0)
            total_assigned = reduce(lambda a, b: a + b.num_assigned_agents(),
                                    objects, 0)
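            # Sort ascending by each object's share of the currently assigned
            # agents divided by its share of the total weight, so the most
            # under-served object (relative to its weight) comes first.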
            objects.sort(
                key=lambda x: (
                    ((float(x.num_assigned_agents()) / total_assigned)
                     if total_assigned else 0) /
                    ((float(x.weight) / weight_sum)
                     if weight_sum and x.weight else 1)))

            selected_job = None
            for item in objects:
                if isinstance(item, Job):
                    if item.state == _WorkState.RUNNING:
                        if (item.can_use_more_agents()
                                and item.num_assigned_agents() <
                            (item.maximum_agents or maxsize)):
                            if PREFER_RUNNING_JOBS:
                                return item
                            elif (selected_job is None
                                  or selected_job.time_submitted >
                                  item.time_submitted):
                                selected_job = item
                    elif (selected_job is None or
                          selected_job.time_submitted > item.time_submitted):
                        # If this job is not running yet, remember it, but keep
                        # looking for already running or queued but older jobs
                        selected_job = item
                if isinstance(item, JobQueue):
                    if (item.num_assigned_agents() <
                        (item.maximum_agents or maxsize)):
                        job = item.get_job_for_agent(agent, unwanted_job_ids)
                        if job:
                            return job
            if selected_job:
                return selected_job

        return None

    @staticmethod
    def top_level_unique_check(mapper, connection, target):
        if target.parent_jobqueue_id is None:
            count = JobQueue.query.filter_by(parent_jobqueue_id=None,
                                             name=target.name).count()
            if count > 0:
                raise ValueError("Cannot have two jobqueues named %r at the "
                                 "top level" % target.name)