Example #1
0
class Test(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    author: author name
    description: description of the test
    name: test name
    time: short, medium, long
    test_class: This describes the class the test belongs in.
    test_category: This describes the category for your tests
    test_type: Client or Server
    path: path to pass to run_test()
    sync_count:  is a number >=1 (1 being the default). If it's 1, then it's an
                 async job. If it's >1 it's sync job for that number of machines
                 i.e. if sync_count = 2 it is a sync job that requires two
                 machines.
    Optional:
    dependencies: What the test requires to run. Comma-delimited list
    dependency_labels: many-to-many relationship with labels corresponding to
                       test dependencies.
    experimental: If this is set to True production servers will ignore the test
    run_verify: Whether or not the scheduler should run the verify stage
    """
    # start_value=1 keeps 0 out of the range of valid TestTime values.
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)
    TestTypes = model_attributes.TestTypes
    # TODO(showard) - this should be merged with Job.ControlType (but right
    # now they use opposite values)

    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    # Defaults to True so new tests are ignored by production servers until
    # explicitly promoted (see "experimental" in the docstring above).
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=True)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    test_type = dbmodels.SmallIntegerField(choices=TestTypes.choices())
    sync_count = dbmodels.IntegerField(default=1)
    path = dbmodels.CharField(max_length=255, unique=True)

    dependency_labels = (dbmodels.ManyToManyField(
        Label, blank=True, db_table='afe_autotests_dependency_labels'))
    # presumably consumed by model_logic for name-based lookups -- confirm
    # against ModelExtensions
    name_field = 'name'
    objects = model_logic.ExtendedManager()

    def admin_description(self):
        """Return the description escaped and wrapped for HTML display."""
        escaped_description = saxutils.escape(self.description)
        return '<span style="white-space:pre">%s</span>' % escaped_description

    admin_description.allow_tags = True
    admin_description.short_description = 'Description'

    class Meta:
        db_table = 'afe_autotests'

    def __unicode__(self):
        return unicode(self.name)
Example #2
0
# Allow running either from an installed autotest package or from a source
# checkout where the local "common" shim sets up sys.path.
try:
    import autotest.common as common  # pylint: disable=W0611
except ImportError:
    import common  # pylint: disable=W0611
from autotest.client.shared import enum

# common enums for Job attributes
RebootBefore = enum.Enum('Never', 'If dirty', 'Always')
RebootAfter = enum.Enum('Never', 'If all tests passed', 'Always')

# common enums for test attributes
# start_value=1 keeps 0 out of the range of valid values.
TestTypes = enum.Enum('Client', 'Server', start_value=1)

# common enums for profiler and job parameter types
# string_values=True: enum values are the strings themselves, not integers.
ParameterTypes = enum.Enum('int', 'float', 'string', string_values=True)
Example #3
0
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """\
    owner: username of job owner
    name: job name (does not have to be unique)
    priority: Low, Medium, High, Urgent (or 0-3)
    control_file: contents of control file
    control_type: Client or Server
    created_on: date of job creation
    submitted_on: date of job submission
    synch_count: how many hosts should be used per autoserv execution
    run_verify: Whether or not to run the verify phase
    timeout: hours from queuing time until job times out
    max_runtime_hrs: hours from job starting time until job times out
    email_list: list of people to email on completion delimited by any of:
                white space, ',', ':', ';'
    dependency_labels: many-to-many relationship with labels corresponding to
                       job dependencies
    reboot_before: Never, If dirty, or Always
    reboot_after: Never, If all tests passed, or Always
    parse_failed_repair: if True, a failed repair launched by this job will have
    its results parsed as part of the job.
    drone_set: The set of drones to run this job on
    """
    # Site-configurable defaults, read from global_config once at import time.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=240)
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB',
        'parse_failed_repair_default',
        type=bool,
        default=False)

    Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
    # Server=1, Client=2 (start_value=1); note this ordering is the opposite
    # of the test-type enum used elsewhere in this codebase.
    ControlType = enum.Enum('Server', 'Client', start_value=1)

    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(
        choices=Priority.choices(),
        blank=True,  # to allow 0
        default=Priority.MEDIUM)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=ControlType.choices(),
        blank=True,  # to allow 0
        default=ControlType.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(null=True, default=1)
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=True)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (dbmodels.ManyToManyField(
        Label, blank=True, db_table='afe_jobs_dependency_labels'))
    # NOTE(review): DEFAULT_REBOOT_BEFORE / DEFAULT_REBOOT_AFTER are not
    # defined in this class -- presumably module-level constants; confirm
    # they exist in the enclosing module.
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(),
        blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(),
        blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    parameterized_job = dbmodels.ForeignKey(ParameterizedJob,
                                            null=True,
                                            blank=True)

    # custom manager
    objects = JobManager()

    def is_server_job(self):
        """Return True if this job runs server-side control files."""
        return self.control_type == self.ControlType.SERVER

    @classmethod
    def parameterized_jobs_enabled(cls):
        """Return the global-config flag enabling parameterized jobs."""
        return global_config.global_config.get_config_value(
            'AUTOTEST_WEB', 'parameterized_jobs', type=bool)

    @classmethod
    def check_parameterized_job(cls, control_file, parameterized_job):
        """
        Checks that the job is valid given the global config settings

        First, either control_file must be set, or parameterized_job must be
        set, but not both. Second, parameterized_job must be set if and only if
        the parameterized_jobs option in the global config is set to True.
        """
        # XOR: exactly one of control_file / parameterized_job must be set.
        if not (bool(control_file) ^ bool(parameterized_job)):
            raise Exception('Job must have either control file or '
                            'parameterization, but not both')

        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
        if control_file and parameterized_jobs_enabled:
            raise Exception('Control file specified, but parameterized jobs '
                            'are enabled')
        if parameterized_job and not parameterized_jobs_enabled:
            raise Exception('Parameterized job specified, but parameterized '
                            'jobs are not enabled')

    @classmethod
    def create(cls, owner, options, hosts):
        """\
        Creates a job by taking some information (the listed args)
        and filling in the rest of the necessary information.
        """
        # Reject the whole job if the submitter lacks ACL access to any host.
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')
        cls.check_parameterized_job(control_file=control_file,
                                    parameterized_job=parameterized_job)

        # Fall back to the submitting user's reboot preferences when the
        # options do not specify them explicitly.
        user = User.current_user()
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            timeout=options.get('timeout'),
            max_runtime_hrs=options.get('max_runtime_hrs'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job)

        job.dependency_labels = options['dependencies']

        # iteritems(): this module targets Python 2 (see __unicode__ below).
        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job

    def save(self, *args, **kwargs):
        """Validate the control-file/parameterization invariant, then save."""
        self.check_parameterized_job(control_file=self.control_file,
                                     parameterized_job=self.parameterized_job)
        super(Job, self).save(*args, **kwargs)

    def queue(self, hosts, profiles, atomic_group=None, is_template=False):
        """Enqueue a job on the given hosts."""
        if not hosts:
            if atomic_group:
                # No hosts or labels are required to queue an atomic group
                # Job.  However, if they are given, we respect them below.
                atomic_group.enqueue_job(self, is_template=is_template)
            else:
                # hostless job
                entry = HostQueueEntry.create(job=self,
                                              profile='N/A',
                                              is_template=is_template)
                entry.save()
            return

        # Pair each host with its profile; an empty profile list means
        # "no profile" for every host.
        if not profiles:
            profiles = [''] * len(hosts)
        for host, profile in zip(hosts, profiles):
            host.enqueue_job(self,
                             profile=profile,
                             atomic_group=atomic_group,
                             is_template=is_template)

    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
        """Create and save a RecurringRun for this job; return its id."""
        rec = RecurringRun(job=self,
                           start_date=start_date,
                           loop_period=loop_period,
                           loop_count=loop_count,
                           owner=User.objects.get(login=owner))
        rec.save()
        return rec.id

    def user(self):
        """Return the User owning this job, or None if no such user exists."""
        try:
            return User.objects.get(login=self.owner)
        except self.DoesNotExist:
            return None

    def abort(self):
        """Abort every host queue entry belonging to this job."""
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.abort()

    def tag(self):
        """Return the '<id>-<owner>' tag identifying this job."""
        return '%s-%s' % (self.id, self.owner)

    def keyval_dict(self):
        """Return this job's keyvals as a plain {key: value} dict."""
        return dict(
            (keyval.key, keyval.value) for keyval in self.jobkeyval_set.all())

    class Meta:
        db_table = 'afe_jobs'

    def __unicode__(self):
        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
Example #4
0
class Host(model_logic.ModelWithInvalid, dbmodels.Model,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    synch_id: currently unused
    status: string describing status of host
    invalid: true if the host has been deleted
    protection: indicates what can be done to this host during repair
    locked_by: user that locked the host, or null if the host is unlocked
    lock_time: DateTime at which the host was locked
    dirty: true if the host has been used without being rebooted
    """
    # string_values=True: statuses are stored as their display strings.
    Status = enum.Enum('Verifying',
                       'Running',
                       'Ready',
                       'Repairing',
                       'Repair Failed',
                       'Cleaning',
                       'Pending',
                       string_values=True)
    Protection = host_protections.Protection

    hostname = dbmodels.CharField(max_length=255, unique=True)
    labels = dbmodels.ManyToManyField(Label,
                                      blank=True,
                                      db_table='afe_hosts_labels')
    locked = dbmodels.BooleanField(default=False)
    synch_id = dbmodels.IntegerField(blank=True,
                                     null=True,
                                     editable=settings.FULL_ADMIN)
    status = dbmodels.CharField(max_length=255,
                                default=Status.READY,
                                choices=Status.choices(),
                                editable=settings.FULL_ADMIN)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    protection = dbmodels.SmallIntegerField(null=False,
                                            blank=True,
                                            choices=host_protections.choices,
                                            default=host_protections.default)
    locked_by = dbmodels.ForeignKey(User,
                                    null=True,
                                    blank=True,
                                    editable=False)
    lock_time = dbmodels.DateTimeField(null=True, blank=True, editable=False)
    dirty = dbmodels.BooleanField(default=True, editable=settings.FULL_ADMIN)

    # presumably consumed by model_logic for name-based lookups -- confirm
    # against ModelExtensions
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()

    def __init__(self, *args, **kwargs):
        super(Host, self).__init__(*args, **kwargs)
        # Track 'status' so on_attribute_changed() fires when it changes.
        self._record_attributes(['status'])

    @staticmethod
    def create_one_time_host(hostname):
        """Create (or reuse an invalid) host marked for one-time use.

        Raises ValidationError if a valid host with this hostname already
        exists.  The returned host is unlocked, marked DO_NOT_REPAIR, and
        stripped of ACL groups and labels.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname':
                    '%s already exists in the autotest DB.  '
                    'Select it rather than entering it as a one time '
                    'host.' % hostname
                })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host

    def resurrect_object(self, old_object):
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status

    def clean_object(self):
        """Detach this host from all ACL groups and labels."""
        self.aclgroup_set.clear()
        self.labels.clear()

    def save(self, *args, **kwargs):
        """Save the host, maintaining lock bookkeeping and ACL membership."""
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        # Keep locked_by/lock_time in sync with the locked flag.
        if self.locked and not self.locked_by:
            self.locked_by = User.current_user()
            self.lock_time = datetime.now()
            self.dirty = True
        elif not self.locked and self.locked_by:
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            # New hosts always join the 'Everyone' ACL group.
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()

    def delete(self):
        """Delete the host, aborting and tombstoning its queue entries."""
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()

    def on_attribute_changed(self, attribute, old_value):
        """Log host status transitions (only 'status' is recorded)."""
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)

    def enqueue_job(self, job, profile, atomic_group=None, is_template=False):
        """Enqueue a job on this host."""
        queue_entry = HostQueueEntry.create(host=self,
                                            job=job,
                                            profile=profile,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        # Prevent the same job from being scheduled on this host again.
        block = IneligibleHostQueue(job=job, host=self)
        block.save()

    def platform(self):
        """Return this host's platform label, or None if it has none."""
        # TODO(showard): slighly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]

    platform.short_description = 'Platform'

    @classmethod
    def check_no_platform(cls, hosts):
        """Raise ValidationError if any given host already has a platform."""
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [
                label.name for label in host.label_list if label.platform
            ]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' %
                              (host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})

    def is_dead(self):
        """Return True if this host is in the Repair Failed state."""
        return self.status == Host.Status.REPAIR_FAILED

    def active_queue_entry(self):
        """Return this host's single active queue entry, or None."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]

    def _get_attribute_model_and_args(self, attribute):
        # Hook for ModelWithAttributes: attributes live in HostAttribute
        # rows keyed by (host, attribute).
        return HostAttribute, dict(host=self, attribute=attribute)

    class Meta:
        db_table = 'afe_hosts'

    def __unicode__(self):
        return unicode(self.hostname)
Example #5
0
class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Tasks to run on hosts at the next time they are in the Ready state. Use this
    for high-priority tasks, such as forced repair or forced reinstall.

    host: host to run this task on
    task: special task to run
    time_requested: date and time the request for this task was made
    is_active: task is currently running
    is_complete: task has finished running
    time_started: date and time the task started
    queue_entry: Host queue entry waiting on this task (or None, if task was not
                 started in preparation of a job)
    """
    # string_values=True: task names are stored as their display strings.
    Task = enum.Enum('Verify', 'Cleanup', 'Repair', string_values=True)

    host = dbmodels.ForeignKey(Host, blank=False, null=False)
    task = dbmodels.CharField(max_length=64,
                              choices=Task.choices(),
                              blank=False,
                              null=False)
    requested_by = dbmodels.ForeignKey(User)
    time_requested = dbmodels.DateTimeField(auto_now_add=True,
                                            blank=False,
                                            null=False)
    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_started = dbmodels.DateTimeField(null=True, blank=True)
    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
    success = dbmodels.BooleanField(default=False, blank=False, null=False)

    objects = model_logic.ExtendedManager()

    def save(self, **kwargs):
        """Save, attributing the task to the queue entry's job owner."""
        if self.queue_entry:
            self.requested_by = User.objects.get(
                login=self.queue_entry.job.owner)
        super(SpecialTask, self).save(**kwargs)

    def execution_path(self):
        """@see HostQueueEntry.execution_path()"""
        return 'hosts/%s/%s-%s' % (self.host.hostname, self.id,
                                   self.task.lower())

    # property to emulate HostQueueEntry.status
    @property
    def status(self):
        """
        Return a host queue entry status appropriate for this task.  Although
        SpecialTasks are not HostQueueEntries, it is helpful to the user to
        present similar statuses.
        """
        if self.is_complete:
            if self.success:
                return HostQueueEntry.Status.COMPLETED
            return HostQueueEntry.Status.FAILED
        if self.is_active:
            return HostQueueEntry.Status.RUNNING
        return HostQueueEntry.Status.QUEUED

    # property to emulate HostQueueEntry.started_on
    @property
    def started_on(self):
        """Alias of time_started, mirroring HostQueueEntry's interface."""
        return self.time_started

    @classmethod
    def schedule_special_task(cls, host, task):
        """
        Schedules a special task on a host if the task is not already scheduled.
        """
        # Reuse a pending (not yet started, not finished) identical task.
        existing_tasks = SpecialTask.objects.filter(host__id=host.id,
                                                    task=task,
                                                    is_active=False,
                                                    is_complete=False)
        if existing_tasks:
            return existing_tasks[0]

        special_task = SpecialTask(host=host,
                                   task=task,
                                   requested_by=User.current_user())
        special_task.save()
        return special_task

    def activate(self):
        """
        Sets a task as active and sets the time started to the current time.
        """
        logging.info('Starting: %s', self)
        self.is_active = True
        self.time_started = datetime.now()
        self.save()

    def finish(self, success):
        """
        Sets a task as completed
        """
        logging.info('Finished: %s', self)
        self.is_active = False
        self.is_complete = True
        self.success = success
        self.save()

    class Meta:
        db_table = 'afe_special_tasks'

    def __unicode__(self):
        """Human-readable summary, annotated with completion/activity state."""
        result = u'Special Task %s (host %s, task %s, time %s)' % (
            self.id, self.host, self.task, self.time_requested)
        if self.is_complete:
            result += u' (completed)'
        elif self.is_active:
            result += u' (active)'

        return result
Example #6
0
    def get_value(self,
                  section,
                  key,
                  type=str,
                  default=None,
                  allow_blank=False):
        """Return the stored config value for (section, key).

        Falls back to *default* when the pair was never registered.  The
        *type* and *allow_blank* parameters are accepted for interface
        compatibility but are not used here.
        """
        return self._config_info.get((section, key), default)


# the SpecialTask names here must match the suffixes used on the SpecialTask
# results directories
_PidfileType = enum.Enum('verify', 'cleanup', 'repair', 'job', 'gather',
                         'parse', 'archive')

_PIDFILE_TO_PIDFILE_TYPE = {
    drone_manager.AUTOSERV_PID_FILE: _PidfileType.JOB,
    drone_manager.CRASHINFO_PID_FILE: _PidfileType.GATHER,
    drone_manager.PARSER_PID_FILE: _PidfileType.PARSE,
    drone_manager.ARCHIVER_PID_FILE: _PidfileType.ARCHIVE,
}

_PIDFILE_TYPE_TO_PIDFILE = dict(
    (value, key) for key, value in _PIDFILE_TO_PIDFILE_TYPE.items())


class MockDroneManager(NullMethodObject):
    """
    Public attributes:
# Changing this file has consequences that need to be understood.
# Adding a protection level to the enum requires you to append your change to
# the end of the enum or a database migration needs to be added to migrate
# older protections to match the layout of the new enum.
# Removing a protection level from the enum requires a database migration to
# update the integer values in the DB and migrate hosts that use the removed
# protection to a default protection level.
# IF THIS IS NOT DONE HOSTS' PROTECTION LEVELS WILL BE CHANGED RANDOMLY.

Protection = enum.Enum(
    'No protection',  # Repair can do anything to
    # this host.
    'Repair software only',  # repair should try to fix any
    # software problem
    'Repair filesystem only',  # Repair should only try to
    # recover the file system.
    'Do not repair',  # Repair should not touch this
    # host.
    'Do not verify',  # Don't even try to verify
    # this host
)

running_client = settings.check_stand_alone_client_run()

try:
    _bad_value = object()
    default_protection = settings.get_value('HOSTS',
                                            'default_protection',
                                            default=_bad_value)
    if default_protection == _bad_value:
        if not running_client:
Example #8
0
"""
This module contains the status enums for use by HostQueueEntrys in the
database.  It is a stand alone module as these status strings are needed
from various disconnected pieces of code that should not depend on everything
that autotest.frontend.afe.models depends on such as RPC clients.
"""

from autotest.client.shared import enum

Status = enum.Enum('Queued',
                   'Starting',
                   'Verifying',
                   'Pending',
                   'Waiting',
                   'Running',
                   'Gathering',
                   'Parsing',
                   'Archiving',
                   'Aborted',
                   'Completed',
                   'Failed',
                   'Stopped',
                   'Template',
                   string_values=True)
ACTIVE_STATUSES = (Status.STARTING, Status.VERIFYING, Status.PENDING,
                   Status.RUNNING, Status.GATHERING)
COMPLETE_STATUSES = (Status.ABORTED, Status.COMPLETED, Status.FAILED,
                     Status.STOPPED, Status.TEMPLATE)