Example #1
 def save(self, new_data):
     user = thread_local.get_user()
     if hasattr(self, 'original_object'):
         if (not user.is_superuser()
             and self.original_object.name == 'Everyone'):
             raise AclAccessViolation("You cannot modify 'Everyone'!")
         self.original_object.check_for_acl_violation_acl_group()
     obj = super(AclGroup.Manipulator, self).save(new_data)
     if not hasattr(self, 'original_object'):
         obj.users.add(thread_local.get_user())
     obj.add_current_user_if_empty()
     obj.on_host_membership_change()
     return obj
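Every example on this page reads the requesting user from a per-thread store via thread_local.get_user() (the middleware examples further down also call thread_local.set_user()). The module itself is not part of this listing; purely for orientation, a minimal sketch of such a helper, assuming it is backed by Python's threading.local, could look like this:

import threading

_store = threading.local()

def set_user(user):
    # Bind a user object to the current thread for the rest of the request.
    _store.user = user

def get_user():
    # Return the user bound to this thread, or None if nothing was set.
    return getattr(_store, 'user', None)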
Example #2
 def check_for_acl_violation_acl_group(self):
     user = thread_local.get_user()
     if user.is_superuser():
         return None
     if not user in self.users.all():
         raise AclAccessViolation("You do not have access to %s"
                                  % self.name)
Example #3
 def current_user(cls):
     user = thread_local.get_user()
     if user is None:
         user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
         user.access_level = cls.ACCESS_ROOT
         user.save()
     return user
Example #4
def create_recurring_run(job_id, start_date, loop_period, loop_count):
    owner = thread_local.get_user().login
    job = models.Job.objects.get(id=job_id)
    return job.create_recurring_job(start_date=start_date,
                                    loop_period=loop_period,
                                    loop_count=loop_count,
                                    owner=owner)
Example #5
 def current_user(cls):
     user = thread_local.get_user()
     if user is None:
         user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
         user.access_level = cls.ACCESS_ROOT
         user.save()
     return user
Example #6
 def check_abort_permissions(queue_entries):
     """
     look for queue entries that aren't abortable, meaning
      * the job isn't owned by this user, and
        * the machine isn't ACL-accessible, or
        * the machine is in the "Everyone" ACL
     """
     user = thread_local.get_user()
     if user.is_superuser():
         return
     not_owned = queue_entries.exclude(job__owner=user.login)
     # I do this using ID sets instead of just Django filters because
     # filtering on M2M fields is broken in Django 0.96.  It's better in 1.0.
     accessible_ids = set(
         entry.id for entry
         in not_owned.filter(host__aclgroup__users__login=user.login))
     public_ids = set(entry.id for entry
                      in not_owned.filter(host__aclgroup__name='Everyone'))
     cannot_abort = [entry for entry in not_owned.select_related()
                     if entry.id not in accessible_ids
                     or entry.id in public_ids]
     if len(cannot_abort) == 0:
         return
     entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                           entry.host_or_metahost_name())
                             for entry in cannot_abort)
     raise AclAccessViolation('You cannot abort the following job entries: '
                              + entry_names)
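Restating the docstring's abortability condition outside of the queryset plumbing (the function and parameter names here are illustrative only, not part of the original code):

def _cannot_abort(entry, user_login, accessible_ids, public_ids):
    # An entry is not abortable when the job is not owned by this user and
    # the host is either not ACL-accessible or only reachable via 'Everyone'.
    not_owned = entry.job.owner != user_login
    return not_owned and (entry.id not in accessible_ids
                          or entry.id in public_ids)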
Example #7
 def process_request(self, request):
     super(ApacheAuthMiddleware, self).process_request(request)
     username = thread_local.get_user()
     thread_local.set_user(None)
     user_object = auth.authenticate(username=username, password='')
     auth.login(request, user_object)
     thread_local.set_user(models.User.objects.get(login=username))
Example #8
 def process_request(self, request):
     super(ApacheAuthMiddleware, self).process_request(request)
     username = thread_local.get_user()
     thread_local.set_user(None)
     user_object = auth.authenticate(username=username,
                                     password='')
     auth.login(request, user_object)
     thread_local.set_user(models.User.objects.get(login=username))
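Examples #7 and #8 show the middleware half of the pattern: the web server authenticates the request, and the middleware resolves the login into a models.User and publishes it through thread_local so the RPC handlers below can read it back. How the middleware is wired up is not shown in this listing; a rough, hypothetical sketch of a Django settings entry from that era (the dotted path is a placeholder) might be:

# settings.py (sketch) -- Django 0.96/1.0 era middleware configuration.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # Placeholder dotted path for the ApacheAuthMiddleware shown above.
    'frontend.apache_auth.ApacheAuthMiddleware',
)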
Example #9
 def update(self, input_dict):
     if 'aborted' in input_dict:
         if input_dict['aborted'] != True:
             raise BadRequest('"aborted" can only be set to true')
         query = models.HostQueueEntry.objects.filter(pk=self.instance.pk)
         models.AclGroup.check_abort_permissions(query)
         rpc_utils.check_abort_synchronous_jobs(query)
         self.instance.abort(thread_local.get_user())
Example #10
 def update(self, input_dict):
     if 'aborted' in input_dict:
         if input_dict['aborted'] != True:
             raise exceptions.BadRequest('"aborted" can only be set to true')
         query = models.HostQueueEntry.objects.filter(pk=self.instance.pk)
         models.AclGroup.check_abort_permissions(query)
         rpc_utils.check_abort_synchronous_jobs(query)
         self.instance.abort(thread_local.get_user())
Example #11
 def handle_rpc_request(self, request):
     user = thread_local.get_user()
     json_request = self.raw_request_data(request)
     decoded_request = self.decode_request(json_request)
     decoded_result = self.dispatch_request(decoded_request)
     result = self.encode_result(decoded_result)
     if rpcserver_logging.LOGGING_ENABLED:
         self.log_request(user, decoded_request, decoded_result)
     return rpc_utils.raw_http_response(result)
Example #12
 def check_for_acl_violation_acl_group(self):
     user = thread_local.get_user()
     if user.is_superuser():
         return
     if self.name == 'Everyone':
         raise AclAccessViolation("You cannot modify 'Everyone'!")
     if not user in self.users.all():
         raise AclAccessViolation("You do not have access to %s"
                                  % self.name)
Example #13
 def save(self, *args, **kwargs):
     # is this a new object being saved for the first time?
     first_time = (self.id is None)
     user = thread_local.get_user()
     if user and not user.is_superuser() and user.login != self.login:
         raise AclAccessViolation("You cannot modify user " + self.login)
     super(User, self).save(*args, **kwargs)
     if first_time:
         everyone = AclGroup.objects.get(name='Everyone')
         everyone.users.add(self)
Example #14
 def save(self, *args, **kwargs):
     # is this a new object being saved for the first time?
     first_time = (self.id is None)
     user = thread_local.get_user()
     if user and not user.is_superuser() and user.login != self.login:
         raise AclAccessViolation("You cannot modify user " + self.login)
     super(User, self).save(*args, **kwargs)
     if first_time:
         everyone = AclGroup.objects.get(name='Everyone')
         everyone.users.add(self)
Example #15
def add_saved_query(name, url_token):
    name = name.strip()
    owner = thread_local.get_user()
    existing_list = list(models.SavedQuery.objects.filter(owner=owner,
                                                          name=name))
    if existing_list:
        query_object = existing_list[0]
        query_object.url_token = url_token
        query_object.save()
        return query_object.id

    return models.SavedQuery.add_object(owner=owner, name=name,
                                        url_token=url_token).id
Example #16
def abort_host_queue_entries(**filter_data):
    """\
    Abort a set of host queue entries.
    """
    query = models.HostQueueEntry.query_objects(filter_data)
    query = query.filter(complete=False)
    models.AclGroup.check_abort_permissions(query)
    host_queue_entries = list(query.select_related())
    rpc_utils.check_abort_synchronous_jobs(host_queue_entries)

    user = thread_local.get_user()
    for queue_entry in host_queue_entries:
        queue_entry.abort(user)
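abort_host_queue_entries() accepts Django-style filter keywords through **filter_data. As an illustration only (the filter key is hypothetical, not taken from this listing), aborting everything still incomplete for one job could look like:

# Hypothetical call: abort all incomplete queue entries of job 42.
abort_host_queue_entries(job__id=42)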
Example #17
 def check_for_acl_violation_hosts(hosts):
     user = thread_local.get_user()
     if user.is_superuser():
         return
     accessible_host_ids = set(
         host.id for host in Host.objects.filter(aclgroup__users=user))
     for host in hosts:
         # Check if the user has access to this host,
         # but only if it is not a metahost or a one-time-host
         no_access = (isinstance(host, Host)
                      and not host.invalid
                      and int(host.id) not in accessible_host_ids)
         if no_access:
             raise AclAccessViolation("You do not have access to %s"
                                      % str(host))
Example #18
def run_rpc_on_multiple_hostnames(rpc_call, shard_hostnames, **kwargs):
    """Runs an rpc to multiple AFEs

    This is used, for example, to propagate changes made to hosts after they
    are assigned to a shard.

    @param rpc_call: Name of the rpc endpoint to call.
    @param shard_hostnames: List of hostnames to run the rpcs on.
    @param **kwargs: Keyword arguments to pass in the rpcs.
    """
    # Make sure this function is not called on shards but only on master.
    assert not server_utils.is_shard()
    for shard_hostname in shard_hostnames:
        afe = frontend_wrappers.RetryingAFE(server=shard_hostname,
                                            user=thread_local.get_user())
        afe.run(rpc_call, **kwargs)
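A hypothetical call site (the RPC name, shard hostname, and arguments are placeholders, not taken from this listing): after a host is modified on the master, the same change could be replayed on the shard that owns it:

# Placeholder RPC name, hostname and arguments, for illustration only.
run_rpc_on_multiple_hostnames('modify_host', ['shard1.example.com'],
                              id=42, locked=True)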
Example #19
    def replacement(*args, **kwargs):
        """We need special handling when decorating an RPC that can be called
        directly using positional arguments.

        One example is rpc_interface.create_job().
        rpc_interface.create_job_page_handler() calls the function using both
        positional and keyword arguments.  Since frontend.RpcClient.run()
        takes only keyword arguments for an RPC, positional arguments of the
        RPC function need to be transformed into keyword arguments.
        """
        kwargs = _convert_to_kwargs_only(func, args, kwargs)
        if server_utils.is_shard():
            afe = frontend_wrappers.RetryingAFE(
                server=server_utils.get_global_afe_hostname(),
                user=thread_local.get_user())
            return afe.run(func.func_name, **kwargs)
        return func(**kwargs)
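The decorator body relies on a _convert_to_kwargs_only helper that is not included in this listing. Purely to illustrate the transformation the docstring describes, a minimal hypothetical version could map positional arguments onto the RPC function's parameter names via inspect (the real helper may handle more cases):

import inspect

def _convert_to_kwargs_only_sketch(func, args, kwargs):
    # Pair each positional argument with its parameter name, then merge in
    # the keyword arguments, so the call can be forwarded keyword-only.
    arg_names = inspect.getargspec(func)[0]
    converted = dict(zip(arg_names, args))
    converted.update(kwargs)
    return converted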
Example #20
 def save(self):
     # extra spaces in the hostname can be a sneaky source of errors
     self.hostname = self.hostname.strip()
     # is this a new object being saved for the first time?
     first_time = (self.id is None)
     if not first_time:
         AclGroup.check_for_acl_violation_hosts([self])
     if self.locked and not self.locked_by:
         self.locked_by = thread_local.get_user()
         self.lock_time = datetime.now()
         self.dirty = True
     elif not self.locked and self.locked_by:
         self.locked_by = None
         self.lock_time = None
     super(Host, self).save()
     if first_time:
         everyone = AclGroup.objects.get(name='Everyone')
         everyone.hosts.add(self)
     self._check_for_updated_attributes()
Example #21
 def delete(self):
     AclGroup.check_for_acl_violation_hosts([self])
     for queue_entry in self.hostqueueentry_set.all():
         queue_entry.deleted = True
         queue_entry.abort(thread_local.get_user())
     super(Host, self).delete()
Example #22
def get_static_data():
    """\
    Returns a dictionary containing a bunch of data that shouldn't change
    often and is otherwise inaccessible.  This includes:

    priorities: List of job priority choices.
    default_priority: Default priority value for new jobs.
    users: Sorted list of all users.
    labels: Sorted list of all labels.
    atomic_groups: Sorted list of all atomic groups.
    tests: Sorted list of all tests.
    profilers: Sorted list of all profilers.
    current_user: Logged-in username.
    host_statuses: Sorted list of possible Host statuses.
    job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_default: The default job timeout length in hours.
    parse_failed_repair_default: Default value for the parse_failed_repair job
    option.
    reboot_before_options: A list of valid RebootBefore string enums.
    reboot_after_options: A list of valid RebootAfter string enums.
    motd: Server's message of the day.
    status_dictionary: A mapping from one word job status names to a more
            informative description.
    """

    job_fields = models.Job.get_field_dict()

    result = {}
    result['priorities'] = models.Job.Priority.choices()
    default_priority = job_fields['priority'].default
    default_string = models.Job.Priority.get_string(default_priority)
    result['default_priority'] = default_string
    result['users'] = get_users(sort_by=['login'])
    result['labels'] = get_labels(sort_by=['-platform', 'name'])
    result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
    result['tests'] = get_tests(sort_by=['name'])
    result['profilers'] = get_profilers(sort_by=['name'])
    result['current_user'] = rpc_utils.prepare_for_serialization(
        thread_local.get_user().get_object_dict())
    result['host_statuses'] = sorted(models.Host.Status.names)
    result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
    result['job_timeout_default'] = models.Job.DEFAULT_TIMEOUT
    result['job_max_runtime_hrs_default'] = models.Job.DEFAULT_MAX_RUNTIME_HRS
    result['parse_failed_repair_default'] = bool(
        models.Job.DEFAULT_PARSE_FAILED_REPAIR)
    result['reboot_before_options'] = models.RebootBefore.names
    result['reboot_after_options'] = models.RebootAfter.names
    result['motd'] = rpc_utils.get_motd()

    result['status_dictionary'] = {"Aborted": "Aborted",
                                   "Verifying": "Verifying Host",
                                   "Pending": "Waiting on other hosts",
                                   "Running": "Running autoserv",
                                   "Completed": "Autoserv completed",
                                   "Failed": "Failed to complete",
                                   "Queued": "Queued",
                                   "Starting": "Next in host's queue",
                                   "Stopped": "Other host(s) failed verify",
                                   "Parsing": "Awaiting parse of final results",
                                   "Gathering": "Gathering log files",
                                   "Template": "Template job for recurring run"}
    return result
Example #23
def delete_saved_queries(id_list):
    user = thread_local.get_user()
    query = models.SavedQuery.objects.filter(id__in=id_list, owner=user)
    if query.count() == 0:
        raise model_logic.ValidationError('No such queries found for this user')
    query.delete()
Example #24
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, max_runtime_hrs=None, run_verify=True,
               email_list='', dependencies=(), reboot_before=None,
               reboot_after=None, parse_failed_repair=None):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Low, Medium, High, Urgent
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
    synch_count == 1 means the job is asynchronous.  If an atomic group is
    given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param max_runtime_hrs Hours from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
    this job will be parsed as part of the job.

    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
    one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.


    @returns The created Job id number.
    """
    user = thread_local.get_user()
    owner = user.login
    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "or 'atomic_group_name'"
            })

    labels_by_name = dict((label.name, label)
                          for label in models.Label.objects.all())
    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label_name in meta_hosts or []:
            label = labels_by_name.get(label_name)
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label_name in meta_hosts or []:
        if label_name in labels_by_name:
            label = labels_by_name[label_name]
            metahost_objects.append(label)
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                                'Label "%s" not found.  If assumed to be an '
                                'atomic group it would conflict with the '
                                'supplied atomic group "%s".' % (
                                        label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                    {'atomic_group_name' :
                     'You have requested a synch_count (%d) greater than the '
                     'maximum machines in the requested Atomic Group (%d).' %
                     (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    if reboot_before is None:
        reboot_before = user.get_reboot_before_display()
    if reboot_after is None:
        reboot_after = user.get_reboot_after_display()

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_hrs=max_runtime_hrs,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair)
    return rpc_utils.create_new_job(owner=owner,
                                    options=options,
                                    host_objects=host_objects,
                                    metahost_objects=metahost_objects,
                                    atomic_group=atomic_group)
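As a rough illustration of the docstring above (all values are made-up placeholders), a caller could enqueue an asynchronous client-side job on two named hosts like this:

# Hypothetical call; the name, hosts and control file are placeholders.
job_id = create_job(name='example-smoke',
                    priority='Medium',
                    control_file=open('control.smoke').read(),
                    control_type='Client',
                    hosts=['host1.example.com', 'host2.example.com'],
                    synch_count=1)  # synch_count == 1 means asynchronous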
Example #25
def add_acl_group(name, description=None):
    group = models.AclGroup.add_object(name=name, description=description)
    group.users.add(thread_local.get_user())
    return group.id
Example #26
def get_static_data():
    result = {}
    group_fields = []
    for field in models.TestView.group_fields:
        if field in models.TestView.extra_fields:
            name = models.TestView.extra_fields[field]
        else:
            name = models.TestView.get_field_dict()[field].verbose_name
        group_fields.append((name.capitalize(), field))
    model_fields = [(field.verbose_name.capitalize(), field.column)
                    for field in models.TestView._meta.fields]
    extra_fields = [(field_name.capitalize(), field_sql)
                    for field_sql, field_name
                    in models.TestView.extra_fields.iteritems()]

    benchmark_key = {
        'kernbench' : 'elapsed',
        'dbench' : 'throughput',
        'tbench' : 'throughput',
        'unixbench' : 'score',
        'iozone' : '32768-4096-fwrite'
    }

    perf_view = [
        ['Test Index', 'test_idx'],
        ['Job Index', 'job_idx'],
        ['Test Name', 'test_name'],
        ['Subdirectory', 'subdir'],
        ['Kernel Index', 'kernel_idx'],
        ['Status Index', 'status_idx'],
        ['Reason', 'reason'],
        ['Host Index', 'machine_idx'],
        ['Test Started Time', 'test_started_time'],
        ['Test Finished Time', 'test_finished_time'],
        ['Job Tag', 'job_tag'],
        ['Job Name', 'job_name'],
        ['Owner', 'job_owner'],
        ['Job Queued Time', 'job_queued_time'],
        ['Job Started Time', 'job_started_time'],
        ['Job Finished Time', 'job_finished_time'],
        ['Hostname', 'hostname'],
        ['Platform', 'platform'],
        ['Machine Owner', 'machine_owner'],
        ['Kernel Hash', 'kernel_hash'],
        ['Kernel Base', 'kernel_base'],
        ['Kernel', 'kernel'],
        ['Status', 'status'],
        ['Iteration Number', 'iteration'],
        ['Performance Keyval (Key)', 'iteration_key'],
        ['Performance Keyval (Value)', 'iteration_value'],
    ]

    result['group_fields'] = sorted(group_fields)
    result['all_fields'] = sorted(model_fields + extra_fields)
    result['test_labels'] = get_test_labels(sort_by=['name'])
    result['current_user'] = {'login' : thread_local.get_user()}
    result['benchmark_key'] = benchmark_key
    result['perf_view'] = perf_view
    result['test_view'] = model_fields
    result['preconfigs'] = preconfigs.manager.all_preconfigs()
    result['motd'] = rpc_utils.get_motd()

    return result
Example #27
 def perform_after_save(self, change):
     if not change:
         self.users.add(thread_local.get_user())
     self.add_current_user_if_empty()
     self.on_host_membership_change()
Example #28
 def add_current_user_if_empty(self):
     if not self.users.count():
         self.users.add(thread_local.get_user())