def create_instance(cls, input_dict, containing_collection):
    # If no owner was supplied, default to the currently logged-in user.
    owner = input_dict.get('owner')
    if not owner:
        owner = models.User.current_user().login

    cls._check_for_required_fields(
            input_dict, ('name', 'execution_info', 'queue_entries'))
    execution_info = input_dict['execution_info']
    cls._check_for_required_fields(execution_info,
                                   ('control_file', 'is_server'))

    if execution_info['is_server']:
        control_type = control_data.CONTROL_TYPE.SERVER
    else:
        control_type = control_data.CONTROL_TYPE.CLIENT

    options = dict(
            name=input_dict['name'],
            priority=input_dict.get('priority', None),
            control_file=execution_info['control_file'],
            control_type=control_type,
            is_template=input_dict.get('is_template', None),
            timeout_mins=execution_info.get('timeout_mins'),
            max_runtime_mins=execution_info.get('maximum_runtime_mins'),
            synch_count=execution_info.get('machines_per_execution'),
            run_verify=execution_info.get('run_verify'),
            run_reset=execution_info.get('run_reset'),
            email_list=input_dict.get('email_list', None),
            dependencies=execution_info.get('dependencies', ()),
            reboot_before=execution_info.get('cleanup_before_job'),
            reboot_after=execution_info.get('cleanup_after_job'),
            parse_failed_repair=input_dict.get('parse_failed_repair', None),
            drone_set=input_dict.get('drone_set', None),
            keyvals=input_dict.get('keyvals', None))

    host_objects, metahost_label_objects, atomic_group = [], [], None
    for queue_entry in input_dict['queue_entries']:
        if 'host' in queue_entry:
            host = queue_entry['host']
            if host:  # can be None, indicating a hostless job
                host_entry = containing_collection.resolve_link(host)
                host_objects.append(host_entry.instance)
        elif 'meta_host' in queue_entry:
            label_entry = containing_collection.resolve_link(
                    queue_entry['meta_host'])
            metahost_label_objects.append(label_entry.instance)
        if 'atomic_group_class' in queue_entry:
            atomic_group_entry = containing_collection.resolve_link(
                    queue_entry['atomic_group_class'])
            if atomic_group:
                assert atomic_group_entry.instance.id == atomic_group.id
            else:
                atomic_group = atomic_group_entry.instance

    job_id = rpc_utils.create_new_job(
            owner=owner,
            options=options,
            host_objects=host_objects,
            metahost_objects=metahost_label_objects,
            atomic_group=atomic_group)
    return models.Job.objects.get(id=job_id)


def create_instance(cls, input_dict, containing_collection):
    # If no owner was supplied, default to the currently logged-in user.
    owner = input_dict.get('owner')
    if not owner:
        owner = models.User.current_user().login

    cls._check_for_required_fields(
            input_dict, ('name', 'execution_info', 'queue_entries'))
    execution_info = input_dict['execution_info']
    cls._check_for_required_fields(execution_info,
                                   ('control_file', 'is_server'))

    if execution_info['is_server']:
        control_type = models.Job.ControlType.SERVER
    else:
        control_type = models.Job.ControlType.CLIENT

    options = dict(
            name=input_dict['name'],
            priority=input_dict.get('priority', None),
            control_file=execution_info['control_file'],
            control_type=control_type,
            is_template=input_dict.get('is_template', None),
            timeout=execution_info.get('timeout_hrs'),
            max_runtime_hrs=execution_info.get('maximum_runtime_hrs'),
            synch_count=execution_info.get('machines_per_execution'),
            run_verify=execution_info.get('run_verify'),
            email_list=input_dict.get('email_list', None),
            dependencies=execution_info.get('dependencies', ()),
            reboot_before=execution_info.get('cleanup_before_job'),
            reboot_after=execution_info.get('cleanup_after_job'),
            parse_failed_repair=input_dict.get('parse_failed_repair', None),
            drone_set=input_dict.get('drone_set', None),
            keyvals=input_dict.get('keyvals', None))

    host_objects, metahost_label_objects, atomic_group = [], [], None
    for queue_entry in input_dict['queue_entries']:
        if 'host' in queue_entry:
            host = queue_entry['host']
            if host:  # can be None, indicating a hostless job
                host_entry = containing_collection.resolve_link(host)
                host_objects.append(host_entry.instance)
        elif 'meta_host' in queue_entry:
            label_entry = containing_collection.resolve_link(
                    queue_entry['meta_host'])
            metahost_label_objects.append(label_entry.instance)
        if 'atomic_group_class' in queue_entry:
            atomic_group_entry = containing_collection.resolve_link(
                    queue_entry['atomic_group_class'])
            if atomic_group:
                assert atomic_group_entry.instance.id == atomic_group.id
            else:
                atomic_group = atomic_group_entry.instance

    job_id = rpc_utils.create_new_job(
            owner=owner,
            options=options,
            host_objects=host_objects,
            metahost_objects=metahost_label_objects,
            atomic_group=atomic_group)
    return models.Job.objects.get(id=job_id)
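

# A minimal sketch of the kind of input_dict that create_instance() above
# consumes, assuming only the required fields it checks for ('name',
# 'execution_info', 'queue_entries') plus a few of the optional ones it reads.
# The concrete values and the link format resolved by containing_collection
# are hypothetical illustrations, not taken from the source.
example_input_dict = {
    'name': 'example-sleeptest-job',                   # required
    'priority': 'Medium',
    'execution_info': {
        'control_file': "job.run_test('sleeptest')",   # required
        'is_server': False,                            # required; selects the client control type
        'machines_per_execution': 1,
        'cleanup_before_job': 'If dirty',
        'cleanup_after_job': 'Always',
    },
    'queue_entries': [
        {'host': {'href': '/afe/server/resources/hosts/42/'}},   # hypothetical host link
        {'meta_host': {'href': '/afe/server/resources/labels/7/'}},
        {'host': None},                                # a hostless entry
    ],
}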


def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, max_runtime_hrs=None, run_verify=True,
               email_list='', dependencies=(), reboot_before=None,
               reboot_after=None, parse_failed_repair=None):
    """\
    Create and enqueue a job.

    @param name Name of this job.
    @param priority Low, Medium, High, or Urgent.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
        synch_count == 1 means the job is asynchronous.  If an atomic group is
        given, this value is treated as a minimum.
    @param is_template If true, create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param max_runtime_hrs Hours from job start time until the job times out.
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done.
    @param dependencies List of label names on which this job depends.
    @param reboot_before Never, If dirty, or Always.
    @param reboot_after Never, If all tests passed, or Always.
    @param parse_failed_repair If true, results of failed repairs launched by
        this job will be parsed as part of the job.
    @param hosts List of hosts to run the job on.
    @param meta_hosts List where each entry is a label name; for each entry,
        one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.

    @returns The created Job id number.
    """
    user = thread_local.get_user()
    owner = user.login

    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "or 'atomic_group_name'"
            })

    labels_by_name = dict((label.name, label)
                          for label in models.Label.objects.all())
    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label_name in meta_hosts or []:
            label = labels_by_name.get(label_name)
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label_name in meta_hosts or []:
        if label_name in labels_by_name:
            label = labels_by_name[label_name]
            metahost_objects.append(label)
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to see if the
            # user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                    'meta_hosts': (
                        'Label "%s" not found.  If assumed to be an '
                        'atomic group it would conflict with the '
                        'supplied atomic group "%s".' % (
                            label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                {'one_time_hosts':
                 'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                {'atomic_group_name' :
                 'You have requested a synch_count (%d) greater than the '
                 'maximum machines in the requested Atomic Group (%d).' %
                 (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    if reboot_before is None:
        reboot_before = user.get_reboot_before_display()
    if reboot_after is None:
        reboot_after = user.get_reboot_after_display()

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_hrs=max_runtime_hrs,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair)
    return rpc_utils.create_new_job(owner=owner,
                                    options=options,
                                    host_objects=host_objects,
                                    metahost_objects=metahost_objects,
                                    atomic_group=atomic_group)
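

# A hedged usage sketch for create_job() above.  The hostname, label, and
# email values are hypothetical; the host and label would have to exist in
# the AFE database (or be passed via one_time_hosts) for the call to succeed.
example_job_id = create_job(
    name='example-sleeptest',
    priority='Medium',
    control_file="job.run_test('sleeptest')",
    control_type='Client',
    hosts=['host1.example.com'],        # explicit hosts already in the database
    meta_hosts=['board:link'],          # one host chosen from this label
    synch_count=1,                      # asynchronous: one machine per execution
    timeout=24,                         # hours until the job times out
    email_list='me@example.com')
print('created job %d' % example_job_id)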