def check_modify_host_locking(host, update_data):
    """
    Checks when locking/unlocking has been requested if the host is already
    locked/unlocked.

    @param host: models.Host object to be modified
    @param update_data: A dictionary with the changes to make to the host.

    @raises model_logic.ValidationError if the request is redundant (already
            locked/unlocked) or a lock is requested without a reason.
    """
    locked = update_data.get('locked', None)
    lock_reason = update_data.get('lock_reason', None)
    if locked is None:
        # No locking change requested; nothing to validate.
        return
    if locked:
        if host.locked:
            raise model_logic.ValidationError({
                'locked': 'Host %s already locked by %s on %s.' %
                          (host.hostname, host.locked_by, host.lock_time)
            })
        if not lock_reason:
            raise model_logic.ValidationError({
                'locked': 'Please provide a reason for locking Host %s' %
                          host.hostname
            })
    elif not host.locked:
        raise model_logic.ValidationError(
            {'locked': 'Host %s already unlocked.' % host.hostname})
def generate_additional_parameters(hostname_regex, param_type, param_values):
    """
    Generates an AdditionalParameter dictionary, for passing in to
    submit_plan().

    Returns a dictionary. To use in submit_job(), put this dictionary into a
    list (possibly with other additional_parameters dictionaries).

    @param hostname_regex: The hostname regular expression to match
    @param param_type: One of get_static_data()['additional_parameter_types']
    @param param_values: Dictionary of key=value pairs for this parameter

    @raises model_logic.ValidationError if any argument fails validation.
    """
    try:
        re.compile(hostname_regex)
    except Exception:
        # re.compile raises re.error for a bad pattern and TypeError for a
        # non-string; either way the input is unusable, so report the same
        # validation failure for both.
        raise model_logic.ValidationError(
            {'hostname_regex': '%s is not a valid regex' % hostname_regex})
    if param_type not in model_attributes.AdditionalParameterType.values:
        raise model_logic.ValidationError(
            {'param_type': '%s is not a valid parameter type' % param_type})
    # isinstance (rather than an exact type() comparison) also accepts dict
    # subclasses such as collections.OrderedDict, which behave identically
    # for our purposes.
    if not isinstance(param_values, dict):
        raise model_logic.ValidationError(
            {'param_values': '%s is not a dictionary' % repr(param_values)})
    return {
        'hostname_regex': hostname_regex,
        'param_type': param_type,
        'param_values': param_values
    }
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.

    host_objects: list of models.Host objects
    job_dependencies: list of names of labels

    @raises model_logic.ValidationError naming every host that does not
            carry all of the dependency labels.
    """
    # Start from the job's hosts and successively narrow the queryset to
    # those carrying each (non-special-action) dependency label.
    host_ids = [host.id for host in host_objects]
    ok_hosts = models.Host.objects.filter(id__in=host_ids)
    for dependency in job_dependencies:
        if not provision.is_for_special_action(dependency):
            try:
                label = models.Label.smart_get(dependency)
            except models.Label.DoesNotExist:
                logging.info(
                    'Label %r does not exist, so it cannot '
                    'be replaced by static label.', dependency)
                label = None
            # Labels replaced by static labels must be matched through the
            # static_labels relation instead.
            if label is not None and label.is_replaced_by_static():
                ok_hosts = ok_hosts.filter(static_labels__name=dependency)
            else:
                ok_hosts = ok_hosts.filter(labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError({
            'hosts':
            'Host(s) failed to meet job dependencies (' +
            (', '.join(job_dependencies)) + '): ' +
            (', '.join(failing_hosts))
        })
def create_new_job(owner, options, host_objects, metahost_objects):
    """
    Create and queue a job from pre-validated host and metahost objects.

    @param owner: login of the job owner.
    @param options: dict of job options ('dependencies', 'synch_count',
            'is_template', ...).
    @param host_objects: list of models.Host instances.
    @param metahost_objects: list of models.Label instances used as
            metahosts.

    @returns the id of the created job.
    @raises model_logic.ValidationError if the hosts cannot satisfy the
            job's synch_count or dependencies.
    """
    combined_hosts = host_objects + metahost_objects
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')
    if synch_count is not None and synch_count > len(combined_hosts):
        raise model_logic.ValidationError({
            'hosts':
            'only %d hosts provided for job with synch_count = %d' %
            (len(combined_hosts), synch_count)
        })
    check_for_duplicate_hosts(host_objects)
    special_labels = (label_name for label_name in dependencies
                      if provision.is_for_special_action(label_name))
    for label_name in special_labels:
        # TODO: We could save a few queries
        # if we had a bulk ensure-label-exists function, which used
        # a bulk .get() call. The win is probably very small.
        _ensure_label_exists(label_name)
    # This only checks targeted hosts, not hosts eligible due to the metahost
    check_job_dependencies(host_objects, dependencies)
    check_job_metahost_dependencies(metahost_objects, dependencies)
    options['dependencies'] = list(
        models.Label.objects.filter(name__in=dependencies))
    job = models.Job.create(owner=owner, options=options,
                            hosts=combined_hosts)
    job.queue(combined_hosts,
              is_template=options.get('is_template', False))
    return job.id
def delete_saved_queries(id_list):
    """
    Delete the given saved queries owned by the current user.

    @param id_list: list of SavedQuery ids to delete. Queries owned by other
            users are silently ignored by the filter.

    @raises model_logic.ValidationError if none of the ids belong to the
            current user.
    """
    user = afe_models.User.current_user().login
    queries = models.SavedQuery.objects.filter(id__in=id_list, owner=user)
    # .exists() lets the database stop at the first match instead of
    # counting every row.
    if not queries.exists():
        # NOTE(review): other validation errors in this file pass a dict;
        # this string payload is preserved for backward compatibility.
        raise model_logic.ValidationError(
            'No such queries found for this user')
    queries.delete()
def check_for_duplicate_hosts(host_objects):
    """
    Raise a validation error if any host appears more than once.

    @param host_objects: list of models.Host objects.

    @raises model_logic.ValidationError listing the duplicated hostnames.
    """
    host_counts = collections.Counter(host_objects)
    # .items() (instead of the Python-2-only .iteritems()) behaves
    # identically here and keeps the code Python-3 compatible.
    duplicate_hostnames = {
        host.hostname
        for host, count in host_counts.items() if count > 1
    }
    if duplicate_hostnames:
        raise model_logic.ValidationError(
            {'hosts': 'Duplicate hosts: %s' % ', '.join(duplicate_hostnames)})
def process_failures(failure_ids, host_action, test_action, labels=(),
                     keyvals=None, bugs=(), reason=None, invalidate=False):
    """
    Triage a set of failures.

    @param failure_ids: The failure IDs, as returned by get_failures()
    @param host_action: One of 'Block', 'Unblock', 'Reinstall'
    @param test_action: One of 'Skip', 'Rerun'
    @param labels: Test labels to apply, by name
    @param keyvals: Dictionary of job keyvals to add (or replace)
    @param bugs: List of bug IDs to associate with this failure
    @param reason: An override for the test failure reason
    @param invalidate: True if failure should be invalidated for the purposes
                       of reporting. Defaults to False.
    """
    # Validate both action arguments against their allowed choices before
    # touching any failure.
    action_specs = (
        ('host_action', 'host', host_action,
         failure_actions.HostAction.values),
        ('test_action', 'test', test_action,
         failure_actions.TestAction.values),
    )
    for field, kind, value, choices in action_specs:
        if value not in choices:
            raise model_logic.ValidationError({
                field: ('%s action %s not valid; must be one of %s' %
                        (kind, value, ', '.join(choices)))
            })
    for failure_id in failure_ids:
        rpc_utils.process_failure(failure_id=failure_id,
                                  host_action=host_action,
                                  test_action=test_action,
                                  labels=labels,
                                  keyvals=keyvals,
                                  bugs=bugs,
                                  reason=reason,
                                  invalidate=invalidate)
def check_for_duplicate_hosts(host_objects):
    """
    Raise a validation error if the same host (by id) appears more than once.

    @param host_objects: list of models.Host objects.

    @raises model_logic.ValidationError listing the duplicated hostnames.
    """
    seen_ids = set()
    duplicate_hostnames = set()
    for host in host_objects:
        if host.id in seen_ids:
            duplicate_hostnames.add(host.hostname)
        else:
            seen_ids.add(host.id)
    if duplicate_hostnames:
        raise model_logic.ValidationError(
            {'hosts': 'Duplicate hosts: %s' % ', '.join(duplicate_hostnames)})
def _sanity_check_generate_control(is_server, client_control_file, kernels, upload_kernel_config): """ Sanity check some of the parameters to generate_control(). This exists as its own function so that site_control_file may call it as well from its own generate_control(). @raises ValidationError if any of the parameters do not make sense. """ if is_server and client_control_file: raise model_logic.ValidationError({ 'tests': 'You cannot run server tests at the same time ' 'as directly supplying a client-side control file.' }) if kernels: # make sure that kernel is a list of dictionarions with at least # the 'version' key in them kernel_error = model_logic.ValidationError({ 'kernel': 'The kernel parameter must be a sequence of ' 'dictionaries containing at least the "version" key ' '(got: %r)' % kernels }) try: iter(kernels) except TypeError: raise kernel_error for kernel_info in kernels: if (not isinstance(kernel_info, dict) or 'version' not in kernel_info): raise kernel_error if upload_kernel_config and not is_server: raise model_logic.ValidationError({ 'upload_kernel_config': 'Cannot use upload_kernel_config ' 'with client side tests' })
def host_add_labels(id, labels):
    """
    Add labels to a host; at most one of them may be a platform label.

    @param id: host identifier accepted by models.Host.smart_get().
    @param labels: label identifiers accepted by
            models.Label.smart_get_bulk().

    @raises model_logic.ValidationError if more than one platform label is
            being added, or (via check_no_platform) if the host already has
            a platform.
    """
    label_objects = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    platforms = [label.name for label in label_objects if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
                       ', '.join(platforms)})
    if len(platforms) == 1:
        # Adding exactly one platform is only allowed if the host does not
        # already carry one.
        models.Host.check_no_platform([host])
    host.labels.add(*label_objects)
def check_modify_host(update_data):
    """
    Sanity check modify_host* requests.

    @param update_data: A dictionary with the changes to make to a host or
            hosts.

    @raises model_logic.ValidationError if the request tries to change host
            status.
    """
    # Only the scheduler (monitor_db) is allowed to modify Host status.
    # Otherwise race conditions happen as a hosts state is changed out from
    # beneath tasks being run on a host.
    if 'status' not in update_data:
        return
    raise model_logic.ValidationError(
        {'status': 'Host status can not be modified by the frontend.'})
def check_no_platform(cls, hosts):
    """
    Verify that none of the given hosts already has a platform label.

    @param hosts: sequence of Host model instances.

    @raises model_logic.ValidationError listing every offending host and its
            platform label(s).
    """
    Host.objects.populate_relationships(hosts, Label, 'label_list')
    errors = []
    for host in hosts:
        # Join the names in case a host somehow carries several platforms,
        # so the error message shows all of them.
        platform_names = [label.name for label in host.label_list
                          if label.platform]
        if platform_names:
            errors.append('Host %s already has a platform: %s'
                          % (host.hostname, ', '.join(platform_names)))
    if errors:
        raise model_logic.ValidationError({'labels': '; '.join(errors)})
def prepare_generate_control_file(tests, kernel, label, profilers):
    """
    Resolve test/profiler identifiers and check test-type consistency.

    @param tests: iterable of test identifiers, resolved via
            models.Test.smart_get().
    @param kernel: kernel parameter — unused in the visible portion of this
            function (TODO confirm against the full definition).
    @param label: label parameter — unused in the visible portion (TODO
            confirm against the full definition).
    @param profilers: iterable of profiler identifiers, resolved via
            models.Profiler.smart_get().

    @raises model_logic.ValidationError if the tests mix server-side and
            client-side types.
    """
    test_objects = [models.Test.smart_get(test) for test in tests]
    profiler_objects = [
        models.Profiler.smart_get(profiler) for profiler in profilers
    ]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    except InconsistencyException, exc:  # Python 2 except syntax
        test1, test2 = exc.args
        raise model_logic.ValidationError({
            'tests': 'You cannot run both server- and client-side '
                     'tests together (tests %s and %s differ' %
                     (test1.name, test2.name)
        })
def _sanity_check_generate_control(is_server, client_control_file): """ Sanity check some of the parameters to generate_control(). This exists as its own function so that site_control_file may call it as well from its own generate_control(). @raises ValidationError if any of the parameters do not make sense. """ if is_server and client_control_file: raise model_logic.ValidationError({ 'tests': 'You cannot run server tests at the same time ' 'as directly supplying a client-side control file.' })
def prepare_generate_control_file(tests, profilers, db_tests=True):
    """
    Resolve test/profiler identifiers and check test-type consistency.

    @param tests: iterable of test identifiers. Resolved via
            models.Test.smart_get() when db_tests is True, otherwise via
            afe_test_dict_to_test_object() (tests are dicts, not DB rows).
    @param profilers: iterable of profiler identifiers, resolved via
            models.Profiler.smart_get().
    @param db_tests: True if the tests come from the database.

    @raises model_logic.ValidationError if the tests mix test_suite and
            server-side types.
    """
    if db_tests:
        test_objects = [models.Test.smart_get(test) for test in tests]
    else:
        test_objects = [afe_test_dict_to_test_object(test) for test in tests]
    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    except InconsistencyException, exc:  # Python 2 except syntax
        test1, test2 = exc.args
        raise model_logic.ValidationError(
            {'tests' : 'You cannot run both test_suites and server-side '
             'tests together (tests %s and %s differ' % (
            test1.name, test2.name)})
def create_plan_label(plan):
    """
    Creates the host label to apply on the plan hosts

    @param plan: the plan whose name seeds the label name.
    @returns the saved afe_models.Label.
    @raises model_logic.ValidationError if a valid label with that name
            already exists.
    """
    group, _ = afe_models.AtomicGroup.objects.get_or_create(
        name=PLANNER_ATOMIC_GROUP_NAME)
    if group.invalid:
        # Resurrect the atomic group if it had been marked deleted.
        group.invalid = False
        group.save()
    label_name = PLANNER_LABEL_PREFIX + plan.name
    if bool(afe_models.Label.valid_objects.filter(name=label_name)):
        raise model_logic.ValidationError('Label %s already exists, '
                                          'cannot start plan' % label_name)
    label = afe_models.Label(name=label_name, atomic_group=group)
    label.save()
    return label
def create_one_time_host(hostname):
    """
    Fetch or create an invalid ("one-time") host record for hostname.

    @param hostname: hostname of the one-time host.
    @returns the saved Host, marked DO_NOT_REPAIR and unlocked.
    @raises model_logic.ValidationError if a valid (non-one-time) host with
            this hostname already exists.
    """
    matches = Host.objects.filter(hostname=hostname)
    if matches.count() == 0:
        # No record yet: create one marked invalid so it stays one-time.
        host = Host(hostname=hostname, invalid=True)
        host.do_validate()
    else:
        host = matches[0]
        if not host.invalid:
            raise model_logic.ValidationError({
                'hostname' : '%s already exists in the autotest DB.  '
                    'Select it rather than entering it as a one time '
                    'host.' % hostname
                })
    host.protection = host_protections.Protection.DO_NOT_REPAIR
    host.locked = False
    host.save()
    host.clean_object()
    return host
def check_abort_synchronous_jobs(host_queue_entries):
    """
    Refuse to abort only part of a synchronous autoserv execution.

    @param host_queue_entries: queue entries the user asked to abort.
    @raises model_logic.ValidationError if any synchronous execution is only
            partially covered by the abort request.
    """
    # Count how many entries of each (job, execution) group are included in
    # this abort request.
    count_per_execution = {}
    for entry in host_queue_entries:
        key = _execution_key_for(entry)
        count_per_execution[key] = count_per_execution.get(key, 0) + 1
    for entry in host_queue_entries:
        if not entry.execution_subdir:
            continue
        included = count_per_execution[_execution_key_for(entry)]
        if included < entry.job.synch_count:
            raise model_logic.ValidationError(
                {'' : 'You cannot abort part of a synchronous job execution '
                      '(%d/%s), %d included, %d expected'
                      % (entry.job.id, entry.execution_subdir,
                         included, entry.job.synch_count)})
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.

    host_objects: list of models.Host objects
    job_dependencies: list of names of labels

    @raises model_logic.ValidationError naming every host that does not
            carry all of the dependency labels.
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    ok_hosts = models.Host.objects.filter(id__in=host_ids)
    # Successively narrow the queryset to hosts carrying every
    # (non-special-action) dependency label.
    for dependency in job_dependencies:
        if not provision.is_for_special_action(dependency):
            ok_hosts = ok_hosts.filter(labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
            {'hosts' : 'Host(s) failed to meet job dependencies (' +
                       (', '.join(job_dependencies)) + '): ' +
                       (', '.join(failing_hosts))})
def create_new_job(owner, options, host_objects, metahost_objects,
                   atomic_group=None):
    """
    Create and queue a job, with optional atomic-group scheduling checks.

    @param owner: login of the job owner.
    @param options: dict of job options ('dependencies', 'synch_count',
            'is_template', ...).
    @param host_objects: list of models.Host instances.
    @param metahost_objects: list of models.Label instances used as
            metahosts.
    @param atomic_group: optional models.AtomicGroup instance.

    @returns the id of the created job.
    @raises model_logic.ValidationError when the host/dependency/
            atomic-group combination cannot be scheduled.
    """
    labels_by_name = dict(
        (label.name, label) for label in models.Label.objects.all())
    all_host_objects = host_objects + metahost_objects
    # NOTE(review): metahost_counts is unused in this function as visible
    # here — confirm before removing.
    metahost_counts = _get_metahost_counts(metahost_objects)
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')
    if atomic_group:
        # Extra schedulability checks apply when an atomic group is used.
        check_atomic_group_create_job(synch_count, host_objects,
                                      metahost_objects, dependencies,
                                      atomic_group, labels_by_name)
    else:
        if synch_count is not None and synch_count > len(all_host_objects):
            raise model_logic.ValidationError({
                'hosts':
                'only %d hosts provided for job with synch_count = %d' %
                (len(all_host_objects), synch_count)
            })
        # Hosts in an atomic group may only be used by atomic-group jobs.
        atomic_hosts = models.Host.objects.filter(
            id__in=[host.id for host in host_objects],
            labels__atomic_group=True)
        unusable_host_names = [host.hostname for host in atomic_hosts]
        if unusable_host_names:
            raise model_logic.ValidationError({
                'hosts':
                'Host(s) "%s" are atomic group hosts but no '
                'atomic group was specified for this job.' %
                (', '.join(unusable_host_names), )
            })
    check_for_duplicate_hosts(host_objects)
    check_job_dependencies(host_objects, dependencies)
    options['dependencies'] = [
        labels_by_name[label_name] for label_name in dependencies
    ]
    # Every metahost or dependency label tied to an atomic group must agree
    # with the atomic group chosen for this job.
    for label in metahost_objects + options['dependencies']:
        if label.atomic_group and not atomic_group:
            raise model_logic.ValidationError({
                'atomic_group_name':
                'Dependency %r requires an atomic group but no '
                'atomic_group_name or meta_host in an atomic group was '
                'specified for this job.' % label.name
            })
        elif (label.atomic_group and
              label.atomic_group.name != atomic_group.name):
            raise model_logic.ValidationError({
                'atomic_group_name':
                'meta_hosts or dependency %r requires atomic group '
                '%r instead of the supplied atomic_group_name=%r.'
                % (label.name, label.atomic_group.name, atomic_group.name)
            })
    job = models.Job.create(owner=owner, options=options,
                            hosts=all_host_objects)
    job.queue(all_host_objects, atomic_group=atomic_group,
              is_template=options.get('is_template', False))
    return job.id
def create_job_common(name, priority, control_type, control_file=None,
                      hosts=(), meta_hosts=(), one_time_hosts=(),
                      synch_count=None, is_template=False, timeout=None,
                      timeout_mins=None, max_runtime_mins=None,
                      run_verify=True, email_list='', dependencies=(),
                      reboot_before=None, reboot_after=None,
                      parse_failed_repair=None, hostless=False, keyvals=None,
                      drone_set=None, parent_job_id=None, test_retry=0,
                      run_reset=True, require_ssp=None):
    #pylint: disable-msg=C0111
    """
    Common code between creating "standard" jobs and creating parameterized
    jobs

    @returns the id of the created job (via create_new_job()).
    @raises model_logic.ValidationError on invalid host/hostless
            combinations or unknown meta_host labels.
    """
    # input validation
    host_args_passed = any((hosts, meta_hosts, one_time_hosts))
    if hostless:
        # Hostless jobs may not name hosts and must use a server-side
        # control file.
        if host_args_passed:
            raise model_logic.ValidationError(
                {'hostless': 'Hostless jobs cannot include any hosts!'})
        if control_type != control_data.CONTROL_TYPE_NAMES.SERVER:
            raise model_logic.ValidationError({
                'control_type': 'Hostless jobs cannot use client-side '
                                'control files'
            })
    elif not host_args_passed:
        raise model_logic.ValidationError({
            'arguments': "For host jobs, you must pass at least one of"
                         " 'hosts', 'meta_hosts', 'one_time_hosts'."
        })
    label_objects = list(models.Label.objects.filter(name__in=meta_hosts))
    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    _validate_host_job_sharding(host_objects)
    for host in one_time_hosts:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)
    # Resolve each requested meta_host name to its Label; unknown names are
    # an error rather than being silently dropped.
    metahost_objects = []
    meta_host_labels_by_name = {label.name: label for label in label_objects}
    for label_name in meta_hosts:
        if label_name in meta_host_labels_by_name:
            metahost_objects.append(meta_host_labels_by_name[label_name])
        else:
            raise model_logic.ValidationError(
                {'meta_hosts': 'Label "%s" not found' % label_name})
    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   timeout_mins=timeout_mins,
                   max_runtime_mins=max_runtime_mins,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parent_job_id=parent_job_id,
                   test_retry=test_retry,
                   run_reset=run_reset,
                   require_ssp=require_ssp)
    return create_new_job(owner=models.User.current_user().login,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects)
def create_job_common(name, priority, control_type, control_file=None,
                      hosts=(), meta_hosts=(), one_time_hosts=(),
                      atomic_group_name=None, synch_count=None,
                      is_template=False, timeout=None, max_runtime_hrs=None,
                      run_verify=True, email_list='', dependencies=(),
                      reboot_before=None, reboot_after=None,
                      parse_failed_repair=None, hostless=False, keyvals=None,
                      drone_set=None, parameterized_job=None):
    """
    Common code between creating "standard" jobs and creating parameterized
    jobs

    @returns the id of the created job (via create_new_job()).
    @raises model_logic.ValidationError on invalid host/hostless/
            atomic-group combinations or unknown meta_host labels.
    """
    user = models.User.current_user()
    owner = user.login
    # Convert metahost names to lower case, to avoid case sensitivity issues
    meta_hosts = [meta_host.lower() for meta_host in meta_hosts]
    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
            or hostless):
        raise model_logic.ValidationError({
            'arguments': "You must pass at least one of 'hosts', "
                         "'meta_hosts', 'one_time_hosts', "
                         "'atomic_group_name', or 'hostless'"
        })
    if hostless:
        # Hostless jobs may not name hosts and must use a server-side
        # control file.
        if hosts or meta_hosts or one_time_hosts or atomic_group_name:
            raise model_logic.ValidationError(
                {'hostless': 'Hostless jobs cannot include any hosts!'})
        server_type = models.Job.ControlType.get_string(
            models.Job.ControlType.SERVER)
        if control_type != server_type:
            raise model_logic.ValidationError({
                'control_type': 'Hostless jobs cannot use client-side '
                                'control files'
            })
    labels_by_name = dict(
        (label.name.lower(), label) for label in models.Label.objects.all())
    atomic_groups_by_name = dict(
        (ag.name.lower(), ag) for ag in models.AtomicGroup.objects.all())
    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was
    # supplied.
    if not atomic_group_name:
        for label_name in meta_hosts or []:
            label = labels_by_name.get(label_name)
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break
    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label_name in meta_hosts or []:
        if label_name in labels_by_name:
            label = labels_by_name[label_name]
            metahost_objects.append(label)
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                    'meta_hosts':
                    ('Label "%s" not found. If assumed to be an '
                     'atomic group it would conflict with the '
                     'supplied atomic group "%s".' %
                     (label_name, atomic_group_name))
                })
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts': 'Label "%s" not found' % label_name})
    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError({
                'one_time_hosts':
                'One time hosts cannot be used with an Atomic Group.'
            })
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError({
                'atomic_group_name':
                'You have requested a synch_count (%d) greater than the '
                'maximum machines in the requested Atomic Group (%d).'
                % (synch_count, atomic_group.max_number_of_machines)
            })
    else:
        atomic_group = None
    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)
    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_hrs=max_runtime_hrs,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parameterized_job=parameterized_job)
    return create_new_job(owner=owner,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects,
                          atomic_group=atomic_group)
def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
                                  dependencies, atomic_group, labels_by_name):
    """
    Attempt to reject create_job requests with an atomic group that will
    be impossible to schedule.  The checks are not perfect but should
    catch the most obvious issues.

    @param synch_count - The job's minimum synch count.
    @param host_objects - A list of models.Host instances.
    @param metahost_objects - A list of models.Label instances.
    @param dependencies - A list of job dependency label names.
    @param atomic_group - The models.AtomicGroup instance.
    @param labels_by_name - A dictionary mapping label names to models.Label
            instance.  Used to look up instances for dependencies.

    @raises model_logic.ValidationError - When an issue is found.
    """
    # If specific host objects were supplied with an atomic group, verify
    # that there are enough to satisfy the synch_count.
    minimum_required = synch_count or 1
    if (host_objects and not metahost_objects and
            len(host_objects) < minimum_required):
        raise model_logic.ValidationError({
            'hosts':
            'only %d hosts provided for job with synch_count = %d' %
            (len(host_objects), synch_count)
        })
    # Check that the atomic group has a hope of running this job
    # given any supplied metahosts and dependancies that may limit.
    # Get a set of hostnames in the atomic group.
    possible_hosts = set()
    for label in atomic_group.label_set.all():
        possible_hosts.update(h.hostname for h in label.host_set.all())
    # Filter out hosts that don't match all of the job dependency labels.
    for label_name in set(dependencies):
        label = labels_by_name[label_name]
        hosts_in_label = (h.hostname for h in label.host_set.all())
        possible_hosts.intersection_update(hosts_in_label)
    if not host_objects and not metahost_objects:
        # No hosts or metahosts are required to queue an atomic group Job.
        # However, if they are given, we respect them below.
        host_set = possible_hosts
    else:
        host_set = set(host.hostname for host in host_objects)
        unusable_host_set = host_set.difference(possible_hosts)
        if unusable_host_set:
            raise model_logic.ValidationError({
                'hosts':
                'Hosts "%s" are not in Atomic Group "%s"' %
                (', '.join(sorted(unusable_host_set)), atomic_group.name)
            })
    # Lookup hosts provided by each meta host and merge them into the
    # host_set for final counting.
    for meta_host in metahost_objects:
        meta_possible = possible_hosts.copy()
        hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
        meta_possible.intersection_update(hosts_in_meta_host)
        # Count all hosts that this meta_host will provide.
        host_set.update(meta_possible)
    if len(host_set) < minimum_required:
        raise model_logic.ValidationError({
            'atomic_group_name':
            'Insufficient hosts in Atomic Group "%s" with the'
            ' supplied dependencies and meta_hosts.' %
            (atomic_group.name, )
        })
def submit_plan(name, hosts, host_labels, tests, support=None,
                label_override=None, additional_parameters=None):
    """
    Submits a plan to the Test Planner

    @param name: the name of the plan
    @param hosts: a list of hostnames
    @param host_labels: a list of host labels. The hosts under test will
                        update to reflect changes in the label
    @param tests: an ordered list of dictionaries:
                      alias: an alias for the test
                      control_file: the test control file
                      is_server: True if is a server-side control file
                      estimated_runtime: estimated number of hours this test
                                         will run
    @param support: the global support script
    @param label_override: label to prepend to all AFE jobs for this test
                           plan. Defaults to the plan name.
    @param additional_parameters: A mapping of AdditionalParameters to apply
                                  to this test plan, as an ordered list. Each
                                  item of the list is a dictionary:
                                      hostname_regex: A regular expression;
                                          the additional parameter in the
                                          value will be applied if the
                                          hostname matches this regex
                                      param_type: The type of additional
                                                  parameter
                                      param_values: A dictionary of
                                          key=value pairs for this parameter
                                  Currently, the only (non-site-specific)
                                  param_type available is 'Verify'. Setting
                                  these parameters allows the user to specify
                                  arguments to the
                                  job.run_test('verify_test', ...) line at
                                  the beginning of the wrapped control file
                                  for each test

    @returns the id of the created plan.
    @raises model_logic.ValidationError on any invalid input.
    """
    host_objects = []
    label_objects = []
    for host in hosts or []:
        try:
            host_objects.append(
                afe_models.Host.valid_objects.get(hostname=host))
        except afe_models.Host.DoesNotExist:
            raise model_logic.ValidationError(
                {'hosts': 'host %s does not exist' % host})
    for label in host_labels or []:
        try:
            label_objects.append(
                afe_models.Label.valid_objects.get(name=label))
        except afe_models.Label.DoesNotExist:
            raise model_logic.ValidationError(
                {'host_labels': 'host label %s does not exist' % label})
    aliases_seen = set()
    test_required_fields = ('alias', 'control_file', 'is_server',
                            'estimated_runtime')
    for test in tests:
        for field in test_required_fields:
            if field not in test:
                raise model_logic.ValidationError(
                    {'tests': 'field %s is required' % field})
        alias = test['alias']
        if alias in aliases_seen:
            # BUG FIX: this previously raised model_logic.Validationerror
            # (lower-case 'e'), a nonexistent attribute — duplicate aliases
            # would have surfaced as an AttributeError instead of the
            # intended validation failure.
            raise model_logic.ValidationError(
                {'tests': 'alias %s occurs more than once' % alias})
        aliases_seen.add(alias)
    plan, created = models.Plan.objects.get_or_create(name=name)
    if not created:
        raise model_logic.ValidationError(
            {'name': 'Plan name %s already exists' % name})
    try:
        rpc_utils.set_additional_parameters(plan, additional_parameters)
        label = rpc_utils.create_plan_label(plan)
        try:
            for i, test in enumerate(tests):
                control, _ = models.ControlFile.objects.get_or_create(
                    contents=test['control_file'])
                models.TestConfig.objects.create(
                    plan=plan, alias=test['alias'], control_file=control,
                    is_server=test['is_server'], execution_order=i,
                    estimated_runtime=test['estimated_runtime'])
            plan.label_override = label_override
            plan.support = support or ''
            plan.save()
            plan.owners.add(afe_models.User.current_user())
            for host in host_objects:
                models.Host.objects.create(plan=plan, host=host)
            plan.host_labels.add(*label_objects)
            rpc_utils.start_plan(plan, label)
            return plan.id
        except:
            # Deliberate bare except: remove the label on any failure so the
            # name can be reused, then re-raise.
            label.delete()
            raise
    except:
        # Deliberate bare except: remove the half-created plan, then
        # re-raise.
        plan.delete()
        raise