Example #1
0
def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
    """Find control jobs in location and create one big job
       Returns:
        dictionary of the form:
            tests[file_path] = parsed_object

    """
    tests = {}
    profilers = False
    if 'client/profilers' in parent_dir:
        profilers = True
    for dir in [ parent_dir ]:
        files = recursive_walk(dir, control_pattern)
        for file in files:
            if '__init__.py' in file or '.svn' in file:
                continue
            if not profilers:
                if not add_noncompliant:
                    try:
                        found_test = control_data.parse_control(file,
                                                            raise_warnings=True)
                        tests[file] = found_test
                    except control_data.ControlVariableException, e:
                        print "Skipping %s\n%s" % (file, e)
                        pass
                else:
                    found_test = control_data.parse_control(file)
                    tests[file] = found_test
            else:
                script = file.rstrip(".py")
                tests[file] = compiler.parseFile(file).doc
Example #2
0
def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
    """
    Find control files in file system and load a list with their info.

    @param parent_dir: directory to search recursively.
    @param control_pattern: name format of control file.
    @param add_noncompliant: ignore control file parse errors.

    @return dictionary of the form: tests[file_path] = parsed_object
    """
    tests = {}
    profilers = False
    if 'client/profilers' in parent_dir:
        profilers = True
    for dir in [ parent_dir ]:
        files = recursive_walk(dir, control_pattern)
        for file in files:
            if '__init__.py' in file or '.svn' in file:
                continue
            if not profilers:
                if not add_noncompliant:
                    try:
                        found_test = control_data.parse_control(file,
                                                            raise_warnings=True)
                        tests[file] = found_test
                    except control_data.ControlVariableException, e:
                        logging.warn("Skipping %s\n%s", file, e)
                    except Exception, e:
                        logging.error("Bad %s\n%s", file, e)
                else:
                    found_test = control_data.parse_control(file)
                    tests[file] = found_test
            else:
                tests[file] = compiler.parseFile(file).doc
Example #3
0
def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
    """
    Find control files in file system and load a list with their info.

    @param parent_dir: directory to search recursively.
    @param control_pattern: name format of control file.
    @param add_noncompliant: ignore control file parse errors.

    @return dictionary of the form: tests[file_path] = parsed_object
    """
    tests = {}
    profilers = False
    if 'client/profilers' in parent_dir:
        profilers = True
    for dir in [parent_dir]:
        files = recursive_walk(dir, control_pattern)
        for file in files:
            if '__init__.py' in file or '.svn' in file:
                continue
            if not profilers:
                if not add_noncompliant:
                    try:
                        found_test = control_data.parse_control(
                            file, raise_warnings=True)
                        tests[file] = found_test
                    except control_data.ControlVariableException, e:
                        logging.warn("Skipping %s\n%s", file, e)
                    except Exception, e:
                        logging.error("Bad %s\n%s", file, e)
                else:
                    found_test = control_data.parse_control(file)
                    tests[file] = found_test
            else:
                tests[file] = compiler.parseFile(file).doc
Example #4
0
def _require_ssp_from_control(control_name):
    """Decide from the test control file whether SSP is required.

    The control file is read from the prod checkout of autotest, so:
    [1] Any change to the REQUIRE_SSP directive needs a prod-push to take
    effect.
    [2] The control file may be missing here even though the SSP package
    contains the test; this function conservatively returns True then.

    Called very early in autoserv, before logging is set up, hence the
    direct stderr writes.
    """
    if not control_name:
        return True
    try:
        path = _control_path_on_disk(control_name)
    except error.AutoservError as e:
        sys.stderr.write("autoserv: Could not determine control file path,"
                         " assuming we need SSP: %s\n" % e)
        sys.stderr.flush()
        return True
    if not os.path.isfile(path):
        return True
    control = control_data.parse_control(path)
    # Honor an explicit REQUIRE_SSP directive; otherwise default to True.
    if control and control.require_ssp is not None:
        return control.require_ssp
    return True
def SeedAttributes(path_controlfile):
    """Seed attributes in a control file.

    Rewrites the control file in place, inserting an ATTRIBUTES line
    (derived from the parsed SUITE value) directly above the SUITE line.

    @param path_controlfile: path to control file

    @return None
    """
    # Derive the 'suite:<name>' attribute items from the parsed SUITE value.
    parsed = control_data.parse_control(path_controlfile, True)
    items = set('suite:' + part.strip()
                for part in parsed.suite.split(',') if part.strip())
    attr_line = 'ATTRIBUTES = "%s"\n' % ', '.join(sorted(items, key=str.lower))

    # Locate the SUITE line and prepend the ATTRIBUTES line to it.
    # An IndexError here means the control file has no SUITE line.
    with open(path_controlfile, 'r') as f:
        lines = f.readlines()
    index = [i for i, line in enumerate(lines)
             if line.startswith(('SUITE =', 'SUITE='))][0]
    lines[index] = attr_line + lines[index]

    # Write the modified contents back to file.
    with open(path_controlfile, 'w') as f:
        f.writelines(lines)
Example #6
0
def GetTestsFromFS(parent_dir, logger):
    """
    Find control files in file system and load a list with their info.

    @param parent_dir: directory to search recursively.
    @param logger: Python logger for logging.

    @return dictionary of the form: tests[file_path] = parsed_object
    """
    tests = {}
    tests_src = {}
    for root, dirnames, filenames in os.walk(parent_dir):
        for filename in fnmatch.filter(filenames, 'control*'):
            test_name = os.path.basename(root)
            if test_name[:5].lower() == 'suite' or '.svn' in filename:
                continue
            full_name = os.path.join(root, filename)
            try:
                found_test = control_data.parse_control(full_name,
                                                        raise_warnings=True)
                tests[test_name] = ''
                tests_src[test_name] = parent_dir
            except control_data.ControlVariableException, e:
                logger.warn("Skipping %s\n%s", full_name, e)
            except Exception, e:
                logger.error("Bad %s\n%s", full_name, e)
Example #7
0
def update_from_whitelist(whitelist_set, add_experimental, add_noncompliant,
                          autotest_dir):
    """
    Scans through all tests in the whitelist and add them to the database.

    This function invoked when -w supplied.

    @param whitelist_set: set of tests in full-path form from a whitelist.
    @param add_experimental: add tests with experimental attribute set.
    @param add_noncompliant: attempt adding test with invalid control files.
    @param autotest_dir: prepended to path strings
            (see global_config.ini, COMMON, autotest_top_path).
    """
    tests = {}
    profilers = {}
    for file_path in whitelist_set:
        if file_path.find('client/profilers') == -1:
            try:
                found_test = control_data.parse_control(file_path,
                                                        raise_warnings=True)
                tests[file_path] = found_test
            except control_data.ControlVariableException, e:
                logging.warn("Skipping %s\n%s", file, e)
        else:
            profilers[file_path] = compiler.parseFile(file_path).doc
Example #8
0
def _throttle_result_size(path):
    """Limit the total size of test results for the given path.

    Best-effort: failures to read/parse control files or to throttle are
    logged via tko_utils.dprint and never propagated.

    @param path: Path of the result directory.
    """
    if not result_runner.ENABLE_RESULT_THROTTLING:
        tko_utils.dprint(
            'Result throttling is not enabled. Skipping throttling %s' % path)
        return

    max_result_size_KB = control_data.DEFAULT_MAX_RESULT_SIZE_KB
    # Client side test saves the test control to file `control`, while server
    # side test saves the test control to file `control.srv`
    for control_file in ['control', 'control.srv']:
        control = os.path.join(path, control_file)
        try:
            max_result_size_KB = control_data.parse_control(
                control, raise_warnings=False).max_result_size_KB
            # Any value different from the default is considered to be the one
            # set in the test control file.
            if max_result_size_KB != control_data.DEFAULT_MAX_RESULT_SIZE_KB:
                break
        except IOError as e:
            tko_utils.dprint('Failed to access %s. Error: %s\nDetails %s' %
                             (control, e, traceback.format_exc()))
        except control_data.ControlVariableException as e:
            tko_utils.dprint('Failed to parse %s. Error: %s\nDetails %s' %
                             (control, e, traceback.format_exc()))

    try:
        result_utils.execute(path, max_result_size_KB)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Throttling stays best-effort: log and continue.
        tko_utils.dprint('Failed to throttle result size of %s.\nDetails %s' %
                         (path, traceback.format_exc()))
Example #9
0
def update_from_whitelist(whitelist_set, add_experimental, add_noncompliant,
                          autotest_dir):
    """
    Scans through all tests in the whitelist and add them to the database.

    This function invoked when -w supplied.

    @param whitelist_set: set of tests in full-path form from a whitelist.
    @param add_experimental: add tests with experimental attribute set.
    @param add_noncompliant: attempt adding test with invalid control files.
    @param autotest_dir: prepended to path strings
            (see global_config.ini, COMMON, autotest_top_path).
    """
    tests = {}
    profilers = {}
    for file_path in whitelist_set:
        if file_path.find('client/profilers') == -1:
            try:
                found_test = control_data.parse_control(file_path,
                                                        raise_warnings=True)
                tests[file_path] = found_test
            except control_data.ControlVariableException, e:
                logging.warn("Skipping %s\n%s", file, e)
        else:
            profilers[file_path] = compiler.parseFile(file_path).doc
Example #10
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        @param control: path to the control file for this job.
        @param options: parsed command-line options object; cont, verbose,
                harness and harness_args are read here.
        """
        if not options.cont:
            # Fresh (non-continuation) run: clear debug and result leftovers.
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # Harness settings are persisted so continuations reuse them.
        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        if self.control:
            parsed_control = control_data.parse_control(self.control,
                                                        raise_warnings=False)
            # NOTE(review): self.fast is only assigned when a control file
            # is given -- confirm later readers tolerate it being absent.
            self.fast = parsed_control.fast

        # set up the status logger
        def client_job_record_hook(entry):
            # Tag entries with the suffix of the global log filename, if any.
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)

        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook)
 def test_parse_control(self):
     """A compliant control file parses and exposes all standard fields."""
     cd = control_data.parse_control(self.control_tmp.name, True)
     # assertEqual throughout: assertEquals is a deprecated alias.
     self.assertEqual(cd.author, "Author")
     self.assertEqual(cd.dependencies, set(["console", "power"]))
     self.assertEqual(cd.doc, "doc stuff")
     self.assertEqual(cd.experimental, False)
     self.assertEqual(cd.name, "nAmE")
     self.assertEqual(cd.run_verify, False)
     self.assertEqual(cd.sync_count, 2)
     self.assertEqual(cd.time, "short")
     self.assertEqual(cd.test_class, "kernel")
     self.assertEqual(cd.test_category, "stress")
     self.assertEqual(cd.test_type, "client")
Example #12
0
 def test_parse_control(self):
     """A compliant control file parses and exposes all standard fields."""
     cd = control_data.parse_control(self.control_tmp.name, True)
     # assertEqual throughout: assertEquals is a deprecated alias.
     self.assertEqual(cd.author, "Author")
     self.assertEqual(cd.dependencies, set(['console', 'power']))
     self.assertEqual(cd.doc, "doc stuff")
     self.assertEqual(cd.experimental, False)
     self.assertEqual(cd.name, "nAmE")
     self.assertEqual(cd.run_verify, False)
     self.assertEqual(cd.sync_count, 2)
     self.assertEqual(cd.time, "short")
     self.assertEqual(cd.test_class, "kernel")
     self.assertEqual(cd.test_category, "stress")
     self.assertEqual(cd.test_type, "client")
def main():
    """
    Checks if all control files that are a part of this commit conform to the
    ChromeOS autotest guidelines.
    """
    arg_parser = argparse.ArgumentParser(
        description='Process overlay arguments.')
    arg_parser.add_argument('--overlay',
                            default=None,
                            help='the overlay directory path')
    args = arg_parser.parse_args()

    file_list = os.environ.get('PRESUBMIT_FILES')
    if file_list is None:
        raise ControlFileCheckerError(
            'Expected a list of presubmit files in '
            'the PRESUBMIT_FILES environment variable.')

    # Load the attribute whitelist; the location is fixed relative to the
    # autotest checkout.
    path_whitelist = os.path.join(common.autotest_dir,
                                  'site_utils/attribute_whitelist.txt')
    with open(path_whitelist, 'r') as f:
        whitelist = set(line.strip() for line in f if line.strip())

    # Getting the useflags is slow, so defer it until some control file
    # actually needs checking.
    useflags = None
    for file_path in file_list.split('\n'):
        match = re.search(r'.*/control(?:\.\w+)?$', file_path)
        if not match:
            continue
        ctrl_file_path = match.group(0)
        CheckSuiteLineRemoved(ctrl_file_path)
        ctrl_data = control_data.parse_control(ctrl_file_path,
                                               raise_warnings=True)
        test_name = os.path.basename(os.path.split(file_path)[0])
        try:
            reporting_utils.BugTemplate.validate_bug_template(
                ctrl_data.bug_template)
        except AttributeError:
            # The control file may not have bug template defined.
            pass

        if not useflags:
            useflags = GetUseFlags(args.overlay)
        CheckSuites(ctrl_data, test_name, useflags)
        CheckValidAttr(ctrl_data, whitelist, test_name)
        CheckRetry(ctrl_data, test_name)
        CheckDependencies(ctrl_data, test_name)
Example #14
0
 def test_parse_control(self):
     """A compliant control file parses and exposes all standard fields."""
     cd = control_data.parse_control(self.control_tmp.name, True)
     # assertEqual throughout: assertEquals is a deprecated alias.
     self.assertEqual(cd.author, "Author")
     self.assertEqual(cd.dependencies, set(['console', 'power']))
     self.assertEqual(cd.doc, "doc stuff")
     self.assertEqual(cd.experimental, False)
     self.assertEqual(cd.name, "nAmE")
     self.assertEqual(cd.run_verify, False)
     self.assertEqual(cd.sync_count, 2)
     self.assertEqual(cd.time, "short")
     self.assertEqual(cd.test_class, "kernel")
     self.assertEqual(cd.test_category, "stress")
     self.assertEqual(cd.test_type, "client")
     self.assertEqual(cd.retries, 5)
     self.assertEqual(cd.require_ssp, False)
     self.assertEqual(cd.attributes,
                      set(["suite:smoke","suite:bvt","subsystem:default"]))
def AttrSuiteMatch(path_list, path_whitelist):
    """Check whether attributes are in the attribute whitelist and match with
    the suites in the control files.

    @param path_list: a list of paths to the control files to be checked.
    @param path_whitelist: path to the attribute whitelist.

    @return a list of paths to the control files that failed at checking.
            A path may appear twice when it fails both checks.
    """
    unmatch_pathlist = []

    # Read the whitelist to a set, if path is invalid, throw IOError.
    with open(path_whitelist, 'r') as f:
        whitelist = {line.strip() for line in f.readlines() if line.strip()}

    # Read the attr in the control files, check with whitelist and suite.
    for path in path_list:
        cd = control_data.parse_control(path, True)
        cd_attrs = cd.attributes

        # Every attribute must come from the whitelist.
        if not (whitelist >= cd_attrs):
            unmatch_pathlist.append(path)
        if hasattr(cd, 'suite'):
            # When a suite is defined, attributes must mirror it exactly.
            target_attrs = set('suite:' + x.strip()
                               for x in cd.suite.split(',') if x.strip())
            if cd_attrs != target_attrs:
                unmatch_pathlist.append(path)
        elif cd_attrs:
            # No suite defined: there must be no attributes either.
            # (Simplified from `elif not hasattr(cd, 'suite') and cd_attrs`;
            # the hasattr test is always False here, so it was redundant.)
            unmatch_pathlist.append(path)

    return unmatch_pathlist
def _max_result_size_from_control(path):
    """Get the max result size set in a control file, if any.

    Returns None when no override is found.
    """
    for name in _HARDCODED_CONTROL_FILE_NAMES:
        control = os.path.join(path, name)
        if not os.path.exists(control):
            continue

        try:
            parsed = control_data.parse_control(control, raise_warnings=False)
        except IOError as e:
            tko_utils.dprint(
                    'Failed to access %s. Error: %s\nDetails %s' %
                    (control, e, traceback.format_exc()))
            continue
        except control_data.ControlVariableException as e:
            tko_utils.dprint(
                    'Failed to parse %s. Error: %s\nDetails %s' %
                    (control, e, traceback.format_exc()))
            continue
        # Only a value different from the default counts as an override.
        max_result_size_KB = parsed.max_result_size_KB
        if max_result_size_KB != control_data.DEFAULT_MAX_RESULT_SIZE_KB:
            return max_result_size_KB
    return None
def main():
    """Autoserv entry point.

    Parses command-line options, prepares the results directory and
    logging, optionally stages the server-side package (SSP), then runs
    the job (or produces mocked results in testing mode) and exits with
    the job's status code.
    """
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job requires to run with server-side package, try to stage server-
    # side package first. If that fails with error that autotest server package
    # does not exist, fall back to run the job without using server-side
    # packaging. If option warn_no_ssp is specified, that means autoserv is
    # running in a drone does not support SSP, thus no need to stage server-side
    # package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have autotest server package. Fall back to not
        # to use server-side package. Logging is postponed until logging being
        # set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        # Presence of any of these marks a previously-used results dir.
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, it means that autoserv is
    # running in a drone does not have SSP supported and a warning will be logs.
    # Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        # SSP runs keep their own log dir next to the results.
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
            'Autoserv is required to run with server-side packaging. '
            'However, no server-side package can be found based on '
            '`--image`, host attribute job_repo_url or host OS version '
            'label. It could be that the build to test is older than the '
            'minimum version that supports server-side packaging. The test '
            'will be executed without using erver-side packaging. '
            'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    try:
        # Take the first argument as control file name, get the test name from
        # the control file.
        # NOTE(review): test_name is only bound when this condition holds;
        # the testing-mode branch below can reference it unbound otherwise --
        # confirm whether test_mode is ever enabled without machines/args.
        if (len(parser.args) > 0 and parser.args[0] != ''
                and parser.options.machines):
            try:
                test_name = control_data.parse_control(
                    parser.args[0], raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug(
                    'Failed to retrieve test name from control file.')
                test_name = None
    except control_data.ControlVariableException as e:
        # NOTE(review): the inner handler above already catches this
        # exception type, so this outer handler looks unreachable -- confirm.
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    # Special tasks (verify/repair/...) never run the results parser, so
    # testing mode treats them differently below.
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                        test_name if test_name else 'unknown-test',
                        parser.options.results, machine).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
Example #18
0
#!/usr/bin/python
import sys, os, textwrap
import common
from autotest_lib.client.common_lib import control_data

if len(sys.argv) != 2:
    print "Usage %s <control file>" % os.path.basename(sys.argv[0])
    sys.exit(1)

if not os.path.exists(sys.argv[1]):
    print "File %s does not exist" % sys.argv[1]
    sys.exit(1)

try:
    cd = control_data.parse_control(sys.argv[1], True)
except Exception, e:
    print "This control file does not adhear to the spec set forth in"
    print "http://autotest.kernel.org/wiki/ControlRequirements"
    print
    print "Specific error:"
    print '\n'.join(textwrap.wrap(str(e), initial_indent='    ',
                    subsequent_indent='    '))
    sys.exit(1)

if cd.experimental:
    print textwrap.wrap("WARNING: This file is marked experimental.  It will "
                        "not show up on the autotest frontend unless "
                        "experimental is set to False.")
    sys.exit(0)

print "Control file looks good!"
Example #19
0
 def test_bug_template_parsing(self):
     """Parse a control file embedding a bug template and verify it."""
     os.write(self.control_tmp.fd, self.insert_bug_template(CONTROL))
     parsed = control_data.parse_control(self.control_tmp.name, True)
     self.verify_bug_template(parsed.bug_template)
Example #20
0
 def test_bad_template(self):
     """A malformed bug template must not leak into the control data."""
     self.bug_template = 'foobarbug_template'
     os.write(self.control_tmp.fd, self.insert_bug_template(CONTROL))
     parsed = control_data.parse_control(self.control_tmp.name, True)
     self.assertFalse(hasattr(parsed, 'bug_template'))
Example #21
0
 def test_bug_template_list(self):
     """Lists inside the bug template may mix in other data types."""
     self.bug_template['labels'].append({'foo': 'bar'})
     os.write(self.control_tmp.fd, self.insert_bug_template(CONTROL))
     parsed = control_data.parse_control(self.control_tmp.name, True)
     self.verify_bug_template(parsed.bug_template)