Example #1
def test_build():
    for buildername in ('pickle', 'json', 'linkcheck', 'text', 'htmlhelp',
                        'qthelp', 'epub', 'changes', 'singlehtml', 'xml',
                        'pseudoxml'):
        app = TestApp(buildername=buildername)
        yield lambda app: app.builder.build_all(), app
        app.cleanup()
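
These examples use the nose generator-test style: each yield hands the runner a (callable, argument) pair that is executed as its own test, and cleanup() runs once the generator resumes. Under pytest the same builder sweep would normally be parametrized; a minimal sketch, assuming the same TestApp helper is importable:

import pytest

@pytest.mark.parametrize('buildername', ['pickle', 'json', 'text', 'xml'])
def test_build_parametrized(buildername):
    app = TestApp(buildername=buildername)
    try:
        app.builder.build_all()
    finally:
        app.cleanup()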
Example #2
def verify_build(buildername, srcdir):
    if buildername == 'man' and ManWriter is None:
        raise SkipTest('man writer is not available')
    app = TestApp(buildername=buildername, srcdir=srcdir)
    try:
        app.builder.build_all()
    finally:
        app.cleanup()
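
verify_build is a helper rather than a test in its own right; in this style it is presumably driven by a generator that yields one (helper, args) tuple per builder, roughly like this (builder list abbreviated, '(temp)' srcdir as used in Example #7):

def test_all_builders():
    # hypothetical driver for the verify_build helper above
    for buildername in ('html', 'latex', 'text', 'man'):
        yield verify_build, buildername, '(temp)'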
Example #3
def test_extensions():
    status, warnings = StringIO(), StringIO()
    app = TestApp(status=status, warning=warnings)
    try:
        app.setup_extension('shutil')
        assert warnings.getvalue().startswith("WARNING: extension 'shutil'")
    finally:
        app.cleanup()
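
The warning fires because shutil is importable but defines no setup() function. For contrast, an extension that does define setup() loads silently; a minimal sketch along the same lines, with sphinx.ext.autodoc chosen as an extension known to define setup():

def test_real_extension():
    status, warnings = StringIO(), StringIO()
    app = TestApp(status=status, warning=warnings)
    try:
        app.setup_extension('sphinx.ext.autodoc')
        assert 'WARNING' not in warnings.getvalue()
    finally:
        app.cleanup()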
Example #4
def deco(*args2, **kwargs2):
    app = TestApp(*args, **default_kw)
    (app.srcdir / 'docutils.conf').write_text(docutilsconf)
    try:
        cwd = os.getcwd()
        os.chdir(app.srcdir)
        func(app, *args2, **kwargs2)
    finally:
        os.chdir(cwd)
    # cleanup is only reached on success: a failed test keeps its build
    # directory around for inspection
    app.cleanup()
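
This deco fragment closes over args, default_kw, func, and docutilsconf, so it only makes sense inside a decorator factory. A minimal sketch of the assumed enclosing structure (the factory name is illustrative):

import functools
import os

def with_docutilsconf(docutilsconf, *args, **default_kw):
    # hypothetical factory: build a TestApp, drop a docutils.conf into its
    # srcdir, and run the wrapped test from inside that directory
    def generator(func):
        @functools.wraps(func)
        def deco(*args2, **kwargs2):
            app = TestApp(*args, **default_kw)
            (app.srcdir / 'docutils.conf').write_text(docutilsconf)
            try:
                cwd = os.getcwd()
                os.chdir(app.srcdir)
                func(app, *args2, **kwargs2)
            finally:
                os.chdir(cwd)
            app.cleanup()  # only reached if the test passed
        return deco
    return generator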
Example #5
def test_feed():
    app = TestApp(buildername='html', warning=feed_warnfile, cleanenv=True)
    app.build(force_all=True, filenames=[])  # build_all misses the crucial 'finish' signal
    feed_warnings = feed_warnfile.getvalue().replace(os.sep, '/')
    feed_warnings_exp = FEED_WARNINGS % {'root': app.srcdir}
    yield assert_equals, feed_warnings, feed_warnings_exp
    rss_path = os.path.join(app.outdir, 'rss.xml')
    yield exists, rss_path
    
    base_path = unicode("file:/" + app.outdir)
    
    # see http://www.feedparser.org/
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0  # feedparser well-formedness detection; we want this
    entries = f.entries
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"
    
    yield assert_equals, entries[0].link, base_path + '/B_latest.html'
    yield assert_equals, entries[0].guid, base_path + '/B_latest.html'
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[1].link, base_path + '/A_older.html'
    yield assert_equals, entries[1].guid, base_path + '/A_older.html'
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0)
    yield assert_equals, entries[2].title, "The oldest blog post"
    yield assert_equals, entries[2].link, base_path + '/C_most_aged.html'
    yield assert_equals, entries[2].guid, base_path + '/C_most_aged.html'
    # Now we do it all again to make sure that things work when handling stale files
    app2 = TestApp(buildername='html', warning=feed_warnfile)
    app2.build(force_all=False, filenames=['most_aged'])
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0  # feedparser well-formedness detection; we want this
    entries = f.entries
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0)
    yield assert_equals, entries[2].title, "The oldest blog post"
    
    # Tests for relative URIs. Note that these tests only work because there
    # is no xml:base; otherwise feedparser would fix them up for us, see
    # http://www.feedparser.org/docs/resolving-relative-links.html
    links = BeautifulSoup(entries[0].description).findAll('a')
    # These links will look like:
    #[<a class="headerlink" href="#the-latest-blog-post" title="Permalink to this headline">¶</a>, <a class="reference internal" href="older.html"><em>a relative link</em></a>, <a class="reference external" href="http://google.com/">an absolute link</a>]
    yield assert_equals, links.pop()['href'], "http://google.com/"
    yield assert_equals, links.pop()['href'], base_path + '/A_older.html'
    yield assert_equals, links.pop()['href'], entries[0].link + '#the-latest-blog-post'
    
    app.cleanup()
    app2.cleanup()
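
Note the Python 2 era APIs in this test: unicode(), BeautifulSoup 3's findAll, and nose-style yields. Under Python 3 with bs4 the link extraction would look roughly like this (bs4 and its html.parser backend assumed available):

from bs4 import BeautifulSoup

def extract_hrefs(description):
    # bs4 spelling of BeautifulSoup 3's findAll('a')
    soup = BeautifulSoup(description, 'html.parser')
    return [a['href'] for a in soup.find_all('a')]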
Example #6
def test_master_doc_not_found(tmpdir):
    (tmpdir / 'conf.py').write_text('master_doc = "index"')
    assert tmpdir.listdir() == ['conf.py']

    app = None
    try:
        app = TestApp(buildername='dummy', srcdir=tmpdir)
        app.builder.build_all()
        assert False  # SphinxError not raised
    except Exception as exc:
        assert isinstance(exc, SphinxError)
    finally:
        # guard: if TestApp() itself raised, app was never bound
        if app is not None:
            app.cleanup()
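
The same check reads more directly with pytest.raises; a sketch assuming pytest is in use (cleanup elided, as a fixture would normally own the app's lifetime):

import pytest

def test_master_doc_not_found(tmpdir):
    (tmpdir / 'conf.py').write_text('master_doc = "index"')
    with pytest.raises(SphinxError):
        app = TestApp(buildername='dummy', srcdir=tmpdir)
        app.builder.build_all()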
Example #7
def test_nonascii_path():
    (test_root / '_build').rmtree(True)  # keep this so 'gettext' builds first from a clean tree

    builder_names = ['gettext', 'html', 'dirhtml', 'singlehtml', 'latex',
                     'texinfo', 'pickle', 'json', 'linkcheck', 'text',
                     'htmlhelp', 'qthelp', 'epub', 'changes', 'xml',
                     'pseudoxml']
    if ManWriter is not None:
        builder_names.append('man')

    for buildername in builder_names:
        app = TestApp(buildername=buildername, srcdir='(temp)')
        yield _test_nonascii_path, app
        app.cleanup()
Example #8
        def deco(*args2, **kwargs2):
            # Now, modify the python path...
            srcdir = default_kw['srcdir']
            sys.path.insert(0, srcdir)
            try:
                app = TestApp(*args, **default_kw)
                func(app, *args2, **kwargs2)
            finally:
                if srcdir in sys.path:
                    sys.path.remove(srcdir)
                # remove the auto-generated dummy_module.rst
                dummy_rst = srcdir / 'dummy_module.rst'
                if dummy_rst.isfile():
                    dummy_rst.unlink()

            # cleanup is only reached on success: a failed test keeps its
            # build directory around for inspection
            app.cleanup()
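
A hypothetical use of the factory this fragment belongs to, showing why srcdir is pushed onto sys.path: the build imports a generated dummy_module, so Python must be able to find it (decorator and srcdir names here are illustrative):

@with_app_on_syspath(srcdir='test-ext-dummy')
def test_dummy_module(app):
    # dummy_module.rst is auto-generated during the build and removed
    # again in the finally block above
    app.builder.build_all()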
Example #9
def test_gen_check_types():
    for key, value, should, deftype in TYPECHECK_OVERRIDES:
        warning = StringIO()
        app = TestApp(confoverrides={key: value}, warning=warning)
        app.cleanup()

        real = type(value).__name__
        msg = ("WARNING: the config value %r has type `%s',"
               " defaults to `%s.'\n" % (key, real, deftype.__name__))
        def test():
            warning_list = warning.getvalue()
            assert (msg in warning_list) == should, \
                    "Setting %s to %r should%s raise: %s" % \
                    (key, value, " not" if should else "", msg)
        test.description = "test_check_type_%s_on_%s" % \
                (real, type(Config.config_values[key][0]).__name__)

        yield test
Example #10
def test_output():
    status, warnings = StringIO(), StringIO()
    app = TestApp(status=status, warning=warnings)
    try:
        status.truncate(0)  # __init__ writes to status
        status.seek(0)
        app.info("Nothing here...")
        assert status.getvalue() == "Nothing here...\n"
        status.truncate(0)
        status.seek(0)
        app.info("Nothing here...", True)
        assert status.getvalue() == "Nothing here..."

        old_count = app._warncount
        app.warn("Bad news!")
        assert warnings.getvalue() == "WARNING: Bad news!\n"
        assert app._warncount == old_count + 1
    finally:
        app.cleanup()
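
Two details above are easy to miss: truncate(0) empties the buffer but does not rewind the write position, hence the paired seek(0); and the second positional argument to app.info is nonl, which suppresses the trailing newline (which is why the second assertion expects no \n). An illustrative reset helper:

def reset(stream):
    # truncate(0) empties the buffer but leaves the position where it was;
    # seek(0) rewinds so the next write starts at offset 0
    stream.truncate(0)
    stream.seek(0)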
Example #11
def test_domain_override():
    class A(Domain):
        name = 'foo'
    class B(A):
        name = 'foo'
    class C(Domain):
        name = 'foo'
    status, warnings = StringIO(), StringIO()
    app = TestApp(status=status, warning=warnings)
    try:
        # No domain named 'foo' is registered yet.
        raises_msg(ExtensionError, 'domain foo not yet registered',
                   app.override_domain, A)
        assert app.add_domain(A) is None
        assert app.override_domain(B) is None
        raises_msg(ExtensionError, 'new domain not a subclass of registered '
                   'foo domain', app.override_domain, C)
    finally:
        app.cleanup()
Example #12
def test_correct_year():
    try:
        # save the current value of SOURCE_DATE_EPOCH
        sde = os.environ.pop('SOURCE_DATE_EPOCH', None)

        # with SOURCE_DATE_EPOCH unset: no modification
        app = TestApp(buildername='html', testroot='correct-year')
        app.builder.build_all()
        content = (app.outdir / 'contents.html').text()
        app.cleanup()
        assert '2006-2009' in content

        # with SOURCE_DATE_EPOCH set: the copyright year should be updated
        os.environ['SOURCE_DATE_EPOCH'] = "1293840000"
        app = TestApp(buildername='html', testroot='correct-year')
        app.builder.build_all()
        content = (app.outdir / 'contents.html').text()
        app.cleanup()
        assert '2006-2011' in content

        os.environ['SOURCE_DATE_EPOCH'] = "1293839999"
        app = TestApp(buildername='html', testroot='correct-year')
        app.builder.build_all()
        content = (app.outdir / 'contents.html').text()
        app.cleanup()
        assert '2006-2010' in content

    finally:
        # restore SOURCE_DATE_EPOCH
        if sde is None:
            os.environ.pop('SOURCE_DATE_EPOCH', None)
        else:
            os.environ['SOURCE_DATE_EPOCH'] = sde
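
The two epoch values sit one second apart on either side of a year boundary, which is exactly what the assertions rely on:

import time

# 1293840000 is 2011-01-01 00:00:00 UTC, so the copyright range becomes
# 2006-2011; one second earlier is still 2010-12-31 23:59:59 UTC
assert time.gmtime(1293840000)[:6] == (2011, 1, 1, 0, 0, 0)
assert time.gmtime(1293839999)[:6] == (2010, 12, 31, 23, 59, 59)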
Example #13
def test_needs_sphinx():
    # micro version
    app = TestApp(confoverrides={'needs_sphinx': '1.3.3'})  # OK: less
    app.cleanup()
    app = TestApp(confoverrides={'needs_sphinx': '1.3.4'})  # OK: equals
    app.cleanup()
    raises(VersionRequirementError, TestApp,
           confoverrides={'needs_sphinx': '1.3.5'})  # NG: greater

    # minor version
    app = TestApp(confoverrides={'needs_sphinx': '1.2'})  # OK: less
    app.cleanup()
    app = TestApp(confoverrides={'needs_sphinx': '1.3'})  # OK: equals
    app.cleanup()
    raises(VersionRequirementError, TestApp,
           confoverrides={'needs_sphinx': '1.4'})  # NG: greater

    # major version
    app = TestApp(confoverrides={'needs_sphinx': '0'})  # OK: less
    app.cleanup()
    app = TestApp(confoverrides={'needs_sphinx': '1'})  # OK: equals
    app.cleanup()
    raises(VersionRequirementError, TestApp,
           confoverrides={'needs_sphinx': '2'})  # NG: greater
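
The comments above imply the running Sphinx is version 1.3.4. The check is, in effect, a component-wise comparison of needs_sphinx against the current version, only as deep as the components given; a rough sketch of that logic (not Sphinx's actual implementation):

def satisfies(needs, current=(1, 3, 4)):
    # compare only the components the user specified: '1.3' accepts any 1.3.x
    need = tuple(int(part) for part in needs.split('.'))
    return need <= current[:len(need)]

assert satisfies('1.3.4') and satisfies('1.2') and satisfies('0')
assert not satisfies('1.3.5') and not satisfies('1.4') and not satisfies('2')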
Example #14
    def test_feed_by_parsing_it(self):
        feed_warnfile = self.feed_warnfile
        app = TestApp(buildername='html', warning=feed_warnfile, cleanenv=True)
        app.build(force_all=True,
                  filenames=[])  #build_all misses the crucial finish signal
        feed_warnings = feed_warnfile.getvalue().replace(os.sep, '/')
        feed_warnings_exp = self.FEED_WARNINGS % {'root': app.srcdir}
        self.assertEqual(feed_warnings, feed_warnings_exp)
        rss_path = os.path.join(app.outdir, 'rss.xml')
        self.assertTrue(exists(rss_path))

        base_path = unicode("file:/" + app.outdir)

        # see http://www.feedparser.org/
        f = feedparser.parse(rss_path)
        # feedparser well-formedness detection; we want this
        self.assertEqual(f.bozo, 0)
        self.assertEqual(f.feed['title'], 'Sphinx Syndicate Test Title')
        entries = f.entries
        self.assertEqual(entries[0].updated_parsed[0:6],
                         (2001, 8, 11, 13, 0, 0))
        self.assertEqual(entries[0].title, "The latest blog post")

        self.assertEqual(entries[0].link, base_path + '/B_latest.html')
        self.assertEqual(entries[0].guid, base_path + '/B_latest.html')
        self.assertEqual(entries[1].updated_parsed[0:6],
                         (2001, 8, 11, 9, 0, 0))
        self.assertEqual(entries[1].title, "An older blog post")
        self.assertEqual(entries[1].link, base_path + '/A_older.html')
        self.assertEqual(entries[1].guid, base_path + '/A_older.html')
        self.assertEqual(entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0))
        self.assertEqual(entries[2].title, "The oldest blog post")
        self.assertEqual(entries[2].link, base_path + '/C_most_aged.html')
        self.assertEqual(entries[2].guid, base_path + '/C_most_aged.html')
        # Now we do it all again to make sure that things work when handling stale files
        app2 = TestApp(buildername='html', warning=feed_warnfile)
        app2.build(force_all=False, filenames=['most_aged'])
        f = feedparser.parse(rss_path)
        self.assertEqual(f.bozo, 0)
        entries = f.entries
        self.assertEqual(entries[0].updated_parsed[0:6],
                         (2001, 8, 11, 13, 0, 0))
        self.assertEqual(entries[0].title, "The latest blog post")
        self.assertEqual(entries[1].updated_parsed[0:6],
                         (2001, 8, 11, 9, 0, 0))
        self.assertEqual(entries[1].title, "An older blog post")
        self.assertEqual(entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0))
        self.assertEqual(entries[2].title, "The oldest blog post")

        # Tests for relative URIs. Note that these tests only work because
        # there is no xml:base; otherwise feedparser would fix them up for us,
        # see http://www.feedparser.org/docs/resolving-relative-links.html
        links = BeautifulSoup(entries[0].description).findAll('a')
        # These links will look like:
        #[<a class="headerlink" href="#the-latest-blog-post" title="Permalink to this headline">¶</a>, <a class="reference internal" href="older.html"><em>a relative link</em></a>, <a class="reference external" href="http://google.com/">an absolute link</a>]
        self.assertEqual(links.pop()['href'], "http://google.com/")
        self.assertEqual(links.pop()['href'], base_path + '/A_older.html')
        self.assertEqual(links.pop()['href'],
                         entries[0].link + '#the-latest-blog-post')

        index_path = os.path.join(app.outdir, 'index.html')
        soup = BeautifulSoup(open(index_path).read())
        latest_tree = soup.find('div', 'latest-wrapper')
        latest_items = latest_tree.findAll('li')
        actual_links = [entry.contents[0]['href'] for entry in latest_items]
        ideal_links = [
            u'B_latest.html',
            u'A_older.html',
            u'C_most_aged.html',
        ]

        self.assertListEqual(actual_links, ideal_links)

        app.cleanup()
        app2.cleanup()
Example #15
def test_feed():
    app = TestApp(buildername='html', warning=feed_warnfile, cleanenv=True)
    app.build(force_all=True, filenames=[])  # build_all misses the crucial 'finish' signal
    feed_warnings = feed_warnfile.getvalue().replace(os.sep, '/')
    feed_warnings_exp = FEED_WARNINGS % {'root': app.srcdir}
    yield assert_equals, feed_warnings, feed_warnings_exp
    rss_path = os.path.join(app.outdir, 'rss.xml')
    yield exists, rss_path

    base_path = unicode("file:/" + app.outdir)

    # see http://www.feedparser.org/
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0  # feedparser well-formedness detection; we want this
    yield assert_equals, f.feed['title'], 'Sphinx Syndicate Test Title'
    entries = f.entries
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"

    yield assert_equals, entries[0].link, base_path + '/B_latest.html'
    yield assert_equals, entries[0].guid, base_path + '/B_latest.html'
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[1].link, base_path + '/A_older.html'
    yield assert_equals, entries[1].guid, base_path + '/A_older.html'
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0)
    yield assert_equals, entries[2].title, "The oldest blog post"
    yield assert_equals, entries[2].link, base_path + '/C_most_aged.html'
    yield assert_equals, entries[2].guid, base_path + '/C_most_aged.html'
    # Now we do it all again to make sure that things work when handling stale files
    app2 = TestApp(buildername='html', warning=feed_warnfile)
    app2.build(force_all=False, filenames=['most_aged'])
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0  # feedparser well-formedness detection; we want this
    entries = f.entries
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0)
    yield assert_equals, entries[2].title, "The oldest blog post"

    # Tests for relative URIs. Note that these tests only work because there
    # is no xml:base; otherwise feedparser would fix them up for us, see
    # http://www.feedparser.org/docs/resolving-relative-links.html
    links = BeautifulSoup(entries[0].description).findAll('a')
    # These links will look like:
    #[<a class="headerlink" href="#the-latest-blog-post" title="Permalink to this headline">¶</a>, <a class="reference internal" href="older.html"><em>a relative link</em></a>, <a class="reference external" href="http://google.com/">an absolute link</a>]
    yield assert_equals, links.pop()['href'], "http://google.com/"
    yield assert_equals, links.pop()['href'], base_path + '/A_older.html'
    yield assert_equals, links.pop()['href'], entries[0].link + '#the-latest-blog-post'

    app.cleanup()
    app2.cleanup()