def test_comments_rendered(self):
        # Comments attached to the difference show up in the rendered
        # extra-listing view, each inside its own <pre> element.
        ds_diff = self.factory.makeDistroSeriesDifference()
        commenter = self.factory.makePerson()
        messages = ["I'm working on this.", "Here's another comment."]
        with person_logged_in(commenter):
            for message in messages:
                ds_diff.addComment(commenter, message)

        view = create_initialized_view(ds_diff, '+listing-distroseries-extra')
        soup = BeautifulSoup(view())

        for message in messages:
            self.assertEqual(1, len(soup.findAll('pre', text=message)))
示例#2
0
def print_upstream_linking_form(browser):
    """Print the upstream linking form found via +choose-affected-product.

    The resulting output will look something like:
    (*) A checked option
        [A related text field]
    ( ) An unchecked option
    """
    soup = BeautifulSoup(browser.contents)

    radio_control = browser.getControl(name='field.link_upstream_how')
    radio_buttons = soup.findAll(
        'input', {'name': 'field.link_upstream_how'})

    wrapper = textwrap.TextWrapper(width=65, subsequent_indent='    ')
    for radio in radio_buttons:
        # Locate the button's label: either an enclosing <label> element
        # or a <label> whose "for" attribute references the button's id.
        label = radio.findParent('label')
        if label is None:
            label = soup.find('label', {'for': radio['id']})
        # Render the radio button as checked or unchecked.
        if radio.get('value') in radio_control.value:
            print(wrapper.fill('(*) %s' % extract_text(label)))
        else:
            print(wrapper.fill('( ) %s' % extract_text(label)))
        # Print related text field, if found. Assumes that the text
        # field is in the same table row as the radio button.
        text_field = radio.findParent('tr').find('input', {'type': 'text'})
        if text_field is not None:
            text_control = browser.getControl(name=text_field.get('name'))
            print('    [%s]' % text_control.value.ljust(10))
 def contains_one_link_to_diff(self, html_or_soup, package_diff):
     """Return whether the html contains a link to the diff content."""
     # Accept either raw markup or an already-parsed soup.
     if isinstance(html_or_soup, BeautifulSoup):
         soup = html_or_soup
     else:
         soup = BeautifulSoup(html_or_soup)
     links = soup.findAll('a', href=package_diff.diff_content.http_url)
     return len(links) == 1
 def number_of_request_diff_texts(self, html_or_soup):
     """Returns the number of request diff text."""
     # Accept either raw markup or an already-parsed soup.
     if isinstance(html_or_soup, BeautifulSoup):
         soup = html_or_soup
     else:
         soup = BeautifulSoup(html_or_soup)
     matcher = {'class': re.compile('request-derived-diff')}
     return len(soup.findAll('span', matcher))
 def test_call(self):
     # Rendering the widget sets up both nested sub-widgets and emits
     # one input field per sub-widget.
     markup = self.widget()
     self.assertIsNotNone(self.widget.repository_widget)
     self.assertIsNotNone(self.widget.path_widget)
     rendered_ids = [
         tag["id"]
         for tag in BeautifulSoup(markup).findAll("input", id=True)]
     self.assertContentEqual(
         ["field.git_ref.repository", "field.git_ref.path"], rendered_ids)
示例#6
0
 def test_call(self):
     # Rendering the widget sets up the person sub-widget and both
     # selectable options, and emits the expected form fields.
     markup = self.widget()
     self.assertIsNotNone(self.widget.person_widget)
     self.assertIn("repository_owner", self.widget.options)
     self.assertIn("person", self.widget.options)
     soup = BeautifulSoup(markup)
     tags = soup.findAll(["input", "select"], {"id": re.compile(".*")})
     self.assertContentEqual(
         self.expected_ids, [tag["id"] for tag in tags])
    def test_blacklist_options_disabled(self):
        # Blacklist options are disabled to the users who are *not* archive
        # admins.
        ds_diff = self.factory.makeDistroSeriesDifference()
        person = self.factory.makePerson()
        view_content = self.getViewContentXmlHttpRequest(
            ds_diff, '+listing-distroseries-extra', person)
        soup = BeautifulSoup(view_content)

        disabled_divs = soup.findAll(
            'div', {'class': 'blacklist-options-disabled'})
        self.assertEqual(1, len(disabled_divs))
    def test_blacklist_options(self):
        # Blacklist options are presented to the users who are archive
        # admins.
        ds_diff = self.factory.makeDistroSeriesDifference()
        archive_admin = self.factory.makeArchiveAdmin(
            archive=ds_diff.derived_series.main_archive)
        view_content = self.getViewContentXmlHttpRequest(
            ds_diff, '+listing-distroseries-extra', archive_admin)
        soup = BeautifulSoup(view_content)

        option_divs = soup.findAll('div', {'class': 'blacklist-options'})
        self.assertEqual(1, len(option_divs))
示例#9
0
 def test_call(self):
     # Rendering the widget sets up the core and snapcraft sub-widgets
     # and emits one input field for each channel.
     markup = self.widget()
     self.assertIsNotNone(self.widget.core_widget)
     self.assertIsNotNone(self.widget.snapcraft_widget)
     tags = BeautifulSoup(markup).findAll(
         ["input"], {"id": re.compile(".*")})
     self.assertContentEqual(
         ["field.auto_build_channels.core",
          "field.auto_build_channels.snapcraft"],
         [tag["id"] for tag in tags])
示例#10
0
 def test_call(self):
     # Rendering the widget sets up the track and risks sub-widgets and
     # emits the track, branch, and one-per-risk input fields.
     markup = self.widget()
     self.assertIsNotNone(self.widget.track_widget)
     self.assertIsNotNone(self.widget.risks_widget)
     soup = BeautifulSoup(markup)
     tags = soup.findAll(["input"], {"id": re.compile(".*")})
     expected_ids = ["field.channels.track", "field.channels.branch"]
     expected_ids.extend(
         "field.channels.risks.%d" % i for i in range(len(self.risks)))
     self.assertContentEqual(expected_ids, [tag["id"] for tag in tags])
 def test_call(self):
     # The __call__ method sets up the widgets and the options.
     markup = self.widget()
     # Use assertIsNotNone/assertIn rather than assertIsNot(None, ...)
     # and assertTrue(... in ...): same pass/fail behavior, clearer
     # failure messages, and consistent with the sibling widget tests.
     self.assertIsNotNone(self.widget.product_widget)
     self.assertIn('personal', self.widget.options)
     expected_ids = [
         'field.target.option.personal',
         'field.target.option.product',
         'field.target.product',
     ]
     soup = BeautifulSoup(markup)
     fields = soup.findAll(['input', 'select'], {'id': re.compile('.*')})
     ids = [field['id'] for field in fields]
     self.assertContentEqual(expected_ids, ids)
示例#12
0
 def test_call(self):
     # Rendering the widget sets up the PPA sub-widget and both archive
     # options, and emits the expected form fields.
     markup = self.widget()
     self.assertIsNotNone(self.widget.ppa_widget)
     self.assertIn("primary", self.widget.options)
     self.assertIn("ppa", self.widget.options)
     soup = BeautifulSoup(markup)
     tags = soup.findAll(["input", "select"], {"id": re.compile(".*")})
     self.assertContentEqual(
         ["field.archive.option.primary",
          "field.archive.option.ppa",
          "field.archive.ppa"],
         [tag["id"] for tag in tags])
    def test_parent_source_diff_rendering_diff_no_link(self):
        # The status of the package is shown if the parent package diff is
        # in a PENDING or FAILED state.
        ds_diff = self.factory.makeDistroSeriesDifference(
            set_base_version=True)

        for status, css_class in [
                (PackageDiffStatus.PENDING, 'PENDING'),
                (PackageDiffStatus.FAILED, 'FAILED')]:
            with person_logged_in(self.factory.makePerson()):
                ds_diff.parent_package_diff = self.factory.makePackageDiff(
                    status=status)

            view = create_initialized_view(
                ds_diff, '+listing-distroseries-extra')
            soup = BeautifulSoup(view())
            # Only one link since the other package diff is not COMPLETED.
            self.assertEqual(1, self.number_of_request_diff_texts(soup))
            # The diff is rendered with a span carrying the status class.
            spans = soup.findAll('span', {'class': re.compile(css_class)})
            self.assertEqual(1, len(spans))
    def parsePage(self, page_text):
        """Builds self.product using HTML content in page_text"""
        soup = BeautifulSoup(page_text)
        if soup is None:
            return None

        # Load products into a list since Bugzilla references them
        # by index number. Skip the bare-newline text nodes between
        # the <option> elements.
        products = [
            {'name': option.string, 'components': {}, 'versions': None}
            for option in soup.find(
                name='select',
                onchange="doOnSelectProduct(2);").contents
            if option.string != "\n"]

        # Components and versions live in inline scripts; scan each
        # statement for the per-product component/version assignments.
        for script in soup.findAll(name="script"):
            if script is None or script.string is None:
                continue
            for statement in script.string.split(";"):
                cpt_match = self.re_cpts.search(statement)
                if cpt_match:
                    index = int(cpt_match.group(1))
                    products[index]['components'] = dictFromCSV(
                        cpt_match.group(2))

                ver_match = self.re_vers.search(statement)
                if ver_match:
                    index = int(ver_match.group(1))
                    products[index]['versions'] = dictFromCSV(
                        ver_match.group(2))

        # Re-map list into dict for easier lookups
        for entry in products:
            self.products[entry['name']] = entry

        return True
示例#15
0
    def content(self):
        # Qualify relative hrefs for html/xhtml content when the original
        # subdomain is known: unqualified hrefs would otherwise try to be
        # served from http://feeds.launchpad.net, which will not work.
        qualify_links = (
            self.content_type in ('html', 'xhtml')
            and self.root_url is not None)
        if qualify_links:
            soup = BeautifulSoup(self._content)
            for anchor in soup.findAll('a'):
                href = anchor['href']
                if href.startswith('/'):
                    anchor['href'] = urljoin(self.root_url, href)
            result = unicode(soup)
        else:
            result = self._content

        # text/html output is escaped; xhtml is re-parsed with entity
        # conversion instead.
        if self.content_type in ('text', 'html'):
            result = html_escape(result)
        elif self.content_type == 'xhtml':
            soup = BeautifulSoup(
                result,
                convertEntities=BeautifulSoup.HTML_ENTITIES)
            result = unicode(soup)
        return result