Example #1
0
def test_scraper(httpd, tmpdir, args):
    """Testing download scenarios with invalid parameters for TinderboxScraper"""

    # Invalid argument combinations must be rejected at construction time.
    scraper_kwargs = dict(args)
    scraper_kwargs.update(destination=str(tmpdir), base_url=httpd.get_url())

    with pytest.raises(ValueError):
        TinderboxScraper(**scraper_kwargs)
Example #2
0
def test_scraper(httpd, tmpdir, args, filename, url):
    """Testing various download scenarios for TinderboxScraper"""

    base = httpd.get_url()
    scraper = TinderboxScraper(destination=str(tmpdir),
                               base_url=base,
                               **args)

    # The scraper must resolve both the local target path and the remote URL.
    assert scraper.filename == os.path.join(str(tmpdir), filename)
    assert unquote(scraper.url) == urljoin(base, url)
def test_invalid_revision(httpd, tmpdir, mocker):
    """An unknown revision yields no builds and raises NotFoundError."""
    mocked_query = mocker.patch('mozdownload.treeherder.Treeherder.query_builds_by_revision')
    mocked_query.return_value = []

    with pytest.raises(errors.NotFoundError):
        TinderboxScraper(platform='linux',
                         revision='not_valid',
                         destination=str(tmpdir),
                         base_url=httpd.get_url())
    def test_invalid_revision(self, query_builds_by_revision):
        """An unknown revision must yield no builds and raise NotFoundError."""
        query_builds_by_revision.return_value = []

        with self.assertRaises(errors.NotFoundError):
            TinderboxScraper(platform='linux',
                             revision='not_valid',
                             destination=self.temp_dir,
                             base_url=self.wdir,
                             logger=self.logger)
    def test_valid_revision(self, query_builds_by_revision):
        """A known revision resolves to exactly one build at the expected URL."""
        build_path = self.wdir + '/firefox/tinderbox-builds/mozilla-central-linux/1374583608/'
        query_builds_by_revision.return_value = [build_path]

        scraper = TinderboxScraper(platform='linux',
                                   revision='6b92cb377496',
                                   destination=self.temp_dir,
                                   base_url=self.wdir,
                                   logger=self.logger)

        self.assertEqual(1, len(scraper.builds))
        expected_url = build_path + 'firefox-25.0a1.en-US.linux-i686.tar.bz2'
        self.assertEqual(expected_url, scraper.url)
Example #6
0
    def test_scraper(self):
        """Testing various download scenarios for TinderboxScraper"""

        # `tests` is a module-level list of scenario dicts (defined outside
        # this chunk); each entry supplies scraper kwargs plus the expected
        # filename and URL for that scenario.
        for entry in tests:
            scraper = TinderboxScraper(destination=self.temp_dir,
                                       base_url=self.wdir,
                                       logger=self.logger,
                                       **entry['args'])
            expected_filename = os.path.join(self.temp_dir, entry['filename'])
            self.assertEqual(scraper.filename, expected_filename)
            # NOTE(review): urllib.unquote exists only on Python 2; under
            # Python 3 this would be urllib.parse.unquote.
            self.assertEqual(urllib.unquote(scraper.url),
                             urljoin(self.wdir, entry['url']))
def test_valid_revision(httpd, tmpdir, mocker):
    """A known revision resolves to a single build with the expected URL."""
    mocked_query = mocker.patch('mozdownload.treeherder.Treeherder.query_builds_by_revision')
    build_path = httpd.get_url() + 'firefox/tinderbox-builds/mozilla-central-linux/1374583608/'
    mocked_query.return_value = [build_path]

    scraper = TinderboxScraper(platform='linux',
                               revision='6b92cb377496',
                               destination=str(tmpdir),
                               base_url=httpd.get_url())

    assert 1 == len(scraper.builds)
    assert build_path + 'firefox-25.0a1.en-US.linux-i686.tar.bz2' == scraper.url
Example #8
0
    def test_scraper(self):
        """Testing various download scenarios for TinderboxScraper"""

        # Older mozdownload API: `directory`/`target`/`final_url` instead of
        # the newer `destination`/`filename`/`url` names used elsewhere.
        # `tests` is a module-level list of scenario dicts (defined outside
        # this chunk) mapping scraper kwargs to expected target/target_url.
        for entry in tests:
            scraper = TinderboxScraper(directory=self.temp_dir,
                                       version=None,
                                       base_url=self.wdir,
                                       log_level='ERROR',
                                       **entry['args'])
            expected_target = os.path.join(self.temp_dir, entry['target'])
            self.assertEqual(scraper.target, expected_target)
            # NOTE(review): urllib.unquote is Python 2 only; under Python 3
            # this would be urllib.parse.unquote.
            self.assertEqual(urllib.unquote(scraper.final_url),
                             urljoin(self.wdir, entry['target_url']))