Example #1
def start_jenkins():
    try:
        execfile(JENKINS_ENV, dict(__file__=JENKINS_ENV))
        print "Virtual environment activated successfully."
    except Exception as ex:
        print 'Could not activate virtual environment at "%s": %s.' % (
            JENKINS_ENV, str(ex))
        sys.exit(1)

    # do the imports here because they require the virtualenv to be activated
    from mozdownload import DirectScraper
    from mozprocess.processhandler import ProcessHandler

    # Download the Jenkins WAR file
    scraper = DirectScraper(url=JENKINS_URL, destination=JENKINS_WAR)
    scraper.download()

    # TODO: Start Jenkins as daemon
    print "Starting Jenkins"
    os.environ['JENKINS_HOME'] = os.path.join(HERE, 'jenkins-master')
    args = [
        'java', '-Xms2g', '-Xmx2g', '-XX:MaxPermSize=512M', '-Xincgc', '-jar',
        JENKINS_WAR
    ]
    proc = ProcessHandler(args)
    proc.run()
    return proc
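
The example returns the mozprocess ProcessHandler without waiting on it. A minimal sketch of how a caller might block on the returned handler, assuming the start_jenkins() defined above; wait() and kill() are standard ProcessHandler methods:

proc = start_jenkins()
try:
    # Block until the Jenkins process exits.
    proc.wait()
except KeyboardInterrupt:
    # Shut the process down on Ctrl-C.
    proc.kill()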
Example #2
def test_url_download(httpd, tmpdir):
    """test mozdownload direct url scraper"""
    filename = 'download_test.txt'
    test_url = urljoin(httpd.get_url(), filename)
    scraper = DirectScraper(url=test_url, destination=str(tmpdir))
    assert scraper.url == test_url
    assert scraper.filename == os.path.join(str(tmpdir), filename)

    scraper.download()
    assert os.path.isfile(os.path.join(str(tmpdir), scraper.filename))
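
The test above captures the DirectScraper contract the other examples rely on: the scraper is built from a url and a destination directory, filename is the destination joined with the file name taken from the URL, and download() fetches the file. A self-contained sketch of that pattern, with the URL and directory purely illustrative:

import os
import tempfile

from mozdownload import DirectScraper

# Illustrative values only; any reachable URL and writable directory will do.
url = 'https://example.com/files/download_test.txt'
destination = tempfile.mkdtemp()

scraper = DirectScraper(url=url, destination=destination)
print(scraper.filename)   # <destination>/download_test.txt
scraper.download()        # saves the file to scraper.filename
assert os.path.isfile(scraper.filename)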
Example #3
def tps_addon(pytestconfig, tmpdir_factory):
    # tps.xpi artifact of the latest mozilla-central TPS task, served through
    # the Taskcluster index.
    url = 'https://index.taskcluster.net/v1/task/' \
          'gecko.v2.mozilla-central.latest.firefox.addons.tps/' \
          'artifacts/public/tps.xpi'
    path = pytestconfig.getoption('tps')
    if path is None:
        # No add-on path supplied on the command line; download the add-on
        # into the pytest cache instead.
        cache_dir = str(pytestconfig.cache.makedir('tps'))
        scraper = DirectScraper(url, destination=cache_dir)
        path = scraper.download()
    yield path
Example #4
def test_url_download(httpd, tmpdir):
    """test mozdownload direct url scraper"""
    filename = 'download_test.txt'
    test_url = urljoin(httpd.get_url(), filename)
    scraper = DirectScraper(url=test_url, destination=str(tmpdir))
    assert scraper.url == test_url
    assert scraper.filename == os.path.join(str(tmpdir), filename)

    scraper.download()
    assert os.path.isfile(os.path.join(str(tmpdir), scraper.filename))
Example #5
def tps_addon(pytestconfig, tmpdir_factory):
    path = pytestconfig.getoption('tps')
    if path is not None:
        return path
    task_url = 'https://index.taskcluster.net/v1/task/' \
               'gecko.v2.mozilla-central.latest.firefox.addons.tps'
    task_id = requests.get(task_url).json().get('taskId')
    cache_dir = str(pytestconfig.cache.makedir('tps-{}'.format(task_id)))
    addon_url = 'https://queue.taskcluster.net/v1/task/' \
                '{}/artifacts/public/tps.xpi'.format(task_id)
    scraper = DirectScraper(addon_url, destination=cache_dir)
    return scraper.download()
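
The two URLs in this fixture follow the Taskcluster index-then-queue pattern: the index entry for gecko.v2.mozilla-central.latest.firefox.addons.tps yields the latest taskId, and the tps.xpi artifact is then requested from the queue for that task. A sketch of just that resolution step, with latest_tps_url as a hypothetical helper name:

import requests

def latest_tps_url():
    # Mirrors the lookup done in the fixture above.
    index_url = ('https://index.taskcluster.net/v1/task/'
                 'gecko.v2.mozilla-central.latest.firefox.addons.tps')
    task_id = requests.get(index_url).json().get('taskId')
    return ('https://queue.taskcluster.net/v1/task/'
            '{}/artifacts/public/tps.xpi'.format(task_id))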
Example #6
def tps_addon(pytestconfig, tmpdir_factory):
    path = pytestconfig.getoption('tps')
    if path is not None:
        return path
    task_url = 'https://index.taskcluster.net/v1/task/' \
               'gecko.v2.mozilla-central.latest.firefox.addons.tps'
    task_id = requests.get(task_url).json().get('taskId')
    cache_dir = str(pytestconfig.cache.makedir('tps-{}'.format(task_id)))
    addon_url = 'https://queue.taskcluster.net/v1/task/' \
                '{}/artifacts/public/tps.xpi'.format(task_id)
    scraper = DirectScraper(addon_url, destination=cache_dir)
    return scraper.download()
Example #7
    def test_url_download(self):
        test_url = 'https://mozqa.com/index.html'
        scraper = DirectScraper(url=test_url,
                                directory=self.temp_dir,
                                version=None)
        self.assertEqual(scraper.url, test_url)
        self.assertEqual(scraper.final_url, test_url)
        self.assertEqual(scraper.target,
                         os.path.join(self.temp_dir, 'index.html'))

        for attr in ['binary', 'binary_regex', 'path', 'path_regex']:
            self.assertRaises(NotImplementedError, getattr, scraper, attr)

        scraper.download()
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir,
                                                    scraper.target)))
Example #8
    def test_url_download(self):
        filename = 'download_test.txt'
        test_url = urljoin(self.wdir, filename)
        scraper = DirectScraper(url=test_url,
                                destination=self.temp_dir,
                                logger=self.logger)
        self.assertEqual(scraper.url, test_url)
        self.assertEqual(scraper.filename,
                         os.path.join(self.temp_dir, filename))

        for attr in ['binary', 'binary_regex', 'path', 'path_regex']:
            self.assertRaises(errors.NotImplementedError, getattr, scraper, attr)

        scraper.download()
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir,
                                                    scraper.filename)))
Example #9
    def test_url_download(self):
        filename = 'download_test.txt'
        test_url = urljoin(self.wdir, filename)
        scraper = DirectScraper(url=test_url,
                                destination=self.temp_dir,
                                logger=self.logger)
        self.assertEqual(scraper.url, test_url)
        self.assertEqual(scraper.filename,
                         os.path.join(self.temp_dir, filename))

        for attr in ['binary', 'binary_regex', 'path', 'path_regex']:
            self.assertRaises(errors.NotImplementedError, getattr, scraper, attr)

        scraper.download()
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir,
                                                    scraper.filename)))
Example #10
@pytest.mark.parametrize('attr', ['binary', 'binary_regex', 'path', 'path_regex'])
def test_implementation_error(httpd, tmpdir, attr):
    """test implementations available"""
    filename = 'download_test.txt'
    test_url = urljoin(httpd.get_url(), filename)
    scraper = DirectScraper(url=test_url, destination=str(tmpdir))
    with pytest.raises(errors.NotImplementedError):
        getattr(scraper, attr)
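
The attributes exercised here are the ones DirectScraper deliberately leaves unimplemented, since a plain URL carries no build metadata. A hedged sketch of how calling code might treat errors.NotImplementedError as "no binary information available" and fall back to the URL itself (the URL and destination are illustrative):

from mozdownload import DirectScraper, errors

scraper = DirectScraper(url='https://example.com/file.txt', destination='.')
try:
    print(scraper.binary)
except errors.NotImplementedError:
    # DirectScraper has no notion of a build binary; use the URL instead.
    print(scraper.url)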
Example #11
    def test_url_download(self):
        test_url = 'https://mozqa.com/index.html'
        scraper = DirectScraper(url=test_url,
                                directory=self.temp_dir,
                                version=None,
                                log_level='ERROR')
        self.assertEqual(scraper.url, test_url)
        self.assertEqual(scraper.final_url, test_url)
        self.assertEqual(scraper.target,
                         os.path.join(self.temp_dir, 'index.html'))

        for attr in ['binary', 'binary_regex', 'path', 'path_regex']:
            self.assertRaises(NotImplementedError, getattr, scraper, attr)

        scraper.download()
        self.assertTrue(
            os.path.isfile(os.path.join(self.temp_dir, scraper.target)))
Example #12
def main():
    try:
        execfile(JENKINS_ENV, dict(__file__=JENKINS_ENV))
        print "Virtual environment activated successfully."
    except Exception as ex:
        print 'Could not activate virtual environment at "%s": %s.' % (JENKINS_ENV, str(ex))
        sys.exit(1)

    # Download the Jenkins WAR file
    from mozdownload import DirectScraper
    scraper = DirectScraper(url=JENKINS_URL, destination=JENKINS_WAR)
    scraper.download()

    # TODO: Start Jenkins as daemon
    print "Starting Jenkins"
    os.environ['JENKINS_HOME'] = os.path.join(HERE, 'jenkins-master')
    args = ['java', '-Xms2g', '-Xmx2g', '-XX:MaxPermSize=512M',
            '-Xincgc', '-jar', JENKINS_WAR]
    try:
        check_call(args)
    except CalledProcessError as e:
        sys.exit(e.returncode)
Example #13
def start_jenkins():
    try:
        execfile(JENKINS_ENV, dict(__file__=JENKINS_ENV))
        print "Virtual environment activated successfully."
    except Exception as ex:
        print 'Could not activate virtual environment at "%s": %s.' % (JENKINS_ENV, str(ex))
        sys.exit(1)

    # do the imports here because they require the virtualenv to be activated
    from mozdownload import DirectScraper
    from mozprocess.processhandler import ProcessHandler

    # Download the Jenkins WAR file
    scraper = DirectScraper(url=JENKINS_URL, destination=JENKINS_WAR)
    scraper.download()

    # TODO: Start Jenkins as daemon
    print "Starting Jenkins"
    os.environ['JENKINS_HOME'] = os.path.join(HERE, 'jenkins-master')
    args = ['java', '-Xms2g', '-Xmx2g', '-XX:MaxPermSize=512M',
            '-Xincgc', '-jar', JENKINS_WAR]
    proc = ProcessHandler(args)
    proc.run()
    return proc