Example #1
0
def _test_run_branches(tmpdir, dvcs, conf, machine_file, range_spec,
                       branches, initial_commit):
    """Run benchmarks for *range_spec* and check the result files produced.

    ``range_spec`` must be ``'NEW'`` (result set matches exactly) or
    ``'ALL'`` (expected results are a subset of what exists).
    """
    # The commits we expect results for: the initial commit plus the
    # current head of every branch.
    commits = [initial_commit] + [dvcs.get_hash(b) for b in branches]

    # Run the benchmarks over the requested range.
    Run.run(conf, range_spec=range_spec,
            _machine_file=machine_file, quick=True)

    # Build the set of result file names we expect to have been written.
    expected = {'machine.json'}
    for sha in commits:
        for colorama_ver in ['0.3.1', '0.3.3']:
            expected.add('{0}-py{1[0]}.{1[1]}-colorama{2}-six.json'.format(
                sha[:8], sys.version_info, colorama_ver))

    result_files = os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))

    if range_spec == 'NEW':
        assert set(result_files) == expected
    elif range_spec == 'ALL':
        assert set(expected).difference(result_files) == set([])
    else:
        raise ValueError()
Example #2
0
 def init_results():
     """Wipe any previous results, then benchmark only the initial commit."""
     # Remove stale results from earlier runs, if present.
     results_dir = os.path.join(tmpdir, 'results_workflow')
     if os.path.isdir(results_dir):
         shutil.rmtree(results_dir)
     # "<sha>^!" is git range notation for "this single commit".
     Run.run(conf,
             range_spec=initial_commit + "^!",
             bench=["time_secondary.track_value"],
             _machine_file=join(tmpdir, 'asv-machine.json'),
             quick=True,
             skip_successful=True,
             skip_failed=True)
Example #3
0
def test_run_python_same(capsys, basic_conf):
    """Run with python="same": use the current interpreter, no env setup."""
    tmpdir, local, conf = basic_conf

    Run.run(conf, _machine_file=join(tmpdir, 'asv-machine.json'), python="same")
    output, _ = capsys.readouterr()

    # The failing benchmark is reported, and the tracking benchmark
    # prints its value.
    assert re.search("time_exception.*failed", output, re.S)
    assert re.search(r"Running time_secondary.track_value\s+42.0", output)

    # With python="same" there must be no repository clone and no install.
    assert "Cloning" not in output
    assert "Installing" not in output
Example #4
0
def test_run_publish(capfd, basic_conf):
    """Exercise a typical complete run/publish workflow.

    Runs benchmarks over a commit range, publishes the HTML report,
    checks the parameterized-benchmark JSON data format, verifies the
    ``skip_*`` options, the ``EXISTING`` range spec, and that Publish
    can regenerate a missing ``benchmarks.json``.
    """
    tmpdir, local, conf, machine_file = basic_conf

    # Tests a typical complete run/publish workflow
    Run.run(conf, range_spec="master~5..master", steps=2,
            _machine_file=machine_file, quick=True, show_stderr=True)
    text, err = capfd.readouterr()

    assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
    assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
    assert 'asv: benchmark timed out (timeout 0.1s)' in text

    Publish.run(conf)

    assert isfile(join(tmpdir, 'html', 'index.html'))
    assert isfile(join(tmpdir, 'html', 'index.json'))
    assert isfile(join(tmpdir, 'html', 'asv.js'))
    assert isfile(join(tmpdir, 'html', 'asv.css'))

    # Check parameterized test json data format
    filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64', 'branch-master',
                              'colorama-0.3.3',  'cpu-Blazingly fast', 'machine-orangutan',
                              'os-GNU', 'Linux', 'python-*', 'ram-128GB',
                              'six', 'params_examples.time_skip.json'))[0]
    with open(filename, 'r') as fp:
        data = json.load(fp)
        assert len(data) == 2
        assert isinstance(data[0][0], six.integer_types)  # date
        assert len(data[0][1]) == 3
        assert len(data[1][1]) == 3
        assert isinstance(data[0][1][0], float)
        assert isinstance(data[0][1][1], float)
        assert data[0][1][2] is None

    # Check that the skip options work
    capfd.readouterr()
    # Consistency fix: use the fixture-provided machine_file here, as the
    # earlier Run.run calls do, instead of re-deriving the path (this
    # assumes machine_file is tmpdir/asv-machine.json, as the rest of the
    # test implies -- TODO confirm against the basic_conf fixture).
    Run.run(conf, range_spec="master~5..master", steps=2,
            _machine_file=machine_file, quick=True,
            skip_successful=True, skip_failed=True)
    Run.run(conf, range_spec="master~5..master", steps=2,
            _machine_file=machine_file, quick=True,
            skip_existing_commits=True)
    text, err = capfd.readouterr()
    # Everything was skipped, so no benchmarks were actually run.
    assert 'Running benchmarks.' not in text

    # Check EXISTING works
    Run.run(conf, range_spec="EXISTING",
            _machine_file=machine_file, quick=True)

    # Remove the benchmarks.json file to make sure publish can
    # regenerate it

    os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))

    Publish.run(conf)
Example #5
0
def test_workflow(tmpdir):
    """Complete run/publish workflow against the remote asv repository."""
    tmpdir = six.text_type(tmpdir)
    local = abspath(dirname(__file__))
    os.chdir(tmpdir)

    # Put a copy of the machine description where the run expects it.
    machine_file = join(tmpdir, 'asv-machine.json')
    shutil.copyfile(join(local, 'asv-machine.json'), machine_file)

    conf = config.Config.from_json({
        'env_dir': join(tmpdir, 'env'),
        'benchmark_dir': join(local, 'benchmark'),
        'results_dir': join(tmpdir, 'results_workflow'),
        'html_dir': join(tmpdir, 'html'),
        'repo': 'https://github.com/spacetelescope/asv.git',
        'project': 'asv',
        'matrix': {
            "six": [None],
            "psutil": ["1.2", "1.1"]
        }
    })

    Run.run(conf, range_spec="initial..master", steps=2,
            _machine_file=machine_file, quick=True)

    assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
    assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2

    Publish.run(conf)

    # Every piece of the generated static site must be present.
    for page in ('index.html', 'index.json', 'asv.js', 'asv.css'):
        assert exists(join(tmpdir, 'html', page))

    Run.run(conf, range_spec="EXISTING",
            _machine_file=machine_file, quick=True)

    # Remove the benchmarks.json file to make sure publish can
    # regenerate it
    os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))

    Publish.run(conf)
Example #6
0
def test_workflow(tmpdir):
    """Complete run/publish workflow using the machine file shipped with the tests."""
    tmpdir = six.text_type(tmpdir)
    local = abspath(dirname(__file__))
    os.chdir(tmpdir)

    # This variant reads the machine description directly from the test
    # directory rather than copying it into tmpdir first.
    machine_file = join(local, 'asv-machine.json')

    conf = config.Config.from_json({
        'env_dir': join(tmpdir, 'env'),
        'benchmark_dir': join(local, 'benchmark'),
        'results_dir': join(tmpdir, 'results_workflow'),
        'html_dir': join(tmpdir, 'html'),
        'repo': 'https://github.com/spacetelescope/asv.git',
        'project': 'asv',
        'matrix': {
            "six": [None],
            "psutil": ["1.2", "1.1"]
        }
    })

    Run.run(conf, range_spec="initial..master", steps=2,
            _machine_file=machine_file, quick=True)

    assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
    assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2

    Publish.run(conf)

    # Every piece of the generated static site must be present.
    for page in ('index.html', 'index.json', 'asv.js', 'asv.css'):
        assert exists(join(tmpdir, 'html', page))

    Run.run(conf, range_spec="EXISTING",
            _machine_file=machine_file, quick=True)

    # Remove the benchmarks.json file to make sure publish can
    # regenerate it
    os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))

    Publish.run(conf)
Example #7
0
File: test_dev.py  Project: ericdill/asv
def test_run_python_same(basic_conf):
    """Run with python="same": use the current interpreter, no env setup."""
    tmpdir, local, conf = basic_conf

    # Capture everything the run prints to stdout; the swap is undone
    # in the finally clause even if the run raises.
    captured = StringIO()
    saved_stdout = sys.stdout
    try:
        sys.stdout = captured
        Run.run(conf, _machine_file=join(tmpdir, 'asv-machine.json'), python="same")
    finally:
        sys.stdout = saved_stdout

    text = captured.getvalue()

    assert re.search("time_exception.*failed", text, re.S)
    assert re.search(r"Running time_secondary.track_value\s+42.0", text)

    # Check that it did not clone or install
    assert "Cloning" not in text
    assert "Installing" not in text
Example #8
0
def basic_html(request):
    """Build a benchmarked and published test project; return (conf, dvcs)."""
    tmpdir = tempfile.mkdtemp()
    # Clean up the temporary tree when the requesting test finishes.
    request.addfinalizer(lambda: shutil.rmtree(tmpdir))

    local = abspath(dirname(__file__))
    cwd = os.getcwd()
    os.chdir(tmpdir)
    try:
        machine_file = join(tmpdir, 'asv-machine.json')
        shutil.copyfile(join(local, 'asv-machine.json'), machine_file)

        # A throwaway git repository with ten commits to benchmark.
        repo = tools.generate_test_repo(tmpdir, list(range(10)))

        conf = config.Config.from_json({
            'env_dir': join(tmpdir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(tmpdir, 'results_workflow'),
            'html_dir': join(tmpdir, 'html'),
            'repo': repo.path,
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {
                "six": [None],
                "colorama": ["0.3.1", "0.3.3"]
            }
        })

        Run.run(conf, range_spec="master~5..master", steps=3,
                _machine_file=machine_file, quick=True)
        Publish.run(conf)
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(cwd)

    return conf, repo
Example #9
0
File: test_run.py  Project: mtreinish/asv
def test_format_durations():
    """Check the ASCII-table rendering of per-benchmark durations."""
    durations = {'foo': 1, 'bar': 2, 'quux': 3}

    # Ask for only the 2 slowest entries; the remainder is folded into a
    # "..." row, and a grand total (6.00s) is appended.
    msg = Run.format_durations(durations, 2)
    # NOTE(review): the expected table contains significant trailing
    # whitespace on several lines -- do not strip or reflow this literal.
    expected = textwrap.dedent("""\
    =========== ================
     benchmark   total duration 
    ----------- ----------------
        quux         3.00s      
        bar          2.00s      
        ...           ...       
       total         6.00s      
    =========== ================""")
    assert msg == expected
Example #10
0
def test_web_regressions(browser, tmpdir):
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver import ActionChains

    tmpdir = six.text_type(tmpdir)
    local = abspath(dirname(__file__))
    cwd = os.getcwd()

    os.chdir(tmpdir)
    try:
        machine_file = join(tmpdir, 'asv-machine.json')

        shutil.copyfile(join(local, 'asv-machine.json'),
                        machine_file)

        values = [[x]*2 for x in [0, 0, 0, 0, 0,
                                  1, 1, 1, 1, 1,
                                  3, 3, 3, 3, 3,
                                  2, 2, 2, 2, 2]]
        dvcs = tools.generate_test_repo(tmpdir, values)
        repo_path = dvcs.path

        first_tested_commit_hash = dvcs.get_hash('master~14')

        conf = config.Config.from_json({
            'env_dir': join(tmpdir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(tmpdir, 'results_workflow'),
            'html_dir': join(tmpdir, 'html'),
            'repo': repo_path,
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {},
            'regressions_first_commits': {
                '.*': first_tested_commit_hash
            },
        })

        Run.run(conf, range_spec="ALL", bench='params_examples.track_find_test',
                _machine_file=machine_file, show_stderr=True, quick=True)
        Publish.run(conf)
    finally:
        os.chdir(cwd)

    bad_commit_hash = dvcs.get_hash('master~9')

    with tools.preview(conf.html_dir) as base_url:
        browser.get(base_url)

        regressions_btn = browser.find_element_by_link_text('Show regressions')
        regressions_btn.click()

        # Check that the expected links appear in the table
        regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')
        regression_2 = browser.find_element_by_link_text('params_examples.track_find_test(2)')
        bad_hash_link = browser.find_element_by_link_text(bad_commit_hash[:8])

        href = regression_1.get_attribute('href')
        assert '/#params_examples.track_find_test?' in href
        assert 'time=' in href

        # Sort the tables vs. benchmark name (PhantomJS doesn't allow doing it via actionchains)
        browser.execute_script("$('thead th').eq(0).stupidsort('asc')")
        WebDriverWait(browser, 5).until(EC.text_to_be_present_in_element(
            ('xpath', '//table[1]/tbody/tr[1]/td[1]'), 'params_examples.track_find_test(1)'
            ))

        # Check the contents of the table
        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 2
        cols1 = [td.text for td in table_rows[0].find_elements_by_xpath('td')]
        cols2 = [td.text for td in table_rows[1].find_elements_by_xpath('td')]

        assert cols1[0] == 'params_examples.track_find_test(1)'
        assert cols2[0] == 'params_examples.track_find_test(2)'

        assert re.match(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d.\d+Z$', cols1[1])
        assert re.match(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d.\d+Z$', cols2[1])

        assert cols1[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00', 'Ignore']
        assert cols2[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00', 'Ignore']

        # Check that the ignore buttons work as expected
        buttons = [button for button in browser.find_elements_by_xpath('//button')
                   if button.text == 'Ignore']
        buttons[0].click()

        # The button should disappear, together with the link
        WebDriverWait(browser, 5).until_not(EC.visibility_of(buttons[0]))
        WebDriverWait(browser, 5).until_not(EC.visibility_of(regression_1))

        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 1

        # There's a second button for showing the links, clicking
        # which makes the elements reappear
        show_button = [button for button in browser.find_elements_by_xpath('//button')
                       if button.text == 'Show ignored regressions...'][0]
        show_button.click()

        regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')
        WebDriverWait(browser, 5).until(EC.visibility_of(regression_1))

        table_rows = browser.find_elements_by_xpath('//table[2]/tbody/tr')
        assert len(table_rows) == 1

        # There's a config sample element
        pre_div = browser.find_element_by_xpath('//pre')
        assert "params_examples\\\\.track_find_test\\\\(1\\\\)" in pre_div.text

        # There's an unignore button that moves the element back to the main table
        unignore_button = [button for button in browser.find_elements_by_xpath('//button')
                           if button.text == 'Unignore'][0]
        unignore_button.click()

        browser.find_elements_by_xpath('//table[1]/tbody/tr[2]') # wait until the table has two rows

        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 2

        # Check that a plot of some sort appears on mouseover.  The
        # page needs to be scrolled first so that the mouseover popup
        # has enough space to appear.
        regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')

        y = regression_1.location['y']
        browser.execute_script('window.scrollTo(0, {0})'.format(y - 200))

        chain = ActionChains(browser)
        chain.move_to_element(regression_1)
        chain.perform()

        popover = browser.find_element_by_css_selector('div.popover-content')
        flotplot = browser.find_element_by_css_selector('canvas.flot-base')