Example #1
    def test_cd(self):
        cwd = Path.cwd()

        with cd(cwd.parent):
            self.assertEqual(Path.cwd(), cwd.parent)

        self.assertEqual(Path.cwd(), cwd)
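
The cd context manager used throughout these examples is not shown in any of the snippets. Below is a minimal sketch of the pattern its usage suggests (cd(path) here, and cd('artifacts', mkdir=True) in Example #12): switch the working directory on entry and restore it on exit. This is an illustration only, not autograde's actual implementation.

import os
from contextlib import contextmanager
from pathlib import Path


@contextmanager
def cd_sketch(path, mkdir=False):
    """Temporarily change the working directory (illustrative sketch)."""
    previous = Path.cwd()
    path = Path(path)

    if mkdir:
        # Example #12 passes mkdir=True to create the target directory first
        path.mkdir(parents=True, exist_ok=True)

    os.chdir(path)
    try:
        yield path
    finally:
        # restore the previous working directory even if the body raised
        os.chdir(previous)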
Example #2
    def test_execute(self):
        nb_path = PROJECT_ROOT.joinpath('demo', 'notebook.ipynb')
        t_path = PROJECT_ROOT.joinpath('demo', 'test.py')
        c_path = PROJECT_ROOT.joinpath('demo', 'context')

        # load test as module
        spec = import_util.spec_from_file_location('nbtest', t_path)
        nbtest = import_util.module_from_spec(spec)
        spec.loader.exec_module(nbtest)

        with TemporaryDirectory() as path, cd(path):
            self.assertEqual(5, nbtest.nbt.execute(args=(str(nb_path), '--context', str(c_path))))
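
For reference, the dynamic import above relies on import_util, which is presumably an alias for the standard importlib.util module. A self-contained version of the same load-file-as-module pattern (the relative path stands in for PROJECT_ROOT/demo/test.py):

from importlib import util as import_util

spec = import_util.spec_from_file_location('nbtest', 'demo/test.py')  # placeholder path
nbtest = import_util.module_from_spec(spec)
spec.loader.exec_module(nbtest)  # runs the file and fills the module namespace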
Example #3
    def test_custom_root(self):
        with TemporaryDirectory() as temp, cd(temp):
            os.mkdir('root')

            with Path('root').joinpath('foo').open(mode='wb') as f:
                f.write(b'FOO')

            loader = ArtifactLoader('root')

            self.assertEqual(b'FOO', loader['foo'])
            with self.assertRaises(FileNotFoundError):
                _ = loader['bar']
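
ArtifactLoader itself is not shown in these snippets. A minimal sketch that would satisfy the test above, assuming it is constructed with a root directory, item access returns the file's raw bytes, and a missing file propagates FileNotFoundError; not the actual implementation:

from pathlib import Path


class ArtifactLoaderSketch:
    """Read-only, dict-like access to files below a root directory (illustrative sketch)."""

    def __init__(self, root):
        self._root = Path(root)

    def __getitem__(self, name) -> bytes:
        # open() raises FileNotFoundError for missing artifacts, matching the test
        with self._root.joinpath(name).open(mode='rb') as f:
            return f.read()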
Example #4
def cmd_summary(args):
    """Generate human & machine readable summary of results"""

    from autograde.cli.util import load_patched, render, list_results, merge_results, b64str, plot_fraud_matrix, \
        plot_score_distribution, summarize_results

    path = Path(args.result or Path.cwd()).expanduser().absolute()
    assert path.is_dir(), f'{path} is not a directory'
    include_similarities = args.similarities

    results = list()
    sources = dict()
    filenames = []
    for path_ in list_results(path):
        logger.debug(f'read {path_}')
        filenames.append(path_.absolute())

        with mount_tar(path_) as tar, cd(tar):
            r = load_patched()
            results.append(r)

            with open('code.py', mode='rt') as f:
                sources[r.checksum] = f.read()

    # merge results
    results_df = merge_results(results)
    logger.debug('store raw.csv')
    results_df.to_csv(path.joinpath('raw.csv'), index=False)

    # summarize results
    summary_df = summarize_results(results, filenames, include_tasks=True)
    logger.debug('store summary.csv')
    summary_df.to_csv(path.joinpath('summary.csv'), index=False)

    # plots embedded into the HTML summary
    plots = dict(score_distribution=b64str(plot_score_distribution(summary_df)))
    if include_similarities:
        plots['similarities'] = b64str(plot_fraud_matrix(sources))

    logger.info('render summary.html')
    with open(path.joinpath('summary.html'), mode='wt') as f:
        f.write(
            render('summary.html',
                   title='summary',
                   summary=summary_df,
                   plots=plots))

    return 0
Example #5
    def test_tar_mount(self):
        path = Path('fnord.tar')

        with TemporaryDirectory() as dir_, cd(dir_):
            # reading a non-existing archive fails
            with self.assertRaises(FileNotFoundError):
                with mount_tar(path):
                    pass

            self.assertFalse(path.exists())

            # write empty archive
            with mount_tar(path, mode='w'):
                pass

            self.assertTrue(path.exists())

            # append some contents
            with mount_tar(path, mode='a') as tar, cd(tar):
                with open('foo', mode='wt') as f:
                    f.write('FOO')

            # see if changes persisted
            with mount_tar(path) as tar, cd(tar):
                self.assertTrue(Path('foo').exists())
                self.assertFalse(Path('bar').exists())

            # overwrite archive
            with mount_tar(path, mode='w') as tar, cd(tar):
                with open('bar', mode='wb') as f:
                    f.write(b'BAR')

            # see if changes persisted
            with mount_tar(path) as tar, cd(tar):
                self.assertFalse(Path('foo').exists())
                self.assertTrue(Path('bar').exists())
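
mount_tar is the other helper these examples revolve around: it exposes a tar archive as a directory that can be combined with cd and writes changes back for the writable modes ('w', 'a', 'w:xz'). A rough, standard-library-only sketch of that extract/work/repack behaviour follows; it illustrates what the test above expects and is not autograde's implementation.

import tarfile
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory


@contextmanager
def mount_tar_sketch(path, mode='r'):
    path = Path(path)

    # reading a non-existing archive fails, as checked in the test above
    if mode.startswith('r') and not path.exists():
        raise FileNotFoundError(path)

    with TemporaryDirectory() as tmp:
        # 'a' starts from the current contents, 'w' starts from an empty directory
        if path.exists() and not mode.startswith('w'):
            with tarfile.open(path, mode='r') as tar:
                tar.extractall(tmp)

        yield tmp

        if not mode.startswith('r'):
            # rewrite the archive from the working directory, keeping any
            # compression suffix such as 'xz' from modes like 'w:xz'
            compression = mode.split(':')[1] if ':' in mode else ''
            with tarfile.open(path, mode=f'w:{compression}') as tar:
                for item in Path(tmp).iterdir():
                    tar.add(item, arcname=item.name)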
Example #6
def cmd_report(args):
    """Inject a human readable report (standalone HTML) into result archive(s)"""
    from autograde.cli.util import load_patched, render, list_results

    for path in list_results(args.result):
        logger.info(f'render report for {path}')
        with mount_tar(path, mode='a') as tar, cd(tar):
            results = load_patched()
            with open('report.html', mode='wt') as f:
                f.write(
                    render('report.html',
                           title='report',
                           id=results.checksum,
                           results={results.checksum: results},
                           summary=results.summary()))

    return 0
Example #7
    def test__grade_notebook(self):
        nb_path = PROJECT_ROOT.joinpath('demo', 'notebook.ipynb')
        t_path = PROJECT_ROOT.joinpath('demo', 'test.py')
        c_path = PROJECT_ROOT.joinpath('demo', 'context')

        with open(nb_path, mode='rb') as f:
            sha256_sum = sha256(f.read()).hexdigest()

        # load test as module
        spec = import_util.spec_from_file_location('nbtest', t_path)
        nbtest = import_util.module_from_spec(spec)
        spec.loader.exec_module(nbtest)

        with TemporaryDirectory() as path, cd(path):
            nbtest.nbt._grade_notebook(nb_path, context=c_path)

            rpath, *_ = Path(path).glob('results_*.tar.xz')

            with tarfile.open(rpath, mode='r') as tar:
                self.assertListEqual(sorted(tar.getnames())[1:], [
                    'artifacts',
                    'artifacts/bar.txt',
                    'artifacts/figures',
                    'artifacts/figures/fig_nb_3_1.png',
                    'artifacts/figures/fig_nb_8_1.png',
                    'artifacts/figures/fig_nb_8_2.png',
                    'artifacts/figures/fig_nb_9_1.png',
                    'artifacts/fnord.txt',
                    'artifacts/plot.png',
                    'code.py',
                    'notebook.ipynb',
                    'results.json'
                ])

                results = Results.from_json(tar.extractfile(tar.getmember('results.json')).read())

        self.assertEqual(results.version, autograde.__version__)

        self.assertEqual(results.checksum, sha256_sum)

        self.assertListEqual(results.excluded_artifacts, ['foo.txt'])

        assert_floats_equal(astuple(results.summary()), (16, 5, 6, 3, math.nan, 25))
Example #8
def cmd_patch(args):
    """Patch result archive(s) with results from a different run"""

    from autograde.cli.util import load_patched, list_results, inject_patch

    # load & index all patches
    patches = dict()
    for path in list_results(args.patch):
        with mount_tar(path) as tar:
            patch = load_patched(tar)
            patches[patch.checksum] = patch

    # inject patches
    for path in list_results(args.result):
        with mount_tar(path, mode='a') as tar, cd(tar):
            result = load_patched()
            if result.checksum in patches:
                inject_patch(patches[result.checksum])
            else:
                logger.warning(f'no patch for {path} found')

    return 0
Example #9
def inject_patch(results: Results, path='.', prefix: str = 'results') -> Path:
    """Store results as patch in mounted results archive"""
    path = Path(path)
    ct = len(list(path.glob(f'{prefix}_patch*.json')))

    with cd(path):
        with open(f'{prefix}_patch_{ct + 1:02d}.json', mode='wt') as f:
            json.dump(results.to_dict(), f, indent=4)

        # update report if it exists
        if Path('report.html').exists():
            results = load_patched()
            logger.debug(f'update report for {results.checksum}')
            with open('report.html', mode='wt') as f:
                f.write(
                    render('report.html',
                           title='report',
                           id=results.checksum,
                           results={results.checksum: results},
                           summary=results.summary()))

    return path
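
inject_patch only writes the numbered patch file; applying it is the job of load_patched, which the CLI commands above call after mounting an archive. A plausible sketch of that reading side, assuming a patch is simply a newer Results document for the same notebook and that some merge step combines the two; Results is the class used in Example #7 (its import is not shown in these snippets) and merge_patch is a hypothetical helper, not part of the library:

from pathlib import Path


def load_patched_sketch(path='.'):
    path = Path(path)

    # base results as written by the grading run
    with path.joinpath('results.json').open(mode='rt') as f:
        results = Results.from_json(f.read())

    # apply patches in the order inject_patch numbered them (results_patch_01.json, ...)
    for patch_path in sorted(path.glob('results_patch*.json')):
        with patch_path.open(mode='rt') as f:
            results = merge_patch(results, Results.from_json(f.read()))  # hypothetical merge step

    return results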
Example #10
    def test_exec_notebook(self):
        nb_path = PROJECT_ROOT.joinpath('demo', 'notebook.ipynb')
        with open(nb_path, mode='rt') as f:
            nb = f.read()

        with TemporaryDirectory() as path, cd(path):
            shutil.copytree(PROJECT_ROOT.joinpath('demo', 'context'),
                            '.',
                            dirs_exist_ok=True)

            # forward errors raised in notebook
            with self.assertRaises(AssertionError):
                with io.StringIO(nb) as nb_buffer, open(os.devnull,
                                                        'w') as stdout:
                    with exec_notebook(nb_buffer, file=stdout):
                        pass

            # cell timeout
            with self.assertRaises(TimeoutError):
                with io.StringIO(nb) as nb_buffer, open(os.devnull,
                                                        'w') as stdout:
                    with exec_notebook(nb_buffer,
                                       file=stdout,
                                       cell_timeout=0.05):
                        pass

            # ignore errors
            with io.StringIO(nb) as nb_buffer, io.StringIO() as stdout:
                with exec_notebook(nb_buffer, file=stdout,
                                   ignore_errors=True) as state:
                    pass
                stdout = stdout.getvalue()

        self.assertIn('__IB_FLAG__', state)
        self.assertIn('__IA_FLAG__', state)
        self.assertEqual(state.get('SOME_CONSTANT'), 42)
        self.assertIn('this goes to stdout', stdout)
Example #11
    def test_scenario_1(self):
        with TemporaryDirectory() as temp, cd(temp), warnings.catch_warnings():
            warnings.simplefilter('ignore')

            os.mkdir('results_1')
            os.mkdir('results_2')

            # run tests
            cli([
                'test',
                str(EXAMPLES.joinpath('test_1.py')),
                str(EXAMPLES), '-t', 'results_1'
            ])
            cli([
                'test',
                str(EXAMPLES.joinpath('test_2.py')),
                str(EXAMPLES), '-t', 'results_2'
            ])

            for path in list_results():
                with tarfile.open(path, mode='r') as tar:
                    self.assertListEqual(
                        sorted(tar.getnames())[1:], [
                            'artifacts', 'code.py', 'notebook.ipynb',
                            'results.json'
                        ])

            # create reports for test 2 results
            cli(['report', 'results_2'])

            for path in list_results('results_2'):
                with tarfile.open(path, mode='r') as tar:
                    self.assertTrue('report.html' in tar.getnames())

            # create test summaries
            cli(['summary', 'results_1'])
            cli(['summary', 'results_2'])

            summary_1 = pd.read_csv(Path('results_1', 'summary.csv'))
            summary_2 = pd.read_csv(Path('results_2', 'summary.csv'))

            assert_isclose(10., summary_1['score'].sum())
            assert_isclose(8., summary_2['score'].sum())
            assert_isclose(12., summary_1['max_score'].sum())
            assert_isclose(12., summary_2['max_score'].sum())
            self.assertEqual(2, sum(summary_1['duplicate']))
            self.assertEqual(2, sum(summary_2['duplicate']))

            # patch test 1 results and re-compute report + summary
            cli(['patch', 'results_1', 'results_2'])
            cli(['report', 'results_1'])
            cli(['summary', 'results_1'])

            for path in list_results('results_1'):
                with tarfile.open(path, mode='r') as tar:
                    self.assertTrue('report.html' in tar.getnames())

            summary_1 = pd.read_csv(Path('results_1', 'summary.csv'))

            assert_isclose(8., summary_1['score'].sum())
            assert_isclose(12., summary_1['max_score'].sum())
            self.assertEqual(2, sum(summary_1['duplicate']))

            # compute global summary
            cli(['summary', '.'])

            summary = pd.read_csv('summary.csv')

            assert_isclose(16., summary['score'].sum())
            assert_isclose(24., summary['max_score'].sum())
            self.assertTrue(all(summary['duplicate']))
Example #12
    def _grade_notebook(self, nb_path, target_dir=None, context=None):
        target_dir = target_dir or os.getcwd()

        # prepare notebook
        with open(nb_path, mode='rb') as f:
            nb_data = f.read()

        nb_hash = sha256(nb_data).hexdigest()
        nb_hash_short = nb_hash[:8]

        with cd(target_dir):
            archive = Path(f'results_{nb_hash_short}.tar.xz')

            if archive.exists():
                logger.debug(f'remove existing {archive}')
                archive.unlink()

            with ExitStack() as exec_test_stack:
                tar = exec_test_stack.enter_context(
                    mount_tar(archive, mode='w:xz'))
                exec_test_stack.enter_context(cd(tar))

                # store copy of notebook
                logger.debug('dump copy of original notebook')
                with open('notebook.ipynb', mode='wb') as f:
                    f.write(nb_data)

                # prepare context and execute notebook
                with open('code.py', mode='wt') as c, cd('artifacts',
                                                         mkdir=True):
                    # prepare execution context in file system
                    if context is not None:
                        logger.debug(f'copy context files from: {context}')
                        shutil.copytree(context, '.', dirs_exist_ok=True)

                    # build index of all files known before execution
                    index = set()
                    for path in Path('.').glob('**/*'):
                        if path.is_file():
                            with path.open(mode='rb') as f:
                                index.add(sha256(f.read()).hexdigest())

                    # actual notebook execution
                    try:
                        logger.debug('execute notebook')
                        state = exec_test_stack.enter_context(
                            exec_notebook(io.StringIO(nb_data.decode('utf-8')),
                                          file=c,
                                          ignore_errors=True,
                                          cell_timeout=self._cell_timeout,
                                          variables=self._variables))

                    except ValueError:
                        state = {}

                    # remove files that haven't changed
                    artifacts = []
                    artifacts_excluded = []
                    for path in Path('.').glob('**/*'):
                        if path.is_file():
                            delete_flag = False
                            with path.open(mode='rb') as f:
                                if sha256(f.read()).hexdigest() in index:
                                    artifacts_excluded.append(str(path))
                                    delete_flag = True
                                else:
                                    artifacts.append(str(path))

                            if delete_flag:
                                path.unlink()

                # infer meta information
                group = list(
                    map(lambda m: TeamMember(**m),
                        state.get('team_members', [])))

                if not group:
                    logger.warning(
                        f'Couldn\'t find valid information about team members in "{nb_path}"'
                    )

                # execute tests
                logger.debug('execute tests')
                results = Results(
                    title=self._title,
                    notebook=str(nb_path),
                    checksum=nb_hash,
                    team_members=group,
                    artifacts=sorted(artifacts),
                    excluded_artifacts=sorted(artifacts_excluded),
                    results=self._apply_cases(state))

                # store results as json
                logger.debug('dump results as json')
                with open('results.json', mode='wt') as f:
                    json.dump(results.to_dict(), fp=f, indent=4)

                # infer new, more readable name
                names = results.format_members(separator=',')
                archive_name_new = Path(
                    f'results_[{names}]_{nb_hash_short}.tar.xz')

            if archive_name_new.exists():
                logger.debug(f'remove existing {archive_name_new}')
                archive_name_new.unlink()

            archive.rename(archive_name_new)

        return results