コード例 #1
0
def run_build(options: argparse.Namespace) -> None:
    """Build every notebook job selected by *options*.

    Skips the whole build when a CircleCI pull-request environment is
    detected (``CIRCLE_PULL_REQUEST`` set).  In ``BuildMode.Single`` the
    jobs run sequentially in-process; otherwise they are delegated to
    ``build_artifacts_concurrently``.  Produced artifact paths are logged.
    """
    # CircleCI exports CIRCLE_PULL_REQUEST for PR builds; skip those.
    if os.environ.get('CIRCLE_PULL_REQUEST') is not None:
        logger.info('Pull Request detected. Skipping Build')
        return None

    validate_and_parse_inputs(options)

    # Find Builds
    jobs = list(find_build_jobs(options.project_path, options.collection_names,
                                options.category_names, options.notebook_names,
                                options.force_build))

    # Run Build
    artifact_paths = {}
    if options.build_mode is BuildMode.Single:
        for job in jobs:
            job_context = generate_job_context(job)
            run_job_context(job_context, True)
            for notebook in job_context.notebooks:
                # One artifact per collection/category pair.
                hash_name = f'{notebook.collection_name}-{notebook.category_name}'
                artifact_paths[hash_name] = notebook.artifact.path

    else:
        # Concurrent path mutates artifact_paths in place.
        build_artifacts_concurrently(options, jobs, artifact_paths)

    for name, path in artifact_paths.items():
        logger.info(f'Artifact[{name}] created here: {path}')
コード例 #2
0
def test__run_job_context(quick_build_collection):  # noqa F811
    """Running a job context writes its logs, scripts, notebooks,
    artifacts, metadata, and venv to the expected on-disk locations."""
    from nbcollection.ci.constants import SCANNER_BUILD_LOG_DIR, SCANNER_BUILD_DIR
    from nbcollection.ci.scanner.utils import run_job_context, generate_job_context, find_build_jobs

    # The enumerate index in the original was never used; iterate directly.
    for job in find_build_jobs(quick_build_collection):
        job_context = generate_job_context(job)
        run_job_context(job_context)

        # Validate Run completed: one stdout and one stderr log per job.
        log_prefix = f'{job.collection.name}-{job.category.name}'
        for log_suffix in ('stdout', 'stderr'):
            log_path = os.path.join(SCANNER_BUILD_LOG_DIR,
                                    f'{log_prefix}.{log_suffix}.log')
            assert os.path.exists(log_path)

        assert os.path.exists(job_context.setup_script)
        for notebook in job_context.notebooks:
            assert os.path.exists(notebook.path)
            assert os.path.exists(notebook.artifact.path)
            assert os.path.exists(notebook.metadata.path)

        # The context's build dir matches SCANNER_BUILD_DIR/<collection>/<category>
        # and contains the virtualenv created by the run.
        build_dir = os.path.join(SCANNER_BUILD_DIR, job.collection.name,
                                 job.category.name)
        venv_dirpath = os.path.join(build_dir, 'venv')
        assert os.path.exists(venv_dirpath)
        assert build_dir == job_context.build_dir
コード例 #3
0
def test__reset_notebook_execution__interface(
        executed_notebook_collection):  # noqa F811
    """After run_reset_notebook_execution, every built notebook cell has
    no execution_count and no outputs."""
    import json
    import os

    from nbcollection.ci.constants import SCANNER_BUILD_DIR
    from nbcollection.ci.scanner.utils import find_build_jobs, generate_job_context
    from nbcollection.ci.metadata.factory import run_reset_notebook_execution
    from nbcollection_tests.ci.tools.utils import collection_set_to_namespace

    options = collection_set_to_namespace(executed_notebook_collection)
    run_reset_notebook_execution(options)
    for job in find_build_jobs(options.project_path, options.collection_names,
                               options.category_names, options.notebook_names):
        # Called for its side effects (it materializes the job's build dir);
        # the unused binding the original kept (noqa F841) is dropped.
        generate_job_context(job)
        for notebook in job.category.notebooks:
            notebook_path = os.path.join(SCANNER_BUILD_DIR,
                                         job.semantic_path(),
                                         f'{notebook.name}.ipynb')
            assert os.path.exists(notebook_path)
            with open(notebook_path, 'rb') as stream:
                notebook_data = json.loads(stream.read().decode(ENCODING))

            # The original enumerated here but never used the index.
            for cell in notebook_data['cells']:
                assert cell.get('execution_count', None) is None
                assert len(cell.get('outputs', [])) == 0
コード例 #4
0
def pull_request_build(
        url: str,
        project_path: str,
        collection_names: typing.Optional[typing.List[str]] = None,
        category_names: typing.Optional[typing.List[str]] = None) -> None:
    """Clone a Github Pull Request into *project_path* and build its jobs.

    Args:
        url: The pull-request URL; only Github PR URLs are supported.
        project_path: Local directory the repository is cloned into.
        collection_names: Optional collection filters (default: none).
        category_names: Optional category filters (default: none).

    Raises:
        NotImplementedError: When *url* is not a Github Pull Request URL.
    """
    # Mutable-default fix: the original used `= []` defaults, which are
    # shared across calls; normalize None to fresh lists instead.
    collection_names = [] if collection_names is None else collection_names
    category_names = [] if category_names is None else category_names

    repo_path, repo_type = select_repo_type(url)
    url_parts = select_url_type(url, repo_type)
    if url_parts.url_type is URLType.GithubPullRequest:
        repo_path = project_path
        if not os.path.exists(repo_path):
            git.Repo.clone_from(url_parts.https_url, repo_path)

        repo = git.Repo(repo_path)
        RemoteParts.ParseURLToRemoteParts(repo.remotes.origin.url)
        pr_info = obtain_pull_request_info(url_parts)
        # Register the PR author's fork as a remote if not already present.
        if getattr(repo.remotes, pr_info.source.org, None) is None:
            repo.create_remote(pr_info.source.org, pr_info.source.https_url)

        repo_info = extract_repo_info(repo, pr_info)
        build_jobs = {}

        # NOTE(review): this condition looks inverted — the PR-author-commit
        # path ignores collection_names entirely, while the else-branch feeds
        # an empty filter list to find_build_jobs.  Confirm intent before
        # changing; behavior preserved here.
        if len(collection_names) > 0:
            for job in select_build_jobs_by_pr_author_commits(repo_info, pr_info):
                if job.semantic_path() not in build_jobs:
                    build_jobs[job.semantic_path()] = job
        else:
            for job in find_build_jobs(repo_info.repo.working_dir, collection_names, category_names):
                build_jobs[job.semantic_path()] = job

        # Bug fix: the original built `job` (the leftover loop variable from
        # the selection loops above) instead of the job for this entry.
        for build_job in build_jobs.values():
            job_context = generate_job_context(build_job)
            run_job_context(job_context, True)

    else:
        raise NotImplementedError(f'Unable to parse URL[{url}]')
コード例 #5
0
def run_extract_metadata(options: argparse.Namespace) -> None:
    """Extract metadata for every notebook selected by *options*.

    Metadata extraction happens as a side effect of ``extract_metadata``;
    nothing is returned.  (The previous ``-> Metadata`` annotation did not
    match the body, which never returns a value.)
    """
    validate_and_parse_inputs(options)
    for job in find_build_jobs(options.project_path, options.collection_names,
                               options.category_names, options.notebook_names):
        job_context = generate_job_context(job)
        for notebook_context in job_context.notebooks:
            extract_metadata(notebook_context)
コード例 #6
0
def test__extract_metadata__interface(metadata_rich_notebooks):  # noqa F811
    """run_extract_metadata produces metadata files whose title/description
    match what a direct extract_metadata call yields for the same notebook.

    NOTE: this test deliberately relies on Python's for-loop variable
    leakage — `job_idx`, `notebook`, and `metadata` are read after the
    loops that bound them; do not "fix" that without rethinking the test.
    """
    import json
    import os

    from nbcollection.ci.constants import SCANNER_BUILD_DIR
    from nbcollection.ci.scanner.utils import find_build_jobs, generate_job_context
    from nbcollection.ci.metadata.factory import run_extract_metadata
    from nbcollection.ci.metadata.utils import extract_metadata
    from nbcollection.ci.commands.utils import validate_and_parse_inputs
    from nbcollection_tests.ci.tools.utils import collection_set_to_namespace

    metadata_keys = ['title', 'description']
    notebook_name = 'Notebook-One'
    options = collection_set_to_namespace(metadata_rich_notebooks,
                                          extra={
                                              'notebook_names': notebook_name,
                                          })
    run_extract_metadata(options)
    # First pass: extract directly and check the expected keys exist.
    for job_idx, job in enumerate(
            find_build_jobs(options.project_path, options.collection_names,
                            options.category_names, options.notebook_names)):
        for notebook in job.category.notebooks:
            extract_metadata(notebook)
            with open(notebook.metadata.path, 'rb') as stream:
                metadata = json.loads(stream.read().decode(ENCODING))
                for key in metadata_keys:
                    assert key in metadata.keys()

    # Exactly one job expected (leaked job_idx from the loop above).
    assert job_idx == 0
    validative_options = collection_set_to_namespace(metadata_rich_notebooks,
                                                     extra={
                                                         'notebook_names':
                                                         notebook_name,
                                                     })
    validate_and_parse_inputs(validative_options)
    # Second pass: extract via job contexts and compare against the
    # `metadata` dict leaked from the first pass.
    for job_idx, job in enumerate(
            find_build_jobs(options.project_path, options.collection_names,
                            options.category_names, options.notebook_names)):
        job_context = generate_job_context(job)
        for notebook_idx, notebook_context in enumerate(job_context.notebooks):
            extract_metadata(notebook_context)

        # Exactly one notebook expected in the context.
        assert notebook_idx == 0

        # `notebook` here is the variable leaked from the first pass' loop.
        validative_metadata_filepath = os.path.join(
            SCANNER_BUILD_DIR, job.semantic_path(),
            f'{notebook.name}.metadata.json')
        with open(validative_metadata_filepath, 'rb') as stream:
            validative_metadata = json.loads(stream.read().decode(ENCODING))
            for key in metadata_keys:
                assert validative_metadata[key] == metadata[key]

    assert job_idx == 0
コード例 #7
0
    def _build_category(project_path: str, collection_name: str,
                        category_name: str) -> None:
        """Build a single collection/category pair with CHANNEL_BUILD set.

        The CHANNEL_BUILD environment flag is guaranteed to be removed
        again even when a job raises (the original leaked it on failure).
        """
        os.environ['CHANNEL_BUILD'] = 'true'
        try:
            for job in find_build_jobs(project_path, [collection_name],
                                       [category_name]):
                print(job.collection.name, job.category.name)
                print('Creating Job Context: ', job.collection.name,
                      job.category.name)
                job_context = generate_job_context(job)
                print('Running Job Context: ', job.collection.name,
                      job.category.name)
                run_job_context(job_context, False)
        finally:
            # Always clear the flag so later builds in the same process
            # do not inherit CHANNEL_BUILD.
            del os.environ['CHANNEL_BUILD']
コード例 #8
0
def test__extract_metadata(metadata_rich_notebooks):  # noqa F811
    """extract_metadata writes a metadata file carrying the notebook's
    title and a non-empty description."""
    import json
    import os

    from nbcollection.ci.scanner.utils import find_build_jobs, generate_job_context
    from nbcollection.ci.metadata.utils import extract_metadata

    for job in find_build_jobs(metadata_rich_notebooks):
        job_context = generate_job_context(job)
        for notebook_context in job_context.notebooks:
            extract_metadata(notebook_context)
            assert os.path.exists(notebook_context.metadata.path)
            with open(notebook_context.metadata.path, 'rb') as stream:
                extracted_data = json.loads(stream.read().decode(ENCODING))
                assert extracted_data['title'] == 'Notebook One'
                # Idiomatic `is not None` (was `not ... is None`).
                assert extracted_data['description'] is not None
コード例 #9
0
def test__generate_job_context(
        single_collection_repo__nth_categories):  # noqa F811
    """Each generated job context exposes an on-disk build directory,
    requirement objects of the expected datatypes, and one build script
    per notebook; the fixture yields exactly two jobs."""
    from nbcollection.ci.scanner.utils import find_build_jobs, generate_job_context
    from nbcollection.ci.datatypes import Requirements, PreRequirements, PreInstall

    discovered_jobs = find_build_jobs(single_collection_repo__nth_categories)
    for job_idx, build_job in enumerate(discovered_jobs):
        context = generate_job_context(build_job)

        # Build directory is materialized on disk.
        assert os.path.exists(context.build_dir)
        assert os.path.isdir(context.build_dir)

        # Requirement files exist and carry the expected datatypes.
        assert os.path.exists(context.requirements.path)
        assert context.requirements.__class__ == Requirements
        assert context.pre_requirements.__class__ == PreRequirements
        assert context.pre_install.__class__ == PreInstall

        # Every notebook in the context has a build script.
        for nb_context in context.notebooks:
            assert os.path.exists(nb_context.build_script_path)

    # Two categories in the fixture -> final enumerate index is 1.
    assert job_idx == 1