Example #1
def test_add_existing_app_object(self):
    self.assertEqual(self.app.queue, [])
    app = BuildApp()
    app.pool_size = 2
    self.app.add(app)
    self.assertIs(app, self.app.queue[0])
    self.assertIsNot(app, BuildApp())
    self.assertIsNot(BuildApp(), self.app.queue[0])
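Note: together with the runner tests in Examples #5 and #10, this fixture suggests the basic workflow: build an app, queue tasks, run it, and read results. A minimal sketch, assuming the BuildApp/Task API shown on this page (the import path is a guess, not taken from the source):

from giza.core.app import BuildApp  # assumed import path

app = BuildApp()
app.pool_size = 2

t = app.add('task')          # add('task') returns the Task it enqueues
t.job = sum                  # any callable
t.args = [[1, 2], 0]         # positional arguments for the callable
t.description = 'demo task'

app.run()                    # executes the queue with the configured pool
print(app.results)           # [3]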
Example #2
File: github.py Project: i80and/docs-tools
def mine(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4

    gh = get_connection(conf)

    pprint(mine_github_pulls(gh, app, conf))
Example #3
class TestBuildAppMinimalConfig(CommonAppSuite, TestCase):
    def setUp(self):
        self.app = BuildApp()
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2
        self.c = None

    def tearDown(self):
        self.app.close_pool()
Example #4
File: github.py Project: i80and/docs-tools
def actions(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4
    gh = get_connection(conf)

    results = []

    for pull in mine_github_pulls(gh, app, conf):
        if pull['merge_safe'] is True:
            results.append(pull)

    pprint(results)
Example #5
    def test_single_runner_app(self):
        self.assertEqual(self.app.queue, [])
        self.assertEqual(self.app.results, [])

        app = BuildApp()
        app.pool_size = 2
        t = app.add('task')
        t.job = sum
        t.args = [[1, 2], 0]
        t.description = 'test task'

        self.app.add(app)
        self.app.run()
        self.assertEqual(self.app.results[0], 3)
Example #6
def api(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        app.extend_queue(apiarg_tasks(c))
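Note: none of these command functions call run() inside the with block, which suggests that BuildApp.new(...).context() returns a context manager that runs the queued tasks on exit. A minimal sketch under that assumption (the empty list stands in for the output of a *_tasks(conf) helper):

with BuildApp.new(pool_type='thread',  # 'serial', 'thread', or 'process'
                  pool_size=2,
                  force=False).context() as app:
    app.extend_queue([])               # normally a list of Task objects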
Example #7
def robots(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        app.pool = 'serial'
        app.extend_queue(robots_txt_tasks(c))
Example #8
def source(args):
    args.builder = 'html'
    conf = fetch_config(args)

    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        sphinx_content_preperation(app, conf)
Example #9
File: clean.py Project: i80and/docs-tools
def main(args):
    """
    Removes build artifacts from ``build/`` directory.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    to_remove = set()

    if c.runstate.git_branch is not None:
        to_remove.add(os.path.join(c.paths.projectroot, c.paths.branch_output))

    if c.runstate.builder != []:
        for edition, language, builder in get_builder_jobs(c):
            builder_path = resolve_builder_path(builder, edition, language, c)
            builder_path = os.path.join(c.paths.projectroot, c.paths.branch_output, builder_path)

            to_remove.add(builder_path)
            dirpath, base = os.path.split(builder_path)
            to_remove.add(os.path.join(dirpath, 'doctrees-' + base))

            m = 'remove artifacts associated with the {0} builder in {1} ({2}, {3})'
            logger.debug(m.format(builder, c.git.branches.current, edition, language))

    if c.runstate.days_to_save is not None:
        published_branches = ['docs-tools', 'archive', 'public', 'primer', c.git.branches.current]
        published_branches.extend(c.git.branches.published)

        for build in os.listdir(os.path.join(c.paths.projectroot, c.paths.output)):
            build = os.path.join(c.paths.projectroot, c.paths.output, build)
            branch = os.path.split(build)[1]

            if branch in published_branches:
                continue
            elif not os.path.isdir(build):
                continue
            elif os.stat(build).st_mtime > c.runstate.days_to_save:
                to_remove.add(build)
                to_remove.add(os.path.join(c.paths.projectroot, c.paths.output, 'public', branch))
                logger.debug('removed stale artifacts: "{0}" and "build/public/{0}"'.format(branch))

    for fn in to_remove:
        if os.path.isdir(fn):
            job = shutil.rmtree
        else:
            job = os.remove

        t = app.add('task')
        t.job = job
        t.args = fn
        m = 'removing artifact: {0}'.format(fn)
        t.description = m
        logger.critical(m)

    app.run()
Example #10
    def test_single_runner_app_with_many_subtasks(self):
        self.assertEqual(self.app.queue, [])
        self.assertEqual(self.app.results, [])

        app = BuildApp()
        app.pool_size = 2

        for _ in range(10):
            t = app.add('task')
            t.job = sum
            t.description = 'test task'
            t.args = [[1, 2], 0]

        self.app.add(app)
        self.app.run()
        self.assertEqual(len(self.app.results), 10)
        self.assertEqual(self.app.results[0], 3)
        self.assertEqual(sum(self.app.results), 30)
Example #11
File: github.py Project: i80and/docs-tools
def stats(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4
    gh = get_connection(conf)

    users = set()
    result = {'merge_safe': 0, 'total': 0}
    for pull in mine_github_pulls(gh, app, conf):
        result['total'] += 1
        if pull['merge_safe'] is True:
            result['merge_safe'] += 1

        users.add(pull['user'])

    result['user_count'] = len(users)
    result['users'] = list(users)

    pprint(result)
Example #12
def redirects(args):
    c = fetch_config(args)

    if args.dry_run is True:
        print(''.join(make_redirect(c)))
    else:
        with BuildApp.new(pool_type=c.runstate.runner,
                          pool_size=c.runstate.pool_size,
                          force=c.runstate.force).context() as app:
            app.extend_queue(redirect_tasks(c))
Example #13
def steps(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        if c.runstate.clean_generated is True:
            app.extend_queue(step_clean(c))
        else:
            app.extend_queue(step_tasks(c))
Example #14
def extract(args):
    conf = fetch_config(args)

    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        path = fetch_package(conf.runstate._path, conf)
        extract_package_at_root(path, conf)

        builders = get_existing_builders(conf)
        app.extend_queue(fix_build_env_tasks(builders, conf))
Example #15
def images(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:

        if c.runstate.clean_generated is True:
            app.extend_queue(image_clean(c))
        else:
            for (_, (bconf, sconf)) in get_restricted_builder_jobs(c):
                app.extend_queue(image_tasks(bconf, sconf))
Example #16
File: git.py Project: i80and/docs-tools
def create_branch(args):
    """
    Takes a single branch name and (if necessary) creates a new branch. Then,
    populates the ``build/<branch>`` directory for the new branch using either
    the parent branch or ``master``. Safe to run multiple times (after a rebase)
    to update the build cache from master.

    Also calls :method:`~giza.operations.build_env.fix_build_environment()` to
    tweak the new build output to update hashes and on-disk copies of the
    environment to prevent unnecessary full-rebuilds from sphinx.
    """

    conf = fetch_config(args)

    g = GitRepo(conf.paths.projectroot)

    branch = conf.runstate.git_branch
    base_branch = g.current_branch()

    if base_branch == branch:
        base_branch = 'master'
        logger.warning('seeding build data for branch "{0}" from "master"'.format(branch))

    branch_builddir = os.path.join(conf.paths.projectroot,
                                   conf.paths.output, branch)

    base_builddir = os.path.join(conf.paths.projectroot,
                                 conf.paths.output, base_branch)

    if g.branch_exists(branch):
        logger.info('checking out branch "{0}"'.format(branch))
    else:
        logger.info('creating and checking out a branch named "{0}"'.format(branch))

    g.checkout_branch(branch)

    cmd = "rsync -r --times --checksum {0}/ {1}".format(base_builddir, branch_builddir)
    logger.info('seeding build directory for "{0}" from "{1}"'.format(branch, base_branch))

    try:
        subprocess.check_call(args=cmd.split())
        logger.info('branch creation complete.')
    except subprocess.CalledProcessError:
        logger.error(cmd)

    # get a new config here for the new branch
    conf = fetch_config(args)
    builders = get_existing_builders(conf)

    with BuildApp.new(pool_type='process',
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        app.extend_queue(fix_build_env_tasks(builders, conf))
Example #17
def sphinx(args):
    if args.runner == 'serial':
        args.serial_sphinx = True

    conf = fetch_config(args)
    logger.warning('not for production use: this expects that content generation is complete.')

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       pool_size=conf.runstate.pool_size,
                       force=conf.runstate.force)

    r = sphinx_builder_tasks(app, conf)

    raise SystemExit(r)
Example #18
File: deploy.py Project: i80and/docs-tools
def main(args):
    """
    Uploads all build artifacts to the production environment. Does not build or
    render artifacts.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    deploy_tasks(c, app)

    if c.runstate.dry_run is False:
        app.run()
Example #19
class TestBuildAppStandardConfig(CommonAppSuite, TestCase):
    def setUp(self):
        self.c = Configuration()
        self.c.runstate = RuntimeStateConfig()
        self.app = BuildApp(self.c)
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2

    def test_conf_object_consistent_in_task(self):
        self.assertEqual(self.app.queue, [])
        t = self.app.add('task')
        self.assertIs(self.c, t.conf)
        self.assertIs(self.c, self.app.queue[0].conf)

    def test_conf_object_consistent_in_app(self):
        self.assertEqual(self.app.queue, [])
        self.app.add('app')

        self.assertIs(self.c, self.app.conf)
        self.assertIs(self.c, self.app.queue[0].conf)

    def test_conf_object_consistent_in_new_task(self):
        self.assertEqual(self.app.queue, [])
        t = Task()
        self.assertIsNone(t.conf)
        self.app.add(t)
        self.assertIsNotNone(t.conf)
        self.assertIs(self.c, self.app.queue[0].conf)
        self.assertIs(self.c, t.conf)

    def test_force_options(self):
        self.assertEqual(self.c.runstate.force, self.app.force)
        self.assertFalse(self.c.runstate.force)
        self.assertFalse(self.app.force)

        self.app._force = None
        self.assertFalse(self.app.force)

    def test_default_pool_size(self):
        self.assertIsNotNone(self.c)
        self.assertIsNotNone(self.app.conf)
        self.app._pool_size = None
        self.assertEqual(self.c.runstate.pool_size, self.app.pool_size)

    def tearDown(self):
        self.app.close_pool()
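Note: the three conf-consistency tests above pin down the same behavior: adding a task to a BuildApp constructed with a Configuration stamps that same object onto the task. A condensed sketch of what they assert:

c = Configuration()
c.runstate = RuntimeStateConfig()

app = BuildApp(c)
t = app.add('task')
assert t.conf is c       # the task shares the app's conf object
assert app.queue[0] is t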
Example #20
File: deploy.py Project: i80and/docs-tools
def publish_and_deploy(args):
    """
    Combines the work of ``giza sphinx`` and ``giza deploy``, to produce build
    artifacts and then upload those artifacts to the servers.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    sphinx_ret = sphinx_publication(c, app)
    if sphinx_ret == 0 or c.runstate.force is True:
        deploy_tasks(c, app)

        if c.runstate.dry_run is False:
            app.run()
    else:
        logger.warning('{0} sphinx build(s) failed, and build not forced. not deploying.'.format(sphinx_ret))
Example #21
def main(args):
    """
    Use Sphinx to generate build artifacts. Can generate artifacts for multiple
    output types, content editions and translations.
    """
    conf = fetch_config(args)

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       pool_size=conf.runstate.pool_size,
                       force=conf.runstate.force)

    with Timer("full sphinx build process"):
        # In general we try to avoid passing the "app" object between functions
        # and mutating it at too many places in the stack (although in earlier
        # versions this was the primary idiom). This call is a noted exception,
        # and makes it possible to run portions of this process in separate
        # targets.

        sphinx_publication(conf, app)
Example #22
def test_build_site(args):
    args.languages_to_build = args.editions_to_build = []
    args.builder = 'html'

    conf = fetch_config(args)

    safe_create_directory('build')
    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        try:
            sphinx_publication(conf, args, app)
        except:
            sphinx_publication(conf, args, app)
            if os.path.exists('docs-tools'):
                shutil.rmtree('docs-tools')

    logger.info('bootstrapped makefile system')

    logger.info('updated project skeleton in current directory.')
Example #23
File: make.py Project: i80and/docs-tools
def run_make_operations(targets, conf):
    """
    :param list targets: A list of tuples in the form of ``(<action>, [option,
         option])`` that define build targets.

    :param Configuration conf: The top level configuration object.

    Parses the ``targets`` list and runs tasks defined, including all specified
    sphinx targets, all ``push`` deployment targets, and will create the ``env``
    packages. Noteworthy behavior:

    - The order of options *except* for the action in the first option is not
      important.

    - If you run ``push`` target with the ``deploy`` option
      (i.e. ``push-deploy`` or ``push-<edition>-deploy``), ``giza`` will *not*
      run the ``publish`` Sphinx build.

    - This interface assumes that all deployment targets (defined in each
      project begin with ``push-`` or ``stage-``.) If you have a project with
      different deployment targets, you will need to call ``giza deploy``
      directly.

    - The ``env`` cache targets take the same options as the Sphinx builders and
      package the environment for only those builders. If you specify ``env``
      after a Sphinx target, ``giza`` will build the cache for only that
      package.
    """

    sphinx_opts = {"languages": set(),
                   "editions": set(),
                   "builders": set()}
    push_opts = {"targets": set(),
                 "type": None}
    packaging_opts = {}

    sphinx_builders = available_sphinx_builders()

    if 'push' in conf.system.files.data:
        deploy_configs = dict((item['target'], item) for item in conf.system.files.data.push)
    else:
        deploy_configs = []

    tasks = []
    for action, options in targets:
        if action in sphinx_builders:
            tasks.append(sphinx_opts)

            add_sphinx_build_options(sphinx_opts, action, options, conf)
        elif action in ('stage', 'push'):
            tasks.append(push_opts)
            push_opts['type'] = action

            if 'deploy' not in options:
                sphinx_opts['builders'].add('publish')
                tasks.append(sphinx_opts)
                add_sphinx_build_options(sphinx_opts, action, options, conf)
                conf.runstate.fast = False

            if action in deploy_configs:
                push_opts['targets'].add(action)

            for build_option in options:
                deploy_target_name = '-'.join((action, build_option))

                if build_option in deploy_configs:
                    push_opts['targets'].add(build_option)
                elif deploy_target_name in deploy_configs:
                    push_opts['targets'].add(deploy_target_name)
        elif action.startswith('env'):
            if len(packaging_opts) > 0:
                packaging_opts = copy.copy(sphinx_opts)

            tasks.append(packaging_opts)
            add_sphinx_build_options(packaging_opts, False, options, conf)
        else:
            logger.error('target: {0} not defined in the make interface'.format(action))

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       force=conf.runstate.force,
                       pool_size=conf.runstate.pool_size)

    if sphinx_opts in tasks:
        conf.runstate.languages_to_build = list(sphinx_opts['languages'])
        conf.runstate.editions_to_build = list(sphinx_opts['editions'])
        conf.runstate.builder = list(sphinx_opts['builders'])

        if 'publish' in conf.runstate.builder:
            conf.runstate.fast = False

        derive_command('sphinx', conf)

        sphinx_publication(conf, app)

    if push_opts in tasks:
        if len(push_opts['targets']) == 0:
            for lang, edition in itertools.product(conf.runstate.languages_to_build,
                                                   conf.runstate.editions_to_build):
                push_target_name = [push_opts['type']]
                for opt in (edition, lang):
                    if opt is not None:
                        push_target_name.append(opt)
                push_target_name = '-'.join(push_target_name)
                push_opts['targets'].add(push_target_name)

        conf.runstate.push_targets = list(push_opts['targets'])
        deploy_tasks(conf, app)
        derive_command('deploy', conf)

    if packaging_opts in tasks:
        derive_command('env', conf)

        app.add(Task(job=env_package_worker,
                     args=(conf.runstate, conf),
                     target=True,
                     dependency=None))

    if len(app.queue) >= 1:
        app.run()
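Note: a hypothetical invocation of run_make_operations(), using the (<action>, [option, option]) tuple shape described in the docstring; the target names are illustrative, not taken from a real project:

conf = fetch_config(args)
targets = [
    ('html', []),             # a plain Sphinx builder target
    ('push', ['deploy']),     # the 'deploy' option skips the 'publish' build
]
run_make_operations(targets, conf)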
Example #24
def test_finalizer_setter_error_app_in_list(self):
    with self.assertRaises(TypeError):
        self.task.finalizers = [self.Task(), BuildApp()]
Example #25
def test_finalizer_setter_error_app(self):
    with self.assertRaises(TypeError):
        self.task.finalizers = BuildApp()
Example #26
def setUp(self):
    self.app = BuildApp.new(pool_type=random.choice(['serial', 'thread']),
                            pool_size=None, force=None)
    self.c = None
Example #27
def setUp(self):
    self.app = BuildApp()
    self.app.default_pool = random.choice(['serial', 'thread'])
    self.app.pool_size = 2
    self.c = None
Example #28
def setUp(self):
    self.c = Configuration()
    self.c.runstate = RuntimeStateConfig()
    self.app = BuildApp(self.c)
    self.app.default_pool = random.choice(['serial', 'thread'])
    self.app.pool_size = 2