Example #1
    def test_add_existing_app_object(self):
        self.assertEqual(self.app.queue, [])
        app = BuildApp()
        app.pool_size = 2
        self.app.add(app)
        self.assertIs(app, self.app.queue[0])
        self.assertIsNot(app, BuildApp())
        self.assertIsNot(BuildApp(), self.app.queue[0])
Example #2
def mine(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4

    gh = get_connection(conf)

    pprint(mine_github_pulls(gh, app, conf))
Example #3
class TestBuildAppMinimalConfig(CommonAppSuite, TestCase):
    def setUp(self):
        self.app = BuildApp()
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2
        self.c = None

    def tearDown(self):
        self.app.close_pool()
Example #4
def triage(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool = 'thread'

    j = JeerahClient(conf)
    j.connect()

    query_data = giza.jeerah.triage.query(j, app, conf)

    pprint(giza.jeerah.triage.report(query_data, conf))
Example #5
def planning(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool = 'thread'

    j = JeerahClient(conf)
    j.connect()

    query_data = giza.jeerah.progress.query(j, app, conf)

    pprint(giza.jeerah.planning.report(query_data, conf))
Example #6
def actions(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4
    gh = get_connection(conf)

    results = []

    for pull in mine_github_pulls(gh, app, conf):
        if pull['merge_safe'] is True:
            results.append(pull)

    pprint(results)
Example #7
    def test_single_runner_app(self):
        self.assertEqual(self.app.queue, [])
        self.assertEqual(self.app.results, [])

        app = BuildApp()
        app.pool_size = 2
        t = app.add('task')
        t.job = sum
        t.args = [[1, 2], 0]
        t.description = 'test task'

        self.app.add(app)
        self.app.run()
        self.assertEqual(self.app.results[0], 3)
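The test above exercises the task protocol directly. A minimal sketch of that protocol, assuming (as the example suggests) that app.add('task') returns a Task whose job, args, and description attributes the caller sets, and that app.run() collects each job's return value in app.results:

app = BuildApp()
app.pool_size = 2
t = app.add('task')
t.job = sum              # any callable
t.args = [[2, 3], 0]     # positional arguments for the callable
t.description = 'add two numbers'
app.run()
print(app.results[0])    # expected: 5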
Example #8
def api(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        app.extend_queue(apiarg_tasks(c))
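Several of the following examples share this context-manager idiom. A minimal sketch of the pattern, assuming (as the usage here implies) that BuildApp.new(...).context() yields the configured app and runs any queued tasks when the with block exits; my_tasks is a hypothetical task generator standing in for helpers like apiarg_tasks:

conf = fetch_config(args)
with BuildApp.new(pool_type=conf.runstate.runner,
                  pool_size=conf.runstate.pool_size,
                  force=conf.runstate.force).context() as app:
    # tasks queued here run when the with block exits
    app.extend_queue(my_tasks(conf))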
Example #9
def build_translation_model(args):
    conf = fetch_config(args)
    if args.t_translate_config is None:
        tconf = conf.system.files.data.translate
    elif os.path.isfile(args.t_translate_config):
        tconf = TranslateConfig(args.t_translate_config, conf)
    else:
        logger.error(args.t_translate_config + " doesn't exist")
        return

    if os.path.exists(tconf.paths.project) is False:
        os.makedirs(tconf.paths.project)
    elif os.path.isfile(tconf.paths.project):
        logger.error(tconf.paths.project + " is a file")
        sys.exit(1)
    elif os.listdir(tconf.paths.project) != []:
        logger.error(tconf.paths.project + " must be empty")
        sys.exit(1)

    with open(os.path.join(tconf.paths.project, "translate.yaml"), 'w') as f:
        yaml.dump(tconf.dict(), f, default_flow_style=False)

    tconf.conf.runstate.pool_size = tconf.settings.pool_size
    run_args = get_run_args(tconf)

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       pool_size=conf.runstate.pool_size,
                       force=conf.runstate.force)
    os.environ['IRSTLM'] = tconf.paths.irstlm

    setup_train(tconf)
    setup_tune(tconf)
    setup_test(tconf)

    for idx, parameter_set in enumerate(run_args):
        parameter_set = list(parameter_set)
        parameter_set.append(idx)
        parameter_set.append(tconf)
        t = app.add()
        t.job = build_model
        t.args = parameter_set
        t.description = "model_" + str(parameter_set[9])

    app.run()

    aggregate_model_data(tconf.paths.project)

    from_addr = "*****@*****.**"
    to_addr = [tconf.settings.email]

    with open(tconf.paths.project + "/data.csv") as data:
        msg = MIMEText(data.read())

    msg['Subject'] = "Model Complete"
    msg['From'] = from_addr
    msg['To'] = ", ".join(to_addr)

    server = smtplib.SMTP("localhost")
    server.sendmail(from_addr, to_addr, msg.as_string())
    server.quit()
Example #10
def source(args):
    args.builder = 'html'
    conf = fetch_config(args)

    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        sphinx_content_preperation(app, conf)
Example #11
def robots(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        app.pool = 'serial'
        app.extend_queue(robots_txt_tasks(c))
Example #12
def main(args):
    """
    Removes build artifacts from ``build/`` directory.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    to_remove = set()

    if c.runstate.git_branch is not None:
        to_remove.add(os.path.join(c.paths.projectroot, c.paths.branch_output))

    if c.runstate.builder != []:
        for edition, language, builder in get_builder_jobs(c):
            builder_path = resolve_builder_path(builder, edition, language, c)
            builder_path = os.path.join(c.paths.projectroot, c.paths.branch_output, builder_path)

            to_remove.add(builder_path)
            dirpath, base = os.path.split(builder_path)
            to_remove.add(os.path.join(dirpath, 'doctrees-' + base))

            m = 'remove artifacts associated with the {0} builder in {1} ({2}, {3})'
            logger.debug(m.format(builder, c.git.branches.current, edition, language))

    if c.runstate.days_to_save is not None:
        published_branches = ['docs-tools', 'archive', 'public', 'primer', c.git.branches.current]
        published_branches.extend(c.git.branches.published)

        for build in os.listdir(os.path.join(c.paths.projectroot, c.paths.output)):
            build = os.path.join(c.paths.projectroot, c.paths.output, build)
            branch = os.path.split(build)[1]

            if branch in published_branches:
                continue
            elif not os.path.isdir(build):
                continue
            elif os.stat(build).st_mtime > c.runstate.days_to_save:
                to_remove.add(build)
                to_remove.add(os.path.join(c.paths.projectroot, c.paths.output, 'public', branch))
                logger.debug('removed stale artifacts: "{0}" and "build/public/{0}"'.format(branch))

    for fn in to_remove:
        if os.path.isdir(fn):
            job = shutil.rmtree
        else:
            job = os.remove

        t = app.add('task')
        t.job = job
        t.args = fn
        m = 'removing artifact: {0}'.format(fn)
        t.description = m
        logger.critical(m)

    app.run()
Example #13
    def test_single_runner_app_with_many_subtasks(self):
        self.assertEqual(self.app.queue, [])
        self.assertEqual(self.app.results, [])

        app = BuildApp()
        app.pool_size = 2

        for _ in range(10):
            t = app.add('task')
            t.job = sum
            t.description = 'test task'
            t.args = [[1, 2], 0]

        self.app.add(app)
        self.app.run()
        self.assertEqual(len(self.app.results), 10)
        self.assertEqual(self.app.results[0], 3)
        self.assertEqual(sum(self.app.results), 30)
Example #14
def create_branch(args):
    """
    Takes a single branch name and (if necessary) creates a new branch. Then,
    populates the ``build/<branch>`` directory for the new branch using either
    the parent branch or ``master``. Safe to run multiple times (after a rebase)
    to update the build cache from master.

    Also calls :meth:`~giza.operations.build_env.fix_build_environment()` to
    tweak the new build output to update hashes and on-disk copies of the
    environment to prevent unnecessary full-rebuilds from sphinx.
    """

    conf = fetch_config(args)

    g = GitRepo(conf.paths.projectroot)

    branch = conf.runstate.git_branch
    base_branch = g.current_branch()

    if base_branch == branch:
        base_branch = 'master'
        logger.warning(
            'seeding build data for branch "{0}" from "master"'.format(branch))

    branch_builddir = os.path.join(conf.paths.projectroot, conf.paths.output,
                                   branch)

    base_builddir = os.path.join(conf.paths.projectroot, conf.paths.output,
                                 base_branch)

    if g.branch_exists(branch):
        logger.info('checking out branch "{0}"'.format(branch))
    else:
        logger.info(
            'creating and checking out a branch named "{0}"'.format(branch))

    g.checkout_branch(branch)

    cmd = "rsync -r --times --checksum {0}/ {1}".format(
        base_builddir, branch_builddir)
    logger.info('seeding build directory for "{0}" from "{1}"'.format(
        branch, base_branch))

    try:
        subprocess.check_call(args=cmd.split())
        logger.info('branch creation complete.')
    except subprocess.CalledProcessError:
        logger.error(cmd)

    # get a new config here for the new branch
    conf = fetch_config(args)
    builders = get_existing_builders(conf)

    with BuildApp.new(pool_type='process',
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        app.extend_queue(fix_build_env_tasks(builders, conf))
Example #15
def redirects(args):
    c = fetch_config(args)

    if args.dry_run is True:
        print(''.join(make_redirect(c)))
    else:
        with BuildApp.new(pool_type=c.runstate.runner,
                          pool_size=c.runstate.pool_size,
                          force=c.runstate.force).context() as app:
            app.extend_queue(redirect_tasks(c))
Example #16
def steps(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:
        if c.runstate.clean_generated is True:
            app.extend_queue(step_clean(c))
        else:
            app.extend_queue(step_tasks(c))
Example #17
def stats(args):
    conf = fetch_config(args)
    app = BuildApp(conf)
    app.pool_size = 4
    gh = get_connection(conf)

    users = set()
    result = {'merge_safe': 0, 'total': 0}
    for pull in mine_github_pulls(gh, app, conf):
        result['total'] += 1
        if pull['merge_safe'] is True:
            result['merge_safe'] += 1

        users.add(pull['user'])

    result['user_count'] = len(users)
    result['users'] = list(users)

    pprint(result)
Example #18
def extract(args):
    conf = fetch_config(args)

    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        path = fetch_package(conf.runstate._path, conf)
        extract_package_at_root(path, conf)

        builders = get_existing_builders(conf)
        app.extend_queue(fix_build_env_tasks(builders, conf))
Example #19
def images(args):
    c = fetch_config(args)

    with BuildApp.new(pool_type=c.runstate.runner,
                      pool_size=c.runstate.pool_size,
                      force=c.runstate.force).context() as app:

        if c.runstate.clean_generated is True:
            app.extend_queue(image_clean(c))
        else:
            for (_, (bconf, sconf)) in get_restricted_builder_jobs(c):
                app.extend_queue(image_tasks(bconf, sconf))
Example #20
def sphinx(args):
    if args.runner == 'serial':
        args.serial_sphinx = True

    conf = fetch_config(args)
    logger.warning('not for production use: this expects that content generation is complete.')

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       pool_size=conf.runstate.pool_size,
                       force=conf.runstate.force)

    r = sphinx_builder_tasks(app, conf)

    raise SystemExit(r)
Example #21
def main(args):
    """
    Uploads all build artifacts to the production environment. Does not build or
    render artifacts.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    deploy_tasks(c, app)

    if c.runstate.dry_run is False:
        app.run()
Example #22
class TestBuildAppStandardConfig(CommonAppSuite, TestCase):
    def setUp(self):
        self.c = Configuration()
        self.c.runstate = RuntimeStateConfig()
        self.app = BuildApp(self.c)
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2

    def test_conf_object_consistent_in_task(self):
        self.assertEqual(self.app.queue, [])
        t = self.app.add('task')
        self.assertIs(self.c, t.conf)
        self.assertIs(self.c, self.app.queue[0].conf)

    def test_conf_object_consistent_in_app(self):
        self.assertEqual(self.app.queue, [])
        self.app.add('app')

        self.assertIs(self.c, self.app.conf)
        self.assertIs(self.c, self.app.queue[0].conf)

    def test_conf_object_consistent_in_new_task(self):
        self.assertEqual(self.app.queue, [])
        t = Task()
        self.assertIsNone(t.conf)
        self.app.add(t)
        self.assertIsNotNone(t.conf)
        self.assertIs(self.c, self.app.queue[0].conf)
        self.assertIs(self.c, t.conf)

    def test_force_options(self):
        self.assertEqual(self.c.runstate.force, self.app.force)
        self.assertFalse(self.c.runstate.force)
        self.assertFalse(self.app.force)

        self.app._force = None
        self.assertFalse(self.app.force)

    def test_default_pool_size(self):
        self.assertIsNotNone(self.c)
        self.assertIsNotNone(self.app.conf)
        self.app._pool_size = None
        self.assertEqual(self.c.runstate.pool_size, self.app.pool_size)

    def tearDown(self):
        self.app.close_pool()
Example #23
def publish_and_deploy(args):
    """
    Combines the work of ``giza sphinx`` and ``giza deploy``, to produce build
    artifacts and then upload those artifacts to the servers.
    """

    c = fetch_config(args)
    app = BuildApp.new(pool_type=c.runstate.runner,
                       pool_size=c.runstate.pool_size,
                       force=c.runstate.force)

    sphinx_ret = sphinx_publication(c, app)
    if sphinx_ret == 0 or c.runstate.force is True:
        deploy_tasks(c, app)

        if c.runstate.dry_run is False:
            app.run()
    else:
        logger.warning('{0} sphinx build(s) failed, and build not forced. not deploying.'.format(sphinx_ret))
Example #24
def main(args):
    """
    Use Sphinx to generate build artifacts. Can generate artifacts for multiple
    output types, content editions and translations.
    """
    conf = fetch_config(args)

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       pool_size=conf.runstate.pool_size,
                       force=conf.runstate.force)

    with Timer("full sphinx build process"):
        # In general we try to avoid passing the "app" object between functions
        # and mutating it at too many places in the stack (although in earlier
        # versions this was the primary idiom). This call is a noted exception,
        # and makes it possible to run portions of this process in separate
        # targets.

        sphinx_publication(conf, app)
Example #25
def test_build_site(args):
    args.languages_to_build = args.editions_to_build = []
    args.builder = 'html'

    conf = fetch_config(args)

    safe_create_directory('build')
    with BuildApp.new(pool_type=conf.runstate.runner,
                      pool_size=conf.runstate.pool_size,
                      force=conf.runstate.force).context() as app:
        try:
            sphinx_publication(conf, args, app)
        except Exception:
            # retry once, then clear any cached docs-tools checkout
            sphinx_publication(conf, args, app)
            if os.path.exists('docs-tools'):
                shutil.rmtree('docs-tools')

    logger.info('bootstrapped makefile system')

    logger.info('updated project skeleton in current directory.')
Example #26
    def setUp(self):
        self.c = Configuration()
        self.c.runstate = RuntimeStateConfig()
        self.app = BuildApp(self.c)
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2
Example #27
    def setUp(self):
        self.app = BuildApp.new(pool_type=random.choice(['serial', 'thread']),
                                pool_size=None,
                                force=None)
        self.c = None
Example #28
    def setUp(self):
        self.app = BuildApp()
        self.app.default_pool = random.choice(['serial', 'thread'])
        self.app.pool_size = 2
        self.c = None
Example #29
    def test_finalizer_setter_error_app(self):
        with self.assertRaises(TypeError):
            self.task.finalizers = BuildApp()
Example #30
    def test_finalizer_setter_error_app_in_list(self):
        with self.assertRaises(TypeError):
            self.task.finalizers = [self.Task(), BuildApp()]
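Taken together, these two tests imply that Task.finalizers accepts Task instances but rejects BuildApp objects, whether assigned directly or inside a list. A sketch of the accepted form, inferred from the tests (follow_up is a hypothetical Task):

t = Task()
follow_up = Task()
t.finalizers = [follow_up]   # a Task, or a list of Tasks, is accepted
t.finalizers = BuildApp()    # raises TypeError per the tests above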
Example #31
def run_make_operations(targets, conf):
    """
    :param list targets: A list of tuples in the form of ``(<action>, [option,
         option])`` that define build targets.

    :param Configuration conf: The top level configuration object.

    Parses the ``targets`` list and runs tasks defined, including all specified
    sphinx targets, all ``push`` deployment targets, and will create the ``env``
    packages. Noteworthy behavior:

    - The order of options *except* for the action in the first option is not
      important.

    - If you run ``push`` target with the ``deploy`` option
      (i.e. ``push-deploy`` or ``push-<edition>-deploy``), ``giza`` will *not*
      run the ``publish`` Sphinx build.

    - This interface assumes that all deployment targets (defined in each
      project) begin with ``push-`` or ``stage-``. If you have a project with
      different deployment targets, you will need to call ``giza deploy``
      directly.

    - The ``env`` cache targets take the same options as the Sphinx builders and
      package the environment for only those builders. If you specify ``env``
      after a Sphinx target, ``giza`` will build the cache for only that
      package.
    """

    sphinx_opts = {"languages": set(),
                   "editions": set(),
                   "builders": set()}
    push_opts = {"targets": set(),
                 "type": None}
    packaging_opts = {}

    sphinx_builders = avalible_sphinx_builders()
    deploy_configs = dict((item['target'], item) for item in conf.system.files.data.push)

    tasks = []
    for action, options in targets:
        if action in sphinx_builders:
            tasks.append(sphinx_opts)

            add_sphinx_build_options(sphinx_opts, action, options, conf)
        elif action in ('stage', 'push'):
            tasks.append(push_opts)
            push_opts['type'] = action

            if 'deploy' not in options:
                sphinx_opts['builders'].add('publish')
                tasks.append(sphinx_opts)
                add_sphinx_build_options(sphinx_opts, action, options, conf)
                conf.runstate.fast = False

            if action in deploy_configs:
                push_opts['targets'].add(action)

            for build_option in options:
                deploy_target_name = '-'.join((action, build_option))

                if build_option in deploy_configs:
                    push_opts['targets'].add(build_option)
                elif deploy_target_name in deploy_configs:
                    push_opts['targets'].add(deploy_target_name)
        elif action.startswith('env'):
            if len(packaging_opts) > 0:
                packaging_opts = copy.copy(sphinx_opts)

            tasks.append(packaging_opts)
            add_sphinx_build_options(packaging_opts, False, options, conf)
        else:
            logger.error('target: {0} not defined in the make interface'.format(action))

    app = BuildApp.new(pool_type=conf.runstate.runner,
                       force=conf.runstate.force,
                       pool_size=conf.runstate.pool_size)

    if sphinx_opts in tasks:
        conf.runstate.languages_to_build = list(sphinx_opts['languages'])
        conf.runstate.editions_to_build = list(sphinx_opts['editions'])
        conf.runstate.builder = list(sphinx_opts['builders'])

        if 'publish' in conf.runstate.builder:
            conf.runstate.fast = False

        derive_command('sphinx', conf)

        sphinx_publication(conf, app)

    if push_opts in tasks:
        if len(push_opts['targets']) == 0:
            for lang, edition in itertools.product(conf.runstate.languages_to_build,
                                                   conf.runstate.editions_to_build):
                push_target_name = [push_opts['type']]
                for opt in (edition, lang):
                    if opt is not None:
                        push_target_name.append(opt)
                push_target_name = '-'.join(push_target_name)
                push_opts['targets'].add(push_target_name)

        conf.runstate.push_targets = list(push_opts['targets'])
        deploy_tasks(conf, app)
        derive_command('deploy', conf)

    if packaging_opts in tasks:
        derive_command('env', conf)

        app.add(Task(job=env_package_worker,
                     args=(conf.runstate, conf),
                     target=True,
                     dependency=None))

    if len(app.queue) >= 1:
        app.run()
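A hypothetical invocation, following the docstring's description of the targets format as (<action>, [option, option]) tuples; the specific target names here are illustrative only, not taken from a real project:

targets = [('html', []),           # a sphinx builder target
           ('push', ['deploy']),   # deploy without rerunning the publish build
           ('env', ['html'])]      # package the build environment cache
run_make_operations(targets, conf)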