Example #1
    def run(self, ctx):
        out_dir = (ctx.args.output
                   or os.path.join(ctx.app.root_dir, '_counter'))

        success = True
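        # 'success' accumulates via the non-short-circuiting '&' below, so
        # both bake steps always run even if an earlier one failed.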
        ctx.timers = {}
        start_time = time.perf_counter()
        try:
            # Bake the site sources.
            if not ctx.args.assets_only:
                success = success & self._bakeSources(ctx, out_dir)

            # Bake the assets.
            if not ctx.args.html_only:
                success = success & self._bakeAssets(ctx, out_dir)

            # Show merged timers.
            if ctx.args.show_timers:
                logger.info("-------------------")
                logger.info("Timing information:")
                _show_timers(ctx.timers)

            # All done.
            logger.info('-------------------------')
            logger.info(format_timed(start_time, 'done baking'))
            return 0 if success else 1
        except Exception as ex:
            if ctx.app.debug:
                logger.exception(ex)
            else:
                logger.error(str(ex))
            return 1
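
All of these examples share one timing idiom: capture time.perf_counter() before the work, then log a human-readable duration with format_timed. Below is a minimal, self-contained sketch of that idiom; this format_timed is a simplified stand-in for PieCrust's piecrust.chefutil.format_timed, whose real formatting (colors, alignment) differs:

import logging
import time

logger = logging.getLogger(__name__)


def format_timed(start_time, message, colored=True, indent_level=0):
    # Simplified stand-in: append the elapsed time in milliseconds.
    # 'colored' is accepted only for signature compatibility.
    elapsed_ms = (time.perf_counter() - start_time) * 1000.0
    return '%s%s [%.1f ms]' % ('  ' * indent_level, message, elapsed_ms)


def timed_run(work):
    # The pattern used by the run()/bake() methods in these examples:
    # time the operation, log on success, and still report failures.
    start_time = time.perf_counter()
    try:
        work()
        logger.info(format_timed(start_time, 'done baking'))
        return 0
    except Exception as ex:
        logger.error(str(ex))
        return 1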
Example #2
    def _unsafeRun(self, job):
        start_time = time.perf_counter()

        entry = job.record_entry
        try:
            self._page_baker.bake(job.factory, job.route, entry)
        except BakingError as ex:
            logger.debug("Got baking error. Adding it to the record.")
            while ex:
                entry.errors.append(str(ex))
                ex = ex.__cause__

        has_error = False
        for e in entry.getAllErrors():
            has_error = True
            logger.error(e)
        if has_error:
            return False

        if entry.was_any_sub_baked:
            first_sub = entry.subs[0]

            friendly_uri = first_sub.out_uri
            if friendly_uri == '':
                friendly_uri = '[main page]'

            friendly_count = ''
            if entry.num_subs > 1:
                friendly_count = ' (%d pages)' % entry.num_subs
            logger.info(format_timed(
                    start_time, '[%d] %s%s' %
                    (self.wid, friendly_uri, friendly_count)))

        return True
Example #3
    def run(self, ctx):
        out_dir = (ctx.args.output or
                   os.path.join(ctx.app.root_dir, '_counter'))

        success = True
        ctx.stats = {}
        start_time = time.perf_counter()
        try:
            # Bake the site sources.
            if not ctx.args.assets_only:
                success = success & self._bakeSources(ctx, out_dir)

            # Bake the assets.
            if not ctx.args.html_only:
                success = success & self._bakeAssets(ctx, out_dir)

            # Show merged stats.
            if ctx.args.show_stats:
                logger.info("-------------------")
                logger.info("Timing information:")
                _show_stats(ctx.stats)

            # All done.
            logger.info('-------------------------')
            logger.info(format_timed(start_time, 'done baking'))
            return 0 if success else 1
        except Exception as ex:
            if ctx.app.debug:
                logger.exception(ex)
            else:
                logger.error(str(ex))
            return 1
Example #4
    def _handleCacheValidity(self, record):
        start_time = time.perf_counter()

        reason = None
        if self.force:
            reason = "ordered to"
        elif not self.app.config.get('__cache_valid'):
            # The configuration file was changed, or we're running a new
            # version of the app.
            reason = "not valid anymore"
        elif (not record.previous.bake_time
              or not record.previous.hasLatestVersion()):
            # We have no valid previous bake record.
            reason = "need bake record regeneration"
        else:
            # Check if any template has changed since the last bake. Since
            # there could be some advanced conditional logic going on, we'd
            # better just force a bake from scratch if that's the case.
            max_time = 0
            for d in self.app.templates_dirs:
                for dpath, _, filenames in os.walk(d):
                    for fn in filenames:
                        full_fn = os.path.join(dpath, fn)
                        max_time = max(max_time, os.path.getmtime(full_fn))
            if max_time >= record.previous.bake_time:
                reason = "templates modified"

        if reason is not None:
            # We have to bake everything from scratch.
            self.app.cache.clearCaches(except_names=['app'])
            self.force = True
            record.incremental_count = 0
            record.clearPrevious()
            logger.info(
                format_timed(start_time,
                             "cleaned cache (reason: %s)" % reason))
            return False
        else:
            record.incremental_count += 1
            logger.debug(
                format_timed(start_time,
                             "cache is assumed valid",
                             colored=False))
            return True
Example #5
    def _bakeTaxonomies(self, record, pool):
        logger.debug("Baking taxonomy pages...")
        with format_timed_scope(logger, 'built taxonomy buckets',
                                level=logging.DEBUG, colored=False):
            buckets = self._buildTaxonomyBuckets(record)

        start_time = time.perf_counter()
        page_count = self._bakeTaxonomyBuckets(record, pool, buckets)
        logger.info(format_timed(start_time,
                                 "baked %d taxonomy pages." % page_count))
Example #6
    def _handleCacheValidity(self, record):
        start_time = time.perf_counter()

        reason = None
        if self.force:
            reason = "ordered to"
        elif not self.app.config.get('__cache_valid'):
            # The configuration file was changed, or we're running a new
            # version of the app.
            reason = "not valid anymore"
        elif (not record.previous.bake_time or
                not record.previous.hasLatestVersion()):
            # We have no valid previous bake record.
            reason = "need bake record regeneration"
        else:
            # Check if any template has changed since the last bake. Since
            # there could be some advanced conditional logic going on, we'd
            # better just force a bake from scratch if that's the case.
            max_time = 0
            for d in self.app.templates_dirs:
                for dpath, _, filenames in os.walk(d):
                    for fn in filenames:
                        full_fn = os.path.join(dpath, fn)
                        max_time = max(max_time, os.path.getmtime(full_fn))
            if max_time >= record.previous.bake_time:
                reason = "templates modified"

        if reason is not None:
            # We have to bake everything from scratch.
            self.app.cache.clearCaches(except_names=['app'])
            self.force = True
            record.incremental_count = 0
            record.clearPrevious()
            logger.info(format_timed(
                    start_time,
                    "cleaned cache (reason: %s)" % reason))
            return False
        else:
            record.incremental_count += 1
            logger.debug(format_timed(
                    start_time, "cache is assumed valid",
                    colored=False))
            return True
Example #7
    def _bakeTaxonomies(self, record, pool):
        logger.debug("Baking taxonomy pages...")
        with format_timed_scope(logger,
                                'built taxonomy buckets',
                                level=logging.DEBUG,
                                colored=False):
            buckets = self._buildTaxonomyBuckets(record)

        start_time = time.perf_counter()
        page_count = self._bakeTaxonomyBuckets(record, pool, buckets)
        logger.info(
            format_timed(start_time, "baked %d taxonomy pages." % page_count))
Example #8
    def run(self, ctx):
        from piecrust.chefutil import format_timed
        from piecrust.environment import ExecutionStats

        out_dir = (ctx.args.output
                   or os.path.join(ctx.app.root_dir, '_counter'))

        success = True
        avg_stats = ExecutionStats()
        avg_stats.registerTimer('Total')
        start_time = time.perf_counter()

        num_iter = 1
        if ctx.args.profile > 0:
            num_iter = ctx.args.profile

        for i in range(num_iter):
            iter_start_time = time.perf_counter()
            if num_iter > 1:
                import gc
                gc.collect()
                logger.info("---- %d/%d ----" % (i + 1, num_iter))
                # Don't cheat -- the app instance caches a bunch of stuff
                # so we need to create a fresh one.
                ctx.app = ctx.appfactory.create()

            try:
                records = self._doBake(ctx, out_dir)
            except Exception as ex:
                if ctx.app.debug:
                    logger.exception(ex)
                else:
                    logger.error(str(ex))
                return 1

            success = success and records.success
            avg_stats.mergeStats(records.stats)
            avg_stats.stepTimerSince('Total', iter_start_time)

        # Show merged stats.
        if ctx.args.show_stats:
            if num_iter > 1:
                _average_stats(avg_stats, num_iter)

            logger.info("-------------------")
            logger.info("Timing information:")
            _show_stats(avg_stats)

        # All done.
        logger.info('-------------------------')
        logger.info(format_timed(start_time, 'done baking'))
        return 0 if success else 1
Example #9
    def run(self, ctx):
        if ctx.args.foodtruck:
            if ctx.args.html_only:
                raise Exception("`--foodtruck` and `--html-only` can't "
                                "both be specified.")
            if ctx.args.output:
                raise Exception("`--foodtruck` and `--output` can't "
                                "both be specified.")

            ctx.args.assets_only = True
            ctx.args.output = os.path.join(ctx.app.root_dir, CACHE_DIR,
                                           'foodtruck', 'server')

        out_dir = (ctx.args.output or
                   os.path.join(ctx.app.root_dir, '_counter'))

        success = True
        ctx.timers = {}
        start_time = time.perf_counter()
        try:
            # Bake the site sources.
            if not ctx.args.assets_only:
                success = success & self._bakeSources(ctx, out_dir)

            # Bake the assets.
            if not ctx.args.html_only:
                success = success & self._bakeAssets(ctx, out_dir)

            # Show merged timers.
            if ctx.args.show_timers:
                logger.info("-------------------")
                logger.info("Timing information:")
                _show_timers(ctx.timers)

            # All done.
            logger.info('-------------------------')
            logger.info(format_timed(start_time, 'done baking'))
            return 0 if success else 1
        except Exception as ex:
            if ctx.app.debug:
                logger.exception(ex)
            else:
                logger.error(str(ex))
            return 1
Example #10
    def bake(self, ctx):
        if not self.page_ref.exists:
            logger.debug(
                    "No page found at '%s', skipping taxonomy '%s'." %
                    (self.page_ref, self.taxonomy.name))
            return

        logger.debug("Baking %s pages...", self.taxonomy.name)
        with format_timed_scope(logger, 'gathered taxonomy terms',
                                level=logging.DEBUG, colored=False):
            all_terms, dirty_terms = self._buildDirtyTaxonomyTerms(ctx)

        start_time = time.perf_counter()
        page_count = self._bakeTaxonomyTerms(ctx, all_terms, dirty_terms)
        if page_count > 0:
            logger.info(format_timed(
                start_time,
                "baked %d %s pages for %s." % (
                    page_count, self.taxonomy.term_name, self.source_name)))
Example #11
    def processNode(self, node):
        full_path = self._getNodePath(node)
        proc = node.getProcessor()
        if proc.is_bypassing_structured_processing:
            try:
                start_time = time.perf_counter()
                with proc.app.env.stats.timerScope(proc.__class__.__name__):
                    proc.process(full_path, self.out_dir)
                print_node(
                    node,
                    format_timed(start_time,
                                 "(bypassing structured processing)",
                                 colored=False))
                return True
            except Exception as e:
                raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e

        # All outputs of a node must go to the same directory, so we can get
        # the output directory off of the first output.
        base_out_dir = self._getNodeBaseDir(node.outputs[0])
        rel_out_dir = os.path.dirname(node.path)
        out_dir = os.path.join(base_out_dir, rel_out_dir)
        if not os.path.isdir(out_dir):
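            # Tolerate races with other workers creating the same directory.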
            try:
                os.makedirs(out_dir, 0o755, exist_ok=True)
            except OSError:
                pass

        try:
            start_time = time.perf_counter()
            with proc.app.env.stats.timerScope(proc.__class__.__name__):
                proc_res = proc.process(full_path, out_dir)
            if proc_res is None:
                raise Exception("Processor '%s' didn't return a boolean "
                                "result value." % proc)
            if proc_res:
                print_node(node, "-> %s" % out_dir)
                return True
            else:
                print_node(node, "-> %s [clean]" % out_dir)
                return False
        except Exception as e:
            raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
Example #12
    def processNode(self, node):
        full_path = self._getNodePath(node)
        proc = node.getProcessor()
        if proc.is_bypassing_structured_processing:
            try:
                start_time = time.perf_counter()
                with proc.app.env.timerScope(proc.__class__.__name__):
                    proc.process(full_path, self.out_dir)
                print_node(
                        node,
                        format_timed(
                            start_time, "(bypassing structured processing)",
                            colored=False))
                return True
            except Exception as e:
                raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e

        # All outputs of a node must go to the same directory, so we can get
        # the output directory off of the first output.
        base_out_dir = self._getNodeBaseDir(node.outputs[0])
        rel_out_dir = os.path.dirname(node.path)
        out_dir = os.path.join(base_out_dir, rel_out_dir)
        if not os.path.isdir(out_dir):
            try:
                os.makedirs(out_dir, 0o755, exist_ok=True)
            except OSError:
                pass

        try:
            start_time = time.perf_counter()
            with proc.app.env.timerScope(proc.__class__.__name__):
                proc_res = proc.process(full_path, out_dir)
            if proc_res is None:
                raise Exception("Processor '%s' didn't return a boolean "
                                "result value." % proc)
            if proc_res:
                print_node(node, "-> %s" % out_dir)
                return True
            else:
                print_node(node, "-> %s [clean]" % out_dir)
                return False
        except Exception as e:
            raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
Example #13
    def build(self, path):
        start_time = time.perf_counter()
        tree_root = ProcessingTreeNode(path, list(self.processors))

        loop_guard = 100
        walk_stack = [tree_root]
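        # loop_guard caps iterations in case a processor chain cycles forever.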
        while len(walk_stack) > 0:
            loop_guard -= 1
            if loop_guard <= 0:
                raise ProcessingTreeError("Infinite loop detected!")

            cur_node = walk_stack.pop()
            proc = cur_node.getProcessor()

            # If the root tree node (and only that one) wants to bypass this
            # whole tree business, so be it.
            if proc.is_bypassing_structured_processing:
                if cur_node != tree_root:
                    raise ProcessingTreeError("Only root processors can "
                            "bypass structured processing.")
                break

            # Get the destination directory and output files.
            rel_dir, basename = os.path.split(cur_node.path)
            out_names = proc.getOutputFilenames(basename)
            if out_names is None:
                continue

            for n in out_names:
                out_node = ProcessingTreeNode(
                        os.path.join(rel_dir, n),
                        list(cur_node.available_procs),
                        cur_node.level + 1)
                cur_node.outputs.append(out_node)

                if proc.PROCESSOR_NAME != 'copy':
                    walk_stack.append(out_node)

        logger.debug(format_timed(
            start_time, "Built processing tree for: %s" % path,
            colored=False))
        return tree_root
Example #14
    def _bakeRealm(self, record, pool, realm, srclist):
        start_time = time.perf_counter()
        try:
            record.current.baked_count[realm] = 0

            all_factories = []
            for source in srclist:
                factories = source.getPageFactories()
                all_factories += [f for f in factories
                                  if f.path not in self.taxonomy_pages]

            self._loadRealmPages(record, pool, all_factories)
            self._renderRealmPages(record, pool, all_factories)
            self._bakeRealmPages(record, pool, realm, all_factories)
        finally:
            page_count = record.current.baked_count[realm]
            logger.info(format_timed(
                    start_time,
                    "baked %d %s pages." %
                    (page_count, REALM_NAMES[realm].lower())))
Example #15
    def _bakeRealm(self, pool, ppmngr, record_histories,
                   pp_pass_num, realm, pplist):
        start_time = time.perf_counter()

        job_count = 0
        job_descs = {}
        realm_name = REALM_NAMES[realm].lower()
        pool.userdata.cur_pass = pp_pass_num

        for ppinfo in pplist:
            src = ppinfo.source
            pp = ppinfo.pipeline
            jcctx = PipelineJobCreateContext(pp_pass_num, pp.record_name,
                                             record_histories)

            jobs, job_desc = pp.createJobs(jcctx)
            if jobs is not None:
                new_job_count = len(jobs)
                job_count += new_job_count
                pool.queueJobs(jobs)
                if job_desc:
                    job_descs.setdefault(job_desc, []).append(src.name)
            else:
                new_job_count = 0

            logger.debug(
                "Queued %d jobs for source '%s' using pipeline '%s' "
                "(%s)." %
                (new_job_count, src.name, pp.PIPELINE_NAME, realm_name))

        if job_count == 0:
            logger.debug("No jobs queued! Bailing out of this bake pass.")
            return

        pool.wait()

        logger.info(format_timed(
            start_time, "%d jobs completed (%s)." %
            (job_count, ', '.join(
                ['%s %s' % (d, ', '.join(sn))
                 for d, sn in job_descs.items()]))))
Example #16
    def runQueue(self, *, only_task=None, clear_queue=True):
        start_time = time.perf_counter()

        tasks = list(self.getTasks(only_task=only_task))
        for path, task_type, task_data in tasks:
            if not task_type:
                logger.error("Got task with no type: %s" % path)
                continue

            runner = self._getRunner(task_type)
            if runner is None:
                logger.error("No task runner for type: %s" % task_type)
                continue

            ctx = TaskContext()
            runner.runTask(task_data, ctx)

            if clear_queue:
                os.remove(path)

        logger.info(format_timed(start_time, "Ran %d tasks." % len(tasks)))
Example #17
    def _bakeRealm(self, record, pool, realm, srclist):
        start_time = time.perf_counter()
        try:
            record.current.baked_count[realm] = 0

            all_factories = []
            for source in srclist:
                factories = source.getPageFactories()
                all_factories += [
                    f for f in factories if f.path not in self.taxonomy_pages
                ]

            self._loadRealmPages(record, pool, all_factories)
            self._renderRealmPages(record, pool, all_factories)
            self._bakeRealmPages(record, pool, realm, all_factories)
        finally:
            page_count = record.current.baked_count[realm]
            logger.info(
                format_timed(
                    start_time, "baked %d %s pages." %
                    (page_count, REALM_NAMES[realm].lower())))
Example #18
    def runQueue(self, *, only_task=None, clear_queue=True):
        start_time = time.perf_counter()

        tasks = list(self.getTasks(only_task=only_task))
        for path, task_type, task_data in tasks:
            if not task_type:
                logger.error("Got task with no type: %s" % path)
                continue

            runner = self._getRunner(task_type)
            if runner is None:
                logger.error("No task runner for type: %s" % task_type)
                continue

            ctx = TaskContext()
            runner.runTask(task_data, ctx)

            if clear_queue:
                os.remove(path)

        logger.info(format_timed(
            start_time, "Ran %d tasks." % len(tasks)))
Example #19
    def bake(self):
        logger.debug("  Bake Output: %s" % self.out_dir)
        logger.debug("  Root URL: %s" % self.app.config.get('site/root'))

        # Get into bake mode.
        start_time = time.perf_counter()
        self.app.config.set('baker/is_baking', True)
        self.app.env.base_asset_url_format = '%uri%'

        # Make sure the output directory exists.
        if not os.path.isdir(self.out_dir):
            os.makedirs(self.out_dir, 0o755)

        # Load/create the bake record.
        record = TransitionalBakeRecord()
        record_cache = self.app.cache.getCache('baker')
        record_id = hashlib.md5(self.out_dir.encode('utf8')).hexdigest()
        record_name = record_id + '.record'
        if not self.force and record_cache.has(record_name):
            t = time.perf_counter()
            record.loadPrevious(record_cache.getCachePath(record_name))
            logger.debug(format_timed(
                    t, 'loaded previous bake record',
                    colored=False))
        record.current.success = True

        # Figure out if we need to clean the cache because important things
        # have changed.
        self._handleCacheValidity(record)

        # Gather all sources by realm -- we're going to bake each realm
        # separately so we can handle "overlaying" (i.e. one realm overrides
        # another realm's pages).
        sources_by_realm = {}
        for source in self.app.sources:
            srclist = sources_by_realm.setdefault(source.realm, [])
            srclist.append(source)

        # Bake the realms.
        realm_list = [REALM_USER, REALM_THEME]
        for realm in realm_list:
            srclist = sources_by_realm.get(realm)
            if srclist is not None:
                self._bakeRealm(record, realm, srclist)

        # Bake taxonomies.
        self._bakeTaxonomies(record)

        # Delete files from the output.
        self._handleDeletetions(record)

        # Backup previous records.
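        # Rotate: '.record' becomes '.1.record', '.1' becomes '.2', and so
        # on, keeping the last nine backups.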
        for i in range(8, -1, -1):
            suffix = '' if i == 0 else '.%d' % i
            record_path = record_cache.getCachePath(
                    '%s%s.record' % (record_id, suffix))
            if os.path.exists(record_path):
                record_path_next = record_cache.getCachePath(
                        '%s.%s.record' % (record_id, i + 1))
                if os.path.exists(record_path_next):
                    os.remove(record_path_next)
                os.rename(record_path, record_path_next)

        # Save the bake record.
        t = time.perf_counter()
        record.current.bake_time = time.time()
        record.current.out_dir = self.out_dir
        record.saveCurrent(record_cache.getCachePath(record_name))
        logger.debug(format_timed(t, 'saved bake record', colored=False))

        # All done.
        self.app.config.set('baker/is_baking', False)
        logger.debug(format_timed(start_time, 'done baking'))

        return record.detach()
Example #20
    def run(self, target,
            force=False, preview=False, extra_args=None,
            log_file=None, log_debug_info=False, append_log_file=False):
        start_time = time.perf_counter()

        # Get publisher for this target.
        pub = self.app.getPublisher(target)
        if pub is None:
            raise InvalidPublishTargetError(
                "No such publish target: %s" % target)

        # Will we need to bake first?
        bake_first = pub.config.get('bake', True)

        # Setup logging stuff.
        hdlr = None
        root_logger = logging.getLogger()
        if log_file and not preview:
            logger.debug("Adding file handler for: %s" % log_file)
            mode = 'w'
            if append_log_file:
                mode = 'a'
            hdlr = logging.FileHandler(log_file, mode=mode, encoding='utf8')
            root_logger.addHandler(hdlr)

        if log_debug_info:
            _log_debug_info(target, force, preview, extra_args)

        if not preview:
            logger.info("Deploying to %s" % target)
        else:
            logger.info("Previewing deployment to %s" % target)

        # Bake first if necessary.
        records = None
        was_baked = False
        bake_out_dir = os.path.join(self.app.root_dir, '_pub', target)
        if bake_first:
            if not preview:
                bake_start_time = time.perf_counter()
                logger.debug("Baking first to: %s" % bake_out_dir)

                from piecrust.baking.baker import Baker
                baker = Baker(
                    self.appfactory, self.app, bake_out_dir, force=force)
                records = baker.bake()
                was_baked = True

                if not records.success:
                    raise Exception(
                        "Error during baking, aborting publishing.")
                logger.info(format_timed(bake_start_time, "Baked website."))
            else:
                logger.info("Would bake to: %s" % bake_out_dir)

        # Publish!
        logger.debug(
            "Running publish target '%s' with publisher: %s" %
            (target, pub.PUBLISHER_NAME))
        pub_start_time = time.perf_counter()

        success = False
        ctx = PublishingContext()
        ctx.bake_out_dir = bake_out_dir
        ctx.bake_records = records
        ctx.was_baked = was_baked
        ctx.preview = preview
        ctx.args = extra_args
        try:
            success = pub.run(ctx)
        except Exception as ex:
            raise PublishingError(
                "Error publishing to target: %s" % target) from ex
        finally:
            if hdlr:
                root_logger.removeHandler(hdlr)
                hdlr.close()

        logger.info(format_timed(
            pub_start_time, "Ran publisher %s" % pub.PUBLISHER_NAME))

        if success:
            logger.info(format_timed(start_time, 'Deployed to %s' % target))
            return 0
        else:
            logger.error(format_timed(start_time, 'Failed to deploy to %s' %
                                      target))
            return 1
Example #21
    def run(self,
            target,
            force=False,
            preview=False,
            extra_args=None,
            log_file=None,
            log_debug_info=False,
            append_log_file=False):
        start_time = time.perf_counter()

        # Get publisher for this target.
        pub = self.app.getPublisher(target)
        if pub is None:
            raise InvalidPublishTargetError("No such publish target: %s" %
                                            target)

        # Will we need to bake first?
        bake_first = pub.config.get('bake', True)

        # Setup logging stuff.
        hdlr = None
        root_logger = logging.getLogger()
        if log_file and not preview:
            logger.debug("Adding file handler for: %s" % log_file)
            mode = 'w'
            if append_log_file:
                mode = 'a'
            hdlr = logging.FileHandler(log_file, mode=mode, encoding='utf8')
            root_logger.addHandler(hdlr)

        if log_debug_info:
            _log_debug_info(target, force, preview, extra_args)

        if not preview:
            logger.info("Deploying to %s" % target)
        else:
            logger.info("Previewing deployment to %s" % target)

        # Bake first if necessary.
        records = None
        was_baked = False
        bake_out_dir = os.path.join(self.app.root_dir, '_pub', target)
        if bake_first:
            if not preview:
                bake_start_time = time.perf_counter()
                logger.debug("Baking first to: %s" % bake_out_dir)

                from piecrust.baking.baker import Baker
                baker = Baker(self.appfactory,
                              self.app,
                              bake_out_dir,
                              force=force)
                records = baker.bake()
                was_baked = True

                if not records.success:
                    raise Exception(
                        "Error during baking, aborting publishing.")
                logger.info(format_timed(bake_start_time, "Baked website."))
            else:
                logger.info("Would bake to: %s" % bake_out_dir)

        # Publish!
        logger.debug("Running publish target '%s' with publisher: %s" %
                     (target, pub.PUBLISHER_NAME))
        pub_start_time = time.perf_counter()

        success = False
        ctx = PublishingContext()
        ctx.bake_out_dir = bake_out_dir
        ctx.bake_records = records
        ctx.was_baked = was_baked
        ctx.preview = preview
        ctx.args = extra_args
        try:
            success = pub.run(ctx)
        except Exception as ex:
            raise PublishingError("Error publishing to target: %s" %
                                  target) from ex
        finally:
            if hdlr:
                root_logger.removeHandler(hdlr)
                hdlr.close()

        logger.info(
            format_timed(pub_start_time,
                         "Ran publisher %s" % pub.PUBLISHER_NAME))

        if success:
            logger.info(format_timed(start_time, 'Deployed to %s' % target))
            return 0
        else:
            logger.error(
                format_timed(start_time, 'Failed to deploy to %s' % target))
            return 1
Example #22
def _run_chef(pre_args, argv):
    # Setup the app.
    start_time = time.perf_counter()
    root = None
    if pre_args.root:
        root = os.path.expanduser(pre_args.root)
    else:
        try:
            root = find_app_root()
        except SiteNotFoundError:
            root = None

    if not root:
        app = NullPieCrust()
    else:
        app = PieCrust(root, cache=(not pre_args.no_cache),
                       debug=pre_args.debug)

    # Build a hash for a custom cache directory.
    cache_key = 'default'

    # Handle custom configurations.
    if pre_args.config_variant is not None and not root:
        raise SiteNotFoundError("Can't apply any variant.")
    apply_variant_and_values(app, pre_args.config_variant,
                             pre_args.config_values)

    # Adjust the cache key.
    if pre_args.config_variant is not None:
        cache_key += ',variant=%s' % pre_args.config_variant
    if pre_args.config_values:
        for name, value in pre_args.config_values:
            cache_key += ',%s=%s' % (name, value)

    # Setup the arg parser.
    parser = argparse.ArgumentParser(
            prog='chef',
            description="The PieCrust chef manages your website.",
            formatter_class=argparse.RawDescriptionHelpFormatter)
    _setup_main_parser_arguments(parser)

    commands = sorted(app.plugin_loader.getCommands(),
                      key=lambda c: c.name)
    subparsers = parser.add_subparsers(title='list of commands')
    for c in commands:
        p = subparsers.add_parser(c.name, help=c.description)
        c.setupParser(p, app)
        p.set_defaults(func=c.checkedRun)
        p.set_defaults(cache_name=c.cache_name)

    help_cmd = next(filter(lambda c: c.name == 'help', commands), None)
    if help_cmd and help_cmd.has_topics:
        with io.StringIO() as epilog:
            epilog.write("additional help topics:\n")
            for name, desc in help_cmd.getTopics():
                print_help_item(epilog, name, desc)
            parser.epilog = epilog.getvalue()

    # Parse the command line.
    result = parser.parse_args(argv)
    logger.debug(format_timed(start_time, 'initialized PieCrust',
                              colored=False))

    # Print the help if no command was specified.
    if not hasattr(result, 'func'):
        parser.print_help()
        return 0

    # Use a customized cache for the command and current config.
    if result.cache_name != 'default' or cache_key != 'default':
        app.useSubCache(result.cache_name, cache_key)

    # Run the command!
    ctx = CommandContext(app, parser, result)
    ctx.config_variant = pre_args.config_variant
    ctx.config_values = pre_args.config_values

    exit_code = result.func(ctx)
    if exit_code is None:
        return 0
    if not isinstance(exit_code, int):
        logger.error("Got non-integer exit code: %s" % exit_code)
        return -1
    return exit_code
Example #23
    def _unsafeRun(self, job):
        start_time = time.perf_counter()
        pipeline = self.ctx.pipeline
        record = self.ctx.record

        rel_path = os.path.relpath(job.path, job.base_dir)
        previous_entry = record.getPreviousEntry(rel_path)

        record_entry = ProcessorPipelineRecordEntry(job.base_dir, rel_path)
        record.addEntry(record_entry)

        # Figure out if a previously processed file is overriding this one.
        # This can happen if a theme file (processed via a mount point)
        # is overridden in the user's website.
        if record.current.hasOverrideEntry(rel_path):
            record_entry.flags |= FLAG_OVERRIDEN
            logger.info(
                format_timed(start_time,
                             '%s [not baked, overridden]' % rel_path))
            return True

        processors = pipeline.getFilteredProcessors(
            job.mount_info['processors'])
        try:
            builder = ProcessingTreeBuilder(processors)
            tree_root = builder.build(rel_path)
            record_entry.flags |= FLAG_PREPARED
        except ProcessingTreeError as ex:
            msg = str(ex)
            logger.error("Error preparing %s:\n%s" % (rel_path, msg))
            while ex:
                record_entry.errors.append(str(ex))
                ex = ex.__cause__
            return False

        print_node(tree_root, recursive=True)
        leaves = tree_root.getLeaves()
        record_entry.rel_outputs = [l.path for l in leaves]
        record_entry.proc_tree = get_node_name_tree(tree_root)
        if tree_root.getProcessor().is_bypassing_structured_processing:
            record_entry.flags |= FLAG_BYPASSED_STRUCTURED_PROCESSING

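        # Re-process when forced, when the file is new, or when the previous
        # run failed; 'force_patterns' can also force specific paths.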
        force = (pipeline.force or previous_entry is None
                 or not previous_entry.was_processed_successfully)
        if not force:
            force = re_matchany(rel_path, pipeline.force_patterns)

        if force:
            tree_root.setState(STATE_DIRTY, True)

        try:
            runner = ProcessingTreeRunner(job.base_dir, pipeline.tmp_dir,
                                          pipeline.out_dir,
                                          self.ctx.pipeline_lock)
            if runner.processSubTree(tree_root):
                record_entry.flags |= FLAG_PROCESSED
                logger.info(
                    format_timed(start_time, "[%d] %s" % (self.wid, rel_path)))
            return True
        except ProcessingTreeError as ex:
            msg = str(ex)
            if isinstance(ex, ProcessorError):
                msg = str(ex.__cause__)
            logger.error("Error processing %s:\n%s" % (rel_path, msg))
            while ex:
                msg = re_ansicolors.sub('', str(ex))
                record_entry.errors.append(msg)
                ex = ex.__cause__
            return False
Example #24
    def _computeNodeState(self, node):
        if node.state != STATE_UNKNOWN:
            return

        proc = node.getProcessor()
        if (proc.is_bypassing_structured_processing or
                not proc.is_delegating_dependency_check):
            # This processor wants to handle things on its own...
            node.setState(STATE_DIRTY, False)
            return

        start_time = time.perf_counter()

        # Get paths and modification times for the input path and
        # all dependencies (if any).
        base_dir = self._getNodeBaseDir(node)
        full_path = os.path.join(base_dir, node.path)
        in_mtime = (full_path, os.path.getmtime(full_path))
        force_build = False
        try:
            deps = proc.getDependencies(full_path)
            if deps == FORCE_BUILD:
                force_build = True
            elif deps is not None:
                for dep in deps:
                    dep_mtime = os.path.getmtime(dep)
                    if dep_mtime > in_mtime[1]:
                        in_mtime = (dep, dep_mtime)
        except Exception as e:
            logger.warning("%s -- Will force-bake: %s" % (e, node.path))
            node.setState(STATE_DIRTY, True)
            return

        if force_build:
            # Just do what the processor told us to do.
            node.setState(STATE_DIRTY, True)
            message = "Processor requested a forced build."
            print_node(node, message)
        else:
            # Get paths and modification times for the outputs.
            message = None
            for o in node.outputs:
                full_out_path = self._getNodePath(o)
                if not os.path.isfile(full_out_path):
                    message = "Output '%s' doesn't exist." % o.path
                    break
                o_mtime = os.path.getmtime(full_out_path)
                if o_mtime < in_mtime[1]:
                    message = "Input '%s' is newer than output '%s'." % (
                            in_mtime[0], o.path)
                    break
            if message is not None:
                node.setState(STATE_DIRTY, True)
                message += " Re-processing sub-tree."
                print_node(node, message)
            else:
                node.setState(STATE_CLEAN, False)

        state = "dirty" if node.state == STATE_DIRTY else "clean"
        logger.debug(format_timed(start_time,
                                  "Computed node dirtyness: %s" % state,
                                  indent_level=node.level, colored=False))
Example #25
    def run(self, src_dir_or_file=None, *,
            delete=True, previous_record=None, save_record=True):
        start_time = time.perf_counter()

        # Get the list of processors for this run.
        processors = self.app.plugin_loader.getProcessors()
        if self.enabled_processors is not None:
            logger.debug("Filtering processors to: %s" %
                         self.enabled_processors)
            processors = get_filtered_processors(processors,
                                                 self.enabled_processors)
        if self.additional_processors_factories is not None:
            logger.debug("Adding %s additional processors." %
                         len(self.additional_processors_factories))
            for proc_fac in self.additional_processors_factories:
                proc = proc_fac()
                self.app.env.registerTimer(proc.__class__.__name__,
                                           raise_if_registered=False)
                proc.initialize(self.app)
                processors.append(proc)

        # Invoke pre-processors.
        pipeline_ctx = PipelineContext(-1, self.app, self.out_dir,
                                       self.tmp_dir, self.force)
        for proc in processors:
            proc.onPipelineStart(pipeline_ctx)

        # Pre-processors can define additional ignore patterns.
        self.ignore_patterns += make_re(
                pipeline_ctx._additional_ignore_patterns)

        # Create the pipeline record.
        record = TransitionalProcessorPipelineRecord()
        record_cache = self.app.cache.getCache('proc')
        record_name = (
                hashlib.md5(self.out_dir.encode('utf8')).hexdigest() +
                '.record')
        if previous_record:
            record.setPrevious(previous_record)
        elif not self.force and record_cache.has(record_name):
            with format_timed_scope(logger, 'loaded previous bake record',
                                    level=logging.DEBUG, colored=False):
                record.loadPrevious(record_cache.getCachePath(record_name))
        logger.debug("Got %d entries in process record." %
                     len(record.previous.entries))
        record.current.success = True
        record.current.processed_count = 0

        # Work!
        def _handler(res):
            entry = record.getCurrentEntry(res.path)
            assert entry is not None
            entry.flags = res.flags
            entry.proc_tree = res.proc_tree
            entry.rel_outputs = res.rel_outputs
            if entry.flags & FLAG_PROCESSED:
                record.current.processed_count += 1
            if res.errors:
                entry.errors += res.errors
                record.current.success = False

                rel_path = os.path.relpath(res.path, self.app.root_dir)
                logger.error("Errors found in %s:" % rel_path)
                for e in entry.errors:
                    logger.error("  " + e)

        jobs = []
        self._process(src_dir_or_file, record, jobs)
        pool = self._createWorkerPool()
        ar = pool.queueJobs(jobs, handler=_handler)
        ar.wait()

        # Shutdown the workers and get timing information from them.
        reports = pool.close()
        record.current.timers = {}
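        # Merge timers: sum per-name totals across workers and also keep
        # each worker's own numbers.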
        for i in range(len(reports)):
            timers = reports[i]
            if timers is None:
                continue

            worker_name = 'PipelineWorker_%d' % i
            record.current.timers[worker_name] = {}
            for name, val in timers['data'].items():
                main_val = record.current.timers.setdefault(name, 0)
                record.current.timers[name] = main_val + val
                record.current.timers[worker_name][name] = val

        # Invoke post-processors.
        pipeline_ctx.record = record.current
        for proc in processors:
            proc.onPipelineEnd(pipeline_ctx)

        # Handle deletions.
        if delete:
            for path, reason in record.getDeletions():
                logger.debug("Removing '%s': %s" % (path, reason))
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
                logger.info('[delete] %s' % path)

        # Finalize the process record.
        record.current.process_time = time.time()
        record.current.out_dir = self.out_dir
        record.collapseRecords()

        # Save the process record.
        if save_record:
            with format_timed_scope(logger, 'saved bake record',
                                    level=logging.DEBUG, colored=False):
                record.saveCurrent(record_cache.getCachePath(record_name))

        logger.info(format_timed(
                start_time,
                "processed %d assets." % record.current.processed_count))

        return record.detach()
Example #26
    def bake(self):
        logger.debug("  Bake Output: %s" % self.out_dir)
        logger.debug("  Root URL: %s" % self.app.config.get('site/root'))

        # Get into bake mode.
        start_time = time.perf_counter()
        self.app.config.set('baker/is_baking', True)
        self.app.env.base_asset_url_format = '%uri%'

        # Make sure the output directory exists.
        if not os.path.isdir(self.out_dir):
            os.makedirs(self.out_dir, 0o755)

        # Load/create the bake record.
        record = TransitionalBakeRecord()
        record_cache = self.app.cache.getCache('baker')
        record_id = hashlib.md5(self.out_dir.encode('utf8')).hexdigest()
        record_name = record_id + '.record'
        previous_record_path = None
        if not self.force and record_cache.has(record_name):
            with format_timed_scope(logger,
                                    "loaded previous bake record",
                                    level=logging.DEBUG,
                                    colored=False):
                previous_record_path = record_cache.getCachePath(record_name)
                record.loadPrevious(previous_record_path)
        record.current.success = True

        # Figure out if we need to clean the cache because important things
        # have changed.
        is_cache_valid = self._handleCacheValidity(record)
        if not is_cache_valid:
            previous_record_path = None

        # Pre-create all caches.
        for cache_name in ['app', 'baker', 'pages', 'renders']:
            self.app.cache.getCache(cache_name)

        # Gather all sources by realm -- we're going to bake each realm
        # separately so we can handle "overriding" (i.e. one realm overrides
        # another realm's pages, like the user realm overriding the theme
        # realm).
        sources_by_realm = {}
        for source in self.app.sources:
            srclist = sources_by_realm.setdefault(source.realm, [])
            srclist.append(source)

        # Create the worker processes.
        pool = self._createWorkerPool(previous_record_path)

        # Bake the realms.
        realm_list = [REALM_USER, REALM_THEME]
        for realm in realm_list:
            srclist = sources_by_realm.get(realm)
            if srclist is not None:
                self._bakeRealm(record, pool, realm, srclist)

        # Bake taxonomies.
        self._bakeTaxonomies(record, pool)

        # All done with the workers. Close the pool and get timing reports.
        reports = pool.close()
        record.current.timers = {}
        for i in range(len(reports)):
            timers = reports[i]
            if timers is None:
                continue

            worker_name = 'BakeWorker_%d' % i
            record.current.timers[worker_name] = {}
            for name, val in timers['data'].items():
                main_val = record.current.timers.setdefault(name, 0)
                record.current.timers[name] = main_val + val
                record.current.timers[worker_name][name] = val

        # Delete files from the output.
        self._handleDeletetions(record)

        # Backup previous records.
        for i in range(8, -1, -1):
            suffix = '' if i == 0 else '.%d' % i
            record_path = record_cache.getCachePath('%s%s.record' %
                                                    (record_id, suffix))
            if os.path.exists(record_path):
                record_path_next = record_cache.getCachePath(
                    '%s.%s.record' % (record_id, i + 1))
                if os.path.exists(record_path_next):
                    os.remove(record_path_next)
                os.rename(record_path, record_path_next)

        # Save the bake record.
        with format_timed_scope(logger,
                                "saved bake record.",
                                level=logging.DEBUG,
                                colored=False):
            record.current.bake_time = time.time()
            record.current.out_dir = self.out_dir
            record.saveCurrent(record_cache.getCachePath(record_name))

        # All done.
        self.app.config.set('baker/is_baking', False)
        logger.debug(format_timed(start_time, 'done baking'))

        return record.detach()
Example #27
    def run(self, src_dir_or_file=None, *,
            delete=True, previous_record=None, save_record=True):
        # Invoke pre-processors.
        for proc in self.processors:
            proc.onPipelineStart(self)

        # Sort our processors again in case the pre-process step involved
        # patching the processors with some new ones.
        self.processors.sort(key=lambda p: p.priority)

        # Create the pipeline record.
        record = TransitionalProcessorPipelineRecord()
        record_cache = self.app.cache.getCache('proc')
        record_name = (
                hashlib.md5(self.out_dir.encode('utf8')).hexdigest() +
                '.record')
        if previous_record:
            record.setPrevious(previous_record)
        elif not self.force and record_cache.has(record_name):
            t = time.perf_counter()
            record.loadPrevious(record_cache.getCachePath(record_name))
            logger.debug(format_timed(t, 'loaded previous bake record',
                         colored=False))
        logger.debug("Got %d entries in process record." %
                len(record.previous.entries))

        # Create the workers.
        pool = []
        queue = Queue()
        abort = threading.Event()
        pipeline_lock = threading.Lock()
        for i in range(self.num_workers):
            ctx = ProcessingWorkerContext(self, record,
                                          queue, abort, pipeline_lock)
            worker = ProcessingWorker(i, ctx)
            worker.start()
            pool.append(worker)

        if src_dir_or_file is not None:
            # Process only the given path.
            # Find out what mount point this is in.
            for name, info in self.mounts.items():
                path = info['path']
                if src_dir_or_file[:len(path)] == path:
                    base_dir = path
                    mount_info = info
                    break
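            # This 'else' belongs to the 'for' loop: it runs only when no
            # mount point matched.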
            else:
                known_roots = [i['path'] for i in self.mounts.values()]
                raise Exception("Input path '%s' is not part of any known "
                                "mount point: %s" %
                                (src_dir_or_file, known_roots))

            ctx = ProcessingContext(base_dir, mount_info, queue, record)
            logger.debug("Initiating processing pipeline on: %s" % src_dir_or_file)
            if os.path.isdir(src_dir_or_file):
                self.processDirectory(ctx, src_dir_or_file)
            elif os.path.isfile(src_dir_or_file):
                self.processFile(ctx, src_dir_or_file)

        else:
            # Process everything.
            for name, info in self.mounts.items():
                path = info['path']
                ctx = ProcessingContext(path, info, queue, record)
                logger.debug("Initiating processing pipeline on: %s" % path)
                self.processDirectory(ctx, path)

        # Wait on all workers.
        record.current.success = True
        for w in pool:
            w.join()
            record.current.success &= w.success
        if abort.is_set():
            raise Exception("Worker pool was aborted.")

        # Handle deletions.
        if delete:
            for path, reason in record.getDeletions():
                logger.debug("Removing '%s': %s" % (path, reason))
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
                logger.info('[delete] %s' % path)

        # Invoke post-processors.
        for proc in self.processors:
            proc.onPipelineEnd(self)

        # Finalize the process record.
        record.current.process_time = time.time()
        record.current.out_dir = self.out_dir
        record.collapseRecords()

        # Save the process record.
        if save_record:
            t = time.perf_counter()
            record.saveCurrent(record_cache.getCachePath(record_name))
            logger.debug(format_timed(t, 'saved bake record', colored=False))

        return record.detach()
Example #28
    def _unsafeRun(self, job):
        start_time = time.perf_counter()
        pipeline = self.ctx.pipeline
        record = self.ctx.record

        rel_path = os.path.relpath(job.path, job.base_dir)
        previous_entry = record.getPreviousEntry(rel_path)

        record_entry = ProcessorPipelineRecordEntry(job.base_dir, rel_path)
        record.addEntry(record_entry)

        # Figure out if a previously processed file is overriding this one.
        # This can happen if a theme file (processed via a mount point)
        # is overridden in the user's website.
        if record.current.hasOverrideEntry(rel_path):
            record_entry.flags |= FLAG_OVERRIDEN
            logger.info(format_timed(start_time,
                    '%s [not baked, overridden]' % rel_path))
            return True

        processors = pipeline.getFilteredProcessors(
                job.mount_info['processors'])
        try:
            builder = ProcessingTreeBuilder(processors)
            tree_root = builder.build(rel_path)
            record_entry.flags |= FLAG_PREPARED
        except ProcessingTreeError as ex:
            msg = str(ex)
            logger.error("Error preparing %s:\n%s" % (rel_path, msg))
            while ex:
                record_entry.errors.append(str(ex))
                ex = ex.__cause__
            return False

        print_node(tree_root, recursive=True)
        leaves = tree_root.getLeaves()
        record_entry.rel_outputs = [l.path for l in leaves]
        record_entry.proc_tree = get_node_name_tree(tree_root)
        if tree_root.getProcessor().is_bypassing_structured_processing:
            record_entry.flags |= FLAG_BYPASSED_STRUCTURED_PROCESSING

        force = (pipeline.force or previous_entry is None or
                 not previous_entry.was_processed_successfully)
        if not force:
            force = re_matchany(rel_path, pipeline.force_patterns)

        if force:
            tree_root.setState(STATE_DIRTY, True)

        try:
            runner = ProcessingTreeRunner(
                    job.base_dir, pipeline.tmp_dir,
                    pipeline.out_dir, self.ctx.pipeline_lock)
            if runner.processSubTree(tree_root):
                record_entry.flags |= FLAG_PROCESSED
                logger.info(format_timed(
                    start_time, "[%d] %s" % (self.wid, rel_path)))
            return True
        except ProcessingTreeError as ex:
            msg = str(ex)
            if isinstance(ex, ProcessorError):
                msg = str(ex.__cause__)
            logger.error("Error processing %s:\n%s" % (rel_path, msg))
            while ex:
                msg = re_ansicolors.sub('', str(ex))
                record_entry.errors.append(msg)
                ex = ex.__cause__
            return False
Example #29
def _run_chef(pre_args, argv):
    # Setup the app.
    start_time = time.perf_counter()
    root = pre_args.root
    if root is None:
        try:
            root = find_app_root()
        except SiteNotFoundError:
            root = None

    if not root:
        app = NullPieCrust()
    else:
        app = PieCrust(root, cache=pre_args.cache, debug=pre_args.debug)

    # Build a hash for a custom cache directory.
    cache_key = 'default'

    # Handle custom configurations.
    if pre_args.config_variant is not None and not root:
        raise SiteNotFoundError("Can't apply any variant.")
    apply_variant_and_values(app, pre_args.config_variant,
                             pre_args.config_values)

    # Adjust the cache key.
    if pre_args.config_variant is not None:
        cache_key += ',variant=%s' % pre_args.config_variant
    for name, value in pre_args.config_values:
        cache_key += ',%s=%s' % (name, value)

    # Setup the arg parser.
    parser = argparse.ArgumentParser(
            prog='chef',
            description="The PieCrust chef manages your website.",
            formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
            '--version',
            action='version',
            version=('%(prog)s ' + APP_VERSION))
    parser.add_argument(
            '--root',
            help="The root directory of the website.")
    parser.add_argument(
            '--config',
            help="The configuration variant to use for this command.")
    parser.add_argument(
            '--config-set',
            help="Sets a specific site configuration setting.")
    parser.add_argument(
            '--debug',
            help="Show debug information.", action='store_true')
    parser.add_argument(
            '--no-cache',
            help="When applicable, disable caching.",
            action='store_true')
    parser.add_argument(
            '--quiet',
            help="Print only important information.",
            action='store_true')
    parser.add_argument(
            '--log',
            help="Send log messages to the specified file.")
    parser.add_argument(
            '--log-debug',
            help="Log debug messages to the log file.",
            action='store_true')

    commands = sorted(app.plugin_loader.getCommands(),
                      key=lambda c: c.name)
    subparsers = parser.add_subparsers(title='list of commands')
    for c in commands:
        p = subparsers.add_parser(c.name, help=c.description)
        c.setupParser(p, app)
        p.set_defaults(func=c.checkedRun)
        p.set_defaults(cache_name=c.cache_name)

    help_cmd = next(filter(lambda c: c.name == 'help', commands), None)
    if help_cmd and help_cmd.has_topics:
        with io.StringIO() as epilog:
            epilog.write("additional help topics:\n")
            for name, desc in help_cmd.getTopics():
                print_help_item(epilog, name, desc)
            parser.epilog = epilog.getvalue()

    # Parse the command line.
    result = parser.parse_args(argv)
    logger.debug(format_timed(start_time, 'initialized PieCrust',
                              colored=False))

    # Print the help if no command was specified.
    if not hasattr(result, 'func'):
        parser.print_help()
        return 0

    # Use a customized cache for the command and current config.
    if result.cache_name != 'default' or cache_key != 'default':
        app.useSubCache(result.cache_name, cache_key)

    # Run the command!
    ctx = CommandContext(app, parser, result)
    ctx.config_variant = pre_args.config_variant
    ctx.config_values = pre_args.config_values

    exit_code = result.func(ctx)
    if exit_code is None:
        return 0
    if not isinstance(exit_code, int):
        logger.error("Got non-integer exit code: %s" % exit_code)
        return -1
    return exit_code
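
The command dispatch above relies on a standard argparse pattern: each subcommand registers its callback with set_defaults(func=...), and the entry point invokes result.func(...) only if a command was given. A stripped-down, runnable sketch follows; the 'bake' command and its callback are hypothetical.

import argparse

def run_bake(args):
    # Hypothetical command callback; the real commands receive a
    # CommandContext instead of the raw parsed arguments.
    print("baking...")
    return 0

parser = argparse.ArgumentParser(prog='chef')
subparsers = parser.add_subparsers(title='list of commands')
p = subparsers.add_parser('bake', help="Bake the website.")
p.set_defaults(func=run_bake)

result = parser.parse_args(['bake'])
if not hasattr(result, 'func'):
    parser.print_help()
else:
    exit_code = result.func(result) or 0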
Exemplo n.º 30
    def bake(self):
        start_time = time.perf_counter()

        # Setup baker.
        logger.debug("  Bake Output: %s" % self.out_dir)
        logger.debug("  Root URL: %s" % self.app.config.get('site/root'))

        # Get into bake mode.
        self.app.config.set('baker/is_baking', True)
        self.app.config.set('site/asset_url_format', '%page_uri%/%filename%')

        stats = self.app.env.stats
        stats.registerTimer('LoadSourceContents', raise_if_registered=False)
        stats.registerTimer('CacheTemplates', raise_if_registered=False)

        # Make sure the output directory exists.
        if not os.path.isdir(self.out_dir):
            os.makedirs(self.out_dir, 0o755)

        # Load/create the bake records.
        records_path = get_bake_records_path(
            self.app, self.out_dir)
        if not self.force and os.path.isfile(records_path):
            with format_timed_scope(logger, "loaded previous bake records",
                                    level=logging.DEBUG, colored=False):
                previous_records = load_records(records_path)
        else:
            previous_records = MultiRecord()
        current_records = MultiRecord()

        # Figure out if we need to clean the cache because important things
        # have changed.
        is_cache_valid = self._handleCacheValidity(previous_records,
                                                   current_records)
        if not is_cache_valid:
            previous_records = MultiRecord()

        # Create the bake records history which tracks what's up-to-date
        # or not since last time we baked to the given output folder.
        record_histories = MultiRecordHistory(
            previous_records, current_records)

        # Pre-create all caches.
        for cache_name in ['app', 'baker', 'pages', 'renders']:
            self.app.cache.getCache(cache_name)

        # Create the pipelines.
        ppmngr = self._createPipelineManager(record_histories)

        # Done with all the setup, let's start the actual work.
        logger.info(format_timed(start_time, "setup baker"))

        # Load all sources, pre-cache templates.
        load_start_time = time.perf_counter()
        self._populateTemplateCaches()
        logger.info(format_timed(load_start_time, "cache templates"))

        # Create the worker processes.
        pool_userdata = _PoolUserData(self, ppmngr)
        pool = self._createWorkerPool(records_path, pool_userdata)

        # Bake the realms.
        self._bakeRealms(pool, ppmngr, record_histories)

        # Handle deletions, collapse records, etc.
        ppmngr.postJobRun()
        ppmngr.deleteStaleOutputs()
        ppmngr.collapseRecords(self.keep_unused_records)

        # All done with the workers. Close the pool and get reports.
        pool_stats = pool.close()
        current_records.stats = _merge_execution_stats(stats, *pool_stats)

        # Shutdown the pipelines.
        ppmngr.shutdownPipelines()

        # Backup previous records, save the current ones.
        current_records.bake_time = time.time()
        current_records.out_dir = self.out_dir
        _save_bake_records(current_records, records_path,
                           rotate_previous=self.rotate_bake_records)

        # All done.
        self.app.config.set('baker/is_baking', False)
        logger.debug(format_timed(start_time, 'done baking'))

        return current_records
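
format_timed_scope is used above as a context manager that logs how long its block took. The real helper ships with piecrust, so the sketch below is only an assumption of its shape; the colored argument is accepted but ignored here.

import logging
import time
from contextlib import contextmanager

@contextmanager
def format_timed_scope(logger, message, level=logging.INFO, colored=True):
    # Assumed behavior: time the enclosed block and log it on exit.
    start = time.perf_counter()
    yield
    elapsed_ms = (time.perf_counter() - start) * 1000.0
    logger.log(level, "%s (%.1f ms)" % (message, elapsed_ms))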
Exemplo n.º 31
    def _computeNodeState(self, node):
        if node.state != STATE_UNKNOWN:
            return

        proc = node.getProcessor()
        if (proc.is_bypassing_structured_processing
                or not proc.is_delegating_dependency_check):
            # This processor wants to handle things on its own...
            node.setState(STATE_DIRTY, False)
            return

        start_time = time.perf_counter()

        # Get paths and modification times for the input path and
        # all dependencies (if any).
        base_dir = self._getNodeBaseDir(node)
        full_path = os.path.join(base_dir, node.path)
        in_mtime = (full_path, os.path.getmtime(full_path))
        force_build = False
        try:
            deps = proc.getDependencies(full_path)
            if deps == FORCE_BUILD:
                force_build = True
            elif deps is not None:
                for dep in deps:
                    dep_mtime = os.path.getmtime(dep)
                    if dep_mtime > in_mtime[1]:
                        in_mtime = (dep, dep_mtime)
        except Exception as e:
            logger.warning("%s -- Will force-bake: %s" % (e, node.path))
            node.setState(STATE_DIRTY, True)
            return

        if force_build:
            # Just do what the processor told us to do.
            node.setState(STATE_DIRTY, True)
            message = "Processor requested a forced build."
            print_node(node, message)
        else:
            # Get paths and modification times for the outputs.
            message = None
            for o in node.outputs:
                full_out_path = self._getNodePath(o)
                if not os.path.isfile(full_out_path):
                    message = "Output '%s' doesn't exist." % o.path
                    break
                o_mtime = os.path.getmtime(full_out_path)
                if o_mtime < in_mtime[1]:
                    message = "Input '%s' is newer than output '%s'." % (
                        in_mtime[0], o.path)
                    break
            if message is not None:
                node.setState(STATE_DIRTY, True)
                message += " Re-processing sub-tree."
                print_node(node, message)
            else:
                node.setState(STATE_CLEAN, False)

        if node.state == STATE_DIRTY:
            state = "dirty"
        elif node.state == STATE_CLEAN:
            state = "clean"
        else:
            state = "unknown"
        logger.debug(
            format_timed(start_time,
                         "Computed node dirtyness: %s" % state,
                         indent_level=node.level,
                         colored=False))
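
The dirtiness check above boils down to comparing modification times: a node is dirty when any output is missing or older than the newest input or dependency. A compact sketch of that rule, with hypothetical paths:

import os

def is_stale(input_paths, output_path):
    # An output is stale if it's missing or older than the newest input.
    if not os.path.isfile(output_path):
        return True
    newest_input = max(os.path.getmtime(p) for p in input_paths)
    return os.path.getmtime(output_path) < newest_input

# Hypothetical usage:
# is_stale(['pages/foo.md', 'templates/default.html'], '_counter/foo.html')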
Exemplo n.º 32
def _run_chef(pre_args):
    # Setup the app.
    start_time = time.perf_counter()
    root = pre_args.root
    if root is None:
        try:
            root = find_app_root()
        except SiteNotFoundError:
            root = None

    if not root:
        app = NullPieCrust()
    else:
        app = PieCrust(root, cache=pre_args.cache, debug=pre_args.debug)

    # Build a hash for a custom cache directory.
    cache_key = 'default'

    # Handle a configuration variant.
    if pre_args.config_variant is not None:
        if not root:
            raise SiteNotFoundError("Can't apply any variant.")
        app.config.applyVariant('variants/' + pre_args.config_variant)
        cache_key += ',variant=%s' % pre_args.config_variant
    for name, value in pre_args.config_values:
        logger.debug("Setting configuration '%s' to: %s" % (name, value))
        app.config.set(name, value)
        cache_key += ',%s=%s' % (name, value)

    # Setup the arg parser.
    parser = argparse.ArgumentParser(
        prog='chef',
        description="The PieCrust chef manages your website.",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version=('%(prog)s ' + APP_VERSION))
    parser.add_argument('--root', help="The root directory of the website.")
    parser.add_argument(
        '--config', help="The configuration variant to use for this command.")
    parser.add_argument('--config-set',
                        help="Sets a specific site configuration setting.")
    parser.add_argument('--debug',
                        help="Show debug information.",
                        action='store_true')
    parser.add_argument('--no-cache',
                        help="When applicable, disable caching.",
                        action='store_true')
    parser.add_argument('--quiet',
                        help="Print only important information.",
                        action='store_true')
    parser.add_argument('--log',
                        help="Send log messages to the specified file.")
    parser.add_argument('--log-debug',
                        help="Log debug messages to the log file.",
                        action='store_true')

    commands = sorted(app.plugin_loader.getCommands(), key=lambda c: c.name)
    subparsers = parser.add_subparsers(title='list of commands')
    for c in commands:
        p = subparsers.add_parser(c.name, help=c.description)
        c.setupParser(p, app)
        p.set_defaults(func=c.checkedRun)
        p.set_defaults(cache_name=c.cache_name)

    help_cmd = next(filter(lambda c: c.name == 'help', commands), None)
    if help_cmd and help_cmd.has_topics:
        with io.StringIO() as epilog:
            epilog.write("additional help topics:\n")
            for name, desc in help_cmd.getTopics():
                print_help_item(epilog, name, desc)
            parser.epilog = epilog.getvalue()

    # Parse the command line.
    result = parser.parse_args()
    logger.debug(
        format_timed(start_time, 'initialized PieCrust', colored=False))

    # Print the help if no command was specified.
    if not hasattr(result, 'func'):
        parser.print_help()
        return 0

    # Use a customized cache for the command and current config.
    if result.cache_name != 'default' or cache_key != 'default':
        app.useSubCache(result.cache_name, cache_key)

    # Run the command!
    ctx = CommandContext(app, parser, result)
    exit_code = result.func(ctx)
    if exit_code is None:
        return 0
    if not isinstance(exit_code, int):
        logger.error("Got non-integer exit code: %s" % exit_code)
        return -1
    return exit_code
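
The cache key above is a simple comma-separated fingerprint of the applied variant and overrides, so each configuration gets its own sub-cache. Worked through with hypothetical values:

cache_key = 'default'
config_variant = 'staging'                    # hypothetical variant
config_values = [('site/root', '/blog/')]     # hypothetical overrides
if config_variant is not None:
    cache_key += ',variant=%s' % config_variant
for name, value in config_values:
    cache_key += ',%s=%s' % (name, value)
print(cache_key)  # default,variant=staging,site/root=/blog/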
Exemplo n.º 33
    def run(self, target,
            force=False, preview=False, extra_args=None, log_file=None,
            applied_config_variant=None, applied_config_values=None):
        start_time = time.perf_counter()

        # Get publisher for this target.
        pub = self.app.getPublisher(target)
        if pub is None:
            raise InvalidPublishTargetError(
                    "No such publish target: %s" % target)

        # Will we need to bake first?
        bake_first = True
        if not pub.has_url_config:
            bake_first = pub.getConfigValue('bake', True)

        # Setup logging stuff.
        hdlr = None
        root_logger = logging.getLogger()
        if log_file and not preview:
            logger.debug("Adding file handler for: %s" % log_file)
            hdlr = logging.FileHandler(log_file, mode='w', encoding='utf8')
            root_logger.addHandler(hdlr)
        if not preview:
            logger.info("Deploying to %s" % target)
        else:
            logger.info("Previewing deployment to %s" % target)

        # Bake first if necessary.
        rec1 = None
        rec2 = None
        was_baked = False
        bake_out_dir = os.path.join(self.app.root_dir, '_pub', target)
        if bake_first:
            if not preview:
                bake_start_time = time.perf_counter()
                logger.debug("Baking first to: %s" % bake_out_dir)

                from piecrust.baking.baker import Baker
                baker = Baker(
                        self.app, bake_out_dir,
                        applied_config_variant=applied_config_variant,
                        applied_config_values=applied_config_values)
                rec1 = baker.bake()

                from piecrust.processing.pipeline import ProcessorPipeline
                proc = ProcessorPipeline(
                        self.app, bake_out_dir,
                        applied_config_variant=applied_config_variant,
                        applied_config_values=applied_config_values)
                rec2 = proc.run()

                was_baked = True

                if not rec1.success or not rec2.success:
                    raise Exception(
                            "Error during baking, aborting publishing.")
                logger.info(format_timed(bake_start_time, "Baked website."))
            else:
                logger.info("Would bake to: %s" % bake_out_dir)

        # Publish!
        logger.debug(
                "Running publish target '%s' with publisher: %s" %
                (target, pub.PUBLISHER_NAME))
        pub_start_time = time.perf_counter()

        ctx = PublishingContext()
        ctx.bake_out_dir = bake_out_dir
        ctx.bake_record = rec1
        ctx.processing_record = rec2
        ctx.was_baked = was_baked
        ctx.preview = preview
        ctx.args = extra_args
        try:
            pub.run(ctx)
        except Exception as ex:
            raise PublishingError(
                    "Error publishing to target: %s" % target) from ex
        finally:
            if hdlr:
                root_logger.removeHandler(hdlr)
                hdlr.close()

        logger.info(format_timed(
            pub_start_time, "Ran publisher %s" % pub.PUBLISHER_NAME))

        logger.info(format_timed(start_time, 'Deployed to %s' % target))
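
Logging to a file above follows the usual add-then-remove handler pattern: a FileHandler is attached to the root logger for the duration of the run and detached in a finally block. A self-contained sketch, with a made-up file name:

import logging

logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
hdlr = logging.FileHandler('publish.log', mode='w', encoding='utf8')
root_logger.addHandler(hdlr)
try:
    logger.info("Deploying...")
finally:
    root_logger.removeHandler(hdlr)
    hdlr.close()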
Exemplo n.º 34
    def bake(self):
        logger.debug("  Bake Output: %s" % self.out_dir)
        logger.debug("  Root URL: %s" % self.app.config.get('site/root'))

        # Get into bake mode.
        start_time = time.perf_counter()
        self.app.config.set('baker/is_baking', True)
        self.app.env.base_asset_url_format = '%uri%'

        # Make sure the output directory exists.
        if not os.path.isdir(self.out_dir):
            os.makedirs(self.out_dir, 0o755)

        # Load/create the bake record.
        record = TransitionalBakeRecord()
        record_cache = self.app.cache.getCache('baker')
        record_id = hashlib.md5(self.out_dir.encode('utf8')).hexdigest()
        record_name = record_id + '.record'
        previous_record_path = None
        if not self.force and record_cache.has(record_name):
            with format_timed_scope(logger, "loaded previous bake record",
                                    level=logging.DEBUG, colored=False):
                previous_record_path = record_cache.getCachePath(record_name)
                record.loadPrevious(previous_record_path)
        record.current.success = True

        # Figure out if we need to clean the cache because important things
        # have changed.
        is_cache_valid = self._handleCacheValidity(record)
        if not is_cache_valid:
            previous_record_path = None

        # Pre-create all caches.
        for cache_name in ['app', 'baker', 'pages', 'renders']:
            self.app.cache.getCache(cache_name)

        # Gather all sources by realm -- we're going to bake each realm
        # separately so we can handle "overriding" (i.e. one realm overrides
        # another realm's pages, like the user realm overriding the theme
        # realm).
        sources_by_realm = {}
        for source in self.app.sources:
            srclist = sources_by_realm.setdefault(source.realm, [])
            srclist.append(source)

        # Create the worker processes.
        pool = self._createWorkerPool(previous_record_path)

        # Bake the realms.
        realm_list = [REALM_USER, REALM_THEME]
        for realm in realm_list:
            srclist = sources_by_realm.get(realm)
            if srclist is not None:
                self._bakeRealm(record, pool, realm, srclist)

        # Bake taxonomies.
        self._bakeTaxonomies(record, pool)

        # All done with the workers. Close the pool and get timing reports.
        reports = pool.close()
        record.current.timers = {}
        for i, timers in enumerate(reports):
            if timers is None:
                continue

            worker_name = 'BakeWorker_%d' % i
            record.current.timers[worker_name] = {}
            for name, val in timers['data'].items():
                main_val = record.current.timers.setdefault(name, 0)
                record.current.timers[name] = main_val + val
                record.current.timers[worker_name][name] = val

        # Delete files from the output.
        self._handleDeletetions(record)

        # Backup previous records.
        for i in range(8, -1, -1):
            suffix = '' if i == 0 else '.%d' % i
            record_path = record_cache.getCachePath(
                    '%s%s.record' % (record_id, suffix))
            if os.path.exists(record_path):
                record_path_next = record_cache.getCachePath(
                        '%s.%s.record' % (record_id, i + 1))
                if os.path.exists(record_path_next):
                    os.remove(record_path_next)
                os.rename(record_path, record_path_next)

        # Save the bake record.
        with format_timed_scope(logger, "saved bake record.",
                                level=logging.DEBUG, colored=False):
            record.current.bake_time = time.time()
            record.current.out_dir = self.out_dir
            record.saveCurrent(record_cache.getCachePath(record_name))

        # All done.
        self.app.config.set('baker/is_baking', False)
        logger.debug(format_timed(start_time, 'done baking'))

        return record.detach()
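
The backup loop above rotates old records by shifting each numeric suffix up by one, deleting the oldest, so at most nine previous records survive. The same logic as a standalone function, with an assumed flat directory layout:

import os

def rotate_records(cache_dir, record_id, keep=9):
    # Shift 'id.record' -> 'id.1.record' -> ... so the newest previous
    # record is preserved and the oldest backup is dropped.
    for i in range(keep - 1, -1, -1):
        suffix = '' if i == 0 else '.%d' % i
        record_path = os.path.join(
            cache_dir, '%s%s.record' % (record_id, suffix))
        if os.path.exists(record_path):
            record_path_next = os.path.join(
                cache_dir, '%s.%d.record' % (record_id, i + 1))
            if os.path.exists(record_path_next):
                os.remove(record_path_next)
            os.rename(record_path, record_path_next)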
Exemplo n.º 35
    def run(self,
            src_dir_or_file=None,
            *,
            delete=True,
            previous_record=None,
            save_record=True):
        start_time = time.perf_counter()

        # Get the list of processors for this run.
        processors = self.app.plugin_loader.getProcessors()
        if self.enabled_processors is not None:
            logger.debug("Filtering processors to: %s" %
                         self.enabled_processors)
            processors = get_filtered_processors(processors,
                                                 self.enabled_processors)
        if self.additional_processors_factories is not None:
            logger.debug("Adding %s additional processors." %
                         len(self.additional_processors_factories))
            for proc_fac in self.additional_processors_factories:
                proc = proc_fac()
                self.app.env.registerTimer(proc.__class__.__name__,
                                           raise_if_registered=False)
                proc.initialize(self.app)
                processors.append(proc)

        # Invoke pre-processors.
        pipeline_ctx = PipelineContext(-1, self.app, self.out_dir,
                                       self.tmp_dir, self.force)
        for proc in processors:
            proc.onPipelineStart(pipeline_ctx)

        # Pre-processors can define additional ignore patterns.
        self.ignore_patterns += make_re(
            pipeline_ctx._additional_ignore_patterns)

        # Create the pipeline record.
        record = TransitionalProcessorPipelineRecord()
        record_cache = self.app.cache.getCache('proc')
        record_name = (hashlib.md5(self.out_dir.encode('utf8')).hexdigest() +
                       '.record')
        if previous_record:
            record.setPrevious(previous_record)
        elif not self.force and record_cache.has(record_name):
            with format_timed_scope(logger,
                                    'loaded previous bake record',
                                    level=logging.DEBUG,
                                    colored=False):
                record.loadPrevious(record_cache.getCachePath(record_name))
        logger.debug("Got %d entries in process record." %
                     len(record.previous.entries))
        record.current.success = True
        record.current.processed_count = 0

        # Work!
        def _handler(res):
            entry = record.getCurrentEntry(res.path)
            assert entry is not None
            entry.flags = res.flags
            entry.proc_tree = res.proc_tree
            entry.rel_outputs = res.rel_outputs
            if entry.flags & FLAG_PROCESSED:
                record.current.processed_count += 1
            if res.errors:
                entry.errors += res.errors
                record.current.success = False

                rel_path = os.path.relpath(res.path, self.app.root_dir)
                logger.error("Errors found in %s:" % rel_path)
                for e in entry.errors:
                    logger.error("  " + e)

        jobs = []
        self._process(src_dir_or_file, record, jobs)
        pool = self._createWorkerPool()
        ar = pool.queueJobs(jobs, handler=_handler)
        ar.wait()

        # Shutdown the workers and get timing information from them.
        reports = pool.close()
        record.current.timers = {}
        for i, timers in enumerate(reports):
            if timers is None:
                continue

            worker_name = 'PipelineWorker_%d' % i
            record.current.timers[worker_name] = {}
            for name, val in timers['data'].items():
                main_val = record.current.timers.setdefault(name, 0)
                record.current.timers[name] = main_val + val
                record.current.timers[worker_name][name] = val

        # Invoke post-processors.
        pipeline_ctx.record = record.current
        for proc in processors:
            proc.onPipelineEnd(pipeline_ctx)

        # Handle deletions.
        if delete:
            for path, reason in record.getDeletions():
                logger.debug("Removing '%s': %s" % (path, reason))
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
                logger.info('[delete] %s' % path)

        # Finalize the process record.
        record.current.process_time = time.time()
        record.current.out_dir = self.out_dir
        record.collapseRecords()

        # Save the process record.
        if save_record:
            with format_timed_scope(logger,
                                    'saved bake record',
                                    level=logging.DEBUG,
                                    colored=False):
                record.saveCurrent(record_cache.getCachePath(record_name))

        logger.info(
            format_timed(
                start_time,
                "processed %d assets." % record.current.processed_count))

        return record.detach()
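
The report-merging loop above keeps each worker's timings under a per-worker key while summing the same values into top-level totals. A sketch with made-up report data:

reports = [{'data': {'render': 1.2, 'io': 0.4}},   # worker 0 (made up)
           {'data': {'render': 0.8}},              # worker 1 (made up)
           None]                                   # worker with no report
timers = {}
for i, report in enumerate(reports):
    if report is None:
        continue
    worker_name = 'PipelineWorker_%d' % i
    timers[worker_name] = {}
    for name, val in report['data'].items():
        timers[name] = timers.setdefault(name, 0) + val
        timers[worker_name][name] = val
print(timers['render'])                    # 2.0 -- summed across workers
print(timers['PipelineWorker_0']['io'])    # 0.4 -- per-worker detail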
Exemplo n.º 36
def _run_chef(pre_args, argv):
    # Setup the app.
    root = None
    if pre_args.root:
        root = os.path.expanduser(pre_args.root)
    else:
        try:
            root = find_app_root(theme=pre_args.theme)
        except SiteNotFoundError:
            root = None

    # Can't apply custom configuration stuff if there's no website.
    if (pre_args.config_variants or pre_args.config_values) and not root:
        raise SiteNotFoundError(
            "Can't apply any configuration variant or value overrides, "
            "there is no website here.")

    if root:
        cache_key = None
        if not pre_args.no_cache:
            cache_key = _build_cache_key(pre_args)
        appfactory = PieCrustFactory(
            root,
            theme_site=pre_args.theme,
            cache=(not pre_args.no_cache),
            cache_key=cache_key,
            debug=pre_args.debug,
            config_variants=pre_args.config_variants,
            config_values=pre_args.config_values)
        app = appfactory.create()
    else:
        appfactory = None
        app = NullPieCrust(
            theme_site=pre_args.theme)

    # Setup the arg parser.
    parser = argparse.ArgumentParser(
        prog='chef',
        description="The PieCrust chef manages your website.",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    _setup_main_parser_arguments(parser)

    commands = sorted(app.plugin_loader.getCommands(),
                      key=lambda c: c.name)
    subparsers = parser.add_subparsers(title='list of commands')
    for c in commands:
        p = subparsers.add_parser(c.name, help=c.description)
        c.setupParser(p, app)
        p.set_defaults(func=c.checkedRun)
        p.set_defaults(cache_name=c.cache_name)

    help_cmd = next(filter(lambda c: c.name == 'help', commands), None)
    if help_cmd and help_cmd.has_topics:
        with io.StringIO() as epilog:
            epilog.write("additional help topics:\n")
            for name, desc in help_cmd.getTopics():
                print_help_item(epilog, name, desc)
            parser.epilog = epilog.getvalue()

    # Parse the command line.
    result = parser.parse_args(argv)
    logger.debug(format_timed(_chef_start_time, 'initialized PieCrust',
                              colored=False))

    # Print the help if no command was specified.
    if not hasattr(result, 'func'):
        parser.print_help()
        return 0

    # Do any custom setup the user wants.
    custom_env = app.config.get('chef/env')
    if custom_env:
        _setup_app_environment(app, custom_env)

    # Add some timing information.
    if app.env:
        app.env.stats.registerTimer('ChefStartup')
        app.env.stats.stepTimerSince('ChefStartup', _chef_start_time)

    # Run the command!
    ctx = CommandContext(appfactory, app, parser, result)
    exit_code = result.func(ctx)
    if exit_code is None:
        return 0
    if not isinstance(exit_code, int):
        logger.error("Got non-integer exit code: %s" % exit_code)
        return -1
    return exit_code
Exemplo n.º 37
    def run(self,
            src_dir_or_file=None,
            *,
            delete=True,
            previous_record=None,
            save_record=True):
        # Invoke pre-processors.
        for proc in self.processors:
            proc.onPipelineStart(self)

        # Sort our processors again in case the pre-process step involved
        # patching the processors with some new ones.
        self.processors.sort(key=lambda p: p.priority)

        # Create the pipeline record.
        record = TransitionalProcessorPipelineRecord()
        record_cache = self.app.cache.getCache('proc')
        record_name = (hashlib.md5(self.out_dir.encode('utf8')).hexdigest() +
                       '.record')
        if previous_record:
            record.setPrevious(previous_record)
        elif not self.force and record_cache.has(record_name):
            t = time.perf_counter()
            record.loadPrevious(record_cache.getCachePath(record_name))
            logger.debug(
                format_timed(t, 'loaded previous bake record', colored=False))
        logger.debug("Got %d entries in process record." %
                     len(record.previous.entries))

        # Create the workers.
        pool = []
        queue = Queue()
        abort = threading.Event()
        pipeline_lock = threading.Lock()
        for i in range(self.num_workers):
            ctx = ProcessingWorkerContext(self, record, queue, abort,
                                          pipeline_lock)
            worker = ProcessingWorker(i, ctx)
            worker.start()
            pool.append(worker)

        if src_dir_or_file is not None:
            # Process only the given path.
            # Find out what mount point this is in.
            for name, info in self.mounts.items():
                path = info['path']
                if src_dir_or_file.startswith(path):
                    base_dir = path
                    mount_info = info
                    break
            else:
                known_roots = [i['path'] for i in self.mounts.values()]
                raise Exception("Input path '%s' is not part of any known "
                                "mount point: %s" %
                                (src_dir_or_file, known_roots))

            ctx = ProcessingContext(base_dir, mount_info, queue, record)
            logger.debug("Initiating processing pipeline on: %s" %
                         src_dir_or_file)
            if os.path.isdir(src_dir_or_file):
                self.processDirectory(ctx, src_dir_or_file)
            elif os.path.isfile(src_dir_or_file):
                self.processFile(ctx, src_dir_or_file)

        else:
            # Process everything.
            for name, info in self.mounts.items():
                path = info['path']
                ctx = ProcessingContext(path, info, queue, record)
                logger.debug("Initiating processing pipeline on: %s" % path)
                self.processDirectory(ctx, path)

        # Wait on all workers.
        record.current.success = True
        for w in pool:
            w.join()
            record.current.success &= w.success
        if abort.is_set():
            raise Exception("Worker pool was aborted.")

        # Handle deletions.
        if delete:
            for path, reason in record.getDeletions():
                logger.debug("Removing '%s': %s" % (path, reason))
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
                logger.info('[delete] %s' % path)

        # Invoke post-processors.
        for proc in self.processors:
            proc.onPipelineEnd(self)

        # Finalize the process record.
        record.current.process_time = time.time()
        record.current.out_dir = self.out_dir
        record.collapseRecords()

        # Save the process record.
        if save_record:
            t = time.perf_counter()
            record.saveCurrent(record_cache.getCachePath(record_name))
            logger.debug(format_timed(t, 'saved bake record', colored=False))

        return record.detach()
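
The mount-point lookup above uses Python's for/else: the else clause runs only when the loop never hits break, that is, when no mount matches the input path. A self-contained sketch with hypothetical mounts:

mounts = {'assets': {'path': '/site/assets'},     # hypothetical mounts
          'theme': {'path': '/site/theme'}}
src = '/site/assets/css/main.css'
for name, info in mounts.items():
    path = info['path']
    if src.startswith(path):
        base_dir, mount_info = path, info
        break
else:
    known_roots = [i['path'] for i in mounts.values()]
    raise Exception("Input path '%s' is not part of any known "
                    "mount point: %s" % (src, known_roots))
print(base_dir)  # /site/assets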