Example #1
def test_prefix_counter():
    cnts = PerfCounters()
    v = cnts._prefix_counter('test')
    assert v == 'test'
    cnts = PerfCounters('prefix')
    v = cnts._prefix_counter('test')
    assert v == 'prefix_test'
Example #2
@pytest.fixture
def counters():
    cnts = PerfCounters()

    cnts.set('value', 42)
    cnts.set('value2', 43)

    cnts.start('time')
    time.sleep(0.2)

    cnts.start('time2')
    time.sleep(0.2)
    return cnts
Example #3
def test_counter_delta_auto_closing():
    cnts = PerfCounters()
    cnts.start('tmp')
    time.sleep(0.2)
    cnts.start('tmp2')
    time.sleep(0.2)
    cnts.stop('tmp')  # stop only one of two
    dic = cnts._get_counter_lists()
    assert len(dic['Timing counters']) == 2
    assert dic['Timing counters'][0][0] == 'tmp'
    assert dic['Timing counters'][0][1] > 0.4
    assert dic['Timing counters'][1][0] == 'tmp2'
    assert dic['Timing counters'][1][1] > 0.2
Example #4
def test_process_counters_values():
    cnts = PerfCounters()
    cnts.set('value', 42)
    cnts.set('value2', 43)
    dic = process_counters(cnts.counters,
                           cnts.laps,
                           sort_by='name',
                           reverse=False)
    assert len(dic[VALUE_COUNTERS]) == 2
    assert dic[VALUE_COUNTERS][0][0] == 'value'
    assert dic[VALUE_COUNTERS][0][1] == 42
    assert dic[VALUE_COUNTERS][1][0] == 'value2'
    assert dic[VALUE_COUNTERS][1][1] == 43
Example #5
def test_counter_sorting():
    cnts = PerfCounters()  # clean instance
    cnts.set('a', 42)
    cnts.set('b', 43)

    # value desc (default)
    dic = cnts._get_counter_lists()
    assert dic['Value counters'][0][1] > dic['Value counters'][1][1]

    # value asc
    dic = cnts._get_counter_lists(reverse=False)
    assert dic['Value counters'][0][1] < dic['Value counters'][1][1]

    # name desc
    dic = cnts._get_counter_lists(sort_by=PerfCounters.SORT_BY_NAME)
    assert dic['Value counters'][0][0] == 'b'

    # name asc
    dic = cnts._get_counter_lists(sort_by=PerfCounters.SORT_BY_NAME,
                                  reverse=False)
    assert dic['Value counters'][0][0] == 'a'
Example #6
def op_optimization_benchmark(population, op, num_runs, num_warmup=1):
    """Run OP with the different level of optimization supported and
    report results as PerfCounters counterts.

    Args:
        population (tensor): population to run the bench on
        op (OP): OP to be benchmarked
        num_runs (int): number of execution to perform.

    Returns:
        PerfCounters: performance counters for various optimization levels.
    """
    cprint('[warn-up]', 'yellow')
    os = optimization_support(op)
    max_level = os['optimization_level']
    cprint('Max optimization level %s' % (max_level), 'magenta')
    cprint('Optimization status:%s' % os['optimizations'], 'blue')

    cprint('[%s micro benchmark]' % str(op.__class__.__name__), 'yellow')

    total_tests = (num_warmup + num_runs) * (max_level + 1)
    pb = tqdm(total=total_tests, desc='running tests', unit='ops')
    cnts = PerfCounters()

    for level in range(max_level + 1):
        cname = 'Optimization level: %d' % level
        op.set_optimization_level(level)

        # warmup
        for _ in range(num_warmup):
            op(population)
            pb.update(1)

        # real test
        cnts.start(cname)
        for _ in range(num_runs):
            op(population)
            pb.update(1)
        cnts.stop(cname)

    pb.close()
    return cnts
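
A minimal usage sketch for the benchmark helper above (the MyOp class and the make_population() helper are placeholders for illustration, not part of the source; only op_optimization_benchmark() and PerfCounters.report() come from the snippets shown here): run the op a few timed iterations per optimization level, then print the collected timing counters.

# hedged driver sketch: MyOp and make_population are assumed names
population = make_population()   # assumed helper building the input tensor
cnts = op_optimization_benchmark(population, MyOp(), num_runs=10, num_warmup=2)
cnts.report()                    # one timing counter per optimization level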
Example #7
def test_counter_deadline(caplog):
    cnts = PerfCounters()
    cnts.start('deadline', warning_deadline=1)
    time.sleep(2)
    cnts.stop_all()
    assert "counter deadline deadline exceeded" in caplog.text
Example #8
@pytest.fixture
def counters():
    cnts = PerfCounters()

    cnts.set('value', 42)
    cnts.set('value2', 43)

    cnts.start('lap')

    cnts.start('time')
    time.sleep(0.2)
    cnts.lap('lap')

    cnts.start('time2')
    time.sleep(0.2)
    cnts.lap('lap')

    return cnts
Example #9
def test_counter_logging(caplog):
    cnts = PerfCounters()
    cnts.start('test', log=True)
    assert "test counter started" in caplog.text
    cnts.stop('test', log=True)
    assert "test counter stopped" in caplog.text
Example #10
def test_merge(counters):
    counters2 = PerfCounters()
    counters2.set('value3', 44)
    counters.merge(counters2)
    assert 'value3' in counters.counters
    assert counters.get('value3') == 44
Example #11
def test_assign_fn():
    NUM_TESTS = 3
    t = np.random.randint(0, 100, (100, 100))
    dst_tensor = np.zeros(t.shape, dtype=intx())
    slices = (slice(1, 30), slice(1, 25))
    indexes = slices2array(slices)
    updates = B.cast(t[slices], dtype=intx())

    r = B.assign(dst_tensor, updates, indexes)
    r2 = assign_tf(dst_tensor, updates, indexes)
    assert B.tensor_equal(r, r2)

    assign_tf(dst_tensor, updates, indexes)
    assign_xla(dst_tensor, updates, indexes)

    cnts = PerfCounters()
    cnts.start('basic')
    for _ in range(NUM_TESTS):
        B.assign(dst_tensor, updates, indexes)
    cnts.stop('basic')

    cnts.start('tf_fn')
    for _ in range(NUM_TESTS):
        assign_tf(dst_tensor, updates, indexes)
    cnts.stop('tf_fn')

    cnts.start('tf_xla')
    for _ in range(NUM_TESTS):
        assign_xla(dst_tensor, updates, indexes)
    cnts.stop('tf_xla')

    cnts.report()
Example #12
def test_duplicate_merge(counters):
    counters2 = PerfCounters()
    counters2.set('value', 44)
    with pytest.raises(ValueError):
        counters.merge(counters2)
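
A sketch of one way to avoid the ValueError raised above when two counter sets share names, assuming (as Example #1 suggests via _prefix_counter) that a PerfCounters instance created with a prefix stores its counters under 'prefix_<name>'; this is an illustration, not behavior confirmed by the source.

# hedged sketch: relies on the constructor prefix shown in _prefix_counter
counters = PerfCounters()
counters.set('value', 42)

worker_counters = PerfCounters('worker')   # counters become 'worker_value', ...
worker_counters.set('value', 44)

counters.merge(worker_counters)            # no name collision, so no ValueError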
Example #13
def test_randint_fn():
    NUM_TESTS = 3
    low = 10
    high = 1000
    shape = (100, 100, 100)

    randint_tf(low, high, shape)
    randint_xla(low, high, shape)

    v = randint_tf(low, high, shape)
    v2 = randint_tf(low, high, shape)
    assert not B.tensor_equal(v, v2)

    v = randint_xla(low, high, shape)
    v2 = randint_xla(low, high, shape)
    assert not B.tensor_equal(v, v2)

    cnts = PerfCounters()
    cnts.start('basic')
    for _ in range(NUM_TESTS):
        B.randint(low, high, shape)
    cnts.stop('basic')

    cnts.start('tf_fn')
    for _ in range(NUM_TESTS):
        randint_tf(low, high, shape)
    cnts.stop('tf_fn')

    cnts.start('tf_xla')
    for _ in range(NUM_TESTS):
        randint_xla(low, high, shape)
    cnts.stop('tf_xla')

    cnts.report()
Example #14
def test_counter_delta_auto_closing():
    cnts = PerfCounters()
    cnts.start('tmp')
    cnts.start('lap')
    time.sleep(0.2)
    cnts.lap('lap')
    cnts.start('tmp2')
    time.sleep(0.2)
    cnts.lap('lap')
    cnts.stop('tmp')

    time.sleep(0.1)
    cnts.stop('lap')

    dic = process_counters(cnts.counters, cnts.laps)

    assert len(dic[TIME_COUNTERS]) == 3  # 2 time + lap
    assert len(dic[LAPS_COUNTERS]) == 1
    assert dic[TIME_COUNTERS][0][0] == 'lap'
    assert dic[TIME_COUNTERS][0][1] >= 0.5
    assert dic[TIME_COUNTERS][1][0] == 'tmp'
    assert dic[TIME_COUNTERS][1][1] >= 0.4
    assert dic[TIME_COUNTERS][2][0] == 'tmp2'
    assert dic[TIME_COUNTERS][2][1] >= 0.2
Example #15
def test_no_lap():
    cnts = PerfCounters()
    cnts.start('tmp')
    dic = process_counters(cnts.counters, cnts.laps)
    assert LAPS_COUNTERS not in dic
Example #16
class SiteFab(object):
    """ Object representation of the site being built.

    SiteFab main class
    """

    SORT_BY_CREATION_DATE_DESC = 1
    SORT_BY_CREATION_DATE = 2
    SORT_BY_UPDATE_DATE_DESC = 3
    SORT_BY_UPDATE_DATE = 4

    OK = 1
    SKIPPED = 2
    ERROR = 3

    def __init__(self, config_filename, version='1.0'):

        # Timers
        self.cnts = PerfCounters()
        self.cnts.start('Overall')
        self.cnts.start('Init')

        # [configuration]
        self.current_dir = Path.cwd()

        if not config_filename:
            raise Exception("Supply a configuration filename")

        # make the config file path absolute to avoid weird cases
        self.config_filename = Path(config_filename).resolve()

        # exist?
        if self.config_filename.is_file():  # absolute path
            self.config = files.load_config(self.config_filename)
        else:
            utils.error("Config file %s not found" % self.config_filename)

        # site root dir is -1 from where the config is
        self.config.root_dir = self.config_filename.parents[1]
        self.config.build = utils.create_objdict()

        # expose sitefab version to the templates
        self.config.build.sitefab_version = version

        # [parser] #

        # initialize the parser config
        parser_tpl_path = Path(self.config.parser.template_dir)
        self.config.parser.templates_path = (self.config.root_dir /
                                             parser_tpl_path)

        self.config.parser = Parser.make_config(self.config.parser)

        # [plugins]

        # loading configuration
        plugins_config = defaultdict(dict)
        for d in self.config.plugins.config_dir:
            config_dir = self.config.root_dir / d

            # load the various config files from disk
            for config_fname in files.get_files_list(config_dir, '*.yaml'):
                plugin_name = config_fname.stem
                category = "%s%s" % (str(config_fname.parts[-3]).capitalize(),
                                     str(config_fname.parts[-2]).capitalize())
                config = files.load_config(config_fname)
                plugins_config[category][plugin_name] = config

        # where to redirect the standard python log
        debug_log_fname = self.get_logs_dir() / "debug.log"
        self.plugins = Plugins(self.get_plugins_dirs(), debug_log_fname,
                               plugins_config)

        # Store data generated by plugins that can be used later.
        self.plugin_data = {}
        self.plugin_results = defaultdict(int)

        # [template rendering engine] #
        self.jinja2 = Environment(loader=FileSystemLoader(
            str(self.get_template_dir())),
                                  extensions=['jinja2.ext.do'])

        # loading templates custom functions
        custom_filters = self.plugins.get_template_filters()
        for flt_name, flt_fct in custom_filters.items():
            self.jinja2.filters[flt_name] = flt_fct

        # [logger] #
        cfg = utils.create_objdict()
        cfg.output_dir = self.get_logs_dir()
        # use the logger's own templates, not the user's site templates.
        cfg.template_dir = (self.config.root_dir /
                            self.config.logger.template_dir)

        tpl_dir = self.config.root_dir / Path(self.config.logger.template_dir)
        self.config.logger.template_dir = tpl_dir
        cfg.log_template = "log.html"
        cfg.log_index_template = "log_index.html"  # noqa
        cfg.stats_template = "stats.html"
        self.logger = Logger(cfg, self)

        # [linter] #
        linter_config_filename = (self.config.root_dir /
                                  self.config.linter.configuration_file)
        linter_config = files.load_config(linter_config_filename)

        linter_config.report_template_file = (
            self.config.root_dir / self.config.linter.report_template_file)

        linter_config.output_dir = self.get_logs_dir()
        linter_config.site_output_dir = self.get_output_dir()
        self.linter = Linter(linter_config)

        # Finding content and assets.
        self.filenames = utils.create_objdict()
        self.filenames.posts = files.get_files_list(self.get_content_dir(),
                                                    "*.md")

        # Cleanup the output directories.
        files.clean_dir(self.get_output_dir())
        self.cnts.stop('Init')

    def preprocessing(self):
        "Perform pre-processing tasks"
        self.cnts.start('Preprocessing')
        self.execute_plugins([1], "SitePreparsing", " plugin")
        self.cnts.stop('Preprocessing')

    def parse(self):
        "parse md content into post objects"
        self.cnts.start('Parsing')
        filenames = self.filenames.posts
        self.posts = []

        # collections creation
        min_posts = self.config.collections.min_posts

        # posts_by_tag is what is rendered: it contains, for a given post,
        # both its tags and its category
        tlp = self.jinja2.get_template(self.config.collections.template)
        path = self.get_output_dir() / self.config.collections.output_dir

        self.posts_by_tag = PostCollections(
            site=self,
            template=tlp,
            output_path=path,
            web_path=self.config.collections.output_dir,
            min_posts=min_posts)

        self.posts_by_category = PostCollections(
            site=self, web_path=self.config.collections.output_dir)

        self.posts_by_template = PostCollections(site=self)

        self.posts_by_microdata = PostCollections(site=self)

        # Parsing
        cprint("\nParsing posts", "magenta")
        progress_bar = tqdm(total=len(filenames),
                            unit=' files',
                            desc="Files",
                            leave=True)
        errors = []
        post_idx = 1
        threads = self.config.threads
        # NOTE: passing self.config.parser might seem strange
        # but it is required to get the plugins working
        parser = Parser(self.config.parser, self)

        if threads > 1:
            todo_nlp_posts = []
            for filename in filenames:
                file_content = files.read_file(filename)
                post = parser.parse(file_content)
                post.filename = str(filename)
                post.id = post_idx
                post_idx += 1
                todo_nlp_posts.append(json.dumps(post))
            pool = Pool(threads)
            for parsed_post_json in pool.imap_unordered(
                    parse_post, todo_nlp_posts):
                parsed_post = json.loads(parsed_post_json)
                post = utils.dict_to_objdict(parsed_post)
                self.process_post(post)
                progress_bar.update(1)
            pool.close()
            pool.join()
        else:
            for filename in filenames:
                file_content = files.read_file(filename)
                post = parser.parse(file_content)
                post.filename = str(filename)
                post.id = post_idx
                parsed_post_json = parse_post(json.dumps(post))
                parsed_post = json.loads(parsed_post_json)
                self.process_post(utils.dict_to_objdict(parsed_post))
                progress_bar.update(1)
                post_idx += 1

        progress_bar.close()
        if len(errors):
            utils.error("\n".join(errors))

        self.cnts.stop('Parsing')

    def process_post(self, post):
        # do not process hidden post
        if post.meta.hidden:
            return True
        # Ignore posts set for future date
        if self.config.parser.skip_future and post.meta.creation_date_ts > int(
                time.time()):
            s = "Post in the future - skipping %s" % (post.meta.title)
            utils.warning(s)
            return True

        # Add posts to our list
        self.posts.append(post)
        # insert in template list
        if not post.meta.template:
            utils.warning("%s has no template defined. Can't render it" %
                          post.filename)
            return True

        self.posts_by_template.add(post.meta.template, post)

        # insert in microformat list
        if post.meta.microdata_type:
            self.posts_by_microdata.add(post.meta.microdata_type, post)

        # insert in category
        if post.meta.category:
            self.posts_by_category.add(post.meta.category, post)
            # tags is what is rendered
            self.posts_by_tag.add(post.meta.category, post)

        # insert in tags
        if post.meta.tags:
            for tag in post.meta.tags:
                self.posts_by_tag.add(tag, post)
        return True

    def process(self):
        "Processing stage"

        self.cnts.start('Processing')

        # Posts processing
        print("\nPosts plugins")
        self.execute_plugins(self.posts, "PostProcessor", " posts")

        # Collection processing
        print("\nCollections plugins")
        self.execute_plugins(self.posts_by_category.get_as_list(),
                             "CollectionProcessor", " categories")
        self.execute_plugins(self.posts_by_tag.get_as_list(),
                             "CollectionProcessor", " tags")
        self.execute_plugins(self.posts_by_template.get_as_list(),
                             "CollectionProcessor", " templates")
        self.execute_plugins(self.posts_by_microdata.get_as_list(),
                             "CollectionProcessor", " microdata")

        # site wide processing
        print("\nSite wide plugins")
        self.execute_plugins([1], "SiteProcessor", " site")
        self.cnts.stop('Processing')

    def render(self):
        "Rendering stage"

        self.cnts.start('Rendering')
        print("\nRendering posts")
        self.render_posts()

        print("\nRendering collections")
        self.posts_by_tag.render()

        print("\nAdditional Rendering")
        self.execute_plugins([1], "SiteRendering", " pages")
        self.cnts.stop('Rendering')

    def finale(self):
        "Final stage"

        # Write remaining logs
        self.logger.write_log_index()
        self.logger.write_stats()

        # Terminal recap
        cprint("Thread used:%s" % self.config.threads, "cyan")

        cprint("\nPerformance", 'magenta')
        self.cnts.stop_all()
        self.cnts.report()

        cprint("Content", 'magenta')
        cprint("|-Num posts: %s" % len(self.posts), "cyan")
        cprint(
            "|-Num categories: %s" %
            self.posts_by_category.get_num_collections(), "yellow")
        cprint("|-Num tags: %s" % self.posts_by_tag.get_num_collections(),
               "cyan")
        cprint(
            "|-Num templates: %s" %
            self.posts_by_template.get_num_collections(), "yellow")

        # PLUGINS
        cprint("\nPlugins", 'magenta')
        cprint("|-Num plugins: %s" % len(self.plugins.plugins_enabled), "cyan")

        if self.plugin_results[self.OK]:
            cprint("|-Num OK:%s " % self.plugin_results[self.OK], 'green')

        if self.plugin_results[self.SKIPPED]:
            cprint("|-Num Skipped:%s " % self.plugin_results[self.SKIPPED],
                   'yellow')

        if self.plugin_results[self.ERROR]:
            cprint("|-Num Errors:%s" % self.plugin_results[self.ERROR], 'red')

        # LINTER
        cprint("\nLinter", 'magenta')
        self.linter.render_report()  # write the logs

        cprint("|-Posts OK:%s" % self.linter.num_post_ok(), 'green')
        cprint(
            "|-Posts with warnings:%s" % self.linter.num_post_with_warnings(),
            'yellow')
        cprint("|-Posts with errors:%s" % self.linter.num_post_with_errors(),
               'red')

        # Output
        cprint("\nOutput", 'magenta')
        cprint('|-Log index: %s' % (self.get_logs_dir() / 'index.html'),
               'blue')
        cprint('|-Linter log: %s' % (self.get_logs_dir() / 'linter.html'),
               'cyan')
        cprint('|-Generated site: %s' % (self.get_output_dir()), 'yellow')

        cprint('Generation complete - site is ready :)', 'green')

    # Post functions #
    def render_posts(self):
        """Render posts using jinja2 templates."""
        for post in tqdm(self.posts, unit=' pages', miniters=1, desc="Posts"):
            template_name = "%s.html" % post.meta.template
            template = self.jinja2.get_template(template_name)
            rv = template.render(
                content=post.html,
                meta=post.meta,
                year=datetime.today().year,
                posts=self.posts,
                plugin_data=self.plugin_data,
                config=self.config,
                categories=self.posts_by_category.get_as_dict(),  # noqa: E501
                tags=self.posts_by_tag.get_as_dict(),
                templates=self.posts_by_template.get_as_dict(),  # noqa: E501
                microdata=self.posts_by_microdata.get_as_dict()  # noqa: E501
            )

            # Linting
            linter_results = self.linter.lint(post, rv, self)
            # Are we stopping on linting errors?
            if linter_results.has_errors and self.config.linter.stop_on_error:
                print(post.filename)
                for err in linter_results.info:
                    print("\t-%s:%s" % (err[0], err[1]))
                sys.exit(-1)

            perm_url = post.meta.permanent_url
            if len(perm_url) and perm_url[0] == '/':
                perm_url = perm_url[1:]
            path = self.get_output_dir() / perm_url
            files.write_file(path, 'index.html', rv)

    # Templates functions #
    def get_num_templates(self):
        "Return the number of templates loaded."
        return len(self.jinja2.list_templates())

    def get_template_list(self):
        "Return the list of templates loaded."
        return self.jinja2.list_templates()

    # Plugins #
    def execute_plugins(self, items, plugin_class, unit):
        """ Execute a given group of plugins

        Args:
            items (str): list of object to apply the plugins to
                         {collection, posts, None}
            plugin_class (str):  which type of plugins to execute
            unit (str): which unit to report in the progress bar

        Return:
            None
        """
        results = self.plugins.run_plugins(items, plugin_class, unit, self)
        self.plugins.display_execution_results(results, self)

        # sum all plugins data for recap
        for result in results:
            plugin, values = result
            for k, v in values.items():
                self.plugin_results[k] += v

    # Files and directories #
    def get_site_info(self):
        "Return site information."
        return self.site

    def get_config(self):
        "Return sitefab config."
        return self.config

    def get_output_dir(self):
        "return the absolute path of the ouput dir"
        return self.config.root_dir / self.config.dir.output

    def get_content_dir(self):
        "return the absolute path of the content dir"
        return self.config.root_dir / self.config.dir.content

    def get_template_dir(self):
        "return the absolute path of the template dir"
        return self.config.root_dir / self.config.dir.templates

    def get_logs_dir(self):
        "return the absolute path of the log dir"
        return self.config.root_dir / self.config.dir.logs

    def get_cache_dir(self):
        "return the absolute path of the cache dir"
        return self.config.root_dir / self.config.dir.cache

    def get_plugins_dirs(self):
        "return the absolute path for the plugins directories"
        dirs = []
        for d in self.config.plugins.dir:
            dirs.append(self.config.root_dir / d)
        return dirs
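
For orientation, a hypothetical driver showing the stage order implied by the class above. The method names and their sequence come straight from SiteFab, and finale() stops the 'Overall' counter started in __init__ and prints the PerfCounters report; the config path below is a placeholder, not from the source.

# hedged sketch of the expected call sequence
site = SiteFab('site/config.yaml')   # path is an assumption
site.preprocessing()                 # SitePreparsing plugins
site.parse()                         # parse markdown posts into post objects
site.process()                       # post / collection / site plugins
site.render()                        # render posts and collections
site.finale()                        # logs, linter report, perf counters recap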
Example #17
def analyze_post(post, debug=False):
    "Perform NLP analysis"

    counters = PerfCounters()
    nlp = create_objdict()

    # clean fields
    counters.start('cleanup')
    clean_fields = generate_clean_fields(post)
    nlp.clean_fields = clean_fields
    counters.stop('cleanup')

    # creating spacy docs
    counters.start('make_spacy_docs')
    all_cleaned_content = ' '.join([clean_fields.title, clean_fields.category,
                                    " ".join(clean_fields.tags),
                                    clean_fields.abstract, clean_fields.text])

    # overall terms
    cleaned_doc = make_spacy_doc(all_cleaned_content, lang=SPACY_MODEL)

    # title terms
    title_doc = make_spacy_doc(clean_fields.title, lang=SPACY_MODEL)

    # for statistics
    text_doc = make_spacy_doc(post.text, lang=SPACY_MODEL)

    counters.stop('make_spacy_docs')

    # terms extraction
    counters.start('extract_key_terms')
    nlp.terms = extract_key_terms(cleaned_doc, num_terms=NUM_TERMS,
                                  algo=TERM_EXTRACTOR_ALGO, ngrams=NGRAMS)

    # note: we restrict ngrams to one as we only want the lemmatized top terms.
    nlp.title_terms = extract_key_terms(title_doc, num_terms=NUM_TERMS,
                                        algo=TERM_EXTRACTOR_ALGO, ngrams=1)

    counters.stop('extract_key_terms')

    # text stats
    counters.start('text_stats')
    nlp.stats = compute_stats(text_doc)
    counters.stop('text_stats')
    if debug:
        counters.report()
    return nlp
Example #18
from perfcounters import PerfCounters
from random import randint
import time

print("=== End to end example ===\n")

# init counters
counters = PerfCounters()

num_iterations = randint(100000, 1000000)

# setting a value counter to a given value
counters.set('num_iterations', num_iterations)

# starting a timing counter
counters.start('loop')

for _ in range(num_iterations):
    v = randint(0, 1000000)

    # incrementing a value counter to sum the generated values
    counters.increment('total_value', v)

# stopping a timing counter
counters.stop('loop')

# reporting counters
counters.report()

print("\n=== Deadline ===\n")