Example #1
def generate_wiki_pages(sender, context):
    time_start = time.time()
    wiki_list = context['wikilist']
    structure = context['wiki']
    nice_list = parse_dict(structure, 0, [])

    number_written = 0
    for page in wiki_list:
        changes = context['cache_change_types']
        changed_meta = context['cache_changed_meta']
        refresh_triggers = []
        refresh_meta_triggers = []
        if 'WIKI' in context['WRITE_TRIGGERS']:
            refresh_triggers = context['WRITE_TRIGGERS']['WIKI']
        if 'WIKI' in context['META_WRITE_TRIGGERS']:
            refresh_meta_triggers = context['META_WRITE_TRIGGERS']['WIKI']

        cached = page[2].same_as_cache
        if any(i in changes for i in refresh_triggers):
            cached = False
        if any(
                any(m in merge_dictionaries(*c) for m in refresh_meta_triggers)
                for c in changed_meta):
            cached = False
        if not context.caching_enabled:
            cached = False
        if cached:
            continue
        number_written = number_written + 1
        filename = os.path.join('wiki', page[1].replace('.md', '.html'))
        content = page[2].content
        metadata = page[2].metadata
        path = page[0]
        breadcrumbs = []
        for name in path.split('/'):
            name_match = [item[1] for item in nice_list if item[0] == name]
            if len(name_match) > 0 and name_match[0] == "indexdir":
                breadcrumbs.append((name, "a"))
            else:
                breadcrumbs.append((name, "p"))
        file = page[1]
        writer = Writer(context,
                        filename,
                        "wikiarticle.html",
                        meta=metadata,
                        content=content,
                        file=file,
                        path=path,
                        links=nice_list,
                        breadcrumbs=breadcrumbs)
        writer.write_file()
    logger.info("Wrote %d out of %d wiki pages in %.3f seconds",
                number_written, len(wiki_list),
                time.time() - time_start)
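
The meta-change test in this pattern expects `changed_meta` to be a list of `(added, removed, modified)` dictionaries per cached file, with `merge_dictionaries` unioning its arguments. A minimal, self-contained sketch of just that check, using a stand-in `merge_dictionaries` (the real olm helper may differ in detail):

def merge_dictionaries(*dicts):
    # Stand-in: union the dicts, later arguments winning on key clashes.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged

# One entry per changed file: (added, removed, modified) metadata keys,
# where modified maps a key to its (old, new) values.
changed_meta = [({'title': 'New page'}, {}, {'date': ('2019-01-01', '2019-01-02')})]
refresh_meta_triggers = ['title', 'date']

stale = any(
    any(m in merge_dictionaries(*c) for m in refresh_meta_triggers)
    for c in changed_meta)
print(stale)  # True: a watched key changed, so the page is rewritten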
Example #2
File: page.py (Project: rhyst/olm)
def calc_cache_status(self, context=None):
    self.context = context if context is not None else self.context
    changes = self.context['cache_change_types']
    changed_meta = self.context['cache_changed_meta']
    refresh_triggers = []
    refresh_meta_triggers = []
    if 'PAGE' in self.context['WRITE_TRIGGERS']:
        refresh_triggers = self.context['WRITE_TRIGGERS']['PAGE']
    if 'PAGE' in self.context['META_WRITE_TRIGGERS']:
        refresh_meta_triggers = self.context['META_WRITE_TRIGGERS']['PAGE']
    if any(i in changes for i in refresh_triggers):
        self.same_as_cache = False
    if any(
            any(m in merge_dictionaries(*c) for m in refresh_meta_triggers)
            for c in changed_meta):
        self.same_as_cache = False
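
`calc_cache_status` only consults triggers when a 'PAGE' entry exists, so trigger sets are opt-in per content type. A hypothetical configuration illustrating the expected shape (the trigger names are borrowed from the caving plugin examples below, not from olm defaults):

# Hypothetical settings; real trigger names are defined by each plugin/site.
context['WRITE_TRIGGERS'] = {
    'PAGE': ['ARTICLE.NEW_FILE', 'ARTICLE.REMOVED_FILE'],
}
context['META_WRITE_TRIGGERS'] = {
    'PAGE': ['title', 'date', 'status'],
}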
Example #3
def write_file(self):
    """Write the article to a file"""
    os.makedirs(os.path.dirname(self.output_filepath), exist_ok=True)
    with codecs.open(self.output_filepath, 'w', encoding='utf-8') as html_file:
        html = self.template.render(**merge_dictionaries(self.context, self.kwargs))
        html_file.write(html)
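
`self.template` here is a compiled Jinja2 template, and the render call flattens the site context and the per-writer keyword arguments into one namespace. A minimal standalone equivalent with plain Jinja2 (template text invented for illustration):

from jinja2 import Template

context = {'SITENAME': 'olm site'}          # stand-in for the site context
kwargs = {'content': '<p>Hello</p>'}        # per-file values passed to the writer

template = Template('{{ SITENAME }}: {{ content }}')
html = template.render(**{**context, **kwargs})  # later dict wins on clashes
print(html)  # olm site: <p>Hello</p>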
Example #4
File: article.py (Project: rhyst/olm)
    def __init__(self,
                 context,
                 filepath=None,
                 metadata=None,
                 content=None,
                 basename=None):
        if filepath is not None:
            super().__init__(context, filepath=filepath)
        elif metadata is not None and content is not None and basename is not None:
            super().__init__(context,
                             metadata=metadata,
                             content=content,
                             basename=basename)

        self.template = 'article.html'
        self.date = (datetime.datetime.strptime(self.metadata['date'].strip(), '%Y-%m-%d')
                     if 'date' in self.metadata else datetime.datetime.now())
        self.type = (self.metadata['type'].strip().lower()
                     if 'type' in self.metadata else 'article')
        self.title = self.metadata['title'] if 'title' in self.metadata else basename
        self.summary = self.metadata['summary'] if 'summary' in self.metadata else None
        self.location = (self.metadata['location'].strip().lower()
                         if 'location' in self.metadata else None)

        # Status
        status = (self.metadata['status'].strip().lower()
                  if 'status' in self.metadata else None)
        if status == 'unlisted' or self.type == 'unlisted':
            self.status = ArticleStatus.UNLISTED
        elif status == 'draft':
            self.status = ArticleStatus.DRAFT
        else:
            self.status = ArticleStatus.ACTIVE

        # Authors
        if 'authors' in self.metadata:
            self.authors = [
                a.strip() for a in self.metadata['authors'].split(',')
            ]
        elif 'author' in self.metadata:
            self.authors = [
                a.strip() for a in self.metadata['author'].split(',')
            ]
        else:
            self.authors = []
        for author in self.authors:
            if author in context['authors']:
                context['authors'][author].append(self)
            else:
                context['authors'][author] = [self]

        # Output Filepath
        if 'ARTICLE_SLUG' in context and self.date and self.location:
            slug_dict = merge_dictionaries(
                vars(self), {
                    'date': self.date.strftime('%Y-%m-%d'),
                    'location': self.location.lower()
                })
            output_filename = context.ARTICLE_SLUG.format(**slug_dict)
        elif 'ARTICLE_SLUG' in context:
            slug_dict = merge_dictionaries(
                vars(self), {'date': self.date.strftime('%Y-%m-%d')})
            output_filename = context.ARTICLE_SLUG.format(**slug_dict)
        elif self.date and self.location:
            output_filename = '{}-{}.html'.format(
                self.location.lower(), self.date.strftime('%Y-%m-%d'))
        else:
            output_filename = '{}.html'.format(self.basename)
        self.output_filepath = os.path.join(context.OUTPUT_FOLDER, 'articles',
                                            output_filename)
        self.url = 'articles/{}'.format(output_filename)

        self.cache_id = self.output_filepath

        if 'cache_type' in self.metadata:
            self.cache_type = self.metadata['cache_type']
        else:
            self.cache_type = 'ARTICLE'

        signal_sender = Signal(signals.AFTER_ARTICLE_READ)
        signal_sender.send(context=context, article=self)
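
The output-filepath logic is plain `str.format` over the article's attributes: `vars(self)` exposes every attribute by name, and the merged date/location overrides supply pre-formatted strings. A hypothetical `ARTICLE_SLUG` showing the effect:

slug_dict = {
    'title': 'A Trip Report',
    'date': '2019-03-02',        # already strftime-formatted, as above
    'location': 'yorkshire',     # already lower-cased, as above
}
ARTICLE_SLUG = '{date}-{location}.html'      # hypothetical pattern
print(ARTICLE_SLUG.format(**slug_dict))      # 2019-03-02-yorkshire.html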
Example #5
def generate_person_pages(context):
    changes = context['cache_change_types']
    meta_changes = context['cache_changed_meta']
    refresh_triggers = ["ARTICLE.NEW_FILE", "ARTICLE.REMOVED_FILE"]
    refresh_meta_triggers = ['title', 'location', 'date', 'status']
    changed_caves, changed_people = get_changes(context)
 
    # Flatten list of all cavers
    cavers = sorted({person for trip in context['trip_list'] for person in trip['people']})
    
    # Ensure each caver has an article object in the db
    for caver_name in cavers:
        if not [c for c in context['cavers_list'] if c['name'] == caver_name]:
            article = Caver(context, content='', metadata={}, basename=caver_name)
            article.same_as_cache = context.is_cached
            context['cavers_list'].append({"name": caver_name, "article": article})

    logger.debug("Writing %s caver pages", len(context['cavers_list']))
    number_written = 0
    row = namedtuple('row', 'cave article date')
    for caver in context['cavers_list']:
        caver_name = caver['name']

        # Set filepath and jinja template
        caver['article'].output_filepath = os.path.join("cavers", str(caver_name) + '.html')
        caver['article'].template = 'caverpages.html'

        trips = [t for t in context['trip_list'] if caver_name in t['people']]
        caver['article'].caver_articles = [row(' > '.join(t['caves']), t['article'], t['date']) for t in trips]

        # Set number of trips
        caver['article'].number = len([t for t in trips if len(t['caves']) > 0])

        # Work out if it needs to be written
        if context.caching_enabled:
            if caver_name in changed_people:
                caver['article'].same_as_cache = False
            if any(i in changes for i in refresh_triggers):
                caver['article'].same_as_cache = False
            if any(any(m in merge_dictionaries(*c) for m in refresh_meta_triggers) for c in meta_changes):
                caver['article'].same_as_cache = False
            if caver['article'].same_as_cache:
                continue

        # Compute authored
        if caver_name in context.authors:
            authored = [a for a in context.authors[caver_name]
                        if a.status is not ArticleStatus.DRAFT and a.status is not ArticleStatus.UNLISTED]
            caver['article'].authored = sorted(authored, key=lambda k: (k.date), reverse=True)

        # Compute cocavers
        cocavers = dict.fromkeys({person for trip in trips for person in trip['people']}, 0)
        del cocavers[caver_name]
        for key in cocavers:
            for trip in trips:
                if key in trip['people']:
                    cocavers[key] = cocavers[key] + 1
        caver['article'].cocavers = sorted(cocavers.items(), key=lambda tup: tup[1], reverse=True)

        # Compute caves
        caves = dict.fromkeys({cave for trip in trips for cave in trip['caves']}, 0)
        for key in caves:
            for trip in trips:
                if key in trip['caves']:
                    caves[key] = caves[key] + 1
        caver['article'].caves = sorted(caves.items(), key=lambda tup: tup[1], reverse=True)

        number_written = number_written + 1
        caver['article'].write_file(context=context)
    logger.info("Wrote %s out of %s total caver pages", number_written, len(context['cavers_list']))

    # ==========Write the index of cavers================
    cached = True
    if context.caching_enabled:
        if len(changed_people) > 0:
            cached = False
        if any(i in changes for i in refresh_triggers):
            cached = False
        if any(any(m in merge_dictionaries(*c) for m in refresh_meta_triggers) for c in meta_changes):
            cached = False
        if cached:
            return
    
    row = namedtuple('row', 'name number recentdate meta')
    rows = []
    for caver in context['cavers_list']:
        name = caver['name']
        number = caver['article'].number
        recentdate = max([article.date for article in caver['article'].caver_articles])
        meta = caver['article'].metadata
        rows.append(row(name, number, recentdate, meta))
    filename = os.path.join('cavers', 'index.html')
    writer = Writer(
        context,
        filename,
        "caverpages_index.html",
        rows=sorted(sorted(rows, key=lambda x: x.name), key=lambda x: x.recentdate, reverse=True))
    writer.write_file()
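
The cocaver tally walks every trip once per co-traveller; `collections.Counter` expresses the same count-and-rank more directly. A behaviour-equivalent sketch, assuming each person is listed at most once per trip:

from collections import Counter

trips = [{'people': ['Ann', 'Bob']}, {'people': ['Ann', 'Bob', 'Cat']}]
caver_name = 'Ann'

cocavers = Counter(p for t in trips for p in t['people'])
del cocavers[caver_name]          # drop the caver themselves
print(cocavers.most_common())     # [('Bob', 2), ('Cat', 1)]

The double `sorted` in the index write above leans on Python's stable sort: sorting by name first and by recentdate second yields rows ordered by date with alphabetical tie-breaking.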
Example #6
def generate_cave_pages(context):
    changes = context['cache_change_types']
    meta_changes = context['cache_changed_meta']
    refresh_triggers = ["ARTICLE.NEW_FILE", "ARTICLE.REMOVED_FILE"]
    refresh_meta_triggers = ['title', 'location', 'date', 'status']
    changed_caves, changed_people = get_changes(context)

    # Flatten list of all caves
    caves = sorted({cave for trip in context['trip_list'] for cave in trip['caves'] if cave is not None})
    
    # Ensure each has an article object in the db
    for cave_name in caves:
        if not [c for c in context['caves_list'] if c['name'] == cave_name]:
            article = Cave(context, content='', metadata={}, basename=cave_name)
            article.same_as_cache = context.is_cached
            context['caves_list'].append({"name": cave_name, "article": article})

    logger.debug("Writing %s caver pages", len(context['caves_list']))
    number_written = 0
    for cave in context['caves_list']:
        cave_name = cave['name']
                
        # Set filepath and jinja template
        cave['article'].output_filepath = os.path.join("caves", str(cave_name) + '.html')
        cave['article'].template = 'cavepages.html'

        # Construct articles list with useful stuff (date, article, author_in_cave) surfaced
        trips = [t for t in context['trip_list'] if cave_name in t['caves']]
        cave['article'].cave_articles = [(t['article'], t['date'], was_author_in_cave(t['article'], cave_name)) for t in trips]

        # Work out if it needs writing
        if context.caching_enabled:
            if cave_name in changed_caves:
                cave['article'].same_as_cache = False
            if any(i in changes for i in refresh_triggers):
                cave['article'].same_as_cache = False
            if any(any(m in merge_dictionaries(*c) for m in refresh_meta_triggers) for c in meta_changes):
                cave['article'].same_as_cache = False
            if cave['article'].same_as_cache:
                continue

        number_written = number_written + 1
        cave['article'].write_file(context=context)
    logger.info("Wrote %s out of %s total cave pages", number_written, len(context['caves_list']))

    # ==========Write the index of caves================
    cached = True
    if context.caching_enabled:
        if len(changed_caves) > 0:
            cached = False
        if any(i in changes for i in refresh_triggers):
            cached = False
        if any(any(m in merge_dictionaries(*c) for m in refresh_meta_triggers) for c in meta_changes):
            cached = False
        if cached:
            return

    row = namedtuple('row', 'name number recentdate meta')
    rows = []
    for cave in context['caves_list']:
        try:
            name = cave['name']
            number = len(cave['article'].cave_articles)
            recentdate = max([trip[1] for trip in cave['article'].cave_articles])
            meta = cave['article'].metadata
            rows.append(row(name, number, recentdate, meta))
        except Exception as e:
            logger.warning("Could not include %s in cave index", name)
            logger.warning(e)
    filename = os.path.join('caves', 'index.html')

    writer = Writer(
        context,
        filename,
        "cavepages_index.html",
        rows=sorted(rows, key=lambda x: x.name))
    writer.write_file()
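
One difference from the caver index: each cave row's recentdate comes from `max()` over `cave_articles`, which raises ValueError when a cave has no trips, hence the try/except. A `default` argument is a lighter-weight guard (Python 3.4+):

dates = []
recentdate = max(dates, default=None)  # None instead of ValueError for empty lists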
Example #7
def generate_person_pages(context):
    # For each person generate a page listing the caves they have been in and the article that
    # describes that trip
    caver_bios = context['caverbios']
    cavers = context['cavepeep_person']

    dictionary = cavers
    content_dictionary = caver_bios
    output_path = "cavers"
    template = "caverpages"

    row = namedtuple('row',
                     'path content metadata articles authored same_as_cache')
    initialised_pages = {}

    for key in dictionary.keys():
        if key not in initialised_pages.keys():
            logger.debug("Adding {} to list of pages to write".format(key))
            if key in content_dictionary:
                source = content_dictionary[key]
                logger.debug("Content added to " + key)
            else:
                source = Caver(context, content='', metadata={}, basename=key)
                source.same_as_cache = context.is_cached

            if key in context.authors:
                source.authored = sorted(context.authors[key],
                                         key=lambda k: (k.date),
                                         reverse=True)

            source.output_filepath = os.path.join(output_path,
                                                  str(key) + '.html')
            source.articles = dictionary[key]
            source.template = template + '.html'
            initialised_pages[key] = source
        else:
            initialised_pages[key].articles.extend(dictionary[key])

    def get_people(cavepeep):
        c = re.compile(
            r"""DATE=\s*(\d\d\d\d-\d\d-\d\d)\s*;\s*CAVE=\s*([\s\w\D][^;]*)\s*;\s*PEOPLE=\s*([\s\w\D][^;]*);*[\n\t\r]*"""
        )
        # Create key/value relationship between trip identifier (Date + Cave) and list of cavers
        item_date = None
        item_caves = None
        item_people = None
        m = c.match(cavepeep)
        if not m:
            return []
        item_people = m.group(3).split(',')

        item_people = item_people if type(item_people) is list else [
            item_people
        ]
        item_people = [x.strip() for x in item_people]
        return item_people

    for page_name, page_data in initialised_pages.items():
        cocavers = {}
        for article in set([a.article for a in page_data.articles]):
            trips = article.metadata['cavepeeps'] if type(
                article.metadata['cavepeeps']) is list else [
                    article.metadata['cavepeeps']
                ]
            for trip in trips:
                people = get_people(trip)
                if page_name in people:
                    for person in people:
                        if person in cocavers:
                            cocavers[person] = cocavers[person] + 1
                        else:
                            cocavers[person] = 1
        del cocavers[page_name]
        page_data.cocavers = sorted([(person, cocavers[person])
                                     for person in cocavers.keys()],
                                    key=lambda tup: tup[1],
                                    reverse=True)

        caves = {}
        for trip in [a.cave for a in page_data.articles]:
            if trip is None:
                continue
            for cave in [a.strip() for a in trip.split('>')]:
                if cave in caves:
                    caves[cave] = caves[cave] + 1
                else:
                    caves[cave] = 1
        page_data.caves = sorted([(cave, caves[cave])
                                  for cave in caves.keys()],
                                 key=lambda tup: tup[1],
                                 reverse=True)

    # Work out if we need to update this file
    changes = context['cache_change_types']
    meta_changes = context['cache_changed_meta']
    refresh_triggers = ["ARTICLE.NEW_FILE", "ARTICLE.REMOVED_FILE"]
    refresh_meta_triggers = ['title', 'location', 'date', 'status']
    changed_people = []

    if "ARTICLE.NEW_FILE" in changes or "ARTICLE.META_CHANGE" in changes:
        for meta_change in meta_changes:
            added, removed, modified = meta_change
            if 'cavepeeps' in added:
                people, caves = parse_metadata(added['cavepeeps'])
                changed_people.extend(people)
            if 'cavepeeps' in removed:
                people, caves = parse_metadata(removed['cavepeeps'])
                changed_people.extend(people)
            if 'cavepeeps' in modified:
                people, caves = parse_metadata(modified['cavepeeps'][0])
                changed_people.extend(people)
                people, caves = parse_metadata(modified['cavepeeps'][1])
                changed_people.extend(people)
            if 'authors' in added:
                people = [p.strip() for p in added['authors'].split(',')]
                changed_people.extend(people)
            if 'authors' in removed:
                people = [p.strip() for p in removed['authors'].split(',')]
                changed_people.extend(people)
            if 'authors' in modified:
                people = [p.strip() for p in modified['authors'][0].split(',')]
                changed_people.extend(people)
                people = [p.strip() for p in modified['authors'][1].split(',')]
                changed_people.extend(people)

    logger.debug("Writing %s caver pages", len(initialised_pages))
    number_written = 0
    for page_name, page_data in initialised_pages.items():
        page_data.caver_articles = page_data.articles
        page_data.number = len(
            [a for a in page_data.articles if a.cave is not None])
        if context.caching_enabled:
            if page_name in changed_people:
                page_data.same_as_cache = False
            if any(i in changes for i in refresh_triggers):
                page_data.same_as_cache = False
            if any(
                    any(m in merge_dictionaries(*c)
                        for m in refresh_meta_triggers) for c in meta_changes):
                page_data.same_as_cache = False
            if page_data.same_as_cache:
                continue
        number_written = number_written + 1
        signal_sender = Signal("BEFORE_ARTICLE_WRITE")
        signal_sender.send(context=context, afile=page_data)
        page_data.write_file(context=context)

    pages = initialised_pages
    logger.info("Wrote %s out of %s total caver pages", number_written,
                len(initialised_pages))

    # ==========Write the index of cavers================
    cached = True
    if context.caching_enabled:
        if len(changed_people) > 0:
            cached = False
        if any(i in changes for i in refresh_triggers):
            cached = False
        if any(
                any(m in merge_dictionaries(*c) for m in refresh_meta_triggers)
                for c in meta_changes):
            cached = False
        if cached:
            return
    row = namedtuple('row', 'name number recentdate meta')
    rows = []
    for page_name in pages.keys():
        name = page_name
        number = len(
            [a for a in pages[page_name].articles if a.cave is not None])
        recentdate = max(
            [article.date for article in pages[page_name].articles])
        meta = (content_dictionary[page_name].metadata
                if page_name in content_dictionary else None)
        rows.append(row(name, number, recentdate, meta))
    filename = os.path.join(output_path, 'index.html')
    writer = Writer(context,
                    filename,
                    template + "_index.html",
                    rows=sorted(sorted(rows, key=lambda x: x.name),
                                key=lambda x: x.recentdate,
                                reverse=True))
    writer.write_file()
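
`get_people` hard-codes the cavepeeps line format: a date, a `>`-separated cave chain, and a comma-separated people list, each terminated by `;`. A quick check of the regex against a made-up line:

import re

c = re.compile(
    r"""DATE=\s*(\d\d\d\d-\d\d-\d\d)\s*;\s*CAVE=\s*([\s\w\D][^;]*)\s*;\s*PEOPLE=\s*([\s\w\D][^;]*);*[\n\t\r]*""")
m = c.match("DATE=2019-03-02; CAVE=Gaping Gill > Main Chamber; PEOPLE=Ann, Bob")
print(m.group(1))                                   # 2019-03-02
print([p.strip() for p in m.group(3).split(',')])   # ['Ann', 'Bob']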
Example #8
def generate_cave_pages(context):
    cave_bios = context['cavebios']
    caves = context['cavepeep_cave']
    caves_dict = {}

    # Split the through trips into individual caves and build a
    # unique mapping from each cave name to its trip articles.
    for trip in [c for c in caves if c is not None]:
        for cave in trip.split('>'):
            create_or_add(caves_dict, cave.strip(), caves[trip])

    dictionary = caves_dict
    content_dictionary = cave_bios
    output_path = "caves"
    template = "cavepages"

    row = namedtuple('row', 'path content metadata articles same_as_cache')
    initialised_pages = {}

    for key in dictionary.keys():
        if key not in initialised_pages.keys():
            logger.debug(
                "Cavebios: Adding {} to list of pages to write".format(key))
            if key in content_dictionary:
                source = content_dictionary[key]
                logger.debug("Cavebios: Content added to " + key)
            else:
                source = Cave(context, content='', metadata={}, basename=key)
                source.same_as_cache = context.is_cached

            source.output_filepath = os.path.join(output_path,
                                                  str(key) + '.html')
            source.articles = dictionary[key]
            source.template = template + '.html'
            initialised_pages[key] = source
        else:
            initialised_pages[key].articles.extend(dictionary[key])

    # Work out if we need to update this file
    changes = context['cache_change_types']
    meta_changes = context['cache_changed_meta']
    refresh_triggers = ["ARTICLE.NEW_FILE", "ARTICLE.REMOVED_FILE"]
    refresh_meta_triggers = ['title', 'location', 'date', 'status']
    changed_caves = []
    if "ARTICLE.NEW_FILE" in changes or "ARTICLE.META_CHANGE" in changes:
        for meta_change in meta_changes:
            added, removed, modified = meta_change
            if 'cavepeeps' in added:
                people, caves = parse_metadata(added['cavepeeps'])
                changed_caves.extend(caves)
            if 'cavepeeps' in removed:
                people, caves = parse_metadata(removed['cavepeeps'])
                changed_caves.extend(caves)
            if 'cavepeeps' in modified:
                people, caves = parse_metadata(modified['cavepeeps'][0])
                changed_caves.extend(caves)
                people, caves = parse_metadata(modified['cavepeeps'][1])
                changed_caves.extend(caves)

    number_written = 0
    for page_name, page_data in initialised_pages.items():
        page_data.cave_articles = [(a, a.date,
                                    was_author_in_cave(a, page_name))
                                   for a in page_data.articles]
        if context.caching_enabled:
            if page_name in changed_caves:
                page_data.same_as_cache = False
            if any(i in changes for i in refresh_triggers):
                page_data.same_as_cache = False
            if any(
                    any(m in merge_dictionaries(*c)
                        for m in refresh_meta_triggers) for c in meta_changes):
                page_data.same_as_cache = False
            if page_data.same_as_cache:
                continue
        number_written = number_written + 1
        signal_sender = Signal("BEFORE_ARTICLE_WRITE")
        signal_sender.send(context=context, afile=page_data)
        page_data.write_file(context=context)

    logger.info("Wrote %s out of %s total cave pages", number_written,
                len(initialised_pages))

    # ==========Write the index of caves================
    cached = True
    if context.caching_enabled:
        if len(changed_caves) > 0:
            cached = False
        if any(i in changes for i in refresh_triggers):
            cached = False
        if any(
                any(m in merge_dictionaries(*c) for m in refresh_meta_triggers)
                for c in meta_changes):
            cached = False
        if cached:
            return
    logger.info("writing cave page index")
    pages = initialised_pages
    row = namedtuple('row', 'name number recentdate meta')
    rows = []
    for page_name in pages.keys():
        name = page_name
        number = len(pages[page_name].articles)
        recentdate = max(
            [article.date for article in pages[page_name].articles])
        meta = (content_dictionary[page_name].metadata
                if page_name in content_dictionary else None)
        rows.append(row(name, number, recentdate, meta))
    filename = os.path.join(output_path, 'index.html')

    writer = Writer(context,
                    filename,
                    template + "_index.html",
                    rows=sorted(rows, key=lambda x: x.name))
    writer.write_file()
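
`create_or_add` is not shown on this page; from its call site it evidently accumulates each trip's articles into a dict of lists keyed by cave name. A plausible minimal implementation, offered as an assumption rather than the project's actual code:

def create_or_add(dictionary, key, values):
    # Assumed behaviour: extend the list stored at `key`, creating it if absent.
    dictionary.setdefault(key, []).extend(values)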