def getChildren(self):
    """Return the chapter children keyed by chapter id, building them lazily.

    On first call, constructs a Chapter for every entry in self.chapters and
    caches the mapping in self._children; subsequent calls return the cache.
    """
    if self._children is None:
        self._children = SortedValuesDict()
        for chapterInfo in self.chapters:
            chapterData = ChapterData(chapterInfo['data_path'])
            chapter = Chapter(chapterData)
            chapterId = chapter.getId()
            self._children[chapterId] = chapter
            # BUG FIX: was a Python 2 print statement (syntax error on
            # Python 3); this form prints the same text on both versions.
            print('made chapter %s' % chapterId)
    return self._children
def media_markers_as_chapters(self) -> List[Chapter]:
    """Convert the media markers into a list of Chapter objects.

    Each marker opens a chapter that ends where the next marker begins;
    the final chapter ends at the media's total duration. Returns an empty
    list when there are no markers.
    """
    markers = self.media_markers
    if not markers:
        return []
    result = [
        Chapter(title=first.name, start=first.time, end=second.time)
        for first, second in zip(markers, markers[1:])
    ]
    tail = markers[-1]
    result.append(
        Chapter(title=tail.name,
                start=tail.time,
                end=Timestamp.from_milliseconds(self.duration)))
    return result
def get_chapters_list(novel_link):
    """Build Chapter objects from the chapter-list HTML of a novel page.

    Each list row yields a Chapter with a sequential id, the anchor's href
    as link, and the span text as title.
    """
    rows = get_chapter_list_html(novel_link)
    result = []
    for index, row in enumerate(rows):
        entry = Chapter()
        entry.id = index
        entry.link = row.find('a').get('href')
        entry.title = row.find('span').text
        result.append(entry)
    return result
def load_episode_by_rss_id(self, rss_episode_id=None):
    """Load a single episode (and its chapters) by its RSS episode id.

    Parameters
    ----------
    rss_episode_id : str | None
        RSS-level id of the episode to load.

    Returns
    -------
    Episode | None
        The populated Episode, or None when no matching row exists.
    """
    # Table names cannot be bound parameters; they come from instance
    # configuration, not user input. The id value IS parameterized.
    sql = "SELECT * FROM " + self.table_episodes + " AS e WHERE rss_episode_id = ? LIMIT 1"
    cur = self.db.cursor()
    cur.row_factory = sqlite3.Row
    cur.execute(sql, [rss_episode_id])
    row = cur.fetchone()
    if row is None:
        # BUG FIX: the original subscripted the None result and crashed
        # with a TypeError when the episode was not in the database.
        return None
    episode = Episode(episode_id=row['id'],
                      rss_episode_id=row["rss_episode_id"],
                      duration=row["duration"],
                      title=row["title"],
                      description=row["description"],
                      subtitle=row["subtitle"],
                      link=row["link"],
                      published=row["published"],
                      image=row["image"],
                      chapters=[])
    sql = "SELECT * FROM " + self.table_chapters + " WHERE episode_id = ?"
    cur.row_factory = sqlite3.Row
    cur.execute(sql, [row["id"]])
    episode.chapters = [
        Chapter(c["start"], c["title"], chapter_id=c["id"],
                episode_id=c["episode_id"], image=c["image"], href=c["href"])
        for c in cur.fetchall()
    ]
    return episode
def get_chapters(self):
    """Return this object's Chapter entities with their questions attached."""
    loaded = []
    for chapter_key in self.chapters:
        entry = Chapter.get(chapter_key)
        entry.questions = list_questions(entry)
        loaded.append(entry)
    return loaded
def generate_novel():
    """Drive novel generation end to end: title, intro, body chapters, ending."""
    generate_title()
    state = FantasyNovelState()
    setting = state.get_current_setting()
    IntroChapter(state).write_chapter()
    # One body chapter per setting until the state runs out of settings.
    while setting is not None:
        Chapter(state).write_chapter()
        setting = state.next_setting()
    FinalChapter(state).write_chapter()
    md_writer.end_novel()
def get_fiction_chapter(self, index):
    """Return the Chapter at `index` of this fiction's chapter list.

    Parameters
    ----------
    index : int
        Zero-based position into self.chapter_list.

    Returns
    -------
    Chapter | None
        The chapter at `index`, or None when `index` is out of range.
    """
    if len(self.chapter_list) <= index:
        print("没有更多章节了")
        return None
    # BUG FIX: the original indexed with config.fiction_index, ignoring the
    # `index` argument that was just bounds-checked above.
    info = self.chapter_list[index]
    return Chapter(info['title'], info['href'])
def get_chapter(manga_name, chapter_number):
    """Return JSON with the image links of one chapter of a stored manga.

    Looks the manga up in Firestore; on a miss, answers with okay=false.
    """
    doc = db.collection(u'manga').document(manga_name).get()
    if not doc.exists:
        return jsonify({"okay": "false", "message": "Chapter Not Found"})
    manga = Manga(name=manga_name, url=doc.to_dict()['url'])
    manga.gen_list()
    number = int(chapter_number)
    target = Chapter(chapter_number=number,
                     link=manga.chapters[number - 1].link)
    target.gen_images()
    return jsonify({"chapter_number": number,
                    "okay": "true",
                    "links": target.image_links})
def novel(file_path):
    """Split a novel text file into chapters and post each to the blog.

    Parameters
    ----------
    file_path : str
        Path of the UTF-8 novel text file to split and publish.
    """
    post = Post()
    post.login()
    category_name = '大说'
    category_slug = 'big'
    category_mid = post.category(category_name, category_slug)
    if not category_mid:
        return
    # NOTE: the original also read the entire file into an unused local
    # here; Chapter(file_path) loads the file itself, so that read was
    # dead code and has been removed.
    c = Chapter(file_path)
    c.change_format()
    chapters = c.find_chapters()
    for num, chapter in enumerate(chapters):
        post.send_text(chapter.title, chapter.content, category_mid)
        # Posting too quickly makes the backend order articles incorrectly.
        time.sleep(1)
        print("%d/%d" % (num + 1, len(chapters)))
def addChapter(self, chapterName: str, chapterTime: str, chapterNumber: int = None) -> None:
    """
    Adds a chapter to the movie

    Parameters
    ----------
    chapterName : str
        The name of the chapter
    chapterTime : str
        The time of the chapter
    chapterNumber = None : int
        Optionally, a chapter number. Defaults to the number of chapters
        added so far + 1
    """
    chapter = Chapter()
    # BUG FIX: was len(self.chapters + 1), which raises TypeError
    # (list + int) whenever chapterNumber is omitted or falsy.
    chapter.number = chapterNumber if chapterNumber else len(self.chapters) + 1
    chapter.name = chapterName
    chapter.time = chapterTime
    self.chapters.append(chapter)
def _read_id3v2_chapters(self):
    """Build Chapter objects from the MP3's top-level ID3v2 table of contents."""
    result = []
    for toc in self._mp3.tag.table_of_contents:
        # Only the top-level TOC frame contributes chapters.
        if not toc.toplevel:
            continue
        for child_id in toc.child_ids:
            frame = self._mp3.tag.chapters.get(child_id)
            begin, finish = map(Timestamp.from_milliseconds, frame.times)
            result.append(Chapter(frame.title, start=begin, end=finish))
    return result
def gen_list(self):
    """Scrape the manga page and populate self.chapters, reversed at the end."""
    response = requests.get(self.url)
    soup = BeautifulSoup(response.content, 'html.parser')
    rows = soup.find_all('div', class_='row')
    rows.pop(0)  # skip the first row (presumably a header, not a chapter)
    for number, row in enumerate(rows):
        anchor = row.find('a')
        self.chapters.append(Chapter(chapter_number=number, link=anchor['href']))
    self.chapters.reverse()
def create_from_corpus_file(filename, min_words=500):
    """Generate chapters from a corpus file until at least min_words is reached.

    Progress is reported on stderr; returns a Novel built from the chapters.
    """
    total_words = 0
    generated = []
    while total_words < min_words:
        next_chapter = Chapter.create_from_corpus_file(filename)
        total_words += next_chapter.word_count
        print("generated chapter with {} words".format(next_chapter.word_count),
              file=sys.stderr)
        generated.append(next_chapter)
    print("generated novel with {} chapters, {} words".format(len(generated),
                                                              total_words),
          file=sys.stderr)
    return Novel(generated)
def has_chapter(self, chapter):
    """ Check if the course has a particular chapter

    Accepts either a Chapter instance or a chapter key. Returns True when
    the chapter itself, or any ancestor up to (but excluding) the root,
    appears in self.chapters.
    """
    if isinstance(chapter, Chapter):
        chapter_key = chapter.key()
    else:
        chapter_key = chapter
        chapter = Chapter.get(chapter_key)
    if chapter_key in self.chapters:
        return True
    root = root_key()
    parent_chapter_key = chapter.parent_key()
    # Walk up the ancestry; membership of any ancestor counts.
    # FIX: identity comparison with None (`is not None`, not `!= None`).
    while parent_chapter_key != root and parent_chapter_key is not None:
        if parent_chapter_key in self.chapters:
            return True
        chap = Chapter.get(parent_chapter_key)
        parent_chapter_key = chap.parent_key()
    return False
def download_chapters_list(self):
    """Download the book's index page and populate self.chapter_list.

    Returns
    -------
    list
        The raw regex matches; an empty (falsy) list signals failure,
        which is also logged.
    """
    html = download_content(self.index_iri)
    page_prefix = self.index_iri.replace("all.html", "")
    search = re.findall(r'<a style="" href=".*?<\/a>', html)
    if search:
        for title_html in search:
            self.chapter_list.append(Chapter(title_html, page_prefix))
    else:
        logging.error(
            f'Failed to fetch this books chapter list from {self.index_iri}'
        )
    # BUG FIX: removed a leftover debug line (explicitly marked
    # "TODO delete this line") that truncated the list to [2088:].
    return search
def single_predict(inp):
    """Do quote attribution prediction on single process

    Args:
        inp: A tuple as (args, feat_extracters, story_file, char_file,
            output_file, tok_file, tmp_dir), where `args' is the parsed CLI
            arguments object, `feat_extracters' is a list of feature
            extracters, `story_file' is the path to the story file,
            `char_file' is the path to the character list file,
            `output_file' is the path to save results, `tok_file' is the
            path to tokenization file (could be None or invalid, if so, no
            external tokenization will be load), and `tmp_dir' is the path
            to save temporary files.

    Returns:
        A tuple as (story_file, success). `success' will be False if
        processing failed.
    """
    # Unpack the work item; this function is designed to run inside a
    # multiprocessing pool, so everything arrives in one tuple.
    args, feat_extracters, story_file, char_file, output_file, tok_file, tmp_dir = inp
    name = multiprocessing.current_process().name
    story_filename = os.path.basename(story_file)
    print("\n### {} processing {} ###".format(name, story_filename))
    try:
        # Read chapter
        chapter = Chapter.read_with_booknlp(
            story_file,
            char_file,
            getattr(args, 'booknlp', None),
            tok_file=tok_file,
            coref_story=(not args.no_coref_story),
            no_cipher=args.no_cipher_char,
            fix_inv_char=args.fix_inv_char,
            tmp=tmp_dir)
        # Predict quote attribution
        chapter.quote_attribution_svmrank(feat_extracters,
                                          args.model_path,
                                          getattr(args, 'svmrank', None),
                                          tmp=tmp_dir)
        # Dump
        chapter.dump_quote_json(output_file)
    except Exception as err:
        #print(err)
        # Failures are reported through the returned flag rather than
        # raised, so one bad story does not kill the whole worker pool.
        track = traceback.format_exc()
        print(track)
        return (story_file, False)
    return (story_file, True)
def single_train_organize(inp):
    """Preprocess training data for svm-rank on single process.

    Args:
        inp: A tuple as (args, feat_extracters, story_file, char_file,
            ans_file, tok_file, tmp_dir), where `args' is the parsed CLI
            arguments object, `feat_extracters' is a list of feature
            extracters, `story_file' is the path to the story file,
            `char_file' is the path to the character list file, `ans_file'
            is the path to the gold answer file, `tok_file' is the path to
            tokenization file (could be None or invalid, if so, no external
            tokenization will be load), and `tmp_dir' is the path to save
            temporary files.

    Returns:
        A tuple as (story_file, svmrank_input_file, success). `success'
        will be False if processing failed.
    """
    # Unpack the work item; this runs inside a multiprocessing pool.
    args, feat_extracters, story_file, char_file, ans_file, tok_file, tmp_dir = inp
    name = multiprocessing.current_process().name
    story_filename = os.path.basename(story_file)
    print("\n### {} processing {} ###".format(name, story_filename))
    # Process file names
    svmrank_input_file = os.path.join(tmp_dir, 'svmrank_input.txt')
    try:
        # Read chapter
        chapter = Chapter.read_with_booknlp(
            story_file,
            char_file,
            getattr(args, 'booknlp', None),
            tok_file=tok_file,
            coref_story=(not args.no_coref_story),
            no_cipher=args.no_cipher_char,
            fix_inv_char=args.fix_inv_char,
            tmp=tmp_dir,
            use_booknlp=True)
        # Generate input file for svm-rank
        chapter.prepare_svmrank(feat_extracters,
                                svmrank_input_file,
                                answer=ans_file,
                                original=True)
    except Exception as err:
        #print(err)
        # Report failure via the returned flag instead of raising.
        print(traceback.format_exc())
        return (story_file, None, False)
    return (story_file, svmrank_input_file, True)
def __init__(self, path):
    """Open an EPUB (zip) archive and load its chapters in spine order.

    Parameters
    ----------
    path : str
        Filesystem path of the .epub file.
    """
    import posixpath  # zip member paths always use forward slashes

    self.archive = zipfile.ZipFile(path, 'r')  # read-only for now
    # Path to the OPF file which points to book content
    self.opf_path = self._get_opf_path()
    # Parse the OPF file for info
    self._parse_opf()
    # Directory containing the OPF; manifest hrefs are relative to it.
    # BUG FIX: the original used opf_path.split("/")[0], which is wrong
    # both for an OPF at the archive root and for one nested more than
    # one directory deep.
    self.opf_dir = posixpath.dirname(self.opf_path)
    # The content of this book, divided into chapters
    self.chapters = []
    for chapter_id in self.spine:
        member = posixpath.join(self.opf_dir, self.manifest[chapter_id]['href'])
        self.chapters.append(Chapter(chapter_id, self.archive.open(member)))
def get_chapters_table(self, book):
    """Fetch and parse the remote chapter-list page for `book`.

    Builds one Chapter per list item (id parsed from the title, url, title,
    vip flag, running index) and registers them via self.add_book_chapters.
    Returns False when the HTTP request fails, True otherwise.
    """
    req_url = self.url + '/' + self.book_chapter_list_page + '?' +\
        self.book_chapter_list_param + book.book_native_id()
    try:
        Logger.debug(' > {}' . format(req_url))
        resp = requests.get(req_url, timeout=3)
        Logger.debug(' < {}[{}]' . format(req_url, resp.status_code))
    except Exception as e:
        Logger.error(' xxx {}[{}]' . format(req_url, e))
        return False
    parser = BeautifulSoup(resp.content, 'html.parser')
    chapters_root_tags = parser.find_all('div', class_=self.chapters_root_class)
    chapters = []
    chapter_index = 1
    for chapters_tag in chapters_root_tags:
        chapters_item_tags = chapters_tag.find_all(class_=self.chapters_item_class)
        for tag in chapters_item_tags:
            vip = False
            url = None
            title = None
            cid = '0'  # fallback chapter id when no number is found in the title
            # Items carrying a 'vip' CSS class mark paywalled chapters.
            if tag.get('class') is not None and 'vip' in tag.get('class'):
                vip = True
            if tag.a is not None and tag.a.get('href') is not None:
                url = tag.a.get('href')
            if tag.a is not None and tag.a.span is not None:
                title = tag.a.span.get_text(strip=True)
                # Chapter id: first match of the configured pattern in the title.
                pattern = re.compile(self.book_chapter_id_pattern)
                matches = pattern.findall(title)
                if 0 < len(matches):
                    cid = str(matches[0])
                else:
                    Logger.debug(title)
            chapter = Chapter(cid, url, title, vip, chapter_index)
            chapters.append(chapter)
            chapter_index += 1
    self.add_book_chapters(book, chapters)
    return True
def get_chapters_table(self, parser, book):
    """Extract the chapter list for `book` from an already-parsed index page.

    Locates the container div holding the 'alllist' title block, walks every
    'chapterlist' <dl>, builds one Chapter per <dd>, and registers the
    result via self.add_book_chapters. Returns None in all cases.
    """
    cons = parser.find_all('div', id='container')
    chmain = None
    # Find the <div id="main"> that contains the chapter-list marker block.
    for con in cons:
        mains = con.find_all('div', id='main')
        for main in mains:
            booklists = main.find_all(class_='title', id='alllist')
            if 0 < len(booklists):
                chmain = main
                break
        if chmain is not None:
            break
    if chmain is None:
        return
    chs = chmain.find_all('dl', class_='chapterlist')
    chapters = []
    chapter_index = 1
    for ch in chs:
        dds = ch.find_all('dd')
        for dd in dds:
            title = dd.get_text(strip=True)
            url = None
            aas = dd.find_all('a')
            for a in aas:
                if a.get('href') is not None:
                    # Relative chapter URL derives from the book id,
                    # e.g. native id 'a_b' -> 'a/b/<href>'.
                    url = '{}/{}'.format(
                        book.book_native_id().replace('_', '/'),
                        a.get('href'))
            cid = None
            if url is not None:
                # Chapter id: the path with '.html' stripped and '/' -> '_'.
                cid = url.replace('.html', '').replace('/', '_')
                url = self.url + '/' + url
            chapter = Chapter(cid, url, title, False, chapter_index)
            chapters.append(chapter)
            chapter_index += 1
    self.add_book_chapters(book, chapters)
def load_rss(self, feedUrl):
    """Fetch an RSS feed, persist it, and import its episodes (Python 2 code).

    Uses the HTTP etag (or the feed's `updated` field as fallback) to detect
    changes; returns a 304-style dict when unchanged, otherwise the populated
    Feed object with its episodes (known ones reloaded, new ones inserted).
    """
    self.feed_url = feedUrl
    print "Loading RSS Feed: " + self.feed_url
    feedparser.USER_AGENT = "FeedToYoutube/0.0.3 +http://ldk.net/"
    f = feedparser.parse(
        self.feed_url,
        request_headers={'content-type': 'text/html; charset=UTF-8'})
    # Prefer the HTTP etag; fall back to the feed-level 'updated' stamp.
    if not hasattr(f, "etag"):
        if hasattr(f.feed, "updated"):
            etag = f.feed.updated
        else:
            raise LookupError(
                'Can\'t find any update indicator. please contact the author.'
            )
    else:
        etag = f.etag
    if not self._feed_has_changed(etag):
        print "Nothing has changed"
        return {'status': "304", "message": "Not Modified"}
    if not hasattr(f.feed, "updated"):
        f.feed.updated = unicode(datetime.datetime.now())
    imageUrl = ""
    if hasattr(f.feed, 'image') and hasattr(f.feed.image, "href"):
        imageUrl = f.feed.image.href
    summary = f.feed.title
    if hasattr(f.feed, "summary"):
        summary = f.feed.summary
    feed = Feed(self.config,
                feedUrl,
                image=imageUrl,
                etag=etag,
                subtitle=summary,
                title=f.feed.title,
                updated=f.feed.updated)
    self.save_feed(feed)
    feed.episodes = []
    print "Importing " + feed.title
    for episode in f.entries:
        sys.stdout.write(" Episode " + episode.title)
        sys.stdout.flush()
        # Episodes already in the DB are reloaded, not re-imported.
        if self._is_known_episode(episode.id):
            print " - old"
            feed.episodes.append(
                self.load_episode_by_rss_id(rss_episode_id=episode.id))
            continue
        print " - new"
        is_new = True
        # chapter handling
        cs = []
        if hasattr(episode, "psc_chapters"):
            for chapter in episode.psc_chapters.chapters:
                link = ""
                image = ""
                if hasattr(chapter, 'href'):
                    link = chapter.href
                if hasattr(chapter, 'image'):
                    image = chapter.image
                c = Chapter(start=chapter.start,
                            image=image,
                            href=link,
                            title=chapter.title)
                print "\t" + c.start + ": " + c.title + " Image= " + c.image + " Href= " + c.href
                cs.append(c)
        image = ""
        duration = ""
        if hasattr(episode, 'image') and hasattr(episode.image, "href"):
            image = episode.image.href
        if hasattr(episode, 'itunes_duration'):
            duration = episode.itunes_duration
        link = None
        if hasattr(episode, "links"):
            # NOTE(review): the loop variable shadows `link`; when no entry
            # has type 'audio/mpeg', `link` ends up holding the LAST link
            # object (not None), so the `link is None` guard below never
            # fires -- this looks like a bug; confirm intended behaviour.
            for link in episode.links:
                if link.type == 'audio/mpeg':
                    link = link.href
                    break
        if link is None:
            continue
        subtitle = ""
        if hasattr(episode, "subtitle"):
            subtitle = episode.subtitle
        e = Episode(feed_id=feed.feed_id,
                    rss_episode_id=episode.id,
                    duration=duration,
                    link=link,
                    title=episode.title,
                    subtitle=subtitle,
                    description=episode.summary,
                    published=episode.published,
                    chapters=cs,
                    image=image,
                    is_new=is_new)
        self._insert_episode(e)
        if hasattr(feed.episodes, 'append'):
            feed.episodes.append(e)
    self.feed = feed
    return self.feed
def parseChapter(self, chapter, chapter_end_monad):
    """Record one chapter span and advance the running first-monad cursor."""
    self.chapters.append(
        Chapter(self.chapter_first_monad, chapter_end_monad, chapter,
                self.bookname))
    # The next chapter starts right after this one's last monad.
    self.chapter_first_monad = chapter_end_monad + 1
def reset(cls):
    """Reset to default state."""
    # Clear the chapter counter, then reset the collaborating classes'
    # shared state as well so the whole numbering scheme starts fresh.
    cls.chapters_number = 0
    Theme.reset()
    Chapter.reset()
def parse(self, config, source):
    """Parse presentation from source stream.

    Parameters
    ----------
    config : MatisseConfig
        MaTiSSe configuration
    source: str
    """
    complete_source = self.parser.includes(source=source)
    self.__get_metadata(source=complete_source)
    self.__get_theme(source=complete_source)
    new_theme = Theme()
    new_theme.set_from(other=self.theme)
    tokens = self.parser.tokenize(source=complete_source)
    self.__check_bad_sectioning(tokens=tokens)
    # Global counters across the whole presentation.
    chapters_number = 0
    sections_number = 0
    subsections_number = 0
    slides_number = 0
    titlepage_inserted = False
    for chap in tokens['chapters']:
        chapters_number += 1
        # Per-chapter/section/subsection slide counters, used to decide where
        # to auto-insert Table-of-Contents slides.
        slide_local_numbers = [0, 0, 0]
        if chap['match'].group('expr'):
            chapter = Chapter(number=chapters_number, title=chap['match'].group('expr'))
        else:
            chapter = Chapter(number=chapters_number, title='')
        for sec in tokens['sections']:
            # Only sections whose token span falls inside this chapter.
            if sec['start'] >= chap['start'] and sec['start'] <= chap['end_next']:
                sections_number += 1
                slide_local_numbers[1] = 0
                slide_local_numbers[2] = 0
                section = Section(number=sections_number, title=sec['match'].group('expr'))
                for subsec in tokens['subsections']:
                    if subsec['start'] >= sec['start'] and subsec['start'] <= sec['end_next']:
                        subsections_number += 1
                        slide_local_numbers[2] = 0
                        subsection = Subsection(number=subsections_number, title=subsec['match'].group('expr'))
                        for sld in tokens['slides']:
                            # The special $titlepage slide is numbered 0 and inserted once.
                            if '$titlepage' in sld['match'].group().lower() and not titlepage_inserted:
                                slide = Slide(number=0, title='titlepage', contents=complete_source[sld['end']:sld['end_next']])
                                slide.get_overtheme(parser=self.parser)
                                if slide.overtheme.copy_from_theme is not None and slide.overtheme.copy_from_theme:
                                    slide.overtheme.copy_from(other=self.theme)
                                self.position.update_position(presentation_theme=self.theme, overtheme=slide.overtheme)
                                slide.set_position(position=self.position.position)
                                subsection.add_slide(slide=slide)
                                titlepage_inserted = True
                            else:
                                if sld['start'] >= subsec['start'] and sld['start'] <= subsec['end_next']:
                                    slide_local_numbers[0] += 1
                                    slide_local_numbers[1] += 1
                                    slide_local_numbers[2] += 1
                                    # Auto-insert TOC slides at the start of a
                                    # chapter/section/subsection when configured.
                                    if slide_local_numbers[0] == 1 and config.toc_at_chap_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_chap_beginning) + ']'))
                                    if slide_local_numbers[1] == 1 and config.toc_at_sec_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_sec_beginning) + ']'))
                                    if slide_local_numbers[2] == 1 and config.toc_at_subsec_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_subsec_beginning) + ']'))
                                    slides_number += 1
                                    slide = Slide(number=slides_number, title=sld['match'].group('expr'), contents=complete_source[sld['end']:sld['end_next']])
                                    slide.get_overtheme(parser=self.parser)
                                    if slide.overtheme.copy_from_theme is not None and slide.overtheme.copy_from_theme:
                                        slide.overtheme.copy_from(other=self.theme)
                                    self.position.update_position(presentation_theme=self.theme, overtheme=slide.overtheme)
                                    slide.set_position(position=self.position.position)
                                    subsection.add_slide(slide=slide)
                        section.add_subsection(subsection=subsection)
                chapter.add_section(section=section)
        self.__add_chapter(chapter=chapter)
    self.metadata['total_slides_number'].update_value(value=str(Subsection.slides_number))
import pygame
from base import width, height, running
from drawing import Drawer
from keys_handler import event_handler
from chapter import Chapter
# NOTE(review): this shadows the stdlib `platform` module -- presumably a
# local game module named platform.py; confirm the import resolves as intended.
from platform import Platform
from characters import kokichi, kaito

# Window and rendering setup.
screen = pygame.display.set_mode((width, height))
drawer = Drawer()
chapter = Chapter()
extra = []

# World sprites: one platform plus the two characters.
all_sprites = pygame.sprite.Group()
all_sprites.add(Platform(100, 100, 100, 100))
all_sprites.add(kokichi)
all_sprites.add(kaito)

# Main loop: draw the scene, then let the event handler update `extra`.
while running:
    drawer.school_graphic(screen, extra, all_sprites)
    extra = event_handler(screen, extra, chapter)
def count_questions(self):
    """Recompute and store the total question count across all chapters."""
    # Note: the inner call targets the module-level count_questions helper,
    # not this method.
    self.num_questions = sum(
        count_questions(Chapter.get(key)) for key in self.chapters)
def parse(self, config, source):
    """Parse presentation from source stream.

    Parameters
    ----------
    config : MatisseConfig
        MaTiSSe configuration
    source: str
    """
    complete_source = self.parser.includes(source=source)
    self.__get_metadata(source=complete_source)
    self.__get_theme(source=complete_source)
    new_theme = Theme()
    new_theme.set_from(other=self.theme)
    tokens = self.parser.tokenize(source=complete_source)
    self.__check_bad_sectioning(tokens=tokens)
    # Global counters across the whole presentation.
    chapters_number = 0
    sections_number = 0
    subsections_number = 0
    slides_number = 0
    titlepage_inserted = False
    for chap in tokens['chapters']:
        chapters_number += 1
        # Per-chapter/section/subsection slide counters, used to decide where
        # to auto-insert Table-of-Contents slides.
        slide_local_numbers = [0, 0, 0]
        if chap['match'].group('expr'):
            chapter = Chapter(number=chapters_number, title=chap['match'].group('expr'))
        else:
            chapter = Chapter(number=chapters_number, title='')
        for sec in tokens['sections']:
            # Only sections whose token span falls inside this chapter.
            if sec['start'] >= chap['start'] and sec['start'] <= chap['end_next']:
                sections_number += 1
                slide_local_numbers[1] = 0
                slide_local_numbers[2] = 0
                section = Section(number=sections_number, title=sec['match'].group('expr'))
                for subsec in tokens['subsections']:
                    if subsec['start'] >= sec['start'] and subsec['start'] <= sec['end_next']:
                        subsections_number += 1
                        slide_local_numbers[2] = 0
                        subsection = Subsection(number=subsections_number, title=subsec['match'].group('expr'))
                        for sld in tokens['slides']:
                            # The special $titlepage slide is numbered 0 and inserted once.
                            if '$titlepage' in sld['match'].group().lower() and not titlepage_inserted:
                                slide = Slide(number=0, title='titlepage', contents=complete_source[sld['end']:sld['end_next']])
                                slide.get_overtheme(parser=self.parser)
                                if slide.overtheme.copy_from_theme is not None and slide.overtheme.copy_from_theme:
                                    slide.overtheme.copy_from(other=self.theme)
                                self.position.update_position(presentation_theme=self.theme, overtheme=slide.overtheme)
                                slide.set_position(position=self.position.position)
                                subsection.add_slide(slide=slide)
                                titlepage_inserted = True
                            else:
                                if sld['start'] >= subsec['start'] and sld['start'] <= subsec['end_next']:
                                    slide_local_numbers[0] += 1
                                    slide_local_numbers[1] += 1
                                    slide_local_numbers[2] += 1
                                    # Auto-insert TOC slides at the start of a
                                    # chapter/section/subsection when configured.
                                    if slide_local_numbers[0] == 1 and config.toc_at_chap_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_chap_beginning) + ']'))
                                    if slide_local_numbers[1] == 1 and config.toc_at_sec_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_sec_beginning) + ']'))
                                    if slide_local_numbers[2] == 1 and config.toc_at_subsec_beginning is not None:
                                        slides_number += 1
                                        self.position.update_position(presentation_theme=self.theme)
                                        subsection.add_slide(slide=Slide(number=slides_number, position=self.position.position, title='Table of Contents', contents='$toc[depth:' + str(config.toc_at_subsec_beginning) + ']'))
                                    slides_number += 1
                                    slide = Slide(number=slides_number, title=sld['match'].group('expr'), contents=complete_source[sld['end']:sld['end_next']])
                                    slide.get_overtheme(parser=self.parser)
                                    if slide.overtheme.copy_from_theme is not None and slide.overtheme.copy_from_theme:
                                        slide.overtheme.copy_from(other=self.theme)
                                    self.position.update_position(presentation_theme=self.theme, overtheme=slide.overtheme)
                                    slide.set_position(position=self.position.position)
                                    subsection.add_slide(slide=slide)
                        section.add_subsection(subsection=subsection)
                chapter.add_section(section=section)
        self.__add_chapter(chapter=chapter)
    self.metadata['total_slides_number'].update_value(value=str(Subsection.slides_number))
def convert_url_to_chapter(url):
    """Converts a URL to a chapter."""
    response = requests.get(url)
    parsed = BeautifulSoup(response.content, 'html.parser')
    return Chapter(parsed)
def generate_chapter(datadir, name):
    """Load the chapter YAML for `name` and build a Chapter from it.

    The nesting level is the number of path segments in `name`.
    """
    yaml_path = join(datadir, 'chapter', f'{name}.yml')
    depth = len(name.split('/'))
    return Chapter(config_id=f'chapter/{name}',
                   level=depth,
                   **open_yaml(yaml_path))
save = load_save() logger.info("Running scraper.") # If you do not wish to use a task scheduler and want this program # to run indefinitely instead, start the while loop below # this comment. See README for more details. need_to_save = False for url in urls: # Convert HTML to soup. res = requests.get(url) soup = BeautifulSoup(res.content, 'html.parser') try: # Get chapter information from soup. ch = Chapter(soup) # Check and see if the chapter is more recent than what was last saved. manga = ch.manga_title.lower() if manga not in save: save[manga] = ch.latest_num need_to_save = True ch.fetch_chapter_details(save[manga]) if save[manga] < ch.num: try: if manga in save: logger.info(f'Sent push for {ch.manga_title}.') send_push(ch) save[manga] = ch.latest_num
def insertRows(self, row, count, parent=QModelIndex()):
    """Insert `count` blank chapters starting at `row` (Qt model API)."""
    last = row + count - 1
    self.beginInsertRows(parent, row, last)
    for offset in range(count):
        blank = Chapter(title='', start=Timestamp(), end=Timestamp())
        self._mp3.chapters.insert(row + offset, blank)
    self.endInsertRows()
    return True
def main():
    """CLI entry point: parse arguments and build each edition's AsciiDoc file."""
    parser = ArgumentParser(description='Tool for generating recipe books')
    parser.add_argument('-b', '--builddir', type=str, default='builds',
                        help='Destination for built documents')
    parser.add_argument(
        '-c', '--configfile', type=str, default='book/book.yml',
        help='Input file describing various editions, e.g., \'book/book.yml\'')
    parser.add_argument('-d', '--datadir', type=str, default='book')
    parser.add_argument('-t', '--temperature', help='Set temperature',
                        choices=['imperial', 'si'], default='imperial')
    args = parser.parse_args()
    # NOTE(review): plain attribute reads on an argparse Namespace cannot
    # raise here, so this try/except looks like dead code.
    try:
        configfile = args.configfile
        datadir = args.datadir
        temperature = args.temperature
        builddir = args.builddir
    except Exception as e:
        print(e)
        exit(1)
    if not isdir(builddir):
        mkdir(builddir)
    recipe_dir = join(datadir, 'recipes')
    # NOTE(review): equipment_chapter appears unused below; the loop builds
    # its own rendered equipment chapter instead.
    equipment_chapter = generate_chapter(datadir, 'equipment')
    recipes = generate_section(recipe_dir, 'recipe', Recipe)
    equipment = generate_section(join(datadir, 'equipment'), 'equipment', Equipment)
    ingredients = generate_section(join(datadir, 'ingredients'), 'ingredient', Ingredient)
    sections = generate_editions(recipes, configfile)
    for edition in sections:
        rendered_cover = CoverAsciidocPresenter(edition['cover']).render()
        rendered_equipment_chapter = ChapterAsciidocPresenter(
            Chapter(config_id='chapter/equipment',
                    **open_yaml(join(datadir, 'chapter/equipment.yml')),
                    level=2)).render()
        rendered_equipment = '\n'.join(
            EquipmentAsciidocPresenter(entry).render() for entry in equipment)
        rendered_ingredient_chapter = ChapterAsciidocPresenter(
            Chapter(config_id='chapter/ingredients',
                    **open_yaml(join(datadir, 'chapter/ingredients.yml')),
                    level=2)).render()
        rendered_ingredients = '\n'.join(
            IngredientAsciidocPresenter(ingredient).render()
            for ingredient in ingredients)
        rendered_recipes_chapter = ChapterAsciidocPresenter(
            Chapter(config_id='chapter/recipes',
                    **open_yaml(join(datadir, 'chapter/recipes.yml')))).render()
        rendered_recipes = []
        # NOTE(review): `recipes` is rebound by this loop, shadowing the
        # generate_section result above -- harmless only because `sections`
        # was already computed from it; confirm that is intentional.
        for section, recipes in edition['recipes'].items():
            chapter_name = join('chapter', 'recipes', section)
            rendered_chapter = ChapterAsciidocPresenter(
                Chapter(config_id=chapter_name,
                        **open_yaml(join(datadir, f'{chapter_name}.yml')),
                        level=2)).render()
            rendered_recipes.append(rendered_chapter)
            rendered_recipes.append(''.join(
                RecipeAsciidocPresenter(recipe).render() for recipe in recipes))
        # Assemble the edition in presentation order and write it out.
        book = [
            rendered_cover, rendered_equipment_chapter, rendered_equipment,
            rendered_ingredient_chapter, rendered_ingredients,
            rendered_recipes_chapter, '\n'.join(rendered_recipes)
        ]
        fname = f"{edition['book_id']}.adoc"
        adoc_destination = join(builddir, 'asciidoc')
        if not isdir(adoc_destination):
            mkdir(adoc_destination)
        build_destination = join(adoc_destination, fname)
        with open(build_destination, 'w') as f:
            f.write('\n'.join(book))
def get_chapter_layout(book=None, chapter=None, chapter_nav=None):
    """Build a Chapter layout bound to the module-level RENDERER."""
    layout = Chapter(renderer=RENDERER,
                     book=book,
                     chapter=chapter,
                     chapter_nav=chapter_nav)
    return layout