def describe_specialty(name, product, data):
    '''Describe what the city is famous for — its specialty product.'''
    grammar_rules = {
        'start': [
            '#city# is #famous# for its #product#, and #superlative#.',
            '#product# is big business in #city#, and #superlative#.',
            "#city#'s name is practically synonymous with #product#, "
            'and #superlative#.',
        ],
        'product': product,
        'name': get_latin(name, capitalize=True),
        'city': get_latin(data['city_name'], capitalize=True),
        'famous': [
            'famous', 'world renowned', 'renowned', 'known',
            'known far and wide',
        ],
        'superlative': [
            'this is the #best# destination to partake',
            "there's no better introduction to the local specialty than "
            '#name#',
            "it doesn't get better than #name#",
            '#name# is the best in the business',
        ],
        'best': ['best', 'number one'],
    }
    # Flatten the grammar and hand off to the shared text formatter.
    return format_text(tracery.Grammar(grammar_rules).flatten('#start#'))
def narrate_rejection(event, events):
    """Print a short narration of one partner turning the other down."""
    a, b = get_ab(event)
    weekday = event['date'].strftime('%A')
    grammar_rules = {
        'origin': "#Onday# #want#, #reject#.",
        'a': a['name'],
        'b': b['name'],
        'Onday': [
            f"On {weekday},",
            f"{weekday} came around.",
            "Later that week,"
        ],
        'want': [
            '#a# asked #b# if they wanted to hang out',
            f'#a# asked #b# if {b["they"]} were free',
            '#a# wanted to hang out with #b#',
            '#a# wanted to see #b#',
        ],
        'reject': [
            'but #b# was busy',
            "but #b# forgot to return #a#'s message",
            'but #b# had other plans',
            "but #b# never responded to #a#'s message"
        ]
    }
    # While courting, usually show a date artifact instead of prose.
    if event.get('phase') == Phase.COURTING and random.random() < 0.6:
        print(artifacts.get_date_artifact(event, events, True))
    else:
        print(tracery.Grammar(grammar_rules).flatten('<p>#origin#</p>'))
def get_tracery_snippet(name):
    """Load the tracery grammar stored in <name>.json and expand #origin#."""
    with open(f'{name}.json') as rules_file:
        grammar = tracery.Grammar(json.load(rules_file))
    grammar.add_modifiers(base_english)
    return grammar.flatten('#origin#')
def interpertString(text, actor, target):
    """Post-process a narrative string: collapse paragraph markers, strip
    control tags, transcribe embedded commands and expand tracery rules.

    NOTE(review): the name is a typo for "interpret" but is kept since
    callers elsewhere depend on it.
    """
    if not isinstance(text, str):
        return str(text)
    if "" == text:
        return text
    # Collapse doubled paragraph markers, turn markers into blank lines,
    # and squeeze runs of newlines down to a single paragraph break.
    text = re.sub("<PAR> *?<PAR>", "<PAR>", text)
    text = re.sub("<PAR>", "\n\n", text)
    text = re.sub("\n\n\n*?", "\n\n", text)
    # Strip structural control tags left over from generation.
    text = re.sub("<VOYAGE.*?>", "", text)
    text = re.sub("<END.*?>", "", text)
    text = re.sub("<BEGIN.*?>", "", text)
    text = re.sub("<.*?END>", "", text)
    text = re.sub("<.*?BEGIN>", "", text)
    text = transcribeCommands(text, actor, target)
    # NOTE(review): local_table and local_ship_type are computed but never
    # used — the ship-name extension below is commented out. Confirm intent.
    local_table = postprocessing_table
    local_ship_type = []
    if actor != None:
        if actor.ship_type != None:
            local_ship_type = [actor.ship_type]
    # NOTE(review): mutates the module-level table on every call.
    postprocessing_table["ship"] = [
        "ship",
        "vessel"
    ]  # ,str(actor._ship_name)].extend(local_ship_type)
    grammar = tracery.Grammar(postprocessing_table)
    grammar.add_modifiers(base_english)
    # Two flatten passes with a command transcription pass in between, so
    # rules produced by the commands also get expanded.
    output = grammar.flatten(str(text))
    output = transcribeCommands(output, actor, target)
    output = grammar.flatten(str(output))
    if len(output) > 0:
        if output[-1] in "\".?!$\n":  # End of a sentence
            output = output + " "  # Add a space
    # TODO: combine quotes together and break on speaker change
    return output
def local_dish(data):
    '''Describe a local dish — a stew made from blip and blap.'''
    animal_meats = ['%s meat' % get_latin(beast['name'])
                    for beast in data['cuisine']['animals']]
    vegetables = data['cuisine']['vegetables']
    grammar_rules = {
        'start': '#type# made with #main_ingredient# and #ingredient#',
        'type': [
            '#soup_adjective# soup', 'dish', 'stir-fry', 'pie', 'stew',
            'spread', 'flatbread', 'dumpling', 'wrap',
        ],
        'soup_adjective': [
            'thick', 'thin', 'clear', 'hot', 'creamy', 'rich', 'light'],
        'main_ingredient': ['#meat#', '#vegetable#'],
        'ingredient': ['a vegetable called #vegetable2#'],
        # Local animal meats plus the familiar staples.
        'meat': animal_meats + ['pork', 'beef', 'lamb', 'chicken',
                                'game', 'fish'],
        'vegetable': '<em>%s</em>' % get_latin(vegetables[0]['name']),
        'vegetable2': '<em>%s</em>' % get_latin(vegetables[1]['name']),
    }
    return format_text(tracery.Grammar(grammar_rules).flatten('#start#'))
def narrate_committed(events):
    """Print a summary of a committed couple's shared experiences and
    conflicts, then narrate the relationship's end (death or breakup).

    Fixes: the flatten target was '#origin#</p>', which appended a second,
    unmatched '</p>' (origin already ends with one); "meteroite" typo.
    """
    summary = util.get_event_meta(events)
    rules = {
        'origin': ['<p>#exp#. #conflict#</p>'],
        'exp': '#best_exp#' if summary['best_experience']
            else 'The couple unfortunately never spent more time together',
        'conflict': ['#best_conflict#.',
                     '#best_conflict#, but #worst_conflict#.',
                     '#best_conflict#, but #popular_conflict#.']
            if summary['worst_conflict'] else 'The couple never clashed.',
        'best_exp': f"Their similar levels in {PROP_NAMES.get(summary['best_experience'])} facilitated a healthy growth in their relationship",
        'worst_conflict': f"their fights over their difference in {PROP_NAMES.get(summary['worst_conflict'])} were #bitter#",
        'best_conflict': f"The couple was proud of their ability to work through their differences in {PROP_NAMES.get(summary['best_conflict'])}",
        'popular_conflict': f"the couple fought often because of differences in {PROP_NAMES.get(summary['popular_conflict'])}",
        'bitter': ['acrid', 'virulent', 'bitter', 'harsh', 'difficult',
                   'hard to recover from', 'emotionally exhausting']
    }
    # origin already closes the paragraph; do not append another '</p>'.
    print(tracery.Grammar(rules).flatten('#origin#'))
    if events[-1]['type'] == EventType.DEATH:
        death = random.choice([
            "a meteorite striking the Earth",  # was misspelled "meteroite"
            "a global pandemic",
            "an infected paper cut",
            "a falling grand piano",
            "a poorly placed pothole in the road",
            "a stroke caused by the erroneous publication of their own obituary",
            "the collapse of the Marxist state"
        ])
        print(
            f"Unfortunately, {events[-1]['person']['name']} died tragically due to {death}.")
    else:
        print(
            f"Ultimately their differences proved too great to overcome. ")
def do_execute(self, code, silent, store_history=True, user_expressions=None,
               allow_stdin=False):
    """Kernel execute hook: treat *code* as a JSON tracery grammar, flatten
    its #origin# and stream the result (or the JSON error) to stdout."""
    def _reply(status):
        # The base class increments the execution count.
        return {'status': status,
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
                }

    try:
        grammar = tracery.Grammar(json.loads(code))
        grammar.add_modifiers(base_english)
        result = grammar.flatten("#origin#")
        if not silent:
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': result})
        return _reply('ok')
    except json.decoder.JSONDecodeError as e:
        if not silent:
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': "Error: " + str(e)})
        return _reply('error')
def get_drink_description(s, d):
    """Return a flavour-text description of drink *d* themed on the artist."""
    description_rules = get_foods()
    description_rules['artist'] = humanise_name(s['artist'])
    description_rules['drink1'] = d['name']
    description_rules['drink2'] = random.choice(
        list(drink_data['cocktails'].items()))[0]
    description_rules['first_sentence'] = [
        '#ADJ.a.capitalize# mix between the #drink1# and the #drink2#',
        '#ADJ.a.capitalize# variant of the #drink1#'
    ]
    # Prefer vocabulary mined from the song; fall back to stock words.
    vocab = get_vocab(s, d)
    description_rules['ADJ'] = vocab.get('ADJ', ['aged', 'worn', 'weathered'])
    description_rules['NOUN'] = vocab.get('NOUN', ['flow', 'music', 'crib'])
    # "artist's" unless the name already ends in s (then just "artists'").
    possessive = '\'' if s['artist'][-1].lower() == 's' else '\'s'
    description_rules['base'] = (
        "#first_sentence#. As #ADJ# as #artist#{} #NOUN#.".format(possessive))
    description = tracery.Grammar(description_rules)
    description.add_modifiers(base_english)
    return description.flatten('#base#')
def main():
    """Assemble the grammar from every data file and print a generation."""
    data_path = pathlib.Path(__file__).resolve().parent / 'data'
    # Load the list of grammar filenames.
    with open(data_path / 'files.json', 'r') as f:
        filenames = json.load(f)
    # Merge the rules from every listed file into one rule set.
    rules = {}
    for filename in filenames:
        with open(data_path / filename, 'r') as f:
            rules.update(json.load(f))
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    # `python3 main.py coffee` expands the #coffee# rule — handy for testing.
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            print(grammar.flatten(f"#{arg}#"))
    else:
        # One of the main options, with a procgen line delimiter.
        print(grammar.flatten("\n#main#"))
        print(grammar.flatten("#linedrawing#"))
def create_letter(keyword):
    """Build a recruiter-style form letter themed around *keyword*.

    Fix: the second introduction read "Stumbled uponed upon your ..." —
    the duplicated, garbled verb is now simply "Stumbled upon".
    """
    rules = {
        'origin': '#introduction# #talk_about_talent# #lead_into_job_description#',
        'introduction': [
            'Found your #website# via Google and enjoyed #reading# it.',
            'Stumbled upon your #website#, and loved #reading# it.',
            'Found your #website# via a keyword search for KEYWORD engineers.'
        ],
        'website': ['website', 'blog', 'profile', 'resume'],
        'reading': ['reading', 'looking at'],
        'talk_about_talent': ['You look like you #know# #keyword_praise#.'],
        'know': ['know', 'understand'],
        'keyword_praise': ['your KEYWORD-fu', 'KEYWORD deeply',
                           'KEYWORD down to a science'],
        'lead_into_job_description': [
            'I have #interesting.a# project for you to look at.',
            "My company is looking for #interesting# people like you."
        ],
        'interesting': ['interesting', 'cool', 'awesome', 'neat'],
    }
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    # KEYWORD is substituted after flattening so tracery never parses it.
    letter = grammar.flatten("#origin#").replace("KEYWORD", keyword)
    return letter
def get_meetup(a, b):
    """Return a sentence describing the couple arranging to meet up."""
    grammar_rules = {
        'origin': [
            '\n#they# #met# #discuss#.',
        ],
        'they': ['They', 'The couple'],
        'met': [
            'later met up at #location#',
            'later got on the phone',
            'arranged a time',
            'met up',
        ],
        # A procedurally generated venue name (no description).
        'location': business_gen.get_business(desc=False),
        'discuss': [
            'to discuss', 'to talk', 'to chat', 'to hash things out',
            'to continue the conversation'
        ],
        'b_name': b['name'],
        'b_they': b['they'],
        'b_their': b['their'],
        'a_name': a['name'],
    }
    return tracery.Grammar(grammar_rules).flatten('#origin#')
def generate():
    """CLI entry point: generate a tweet-sized score from a JSON grammar."""
    parser = argparse.ArgumentParser(description='PlasticPolyhedra tweetbot')
    parser.add_argument('--grammar', required=True, help='JSON grammar')
    parser.add_argument('--maxlen', default=270, type=int,
                        help='Max tweet length')
    parser.add_argument('--print', help='Print score', action='store_true')
    parser.add_argument('--tweet', help='Tweet score', action='store_true')
    args = parser.parse_args()

    with open(args.grammar) as data_file:
        grammar = tracery.Grammar(json.load(data_file))
    grammar.add_modifiers(base_english)

    # Re-roll until the grammar produces a non-empty score.
    score = ''
    while not score:
        score = grammar.flatten('#origin#')
    # Normalize whitespace; '|' stands in for the '#' rule delimiter.
    score = ' '.join(score.split())
    score = score.replace('|', '#')
    if len(score) > args.maxlen:
        score = score[:args.maxlen] + '...'
    # NOTE(review): --tweet is parsed but never acted on in this function.
    if args.print:
        print(score)
        print(len(score))
def response():
    """Print a generated small-talk answer to "how have you been?".

    Fix: "Papau New Guinea" was a typo for "Papua New Guinea".
    """
    rules = {
        'origin': "I have been #activity# and recently got a new #thing#, thanks for asking!",
        'activity': [
            "working #amount#", "traveling around #place#",
            "writing a #writtenObject#", "studying #amount#"
        ],
        'amount': ["a lot", "here and there", "too much", "less", "more"],
        'place': [
            "Thailand", "South Africa", "Europe", "Spain", "Argentina",
            "Papua New Guinea", "Bulgaria", "Japan"
        ],
        'writtenObject': ["book", "research paper", "magazine submission",
                          "travel blog"],
        'thing': [
            "dog", "cat", "car", "house", "job", "partner",
            "passion for #hobby#"
        ],
        'hobby': ["baking", "singing", "running", "teaching", "gaming",
                  "crosswords"]
    }
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    print(grammar.flatten("#origin#"))
def describe_shrine(god, activity, data):
    '''Short description of a shrine for pins.

    Fixes string-repetition bugs: expressions like `'' * 5` and
    `'fresh fruit' * 5` inside the option lists produced a SINGLE
    (concatenated) string rather than repeated list entries, destroying
    the intended weighting. They are now list repetitions.
    '''
    rules = {
        'start': 'A #descriptor# shrine to #god#.',
        'descriptor': '#adjective# #material#',
        # Empty entries weight towards omitting the adjective/material.
        'adjective': [
            'small', 'elaborate', 'popular', 'charming', 'sacred',
            'tucked-away'] + [''] * 5,
        'god': [
            'the god #god_name#, who #appearance#',
            '#god_name#, a god who #appearance#',
        ],
        'appearance': 'is depicted as #depiction#',
        'god_name': get_latin(god['name'], capitalize=True),
        'depiction': god['description'],
        'material': [data['primary_material']] + [''] * 10,
        'sacrifice': 'Look for small sacrifices of #sacrifice_item# left '
                     'by believers.',
        # Repetition weights the mundane offerings over the grisly ones.
        'sacrifice_item': [
            'blood', 'hair', 'teeth'] + ['fresh fruit'] * 5 + [
            'secrets', 'loose change', 'bread'] + ['handmade icons'] * 3,
        'omens': 'You may find a believer casting their fortune with '
                 '#omen_object#.',
        'omen_object': [
            'polished stones', 'divining cards', 'lots', 'finger bones',
            'animal bones', 'stones', 'ceramic tiles', 'marble tiles',
            'carved sticks'],
    }
    # Optionally append the activity-specific sentence.
    if activity in ['sacrifice', 'omens']:
        rules['start'] += ' #%s#' % activity
    grammar = tracery.Grammar(rules)
    return format_text(grammar.flatten('#start#'))
def get_tracery(rule_key: str) -> str:
    """Expand the module-level grammar rule named *rule_key*."""
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    return grammar.flatten(f'#{rule_key}#')
def describe_hf(analysed):
    """Write a generated biography of a historical figure to the global
    output handle and return the text.

    Fixes: bare `except:` narrowed to the TypeError the join can raise,
    `== False` replaced with a truthiness test (assumes 'deity' is a bool
    — confirm), and the unused `global recorded` declaration removed.
    """
    global handle
    rules = {
        'origin': ["#[#setPronouns#][hero:#hf_name#]top#"],
    }
    event_strings = get_event_strings(analysed, count=4)
    try:
        event_strings = " ".join(event_strings)
    except TypeError:
        # Non-string entries (or a non-iterable) mean no usable event text.
        event_strings = ""
    # NOTE(review): purpose of this pause is unclear — possibly pacing or
    # rate limiting; confirm before removing.
    time.sleep(1)
    if not analysed['deity']:
        rules['top'] = [
            "#hero# #heroWas# a #caste# #race#. #age# #goals# #skills# #links# #entity# #pet# #events#",
            "#hero# #heroWas# a #caste# #race#. #goals# #skills# #age# #entity# #links# #events#",
            "#hero# #heroWas# a #race#. #goals# #events# #pet# #skills# #entity# #links# #age#"
        ]
    else:
        rules['top'] = [
            "#hero# #heroWas# a deity. #spheres# #worshippers#",
            "#hero# #heroWas# a deity. #worshippers# #spheres#"
        ]
    rules = add_rules(analysed, rules)
    rules['events'] = [event_strings]
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    text = grammar.flatten("#origin#")
    handle.write("\n")
    handle.write(text)
    handle.write("\n")
    return text
def andthen():
    """Print a short story-transition teaser ("Then, one day...")."""
    grammar_rules = {
        'origin': '#then#...',
        'then': ['Then Alex saw', 'Until there was', 'Then, one day']
    }
    print(tracery.Grammar(grammar_rules).flatten('#origin#'))
def narrate_conflict(event):
    """Narrate a conflict event between partners a and b."""
    a, b = get_ab(event)
    target_p = event['target_property']
    logging.debug(event['delta'])
    # Describe what the conflict is actually about.
    problem_phrase = get_problem(a, b, target_p)
    if not event['initiated']:
        # A was grumpy, but didn't actually initiate a fight.
        print(get_conflict_thought(a, b, event, problem_phrase))
        return
    # The bigger the event, the more chance we narrate it explicitly;
    # otherwise fall back to the zoomed-out summary.
    if random.random() >= abs(event['delta']):
        narrate_conflict_zoomout(a, b, event, problem_phrase)
        return
    grammar_rules = {
        'origin': [
            '#message# #meetup# #complaint# #response# #outcome#\n',
            '#message# #complaint# #response# #outcome#\n',
            '#thought# #meetup# #response# #outcome#\n',
            '#thought# #response# #outcome#\n',
        ],
        'thought': get_conflict_thought(a, b, event, problem_phrase),
        'message': artifacts.get_fight_trigger(event),
        'meetup': get_meetup(a, b),
        'complaint': get_problem_statement(a, b, problem_phrase, event),
        'response': get_response(a, b, event),
        'outcome': get_outcome(a, b, event)
    }
    print(tracery.Grammar(grammar_rules).flatten('#origin#'))
def narrate_ran(a, b):
    """Print a brief follow-up after a failed connection (often nothing:
    three of the five origin options are empty strings)."""
    grammar_rules = {
        'origin': [
            'They texted #b# once#response#.',
            '',
            '',
            '',
            f'They considered getting more interested in {random.choice(list(INTERESTS))}.'
        ],
        'a': a['name'],
        'a_they': a['they'],
        'a_their': a['their'],
        'b': b['name'],
        'b_their': b['their'],
        'b_they': b['they'],
        # Possible replies, ranked by how agreeable b is.
        'response': rank([
            ", but #b_they# didn't respond",
            ' but #b_they# politely asked for space',
            ' and #a_they# got a few messages back, but #b_they# were clearly uninterested',
            ' and #b_they# chatted with them, but it did little for #a#'
        ], b['agree'])
    }
    print(tracery.Grammar(grammar_rules).flatten('#origin#'))
def get_problem_statement(a, b, problem_phrase, event):
    """Return a narrated statement of a's complaint and b's reaction."""
    # Repeat complaints get an "again " marker.
    repeat = 'again ' if event['prev'] else ''
    grammar_rules = {
        'origin': [
            '#problem_statement#. #anger# #reaction#.',
            '#problem_statement#. #anger# #reaction#. #texture#',
        ],
        'problem_statement': [
            f'#a# {repeat}was #upset# because #a_they# felt that #b# #problem#',
            f'#a# {repeat}told #b# that #b# #problem#',
        ],
        # Severity of the gap, ranked by the conflict's target value.
        'anger': util.rank([
            "#it_seemd# just the smallest difference in expectations. ",
            "#it_seemd# a modest gap in expectations",
            "#it_seemd# some difference in this regard.",
            "#it_seemd# a gap in expectations.",
            "#it_seemd# a serious gap.",
            "#it_seemd# a chasm in understanding.",
            "#it_seemd# an insurmountable gulf of incompatibility.",
        ], event['target']),
        'it_seemd': [
            'It seemed like there was',
            'There was',
            "It seemed like",
            '#b# had to admit that #b_they# and #a# had ',
        ],
        'upset': ['upset', 'frustrated', 'mad', 'angry'],
        'a': a['name'],
        'a_they': a['they'],
        'b': b['name'],
        'b_they': b['they'],
        'problem': problem_phrase,
        'pushing': ['pushing', 'telling', 'convincing', 'nagging',
                    'dragging'],
        # a's delivery, ranked by the neuroticism roll for this event.
        'reaction': util.rank([
            "#a#'s voice was gentle, but firm",
            "#a#'s voice was soft, but firm",
            '#a# asked #b# if there was anything they could do to help.',
            '#a# looked at #b# silently, waiting for a response',
            '#a# folded their arms, glaring at #b#',
            "#a#'s voice was harsh",
            "#a#'s voice was cold",
            "#a#'s tones were accusing",
        ], event['neuro_roll']),
        # b's reaction, jittered around b's neuroticism.
        'texture': util.rank([
            '#b# sighed.',
            '#b# blinked slowly.',
            '#b# rubbed their eyes.',
            '#b# took a deep breath.',
            '#b# gasped anxiously.',
            '#b# was shocked.',
            '#b# was mortified.',
            '#b# was incensed.',
        ], random.gauss(b['neuro'], 0.1))
    }
    return tracery.Grammar(grammar_rules).flatten('#origin#')
def narrate_alex(a, interest, hobby):
    """Print a vignette about Alex coping while single.

    Fixes: a mis-encoded em dash ("lifeāin") in the third texture string
    is restored to "life—in", and the dead rule key 'modifer' is spelled
    'modifier'.
    """
    a_verb = random.choice(INTERESTS[interest]['location'])
    rules = {
        'origin': ['#texture##a_does#', '#statement#'],
        'texture': [
            'Everything felt harder for a while. ',
            'Time seemed to pass in slow motion. ',
            '#a# couldn\'t deny the slow monotony of their life—in attempt to improve it, ',
            'The days felt shorter without a partner to fill them. ',
            'Alex\'s calendar filled up with ease and '
        ],
        'statement': [
            '#a# wondered if they were going to spend the rest of their life in search for someone who might understand them.',
            '#a# listened to #music# and thought about their future.',
            'While #a# aimed to be independent and confident alone, #a_they# were sometimes struck with the cold shock of solitude.',
            '#a# journaled about #event#.',
            '#a# deleted all the dating apps off their phone (again).'
        ],
        'music': [
            'Mitski', 'The National', 'Broken Social Scene',
            'Phoebe Bridgers', 'Frank Ocean', 'SZA', 'Carly Rae Jepsen',
            'Lorde', 'Fleetwood Mac', 'Robyn'
        ],
        'event': [
            'recent workplace drama', 'achieving their goals',
            'current events', 'their political perspective',
            'their fraught familial relationships',
            'the futility of online dating'
        ],
        'a_does': [
            f'#a# took time #doing# {hobby}.',
            f'#a# #started# {a_verb} #amount#.'
        ],
        # NOTE(review): this rule is never referenced by the grammar.
        'modifier': ['a lot', 'lots of', 'some', 'a little',
                     'a small amount of'],
        'doing': [
            'to practice', 'to watch YouTube videos about', 'enjoying',
            'obsessing over', 'delving into', 'appreciating',
            'taking pleasure in'
        ],
        'started': [
            'went to', 'made plans to go to', 'started to go to',
            'went back to', 'prioritized going to', 'spent time at',
            'chilled at'
        ],
        'amount': [
            'often', 'every now and then', 'occasionally',
            'as an act of self care', 'as frequently as #they# could'
        ],
        'hobby': hobby,
        'a': a['name'],
        'a_they': a['they'],
    }
    # Interest-specific rules may supply further symbols (e.g. #they#).
    rules.update(getInterestRules(a, {'name': ''}, interest))
    print(tracery.Grammar(rules).flatten('#origin#'))
def tea(climate):
    '''Describe a cup of tea, adjusted for the local climate.

    Fix: 'vervain' and 'grass' in the savory list were adjacent string
    literals missing a comma, so they silently concatenated into the
    single nonsense option 'vervaingrass'.
    '''
    rules = {
        'start': [
            'is served #temperature#. '
            'It is steeped to #milk_part#, and #tea_flavor#. '
            'People sometimes drink it with #additive#, '
            'and it is thought to have a mild #drug# effect.'
        ],
        'temperature': ['scalding hot and #too_hot#',
                        'steaming hot, #container#',
                        'warm, #container#'],
        'too_hot': [
            'has to be drunk in small, slurping sips that '
            'aerate and cool it down',
            'is sipped from a shallow spoon to avoid burning oneself',
            'the fragrant steam is enjoyed as a first course '
            'while the tea cools',
            'is poured between a set of small cups to cool it'
        ],
        'container': [
            'ladled out of a large pot',
            'from large kettle',
            'the leaves brewed in the cup',
            'brewed from a spoonful of ground powder whisked with water '
            ' in the cup'
        ],
        'tea_flavor': 'has a flavor reminiscent of #sweet# '
                      'and #savory#',
        'sweet': ['honey', 'cardamom', 'cinnamon', 'cloves', 'chocolate',
                  'anise', 'mint', 'chocolate', 'vanilla'],
        'savory': ['pepper', 'chili', 'tarragon', 'basil', 'sage',
                   'vervain', 'grass', 'lemongrass', 'cumin'],
        # Repetition weights towards the plain (milk-free) description.
        'milk_part': ['a #tea_color# color, opaque from added #milk#'] +
                     ['a #tea_color# color'] * 5,
        'tea_color': ['red-brown', 'yellow-green', 'golden yellow',
                      'dark green', 'grassy', 'orange-red', 'reddish'],
        'milk': ['milk', 'nut milk'],
        'additive': ['a sweet fruit syrup', 'a little salt', 'honey',
                     'a sour citrus juice a bit like lime',
                     'a pinch of dried, ground bark'],
        'drug': ['stimulant', 'soporific', 'dizzying', 'hallucinatory',
                 'dissociative', 'calming', 'pain-relieving'],
    }
    # Cold climates get iced dilution and butter tea.
    if 'arctic' in climate or 'polar' in climate:
        rules['too_hot'].append(
            'a ceramic jar of flaked ice is brought out alongside it,'
            'to dilute and cool the drink as desired')
        rules['milk'].append('butter')
    grammar = tracery.Grammar(rules)
    sentence = grammar.flatten('#start#')
    return format_text(sentence)
def singShantyVerse(shanty, verseno):
    """Flatten the chorus grammar for verse *verseno* of the shanty."""
    pshanty = prepShanty(shanty)
    verse_rules = {
        "chorus": pshanty["chorus"],
        "verse": pshanty["verse"][verseno],
        "verse_2": pshanty["verse_2"][verseno],
    }
    return tracery.Grammar(verse_rules).flatten("#chorus#")
def genDungeonName(self):
    """Generate a dungeon name from the DUNGEON_RULES grammar.

    Fix: `string.replace(title, ...)` is a Python 2 idiom — the function
    was removed from the `string` module in Python 3 (and this file
    already uses Python 3 f-strings elsewhere). Use the str method.
    """
    # TODO: generate more than just name here
    baseName = self.genNameWithMinMaxLength(2, 10)
    grammar = tracery.Grammar(DUNGEON_RULES)
    grammar.add_modifiers(base_english)
    title = grammar.flatten("#origin#")
    # Splice the generated base name into the title template.
    dungeonName = title.replace("$PP", baseName)
    return self.title2(dungeonName)
def get_response(a, b, event):
    """Return b's response to a's complaint.

    Positive deltas draw from the dominant (non-neuro) roll's rule;
    negative deltas draw from the #neuro# rule.

    Fix: the two 'agree' options were adjacent string literals missing a
    comma, so Python concatenated them into one double-length option and
    the second sentence could never appear on its own.
    """
    # Largest non-neuro roll decides which positive response flavor wins.
    biggest_roll = max(event['rolls'], key=lambda k: abs(
        event['rolls'][k]) if k != 'neuro' else 0)
    rules = {
        'pos': f"#{biggest_roll}#",
        'neg': "#neuro#",
        'agree': [
            '#b# wanted to please #a##apology#.',
            '#b# didn\'t want #a# to be angry#apology#.'
        ],
        'commit': [
            '#b# wanted to do right by the relationship#apology#.',
            f'#b# didn\'t want to lose {b["their"]} partner#apology#.',
        ],
        'interest': [
            '#b# didn\'t want to lose #a##apology#.',
            '#b# liked #a# quite a bit#apology#.',
        ],
        'neuro': [
            '#b# was #angry##worse#',
            '#b# accused #a# of not liking them enough#worse#',
            '#b# accused #a# of not being invested enough in the relationship#worse#',
        ],
        'angry': [
            'furious', 'upset', 'angry', 'offended', 'livid', 'pissed',
            'defensive',
        ],
        'apology': [
            ', and immediately apologized',
            ', and apologized profusely',
            ', and promised to make amends',
            ', and put on a display of repentance',
            ', and bought flowers for #a# the next day',
            ', and bought a coffee for #a# the next morning',
        ],
        'worse': [
            '. #they# #argued# #bitterly#.',
            '. #they# failed to reach a conclusion.',
        ],
        'they': ['They', 'The couple', 'The pair'],
        'a': a['name'],
        'a_they': a['they'],
        'b': b['name'],
        'argued': ['argued', 'fought', 'clashed'],
        'bitterly': ['bitterly', 'heatedly', 'for hours',
                     'late into the night', 'acridly', 'acidly',
                     'venemously'],
    }
    grammar = tracery.Grammar(rules)
    if (event['delta'] > 0):
        return grammar.flatten('#pos# ')
    else:
        return grammar.flatten('#neg# ')
def get_event_strings(hf, count=1):
    """Render up to *count* of a historical figure's events as text.

    Returns a one-element list containing the joined, year-sorted event
    sentences, or [""] if nothing could be rendered.

    Fix: the collection guard was `len(strings) <= count`, an off-by-one
    that collected count + 1 events before breaking; now strictly `<`.
    """
    strings = []
    events = None
    if hf['categorised_events']:
        try:
            events = hf['categorised_events']['interesting'] + hf[
                'categorised_events']['meh']
        except KeyError:
            events = None
    if events:
        for event in events:
            if 'competition' in event['type']:
                # these are too hard
                continue
            namerules = name_expansion_rules
            template = get_template_row(event)
            # There may be no template for this event type.
            if not template:
                continue
            fields = get_fields(template)
            # issue here with .capitalize and year
            print("template", template)
            rules = write_field_terminal_rules(event, fields, namerules)
            # The year clause can appear at either end of the sentence.
            if 'year' in event:
                origin = [
                    'In year #year#, ' + template.lower() + ".",
                    template + " in year #year#."
                ]
            else:
                origin = [template + "."]
            rules['adjective'] = adjectives
            rules['origin'] = origin
            grammar = tracery.Grammar(rules)
            grammar.add_modifiers(base_english)
            # Add modifier queries.
            grammar.add_modifiers(queries)
            text = grammar.flatten("#origin#")
            if '((' in text:
                # '((' marks an unexpanded tracery symbol.
                print("error with template output:", text)
                continue
            # NOTE(review): assumes event['year'] exists whenever a
            # template renders — confirm for year-less events.
            if len(strings) < count:
                strings.append({'year': event['year'], 'text': text})
            else:
                break
    if strings:
        strings.sort(key=lambda x: int(x['year']))
        texts = [x['text'] for x in strings]
        return [" ".join(texts)]
    return [""]
def GenerateImage():
    """Generate a quip image: flatten a prompt grammar, swap its nouns via
    spaCy analysis, draw the result onto static/bg.png, save static/out.png.

    Fixes: `text[i] == "#noun#"` was a no-op comparison where an
    assignment was intended; bare `next` statements (no-ops referencing
    the builtin) replaced with `continue`; `new_quip` is now always bound
    even when the text needs no wrapping; the temp grammar file is removed
    with os.remove instead of shelling out to `rm` via os.popen.
    """
    nlp = spacy.load("en_core_web_md")
    rules = buildGrammar(["static/head.json", "static/prompt.json",
                          "static/noun.json", "static/verb.json",
                          "static/tail.json"])
    grammar = tracery.Grammar(rules)
    grammar.add_modifiers(base_english)
    prompt_pre = grammar.flatten("#prompt#")
    prompt_break = nlp(prompt_pre)
    pos = [token.pos_ for token in prompt_break]
    text = [token.text for token in prompt_break]
    # Replace the root/object of each noun chunk with a #noun# expansion.
    for each in prompt_break.noun_chunks:
        print(each.text)
        for i in range(0, len(text)):
            if each.root.text == text[i] and (each.root.dep_ == "ROOT"
                                              or each.root.dep_ == "pobj"):
                text[i] = "#noun#"  # was '==', a no-op comparison
    print(pos)
    print(text)
    for i in range(0, len(text)):
        if "NOUN" in pos[i]:
            if text[i] == "#noun#":
                continue  # already replaced above
            elif prompt_break[i].tag_ == "NNS":
                text[i] = "#noun.s#"
            elif "ROOT" in prompt_break[i].dep_ or "pobj" in prompt_break[i].dep_:
                continue  # keep structurally important nouns
            else:
                text[i] = "#noun#"
    prompt_post = grammar.flatten(str.join(" ", text))
    for entity in prompt_break.ents:
        print(entity.text, entity.label_)
    quip = prompt_post
    img = Image.open("static/bg.png")
    fnt = ImageFont.truetype('static/Georgia.ttf', 100)
    d = ImageDraw.Draw(img)
    x = 1280
    y = 720
    shadowcolor = (0, 0, 0, 25)
    quip_l = list(str(quip))
    # Wrap over-wide text at the first space past the midpoint.
    if d.textsize(quip, font=fnt)[0] > 2160:
        for i in range(0, len(quip_l)):
            if i > len(quip) / 2:
                if quip_l[i] == " ":
                    quip_l[i] = "\n"
                    break
    new_quip = str.join("", quip_l)
    x = x - d.textsize(new_quip, font=fnt)[0] / 2
    y = y - d.textsize(new_quip, font=fnt)[1] / 2
    # Drop shadow first, then the main text on top.
    d.multiline_text((x + 3, y + 3), new_quip, font=fnt, fill=shadowcolor,
                     align="center")
    d.multiline_text((x, y), new_quip, font=fnt, fill="purple",
                     align="center")
    img.save('static/out.png')
    # Remove the temp grammar file without spawning a shell.
    try:
        os.remove("static/grammar.json")
    except FileNotFoundError:
        pass
def realGreet(greet):
    """Return a random greeting. NOTE(review): the *greet* argument and
    the 'location' rule are currently unused."""
    grammar_rules = {
        'origin': '#hello.capitalize#',
        'hello': ['Hello!', 'Hey!', 'Hi!'],
        'location': ['world', 'solar system', 'galaxy', 'universe']
    }
    grammar = tracery.Grammar(grammar_rules)
    grammar.add_modifiers(base_english)
    return grammar.flatten("#origin#")
def test_upper_and_lowercase(self):
    """The .lowercase and .uppercase modifiers transform expansions."""
    grammar = tracery.Grammar({
        'origin': '#hello.lowercase#, #location.uppercase#!',
        'hello': ['Hello'],
        'location': ['world']
    })
    grammar.add_modifiers(base_english)
    self.assertEqual("hello, WORLD!", grammar.flatten("#origin#"))
def get_drink_style(s, d):
    """Return a short adjective-based style blurb for a drink."""
    description_rules = get_vocab(s, d)
    # Fall back to stock adjectives when the vocab supplies none.
    description_rules.setdefault('ADJ', ['passable', 'tender', 'loud'])
    description_rules['second_adj'] = ['', ' and #ADJ#', ', #ADJ# and #ADJ#']
    description_rules['base'] = ['#ADJ.capitalize##second_adj#.']
    grammar = tracery.Grammar(description_rules)
    grammar.add_modifiers(base_english)
    return grammar.flatten('#base#')