def test_failing():
    """An oversized title must make format_string raise ValueError."""
    formatter = TelegramStoryFormatter()
    oversized = Story(
        title="failing_title" * 500,
        story_url="https://someurl.com",
        discussion_url="https://someurl2.com",
        author="author",
        created_at=None,
        tags=[],
    )
    with pytest.raises(ValueError):
        formatter.format_string(oversized)
def __init__(self, filename):
    """Parse the config file for the known keys, then load the story state."""
    wanted = ('storyFile', 'dataFile')
    with open(filename, 'r') as config:
        for raw in config:
            # drop trailing '#' comments, then skip blank lines
            raw = raw.split('#', maxsplit=1)[0]
            if not raw.strip():
                continue
            key, value = raw.split(maxsplit=1)
            if key in wanted:
                setattr(self, key.strip(), value.strip())
    self.story = Story(self.storyFile)
    self.background = self.story.startBackground
    # current dialogue state
    self.speaker = ''
    self.text = ''
    self.options = []
    self.optionResults = {}
    self.firstGrey = 0
    self.data = {}
    # start at the story's declared opening scene
    self.curScene = self.story[self.story.startScene]
    self.curSceneName = self.story.startScene
    self.load()
def py_search(session, date_start, date_end, word): occurrences, errors, no_stories, = [], 0, 0 start = timer() date_iter = DateIterator(date_start, date_end) for i, day in date_iter: occurred, story = [], Story(session, day) try: if not story.get_path(): no_stories += 1 continue data = story.decrypt() # AssertionError (if any) is caught here idx, jump, data_len = 0, len(word), len(data) # probably an inefficient way to find the word indices while idx < data_len: idx = data.find(word, idx) if idx == -1: break occurred.append(idx) idx += jump except AssertionError: errors += 1 if errors > 10: print ERROR, "More than 10 files couldn't be decrypted! Terminating the search..." return [], (timer() - start) if occurred and occurred[0] > 0: # "i" indicates the Nth day from the birthday occurrences.append((i, len(occurred), occurred)) sum_value = sum(map(lambda stuff: stuff[1], occurrences)) date_iter.send_msg('[Found: %d]' % sum_value) assert no_stories < (i + 1) return occurrences, (timer() - start)
def main():
    """Load data, display data"""
    args = arg_parser_setup()
    ch_api = ClubHouseAPI(get_clubhouse_apikey_or_exit())
    cycle_logic = CycleLogic()
    if args.googlesheets:
        cycle_logic.enable_google_sheets_output(
            sheet_id=SHEET_ID,
            scopes=SCOPES,
            service_account_file=GOOGLE_SERVICE_ACCOUNT_FILE,
        )
    members = ch_api.get_active_members()
    progress_bar = ProgressBar(total=len(members))
    # walk every active member, collecting the stories that mention them
    for index, member in enumerate(members, start=1):
        progress_bar.print_progress_bar(index)
        cycle_logic.add_member(member)
        mention = member['profile']['mention_name']
        for ch_story in ch_api.stories_by_mention(mention):
            cycle_logic.add_story(Story(load_from_dict=ch_story, owner_id=member['id']))
    print('\n\n')
    print(cycle_logic.tabulate_result(debug_member=args.debugmember))
def step(self):
    """Advance the world by one tick: spawn a story at a random node.

    TODO (from original): allow nearby agents to perceive the event
    (distance = 0, 1, 2?), and tune the relative frequency of
    world.step() to agent.step() so the world is not swarmed with
    new stories.
    """
    newStory = Story(self, self.getRandomNode(), None)
    # FIX: the original indexed with `story.id()`, but no name `story`
    # exists in this scope -- it raised NameError.  The fresh story is
    # `newStory`.
    self.stories[newStory.id()] = newStory
async def get_story(name):
    """Fetch rows A2:C1000 of the sheet tab `name` and build a Story from them."""
    sheet = service.spreadsheets()
    response = sheet.values().get(
        spreadsheetId=StoriesID,
        range='{}!A2:C1000'.format(name)).execute()
    rows = response.get('values', [])
    story = Story(name)
    story.load_story(rows)
    return story
def story():
    """Render a Mad-Libs style story from the submitted form answers."""
    template = Story(
        KEYS,
        """Once upon a time in a long-ago {place}, there lived a large {adjective} {noun}. It loved to {verb} {plural_noun}.""")
    answers = {key: request.form[key] for key in KEYS}
    generated = template.generate(answers)
    return render_template("story.html", story=generated)
def find_stories(self, date_start=None, return_on_first_story=True):
    '''
    Find the dates corresponding to stories that exist in a given location
    (by default, returns on the first encountered story that exists).

    Returns (found_on_start_date, date) in the default mode, or
    (False, None) when nothing exists; in exhaustive mode returns the
    full list of dates that have a story.
    '''
    date_start = date_start if date_start else self.birthday
    if return_on_first_story:
        for i, date in DateIterator(date_start, progress_msg=None):
            if Story(self, date).get_path():
                # FIX: the original used `i is 0`, an identity comparison
                # that only works via CPython's small-int interning; use
                # value equality.
                return (i == 0, date)
        return False, None
    # getting the list of stories is an exhaustive process and should be
    # considered as the last resort
    return [date for _i, date in DateIterator(date_start, progress_msg=None)
            if Story(self, date).get_path()]
def loadOrSave(self):
    """Restore the story captured in the memento (on the "load" screen) or
    snapshot the current story into a fresh memento otherwise."""
    app = App.get_running_app().root
    print(app.current)
    if app.current == "load":
        if self.memento:
            app.currentStory = Story(self.memento.folder, self.memento.stateId)
            app.current = "game"
    else:
        # save current game
        self.memento = StoryMemento(story=app.currentStory)
def get_top_stories(number, offset):
    """Fetch `number` Hacker News top stories starting at `offset` and
    return them pretty-printed."""
    listing = requests.get("https://hacker-news.firebaseio.com/v0/topstories.json")
    story_ids = listing.json()[offset:number + offset]
    stories = []
    for story_id in story_ids:
        # one extra request per story id
        item_url = "https://hacker-news.firebaseio.com/v0/item/" + str(story_id) + ".json"
        stories.append(Story(requests.get(item_url).json()))
    return pretty_print(stories, offset)
def pop_story(self):
    """Return the most recently touched story that is not yet full.

    Row layout assumed (from the column indices used): 0=id, 2=content
    (JSON), 4=created_at, 5=updated_at -- TODO confirm against schema.
    """
    cursor = self.con.cursor()
    query = "select * from stories_all where is_full <> 1 order by updated_at desc, created_at desc limit 1"
    cursor.execute(query)
    contents = cursor.fetchone()
    # FIX: the result was bound to `str`, shadowing the builtin type in
    # this scope; renamed to `story`.
    story = Story(id=contents[0], created_at=contents[4],
                  updated_at=contents[5], content=json.loads(contents[2]))
    return story
def __init__(self, server_ip, dialogflow_key_file, dialogflow_agent_id):
    """
    :param server_ip: IP address of Social Interaction Cloud server
    :param dialogflow_key_file: path to Google's Dialogflow key file (JSON)
    :param dialogflow_agent_id: ID number of Dialogflow agent to be used (project ID)
    """
    self.sic = BasicSICConnector(server_ip, 'en-US',
                                 dialogflow_key_file, dialogflow_agent_id)
    # conversation runs with a physical robot and animations enabled
    self.conversation = Conversation(self.sic, robot_present=True, animation=True)
    self.story = Story(interactive=True)
def go(self):
    """Collect the metadata fields, hand a new Story to the formatter, and
    start the formatting thread."""
    def grab(field):
        # full text of a Tk text box, minus the trailing newline
        return self.textBoxes[field].get("1.0", 'end-1c')

    newStory = Story(grab("Title"), grab("Author"), self.path,
                     grab("Copyright"), grab("Publisher"))
    chapterTitles = self.checkBoxes["Titles"]
    self.formatter.chapterNames = bool(self.var.get())
    self.formatter.take(newStory)
    self.running = True
    self.formatThread = thread.start_new_thread(self.formatter.run, ())
def __init__(self):
    """Model hyper-parameters, partly derived from the Story corpus object."""
    self.story = Story()  # the initial sentence vectors
    # corpus-driven sizes
    self.vocab = self.story.vocab
    self.batch_size = self.story.batch_size - 2  # 126
    self.chunk_size = self.story.chunk_size
    # network hyper-parameters
    self.embedding_dim = 300
    self.num_units = 500
    self.learning_rate = 0.001
    self.epoch = 25
    self.sample_size = 50
def build_paths(session, date_start, date_end):
    """Collect the file paths of every existing story in the date range,
    then append the session key path at the end."""
    path_list = []
    for _i, day in DateIterator(date_start, date_end, 'Building the path list... %s'):
        story_path = Story(session, day).get_path()
        if story_path:
            path_list.append(story_path)
    assert path_list  # at least one story must exist in the range
    path_list.append(session.key)
    return path_list
def get_stories(self):
    """Scrape story links off the profile page (once) and cache them as
    Story objects."""
    if not self.stories:
        self.fill_metadata()
        # WARNING: hard coded class names
        anchors = self.p.findAll('a', {'class': 'bb'})
        anchors += self.p.findAll('a', {'class': 't-t84 bb nobck'})
        # the story id is everything after the 28-char URL prefix
        ids = [anchor['href'][28:] for anchor in anchors]
        self.stories = [Story(story_id) for story_id in ids]
    return self.stories
def main():
    """Entry point: load a story and its property file from argv, then play."""
    if len(argv) != 3:
        # FIX: the old message said "Not enough arguments." even when too
        # many arguments were supplied; print the expected usage instead.
        exit("Usage: %s <story_file> <property_file>" % argv[0])
    story_file = argv[1]
    property_file = argv[2]
    game = Story(property_file)
    game.Load_Story(story_file)
    Play(game)
    return
def save_story():
    """Persist a user story submitted through the POST form, then return to
    the listing page."""
    if request.method == 'POST':
        form = request.form
        story = Story(title=form['story_title'],
                      description=form['user_story'],
                      acceptance_criteria=form['acceptance_criteria'],
                      business_value=form['business_value'],
                      estimation_hour=form['estimation_hour'],
                      status=form['status'])
        story.save()
        return redirect("/list")
def print_stuff(grep):    # function to choose between pretty and ugly printing
    # Python 2 closure: reads start, end, word, session, occurrences, jump,
    # total_count, num_stories, timing, SUCCESS, CAPTURE_WAIT from the
    # enclosing scope (not visible in this chunk).
    # NOTE(review): indentation reconstructed from a flattened source --
    # confirm nesting against the original file.
    sys.stdout.set_mode(0)
    results_begin = '\nSearch results from %s to %s:' % (start.strftime('%B %d, %Y'), end.strftime('%B %d, %Y')) + \
        "\n\nStories on these days have the word '%s' in them...\n" % word
    if grep:  # pretty printing the output (at the cost of decrypting time)
        try:
            timer_start = timer()
            print results_begin
            for i, (n, word_count, indices) in enumerate(occurrences):
                colored = []
                date = start + timedelta(n)
                # each matching day is decrypted again to show context lines
                content = Story(session, date).decrypt()
                numbers = str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
                text, indices = mark_text(content, indices, jump)  # precisely indicate the word in text
                for idx in indices:  # find the word occurrences
                    # slice out the surrounding line for a grep-style excerpt
                    left_bound = find_line_boundary(text, idx, grep, -1)
                    right_bound = find_line_boundary(text, idx, grep, 1)
                    sliced = '\t' + '... ' + text[left_bound:right_bound].strip() + ' ...'
                    colored.append(sliced)
                print numbers, '\n%s' % '\n'.join(colored)  # print the numbers along with the word occurrences
            timer_stop = timer()
        except (KeyboardInterrupt, EOFError):
            # user aborted the slow pretty path: fall back to the fast one
            sleep(CAPTURE_WAIT)
            grep = 0  # default back to ugly printing
            clear_screen()
            print "Yep, it takes time! Let's go back to the good ol' days..."
    if not grep:  # Yuck, but cleaner way to print the results
        sys.stdout.set_mode(0)
        print results_begin
        for i, (n, word_count, _indices) in enumerate(occurrences):
            date = session.birthday + timedelta(n)
            numbers = ' ' + str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
            spaces = 40 - len(numbers)  # right-align the counts column
            print numbers, ' ' * spaces, '[ %s ]' % word_count  # print only the datetime and counts in each file
    sys.stdout.set_mode(1, 0.015)
    msg = fmt_text('Found a total of %d occurrences in %d stories!' % (total_count, num_stories), 'yellow')
    print '\n%s %s\n' % (SUCCESS, msg)
    print fmt_text(' Time taken for searching: ', 'blue') + \
        fmt_text('%s seconds!' % timing, 'green')
    if grep:
        # timer_stop/timer_start are only defined when the pretty path ran
        # to completion, which is exactly when grep is still truthy here
        print fmt_text(' Time taken for pretty printing: ', 'blue') + \
            fmt_text('%s seconds!' % (timer_stop - timer_start), 'green')
def write_acknowledgement_page(self, title: str, acknowledgement: str, page: str, user_id: str):
    """Create a new story for the user and write its acknowledgement PDF,
    recording the document path under page number 0."""
    story = Story(user_id)
    story_id = story.create_new_story(title)
    pdf_writer = PdfHandler('page_{}'.format(page), user_id)
    pdf_writer.set_document_title('Acknowledgement')
    file_path = pdf_writer.write_document(acknowledgement)
    page_num = 0  # the acknowledgement always occupies page 0
    return story.save_document_path(str(story_id), file_path, str(page_num), 'acknowledgement')
def main():
    """ Start script """
    # theme counters all start at zero
    themes = {'addiction': 0, 'love': 0, 'family': 0}
    story = Story(
        "Lorem drugs simply dad mom dad dad bothe boy boy printing. ",
        themes)
    Search().analize(story)
def fetch_stories(self, limit, offset):
    """Return up to `limit` stories ordered by update time, skipping `offset`.

    FIXES vs. the original:
    * table name `stories-all` contained a hyphen, which is invalid SQL --
      the sibling `pop_story` queries `stories_all`;
    * `limit %s %s` lacked the comma of MySQL's `LIMIT offset, count`
      syntax, so the query could never execute.  The existing parameter
      order (offset, limit) matches `LIMIT offset, count`.
    """
    cursor = self.con.cursor()
    query = "select * from stories_all order by updated_at desc limit %s, %s"
    cursor.execute(query, (offset, limit))
    results = cursor.fetchall()
    stories = []
    for result in results:
        # row layout (from indices used): 0=id, 2=content JSON,
        # 4=created_at, 5=updated_at -- TODO confirm against schema
        story = Story(id=result[0], created_at=result[4],
                      updated_at=result[5], content=json.loads(result[2]))
        stories.append(story)
    return stories
def run_once(debug=True):
    """Build one random story via MCTS and return (final_node, story).

    Parameter meanings (from the original notes):
        max_numsim    = max_expansion * thres
        max_iter      : number of sentences in the story
                        (= story nodes - 1 = story edges)
        max_expansion : number of expansions in the search
        max_simlength : maximum length of a rollout
        C             : exploration constant for selection
        thres         : minimum MCTS visits for node expansion
    """
    # Randomly assigns actors, places, and items for story
    root_state = random_state(4, 4)
    # Initialize Root Node - possible_methods boolean MUST BE TRUE
    root_node = TreeNode(root_state, parent_edge=None, possible_methods=True)
    max_expansion = 250
    if max_expansion < len(root_node.possible_methods):
        raise ValueError(
            "Max exp ({}) should be greater than num methods({})".format(
                max_expansion, len(root_node.possible_methods)))
    max_iter = 15
    max_simlength = 20
    C = 1
    thres = 40
    minlambda = 0.95
    s = Story(root_node)
    print(s.create_expository())
    # FIX: the original passed `mixlambda`, a name that is never defined
    # (the constant above is `minlambda`), so this call raised NameError.
    n, s = mcts(root_node, max_iter, max_expansion, max_simlength, C, thres,
                minlambda, debug=False)
    return (n, s)
def runStory():
    """Start the avatar player in the background, build a Story from the
    node graph, and walk the player through it."""
    # play avatar; daemon thread dies with the main program
    t = threading.Thread(target=avatar_player.run_avatar)
    t.daemon = True
    t.start()
    time.sleep(2)
    # create story from nodes and player
    story_line = getStory()
    # FIX: `== None` invokes __eq__ and can misfire on overloading types;
    # identity comparison is the idiomatic None test.
    if story_line is None:
        return
    player = Player(story_line)
    story = Story(player, story_line)
    # run through the story
    story.walk(player)
def rollout_story_2(node, max_simlength):
    """Monte-Carlo rollout: expand random edges from a copy of `node`'s
    state until the goals are satisfied or `max_simlength` believable
    steps have been taken; returns the rollout value of the final node."""
    root = TreeNode(node.state)
    curr_node = root
    numsims = 0
    while numsims < max_simlength and not goals_satisfied(curr_node, GOALS):
        expand_rand_edge(curr_node)
        curr_node = curr_node.edges[-1].next_node
        if curr_node.believability == 0:
            # back out of an unbelievable expansion and try again
            # NOTE(review): numsims is NOT incremented on this path, so a
            # state whose random expansions are all unbelievable could loop
            # forever -- confirm expand_rand_edge eventually yields a
            # believable edge.
            curr_node = curr_node.parent_edge.prev_node
            continue
        numsims += 1
    print(Story(curr_node))
    return rollout_value(curr_node.believability, percent_goals_satisfied(curr_node, GOALS))
def write_page(self, text: str, page: str, user_id: str) -> any:
    """Write one body page of the user's story to a PDF, then record its
    progress, document path and raw content."""
    story = Story(user_id)
    pdf_writer = PdfHandler('page_{}'.format(page), user_id)
    story_id = story.get_story_id_for_user()
    filepath = pdf_writer.write_document(text)
    story.save_progress(story_id, page, 'body')
    story.save_document_path(story_id, filepath, page, 'text')
    story.save_content(story_id, page, text)
    return True
def random(session): # useful only when you have a lot of stories (obviously) days = range((datetime.now() - session.birthday).days + 1) for i in range(25): # try 25 times _story_exists, date = session.find_stories(session.birthday + timedelta(rchoice(days))) if not date: break story = Story(session, date) if story.get_path(): return story.view() print ERROR, "Looks like you don't have much stories in the given location!"
def parse_csv(self, path):
    """ parse the csv files """
    stories = []
    with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        header = next(reader)
        for row in reader:
            story_id = row[0]
            sentences = list(row[1:5])           # four context sentences
            potential_endings = list(row[5:7])   # two candidate endings
            correct_ending_idx = int(row[7]) - 1  # csv stores it 1-based
            end_one_feats, end_two_feats, shared_feats = [], [], []
            if len(row) > 8:
                # extra columns are per-ending ('e1'/'e2') or shared features
                for col in range(8, len(header)):
                    value = float(row[col])
                    if 'e1' in header[col]:
                        end_one_feats.append(value)
                    elif 'e2' in header[col]:
                        end_two_feats.append(value)
                    else:
                        shared_feats.append(value)
            stories.append(Story(story_id, sentences, potential_endings,
                                 (end_one_feats, end_two_feats), shared_feats,
                                 correct_ending_idx))
    return stories
def mcts(node, max_iter, max_expansion, max_simlength, C, thres, debug):
    """Monte-Carlo tree search over story continuations.

    Each of the `max_iter` master iterations grows the tree with up to
    `max_expansion` selections -- pruning unbelievable or action-repeating
    nodes, and running `thres` rollouts from fresh leaves -- then commits
    to the most-visited child and discards its siblings.

    Returns (final_node, Story(final_node)).

    NOTE(review): the nesting below was reconstructed from a flattened
    source.  The `else: simulate` branch is read as belonging to the
    outer believability/height chain, which presumes `height` is the
    subtree height (0 for a freshly expanded leaf) -- confirm against
    TreeNode before relying on this structure.
    """
    # Loop for every line in story
    for count in range(max_iter):
        if debug:
            print("Master Iteration Number - " + str(count))
        # Loop for every simulation constructing story tree
        for num_expansion in range(max_expansion):
            if debug:
                print("Expansion Number - " + str(num_expansion))
            # Choose a node in the story tree
            chosen_node = selection(node, C, thres)
            # If the chosen node has a believability of 0, break it from the tree
            if chosen_node.believability == 0:
                chosen_node.parent_edge.prev_node.edges.pop()
                if debug:
                    print("Pruned unbelievable node")
            elif chosen_node.height > 0:
                parent_node = chosen_node.parent_edge.prev_node
                # prune a node that repeats its parent's action
                if chosen_node.parent_edge.method.method == parent_node.parent_edge.method.method:
                    parent_node.edges.pop()
                    if debug:
                        print("Pruned repeat-1 node")
                elif chosen_node.height > 1:
                    grandparent_node = parent_node.parent_edge.prev_node
                    # prune a node that repeats its grandparent's action
                    if (chosen_node.parent_edge.method.method == grandparent_node.parent_edge.method.method):
                        grandparent_node.edges.pop()
                        if debug:
                            print("Pruned repeat-2 node")
            else:
                # Simuluate if thres number of times
                for _ in range(thres):
                    sim_value = rollout_story_3(chosen_node, max_simlength)
                    backpropogate(chosen_node, sim_value)
        # Choose most visited node
        exp_node = most_visited_child(node)
        if debug:
            print(exp_node.parent_edge.method.sentence)
        # Remove all other edges from the tree - focus on most visited node subtree
        delete_children(node, exp_node)
        # Switch root to exp_node
        node = exp_node
        print("\n")
    return (node, Story(node))
def run_reinforce(depth = 15):
    """Greedy-stochastic playout using the REINFORCE Q-table persisted in
    table2.pickle: sample `depth` successive edges by softmax over the
    current state's Q-values, re-sampling whenever the sampled method is
    unbelievable; returns the resulting Story."""
    with open("table2.pickle","rb") as table2file:
        table2 = pickle.load(table2file)
    root_state = State(ACTORS, PLACES, ITEMS)
    root_node = TreeNode(root_state, parent_edge = None, possible_methods = True)
    current_node = root_node
    for _ in range(depth):
        # deep-copy so the pops below don't corrupt the loaded table
        qvals = deepcopy(table2[state_index_number_2(current_node.state)])
        pidx = prob_index(softmax(qvals))
        edge = expand_index_edge(current_node, pidx)
        while edge.method.believability == 0:
            # NOTE(review): popping shifts the indices of every later entry,
            # so subsequent `pidx` draws no longer line up with the method
            # indexing used by expand_index_edge -- confirm this is the
            # intended behaviour.
            qvals.pop(pidx)
            pidx = prob_index(softmax(qvals))
            edge = expand_index_edge(current_node, pidx)
        current_node = edge.next_node
    return Story(current_node)