def makeMoveTable(game, states):
    """Print a two-column table: each state (suffixed with ':') next to the
    string form of the moves game.actions() reports for it."""
    header = [['state:', 'moves']]
    table = [[str(s) + ':', str(game.actions(s))] for s in states]
    print_table(table, header)
def makeABtable(game, states):
    """Print a table of the best move each alpha-beta search in AB_searches
    finds for each state; columns past a state's maxDepth are left empty.

    The top-left header cell is str(game) with its first and last characters
    stripped, and each state label is truncated to that cell's width.
    """
    corner = str(game)[1:-1]
    width = len(corner)
    header = [[str(corner)] + ['AB(%d)' % i for i in range(len(AB_searches))]]
    table = []
    for state in states:
        row = [str(state)[:width]]
        # States may cap how deep a search is meaningful via a maxDepth attr.
        depth_cap = len(AB_searches)
        if hasattr(state, 'maxDepth'):
            depth_cap = min(depth_cap, state.maxDepth + 1)
        for idx, search in enumerate(AB_searches):
            if idx > depth_cap:
                row.append(None)
            else:
                row.append(str(search(game, state)))
        table.append(row)
    print_table(table, header, tjust='rjust')
def test_mongo_orm():
    """Fetch the starred repositories of GitHub user 'hvnobug' and save each
    one as a GithubRepository document, then print the whole collection."""
    def save_user_repo():
        # Renamed from `json` in the original to avoid shadowing the module name.
        items = requests.get(GithubUserUrls('hvnobug').starred_url()).json()
        for item in items:
            repo = GithubRepository(
                owner=item['owner']['login'],
                name=item['name'],
                stars=item['stargazers_count'],
                forks=item['forks'],
                private=item['private'],
                watchers=item['watchers'],
                language=item['language'],
                full_name=item['full_name'],
                open_issues=item['open_issues'],
                fork=item['fork'],
                created_at=format_ufc_datetime(item['created_at']),
                updated_at=format_ufc_datetime(item['updated_at']),
                id=item['id'],
                node_id=item['node_id'],
            )
            repo.create_time = datetime.now()
            repo.save()

    save_user_repo()
    print_table(GithubRepository.objects.all())
help='prints top referred urls in topic') args = parser.parse_args() if args.trends: print '### Trending topics:' print get_trends() elif args.listen: if not args.topic: print '### Tweets about the trending topics:' start_trends_stream(prints=True) else: print '### Tweets about %s' % args.topic start_trends_stream(trends=[args.topic], prints=True) elif args.top and args.topic: print '### Top URLs about %s' % args.topic urls = start_trends_stream(trends=[args.topic], prints=True) urls = sorted(urls, key=urls.get) # ordering urls by counts table = [(url, urls[url]) for url in urls] print_table(table) elif args.top and args.topic: print '### Summary of %s' % args.topic else: parser.print_help()
def lcs(input_a, input_b, codon_length=1, verbose=False):
    r"""Longest Common Substring with variable codon length.

    (Despite the name, this is a longest-common-subsequence style dynamic
    program: each DP cell takes the max of skip-a, skip-b, and match+diag,
    where a "match" compares codon_length-long slices.)

    Returns (aligned_a, aligned_b, score): the two input sequences padded
    with None where the other sequence has an unmatched element, plus the
    DP score in the bottom-right cell.

    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 0, 1) * 3))
    [None, 0, 1, 2, 0, 1, 2, 0, 1, 2]
    [2, 0, 1, 2, 0, 1, 2, 0, 1, None]
    8
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 0, 1) * 3, 3))
    [None, 0, 1, 2, 0, 1, 2, None, None, 0, 1, 2]
    [2, 0, 1, 2, 0, 1, 2, 0, 1, None, None, None]
    2
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (0, 1, 2) * 3, 3))
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    3
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (0, 1, 2) * 3))
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    9
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 1, 0) * 3))
    [None, None, 0, None, 1, None, 2, 0, 1, 2, 0, 1, 2]
    [2, 1, 0, 2, 1, 0, 2, None, 1, None, 0, None, None]
    5
    """
    if verbose:
        from util import print_table
        print(input_a)
        print(input_b)

    shift = codon_length - 1  # distance back to the start of a codon
    score = [[0] * (len(input_b) + 1) for _ in range(len(input_a) + 1)]
    if verbose:
        marks = [[''] * (len(input_b) + 1) for _ in range(len(input_a) + 1)]

    # Forward DP pass: cell (x+1, y+1) covers prefixes a[:x+1] / b[:y+1].
    for x in range(shift, len(input_a)):
        for y in range(shift, len(input_b)):
            hit = 1 if input_a[x - shift:x + 1] == input_b[y - shift:y + 1] else 0
            score[x + 1][y + 1] = max(hit + score[x - shift][y - shift],
                                      score[x][y + 1],
                                      score[x + 1][y])
            if verbose:
                if (score[x + 1][y + 1] != score[x][y + 1]
                        and score[x + 1][y + 1] != score[x + 1][y]):
                    marks[x + 1][y + 1] = 'x'

    if verbose:
        # Copy the matrix so the labels don't clobber the DP values.
        grid = [row[:] for row in score]
        grid[0] = [''] + list(input_b)
        marks[0] = [''] + list(input_b)
        for x in range(len(input_a)):
            grid[x + 1][0] = input_a[x]
            marks[x + 1][0] = input_a[x]
        print_table(grid)
        print_table(marks)

    # Traceback from the bottom-right corner, emitting aligned elements in
    # reverse order (None marks a gap in the opposite sequence).
    x, y = len(input_a), len(input_b)
    out_a, out_b = [], []
    while x > shift and y > shift:
        if score[x][y] == score[x - 1][y]:
            x -= 1
            out_a.append(input_a[x])
            out_b.append(None)
        elif score[x][y] == score[x][y - 1]:
            y -= 1
            out_a.append(None)
            out_b.append(input_b[y])
        else:
            x -= codon_length
            y -= codon_length
            out_a.extend(reversed(input_a[x:x + codon_length]))
            out_b.extend(reversed(input_b[y:y + codon_length]))
    # Drain whatever remains of either sequence as gap-aligned elements.
    while x > 0:
        x -= 1
        out_a.append(input_a[x])
        out_b.append(None)
    while y > 0:
        y -= 1
        out_a.append(None)
        out_b.append(input_b[y])
    out_a.reverse()
    out_b.reverse()

    if verbose:
        for seq, aligned in ((input_a, out_a), (input_b, out_b)):
            if isinstance(seq, str):
                print(''.join(' ' if c is None else c for c in aligned))
            else:
                print(aligned)

    return (out_a, out_b, score[len(input_a)][len(input_b)])
def test_mongo_collect2():
    """Print every GithubRepository document via the ORM query interface."""
    repos = GithubRepository.objects()
    print_table(repos)
def test_mongo_collect1():
    """Print the 'github_repository' collection via a raw collection handle."""
    with mongo_collection('github_repository') as coll:
        print_table(coll.find())
def show(boxes, level):
    """Clear the display via util.clear(), then render `boxes` for `level`
    as a table with util.print_table()."""
    util.clear()
    util.print_table(convert_to_table(boxes, level))
def list(self):
    """Print a name/size/status table of all images with status 'available'.

    NOTE: the method name shadows the builtin `list`; kept for interface
    compatibility with existing callers.
    """
    available = self.session.query(models.Image).filter_by(status="available")
    rows = [[img.name, img.size, img.status] for img in available]
    util.print_table(["name", "size", "status"], rows)
def summarize(args):
    """Run the HMM alignment/summarization pipeline over args.article_names.

    For each article it builds per-article input paths under args.data_folder,
    optionally predicts an alignment with HmmArticle (unless args.no_predict),
    writes alignment/durations/top-scored-sentence files under
    args.base_summaries_folder, and collects the names of articles whose
    processing raised an exception.

    Returns:
        list of article names that failed with an exception.
    """
    col_order = PredictedSeqInfoKey.get_columns_order()
    failed_articles = []
    # Input folders, all laid out under args.data_folder.
    articles_folder = os.path.join(args.data_folder, "text")
    transcript_folder = os.path.join(args.data_folder, "transcript")
    sections_info_folder = os.path.join(args.data_folder, "sections_info")
    section_per_sent_folder = os.path.join(args.data_folder, "section_per_sent")
    article_names = args.article_names
    print("number of articles: {}".format(len(article_names)))
    predict_enable = not args.no_predict
    # log only if we are in predict mode
    logging_enable = predict_enable
    for article_i, article_name in enumerate(article_names):
        if logging_enable:
            # set up log file for current article
            log_filename = os.path.join(args.log_folder, article_name)
            if os.path.isfile(log_filename):
                raise Exception(
                    "log file already exists: {}".format(log_filename))
            # NOTE(review): stdout/stderr are redirected to the per-article
            # logger and never restored afterwards -- confirm intended.
            logger = Logger(log_filename)
            sys.stdout = sys.stderr = logger
            print("Logging to file: {}\n".format(log_filename))
        print("--- paper {}: {}\n".format(article_i, article_name))
        article_fname = os.path.join(articles_folder, article_name)
        transcript_fname = os.path.join(transcript_folder, article_name)
        sections_info_fname = os.path.join(sections_info_folder, article_name)
        section_per_sent_fname = os.path.join(section_per_sent_folder,
                                              article_name)
        # remove the ".txt" extension and add numpy extension
        similarity_fname = article_name[:-4] + '.npy'
        similarity_fname = os.path.join(args.similarity_folder,
                                        similarity_fname)
        try:
            article_data_sample = ArticleDataSample(transcript_fname,
                                                    article_fname,
                                                    sections_info_fname,
                                                    section_per_sent_fname)
            # prepare configuration
            cfg = HmmArticleConfig(args.word_embed_path,
                                   labeled_data_mode=False)
            cfg.similarity_fname = similarity_fname
            cfg.print_configuration()
            print("")
            # Output folders/files under args.base_summaries_folder; created
            # up front so the later writes cannot fail on a missing folder.
            durations_folder = os.path.join(args.base_summaries_folder,
                                            "durations")
            os.makedirs(durations_folder, mode=0o775, exist_ok=True)
            durations_fname = os.path.join(durations_folder, article_name)
            alignment_folder = os.path.join(args.base_summaries_folder,
                                            "alignment")
            os.makedirs(alignment_folder, mode=0o775, exist_ok=True)
            alignment_fname = os.path.join(alignment_folder, article_name)
            top_scored_sents_folder = os.path.join(
                args.base_summaries_folder,
                "top_scored_sents.num_sents_{}_thresh_{}".format(
                    args.num_sents, args.thresh))
            os.makedirs(top_scored_sents_folder, mode=0o775, exist_ok=True)
            top_scored_sents_fname = os.path.join(top_scored_sents_folder,
                                                  article_name)
            if predict_enable:
                hmm_article = HmmArticle(article_data_sample, cfg)
                predicted_seq_info, log_prob = hmm_article.predict()
                print("log_prob = {}".format(log_prob))
                print("predicted sequence info:\n")
                # print_table both prints and returns the rendered table.
                alignment_str = print_table(predicted_seq_info, col_order)
                with open(alignment_fname, 'w') as out_file:
                    out_file.write(alignment_str + "\n")
                print("\n")
                hmm_article.create_durations_file(durations_fname)
            summary_creator = SummaryCreator(article_data_sample,
                                             durations_fname=durations_fname)
            if os.path.isfile(top_scored_sents_fname):
                # Existing summary file is kept, not overwritten.
                print("file exists: {}".format(top_scored_sents_fname))
            else:
                summary_creator.create_top_scored_sents_file(
                    args.num_sents, args.thresh, top_scored_sents_fname)
            if predict_enable:
                warnings = hmm_article.get_warnings()
                if len(warnings) > 0:
                    for warning in warnings:
                        print("- {}".format(warning))
        except Exception as ex:
            # Best-effort batch processing: record the failure and move on
            # to the next article rather than aborting the whole run.
            print("EXCEPTION WAS CAUGHT FOR PAPER: {}".format(article_name))
            print(ex)
            failed_articles.append(article_name)
    return failed_articles
def lcs(input_a, input_b, codon_length=1, verbose=False):
    r"""Longest Common Substring with variable codon length.

    Ported from Python-2-only syntax (print statements, xrange) to the
    print()/range form used by the other lcs variant in this file; single-
    argument print(...) calls remain valid on Python 2 as well.

    Returns (aligned_a, aligned_b, score): the two inputs padded with None
    where the other sequence has an unmatched element, plus the DP score.

    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 0, 1) * 3))
    [None, 0, 1, 2, 0, 1, 2, 0, 1, 2]
    [2, 0, 1, 2, 0, 1, 2, 0, 1, None]
    8
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 0, 1) * 3, 3))
    [None, 0, 1, 2, 0, 1, 2, None, None, 0, 1, 2]
    [2, 0, 1, 2, 0, 1, 2, 0, 1, None, None, None]
    2
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (0, 1, 2) * 3, 3))
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    3
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (0, 1, 2) * 3))
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    [0, 1, 2, 0, 1, 2, 0, 1, 2]
    9
    >>> print('%r\n%r\n%r' % lcs((0, 1, 2) * 3, (2, 1, 0) * 3))
    [None, None, 0, None, 1, None, 2, 0, 1, 2, 0, 1, 2]
    [2, 1, 0, 2, 1, 0, 2, None, 1, None, 0, None, None]
    5
    """
    if verbose:
        from util import print_table
        print(input_a)
        print(input_b)
    match_matrix = [[0] * (len(input_b) + 1) for _ in range(len(input_a) + 1)]
    if verbose:
        matching_points = [[""] * (len(input_b) + 1)
                           for _ in range(len(input_a) + 1)]
    codon_length_less_1 = codon_length - 1
    # Forward DP pass over codon-aligned positions.
    for x in range(codon_length - 1, len(input_a)):
        for y in range(codon_length - 1, len(input_b)):
            match_matrix[x + 1][y + 1] = max(
                (1 if input_a[x - codon_length_less_1:x + 1]
                      == input_b[y - codon_length_less_1:y + 1] else 0)
                + match_matrix[x - codon_length_less_1][y - codon_length_less_1],
                match_matrix[x][y + 1],
                match_matrix[x + 1][y],
            )
            if verbose:
                if (match_matrix[x + 1][y + 1] != match_matrix[x][y + 1]
                        and match_matrix[x + 1][y + 1] != match_matrix[x + 1][y]):
                    matching_points[x + 1][y + 1] = "x"
    if verbose:
        # Copy the matrix so the axis labels don't clobber the DP values.
        display_matrix = match_matrix[:]
        for i in range(len(display_matrix)):
            display_matrix[i] = display_matrix[i][:]
        display_matrix[0] = [""] + list(input_b)
        matching_points[0] = [""] + list(input_b)
        for x in range(len(input_a)):
            display_matrix[x + 1][0] = input_a[x]
            matching_points[x + 1][0] = input_a[x]
        print_table(display_matrix)
        print_table(matching_points)
    # Traceback from the bottom-right corner; None marks a gap.
    x = len(input_a)
    y = len(input_b)
    result_a = []
    result_b = []
    while x > codon_length_less_1 and y > codon_length_less_1:
        if match_matrix[x][y] == match_matrix[x - 1][y]:
            x -= 1
            result_a.append(input_a[x])
            result_b.append(None)
        elif match_matrix[x][y] == match_matrix[x][y - 1]:
            y -= 1
            result_a.append(None)
            result_b.append(input_b[y])
        else:
            x -= codon_length
            y -= codon_length
            result_a.extend(reversed(input_a[x:x + codon_length]))
            result_b.extend(reversed(input_b[y:y + codon_length]))
    # Drain whatever remains of either sequence.
    while x > 0:
        x -= 1
        result_a.append(input_a[x])
        result_b.append(None)
    while y > 0:
        y -= 1
        result_a.append(None)
        result_b.append(input_b[y])
    result_a.reverse()
    result_b.reverse()
    if verbose:
        if isinstance(input_a, str):
            print("".join(" " if c is None else c for c in result_a))
        else:
            print(result_a)
        if isinstance(input_b, str):
            print("".join(" " if c is None else c for c in result_b))
        else:
            print(result_b)
    return (result_a, result_b, match_matrix[len(input_a)][len(input_b)])