import argparse
import os

import bib  # local BibTeX parser module


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('bibtex')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    with open(args.bibtex) as f:
        lines = os.linesep.join(l.strip() for l in f)

    bibobject = bib.Bibparser(lines, verbose=args.verbose)
    bibobject.parse()
    entries = sorted(bibobject.records.values(),
                     key=lambda e: e['issued']['literal'])

    print('Publications')
    print('============\n')
    try:
        for entry in entries:
            url = entry['URL'] if 'URL' in entry else entry['url']
            print('`%s <%s>`_' % (entry['title'], url))
            print('~' * 80)
            print()
            # format_author() is defined elsewhere in this script
            print(format_author(entry['author']),
                  '*%s* **%s**, %s %s' % (entry['journal'],
                                          entry['issued']['literal'],
                                          entry['volume'],
                                          entry['page']))
            print()
            print('  ', entry['abstract'])
            print()
    except KeyError as e:
        raise KeyError('Entry failure during processing of %s' % entry) from e
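# A minimal sketch of the record shape the formatter above expects. The field
# names are read off the dictionary accesses in main(); the nested shapes
# (e.g. the author list) are assumptions, since the actual keys come from
# whatever bib.Bibparser produces for a given .bib file.
EXAMPLE_ENTRY = {
    'title': 'An Example Paper',
    'url': 'http://example.org/paper',            # 'URL' is also accepted
    'author': [{'given': 'Ada', 'family': 'Lovelace'}],  # shape is a guess
    'journal': 'Journal of Examples',
    'issued': {'literal': '2013'},
    'volume': '1',
    'page': '1--10',
    'abstract': 'One-sentence abstract.',
}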
import os
import sys
from pprint import pprint

import tenjin
from tenjin.helpers import *  # helpers the compiled templates rely on
import bib


def main(argv=sys.argv[:]):
    layoutfn = argv[1] if len(argv) > 1 else '_layout.pyhtml'

    filter_names = set()
    with open('index.html', 'w') as fout:
        items = []
        for root, dirs, files in os.walk('bibtex'):
            # shuffle(files)
            for fn in filter(lambda fn: fn.endswith('bib'), files):
                print(fn)
                with open(os.path.join(root, fn)) as f:
                    parser = bib.Bibparser(bib.clear_comments(f.read()))
                parser.parse()
                data = next(iter(parser.records.values()))
                # teaser image URLs derived from the .bib file name
                stem = os.path.splitext(fn)[0]
                data['teaser'] = 'teaser_images/%s.jpg' % stem
                data['thumb'] = 'teaser_images/thumb/%s.jpg' % stem
                # collect keywords for the filter list
                keywords = [k.strip() for k in data.get('keywords', '').split(',')]
                data['keywords'] = keywords
                filter_names.update(keywords)
                # pprint(data)
                items.append(data)

        engine = tenjin.Engine(path=['views'], layout=layoutfn)
        html = engine.render('items.pyhtml', {'items': items})
        fout.write(html)

    print('Filters:')
    pprint(filter_names)
    print('Filter #: %d' % len(filter_names))
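# The renderer above assumes two Tenjin templates under views/. A minimal
# sketch of what they might look like (the markup is illustrative, not the
# project's actual templates; Tenjin layouts receive the child output as
# _content, and <?py ... ?> blocks are closed with #end markers):
#
#   views/_layout.pyhtml:
#       <html><body>
#       #{_content}
#       </body></html>
#
#   views/items.pyhtml:
#       <?py for item in items: ?>
#       <div class="item">
#         <img src="${item['thumb']}" />
#         <p>${item['title']}</p>
#       </div>
#       <?py #endfor ?>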
import sys
from pprint import pprint

import bib


def main(argv=sys.argv[:]):
    # example = 'bibtex/robust_inside-outside_segmentation_using_generalized_winding_numbers.bib'
    example = argv[1]
    with open(example) as f:
        data = bib.clear_comments(f.read())
    parser = bib.Bibparser(data)
    parser.parse()
    data = parser.json()
    # pprint(data)
    pprint(parser.records)
    return 0
# Method of an html.parser.HTMLParser subclass (the rest of the class is
# defined elsewhere); relies on os and the local bib module.
def handle_endtag(self, tag):
    if tag == 'dt':
        self.paperModeFlag = False
        self.currentTitle = ''
        if self.currentFile is not None:
            self.currentFile.close()
            # print(self.currentFileContent)
            self.currentFile = open(self.currentPath, 'w')
            self.currentFile.writelines(self.currentFileContent)
            self.currentFile.close()
            self.currentFile = None
            self.currentFileContent = ''
            self.currentRecords = None
    elif self.paperModeFlag and tag == 'b':
        if self.titleModeFlag:
            self.titleModeFlag = False
            # convert the title to a filesystem-safe file name:
            # spaces become underscores, unsafe characters are dropped
            title = self.currentTitle.replace(' ', '_')
            for ch in '\\/:*?"<>|':
                title = title.replace(ch, '')
            title = title.lower()
            # print('title: ' + title)
            self.currentPath = os.path.join(self.bibtexDirectory, title + '.bib')
            try:
                self.currentFile = open(self.currentPath, 'r')
                bibParser = bib.Bibparser(bib.clear_comments(self.currentFile.read()))
                bibParser.parse()
                self.currentRecords = next(iter(bibParser.records.values()))
                self.currentFile.seek(0, 0)
                self.currentFileContent = self.currentFile.readlines()
                # make sure the second-to-last line ends with a comma, so
                # another field can be appended before the closing brace
                line = self.currentFileContent[-2].rstrip()
                if not line.endswith(','):
                    self.currentFileContent[-2] = line + ',\n'
            except IOError:
                pass
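# A compact, behavior-equivalent sketch of the title scrubbing above, using a
# single regular-expression pass instead of repeated str.replace calls (the
# helper name is illustrative, not part of the original class):
import re

def title_to_filename(title):
    # spaces -> underscores, drop \ / : * ? " < > | and lowercase the result
    return re.sub(r'[\\/:*?"<>|]', '', title.replace(' ', '_')).lower()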
import os
from random import shuffle

from PIL import Image
import bib


def get_works(n=3):
    works = []
    for root, dirs, files in os.walk('bibtex'):
        files2 = [fn for fn in files if fn.endswith('bib')]
        shuffle(files2)
        # split the shuffled file list into rows of n works each
        for chunk in [files2[i:i + n] for i in range(0, len(files2), n)]:
            row = []
            for fn in chunk:
                with open(os.path.join(root, fn)) as f:
                    parser = bib.Bibparser(bib.clear_comments(f.read()))
                parser.parse()
                data = next(iter(parser.records.values()))
                # teaser image URLs and pixel sizes for layout
                stem = os.path.splitext(fn)[0]
                data['teaser'] = 'teaser_images/%s.jpg' % stem
                data['teaser-size'] = Image.open(data['teaser']).size
                data['thumb'] = 'teaser_images/thumb/%s.jpg' % stem
                data['thumb-size'] = Image.open(data['thumb']).size
                row.append(data)
            works.append(row)
    return {'row_size': 12 // n, 'works': works}
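# Usage sketch: with the default n=3, each row holds up to three works and
# 'row_size' is 4, which suggests a 12-column (Bootstrap-style) grid; that is
# an inference from the 12 // n computation, not documented behavior.
if __name__ == '__main__':
    ctx = get_works(3)
    print('columns per work:', ctx['row_size'])
    for row in ctx['works']:
        for work in row:
            print(work['thumb'], work['thumb-size'])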