def demo3():
    """
    You found a cool paper online and you want to find similar papers:
    1. Download and parse the pdf
    2. Compare to text of all publications in pubs_ database
    3. Open the top 3 matches in browser
    (but note that current matching alg is very basic and could be much improved)

    Pre-requisites:
    - Assumes 'pubs_nips' exists and contains pdf text inside (under key
      'pdf_text'). This can be obtained by running nips_download_parse.py and
      then nips_add_pdftext.py or by downloading it from site.
      (https://sites.google.com/site/researchpooler/home)

    Side-effects:
    - will use os call to open a pdf with default program
    """
    # NOTE(review): the original file contained two identical definitions of
    # demo3 back-to-back; the duplicate has been removed here.

    # Fetch this pdf from the website, parse it, and build a publication dict.
    # Here is a random pdf from Andrew's website.
    url = 'http://ai.stanford.edu/~ang/papers/icml11-DeepEnergyModels.pdf'
    # Single-argument parenthesized print is behaviorally identical under
    # Python 2's print statement and valid under Python 3.
    print('downloading %s...' % (url,))
    text = convertPDF(url)              # extract the raw text
    bow = stringToWordDictionary(text)  # bag-of-words representation
    p = {'pdf_text': bow}               # dummy publication dict

    # Calculate similarities of p to all publications in the database.
    print('loading database...')
    pubs = loadPubs('pubs_nips')
    print('computing similarities. (may take while with current implementation)')
    scores = publicationSimilarityNaive(pubs, p)

    # Keep only valid scores (negative means "no pdf text to compare against"
    # per the filter below) and sort best-first.
    lst = [(s, i) for i, s in enumerate(scores) if s >= 0]
    lst.sort(reverse=True)

    # Display up to the top 50 matches.
    m = min(50, len(lst))
    for s, i in lst[:m]:
        print('%.2f is similarity to %s.' % (s, pubs[i]['title']))

    # Open the top 3 matches with the system default pdf viewer.
    print('opening the top 3...')
    openPDFs([pubs[i]['pdf'] for s, i in lst[:3]])
# possibly place restrictions on pubs to process here
pubs = pubs_all

for i, p in enumerate(pubs):
    # If the pdf url does not exist, in future this could possibly use google
    # search to try to look up a link for the pdf first.
    # Membership tests via `in` replace Python-2-only dict.has_key (identical
    # semantics, portable to Python 3).
    if 'pdf' in p and 'pdf_text' not in p:
        # First, try to open the PDF from the local downloads/ directory,
        # using the 'NIPS...' suffix of the url as the local file name.
        processed = False
        try:
            floc = p['pdf'].index('NIPS')
            fname = p['pdf'][floc:]
            txt = convertPDF('downloads/' + fname)
            processed = True
            print('found %s in file!' % (p['title'],))
        except Exception:
            # Deliberate best-effort: a missing local copy just falls through
            # to the download path below. (Narrowed from a bare `except:` so
            # KeyboardInterrupt/SystemExit are no longer swallowed.)
            pass

        if not processed:
            # Fall back to downloading the PDF and converting it to text.
            try:
                print('downloading pdf for [%s] and parsing...'
                      % (p.get('title', 'an un-titled paper')))
                txt = convertPDF(p['pdf'])
                processed = True
                print('processed from url!')
            except Exception:
                print('error: unable to open download the pdf from %s' % (p['pdf'],))
                print('skipping...')