def submit_query_terms(self, term_list, max_url_count=15, parallel_cb=None, cached=True):
    """Submit the user's search terms to a Search Engine API and collect URLs.

    Only operates when there is no information associated with the terms yet,
    usually before running extract_terms().

    Args:
        term_list: list of search terms submitted by the user.
        max_url_count: maximum number of URLs requested from the engine.
        parallel_cb: optional callback passed to download() on platforms
            that fetch results in parallel (non-darwin branch).
        cached: when True, reuse already-indexed results via search()
            instead of invoking the external Bing search jar.

    Returns:
        urls: list of URLs returned by the Search Engine (validated via
        self.validate_url in the non-cached path).
    """
    print '\n\nsubmit_query_terms\n\n'
    # The Java seeds generator must run from its own directory; it reads
    # the query from conf/queries.txt relative to that directory.
    chdir(self.memex_home + '/seed_crawler/seeds_generator')
    query = ' '.join(term_list)
    with open('conf/queries.txt', 'w') as f:
        f.write(query)
    if not cached:
        # NOTE(review): command string run with shell=True; safe only while
        # max_url_count is an int — confirm no user-controlled text ends up here.
        comm = "java -cp target/seeds_generator-1.0-SNAPSHOT-jar-with-dependencies.jar BingSearch -t " + str(
            max_url_count)
        p = Popen(comm, shell=True, stdout=PIPE)
        output, errors = p.communicate()
        print output
        print errors
        # Recreate clean scratch directories for the downloaded pages.
        call(["rm", "-rf", "html"])
        call(["mkdir", "-p", "html"])
        call(["rm", "-rf", "thumbnails"])
        call(["mkdir", "-p", "thumbnails"])
        # Linux parallel download was deliberately disabled here:
        #if sys.platform in ['darwin', 'linux2']:
        if sys.platform in ['darwin']:
            download("results.txt")
        else:
            download("results.txt", True, parallel_cb)
        # Drop any stale exclusion list left over from a previous ranking run.
        if exists(self.memex_home + "/seed_crawler/ranking/exclude.txt"):
            call([
                "rm",
                self.memex_home + "/seed_crawler/ranking/exclude.txt"
            ])
        with open("results.txt", 'r') as f:
            urls = [
                self.validate_url(line.strip()) for line in f.readlines()
            ]
    else:
        # Cached path: query the local index and cap the result count.
        urls = search('text', term_list)[0:max_url_count]
    for url in urls:
        self.urls_set.add(url)
    # Rebuild the tf-idf model over the enlarged URL set.
    self.tfidf = tfidf.tfidf(list(self.urls_set))
    return urls  #Results from Search Engine
def __init__(self, urls=None):
    """Hold intermediate crawler state: URLs, extracted text, terms, clusters, etc.

    URLs and terms are later paired with labels and ranking scores, e.g.
        urls  = [["nature.com", 1, 0.9], ["sport.com", 0, 0.01]]
        terms = [["science", 1, 0.9], ["sport", 0, 0.02]]

    Args:
        urls: optional iterable of seed URLs; defaults to no seeds.

    Raises:
        KeyError: if the MEMEX_HOME environment variable is not set.
    """
    # None sentinel instead of a mutable [] default (classic Python pitfall).
    if urls is None:
        urls = []
    self.urls_set = set(urls)
    self.positive_urls_set = set()
    self.negative_urls_set = set()
    self.tfidf = tfidf.tfidf()
    self.memex_home = environ['MEMEX_HOME']
def __init__(self, urls=None):
    """Hold intermediate crawler state: URLs, extracted text, terms, clusters, etc.

    URLs and terms are later paired with labels and ranking scores, e.g.
        urls  = [["nature.com", 1, 0.9], ["sport.com", 0, 0.01]]
        terms = [["science", 1, 0.9], ["sport", 0, 0.02]]

    Args:
        urls: optional iterable of seed URLs; defaults to no seeds.

    Raises:
        KeyError: if the MEMEX_HOME environment variable is not set.
    """
    # Use a None sentinel rather than a mutable [] default argument.
    if urls is None:
        urls = []
    self.urls_set = set(urls)
    self.positive_urls_set = set()
    self.negative_urls_set = set()
    self.tfidf = tfidf.tfidf()
    self.memex_home = environ['MEMEX_HOME']
def submit_query_terms(self, term_list, max_url_count=15, parallel_cb=None, cached=True):
    """Submit the user's search terms to a Search Engine API and collect URLs.

    Only operates when there is no information associated with the terms yet,
    usually before running extract_terms().

    Args:
        term_list: list of search terms submitted by the user.
        max_url_count: maximum number of URLs requested from the engine.
        parallel_cb: optional callback passed to download() on platforms
            that fetch results in parallel (non-darwin branch).
        cached: when True, reuse already-indexed results via search()
            instead of invoking the external Bing search jar.

    Returns:
        urls: list of URLs returned by the Search Engine (validated via
        self.validate_url in the non-cached path).
    """
    print('\n\nsubmit_query_terms\n\n')
    # The Java seeds generator must run from its own directory; it reads
    # the query from conf/queries.txt relative to that directory.
    chdir(self.memex_home + '/seed_crawler/seeds_generator')
    with open('conf/queries.txt', 'w') as f:
        f.write(' '.join(term_list))

    if not cached:
        # Argument list with shell=False instead of a string + shell=True:
        # same command, no shell quoting/injection surface.
        comm = ["java", "-cp",
                "target/seeds_generator-1.0-SNAPSHOT-jar-with-dependencies.jar",
                "BingSearch", "-t", str(max_url_count)]
        p = Popen(comm, stdout=PIPE)
        output, errors = p.communicate()
        print(output)
        print(errors)

        # Recreate clean scratch directories for the downloaded pages.
        for scratch_dir in ("html", "thumbnails"):
            call(["rm", "-rf", scratch_dir])
            call(["mkdir", "-p", scratch_dir])

        # Linux parallel download was deliberately disabled here:
        #if sys.platform in ['darwin', 'linux2']:
        if sys.platform in ['darwin']:
            download("results.txt")
        else:
            download("results.txt", True, parallel_cb)

        # Drop any stale exclusion list left over from a previous ranking run.
        if exists(self.memex_home + "/seed_crawler/ranking/exclude.txt"):
            call(["rm", self.memex_home + "/seed_crawler/ranking/exclude.txt"])

        # Iterate the file directly; readlines() would materialize the
        # whole file as a list for no benefit.
        with open("results.txt", 'r') as f:
            urls = [self.validate_url(line.strip()) for line in f]
    else:
        # Cached path: query the local index and cap the result count.
        urls = search('text', term_list)[0:max_url_count]

    self.urls_set.update(urls)
    # Rebuild the tf-idf model over the enlarged URL set.
    self.tfidf = tfidf.tfidf(list(self.urls_set))
    return urls  # results from the Search Engine
def submit_selected_urls(self, positive, negative):
    """Rank and diversify all known URLs with respect to the positive URLs.

    Args:
        positive: URLs the user labeled relevant (label 1).
        negative: URLs the user labeled irrelevant (label 0).

    Returns:
        [ranked_urls, scores]: URLs ordered by the ranker and their scores.
    """
    # TODO: test new examples against an existing classifier and, above an
    # accuracy threshold, classify pages before ranking/diversification.
    all_docs = get_bag_of_words(list(self.urls_set))

    # Record the new labels; a URL carries one label at a time, so adding
    # it to one set removes it from the other. Membership is tested on the
    # dict itself — `in all_docs.keys()` builds an O(n) list on Python 2.
    for url in positive:
        if url in all_docs:
            self.positive_urls_set.add(url)
            self.negative_urls_set.discard(url)
    for url in negative:
        if url in all_docs:
            self.negative_urls_set.add(url)
            self.positive_urls_set.discard(url)

    # Keep every non-negative document; the unlabeled ones ("other") are
    # the candidates to rank against the positive set. The original
    # `len(negative_set) == 0 or url not in negative_set` guard reduces to
    # the membership test alone (an empty set contains nothing).
    documents = {}
    other = []
    for url, content in all_docs.items():
        if url not in self.negative_urls_set:
            documents[url] = content
            if url not in self.positive_urls_set:
                other.append(url)

    self.tfidf = tfidf.tfidf(documents)

    chdir(self.memex_home + '/seed_crawler/ranking')
    ranker = rank.rank()
    [ranked_urls, scores] = ranker.results(self.tfidf, self.positive_urls_set, other)
    return [ranked_urls, scores]  # classified, ranked, diversified
def submit_selected_urls(self, positive, negative):
    """Rank and diversify all known URLs with respect to the positive URLs.

    Args:
        positive: URLs the user labeled relevant (label 1).
        negative: URLs the user labeled irrelevant (label 0).

    Returns:
        [ranked_urls, scores]: URLs ordered by the ranker and their scores.
    """
    # TODO: test new examples against an existing classifier and, above an
    # accuracy threshold, classify pages before ranking/diversification.
    all_docs = get_bag_of_words(list(self.urls_set))

    # Apply the new labels; each URL belongs to at most one label set.
    # Test membership on the dict directly — `in all_docs.keys()` would
    # build an O(n) list per lookup on Python 2.
    for url in positive:
        if url in all_docs:
            self.positive_urls_set.add(url)
            self.negative_urls_set.discard(url)
    for url in negative:
        if url in all_docs:
            self.negative_urls_set.add(url)
            self.positive_urls_set.discard(url)

    # Every non-negative document is kept; those without a positive label
    # form the "other" candidates to rank. (The redundant
    # `len(negative_set) == 0 or` clause was dropped — an empty set never
    # contains the URL anyway.)
    documents = {}
    other = []
    for url, content in all_docs.items():
        if url not in self.negative_urls_set:
            documents[url] = content
            if url not in self.positive_urls_set:
                other.append(url)

    self.tfidf = tfidf.tfidf(documents)

    chdir(self.memex_home + '/seed_crawler/ranking')
    ranker = rank.rank()
    [ranked_urls, scores] = ranker.results(self.tfidf, self.positive_urls_set, other)
    return [ranked_urls, scores]  # classified, ranked, diversified
def getTermsSummarySeedCrawler(self, opt_maxNumberOfTerms = 40, session = None):
    """Summarize the most significant terms for the session's domain.

    Gathers top single terms (plus bigrams/trigrams) from pages in the
    domain's Elasticsearch index, combines them with user-defined
    ("Custom") terms, and attaches per-term frequencies in the
    Relevant/Irrelevant page sets.

    Args:
        opt_maxNumberOfTerms: cap on the number of single terms returned.
        session: dict with at least 'domainId', 'fromDate', 'toDate',
            'filter' keys — TODO confirm full schema against caller.

    Returns:
        List of entries [term, pos_freq, neg_freq, tags]; bigrams and
        trigrams are appended with zero frequencies and no tags.
        Returns [] when no terms are found.
    """
    es_info = self.esInfo(session['domainId'])
    # NOTE(review): `format` shadows the builtin of the same name.
    format = '%m/%d/%Y %H:%M %Z'
    # Convert the session date bounds to epoch milliseconds (Python 2 long).
    if not session['fromDate'] is None:
        session['fromDate'] = long(CrawlerModel.convert_to_epoch(datetime.strptime(session['fromDate'], format)) * 1000)
    if not session['toDate'] is None:
        session['toDate'] = long(CrawlerModel.convert_to_epoch(datetime.strptime(session['toDate'], format)) * 1000)

    # Previously tagged Positive/Negative terms for this domain.
    s_fields = {
        "tag": "Positive",
        "index": es_info['activeCrawlerIndex'],
        "doc_type": es_info['docType']
    }
    pos_terms = [field['term'][0] for field in multifield_term_search(s_fields, self._capTerms, ['term'], self._termsIndex, 'terms', self._es)]
    s_fields["tag"]="Negative"
    neg_terms = [field['term'][0] for field in multifield_term_search(s_fields, self._capTerms, ['term'], self._termsIndex, 'terms', self._es)]

    # Pages the user marked Relevant seed the "positive" URL list.
    results = term_search(es_info['mapping']['tag'], ['Relevant'], self._pagesCapTerms, ['url', es_info['mapping']['text']], es_info['activeCrawlerIndex'], es_info['docType'], self._es)
    pos_urls = [field["id"] for field in results]

    top_terms = []
    top_bigrams = []
    top_trigrams = []

    if session['filter'] is None:
        urls = []
        if len(pos_urls) > 0:
            # If positive urls are available search for more documents like them
            results_more_like_pos = get_more_like_this(pos_urls, ['url', es_info['mapping']["text"]], self._pagesCapTerms, es_info['activeCrawlerIndex'], es_info['docType'], self._es)
            results.extend(results_more_like_pos)
            urls = pos_urls[0:self._pagesCapTerms] + [field['id'] for field in results_more_like_pos]
        if not urls:
            # If positive urls are not available then get the most recent documents
            results = get_most_recent_documents(self._pagesCapTerms, es_info['mapping'], ['url',es_info['mapping']["text"]], session['filter'], es_info['activeCrawlerIndex'], es_info['docType'], self._es)
            urls = [field['id'] for field in results]
        if len(results) > 0:
            text = [field[es_info['mapping']["text"]][0] for field in results]
            if len(urls) > 0:
                # Rank terms by tf-idf over the selected pages; when the
                # user has tagged positive terms, rank relative to them.
                tfidf_all = tfidf.tfidf(urls, pos_tags=self.pos_tags, mapping=es_info['mapping'], es_index=es_info['activeCrawlerIndex'], es_doc_type=es_info['docType'], es=self._es)
                if pos_terms:
                    extract_terms_all = extract_terms.extract_terms(tfidf_all)
                    [ranked_terms, scores] = extract_terms_all.results(pos_terms)
                    # Drop terms the user already tagged Negative.
                    top_terms = [ term for term in ranked_terms if (term not in neg_terms)]
                    top_terms = top_terms[0:opt_maxNumberOfTerms]
                else:
                    top_terms = tfidf_all.getTopTerms(opt_maxNumberOfTerms)
            if len(text) > 0:
                # Over-fetch by len(neg_terms) so the cap still holds after
                # filtering out negative terms.
                [_,_,_,_,_,_,_,_,top_bigrams, top_trigrams] = get_bigrams_trigrams.get_bigrams_trigrams(text, urls, opt_maxNumberOfTerms+len(neg_terms), self.w2v, self._es)
                top_bigrams = [term for term in top_bigrams if term not in neg_terms]
                top_trigrams = [term for term in top_trigrams if term not in neg_terms]
    else:
        # A text filter is active: restrict the query to matching pages
        # (and the date window, if given) and use significant-terms instead.
        s_fields = {
            es_info['mapping']["text"]: "(" + session['filter'].replace('"','\"') + ")"
        }
        if not session['fromDate'] is None:
            s_fields[es_info['mapping']["timestamp"]] = "[" + str(session['fromDate']) + " TO " + str(session['toDate']) + "]"
        results = multifield_query_search(s_fields, self._pagesCapTerms, ["url", es_info['mapping']["text"]], es_info['activeCrawlerIndex'], es_info['docType'], self._es)
        ids = [field['id'] for field in results]
        text = [field[es_info['mapping']["text"]][0] for field in results]
        urls = [field[es_info['mapping']["url"]][0] for field in results]
        top_terms = get_significant_terms(ids, opt_maxNumberOfTerms, mapping=es_info['mapping'], es_index=es_info['activeCrawlerIndex'], es_doc_type=es_info['docType'], es=self._es)
        if len(text) > 0:
            [_,_,_,_,_,_,_,_,top_bigrams, top_trigrams] = get_bigrams_trigrams.get_bigrams_trigrams(text, urls, opt_maxNumberOfTerms+len(neg_terms), self.w2v, self._es)
            top_bigrams = [term for term in top_bigrams if term not in neg_terms]
            top_trigrams = [term for term in top_trigrams if term not in neg_terms]

    # User-defined ("Custom") terms always lead the list.
    # NOTE(review): this uses multifield_query_search while the
    # Positive/Negative lookups above use multifield_term_search — confirm
    # the asymmetry is intentional.
    s_fields = {
        "tag": "Custom",
        "index": es_info['activeCrawlerIndex'],
        "doc_type": es_info['docType']
    }
    custom_terms = [field['term'][0] for field in multifield_query_search(s_fields, 500, ['term'], self._termsIndex, 'terms', self._es)]
    top_terms = custom_terms + top_terms

    if not top_terms:
        return []

    # Frequency of each top term within the Relevant ("positive") pages;
    # requires at least two positive pages, otherwise all zeros.
    pos_freq = {}
    if len(pos_urls) > 1:
        tfidf_pos = tfidf.tfidf(pos_urls, pos_tags=self.pos_tags, mapping=es_info['mapping'], es_index=es_info['activeCrawlerIndex'], es_doc_type=es_info['docType'], es=self._es)
        [_,corpus,ttfs_pos] = tfidf_pos.getTfArray()
        total_pos_tf = np.sum(ttfs_pos, axis=0)
        total_pos = np.sum(total_pos_tf)
        pos_freq={}
        for key in top_terms:
            try:
                # corpus.index raises ValueError when the term is absent.
                pos_freq[key] = (float(total_pos_tf[corpus.index(key)])/total_pos)
            except ValueError:
                pos_freq[key] = 0
    else:
        pos_freq = { key: 0 for key in top_terms }

    # Same computation over the Irrelevant ("negative") pages.
    neg_urls = [field['id'] for field in term_search(es_info['mapping']['tag'], ['Irrelevant'], self._pagesCapTerms, ['url'], es_info['activeCrawlerIndex'], es_info['docType'], self._es)]
    neg_freq = {}
    if len(neg_urls) > 1:
        tfidf_neg = tfidf.tfidf(neg_urls, pos_tags=self.pos_tags, mapping=es_info['mapping'], es_index=es_info['activeCrawlerIndex'], es_doc_type=es_info['docType'], es=self._es)
        [_,corpus,ttfs_neg] = tfidf_neg.getTfArray()
        total_neg_tf = np.sum(ttfs_neg, axis=0)
        total_neg = np.sum(total_neg_tf)
        neg_freq={}
        for key in top_terms:
            try:
                neg_freq[key] = (float(total_neg_tf[corpus.index(key)])/total_neg)
            except ValueError:
                neg_freq[key] = 0
    else:
        neg_freq = { key: 0 for key in top_terms }

    # Look up stored tags for each top term, one query per term.
    terms = []
    s_fields = {
        "term": "",
        "index": es_info['activeCrawlerIndex'],
        "doc_type": es_info['docType'],
    }
    results = []
    for term in top_terms:
        s_fields["term"] = term
        res = multifield_term_search(s_fields, self._capTerms, ['tag', 'term'], self._termsIndex, 'terms', self._es)
        results.extend(res)
    tags = {result['term'][0]: result['tag'][0] for result in results}

    # Assemble the summary entries: [term, pos_freq, neg_freq, tag_list].
    for term in top_terms:
        entry = [term, pos_freq[term], neg_freq[term], []]
        if tags and not tags.get(term) is None:
            entry[3] = tags[term].split(';')
        terms.append(entry)
    # Bigrams/trigrams carry no frequency or tag information.
    for term in top_bigrams:
        entry = [term, 0, 0, []]
        terms.append(entry)
    for term in top_trigrams:
        entry = [term, 0, 0, []]
        terms.append(entry)
    return terms
def term_tfidf(self):
    """Compute and return the tf-idf array over every tracked URL."""
    # Fetch the bag-of-words corpus for the current URL set, then build a
    # fresh tf-idf model from it.
    corpus = get_bag_of_words(list(self.urls_set))
    model = tfidf.tfidf(corpus)
    return model.getTfidfArray()