def build_stemmed_bigrams_stopwords_vocabulary(corpus, stop_words):
    """Build a stemmed bigram vocabulary (with the given stop-word list) for the ng20 corpus and save it to the DB."""
    from ng20_globals import min_tf, min_df
    from commons.stemming_tokenizer import StemmingTokenizer

    tokenizer = StemmingTokenizer()
    max_ngram_size = 2
    vocabulary = build_vocabulary(corpus, tokenizer, stop_words, max_ngram_size, min_df, min_tf)

    # save to DB
    tbl_name = 'ng20_stems_bigrams_stopwords_df{0}_tf{1}'.format(min_df, min_tf)
    save_vocabulary(vocabulary, tbl_name)

    print 'done ' + tbl_name
def build_stemmed_unigrams_stopwords_vocabulary(corpus, stop_words, vocabulary_src):
    """Build a stemmed unigram vocabulary (with the given stop-word list) for the CLEF 2010 corpus and save it to the DB."""
    from clef_globals import min_df, min_tf
    from commons.stemming_tokenizer import StemmingTokenizer

    tokenizer = StemmingTokenizer()
    max_ngram_size = 1
    vocabulary = build_vocabulary(corpus, tokenizer, stop_words, max_ngram_size, min_df, min_tf)

    # save to DB
    tbl_name = 'clef_2010_{0}_stems_unigrams_stopwords_df{1}_tf{2}'.format(vocabulary_src, min_df, min_tf)
    save_vocabulary(vocabulary, tbl_name)

    print 'done ' + tbl_name
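# Usage sketch for the two builders above: each expects an iterable of raw document strings
# plus a stop-word list. The loader names below (load_ng20_corpus, load_clef_corpus,
# load_stopwords) are hypothetical placeholders for the project's real corpus-loading code,
# and 'title' is only an example vocabulary_src value.
#
#   stop_words = load_stopwords()
#   build_stemmed_bigrams_stopwords_vocabulary(load_ng20_corpus(), stop_words)
#   build_stemmed_unigrams_stopwords_vocabulary(load_clef_corpus(), stop_words, 'title')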
def test_stemmed_all_bigrams(corpus_train_data, corpus_test_data, vocabulary_src,
                             with_stopwords_removal, use_chi_features, use_raw_tokens):
    """Classify CLEF 2010 documents with a stemmed unigram+bigram vocabulary restricted to the wiki/wiktionary/google bigram view, and report precision/recall/F1."""
    from commons.stemming_tokenizer import StemmingTokenizer
    from clef_globals import min_df, min_tf, test_set_size, max_labels
    from clef_vocabulary_loader import load_common_vocabulary

    max_ngram_size = 2
    tokenizer = StemmingTokenizer()

    if with_stopwords_removal == False:
        stopwords_pattern = ''
    else:
        stopwords_pattern = '_stopwords'
    if use_chi_features == False:
        chi_features_pattern = ''
    else:
        chi_features_pattern = '_chi'
    if use_raw_tokens == False:
        raw_tokens_pattern = ''
    else:
        raw_tokens_pattern = '_raw'

    # load vocabulary
    vocabulary_tbl_name1 = 'clef_2010_{0}{1}_stems{2}_unigrams{3}_df{4}_tf{5}'.format(
        vocabulary_src, raw_tokens_pattern, chi_features_pattern, stopwords_pattern, min_df, min_tf)
    vocabulary_tbl_name2 = 'clef_2010_{0}{1}_stems_bigrams{3}_df{4}_tf{5}'.format(
        vocabulary_src, raw_tokens_pattern, chi_features_pattern, stopwords_pattern, min_df, min_tf)
    vocabulary_tbl_intersect = 'wiki_wiktionary_google_bigrams_vw'
    vocabulary = load_common_vocabulary(vocabulary_tbl_name1, vocabulary_tbl_name2, vocabulary_tbl_intersect, 'stem')

    # generate tfidf vectors
    corpus_train_tfidf_vectors = vectorize_corpus(corpus_train_data['corpus'], tokenizer, vocabulary, max_ngram_size)
    corpus_test_tfidf_vectors = vectorize_corpus(corpus_test_data['corpus'], tokenizer, vocabulary, max_ngram_size)

    # classify & evaluate
    results = classify(corpus_train_tfidf_vectors, corpus_train_data['labels'],
                       corpus_test_tfidf_vectors, corpus_test_data['labels'],
                       test_set_size, max_labels)

    print vocabulary_tbl_name1, '^', vocabulary_tbl_name2, '^', vocabulary_tbl_intersect, ' --> ', 'precision ', results['precision'], 'recall ', results['recall'], 'f1 ', results['f1']
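# Worked example of the table names composed above (illustrative values only; min_df and
# min_tf actually come from clef_globals): with vocabulary_src='title', use_raw_tokens=False,
# use_chi_features=False, with_stopwords_removal=True, min_df=3 and min_tf=2, the lookups
# resolve to
#   vocabulary_tbl_name1 = 'clef_2010_title_stems_unigrams_stopwords_df3_tf2'
#   vocabulary_tbl_name2 = 'clef_2010_title_stems_bigrams_stopwords_df3_tf2'
# and both are intersected with the wiki_wiktionary_google_bigrams_vw view on the 'stem' column.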
def test_stemmed_bigrams_unigrams(bigrams_src, corpus_train_data, corpus_test_data, label_names,
                                  with_stopwords_removal, use_chi_features, use_raw_tokens):
    """Classify ng20 documents with a stemmed bigram vocabulary intersected with the given bigram sources and extended with unigrams, and report accuracy."""
    import numpy as np
    from commons.stemming_tokenizer import StemmingTokenizer
    from commons.stemming_tokenizer import RawStemmingTokenizer
    from ng20_globals import max_labels, min_df, min_tf
    from ng20_vocabulary_loader import load_common_vocabulary_extend_unigrams

    max_ngram_size = 2

    if with_stopwords_removal == False:
        stopwords_pattern = ''
    else:
        stopwords_pattern = '_stopwords'
    if use_chi_features == False:
        chi_features_pattern = ''
    else:
        chi_features_pattern = '_chi'
    if use_raw_tokens == False:
        raw_tokens_pattern = ''
        tokenizer = StemmingTokenizer()
    else:
        raw_tokens_pattern = '_raw'
        tokenizer = RawStemmingTokenizer()

    # load vocabulary
    vocabulary_tbl_name = 'ng20{0}_stems{1}_bigrams{2}_df{3}_tf{4}'.format(
        raw_tokens_pattern, chi_features_pattern, stopwords_pattern, min_df, min_tf)
    if len(bigrams_src) == 1:
        vocabulary_tbl_intersect = '{0}_bigrams'.format(bigrams_src[0])
    else:
        # multiple sources: build a '<src1>_<src2>_..._bigrams_vw' view name
        vocabulary_tbl_intersect = '{0}_'.format(bigrams_src[0])
        for i in range(len(bigrams_src) - 1):
            vocabulary_tbl_intersect = '{0}{1}_'.format(vocabulary_tbl_intersect, bigrams_src[i + 1])
        vocabulary_tbl_intersect = '{0}bigrams_vw'.format(vocabulary_tbl_intersect)
    vocabulary = load_common_vocabulary_extend_unigrams(vocabulary_tbl_name, vocabulary_tbl_intersect, 'stem')
    print 'done loading vocabulary'

    # generate tfidf vectors
    vectorizer, corpus_train_tfidf_vectors = vectorize_corpus(corpus_train_data['corpus'], tokenizer, vocabulary, max_ngram_size)
    _, corpus_test_tfidf_vectors = vectorize_corpus(corpus_test_data['corpus'], tokenizer, vocabulary, max_ngram_size)

    # classify & evaluate
    results = classify(corpus_train_tfidf_vectors, corpus_train_data['labels'],
                       corpus_test_tfidf_vectors, corpus_test_data['labels'], max_labels)
    print_top_feature_names(results['features_weights'], np.asarray(vectorizer.get_feature_names()),
                            vocabulary_tbl_name + '_' + vocabulary_tbl_intersect, label_names)

    print vocabulary_tbl_name, '^', vocabulary_tbl_intersect, '(extended unigrams) --> ', 'accuracy ', results['accuracy']
    #print vocabulary_tbl_name,'^',vocabulary_tbl_intersect,'(extended unigrams) --> ','precision ',results['precision'],'recall ',results['recall'],'f1 ',results['f1']
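# Usage sketch: test_stemmed_bigrams_unigrams expects train/test dicts with 'corpus' and
# 'labels' keys (as accessed above). The loader name below is a hypothetical placeholder;
# the bigram source list ['wiki', 'wiktionary', 'google'] reproduces the
# wiki_wiktionary_google_bigrams_vw view referenced in test_stemmed_all_bigrams.
#
#   corpus_train_data, corpus_test_data, label_names = load_ng20_train_test_data()
#   test_stemmed_bigrams_unigrams(['wiki', 'wiktionary', 'google'],
#                                 corpus_train_data, corpus_test_data, label_names,
#                                 with_stopwords_removal=False, use_chi_features=False,
#                                 use_raw_tokens=False)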