#----------------Feature Extraction--------------------------
import numpy
from sklearn.feature_extraction.text import CountVectorizer

#Process all documents
# Learn and transform documents [tf]
vectorizer_tf = CountVectorizer(stop_words=stop_words, tokenizer=tokenize)
vectorised_train_documents_tf = vectorizer_tf.fit_transform(train_docs)
#vectorised_test_documents_tf = vectorizer_tf.transform(test_docs)
num_labels = train_labels.shape[1]  # one column per class label
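
# NOTE (assumption): `stop_words` and `tokenize` are defined earlier in the
# script; CountVectorizer accepts a stop-word collection and a tokenizer
# callable through these parameters.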

term_freq = {}   # per-class (and per-complement) raw term counts
term_prob = {}   # smoothed per-class term probabilities
all_terms = {}   # corpus-wide term counts
totterms = 0.0   # total term count over all classes

def add2dict(k, v, features):
    # Accumulate v into the running total stored under key k.
    features[k] = features.get(k, 0.0) + v

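# `get_TF` is defined elsewhere in the project. A minimal sketch under the
# assumption that it sums the tf rows of the selected documents and returns
# a {term: frequency} dict keyed by the vectorizer's vocabulary:
def get_TF(vectorizer, doc_term_matrix, doc_ids):
    counts = numpy.asarray(doc_term_matrix[doc_ids].sum(axis=0)).ravel()
    terms = vectorizer.get_feature_names_out()  # get_feature_names() on sklearn < 1.0
    return {terms[j]: float(counts[j]) for j in numpy.nonzero(counts)[0]}
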
#Divide features by class
for i in range(num_labels):
    classdoc_ids = numpy.nonzero(train_labels[:, i])[0].tolist()
    if len(classdoc_ids) == 0:
        # No training documents for this class: store a single zero-count
        # placeholder term borrowed from the previous class.
        term_freq[i] = {next(iter(term_freq[i - 1])): 0}
        continue
    term_freq[i] = get_TF(vectorizer_tf, vectorised_train_documents_tf, classdoc_ids)
    # Accumulate the per-class counts into the corpus-wide totals.
    for k, v in term_freq[i].items():
        add2dict(k, v, all_terms)
    totterms += sum(term_freq[i].values())
all_terms_prob = {k: v/totterms for k, v in all_terms.items()}
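
# `train_labels_complement` is built elsewhere; assuming `train_labels` is a
# dense 0/1 indicator matrix, a plausible reconstruction is its complement
# (hypothetical, shown for completeness):
train_labels_complement = 1 - train_labels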
#Divide features by class-complements
for i in range(num_labels):
    classdoc_ids = numpy.nonzero(train_labels_complement[:, i])[0].tolist()
    term_freq[num_labels + i] = get_TF(vectorizer_tf, vectorised_train_documents_tf, classdoc_ids)
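
# `freqToProbability` is defined elsewhere in the project; what follows is a
# plausible sketch only, assuming it applies Jelinek-Mercer smoothing,
#   P(t|c) = lamda * P_ML(t|c) + (1 - lamda) * P(t|collection),
# with the complement counts supplying candidate terms unseen in the class:
def freqToProbability(freqs, compl_freqs, background_prob, lamda):
    total = sum(freqs.values()) or 1.0       # guard against an empty class
    vocab = set(freqs) | set(compl_freqs)    # cover terms unseen in-class
    return {t: lamda * (freqs.get(t, 0.0) / total)
               + (1 - lamda) * background_prob.get(t, 0.0)
            for t in vocab}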
#Convert to Probability & Perform Jelinek-Mercer Smoothing
if weight == "tf" or cor_type == "P":
    for i in range(num_labels):
        # Smooth each class model against its complement's counts, and the
        # complement model against the class's counts.
        term_prob[i] = freqToProbability(term_freq[i], term_freq[num_labels + i], all_terms_prob, lamda)
        term_prob[num_labels + i] = freqToProbability(term_freq[num_labels + i], term_freq[i], all_terms_prob, lamda)
vocab_choice = term_prob

#Clear memory for unused variables
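# A minimal sketch of the intended cleanup (assumption: the raw counts are no
# longer needed once the smoothed probabilities sit in `vocab_choice`):
del term_freq, all_terms, all_terms_prob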