Example #1
def build_x_y_t(args):
    time_t = build_time_t(args)
    base_x_t = build_base_x_t(args)
    x_t = extend_function.extend_f_t(time_t,
                                     base_x_t,
                                     args.start_t,
                                     args.end_t,
                                     extension_args=args)

    time_t = build_time_t(args)
    base_y_t = build_base_y_t(args)
    y_t = extend_function.extend_f_t(time_t,
                                     base_y_t,
                                     args.start_t,
                                     args.end_t,
                                     extension_args=args)

    reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, args)

    x_scale = args.x_scale

    def scale_x_t(t):
        return reg_x_t(t) * x_scale

    y_scale = args.y_scale

    def scale_y_t(t):
        return reg_y_t(t) * y_scale

    return scale_x_t, scale_y_t
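
A minimal, self-contained sketch of the closure pattern used above, with plain lambdas standing in for the project-specific build_* and extend_function helpers (which are not shown here):

def build_scaled(reg_x_t, reg_y_t, x_scale, y_scale):
    # The returned functions close over the regularized curves and the scale factors.
    def scale_x_t(t):
        return reg_x_t(t) * x_scale

    def scale_y_t(t):
        return reg_y_t(t) * y_scale

    return scale_x_t, scale_y_t

sx_t, sy_t = build_scaled(lambda t: t, lambda t: 2 * t, x_scale=3.0, y_scale=0.5)
print(sx_t(2.0))  # 6.0
print(sy_t(2.0))  # 2.0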
Example #2
    def test_capped_linear_function(self):
        """
        Tests the result of regularization.regularize for a capped linear regularization
        """
        reg_args = Namespace(
            regularization_linear_cap=5.0,
            regularization_method=regularization.Regularization.CAPPED_LINEAR)
        x_t = lambda t: t
        y_t = lambda t: 0
        reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)
        self.assertAlmostEqual(2.0, reg_x_t(2))
        self.assertAlmostEqual(0.0, reg_y_t(2))
        self.assertAlmostEqual(5.0, reg_x_t(20))
        self.assertAlmostEqual(0.0, reg_y_t(20))

        x_t = lambda t: t * 3 / 5
        y_t = lambda t: t * 4 / 5
        reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)
        self.assertAlmostEqual(1.2, reg_x_t(2))
        self.assertAlmostEqual(1.6, reg_y_t(2))
        self.assertAlmostEqual(3.0, reg_x_t(20))
        self.assertAlmostEqual(4.0, reg_y_t(20))
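
The assertions above are consistent with a regularizer that caps the radial distance sqrt(x^2 + y^2) at regularization_linear_cap while preserving direction. A rough, self-contained sketch of that behaviour (the real regularization module is not shown here, so this is an illustration rather than the actual implementation):

import math

def capped_linear_regularize(x_t, y_t, cap):
    # Cap the radius sqrt(x^2 + y^2) at `cap`, preserving the direction of (x, y).
    def reg_x_t(t):
        x, y = x_t(t), y_t(t)
        r = math.hypot(x, y)
        return x if r <= cap else x * cap / r

    def reg_y_t(t):
        x, y = x_t(t), y_t(t)
        r = math.hypot(x, y)
        return y if r <= cap else y * cap / r

    return reg_x_t, reg_y_t

reg_x_t, reg_y_t = capped_linear_regularize(lambda t: t * 3 / 5.0, lambda t: t * 4 / 5.0, cap=5.0)
print(reg_x_t(20))  # 3.0, matching the assertion above
print(reg_y_t(20))  # 4.0, matching the assertion above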
Example #3
    def test_hyperbolic_function(self):
        """
        Tests the result of regularization.regularize for a hyperbolic regularization
        """
        reg_args = Namespace(
            regularization_x_trans=-1.0,
            regularization_y_trans=-2.0,
            regularization_slope=2,
            regularization_method=regularization.Regularization.HYPERBOLIC)
        self.assertEqual(
            "Hyperbolic regularization: y = 2 (0.5 (x + 1.0) + (0.25 + 0.25 (x + 1.0)^{2})^{0.5}) - 2.0",
            regularization.hyperbolic_function_string(reg_args))

        x_t = lambda t: t
        y_t = lambda t: 0
        reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)
        self.assertAlmostEqual(2.236067977, reg_x_t(1))
        self.assertAlmostEqual(0, reg_y_t(1))

        x_t = lambda t: t * 3 / 5
        y_t = lambda t: t * 4 / 5
        reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)
        self.assertAlmostEqual(2.236067977 * 3 / 5, reg_x_t(1))
        self.assertAlmostEqual(2.236067977 * 4 / 5, reg_y_t(1))
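
Reading the expected formula string off the test, the hyperbolic curve appears to be y = slope * (0.5 * (x - x_trans) + sqrt(0.25 + 0.25 * (x - x_trans)^2)) + y_trans, presumably applied to the radius and then rescaled per component (which would explain the 3/5 and 4/5 factors in the last two assertions). A quick, self-contained check of the scalar formula against the asserted value:

import math

def hyperbolic(r, slope=2.0, x_trans=-1.0, y_trans=-2.0):
    # y = slope * (0.5 * (r - x_trans) + sqrt(0.25 + 0.25 * (r - x_trans) ** 2)) + y_trans
    shifted = r - x_trans  # equals (x + 1.0) in the printed string, since x_trans = -1.0
    return slope * (0.5 * shifted + math.sqrt(0.25 + 0.25 * shifted ** 2)) + y_trans

print(hyperbolic(1.0))  # ~2.236067977, the value asserted for reg_x_t(1) above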
def main(messages_test):
    #tokenize all messages
    tokens_test = tokenize(messages_test)
    #compute pos tags for all messages
    pos_tags_test = arktagger.pos_tag_list(messages_test)
    #compute pos tag bigrams
    pos_bigrams_test = getBigrams(pos_tags_test)
    #compute pos tag trigrams
    pos_trigrams_test = getTrigrams(pos_tags_test)

    now = time.time()

    #load scores
    pos_tags_scores_neutral, pos_tags_scores_positive, pos_tags_scores_negative, pos_bigrams_scores_neutral, pos_bigrams_scores_positive, pos_bigrams_scores_negative, pos_trigrams_scores_neutral, pos_trigrams_scores_positive, pos_trigrams_scores_negative, mpqaScores = loadScores(
    )

    #load lexicons
    negationList, slangDictionary, lexicons, mpqa_lexicons = loadLexiconsFromFile(
    )

    #load clusters
    clusters = loadClustersFromFile()

    print "Resources loaded"

    #load Glove embeddings
    d = 25
    glove = loadGlove(d)

    #Subjectivity Detection Features

    #SD1 features
    features_test_1 = features.getFeatures(
        messages_test, tokens_test, pos_tags_test, slangDictionary, lexicons,
        mpqa_lexicons, pos_bigrams_test, pos_trigrams_test,
        pos_bigrams_scores_negative, pos_bigrams_scores_positive,
        pos_trigrams_scores_negative, pos_trigrams_scores_positive,
        pos_tags_scores_negative, pos_tags_scores_positive, mpqaScores,
        negationList, clusters, pos_bigrams_scores_neutral,
        pos_trigrams_scores_neutral, pos_tags_scores_neutral)

    #SD2 features
    features_test_2 = []
    for i in range(0, len(messages_test)):
        features_test_2.append(glove.findCentroid(tokens_test[i]))

    features_test_2 = np.array(features_test_2)

    #regularize features
    print "After Reg"
    features_test_1 = regularization.regularize(features_test_1)

    print features_test_1
    features_test_2 = regularization.regularizeHorizontally(features_test_2)

    print features_test_2

    #load SD classifiers
    with open('resources/sd_models.pkl', 'rb') as input:
        sd1 = pickle.load(input)
        sd2 = pickle.load(input)

    #get confidence scores
    test_confidence_1 = sd1.decision_function(features_test_1)
    test_confidence_2 = sd2.decision_function(features_test_2)

    #normalize confidence scores
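    #(despite the name, this lambda is the element-wise logistic sigmoid, not a softmax)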
    softmax = lambda x: 1 / (1. + math.exp(-x))
    test_confidence_1 = [softmax(conf) for conf in test_confidence_1]
    test_confidence_2 = [softmax(conf) for conf in test_confidence_2]

    test_confidence_1 = np.array(test_confidence_1)
    test_confidence_2 = np.array(test_confidence_2)

    #Sentiment Polarity Features (append confidence scores to SD features)

    #SP1 features
    features_test_1 = np.hstack(
        (features_test_1,
         test_confidence_1.reshape(test_confidence_1.shape[0], 1)))

    #SP2 features
    features_test_2 = np.hstack(
        (features_test_2,
         test_confidence_2.reshape(test_confidence_2.shape[0], 1)))

    #load SP classifiers
    with open('resources/sp_models.pkl', 'rb') as input:
        sp1 = pickle.load(input)
        sp2 = pickle.load(input)

    #get confidence scores of every system
    confidence1 = sp1.decision_function(features_test_1)
    confidence2 = sp2.decision_function(features_test_2)

    for i in range(0, confidence1.shape[0]):
        for j in range(0, confidence1.shape[1]):
            confidence1[i][j] = softmax(confidence1[i][j])

    for i in range(0, confidence2.shape[0]):
        for j in range(0, confidence2.shape[1]):
            confidence2[i][j] = softmax(confidence2[i][j])

    #ensemble confidence scores with weight W
    W = 0.66

    confidence = confidence1 * W + confidence2 * (1 - W)
    print "confidence"
    print confidence

    #get final prediction
    prediction = [np.argmax(x) - 1 for x in confidence]

    prediction = np.array(prediction)

    print "Prediction\n"
    for i in range(0, prediction.shape[0]):
        if prediction[i] == -1:
            pol = "Negative"
        elif prediction[i] == 0:
            pol = "Neutral"
        elif prediction[i] == 1:
            pol = "Positive"
        print "Message : " + messages_test[i] + "Polarity : " + pol + "\n"

    #accuracy and indices of the wrongly classified messages
    #(senti is assumed to hold the gold polarity labels; it is not defined in this snippet)
    count_t = 0
    num_f = []
    num_f1 = []
    num_f2 = []
    num_f3 = []
    num_f4 = []
    num_f5 = []
    num_f6 = []
    senti_t = []
    prediction_f = []
    for j in range(0, senti.shape[0]):
        if senti[j] == prediction[j]:
            count_t = count_t + 1

        else:
            num_f.append(j)
            senti_t.append(senti[j])
            prediction_f.append(prediction[j])

    print count_t * 100.00 / count
    plt.scatter(num_f, senti_t, c='r')
    plt.scatter(num_f, prediction_f, c='b')
    plt.show()

    #break down the misclassifications by gold sentiment value (-1, 0, 1)
    for j in range(0, senti.shape[0]):
        if senti[j] == 1:
            if prediction[j] == 0:
                num_f1.append(j)
            elif prediction[j] == -1:
                num_f2.append(j)
        if senti[j] == 0:
            if prediction[j] == 1:
                num_f3.append(j)
            elif prediction[j] == -1:
                num_f4.append(j)
        if senti[j] == -1:
            if prediction[j] == 1:
                num_f5.append(j)
            elif prediction[j] == 0:
                num_f6.append(j)

    print num_f1, len(num_f1)
    print num_f2, len(num_f2)
    print num_f3, len(num_f3)
    print num_f4, len(num_f4)
    print num_f5, len(num_f5)
    print num_f6, len(num_f6)
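
The np.argmax(x) - 1 mapping used above implies that the classifiers' confidence columns are ordered [negative, neutral, positive]; assuming that ordering, a toy illustration of the mapping from column index to polarity code:

import numpy as np

# Hypothetical confidence rows, columns ordered [negative, neutral, positive].
confidence = np.array([[0.7, 0.2, 0.1],
                       [0.1, 0.3, 0.6]])
prediction = np.array([np.argmax(row) - 1 for row in confidence])
print(prediction)  # [-1  1], i.e. "Negative" and "Positive"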
def main(messages_test):
	#tokenize all messages
	tokens_test = tokenize(messages_test)
	#compute pos tags for all messages
	pos_tags_test = arktagger.pos_tag_list(messages_test)
	#compute pos tag bigrams
	pos_bigrams_test = getBigrams(pos_tags_test)
	#compute pos tag trigrams
	pos_trigrams_test = getTrigrams(pos_tags_test)

	now = time.time()

	#load scores
	pos_tags_scores_neutral, pos_tags_scores_positive, pos_tags_scores_negative, pos_bigrams_scores_neutral, pos_bigrams_scores_positive, pos_bigrams_scores_negative, pos_trigrams_scores_neutral, pos_trigrams_scores_positive, pos_trigrams_scores_negative, mpqaScores = loadScores()
	
	#load lexicons
	negationList, slangDictionary, lexicons, mpqa_lexicons = loadLexiconsFromFile()
	
	#load clusters
	clusters = loadClustersFromFile()
		
	print "Resources loaded"
	
	#load Glove embeddings
	d = 200
	glove = loadGlove(d)
		
	#Subjectivity Detection Features
	
	#SD1 features
	features_test_1 = features.getFeatures(messages_test,tokens_test,pos_tags_test,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_test,pos_trigrams_test,pos_bigrams_scores_negative,pos_bigrams_scores_positive,pos_trigrams_scores_negative,pos_trigrams_scores_positive,pos_tags_scores_negative,pos_tags_scores_positive,mpqaScores,negationList,clusters,pos_bigrams_scores_neutral,pos_trigrams_scores_neutral,pos_tags_scores_neutral)
	
	#SD2 features
	features_test_2=[]
	for i in range(0,len(messages_test)):
		features_test_2.append(glove.findCentroid(tokens_test[i]))

	features_test_2 = np.array(features_test_2)

	#regularize features
	features_test_1=regularization.regularize(features_test_1)
	features_test_2 = regularization.regularizeHorizontally(features_test_2)
	
	#load SD classifiers
	with open('resources/sd_models.pkl', 'rb') as input:
		sd1 = pickle.load(input)
		sd2 = pickle.load(input)
		
	#get confidence scores
	test_confidence_1 = sd1.decision_function(features_test_1)
	test_confidence_2 = sd2.decision_function(features_test_2)

	#normalize confidence scores
	softmax = lambda x: 1 / (1. + math.exp(-x))
	test_confidence_1 = [softmax(conf) for conf in test_confidence_1]
	test_confidence_2 = [softmax(conf) for conf in test_confidence_2]
	
	test_confidence_1 = np.array(test_confidence_1)
	test_confidence_2 = np.array(test_confidence_2)

	#Sentiment Polarity Features (append confidence scores to SD features)
	
	#SP1 features
	features_test_1 = np.hstack((features_test_1,test_confidence_1.reshape(test_confidence_1.shape[0],1)))
	#SP2 features
	features_test_2 = np.hstack((features_test_2,test_confidence_2.reshape(test_confidence_2.shape[0],1)))

	#load SP classifiers
	with open('resources/sp_models.pkl', 'rb') as input:
		sp1 = pickle.load(input)
		sp2 = pickle.load(input)
		
	#get confidence scores of every system
	confidence1 = sp1.decision_function(features_test_1)
	confidence2 = sp2.decision_function(features_test_2)

	for i in range(0,confidence1.shape[0]):
		for j in range(0,confidence1.shape[1]):
			confidence1[i][j] = softmax(confidence1[i][j])

	for i in range(0,confidence2.shape[0]):
		for j in range(0,confidence2.shape[1]):
			confidence2[i][j] = softmax(confidence2[i][j])

	#ensemble confidence scores with weight W
	W=0.66

	confidence = confidence1*W + confidence2*(1-W)

	#get final prediction
	prediction = [np.argmax(x)-1 for x in confidence]
	prediction = np.array(prediction)

	print "Prediction\n"
	for i in range(0, prediction.shape[0]):
		if prediction[i] == -1:
			pol = "Negative"
		elif prediction[i] == 0:
			pol = "Neutral"
		else:
			pol = "Positive"
                print "Message : " + messages_test[i]+"Polarity : "+pol+"\n"
Example #6
def classify(messages_train,labels_train,messages_test,process_messages_train,process_messages_test,tokens_train,tokens_test,process_tokens_train,process_tokens_test,pos_tags_train,pos_tags_test,negationList,clusters,slangDictionary,lexicons,mpqa_lexicons): 
    # 0 - negative messages
    # 1 - positive messages
    labels_train = [0 if x=="negative" else 1 for x in labels_train]
    
    #compute pos tag bigrams for all messages
    pos_bigrams_train = getBigrams(pos_tags_train)
    pos_bigrams_test = getBigrams(pos_tags_test)

    #compute pos tag trigrams for all messages
    pos_trigrams_train = getTrigrams(pos_tags_train)
    pos_trigrams_test = getTrigrams(pos_tags_test)

    #get the unique pos tags, bigrams, and trigrams from the training set
    unique_pos_tags = getPosTagsSet(pos_tags_train)
    unique_bigrams = getBigramsSet(pos_bigrams_train)
    unique_trigrams= getTrigramsSet(pos_trigrams_train)

    #calculate pos tags scores for all categories
    #both dictionaries will be used for training and testing (cannot create new for testing because we don't know the labels of the new messages)
    pos_tags_scores_negative = posTagsScore(unique_pos_tags,0,pos_tags_train,labels_train)
    pos_tags_scores_positive = posTagsScore(unique_pos_tags,1,pos_tags_train,labels_train)

    #calculate pos bigrams score for all categories
    #both dictionaries will be used for training and testing (cannot create new for testing because we don't know the labels of the new messages)
    pos_bigrams_scores_negative = posBigramsScore(unique_bigrams,0,pos_bigrams_train,labels_train)
    pos_bigrams_scores_positive = posBigramsScore(unique_bigrams,1,pos_bigrams_train,labels_train)

    #calculate pos trigrams scores for all categories
    #both dictionaries will be used for training and testing (cannot create new for testing because we don't know the labels of the new messages)
    pos_trigrams_scores_negative = posTrigramsScore(unique_trigrams,0,pos_trigrams_train,labels_train)
    pos_trigrams_scores_positive = posTrigramsScore(unique_trigrams,1,pos_trigrams_train,labels_train)

    #assign a precision and F1 score to each word of all the mpqa lexicons
    mpqaScores = getScores(mpqa_lexicons,process_messages_train,labels_train)


    #get features from train messages
    features_train = features.getFeatures(messages_train,process_messages_train,tokens_train,process_tokens_train,pos_tags_train,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_train,pos_trigrams_train,pos_bigrams_scores_negative,pos_bigrams_scores_positive,pos_trigrams_scores_negative,pos_trigrams_scores_positive,pos_tags_scores_negative,pos_tags_scores_positive,mpqaScores,negationList,clusters)

    
    #regularize train features
    features_train=regularization.regularize(features_train)


    #get features from test messages 
    features_test = features.getFeatures(messages_test,process_messages_test,tokens_test,process_tokens_test,pos_tags_test,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_test,pos_trigrams_test,pos_bigrams_scores_negative,pos_bigrams_scores_positive,pos_trigrams_scores_negative,pos_trigrams_scores_positive,pos_tags_scores_negative,pos_tags_scores_positive,mpqaScores,negationList,clusters)


    #regularize test features
    features_test=regularization.regularize(features_test)

    #feature selection
    #features_train, features_test = selection.feature_selection(features_train,labels_train,features_test,1150)

    #C parameter of SVM
    
    C = 0.001953125
    #C = 19.3392161013
    
    #train classifier and return trained model
    #model = LogisticRegression.train(features_train,labels_train)
    model = SVM.train(features_train,labels_train,c=C,k="linear")
        
    #predict labels
    #prediction = LogisticRegression.predict(features_test,model)
    prediction = SVM.predict(features_test,model)

    return prediction
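
getBigrams and getTrigrams are project helpers that are not shown here; assuming each message's POS tags arrive as a list of tag strings, hypothetical stand-ins would look roughly like this:

def get_bigrams(tag_lists):
    # One list of (tag, tag) pairs per message.
    return [list(zip(tags, tags[1:])) for tags in tag_lists]

def get_trigrams(tag_lists):
    # One list of (tag, tag, tag) triples per message.
    return [list(zip(tags, tags[1:], tags[2:])) for tags in tag_lists]

print(get_bigrams([["N", "V", "A"]]))   # [[('N', 'V'), ('V', 'A')]]
print(get_trigrams([["N", "V", "A"]]))  # [[('N', 'V', 'A')]]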
Example #7
def main(messages_test):
    #tokenize all messages
    tokens_test = tokenize(messages_test)
    #compute pos tags for all messages
    pos_tags_test = arktagger.pos_tag_list(messages_test)
    #compute pos tag bigrams
    pos_bigrams_test = getBigrams(pos_tags_test)
    #compute pos tag trigrams
    pos_trigrams_test = getTrigrams(pos_tags_test)

    now = time.time()

    #load scores
    pos_tags_scores_neutral, pos_tags_scores_positive, pos_tags_scores_negative, pos_bigrams_scores_neutral, pos_bigrams_scores_positive, pos_bigrams_scores_negative, pos_trigrams_scores_neutral, pos_trigrams_scores_positive, pos_trigrams_scores_negative, mpqaScores = loadScores(
    )

    #load lexicons
    negationList, slangDictionary, lexicons, mpqa_lexicons = loadLexiconsFromFile(
    )

    #load clusters
    clusters = loadClustersFromFile()

    print "Resources loaded"

    #load Glove embeddings
    d = 200
    glove = loadGlove(d)

    #Subjectivity Detection Features

    #SD1 features
    features_test_1 = features.getFeatures(
        messages_test, tokens_test, pos_tags_test, slangDictionary, lexicons,
        mpqa_lexicons, pos_bigrams_test, pos_trigrams_test,
        pos_bigrams_scores_negative, pos_bigrams_scores_positive,
        pos_trigrams_scores_negative, pos_trigrams_scores_positive,
        pos_tags_scores_negative, pos_tags_scores_positive, mpqaScores,
        negationList, clusters, pos_bigrams_scores_neutral,
        pos_trigrams_scores_neutral, pos_tags_scores_neutral)

    #SD2 features
    features_test_2 = []
    for i in range(0, len(messages_test)):
        features_test_2.append(glove.findCentroid(tokens_test[i]))

    features_test_2 = np.array(features_test_2)

    #regularize features
    features_test_1 = regularization.regularize(features_test_1)
    features_test_2 = regularization.regularizeHorizontally(features_test_2)

    #load SD classifiers
    with open('resources/sd_models.pkl', 'rb') as input:
        sd1 = pickle.load(input)
        sd2 = pickle.load(input)

    #get confidence scores
    test_confidence_1 = sd1.decision_function(features_test_1)
    test_confidence_2 = sd2.decision_function(features_test_2)

    #normalize confidence scores
    softmax = lambda x: 1 / (1. + math.exp(-x))
    test_confidence_1 = [softmax(conf) for conf in test_confidence_1]
    test_confidence_2 = [softmax(conf) for conf in test_confidence_2]

    test_confidence_1 = np.array(test_confidence_1)
    test_confidence_2 = np.array(test_confidence_2)

    #Sentiment Polarity Features (append confidence scores to SD features)

    #SP1 features
    features_test_1 = np.hstack(
        (features_test_1,
         test_confidence_1.reshape(test_confidence_1.shape[0], 1)))
    #SP2 features
    features_test_2 = np.hstack(
        (features_test_2,
         test_confidence_2.reshape(test_confidence_2.shape[0], 1)))

    #load SP classifiers
    with open('resources/sp_models.pkl', 'rb') as input:
        sp1 = pickle.load(input)
        sp2 = pickle.load(input)

    #get confidence scores of every system
    confidence1 = sp1.decision_function(features_test_1)
    confidence2 = sp2.decision_function(features_test_2)

    for i in range(0, confidence1.shape[0]):
        for j in range(0, confidence1.shape[1]):
            confidence1[i][j] = softmax(confidence1[i][j])

    for i in range(0, confidence2.shape[0]):
        for j in range(0, confidence2.shape[1]):
            confidence2[i][j] = softmax(confidence2[i][j])

    #ensemble confidence scores with weight W
    W = 0.66

    confidence = confidence1 * W + confidence2 * (1 - W)

    #get final prediction
    prediction = [np.argmax(x) - 1 for x in confidence]
    prediction = np.array(prediction)

    print "Prediction\n"
    for i in range(0, prediction.shape[0]):
        if prediction[i] == -1:
            pol = "Negative"
        elif prediction[i] == 0:
            pol = "Neutral"
        else:
            pol = "Positive"
        print "Message : " + messages_test[i] + "Polarity : " + pol + "\n"
Example #8
def main(f):
    print "System training started"

    #load training dataset
    dataset_train = f
    ids, labels_train, messages_train = tsvreader.opentsv(dataset_train)
    print "Train data loaded"

    #labels for subjectivity detection (2 categories)
    temp_labels_train = [0 if x == "neutral" else 1 for x in labels_train]
    #labels for polarity detection (3 categories)
    labels_train = [
        0 if x == "neutral" else -1 if x == "negative" else 1
        for x in labels_train
    ]

    #convert labels to numpy arrays
    temp_labels_train = np.array(temp_labels_train)
    labels_train = np.array(labels_train)

    #load word clusters
    clusters = loadClusters()
    print "Clusters loaded"

    #load Lexicons
    negationList, slangDictionary, lexicons, mpqa_lexicons = loadLexicons()
    print "Lexicons loaded"

    #tokenize all messages
    tokens_train = tokenize(messages_train)
    print "Messages tokenized"

    #compute pos tags for all messages
    pos_tags_train = arktagger.pos_tag_list(messages_train)
    print "Pos tags computed"

    #compute pos tag bigrams
    pos_bigrams_train = getBigrams(pos_tags_train)
    #compute pos tag trigrams
    pos_trigrams_train = getTrigrams(pos_tags_train)

    #get the unique pos tags, bigrams, and trigrams from the training set
    unique_pos_tags = getPosTagsSet(pos_tags_train)
    unique_bigrams = getBigramsSet(pos_bigrams_train)
    unique_trigrams = getTrigramsSet(pos_trigrams_train)

    #compute POS tag scores
    pos_tags_scores_neutral = posTagsScore(unique_pos_tags, 0, pos_tags_train,
                                           labels_train)
    pos_tags_scores_positive = posTagsScore(unique_pos_tags, 1, pos_tags_train,
                                            labels_train)
    pos_tags_scores_negative = posTagsScore(unique_pos_tags, -1,
                                            pos_tags_train, labels_train)

    pos_bigrams_scores_neutral = posBigramsScore(unique_bigrams, 0,
                                                 pos_bigrams_train,
                                                 labels_train)
    pos_bigrams_scores_positive = posBigramsScore(unique_bigrams, 1,
                                                  pos_bigrams_train,
                                                  labels_train)
    pos_bigrams_scores_negative = posBigramsScore(unique_bigrams, -1,
                                                  pos_bigrams_train,
                                                  labels_train)

    pos_trigrams_scores_neutral = posTrigramsScore(unique_trigrams, 0,
                                                   pos_trigrams_train,
                                                   labels_train)
    pos_trigrams_scores_positive = posTrigramsScore(unique_trigrams, 1,
                                                    pos_trigrams_train,
                                                    labels_train)
    pos_trigrams_scores_negative = posTrigramsScore(unique_trigrams, -1,
                                                    pos_trigrams_train,
                                                    labels_train)

    #compute mpqa scores
    mpqaScores = getScores(mpqa_lexicons,
                           messages_train,
                           labels_train,
                           neutral=True)

    #save scores and other resources for future use
    savePosScores(pos_tags_scores_neutral, pos_tags_scores_positive,
                  pos_tags_scores_negative, pos_bigrams_scores_neutral,
                  pos_bigrams_scores_positive, pos_bigrams_scores_negative,
                  pos_trigrams_scores_neutral, pos_trigrams_scores_positive,
                  pos_trigrams_scores_negative, mpqaScores)
    #save lexicons
    saveLexicons(negationList, slangDictionary, lexicons, mpqa_lexicons)
    #save clusters
    saveClusters(clusters)

    #load Glove embeddings
    d = 200
    glove = GloveDictionary.Glove(d)

    #save Glove embeddings for future use
    saveGlove(glove)

    #Subjectivity Detection Features

    #SD1 features
    features_train_1 = features.getFeatures(
        messages_train, tokens_train, pos_tags_train, slangDictionary,
        lexicons, mpqa_lexicons, pos_bigrams_train, pos_trigrams_train,
        pos_bigrams_scores_negative, pos_bigrams_scores_positive,
        pos_trigrams_scores_negative, pos_trigrams_scores_positive,
        pos_tags_scores_negative, pos_tags_scores_positive, mpqaScores,
        negationList, clusters, pos_bigrams_scores_neutral,
        pos_trigrams_scores_neutral, pos_tags_scores_neutral)

    #SD2 features
    features_train_2 = []
    #for message in tokens_train :
    for i in range(0, len(messages_train)):
        features_train_2.append(glove.findCentroid(tokens_train[i]))
    features_train_2 = np.array(features_train_2)

    #regularize features
    features_train_1 = regularization.regularize(features_train_1)
    features_train_2 = regularization.regularizeHorizontally(features_train_2)

    #Penalty parameter C of the error term for every SD system
    C1 = 0.001953125
    C2 = 1.4068830572470667

    #get confidence scores
    train_confidence_1 = getConfidenceScores(features_train_1,
                                             temp_labels_train, C1)
    train_confidence_2 = getConfidenceScores(features_train_2,
                                             temp_labels_train, C2)

    #normalize confidence scores
    softmax = lambda x: 1 / (1. + math.exp(-x))
    train_confidence_1 = [softmax(conf) for conf in train_confidence_1]
    train_confidence_2 = [softmax(conf) for conf in train_confidence_2]

    train_confidence_1 = np.array(train_confidence_1)
    train_confidence_2 = np.array(train_confidence_2)

    #train SD classifiers
    sd1 = SVM.train(features_train_1, temp_labels_train, c=C1, k="linear")
    sd2 = SVM.train(features_train_2, temp_labels_train, c=C2, k="linear")

    #Sentiment Polarity Features (append confidence scores to SD features)

    #SP1 features
    features_train_1 = np.hstack(
        (features_train_1,
         train_confidence_1.reshape(train_confidence_1.shape[0], 1)))
    #SP2 features
    features_train_2 = np.hstack(
        (features_train_2,
         train_confidence_2.reshape(train_confidence_2.shape[0], 1)))

    #Penalty parameter C of the error term for every SP system
    C1 = 0.003410871889693192
    C2 = 7.396183688299606

    #train SP classifiers
    sp1 = SVM.train(features_train_1, labels_train, c=C1, k="linear")
    sp2 = SVM.train(features_train_2, labels_train, c=C2, k="linear")

    #save trained models
    saveModels(sd1, sd2, sp1, sp2)

    print "System training completed!"
Example #9
def main(f):
	print "System training started"
	
	#load training dataset
	dataset_train = f
	ids,labels_train,messages_train=tsvreader.opentsv(dataset_train)
	print "Train data loaded"
	
	#labels for subjectivity detection (2 categories)
	temp_labels_train = [0 if x=="neutral" else 1 for x in labels_train]
	#labels for polarity detection (3 categories)
	labels_train = [0 if x=="neutral" else -1 if x =="negative" else 1 for x in labels_train]
	
	#convert labels to numpy arrays
	temp_labels_train=np.array(temp_labels_train)
	labels_train=np.array(labels_train)
	
	#load word clusters
	clusters = loadClusters()
	print "Clusters loaded"
	
	#load Lexicons
	negationList, slangDictionary, lexicons, mpqa_lexicons = loadLexicons()
	print "Lexicons loaded"

	#tokenize all messages
	tokens_train = tokenize(messages_train)
	print "Messages tokenized"

	#compute pos tags for all messages
	pos_tags_train = arktagger.pos_tag_list(messages_train)
	print "Pos tags computed"
	
	#compute pos tag bigrams
	pos_bigrams_train = getBigrams(pos_tags_train)
	#compute pos tag trigrams
	pos_trigrams_train = getTrigrams(pos_tags_train)

	#get the unique pos tags, bigrams, and trigrams from the training set
	unique_pos_tags = getPosTagsSet(pos_tags_train)
	unique_bigrams = getBigramsSet(pos_bigrams_train)
	unique_trigrams= getTrigramsSet(pos_trigrams_train)

	#compute POS tag scores
	pos_tags_scores_neutral = posTagsScore(unique_pos_tags,0,pos_tags_train,labels_train)
	pos_tags_scores_positive = posTagsScore(unique_pos_tags,1,pos_tags_train,labels_train)
	pos_tags_scores_negative = posTagsScore(unique_pos_tags,-1,pos_tags_train,labels_train)
	   
	pos_bigrams_scores_neutral = posBigramsScore(unique_bigrams,0,pos_bigrams_train,labels_train)
	pos_bigrams_scores_positive = posBigramsScore(unique_bigrams,1,pos_bigrams_train,labels_train)
	pos_bigrams_scores_negative = posBigramsScore(unique_bigrams,-1,pos_bigrams_train,labels_train)

	pos_trigrams_scores_neutral = posTrigramsScore(unique_trigrams,0,pos_trigrams_train,labels_train)
	pos_trigrams_scores_positive = posTrigramsScore(unique_trigrams,1,pos_trigrams_train,labels_train)
	pos_trigrams_scores_negative = posTrigramsScore(unique_trigrams,-1,pos_trigrams_train,labels_train)
	
	#compute mpqa scores
	mpqaScores = getScores(mpqa_lexicons,messages_train,labels_train,neutral=True)
	
	#save scores and other resources for future use
	savePosScores(pos_tags_scores_neutral, pos_tags_scores_positive,pos_tags_scores_negative,pos_bigrams_scores_neutral,pos_bigrams_scores_positive,pos_bigrams_scores_negative,pos_trigrams_scores_neutral,pos_trigrams_scores_positive,pos_trigrams_scores_negative,mpqaScores)
	#save lexicons
	saveLexicons(negationList,slangDictionary,lexicons,mpqa_lexicons)
	#save clusters
	saveClusters(clusters)
	
	#load Glove embeddings
	d = 200
	glove = GloveDictionary.Glove(d)

	#save Glove embeddings for future use
	saveGlove(glove)
	
	#Subjectivity Detection Features
	
	#SD1 features
	features_train_1 = features.getFeatures(messages_train,tokens_train,pos_tags_train,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_train,pos_trigrams_train,pos_bigrams_scores_negative,pos_bigrams_scores_positive,pos_trigrams_scores_negative,pos_trigrams_scores_positive,pos_tags_scores_negative,pos_tags_scores_positive,mpqaScores,negationList,clusters,pos_bigrams_scores_neutral,pos_trigrams_scores_neutral,pos_tags_scores_neutral)
	
	#SD2 features
	features_train_2 = []
	#for message in tokens_train :
	for i in range(0,len(messages_train)):
		features_train_2.append(glove.findCentroid(tokens_train[i]))
	features_train_2 = np.array(features_train_2)
	
	#regularize features
	features_train_1 = regularization.regularize(features_train_1)
	features_train_2 = regularization.regularizeHorizontally(features_train_2)
	
	#Penalty parameter C of the error term for every SD system
	C1=0.001953125
	C2=1.4068830572470667

	#get confidence scores
	train_confidence_1 = getConfidenceScores(features_train_1, temp_labels_train, C1)
	train_confidence_2 = getConfidenceScores(features_train_2, temp_labels_train, C2)
	
	#normalize confidence scores
	softmax = lambda x: 1 / (1. + math.exp(-x))
	train_confidence_1 = [softmax(conf) for conf in train_confidence_1]
	train_confidence_2 = [softmax(conf) for conf in train_confidence_2]
	
	train_confidence_1 = np.array(train_confidence_1)
	train_confidence_2 = np.array(train_confidence_2)

	#train SD classifiers
	sd1 = SVM.train(features_train_1,temp_labels_train,c=C1,k="linear")
	sd2 = SVM.train(features_train_2,temp_labels_train,c=C2,k="linear")
	
	#Sentiment Polarity Features (append confidence scores to SD features)
	
	#SP1 features
	features_train_1 = np.hstack((features_train_1,train_confidence_1.reshape(train_confidence_1.shape[0],1)))
	#SP2 features
	features_train_2 = np.hstack((features_train_2,train_confidence_2.reshape(train_confidence_2.shape[0],1)))

	#Penalty parameter C of the error term for every SP system
	C1=0.003410871889693192
	C2=7.396183688299606

	#train SP classifiers
	sp1 = SVM.train(features_train_1,labels_train,c=C1,k="linear")
	sp2 = SVM.train(features_train_2,labels_train,c=C2,k="linear")
	
	#save trained models
	saveModels(sd1,sd2,sp1,sp2)
	
	print "System training completed!"
Example #10
    #get the unique pos bigrams from training set
    unique_bigrams = getBigramsSet(pos_bigrams_train)

    #calculate pos bigrams score for all categories
    #both dictionaries will be used for training and testing (cannot create new for testing because we don't know the labels of the new messages)
    pos_bigrams_scores_objective = posBigramsScore(unique_bigrams,0,pos_bigrams_train,labels_train)
    pos_bigrams_scores_subjective = posBigramsScore(unique_bigrams,1,pos_bigrams_train,labels_train)

    #assign a precision and F1 score to each word of all the mpqa and semeval_13 lexicons
    mpqaScores = getScores(mpqa_lexicons,process_messages_train,labels_train)

    #get features from train messages
    features_train = features_subjectivity.getFeatures(messages_train,process_messages_train,tokens_train,process_tokens_train,pos_tags_train,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_train,pos_bigrams_scores_objective,pos_bigrams_scores_subjective,mpqaScores,negationList,clusters)

    #regularize train features
    features_train=regularization.regularize(features_train)

    #get features from test messages 
    features_test = features_subjectivity.getFeatures(messages_test,process_messages_test,tokens_test,process_tokens_test,pos_tags_test,slangDictionary,lexicons,mpqa_lexicons,pos_bigrams_test,pos_bigrams_scores_objective,pos_bigrams_scores_subjective,mpqaScores,negationList,clusters)

    #regularize test features
    features_test=regularization.regularize(features_test)
else:
    # 0 - negative messages
    # 1 - positive messages
    labels_train = [0 if x=="negative" else 1 for x in labels_train]
    labels_test = [0 if x=="negative" else 1 for x in labels_test]
    
    #compute pos tag bigrams for all messages
    pos_bigrams_train = getBigrams(pos_tags_train)
    pos_bigrams_test = getBigrams(pos_tags_test)