def GET(self):
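        # web.py handler: parse the raw query string, pick the classifier
        # named by the 'method' parameter, and return its rendered HTML.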
        query = web.ctx.get('query')
        html = html_helper.HTMLHelper()
        twitterData = get_twitter_data.TwitterData()
        if query:
            if(query[0] == '?'):
                query = query[1:]
            arr = query.split('&')
            logging.warning(arr)
            
            #default values
            keyword = None
            method = None
            time = 'daily'

            for item in arr:
                if 'keyword' in item:
                    keyword = item.split('=')[1]
                elif 'method' in item:
                    method = item.split('=')[1]
                elif 'time' in item:
                    time = item.split('=')[1]
            #end loop
                            
            if method not in ('baseline', 'naivebayes', 'maxentropy', 'svm'):
                return html.getDefaultHTML(error=2)
            
            tweets = twitterData.getTwitterData(keyword, time)
            if(tweets):
                if(method == 'baseline'):
                    bc = baseline_classifier.BaselineClassifier(tweets, keyword, time)
                    bc.classify()
                    return bc.getHTML()
                elif(method == 'naivebayes'):
                    trainingDataFile = 'data/training_neatfile_2.csv'               
                    #classifierDumpFile = 'data/naivebayes_trained_model.pickle'
                    classifierDumpFile = 'data/test/naivebayes_test_model.pickle'
                    trainingRequired = 0
                    nb = naive_bayes_classifier.NaiveBayesClassifier(tweets, keyword, time, \
                                                  trainingDataFile, classifierDumpFile, trainingRequired)
                    nb.classify()
                    return nb.getHTML()
                elif(method == 'maxentropy'):
                    trainingDataFile = 'data/training_neatfile.csv'                
                    classifierDumpFile = 'data/maxent_trained_model.pickle'
                    trainingRequired = 0
                    maxent = max_entropy_classifier.MaxEntClassifier(tweets, keyword, time, \
                                                  trainingDataFile, classifierDumpFile, trainingRequired)
                    maxent.classify()
                    return maxent.getHTML()
                elif(method == 'svm'):
                    trainingDataFile = 'data/training_neatfile.csv'                
                    classifierDumpFile = 'data/svm_trained_model.pickle'
                    trainingRequired = 0
                    sc = libsvm_classifier.SVMClassifier(tweets, keyword, time, \
                                                  trainingDataFile, classifierDumpFile, trainingRequired)
                    sc.classify()
                    return sc.getHTML()
            else:
                return html.getDefaultHTML(error=1)
        else:
            return html.getDefaultHTML()
# Example 2
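# Flask variant of the handler above: keyword, method, and time come from the
# request arguments, the OAuth tokens come from the session, and the request
# is dispatched to the baseline, Naive Bayes, or MaxEnt classifier.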
def search():
	
	keyword = request.args.get('keyword')
	method = request.args.get('method')
	time = request.args.get('time') or 'daily'

	html = html_helper.HTMLHelper()
	#print html
	twitterData = get_twitter_data.TwitterData()
	#print twitterData
	if keyword:	                    
		if method not in ('baseline', 'naivebayes', 'maxentropy'):
		    return html.getDefaultHTML(error=2)
		ACCESS_TOKEN =  session['oauth_token']
		ACCESS_TOKEN_SECRET = session['oauth_secret']
		tweets = twitterData.getTwitterData(keyword, time,CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
		print tweets,"-------------------------"
		if(tweets):
		    if(method == 'baseline'):
		        bc = baseline_classifier.BaselineClassifier(tweets, keyword, time)
		        bc.classify()
		        return bc.getHTML()
		    elif(method == 'naivebayes'):
		        trainingDataFile = 'data/training_neatfile_2.csv'               
		        classifierDumpFile = 'data/naivebayes_trained_model.pickle'
		        #classifierDumpFile = 'data/test/naivebayes_test_model.pickle'
		        trainingRequired = 0
		        nb = naive_bayes_classifier.NaiveBayesClassifier(tweets, keyword, time, \
		                                      trainingDataFile, classifierDumpFile, trainingRequired)
		        nb.classify()
		        return nb.getHTML()
		    elif(method == 'maxentropy'):
		        trainingDataFile = 'data/training_neatfile.csv'                
		        classifierDumpFile = 'data/maxent_trained_model.pickle'
		        trainingRequired = 0
		        maxent = max_entropy_classifier.MaxEntClassifier(tweets, keyword, time, \
		                                      trainingDataFile, classifierDumpFile, trainingRequired)
		        maxent.classify()
		        return maxent.getHTML()
		else:
		    return html.getDefaultHTML(error=1)
	else:
		return html.getDefaultHTML()
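
# Command-line helper: fetch tweets for a keyword and time window, run the
# pre-trained MaxEnt classifier, and print per-tweet labels plus sentiment counts.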
def submit(keyword,time):
    print "Analysing "+keyword+ " for "+time+" : \n"
    twitterData = get_twitter_data.TwitterData()
    tweets = twitterData.getTwitterData(keyword, time)
    print "The collected tweets are : \n"
    trainingDataFile = 'data/training_neatfile.csv'
    classifierDumpFile = 'data/maxent_trained_model.pickle'
    trainingRequired = 0
    maxent = max_entropy_classifier.MaxEntClassifier(tweets, keyword, time, \
                              trainingDataFile, classifierDumpFile, trainingRequired)
    maxent.classify()
    val,val2,time,pos_count,neg_count,neut_count=maxent.print_value()
    items=len(val2)-1
    for i in range(0,items):
        print val[i]+ " : " + val2[i]+ "\n"
    print "The positive count be : "+str(pos_count)
    print "The negative count be : "+str(neg_count)
    print "The neutral count be : "+str(neut_count)
    return 0
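
# Flask form handler: classifies the fetched tweets with MaxEnt or SVM depending
# on the submitted form, stores the counts in the Searchresults table, and
# renders the matching template.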
def submit():
    try:
        keyword=request.form['yourname']
        time=request.form['options']
        twitterData = get_twitter_data.TwitterData()
        tweets = twitterData.getTwitterData(keyword, time)
        classifier = request.form['c']
        if classifier=="maxent":
            print "Maxent chosen"
            trainingDataFile = 'data/training_neatfile.csv'
            classifierDumpFile = 'data/maxent_trained_model.pickle'
            trainingRequired = 0
            maxent = max_entropy_classifier.MaxEntClassifier(tweets, keyword, time, \
                                      trainingDataFile, classifierDumpFile, trainingRequired)
            maxent.classify()
            val,val2,time,pos_count,neg_count,neut_count=maxent.print_value()
            pos_tweet,neg_tweet,neut_tweet=process(val,val2)
            print "maxent finished"
        else:
            trainingDataFile = 'data/training_neatfile.csv'                
            classifierDumpFile = 'data/svm_trained_model.pickle'
            trainingRequired = 0
            sc = libsvm_classifier.SVMClassifier(tweets, keyword, time, \
                                          trainingDataFile, classifierDumpFile, trainingRequired)
            sc.classify()
            print "classified"
            val,val2,time,pos_count,neg_count,neut_count=sc.print_value()
            pos_tweet,neg_tweet,neut_tweet=process(val,val2)
        res=str(pos_count)+" "+str(neut_count)+" "+str(neg_count)
        count = dbcon.Searchresults.select().count()
        if time == 'today':
            dbcon.Searchresults.create(time = get_time('today'),search_id=count+1,search_keyword=keyword,search_result=res,classifier_used=str(classifier))
            return render_template('form_action.html', name=keyword, option=get_time(time), pos_count=pos_count, neg_count=neg_count, neut_count=neut_count, pos_tweet=pos_tweet, neg_tweet=neg_tweet, neut_tweet=neut_tweet)
        elif time == 'lastweek':
            dbcon.Searchresults.create(time = get_time('week'),search_id=count+1,search_keyword=keyword,search_result=res,classifier_used=str(classifier))
            return render_template('form_action_weekly.html', name=keyword, option=get_time(time), pos_count=pos_count, neg_count=neg_count, neut_count=neut_count, pos_tweet=pos_tweet, neg_tweet=neg_tweet, neut_tweet=neut_tweet)
        else:
            return render_template('form_submit.html',sorry="T")
    except Exception:
        return render_template('form_submit.html',sorry="Y")
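
# Fragment of the accuracy-testing script: dispatch on the selected algorithm,
# train the corresponding test model, and report its accuracy.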
if (algorithm == 'naivebayes'):
    #trainingDataFile = 'data/training_trimmed.csv'
    trainingDataFile = 'data/full_training_dataset.csv'
    classifierDumpFile = 'data/test/naivebayes_test_model.pickle'
    trainingRequired = 1
    print 'Started to instantiate Naive Bayes Classifier'
    sys.stdout.flush()
    nb = naive_bayes_classifier.NaiveBayesClassifier(tweets, keyword, time,\
                                  trainingDataFile, classifierDumpFile, trainingRequired)
    #nb.classify()
    print 'Computing Accuracy'
    sys.stdout.flush()
    nb.accuracy()
elif (algorithm == 'maxent'):
    #trainingDataFile = 'data/training_trimmed.csv'
    trainingDataFile = 'data/full_training_dataset.csv'
    classifierDumpFile = 'data/test/maxent_test_model.pickle'
    trainingRequired = 1
    print 'Started to instantiate Max Entropy Classifier'
    sys.stdout.flush()
    maxent = max_entropy_classifier.MaxEntClassifier(tweets, keyword, time,\
                                  trainingDataFile, classifierDumpFile, trainingRequired)
    #maxent.analyzeTweets()
    #maxent.classify()
    print 'Computing Accuracy'
    sys.stdout.flush()
    maxent.accuracy()
elif (algorithm == 'svm'):
    #trainingDataFile = 'data/training_trimmed.csv'
    trainingDataFile = 'data/full_training_dataset.csv'
    classifierDumpFile = 'data/test/svm_test_model.pickle'
    trainingRequired = 1
    print 'Started to instantiate SVM Classifier'
    sys.stdout.flush()
    sc = libsvm_classifier.SVMClassifier(tweets, keyword, time,\
                                  trainingDataFile, classifierDumpFile, trainingRequired)
    #sc.classify()
    print 'Computing Accuracy'
    sys.stdout.flush()
    sc.accuracy()
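
# Interactive console driver: loops over a menu that lets the user pick an
# algorithm, choose between accuracy testing and classification, and enter a
# keyword before running the selected classifier.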
def app():

    keyword = 'apple'
    method = 'svm'
    training_required = 1
    classify_enabled = 1

    while 1:
        print("\n\n\n-------------------------- 0 --------------------------")
        print("Please select a algorithm:")
        print("0 - Exit from application")
        print("1 - Naive Bayes Algorithm")
        print("2 - Maximum Entropy Algorithm")
        print("3 - Support Vector Machine (SVM)")

        input_data = raw_input("Enter Value : ")
        if input_data.isdigit() and 0 < int(input_data) <= 3:
            method = int(input_data)
        elif input_data.isdigit() and int(input_data) == 0:
            break
        else:
            print("Wrong Selection! Read : " + input_data)
            continue

        print("\nCalculate Accuracy or Classify?")
        print("0 - Exit from application")
        print("1 - Accuracy")
        print("2 - Classify")
        input_data = raw_input("Enter Value : ")
        if input_data.isdigit() and 0 < int(input_data) <= 2:
            classify_enabled = int(input_data) - 1
        elif input_data.isdigit() and int(input_data) == 0:
            break
        else:
            print("Wrong Selection! Read : " + input_data)
            continue

        if classify_enabled == 1:
            print("\nIs training required?")
            print("0 - Exit from application")
            print("1 - Not required")
            print("2 - Required")
            input_data = raw_input("Enter Value : ")
            if input_data.isdigit() and 0 < int(input_data) <= 2:
                training_required = int(input_data) - 1
            elif input_data.isdigit() and int(input_data) == 0:
                break
            else:
                print("Wrong Selection! Read : " + input_data)
                continue

            print("\nEnter a keyword for test")
            input_data = raw_input("Enter Keyword : ")
            keyword = input_data

            print("Selected Keyword: " + keyword)

        print("-------------------------- = --------------------------\n\n\n")

        training_data_file = 'data-set/Airline-Sentiment.csv'

        if classify_enabled:
            tweets = twitterClient.getTwitterData(keyword)
            if not tweets:
                print("Tweet couldn't be fetched")
                return

        if method == 1:
            print("Method: naive bayes")
            if classify_enabled:

                classifier_dump_file = 'data-set/nb_trained_model.pickle'
                nb = naive_bayes_classifier.NaiveBayesClassifier(
                    training_data_file, classifier_dump_file)
                nb.classify(tweets, training_required)

            else:
                classifier_dump_file = 'data-set/nb_trained_model_acc.pickle'
                nb = naive_bayes_classifier.NaiveBayesClassifier(
                    training_data_file, classifier_dump_file)
                nb.accuracy()

        elif method == 2:
            print("Method: Max Entropy")
            if classify_enabled:

                classifier_dump_file = 'data-set/maxent_trained_model.pickle'
                maxent = max_entropy_classifier.MaxEntClassifier(
                    training_data_file, classifier_dump_file)
                maxent.classify(tweets, training_required)

            else:
                classifier_dump_file = 'data-set/maxent_trained_model_acc.pickle'
                maxent = max_entropy_classifier.MaxEntClassifier(
                    training_data_file, classifier_dump_file)
                maxent.accuracy()

        elif method == 3:
            print("Method: Support Vector Machine")
            if classify_enabled:

                classifier_dump_file = 'data-set/svm_trained_model.pickle'
                sc = libsvm_classifier.SVMClassifier(training_data_file,
                                                     classifier_dump_file)
                sc.classify(tweets, training_required)

            else:
                classifier_dump_file = 'data-set/svm_trained_model_acc.pickle'
                sc = libsvm_classifier.SVMClassifier(training_data_file,
                                                     classifier_dump_file)
                sc.accuracy()
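
# A minimal entry point for the console driver above. This is a sketch, not part
# of the original snippets: it assumes this module defines app() as shown and that
# twitterClient and the classifier modules are importable at module scope.
if __name__ == '__main__':
    app()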