# Example #1
def classify_tweets(request):
        """Search Twitter for the POSTed query and classify each tweet.

        Fetches tweets via the Twitter API, preprocesses each one, runs it
        through the Naive Bayes classifier, and renders index.html with the
        raw tweets, their per-tweet 'pos'/'neg' labels, and the aggregate
        positive/negative scores.
        """
        # SECURITY: API credentials are hard-coded in source. These should be
        # moved to settings/environment variables and the keys revoked.
        consumer_key = "Wb4W1n264iHhcrqcXt54bA"
        consumer_secret = "2NFs7pO610XKQUOs5hPAz8wCEO4uxmP3111HPhsmgc"
        access_token = "36641014-28RR3YAp6MxFxJ706gsp5a7bRy0sYDsjLCwixs2iM"
        access_token_secret = "qOGQg84VvurJKX9qSF3Zgl973BxF6ryt7Yruoxtw"

        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)

        query = request.POST.get('query')
        search_results = api.search(query)

        tweets = []
        for status in search_results:
                try:
                        tweets.append(str(status.text))
                except Exception:
                        # Skip tweets whose text can't be coerced to str
                        # (e.g. non-ASCII under Python 2) rather than fail
                        # the whole view.
                        pass

        # The category totals do not change while classifying, so fetch them
        # once instead of once per tweet (was a per-tweet DB round trip).
        # The guard preserves the original behavior of issuing no queries
        # when the search returned no usable tweets.
        tweet_counts = []
        token_counts = []
        if tweets:
                totals = tweet_category_count.objects.get(id=1)
                tweet_counts = [totals.positive_count, totals.negative_count]
                totals = token_category_count.objects.get(id=1)
                token_counts = [totals.positive_count, totals.negative_count]

        classification = []
        posScore = 0
        negScore = 0
        for tweet in tweets:
                tokens = tweet.split()
                # Strip noise words, @names and hyperlinks in place.
                data_preprocess.remove_noise_words(tokens)
                data_preprocess.remove_names(tokens)
                data_preprocess.remove_links(tokens)

                # Per-token occurrence counts in each category; defaults to 0
                # for tokens never seen during training.
                category_counts = defaultdict(lambda: defaultdict(int))
                for token in tokens:
                        try:
                                category_counts[token]['pos'] = pos_tokens.objects.get(ptoken=token).pcount
                        except Exception:
                                category_counts[token]['pos'] = 0
                        try:
                                category_counts[token]['neg'] = neg_tokens.objects.get(ntoken=token).ncount
                        except Exception:
                                category_counts[token]['neg'] = 0

                classifier = NaiveBayesClassifier()
                # Use a dedicated name for the label; the original reused
                # `result` for both the API search result and this value.
                label = classifier.classify(tokens, category_counts, tweet_counts, token_counts)
                if label == 'pos':
                        posScore += 1
                else:
                        negScore += 1
                classification.append(label)

        return render_to_response("index.html",
                                  {'tweets': tweets,
                                   'pos_neg': classification,
                                   'posScore': posScore,
                                   'negScore': negScore})
# Example #2
        for index in request.POST:
                (ind,a)=index.split(':')
                if request.POST.get(index,''):
                        if(request.POST[index]=='pos'):
                                out_file=open("positive.txt",'a')
<<<<<<< HEAD
				m.update(current_tweets[int(a)-1])		
                                p=data_set(tweet=current_tweets[int(a)-1],tweet_hash=m.hexdigest(),pos_neg='pos',movie_name=current_tweets[0])
                                p.save()
                        else:  
				m.update(current_tweets[int(a)-1])
                                out_file=open("negative.txt",'a')
                                p=data_set(tweet=current_tweets[int(a)-1],tweet_hash=m.hexdigest(),pos_neg='neg',movie_name=current_tweets[0])
                                p.save()
                        tokens=current_tweets[int(a)-1].split()
                        data_preprocess.remove_noise_words(tokens)
                        data_preprocess.remove_names(tokens)
                        data_preprocess.remove_links(tokens)
=======
                        else:
                                out_file=open("negative.txt",'a')
                        tokens=current_tweets[int(a)-1].split()
>>>>>>> 89aa7e23b0b2789d0986b7c01aff694715d5590b
                        for token in tokens:
                                out_file.write(token+'\n')
        return render_to_response("tweetsSaved.html")
        
# Example #3
def save_tweets(request):
        """Persist the user's pos/neg labels for the recently fetched tweets.

        Examines every POST variable (key format "<name>:<position>" with a
        1-based position into current_tweets; slot 0 is the movie name, the
        value is 'pos' or 'neg'). Each labelled tweet is stored as a data_set
        row with an md5 digest of its text, the per-category tweet and token
        counters are updated, and the cleaned tokens are appended to
        positive.txt / negative.txt. Renders tweetsSaved.html when done.
        """
        # Unpickle the list of recent tweets retrieved by the search view.
        with open('current_tweets.txt', 'rb') as file_id:
                current_tweets = pickle.load(file_id)

        movie_tokens = set(current_tweets[0].split())

        for index in request.POST:
                (ind, a) = index.split(':')
                if not request.POST.get(index, ''):
                        continue
                category = 'pos' if request.POST[index] == 'pos' else 'neg'
                tweet_text = current_tweets[int(a) - 1]

                # Hash each tweet independently. The original reused a single
                # md5 object and called update() in a loop, so a tweet's
                # stored hash depended on every tweet saved before it in the
                # same request — defeating hash-based duplicate detection.
                tweet_hash = hashlib.md5(tweet_text).hexdigest()
                data_set(tweet=tweet_text, tweet_hash=tweet_hash,
                         pos_neg=category, movie_name=current_tweets[0]).save()
                _bump_tweet_count(category)

                # Remove noise words, @names, hyperlinks, and the movie-name
                # tokens before persisting the rest for the classifier.
                tokens = tweet_text.split()
                data_preprocess.remove_noise_words(tokens)
                data_preprocess.remove_names(tokens)
                data_preprocess.remove_links(tokens)
                tokens = set(tokens) - movie_tokens

                out_name = "positive.txt" if category == 'pos' else "negative.txt"
                # `with` guarantees the training file is closed (the original
                # leaked the handle on every request).
                with open(out_name, 'a') as out_file:
                        for token in tokens:
                                out_file.write(token + '\n')
                                _bump_token_counts(category, token)

        return render_to_response("tweetsSaved.html")


def _bump_tweet_count(category):
        """Increment the global pos/neg tweet counter, creating row id=1 on first use."""
        try:
                p = tweet_category_count.objects.get(id=1)
        except Exception:  # counter row not created yet (was a bare except)
                if category == 'pos':
                        tweet_category_count(id=1, positive_count=1, negative_count=0).save()
                else:
                        tweet_category_count(id=1, positive_count=0, negative_count=1).save()
                return
        if category == 'pos':
                p.positive_count = p.positive_count + 1
        else:
                p.negative_count = p.negative_count + 1
        p.save()


def _bump_token_counts(category, token):
        """Update the global token counter and the per-token count for `category`."""
        try:
                q = token_category_count.objects.get(id=1)
                if category == 'pos':
                        q.positive_count = q.positive_count + 1
                else:
                        q.negative_count = q.negative_count + 1
                q.save()
        except Exception:  # counter row not created yet (was a bare except)
                if category == 'pos':
                        token_category_count(id=1, positive_count=1, negative_count=0).save()
                else:
                        token_category_count(id=1, positive_count=0, negative_count=1).save()
        if category == 'pos':
                try:
                        q = pos_tokens.objects.get(ptoken=token)
                        q.pcount = q.pcount + 1  # token seen before: bump its count
                        q.save()
                except Exception:  # first sighting of this positive token
                        pos_tokens(ptoken=token, pcount=1).save()
        else:
                try:
                        q = neg_tokens.objects.get(ntoken=token)
                        q.ncount = q.ncount + 1  # token seen before: bump its count
                        q.save()
                except Exception:  # first sighting of this negative token
                        neg_tokens(ntoken=token, ncount=1).save()