Esempio n. 1
0
        and write the positive and negative marked tweets
        into corresponding files after breaking the
        tweets into tokens"""
<<<<<<< HEAD
        m = hashlib.md5()
=======
        
>>>>>>> 89aa7e23b0b2789d0986b7c01aff694715d5590b
        for index in request.POST:
                (ind,a)=index.split(':')
                if request.POST.get(index,''):
                        if(request.POST[index]=='pos'):
                                out_file=open("positive.txt",'a')
<<<<<<< HEAD
				m.update(current_tweets[int(a)-1])		
                                p=data_set(tweet=current_tweets[int(a)-1],tweet_hash=m.hexdigest(),pos_neg='pos',movie_name=current_tweets[0])
                                p.save()
                        else:  
				m.update(current_tweets[int(a)-1])
                                out_file=open("negative.txt",'a')
                                p=data_set(tweet=current_tweets[int(a)-1],tweet_hash=m.hexdigest(),pos_neg='neg',movie_name=current_tweets[0])
                                p.save()
                        tokens=current_tweets[int(a)-1].split()
                        data_preprocess.remove_noise_words(tokens)
                        data_preprocess.remove_names(tokens)
                        data_preprocess.remove_links(tokens)
=======
                        else:
                                out_file=open("negative.txt",'a')
                        tokens=current_tweets[int(a)-1].split()
>>>>>>> 89aa7e23b0b2789d0986b7c01aff694715d5590b
Esempio n. 2
0
def save_tweets(request):
        """Persist user-labelled tweets as naive-Bayes training data.

        Each POST key has the form '<name>:<index>' and its value is
        'pos' or 'neg'.  For every labelled tweet this view:
          * stores the tweet, its MD5 hash and its label in data_set
            (movie_name is taken from current_tweets[0]);
          * increments the running positive/negative tweet counters in
            tweet_category_count (row id=1, created on first use);
          * tokenizes the tweet, strips noise words, @names, hyperlinks
            and the movie-title words, appends the remaining tokens to
            positive.txt / negative.txt, and updates the per-token
            counters (token_category_count, pos_tokens / neg_tokens).

        Parameters:
            request: Django HttpRequest whose POST carries the labels.

        Returns:
            HttpResponse rendering "tweetsSaved.html".
        """
        # Unpickle the list of recently retrieved tweets.
        # NOTE(review): pickle.load is only safe because this file is
        # written by this application itself; never point it at
        # untrusted data.
        with open('current_tweets.txt', 'rb') as file_id:
                current_tweets = pickle.load(file_id)

        for index in request.POST:
                (ind, a) = index.split(':')
                label = request.POST.get(index, '')
                if not label:
                        continue

                tweet = current_tweets[int(a) - 1]
                is_positive = (label == 'pos')

                # Hash each tweet independently.  (The original code
                # reused one md5 object with cumulative update() calls,
                # so a tweet's stored hash depended on every tweet
                # processed before it — a bug.)  The encode guard keeps
                # this working whether tweets are str or bytes.
                tweet_bytes = tweet if isinstance(tweet, bytes) else tweet.encode('utf-8')
                tweet_hash = hashlib.md5(tweet_bytes).hexdigest()

                # Record the labelled tweet itself.
                p = data_set(tweet=tweet,
                             tweet_hash=tweet_hash,
                             pos_neg='pos' if is_positive else 'neg',
                             movie_name=current_tweets[0])
                p.save()

                # Update the running count of pos/neg tweets seen so far.
                # Best-effort: if the singleton row does not exist yet
                # (or lookup fails), create it with this tweet counted.
                try:
                        counts = tweet_category_count.objects.get(id=1)
                        if is_positive:
                                counts.positive_count = counts.positive_count + 1
                        else:
                                counts.negative_count = counts.negative_count + 1
                        counts.save()
                except Exception:
                        tweet_category_count(
                                id=1,
                                positive_count=1 if is_positive else 0,
                                negative_count=0 if is_positive else 1,
                        ).save()

                # Tokenize, then remove noise words, @names and
                # hyperlinks from the tweet's tokens (in-place helpers).
                tokens = tweet.split()
                data_preprocess.remove_noise_words(tokens)
                data_preprocess.remove_names(tokens)
                data_preprocess.remove_links(tokens)
                # Drop the movie-title words from the token list.
                tokens = set(tokens) - set(current_tweets[0].split())

                out_name = "positive.txt" if is_positive else "negative.txt"
                # Context manager guarantees the file is closed (the
                # original leaked one open handle per labelled tweet).
                with open(out_name, 'a') as out_file:
                        for token in tokens:
                                out_file.write(token + '\n')

                                # Running count of pos/neg tokens seen.
                                try:
                                        q = token_category_count.objects.get(id=1)
                                        if is_positive:
                                                q.positive_count = q.positive_count + 1
                                        else:
                                                q.negative_count = q.negative_count + 1
                                        q.save()
                                except Exception:
                                        token_category_count(
                                                id=1,
                                                positive_count=1 if is_positive else 0,
                                                negative_count=0 if is_positive else 1,
                                        ).save()

                                # Per-token frequency table: bump the
                                # count if the token exists, else insert
                                # it with count 1.
                                if is_positive:
                                        try:
                                                r = pos_tokens.objects.get(ptoken=token)
                                                r.pcount = r.pcount + 1
                                                r.save()
                                        except Exception:
                                                pos_tokens(ptoken=token, pcount=1).save()
                                else:
                                        try:
                                                r = neg_tokens.objects.get(ntoken=token)
                                                r.ncount = r.ncount + 1
                                                r.save()
                                        except Exception:
                                                neg_tokens(ntoken=token, ncount=1).save()

        return render_to_response("tweetsSaved.html")