# Example no. 1 (score: 0)
def home(request):
	"""Render the home page with the list of texts.

	GET parameters:
	  id      -- primary key of the Text to display.
	  analyse -- when truthy, tokenise the text if it has no words yet.

	For an analysed text, the context receives ``word_name`` /
	``word_count`` / ``word_range`` describing the (up to) 15 most
	frequent words for the charting template.
	"""
	text_list = Text.objects.all()

	template = loader.get_template('home.html')

	context = Context({
		'text_list': text_list,
	})

	if request.method == 'GET':
		# Local renamed from ``id`` to avoid shadowing the builtin;
		# the query-string parameter name is unchanged.
		text_id = request.GET.get('id', '')
		analyse = request.GET.get('analyse', '')

		if text_id:
			text = Text.objects.get(id=text_id)
			context['text_object'] = text

			# Tokenise on demand, but only for texts not yet analysed.
			if analyse and len(text.words.all()) == 0:
				token = Tokenizer(text.id)
				token.analyzeWords()

			if len(text.words.all()) == 0:
				# Signal the template to offer the "analyse" action.
				context['analyse'] = 1
			else:
				words = text.words.all()
				wordcount = []

				for w in words:
					texthaswords = TextHasWords.objects.get(text=text, word=w)
					wordcount.append(WordCount(w.name, texthaswords.count))

				# BUG FIX: the original called sorted() and discarded the
				# result, so the list was never actually ordered.  Sort in
				# place, most frequent first.
				wordcount.sort(key=lambda wc: wc.count, reverse=True)

				# Chart at most the 15 most frequent words.
				wordrange = min(15, len(wordcount))

				# Serialise names/counts in the comma-separated form the
				# charting template expects.
				wordname = ''.join("'%s'," % wc.name for wc in wordcount[:wordrange])
				wordcounted = ''.join("%i, " % int(wc.count) for wc in wordcount[:wordrange])

				context['word_name'] = mark_safe(wordname)
				context['word_count'] = wordcounted
				context['word_range'] = wordrange

	return HttpResponse(template.render(context))
# Example no. 2 (score: 0)
def home(request):
	"""Render the home page with the list of texts.

	GET parameters:
	  id      -- primary key of the Text to display.
	  analyse -- when truthy, tokenise the text if it has no words yet.

	For an analysed text the context receives the top-15 word chart data
	(``word_name`` / ``word_count`` / ``word_range``) plus ``it_words`` /
	``med_words``: how many of the charted words belong to the 'IT' and
	'MEDICAL' domain vocabularies.
	"""
	text_list = Text.objects.all()

	template = loader.get_template('home.html')

	context = Context({
		'text_list': text_list,
	})

	if request.method == 'GET':
		# Local renamed from ``id`` to avoid shadowing the builtin.
		text_id = request.GET.get('id', '')
		analyse = request.GET.get('analyse', '')

		if text_id:
			text = Text.objects.get(id=text_id)
			context['text_object'] = text

			# Tokenise on demand, but only for texts not yet analysed.
			if analyse and len(text.words.all()) == 0:
				token = Tokenizer(text.id)
				token.analyzeWords()

			# BUG FIX: it_words/med_words were only assigned inside the
			# else-branch below but read unconditionally afterwards,
			# raising NameError for texts with no analysed words.
			it_words = 0
			med_words = 0

			if len(text.words.all()) == 0:
				# Signal the template to offer the "analyse" action.
				context['analyse'] = 1
			else:
				words = text.words.all()
				wordcount = []

				for w in words:
					texthaswords = TextHasWords.objects.get(text=text, word=w)
					wordcount.append(WordCount(w.name, texthaswords.count))

				# BUG FIX: the original called sorted() and discarded the
				# result; sort in place, most frequent first.
				wordcount.sort(key=lambda wc: wc.count, reverse=True)

				# Chart at most the 15 most frequent words.
				wordrange = min(15, len(wordcount))

				wordname = ''.join("'%s'," % wc.name for wc in wordcount[:wordrange])
				wordcounted = ''.join("%i, " % int(wc.count) for wc in wordcount[:wordrange])

				context['word_name'] = mark_safe(wordname)
				context['word_count'] = wordcounted
				context['word_range'] = wordrange

				# Collect the IT and MEDICAL vocabularies.  BUG FIX:
				# itterms/medterms were undefined when no domain of that
				# name existed; default to empty collections.  Sets give
				# O(1) membership tests for the counting loop below.
				itlist = set()
				medlist = set()
				for domain in Domain.objects.all():
					if domain.name == 'IT':
						itlist = set(term.name for term in domain.terms.all())
					elif domain.name == 'MEDICAL':
						medlist = set(term.name for term in domain.terms.all())

				# Count charted words per domain vocabulary.  (Debug
				# ``print itlist`` removed.)
				for wc in wordcount[:wordrange]:
					if wc.name in itlist:
						it_words += 1
					if wc.name in medlist:
						med_words += 1

			context['it_words'] = it_words
			context['med_words'] = med_words

	return HttpResponse(template.render(context))
# Example no. 3 (score: 0)
def home(request):
	"""Render the home page; analyse a selected text and build chart data.

	GET parameters:
	  id      -- primary key of the Text to display.
	  analyse -- when truthy, (re-)tokenise the text before charting.

	For an analysed text the context receives:
	  * word_name / word_count / word_range -- top-15 frequency bar chart,
	  * domains -- per-domain counts over the charted words,
	  * one string entry per domain name, plus blacklist_count and
	    general_domain -- pie-chart token totals,
	  * itData / medData / genData / blackData -- frequency-distribution
	    series split by vocabulary membership.
	"""
	text_list = Text.objects.all()

	template = loader.get_template('home.html')

	context = Context({
		'text_list': text_list,
	})

	if request.method == 'GET':
		# Local renamed from ``id`` to avoid shadowing the builtin.
		text_id = request.GET.get('id', '')
		analyse = request.GET.get('analyse', '')

		if text_id:
			text = Text.objects.get(id=text_id)

			context['analyse'] = 1
			context['text_object'] = text

			# Unlike the other variants, this view re-analyses whenever
			# the ``analyse`` flag is present, even if words exist.
			if analyse:
				token = Tokenizer(text.id)
				token.analyzeWords()

			if len(text.words.all()) > 0:
				words = text.words.all()
				wordcount = []

				for w in words:
					texthaswords = TextHasWords.objects.get(text=text, word=w)
					wordcount.append(WordCount(w.name, texthaswords.count))

				# BUG FIX: the original called sorted() and discarded the
				# result, so the charts were never frequency-ordered.
				# Sort in place, most frequent first.
				wordcount.sort(key=lambda wc: wc.count, reverse=True)

				# Bar chart: at most the 15 most frequent words.
				wordrange = min(15, len(wordcount))

				wordname = ''
				wordcounted = ''
				for w in range(wordrange):
					wordname += "'%s'," % (wordcount[w].name)
				for w in range(wordrange):
					wordcounted += "%i, " % (int(wordcount[w].count))

				context['word_name'] = mark_safe(wordname)
				context['word_count'] = wordcounted
				context['word_range'] = wordrange

				domains = Domain.objects.all()
				blacklist = Blacklist.objects.all()

				# Per-domain counts over the charted (top-N) words,
				# matched case-sensitively as in the original.
				counted = []
				for domain in domains:
					termlist = [term.name for term in domain.terms.all()]
					counter = 0
					for w in range(wordrange):
						if wordcount[w].name in termlist:
							counter += 1
					counted.append(WordCount(domain.name, counter))

				context['domains'] = counted

				# --- Pie chart data ------------------------------------
				paragraph_tokens = re.findall(r'\w+', text.text)
				total_tokens = len(paragraph_tokens)

				itTerms = []
				medTerms = []

				for domain in domains:
					# Remember the IT / MEDICAL vocabularies for the
					# frequency-distribution section further down.
					if domain.name == "IT":
						itTerms = domain.terms.all()
					if domain.name == "MEDICAL":
						medTerms = domain.terms.all()

					# Case-insensitive token counting per domain.
					# (Inner loop variable renamed from ``token`` to
					# ``tok`` -- it shadowed the Tokenizer instance.)
					termlist = [term.name.upper() for term in domain.terms.all()]
					counter = 0
					for tok in paragraph_tokens:
						if tok.upper() in termlist:
							counter += 1

					# NOTE: stored as a string; read back with int() below.
					context[domain.name] = str(counter)

				blacklist_count = 0
				for tok in paragraph_tokens:
					for blacklist_terms in blacklist:
						if tok.upper() == str(blacklist_terms).upper():
							blacklist_count += 1

				context['blacklist_count'] = str(blacklist_count)
				# NOTE(review): assumes domains named 'MEDICAL' and 'IT'
				# exist -- raises KeyError otherwise; confirm fixtures.
				context['general_domain'] = str(total_tokens - int(context['MEDICAL']) - int(context['IT']) - blacklist_count)

				# (py2 debug ``print`` statement removed here.)

				# --- Frequency distribution data -----------------------
				# Widen the range to every counted word (capped by the
				# number of distinct words actually available).
				wordrange = min(total_tokens, len(wordcount))

				itTermList = [term.name.upper() for term in itTerms]
				medTermList = [term.name.upper() for term in medTerms]
				blackTermList = [str(term).upper() for term in blacklist]

				itData = ''
				medData = ''
				genData = ''
				blackData = ''

				# Route each word's count into exactly one series, with
				# IT taking precedence over MEDICAL over blacklist.
				for w in range(wordrange):
					if wordcount[w].name.upper() in itTermList:
						itData += "%i, " % (int(wordcount[w].count))
					elif wordcount[w].name.upper() in medTermList:
						medData += "%i, " % (int(wordcount[w].count))
					elif wordcount[w].name.upper() in blackTermList:
						blackData += "%i, " % (int(wordcount[w].count))
					else:
						genData += "%i, " % (int(wordcount[w].count))

				context['itData'] = itData
				context['medData'] = medData
				context['genData'] = genData
				context['blackData'] = blackData

	return HttpResponse(template.render(context))