def process_document():
	import nltk
	from pymongo import TEXT

	if 'corpus' in session:
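		# Connect to the Stanford NER server, assumed to be running locally
		# on port 8080 (pyner's socket client).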
		tagger = ner.SocketNER(host="localhost", port=8080)
		collection_name = session['corpus']
		folder_name = session['folder_name']
		destination_path = os.path.join(app.config['UPLOAD_FOLDER'], collection_name, folder_name)
		page_rank_summarizer = PageRankSummarizer()
		tfidf_parser = TFIDF()

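		# Two MongoDB collections per corpus: "<corpus>_content" stores individual
		# sentences for full-text search, and "<corpus>" stores one record per document.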
		content_collection_name = collection_name + "_content"
		content_table = DBUtils().get_collection_obj(content_collection_name)
		new_collection = DBUtils().get_collection_obj(collection_name)
		
		for input_file_name in glob.glob(os.path.join(destination_path, "*.txt")):
			dict_entities = {}
			file_content = []

			no_of_entities = 0

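			# Read the file, replace non-ASCII characters with spaces, and drop blank lines.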
			with open(input_file_name, 'r') as input_file:
				file_line_content = [re.sub(r'[^\x00-\x7F]+', ' ', line.rstrip('\n\r')).strip() for line in input_file]
			file_line_content = [line for line in file_line_content if line]
			for line in file_line_content:
				dict_line_entities = tagger.get_entities(line)
				file_content.append(tagger.tag_text(line))
				
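				# Merge this line's entities into the per-document map, de-duplicating values.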
				for key, value in dict_line_entities.items():
					no_of_entities += len(value)
					if key in dict_entities:
						dict_entities[key] = list(set(dict_entities[key] + value))
					else:
						dict_entities[key] = list(set(value))
			
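			# Join the tagged lines; store each of the document's sentences for full-text search.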
			list_entity_frequency = []
			str_content = " ".join(file_content)
			sentences = [{"__content": x} for x in nltk.sent_tokenize(" ".join(file_line_content))]
			if sentences:
				content_table.insert_many(sentences)

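			# Count how often each tagged entity string (e.g. "<PERSON>John</PERSON>")
			# occurs in the tagged document text.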
			value_template = "<{{type}}>{{value}}</{{type}}>"
			for entity_type, list_value in dict_entities.items():
				for value in list_value:
					value_str = value_template.replace("{{type}}", entity_type).replace("{{value}}", value)
					list_entity_frequency.append([value_str,str_content.count(value_str)])

			dict_entities['__entity_frequency'] = list_entity_frequency
			dict_entities['__word_frequency'] = tfidf_parser.compute_word_frequency(file_line_content)

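			# Document-level metrics: word count, entity count, and TextBlob sentiment scores.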
			blob_file_content = TextBlob(str_content)
			dict_entities['__document_length'] = len(re.findall(r'\w+', str_content))
			dict_entities['__num_entities'] = no_of_entities
			dict_entities['__polarity'] = blob_file_content.sentiment.polarity
			dict_entities['__subjectivity'] = blob_file_content.sentiment.subjectivity
			
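			# Keep both the NER-tagged lines and the raw cleaned lines with the record.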
			dict_entities['__formatted_content'] = file_content
			dict_entities['__content'] = file_line_content

			# if(request.form['title'] != "?"):
			# 	selected_title_option = int(request.form['title'])
			# 	if(selected_title_option == 1):
			# 		dict_entities['TITLE'] = os.path.basename(input_file_name)
			# 	else:
			# 		dict_entities['TITLE'] = file_line_content[selected_title_option-1]

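			# Summarize the document with PageRank, using the line count requested in the upload form.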
			dict_entities['SUMMARY'] = page_rank_summarizer.summarize(file_line_content, int(request.form['summary-lines']))
			dict_entities['ID'] = os.path.basename(input_file_name)
			dict_entities['__read_count'] = 0
			new_collection.insert_one(dict_entities)
			
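		# The text index lets the sentence collection be searched with MongoDB $text queries.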
		content_table.create_index([('__content', TEXT)], default_language='english')

		## Generate a table with the names of all the columns so it can be referenced later.
		## Caution: needs to be updated whenever a new entity type is created.
		DBUtils().generate_keys_table(collection_name)
		
		# Set the session token for this corpus.
		session['token'] = collection_name
		# os.remove(os.path.join(app.config['UPLOAD_FOLDER'], collection_name))
		return json.dumps({"success": True, "redirect": url_for('.visualize')})

	return json.dumps({"success": False})
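
# Sketch (not part of the pipeline): once a corpus has been processed, its
# sentence collection can be queried through the text index built above.
# The corpus name "news" here is hypothetical.
#
#   content_table = DBUtils().get_collection_obj("news_content")
#   for match in content_table.find({"$text": {"$search": "election"}}):
#       print(match["__content"])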