Example #1
 def preprocess_properties(self, usecase_name, runtime_url, api_endpoint,
                           properties):
     headers = ["usecase", "runtime_url", "api_endpoint", "properties"]
     row = [usecase_name, runtime_url, api_endpoint, properties]
     data = [headers, row]
     data_results = data_management_engine.get_data(self.p_auth,
                                                    "ecosystem_meta",
                                                    "dashboards", "{}",
                                                    1000000, "{}", 0)
     for entry in data_results:
         if entry["usecase"] != usecase_name:
             usecase = entry["usecase"]
             rurl = entry["runtime_url"]
             apend = entry["api_endpoint"]
             prop = entry["properties"]
             data.append([usecase, rurl, apend, prop])
     with open("tmp/properties.csv", "w", newline="") as f:
         writer = csv.writer(f)
         writer.writerows(data)
     predictor, database, feature_store, key_field = extract_properties(
         properties)
     auth = access.Authenticate(runtime_url)
     self.setup_use_case_straight(auth, properties)
     self.load_use_case(usecase_name, database, key_field, predictor,
                        feature_store, properties, runtime_url,
                        api_endpoint)
     data_management_engine.drop_document_collection(
         self.p_auth, "ecosystem_meta", "dashboards")
     upload_import_pred(self.p_auth, self.data_path, "tmp/properties.csv",
                        "ecosystem_meta", "dashboards", "properties.csv")
Example #2
 def append_graphing_state(self, a_id, a_type, state):
     headers = [
         "analysis_id", "analysis_type", "created_by", "created", "state"
     ]
     user = self.user
     dt = datetime.datetime.now()
     row = [a_id, a_type, user, str(dt), json.dumps(state)]
     data = [headers, row]
     data_results = data_management_engine.get_data(self.p_auth,
                                                    "ecosystem_meta",
                                                    "dashboards_gs", "{}",
                                                    1000000, "{}", 0)
     for entry in data_results:
         if entry["analysis_id"] != a_id or entry["analysis_type"] != a_type:
             analysis_id = entry["analysis_id"]
             analysis_type = entry["analysis_type"]
             analysis_user = entry["created_by"]
             analysis_time = entry["created"]
             a_state = entry["state"]
             data.append([
                 analysis_id, analysis_type, analysis_user, analysis_time,
                 a_state
             ])
     with open("tmp/graphing_states.csv", "w", newline="") as f:
         writer = csv.writer(f)
         writer.writerows(data)
     data_management_engine.drop_document_collection(
         self.p_auth, "ecosystem_meta", "dashboards_gs")
     upload_import_pred(self.p_auth, self.data_path,
                        "tmp/graphing_states.csv", "ecosystem_meta",
                        "dashboards_gs", "graphing_states.csv")
Example #3
 def get_documents_for_key_value_header(self, usecase_name, value):
     use_case = self.use_cases[usecase_name]
     database = use_case["database"]
     collection = use_case["feature_store"]
     field = use_case["key_field"]
     find = "{}"
     if type(value) == str:
         if represents_int(value) or represents_float(value):
             find = "{'" + field + "': " + value + "}"
         else:
             find = "{'" + field + "': '" + value + "'}"
     else:
         find = "{'" + field + "': " + str(value) + "}"
     projections = "{}"
     total_to_process = 2
     skip = 0
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, find,
                                               total_to_process,
                                               projections, skip)
     columns = []
     # Build column headers from the keys of the first matching document.
     for doc in results:
         for key in doc.keys():
             columns.append({"name": key, "id": key})
         break
     return columns
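Examples #3 and #4 call `represents_int` and `represents_float` helpers that are not shown on this page. Presumably they just test whether a string parses as a number, along these lines (assumed implementations, not the originals):

```python
# Assumed implementations: the examples only show these helpers being called.
def represents_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False

def represents_float(s):
    try:
        float(s)
        return True
    except ValueError:
        return False
```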
Example #4
 def get_documents_for_key_value(self, usecase_name, value):
     use_case = self.use_cases[usecase_name]
     database = use_case["database"]
     collection = use_case["feature_store"]
     field = use_case["key_field"]
     find = "{}"
     if type(value) == str:
         if represents_int(value) or represents_float(value):
             find = "{'" + field + "': " + value + "}"
         else:
             find = "{'" + field + "': '" + value + "'}"
     else:
         find = "{'" + field + "': " + str(value) + "}"
     projections = "{}"
     total_to_process = 100
     skip = 0
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, find,
                                               total_to_process,
                                               projections, skip)
     for result in results:
         for key in result:
             value = result[key]
             if type(value) != str:
                 result[key] = str(value)
     return results
Example #5
 def get_graphing_state_names(self, a_type):
     data_results = data_management_engine.get_data(
         self.p_auth, "ecosystem_meta", "dashboards_gs",
         '{{"analysis_type":"{}"}}'.format(a_type), 1000000, "{}", 0)
     names = []
     for entry in data_results:
         names.append(entry["analysis_id"])
     return names
Example #6
 def get_graphing_state(self, a_id, a_type):
     data_results = data_management_engine.get_data(
         self.p_auth, "ecosystem_meta", "dashboards_gs",
         '{{"analysis_id":"{}","analysis_type":"{}"}}'.format(a_id, a_type),
         1000000, "{}", 0)
     state = {}
     for entry in data_results:
         state = json.loads(entry["state"])
     return state
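Examples #2, #5 and #6 form a small persistence API for graphing state: `append_graphing_state` writes a state, `get_graphing_state_names` lists the saved ids for an analysis type, and `get_graphing_state` reads one back. A hedged round-trip sketch, assuming `dash` is an instance of the class these methods belong to:

```python
# Round trip, assuming `dash` exposes the three methods shown above.
dash.append_graphing_state("sales_q1", "barchart", {"x": "month", "y": "revenue"})
print(dash.get_graphing_state_names("barchart"))   # ["sales_q1", ...]
state = dash.get_graphing_state("sales_q1", "barchart")
print(state["x"])                                  # "month"
```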
Example #7
		def get_ner(self, input_database, input_collection):
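			# Note: `auth` and `key_value` are free variables resolved from the
			# enclosing generate_dashboard scope (see Examples #19 and #20); the
			# same applies to the similar excerpts in Examples #10, #13 and #14.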
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip)
			apos_df = pd.DataFrame(data)
			apos_df = apos_df.loc[apos_df[key_value] == self.callid]
			return apos_df
Example #8
 def get_documents(self, database, collection, field, projections, limit,
                   skip):
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, field, limit,
                                               projections, skip)
     for result in results:
         for key in result:
             value = result[key]
             if type(value) != str:
                 result[key] = str(value)
     return results
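Examples #4, #8, #15 and #16 all post-process the returned documents by stringifying every non-string value, presumably so the rows render cleanly in a dashboard table. The same loop can be factored into a small helper (a sketch, not part of the original code):

```python
# Equivalent helper for the stringify loop used in several examples.
def stringify_documents(results):
    for result in results:
        for key, value in result.items():
            if not isinstance(value, str):
                result[key] = str(value)
    return results
```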
Example #9
 def read_use_cases(self):
     database = "ecosystem_meta"
     collection = "dashboards"
     find = "{}"
     total_to_process = 100000
     projections = "{}"
     skip = 0
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, find,
                                               total_to_process,
                                               projections, skip)
Example #10
		def get_summary(self, input_database, input_collection):
			#Summary
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			asum_df = pd.DataFrame(data)
			asum_df = asum_df.loc[asum_df[key_value] == self.callid]
			return asum_df.iloc[0]["summary"]
Example #11
def read_data(**kwargs):
    p_auth = authenticate.prediction_login(**kwargs)
    database = "nlp_examples"
    collection = "nlp_example_text"
    field = "{}"
    limit = 0
    projections = "{}"
    skip = 0
    output = data_management_engine.get_data(p_auth, database, collection,
                                             field, limit, projections, skip)
    print(output)
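Example #11 is the only fully standalone function on this page; `authenticate.prediction_login` is presumably the factory for the auth object the other examples call `p_auth`. A hedged invocation with placeholder credentials; the actual keyword names accepted by `prediction_login` are not shown in these examples:

```python
# Placeholder keyword arguments; they are forwarded to
# authenticate.prediction_login, whose real parameter names are not shown here.
read_data(url="http://localhost:3001", username="admin", password="secret")
```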
Example #12
	def process_upload_btn_eventhandler(self, obj):
		use_case = self.use_cases[self.dropdown_case.value]
		for fp in self.to_upload.keys():
			if fp == "customers":
				feature_store = "customers_upload"
				data_management_engine.drop_document_collection(self.p_auth, use_case["database"], feature_store)
				upload_import_pred(self.p_auth, self.to_upload[fp], "/", use_case["database"], feature_store, self.to_upload[fp])
			elif fp == "transactions":
				feature_store = "transactions_upload"
				data_management_engine.drop_document_collection(self.p_auth, use_case["database"], feature_store)
				upload_import_pred(self.p_auth, self.to_upload[fp], "/", use_case["database"], feature_store, self.to_upload[fp])
			elif fp == "CTO":
				feature_store = "CTO_upload"
				data_management_engine.drop_document_collection(self.p_auth, use_case["database"], feature_store)
				upload_import_pred(self.p_auth, self.to_upload[fp], "/", use_case["database"], feature_store, self.to_upload[fp])
			else:
				print("ERROR unreachable state.")

		mongo_url = "mongodb://*****:*****@localhost:54445"
		destinations = "listOfDestinations.txt"
		db_name = "fnb"
		proc_tx_data = "transactions_upload"
		proc_customer_data = "customers_upload"
		cto_data = "CTO_upload"
		sample_tx_data_rollup = "transactions_rollup_test"
		sample_tx_data_rollup_norm = "transactions_rollup_normalise_test"
		script = 'python3 enrich_for_runtime.py "{}" "{}" "{}" "{}" "{}" "{}" "{}" "{}"'.format(mongo_url, destinations, db_name, proc_tx_data, proc_customer_data, cto_data, sample_tx_data_rollup, sample_tx_data_rollup_norm)
		utilities.execute_generic(self.p_auth, script)
		init_count = 0
		results = data_management_engine.get_data(self.p_auth, use_case["database"], "customers_upload", "{}", 1000000, "{}", 0)
		for doc in results:
			init_count += 1
		while True:
			re_count = 0
			results = data_management_engine.get_data(self.p_auth, use_case["database"], "customers_upload", "{CTO: { $exists: true }}", 1000000, "{}", 0)
			for doc in results:
				re_count += 1
			if re_count >= init_count:
				break
			time.sleep(5)
			print("{}/ {}".format(re_count, init_count))
Example #13
		def get_b5(self, input_database, input_collection):
			#Personality B5
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ab5_df = pd.DataFrame(data)
			ab5_df = ab5_df.loc[ab5_df[key_value] == self.callid]

			b5_list = ["conscientiousness","extraversion","stability","openess","agreeableness"]
			for b5_entry in b5_list:
				if ab5_df.iloc[0][b5_entry] == 1:
					return b5_entry
Example #14
		def get_mbti(self, input_database, input_collection):
			#Personality MBTI
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ambti_df = pd.DataFrame(data)
			ambti_df = ambti_df.loc[ambti_df[key_value] == self.callid]

			mbti_list = ["ISTJ","INTP","ESTJ","ISTP","ENTP","ESTP","INFJ","ISFJ","INFP","ENFJ","ESFJ","ENFP","ISFP","ESFP","INTJ","ENTJ"] 
			for mbti_entry in mbti_list:
				if ambti_df.iloc[0][mbti_entry] == 1:
					return(mbti_entry)
Example #15
 def direct_read_data_models(self, database, collection):
     find = "{}"
     projections = "{}"
     total_to_process = 100
     skip = 0
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, find,
                                               total_to_process,
                                               projections, skip)
     for result in results:
         for key in result:
             value = result[key]
             if type(value) != str:
                 result[key] = str(value)
     return results
Example #16
 def direct_read_data(self, database, collection, model):
     print('{{models:"{}"}}'.format(model))
     find = '{{models:"{}"}}'.format(model)
     projections = "{}"
     total_to_process = 100
     skip = 0
     results = data_management_engine.get_data(self.p_auth, database,
                                               collection, find,
                                               total_to_process,
                                               projections, skip)
     for result in results:
         for key in result:
             value = result[key]
             if type(value) != str:
                 result[key] = str(value)
     results = sorted(results, key=lambda i: i["datetime"])
     return results
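Examples #5, #6 and #16 build the find string with `str.format`, doubling the braces so that literal `{` and `}` survive formatting while the inner `{}` placeholders are substituted. For instance:

```python
# Doubled braces escape the literals; only the {} placeholder is substituted.
find = '{{models:"{}"}}'.format("churn_model")
print(find)  # {models:"churn_model"}
```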
Example #17
 def retrieve_properties(self):
     data_results = data_management_engine.get_data(self.p_auth,
                                                    "ecosystem_meta",
                                                    "dashboards", "{}",
                                                    1000000, "{}", 0)
     for entry in data_results:
         try:
             properties = entry["properties"]
             usecase_name = entry["usecase"]
             rurl = entry["runtime_url"]
             api_endpoint = entry["api_endpoint"]
             predictor, database, feature_store, key_field = extract_properties(
                 properties)
             self.load_use_case(usecase_name, database, key_field,
                                predictor, feature_store, properties, rurl,
                                api_endpoint)
         except Exception:
             # Skip entries with missing fields or malformed properties.
             continue
Example #18
    def spend_personality_process_uploads(self, usecase_name, tmp_file_path,
                                          c_path, c_filename, c_content,
                                          t_path, t_filename, t_content,
                                          cto_path, cto_filename, cto_content):
        c_fp = c_path + c_filename
        save_coded_file(c_content, c_fp)
        t_fp = t_path + t_filename
        save_coded_file(t_content, t_fp)
        cto_fp = cto_path + cto_filename
        save_coded_file(cto_content, cto_fp)

        use_case = self.use_cases[usecase_name]

        feature_store = "customers_upload"
        data_management_engine.drop_document_collection(
            self.p_auth, use_case["database"], feature_store)
        upload_import_pred(self.p_auth, self.data_path, c_fp,
                           use_case["database"], feature_store, c_filename)

        feature_store = "transactions_upload"
        data_management_engine.drop_document_collection(
            self.p_auth, use_case["database"], feature_store)
        upload_import_pred(self.p_auth, self.data_path, t_fp,
                           use_case["database"], feature_store, t_filename)

        feature_store = "CTO_upload"
        data_management_engine.drop_document_collection(
            self.p_auth, use_case["database"], feature_store)
        upload_import_pred(self.p_auth, self.data_path, cto_fp,
                           use_case["database"], feature_store, cto_filename)

        file_location = self.data_path
        py_file = file_location + "enrich_for_runtime.py"
        mongo_url = "mongodb://*****:*****@localhost:54445"
        destinations = file_location + "listOfDestinations.txt"
        db_name = use_case["database"]
        proc_tx_data = "transactions_upload"
        proc_customer_data = "customers_upload"
        cto_data = "CTO_upload"
        sample_tx_data_rollup = "transactions_rollup_test"
        sample_tx_data_rollup_norm = "transactions_rollup_normalise_test"
        script = "python3 {} {} {} {} {} {} {} {} {}".format(
            py_file, mongo_url, destinations, db_name, proc_tx_data,
            proc_customer_data, cto_data, sample_tx_data_rollup,
            sample_tx_data_rollup_norm)
        utilities.execute_generic(self.p_auth, script)
        init_count = 0
        results = data_management_engine.get_data(self.p_auth,
                                                  use_case["database"],
                                                  "customers_upload", "{}",
                                                  1000000, "{}", 0)
        for doc in results:
            init_count += 1
        while True:
            re_count = 0
            results = data_management_engine.get_data(
                self.p_auth, use_case["database"], "customers_upload",
                "{CTO: { $exists: true }}", 1000000, "{}", 0)
            for doc in results:
                re_count += 1
            if re_count >= init_count:
                break
            time.sleep(1)
        # re_upload
        filename = proc_customer_data + "_re.csv"
        filetype = "csv"
        field = "{}"
        sort = "{}"
        projection = "{}"
        limit = 0
        data_management_engine.export_documents(self.p_auth, filename,
                                                filetype, db_name,
                                                proc_customer_data, field,
                                                sort, projection, limit)
        path = self.data_path
        lines = 1000000
        content = worker_file_service.get_file_tail(self.p_auth, path,
                                                    filename, lines)
        save_file_text(content, tmp_file_path)
        target_path = use_case["data_path"]
        feature_store_file = "to_upload.csv"
        upload_import_runtime(use_case["auth"], tmp_file_path, target_path,
                              use_case["database"], use_case["feature_store"],
                              feature_store_file)
        upload_import_pred(self.p_auth, self.data_path, tmp_file_path,
                           use_case["database"], use_case["feature_store"],
                           feature_store_file)
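Examples #12 and #18 share the same wait pattern: count the uploaded customer documents, kick off the enrichment script, then poll until every document has a `CTO` field. Factored out, the pattern looks roughly like this (a sketch; `data_management_engine` and the collection name match the examples, the function name is hypothetical):

```python
import time

# Sketch of the polling loop from Examples #12 and #18.
def wait_for_enrichment(p_auth, database, collection="customers_upload",
                        interval=5):
    # Baseline: how many documents were uploaded.
    total = sum(1 for _ in data_management_engine.get_data(
        p_auth, database, collection, "{}", 1000000, "{}", 0))
    # Poll until the enrichment script has stamped a CTO field on each one.
    while True:
        enriched = sum(1 for _ in data_management_engine.get_data(
            p_auth, database, collection,
            "{CTO: { $exists: true }}", 1000000, "{}", 0))
        if enriched >= total:
            return
        time.sleep(interval)
```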


Example #19
def generate_dashboard(auth, table_name, collection_name, key_value):
	# 45666009
	field = '{"$or":[{"employee_number":98638320}]}'
	limit = 1000
	projections = "{}"
	skip = 0

	data = data_management_engine.get_data(auth, table_name, collection_name, field, limit, projections, skip)
	df = pd.DataFrame(data)
	
	input_database = "nlp_examples"
	input_collection = "call_data_words"
	field = '{"$or":[{"callid":45666009}]}'
	# field = '{"$or":[{"employee_number":98638320}]}'
	limit = 1000
	# projections = "{}"
	projections = "channel,callid,phrase,start_time,end_time"
	skip = 0

	data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
	df_wds = pd.DataFrame(data)

	choices = list(df[key_value])
	choices.sort()
	class Dashboard(param.Parameterized):
		callid = param.Selector(default=choices[0], objects=choices)
		def tab_output(self, output, tab_depth=430):
			return pn.Row(
				pn.Column(
					width=20
				),
				pn.Column(
					output,
					width=tab_depth
				)
			)
		def get_agent_text(self):
			df_cid = df.loc[df[key_value] == self.callid]
			value = df_cid.iloc[0]["agent"]
			return self.tab_output(value, tab_depth=400)

		def get_caller_text(self):
			df_cid = df.loc[df[key_value] == self.callid]
			value = df_cid.iloc[0]["caller"]
			return self.tab_output(value, tab_depth=400)

		def get_chat(self):
			groupings = []
			ss_df = df_wds.loc[df_wds[key_value] == self.callid]
			ss_df = ss_df.sort_values(by=["start_time"])
			previous = None
			group = ""
			for index, row in ss_df.iterrows():
				phrase = row["phrase"]
				channel = row["channel"]
				st = row["start_time"]
				et = row["end_time"]
				if previous is not None and channel != previous:
					dct = {
						"call_id": self.callid,
						"channel": previous,
						"text": group
					}
					groupings.append(dct)
					group = ""
				group = group + phrase + " "
				previous = channel
			# Append the final speaker turn, which the loop above never flushes.
			if group:
				groupings.append({
					"call_id": self.callid,
					"channel": previous,
					"text": group
				})

			chat = []
			for group in groupings:
				if group["channel"] == "left":
					row = pn.Row(
						pn.Column(
							group["text"],
							width=205
						),
						pn.Column(
							width=205
						)
					)
					chat.append(row)
				else:
					row = pn.Row(
						pn.Column(
							width=205
						),
						pn.Column(
							group["text"],
							width=205
						)
					)
					chat.append(row)
			print("HERE")
			print(chat)
			print("NOW")
			return pn.Column(*chat)

		def get_summary(self, input_database, input_collection):
			#Summary
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			asum_df = pd.DataFrame(data)
			asum_df = asum_df.loc[asum_df[key_value] == self.callid]
			return asum_df.iloc[0]["summary"]

		def get_summary_agent(self):
			value = self.get_summary(table_name, collection_name + "_agent_sum")
			return self.tab_output(value)

		def get_summary_caller(self):
			value = self.get_summary(table_name, collection_name + "_caller_sum")
			return self.tab_output(value)

		def get_b5(self, input_database, input_collection):
			#Personality B5
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ab5_df = pd.DataFrame(data)
			ab5_df = ab5_df.loc[ab5_df[key_value] == self.callid]

			b5_list = ["conscientiousness","extraversion","stability","openess","agreeableness"]
			for b5_entry in b5_list:
				if ab5_df.iloc[0][b5_entry] == 1:
					return b5_entry
		def get_b5_agent(self):
			value = self.get_b5(table_name, collection_name + "_agent_b5")
			return self.tab_output(value)

		def get_b5_caller(self):
			value = self.get_b5(table_name, collection_name + "_caller_b5")
			return self.tab_output(value)

		def get_mbti(self, input_database, input_collection):
			#Personality MBTI
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ambti_df = pd.DataFrame(data)
			ambti_df = ambti_df.loc[ambti_df[key_value] == self.callid]

			mbti_list = ["ISTJ","INTP","ESTJ","ISTP","ENTP","ESTP","INFJ","ISFJ","INFP","ENFJ","ESFJ","ENFP","ISFP","ESFP","INTJ","ENTJ"] 
			for mbti_entry in mbti_list:
				if ambti_df.iloc[0][mbti_entry] == 1:
					return(mbti_entry)

		def get_mbti_agent(self):
			value = self.get_mbti(table_name, collection_name + "_agent_mbti")
			return self.tab_output(value)

		def get_mbti_caller(self):
			value = self.get_mbti(table_name, collection_name + "_caller_mbti")
			return self.tab_output(value)

		#Personality POS
		def get_pos(self, input_database, input_collection):
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			apos_df = pd.DataFrame(data)
			apos_df = apos_df.loc[apos_df[key_value] == self.callid]
			return apos_df

		def get_pos_agent(self):
			return self.get_pos(table_name, collection_name + "_agent_pos")

		def get_pos_caller(self):
			return self.get_pos(table_name, collection_name + "_caller_pos")

		def get_pos_agent_verbs(self):
			pos_df = self.get_pos(table_name, collection_name + "_agent_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "VERB"]
			return self.tab_output(list(verb_df["token"]))

		def get_pos_caller_verbs(self):
			pos_df = self.get_pos(table_name, collection_name + "_caller_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "VERB"]
			return self.tab_output(list(verb_df["token"]))

		def get_pos_agent_proper_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_agent_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "PROPN"]
			verbs = list(verb_df["token"])
			verbs = ner_fix(verbs)
			return self.tab_output(verbs)

		def get_pos_caller_proper_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_caller_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "PROPN"]
			verbs = list(verb_df["token"])
			verbs = ner_fix(verbs)
			return self.tab_output(verbs)

		def get_pos_agent_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_agent_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "NOUN"]
			return self.tab_output(list(verb_df["token"]))

		def get_pos_caller_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_caller_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "NOUN"]
			return self.tab_output(list(verb_df["token"]))

		def get_pos_graph(self, input_database, input_collection):
			apos_df = self.get_pos(input_database, input_collection)
			num_records = len(apos_df.index)
			val_list = ["ADJ","ADP","ADV","AUX","CCONJ","DET","INTJ","NOUN","NUM","PART","PRON","PROPN","SCONJ","VERB","X"]
			num_list = []
			if num_records == 0:
				for val in val_list:
					num_list.append(0)
			else:
				for val in val_list:
					new_df = apos_df.loc[apos_df["feature"] == val]
					count = len(new_df.index)
					num_list.append(float(count) / float(num_records))

			return get_radio_graph(num_list, val_list, "Parts of Speech" , "Distribution")

		def get_pos_graph_agent(self):
			value = self.get_pos_graph(table_name, collection_name + "_agent_pos")
			return self.tab_output(value)

		def get_pos_graph_caller(self):
			value = self.get_pos_graph(table_name, collection_name + "_caller_pos")
			return self.tab_output(value)
	return Dashboard()
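Example #19 returns a `param.Parameterized` dashboard, so in a Panel app you would typically combine the call-id selector with one or more of the view methods in a layout. A minimal, hedged sketch; the auth object, database, collection and key-field arguments are placeholders:

```python
import panel as pn
pn.extension()

# Placeholder arguments: substitute a real auth object and collection names.
dash = generate_dashboard(auth, "nlp_examples", "call_data", "callid")
# Render the parameter selector above the chat view.
layout = pn.Column(pn.Param(dash.param), dash.get_chat())
layout.servable()
```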
Example #20
def generate_dashboard_demo(auth, table_name, collection_name, key_value):
	field = ""
	limit = 0
	projections = "{}"
	skip = 0

	data = data_management_engine.get_data(auth, table_name, collection_name, field, limit, projections, skip)
	df = pd.DataFrame(data)

	choices = list(df[key_value])
	choices.sort()
	class Dashboard(param.Parameterized):
		callid = param.Selector(default=choices[0], objects=choices)
		def tab_output(self, output, tab_depth=880):
			return pn.Row(
				pn.Column(
					width=20
				),
				pn.Column(
					output,
					width=tab_depth
				)
			)
		def get_agent_text(self):
			df_cid = df.loc[df[key_value] == self.callid]
			value = df_cid.iloc[0]["text"]
			return self.tab_output(value, tab_depth=800)

		def get_summary(self, input_database, input_collection):
			#Summary
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			asum_df = pd.DataFrame(data)
			asum_df = asum_df.loc[asum_df[key_value] == self.callid]
			return asum_df.iloc[0]["summary"]

		def get_summary_agent(self):
			value = self.get_summary(table_name, collection_name + "_summarize")
			return self.tab_output(value)


		def get_b5(self, input_database, input_collection):
			#Personality B5
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ab5_df = pd.DataFrame(data)
			ab5_df = ab5_df.loc[ab5_df[key_value] == self.callid]

			b5_list = ["conscientiousness","extraversion","stability","openess","agreeableness"]
			for b5_entry in b5_list:
				if ab5_df.iloc[0][b5_entry] == 1:
					return b5_entry
		def get_b5_agent(self):
			value = self.get_b5(table_name, collection_name + "_b5")
			return self.tab_output(value)


		def get_mbti(self, input_database, input_collection):
			#Personality MBTI
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			ambti_df = pd.DataFrame(data)
			ambti_df = ambti_df.loc[ambti_df[key_value] == self.callid]

			mbti_list = ["ISTJ","INTP","ESTJ","ISTP","ENTP","ESTP","INFJ","ISFJ","INFP","ENFJ","ESFJ","ENFP","ISFP","ESFP","INTJ","ENTJ"] 
			for mbti_entry in mbti_list:
				if ambti_df.iloc[0][mbti_entry] == 1:
					return(mbti_entry)

		def get_mbti_agent(self):
			value = self.get_mbti(table_name, collection_name + "_mbti")
			return self.tab_output(value)


		#Personality POS
		def get_pos(self, input_database, input_collection):
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip) 
			apos_df = pd.DataFrame(data)
			apos_df = apos_df.loc[apos_df[key_value] == self.callid]
			return apos_df

		def get_pos_agent(self):
			return self.get_pos(table_name, collection_name + "_pos")

		def get_pos_agent_verbs(self):
			pos_df = self.get_pos(table_name, collection_name + "_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "VERB"]
			return self.tab_output(list(verb_df["token"]))

		def get_pos_agent_proper_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "PROPN"]
			verbs = list(verb_df["token"])
			verbs = ner_fix(verbs)
			return self.tab_output(verbs)

		def get_pos_agent_nouns(self):
			pos_df = self.get_pos(table_name, collection_name + "_pos")
			verb_df = pos_df.loc[pos_df["feature"] == "NOUN"]
			return self.tab_output(list(verb_df["token"]))

		def get_ner(self, input_database, input_collection):
			field = "{}"
			limit = 0
			projections = "{}"
			skip = 0

			data = data_management_engine.get_data(auth, input_database, input_collection, field, limit, projections, skip)
			apos_df = pd.DataFrame(data)
			apos_df = apos_df.loc[apos_df[key_value] == self.callid]
			return apos_df

		def get_ner_agent(self):
			ner_df = self.get_ner(table_name, collection_name + "_ner")
			new_list = []
			words = list(ner_df["word"])
			entities = list(ner_df["entity"])
			for i in range(len(words)):
				new_list.append(words[i] + ": " + entities[i])
			return self.tab_output(new_list)

		def get_pos_graph(self, input_database, input_collection):
			apos_df = self.get_pos(input_database, input_collection)
			num_records = len(apos_df.index)
			val_list = ["ADJ","ADP","ADV","AUX","CCONJ","DET","INTJ","NOUN","NUM","PART","PRON","PROPN","SCONJ","VERB","X"]
			num_list = []
			if num_records == 0:
				for val in val_list:
					num_list.append(0)
			else:
				for val in val_list:
					new_df = apos_df.loc[apos_df["feature"] == val]
					count = len(new_df.index)
					num_list.append(float(count) / float(num_records))

			return get_radio_graph(num_list, val_list, "Parts of Speech" , "Distribution")

		def get_pos_graph_agent(self):
			value = self.get_pos_graph(table_name, collection_name + "_pos")
			return self.tab_output(value)

	return Dashboard()