def insight(statuses):
    """Aggregate tweet text from *statuses* and profile it with IBM
    Personality Insights.

    Args:
        statuses: iterable of tweet-like objects exposing a ``.text`` attr.

    Returns:
        (result, category, data): percentile values, trait ids, and the
        flattened raw profile dict produced by ``flatten``.
    """
    # "".join is O(n); the original built the corpus with repeated `+=`,
    # which is quadratic for large timelines.
    text = "".join(status.text for status in statuses)
    # NOTE(review): username/password is the legacy (pre-IAM) auth scheme;
    # pi_username / pi_password must be defined at module level.
    personality_insights = PersonalityInsightsV3(version="2016-10-19",
                                                 username=pi_username,
                                                 password=pi_password)
    pi_result = personality_insights.profile(text,
                                             accept='application/json',
                                             content_type='text/plain',
                                             consumption_preferences=False,
                                             raw_scores=False)
    data = flatten(pi_result)
    # The flattened profile exposes parallel iterables of trait ids and
    # percentile scores.
    category = list(data['id'])
    result = list(data['percentile'])
    return result, category, data
def analyzeFunc():
    """Flask view: run NLU emotion analysis and Personality Insights over
    the text supplied in the ``data`` query parameter.

    Returns:
        JSON body with ``emotions`` and ``personality`` keys, or a 400
        error when the parameter is missing.
    """
    if 'data' not in request.args:
        # BUG FIX: the original fell through and implicitly returned None
        # (an HTTP 500); report the missing parameter explicitly instead.
        return jsonify(error="missing 'data' query parameter"), 400
    text = request.args['data']
    # SECURITY(review): API keys are hard-coded in source — move them to
    # environment variables or app config.
    authenticatorNLU = IAMAuthenticator(
        'rW0-13R2RqRbko3bNzOaz1E8toSIy2qH1019AWiHkMZ9')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticatorNLU)
    natural_language_understanding.set_service_url(
        'https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )
    # Document-level emotion scores.
    emotions = natural_language_understanding.analyze(
        text=text,
        features=Features(emotion=EmotionOptions(document=True))).get_result()
    authenticatorPI = IAMAuthenticator(
        'uA6GpdKCXJyJCqJyhQEqwH9jSJxqlJhgYq7-uBBfPYL5')
    personality_insights = PersonalityInsightsV3(
        version='2017-10-13', authenticator=authenticatorPI)
    personality_insights.set_service_url(
        'https://gateway-lon.watsonplatform.net/personality-insights/api')
    personality = personality_insights.profile(
        text.encode('utf-8'), 'application/json').get_result()
    return jsonify(emotions=emotions, personality=personality)
def analyze_personality():
    """Profile the text in lemon.txt with Watson Personality Insights
    (Japanese input/output) and write the JSON result to result.json."""
    # Authenticate with the API key and point the client at the Tokyo
    # service instance.
    service = PersonalityInsightsV3(
        version='2018-10-30',
        authenticator=IAMAuthenticator(
            'tt0Ul73SdE0aOXsfeeAV55XHrBASyZs1ukRIZD3WWdYn'))
    service.set_service_url(
        'https://api.jp-tok.personality-insights.watson.cloud.ibm.com/instances/591407d0-781f-49cf-a85e-2315151d4f61'
    )

    # Analyze the personality of the text file given by absolute path.
    with open('C:/Users/Masafumi/PycharmProjects/kasahara/lemon.txt',
              'r', encoding="utf-8") as source:
        analysis = service.profile(source.read(),
                                   'application/json',
                                   content_language='ja',
                                   accept_language='ja').get_result()

    # Persist the profile as readable (non-ASCII-escaped) JSON.
    with open('C:/Users/Masafumi/PycharmProjects/kasahara/result.json',
              'w', encoding="utf-8") as sink:
        json.dump(analysis, sink, ensure_ascii=False, indent=2)
def post_manager():
    """Flask view: wrap the posted ``content`` form field in a Watson
    content-items document, persist it to profile.json, and return the
    Personality Insights profile for it."""
    json_str = {
        "contentItems": [{
            "content": request.form["content"],
            "contenttype": "text/plain",
            "language": "ja"
        }]
    }
    # BUG FIX: the document was written to "profile.json" in the current
    # working directory but read back from the script's directory; use one
    # path for both.  `with` also guarantees the handle is closed (the
    # original used bare open()/close()).
    profile_path = join(dirname(__file__), 'profile.json')
    with open(profile_path, "w") as f:
        json.dump(json_str, f, ensure_ascii=False, indent=4,
                  sort_keys=True, separators=(',', ': '))
    # SECURITY(review): API key hard-coded in source — externalize it.
    authenticator = IAMAuthenticator(
        'tt0Ul73SdE0aOXsfeeAV55XHrBASyZs1ukRIZD3WWdYn')
    service = PersonalityInsightsV3(version='2018-10-30',
                                    authenticator=authenticator)
    service.set_service_url(
        'https://api.jp-tok.personality-insights.watson.cloud.ibm.com/instances/591407d0-781f-49cf-a85e-2315151d4f61'
    )
    with open(profile_path) as profile_json:
        profile = service.profile(profile_json.read(),
                                  'application/json',
                                  content_type='application/json',
                                  consumption_preferences=True,
                                  raw_scores=True).get_result()
    return profile
def get_personality_insights():
    """
    generates horizontal bar graph of personality scores using IBM Personality Insights
    https://www.ibm.com/watson/services/personality-insights/

    Reads the module-level ``all_text`` corpus; sets the module-level
    ``is_personality_successful`` flag and saves one PNG per trait family
    (personality, needs, values) under ./temp/figures/.
    """
    global all_text, is_personality_successful
    authenticator = IAMAuthenticator(IBM_PERSONALITY_INSIGHTS_API_KEY)
    personality_insights = PersonalityInsightsV3(
        version='2017-10-13', authenticator=authenticator)
    personality_insights.set_service_url(IBM_PERSONALITY_INSIGHTS_SERVICE_URL)
    try:
        profile = personality_insights.profile(
            all_text, accept='application/json',
            raw_scores=True).get_result()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Still best-effort: flag failure and bail out.
        is_personality_successful = False
        return
    is_personality_successful = True
    # One horizontal bar chart per trait family.
    for attr in ('personality', 'needs', 'values'):
        labels = [quality['name'] for quality in profile[attr]]
        scores = [[quality['percentile'] * 100, quality['raw_score'] * 100]
                  for quality in profile[attr]]
        pd.DataFrame(scores, columns=['Percentile', 'Raw Score'], index=labels) \
            .plot(kind='barh', figsize=(8, 6),
                  title='Scores for ' + attr.capitalize()) \
            .get_figure().savefig(
                os.path.join('.', 'temp', 'figures', attr + '.png'),
                bbox_inches='tight')
    print('get_personality_insights finished')
def getPersonalityInsight(data, username=None):
    """Return a Personality Insights profile for *data*, reusing a cached
    per-user profile on disk when it still looks representative.

    Args:
        data: the text to profile.
        username: basename of the per-user cache file.  NOTE(review): the
            default None makes the path concatenation raise TypeError —
            presumably callers always pass a name; confirm.

    Returns:
        dict: the (possibly cached) profile.
    """
    profile_path = './data/profiles/' + username + '_profile.json'
    try:
        # use saved profile to limit API calls
        with open(profile_path) as json_file:
            profile = json.load(json_file)
        # Invalidate the cache when the new corpus is much longer than the
        # one the saved profile was built from.
        if len(data.split()) > profile['word_count'] * 2:
            # BUG FIX: the original did `raise '<string>'`, which raises
            # TypeError in Python 3 and only worked because of the bare
            # except below.
            raise ValueError('stale saved profile')
    except (OSError, ValueError, KeyError, TypeError):
        # Cache miss / stale / unreadable: query the service afresh.
        # NOTE(review): iam_apikey is the deprecated auth style, and
        # disabling SSL verification is unsafe — confirm both are intended.
        personality_insights = PersonalityInsightsV3(
            version='2017-10-13',
            iam_apikey=iam_apikey,
            url=
            'https://gateway-wdc.watsonplatform.net/personality-insights/api')
        personality_insights.disable_SSL_verification()
        profile = personality_insights.profile(
            data,
            'application/json',
            content_type='text/plain',
            consumption_preferences=True,
            raw_scores=True).get_result()
        # save user profile
        with open(profile_path, 'w') as outfile:
            json.dump(profile, outfile)
    return profile
def __init__(self, consumer_key, consumer_secret, access_token,
             acess_token_secret, MLapikey, MLurl):
    """Authenticate against both Twitter and IBM Personality Insights."""
    # Twitter OAuth handshake; the API client waits out rate limits
    # automatically.
    handler = tw.OAuthHandler(consumer_key, consumer_secret)
    handler.set_access_token(access_token, acess_token_secret)
    self.auth = handler
    self.api = tw.API(handler, wait_on_rate_limit=True)
    # Watson Personality Insights client bound to the given service URL.
    self.authenticator = IAMAuthenticator(MLapikey)
    self.personality_insights = PersonalityInsightsV3(
        version='2017-10-13', authenticator=self.authenticator)
    self.personality_insights.set_service_url(MLurl)
def personality_data(user_content):
    """Returns personality trait data for the user.

    SECURITY(review): the API key is hard-coded and uses the legacy
    iam_apikey auth style — externalize and migrate to IAMAuthenticator.
    """
    client = PersonalityInsightsV3(
        version='2017-10-13',
        iam_apikey='m8CcsJovJ2rfO9wpD0eSe0E6EFXTSNTwoc8fGIaj5RrR',
        url='https://gateway.watsonplatform.net/personality-insights/api')
    response = client.profile(user_content,
                              'application/json',
                              content_type='application/json',
                              consumption_preferences=True,
                              raw_scores=True)
    return response.get_result()
class PersonalityAnalyzer:
    """Wraps a shared IBM Watson Personality Insights client and converts
    profiles into Big Five percentile vectors."""

    # Shared client, created at class-definition time.
    # SECURITY(review): the API key is committed in source — externalize it.
    authenticator = IAMAuthenticator(
        'ALlGKY01Pa-PQHPoQeBULOGX-180mT7iGjuvD4cx9Mlq')
    personality_insights = PersonalityInsightsV3(version='2017-10-13',
                                                 authenticator=authenticator)
    personality_insights.set_service_url(
        'https://api.us-south.personality-insights.watson.cloud.ibm.com/instances/ca0246f9-d739-44e0-a8dc-7c9e98b26886'
    )

    def traits_to_vector(self, profile):
        """Map a profile dict to {trait name: percentile}; {} when profile
        is None."""
        if profile is None:
            return {}
        return {trait["name"]: trait["percentile"]
                for trait in profile["personality"]}

    def get_profile(self, text):
        """Join *text* (an iterable of strings) and request a profile.

        Returns None when the joined text has fewer than 100 words (the
        service's minimum input size).
        """
        string = " ".join(text)
        if len(string.split()) < 100:
            print(" Not enough words to analyze")
            return None
        return PersonalityAnalyzer.personality_insights.profile(
            string,
            'application/json',
            content_type='text/plain',
            consumption_preferences=True,
            raw_scores=True).get_result()

    def get_profile_from_file(self, filename):
        """Profile the text found in CSV *filename*; None when it is empty.

        NOTE(review): the tweet column is dumped to tweets.txt but the raw
        CSV file is what actually gets profiled — confirm that is intended.
        """
        if os.path.getsize(filename) <= 0:
            return None
        data = pandas.read_csv(filename)
        tweets = np.array(data)[:, 2:3]
        with open('tweets.txt', 'w', encoding='utf-8') as t:
            for row in tweets:
                t.write('%s\n' % row)
        # Join the raw file line-by-line; `with` closes the handle (the
        # original leaked it on exceptions).
        with open(filename, encoding='utf-8') as f:
            string = " ".join(f)
        # BUG FIX: was `get_profile(string)` — a NameError (missing self.).
        return self.get_profile(string)
def get_personality_insights():
    """Profile the module-level ``allText`` corpus and store chart data
    (labels, percentiles, raw scores) for personality facets, values, and
    needs in the module-level ``result`` dict, persisting it to result.json.
    """
    global allText, result
    authenticator = IAMAuthenticator(IBM_PERSONALITY_INSIGHTS_API_KEY)
    personality_insights = PersonalityInsightsV3(
        version='2017-10-13', authenticator=authenticator)
    personality_insights.set_service_url(IBM_PERSONALITY_INSIGHTS_SERVICE_URL)
    profile = personality_insights.profile(
        allText, accept='application/json', raw_scores=True).get_result()

    def _chart_data(items):
        # Build the {labels, per, raw} mapping the front end expects,
        # scaling scores to percentages.
        return {
            'labels': [item['name'] for item in items],
            'per': [item['percentile'] * 100 for item in items],
            'raw': [item['raw_score'] * 100 for item in items],
        }

    # Big Five facets sit one level below each top-level trait.
    facets = [facet for trait in profile['personality']
              for facet in trait['children']]
    result['personality'] = {
        'Personality': _chart_data(facets),
        'Values': _chart_data(profile['values']),
        'Needs': _chart_data(profile['needs']),
    }
    print("Scores for Needs")
    # Write result.json once at the end — the original serialized the whole
    # dict to the same file three times, once per section.
    with open(os.path.join(STATICFILES_DIRS[0], 'JSON', 'result.json'),
              'w') as json_file:
        json.dump(result, json_file)
    print('personalityInsights finished')
def evaluate_personality(text: str) -> dict:
    """Run Watson Personality Insights on *text* and return the service's
    CSV output parsed into a {metric: value} dict."""
    service = PersonalityInsightsV3(version='2017-10-13',
                                    authenticator=authenticator)
    service.set_service_url(
        'https://gateway.watsonplatform.net/personality-insights/api')
    response = service.profile(text,
                               accept='text/csv',
                               csv_headers=True).get_result()
    # The CSV body arrives as bytes: row 0 holds the headers, row 1 the
    # corresponding values.
    rows = list(csv.reader(response.content.decode('utf-8').splitlines()))
    header, values = rows[0], rows[1]
    return dict(zip(header, values))
def analyze():
    """Flask view: profile the submitted ``textToAnalyze`` form field and
    return the Personality Insights profile as a JSON string."""
    text = request.form["textToAnalyze"]
    # Build the Watson client from the module-level APIKEY / URL config.
    service = PersonalityInsightsV3(version='2017-10-13',
                                    authenticator=IAMAuthenticator(APIKEY))
    service.set_service_url(URL)
    profile = service.profile(text,
                              accept="application/json",
                              consumption_preferences=True).get_result()
    # Debug aid: pretty-print the profile to the server log.
    print(json.dumps(profile, indent=4))
    return json.dumps(profile)
def get_personality_insights(samp):
    """Profile the plain-text sample *samp* and return a list of
    {'trait_name', 'percentile'} dicts, one per Big Five trait.

    Reads the Watson credentials from the Flask app config.
    """
    client = PersonalityInsightsV3(
        version=current_app.config['INSIGHTS_VER'],
        authenticator=IAMAuthenticator(current_app.config['INSIGHTS_API']))
    client.set_service_url(current_app.config['INSIGHTS_URL'])
    profile = client.profile(
        samp,
        'application/json',
        content_type='text/plain',
    ).get_result()
    print(profile)
    res = []
    for trait in profile['personality']:
        print(trait['name'])
        res.append({'trait_name': trait['name'],
                    'percentile': trait['percentile']})
    return res
def get_personality(text):
    """Profile *text* with Personality Insights, padding short input.

    Returns the raw SDK response, or None when the text is empty or the
    API call fails.
    """
    # NOTE(review): legacy iam_apikey auth with a hard-coded key — migrate
    # to IAMAuthenticator and externalize the credential.
    personality_insights = PersonalityInsightsV3(
        version='2017-10-13',
        iam_apikey='CgEZPqJjTpxN2g90fZrjC08xJDxi954sjdmmR0eqWDxD',
        url='https://gateway-lon.watsonplatform.net/personality-insights/api'
    )
    # BUG FIX: doubling empty/whitespace-only text never increases its word
    # count, so the original looped forever on such input.
    if not text.split():
        return None
    # Pad short input by doubling until the service's minimum word count
    # is comfortably exceeded.
    while len(text.split()) <= 150:
        text = ' '.join((text, text))
    profile = None
    try:
        profile = personality_insights.profile(
            text,
            content_type="text/plain",
            accept="application/json"
        )
    except Exception:
        # Best-effort as before (return None on API failure), but no longer
        # a bare except that would mask SystemExit/KeyboardInterrupt.
        pass
    return profile
def json_to_dict(filename):
    """Profile the content-items JSON in *filename* and return a single
    {trait name: raw_score} dict merged across the personality and needs
    sections of the profile."""
    # NOTE(review): iam_apikey is the deprecated auth style.
    personality_insights = PersonalityInsightsV3(
        iam_apikey=API_KEY,
        version='2017-10-13',
        url="https://gateway.watsonplatform.net/personality-insights/api")
    with open(join(dirname(__file__), filename)) as profile_json:
        profile = personality_insights.profile(
            profile_json.read(),
            'application/json',
            content_type='application/json',
            consumption_preferences=True,
            raw_scores=True).get_result()

    def _scores(section):
        # {name: raw_score} for one top-level profile section.
        return {v["name"]: v["raw_score"] for v in profile[section]}

    # The original also computed "values" scores but never returned them,
    # and carried an unused `json_dict` helper; both removed.
    return {**_scores("personality"), **_scores("needs")}
def analyze_personality():
    """Query Personality Insights with a personal text sample (sample.txt)
    and return the Big Five percentiles.

    Returns:
        dict: Big Five parameters keyed by {'ope', 'con', 'ext', 'agr', 'emo'}.
    """
    with open('apikey.txt', 'r') as f:
        api_key = f.read()
    client = PersonalityInsightsV3(
        version='2017-10-13', authenticator=IAMAuthenticator(api_key))
    client.set_service_url(
        'https://gateway.watsonplatform.net/personality-insights/api')
    with open('sample.txt', 'r') as profile_text:
        profile = client.profile(
            profile_text.read(),
            'application/json',
            consumption_preferences=True,
            content_language='ja',
            accept_language='ja',
        ).get_result()
    write_to_json(profile, 'json/result.json')
    # The first five personality entries are Openness, Conscientiousness,
    # Extraversion, Agreeableness and Emotional range, in the order the
    # service returns them.
    keys = ('ope', 'con', 'ext', 'agr', 'emo')
    return {key: profile["personality"][i]["percentile"]
            for i, key in enumerate(keys)}
def get_and_process_data(input):
    """Fetch several pages of a user's tweets + likes, profile each page
    with Personality Insights, and return the concatenated trait scores as
    split-orient JSON.

    NOTE(review): the `input` parameter is unused in this view — presumably
    a Dash callback argument; confirm.  Pagination works by *failing*: on
    the first iteration `twitter_user` / `favorites` are unbound, the
    NameError is caught by the bare `except:`, and the except branch fetches
    the first (un-paged) batch.  Subsequent iterations page with max_id.
    The bare except also hides real API errors — fragile by design.
    """
    # WARNING: this is a very expensive step
    # API call and store data
    # Do not set the num_periods over 5, it will eat up the API
    num_periods = 5
    name = 'elonmusk'
    df_a = pd.DataFrame()  # accumulates one score frame per period
    for i in range(num_periods):
        try:
            # Page backwards from the previous batch's max_id.
            twitter_user = TWITTER.user_timeline(screen_name=name,
                                                 count=30,
                                                 tweet_mode='extended',
                                                 max_id=twitter_user.max_id)
            favorites = TWITTER.favorites(name,
                                          count=30,
                                          max_id=favorites.max_id)

            def convert_status_to_pi_content_item(t, f):
                # Merge a tweet and a liked tweet into one PI content item.
                return {
                    'content': t.full_text + f.text,
                    'contenttype': 'text/plain',
                    'created': int(time.mktime(t.created_at.timetuple())),
                    'id': str(t.id),
                    'language': t.lang
                }

            pi_content_items_array = list(
                map(convert_status_to_pi_content_item, twitter_user,
                    favorites))
            pi_content_items = {'contentItems': pi_content_items_array}
            data = json.dumps(pi_content_items, indent=2)
            # NOTE(review): legacy iam_apikey auth; pi_url / pi_password are
            # module-level config.
            personality_insights = PersonalityInsightsV3(
                version='2017-10-13', url=pi_url, iam_apikey=pi_password)
            profile = personality_insights.profile(
                data,
                accept='application/json',
                content_type='application/json',
                consumption_preferences=True,
                raw_scores=True).get_result()
            # One row per trait, across the three trait families.
            p = pd.DataFrame(profile['personality'],
                             columns=['category', 'name', 'raw_score'])
            n = pd.DataFrame(profile['needs'],
                             columns=['category', 'name', 'raw_score'])
            v = pd.DataFrame(profile['values'],
                             columns=['category', 'name', 'raw_score'])
            df = pd.concat([p, n, v], axis=0)
            # Timestamp the batch with its newest tweet's creation time.
            df['time'] = twitter_user[0].created_at
            df_a = df_a.append(df)
        except:
            # First iteration (or any failure above): fetch without max_id.
            twitter_user = TWITTER.user_timeline(screen_name=name,
                                                 count=30,
                                                 tweet_mode='extended')
            favorites = TWITTER.favorites(screen_name=name, count=30)

            def convert_status_to_pi_content_item(t, f):
                # Same conversion as in the try branch (duplicated upstream).
                return {
                    'content': t.full_text + f.text,
                    'contenttype': 'text/plain',
                    'created': int(time.mktime(t.created_at.timetuple())),
                    'id': str(t.id),
                    'language': t.lang
                }

            pi_content_items_array = list(
                map(convert_status_to_pi_content_item, twitter_user,
                    favorites))
            pi_content_items = {'contentItems': pi_content_items_array}
            data = json.dumps(pi_content_items, indent=2)
            personality_insights = PersonalityInsightsV3(
                version='2017-10-13', url=pi_url, iam_apikey=pi_password)
            profile = personality_insights.profile(
                data,
                accept='application/json',
                content_type='application/json',
                consumption_preferences=True,
                raw_scores=True).get_result()
            p = pd.DataFrame(profile['personality'],
                             columns=['category', 'name', 'raw_score'])
            n = pd.DataFrame(profile['needs'],
                             columns=['category', 'name', 'raw_score'])
            v = pd.DataFrame(profile['values'],
                             columns=['category', 'name', 'raw_score'])
            df = pd.concat([p, n, v], axis=0)
            df['time'] = twitter_user[0].created_at
            df_a = df_a.append(df)
    # Return proper data as json for other callbacks
    return df_a.to_json(date_format='iso', orient='split')
import json
import os
from os.path import join
from ibm_watson import PersonalityInsightsV3
import csv
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# # Authentication via IAM
# authenticator = IAMAuthenticator('your_api_key')
# service = PersonalityInsightsV3(
#     version='2017-10-13',
#     authenticator=authenticator)
# service.set_service_url('https://gateway.watsonplatform.net/personality-insights/api')

# Authentication via external config like VCAP_SERVICES
# (the SDK picks credentials up from the environment when none are passed).
service = PersonalityInsightsV3(version='2017-10-13')
service.set_service_url(
    'https://api.us-east.personality-insights.watson.cloud.ibm.com/instances/4c18b521-3abd-4c7c-bec7-6a3fd03644f1'
)

############################
# Profile with JSON output #
############################
# Profiles a bundled sample document, requesting raw scores and consumption
# preferences; the path is resolved relative to the current working dir.
with open(join(os.getcwd(), 'resources/personality-v3.json')) as \
        profile_json:
    profile = service.profile(profile_json.read(),
                              'application/json',
                              raw_scores=True,
                              consumption_preferences=True).get_result()
# MongoDB collections (client is defined elsewhere in the file).
db = client.ibm_faf
userHandlesCollection = db.userHandles
analysedUserData = db.analysedUserData
# ----------------------------------------------------------------------------------------------------------
# Tweepy ---------------------------------------------------------------------------------------------------
auth = tweepy.OAuthHandler(os.getenv('TWITTER_CONSUMER_KEY'),
                           os.getenv('TWITTER_CONSUMER_SECRET'))
auth.set_access_token(os.getenv('TWITTER_ACCESS_TOKEN'),
                      os.getenv('TWITTER_ACCESS_TOKEN_SECRET'))
api = tweepy.API(auth)
# ----------------------------------------------------------------------------------------------------------
# Personality Insights ----------------------------------------------------------------------------------------
# NOTE(review): iam_apikey is the deprecated auth style.
personality_insights = PersonalityInsightsV3(version='2017-10-13',
                                             iam_apikey=os.getenv('PI_APIKEY'),
                                             url=os.getenv('PI_URL'))
# ----------------------------------------------------------------------------------------------------------


# Analyse user's personality
@analyse.route('/me', methods=['GET'])
def analyse_me():
    """
    Analyses my personality from social media posts using Watson Personality
    Insights and stores that into the database before returning.

    (Definition continues beyond this view.)
    """
    # Require an authenticated session.
    if (not session):
        abort(401)
    userHandles = session['userHandles']
""" The example returns a JSON response whose content is the same as that in ../resources/personality-v3-expect2.txt """ from __future__ import print_function import json from os.path import join, dirname from ibm_watson import PersonalityInsightsV3 import csv # If service instance provides API key authentication service = PersonalityInsightsV3( version='2017-10-13', ## url is optional, and defaults to the URL below. Use the correct URL for your region. url='https://gateway.watsonplatform.net/personality-insights/api', iam_apikey='DBxOesEcwYTQK9-dvcaxTwBICWk0s3RwwEW6m-2eppDn') # service = PersonalityInsightsV3( # version='2017-10-13', # ## url is optional, and defaults to the URL below. Use the correct URL for your region. # # url='https://gateway.watsonplatform.net/personality-insights/api', # username='******', # password='******') ############################ # Profile with JSON output # ############################ with open(join(dirname(__file__), '../resources/personality-v3.json')) as \ profile_json: profile = service.profile(profile_json.read(),
def get_insight_local(input_text, dir=""):
    """Profile *input_text* with Watson, reduce the 22-d trait vector with a
    saved autoencoder, assign it to a personality cluster, and render the
    diagnostic plots.  Returns the predicted cluster id.

    NOTE(review): `dir` shadows the builtin, and the plot images
    (radar_img, cores_img, ...) are computed but not returned — presumably
    the plotting helpers save/show them as a side effect; confirm.
    """
    DATAFOLDER = dir + "data/"
    MODELFOLDER = dir + "models/"
    # =============================Personality_Insight=============================
    url = "https://gateway-fra.watsonplatform.net/personality-insights/api"
    # SECURITY(review): hard-coded API key — externalize it.
    apikey = "w-s1kGzcVV8xeTzYvYgwsIKk4UAF8M2Zr7xkPRgfiKCd"
    # # Authentication via IAM
    authenticator = IAMAuthenticator(apikey)
    service = PersonalityInsightsV3(
        version='2017-10-13',
        authenticator=authenticator)
    service.set_service_url(url)
    # text2profile is a project helper; presumably returns a trait-score
    # mapping — transposed here into a single-row DataFrame.
    profile = text2profile(input_text, service)
    profile = pd.DataFrame(profile)
    profile = profile.T
    #profile.to_csv("test_profile.csv", index=False)
    print("Profile: ", profile)
    # =============================Loading_Datasets=============================
    df_2d = pd.read_csv(DATAFOLDER + 'clustered_2d_v3.csv')
    df_22d = pd.read_csv(DATAFOLDER + 'clustered_22d_v3.csv')
    # cluster_cores
    cores_2d = pd.read_csv(DATAFOLDER + 'cores_2d_v3.csv')
    cores = pd.read_csv(DATAFOLDER + 'cores_v3.csv')
    # =============================Model_Prediction=============================
    # preparing data
    X = profile.to_numpy()
    X = np.round(X, 3)
    # loading models
    # NOTE(review): `lr` is the legacy Keras optimizer kwarg (newer versions
    # use `learning_rate`) — confirm the pinned TF version.
    encoder = load_model(MODELFOLDER + 'encoder_v3.h5')
    encoder.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
                    loss=tf.keras.losses.MSE)
    #kmeans = joblib.load(MODELFOLDER + 'kmeans_clf_v2.joblib')
    # reduction: encode the 22-d profile down to the 2-d latent space
    found_factors = encoder.predict(X)[0]
    found_factors = np.round(found_factors, 5)
    found_factors = found_factors.astype('double')
    print(found_factors)
    # classification: nearest cluster core in the 2-d space
    Y_pred = handy_predict(found_factors, cores_2d)
    print("Predicted: ", Y_pred)
    print(type(Y_pred))
    # =============================Results=============================
    # Column groups taken from the 22-d dataset layout: 5 Big Five columns,
    # then 7 + 6 needs columns, then values.
    big5_keys = list(df_22d.iloc[:3, 0:5].columns)
    needs_keys_1 = list(df_22d.iloc[:3, 5:12].columns)
    needs_keys_2 = list(df_22d.iloc[:3, 12:18].columns)
    values_keys = list(df_22d.iloc[:3, 18:-1].columns)
    trait_columns = {'Needs 1': needs_keys_1,
                     'Needs 2': needs_keys_2,
                     'Big5': big5_keys,
                     'Values': values_keys}
    # Big Five plus one value dimension for the hexagonal radar plot.
    hex_traits = big5_keys.copy()
    hex_traits.append('Self-transcendence')
    radar_img = radar_plot(data=profile[hex_traits].to_numpy()[0],
                           traits=hex_traits,
                           COLOR='indigo',
                           drop_yticks=False,
                           drop_xticks=False,
                           title='Profile principal traits')
    # Creating a color palette:
    my_palette = plt.cm.get_cmap("Accent", len(cores.index))
    RGB_codes = np.array([my_palette(0), my_palette(1), my_palette(2),
                          my_palette(3)])
    RGB_codes = RGB_codes[:, :-1]  # drop the alpha channel
    # plotting cluster cores
    cores_img = radar_cluster_cores(cores, profile, hex_traits, my_palette)
    # plotting radar plots of all traits for profile & its cluster
    predicted_cluster = cores.loc[Y_pred]
    all_traits_img = all_traits_plot(predicted_cluster, profile,
                                     trait_columns, my_palette(Y_pred))
    # plotting coordinates of the projection in a 2D space
    projection_img = projection_plot(df_2d, found_factors, cores_2d,
                                     my_palette)
    # plotting donut shares of each cluster
    donut_img = donut_shares(found_factors, cores_2d, RGB_codes)
    return Y_pred
import tweepy
import json
from k3y5 import TWITTER_API_KEY, TWITTER_API_SECRET_KEY, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET, IBM_API_KEY, IBM_URL
from ibm_watson import PersonalityInsightsV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# loading keys from json file
MAX_TWEET = 100

# connecting to twitter api
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET_KEY)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)

# Watson Personality Insights client.
authenticator = IAMAuthenticator(IBM_API_KEY)
PI = PersonalityInsightsV3(version='2020-06-15', authenticator=authenticator)
PI.set_service_url(IBM_URL)


def limit_handled(cursor, list_name):
    """Generator: yield items from a tweepy *cursor*, sleeping through
    Twitter rate-limit windows instead of raising.

    NOTE(review): uses time.sleep but `time` is not imported in this view —
    confirm it is imported elsewhere in the file.
    """
    while True:
        try:
            yield cursor.next()
        # catch the api rate limit exception and wait for 15 minutes
        except tweepy.RateLimitError:
            print(f"\nData points in list = {len(list_name)}")
            print("Hit Twitter API rate limit.")
            # Count down in three 5-minute steps (15 minutes total).
            for i in range(3, 0, -1):
                print(f"Wait for {i*5} mins.")
                time.sleep(5 * 60)
#Client details for Twitter API consumer_key = "pZb2PizxEsB1eyMcMtPaiCKt0" consumer_secret = "MLAIqKyYVkdykZ6eibeoWcPOX56gOgfV0vkWIVzmKEVxGQkaJu" access_key = "3046753945-qtQ26I5KlW9Ng1LmX44OxdbJTQ8NAqjvpcDBPBE" access_secret = "yxNsmJMzxB7WumR5u2HB9NYM5Cdftj3gbJpRSwwaxQCTw" auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_key, access_secret) api = tweepy.API(auth) #Client details for Personality Insight API url1 = "https://gateway-lon.watsonplatform.net/personality-insights/api" apikey = "rRAw3UeMSph-ClMJ-LNwa0D8HHrBdHQnqHpglaa7QhKt" service = PersonalityInsightsV3(url=url1, iam_apikey=apikey, version="2017-10-13") #Client details for Reddit API reddit = praw.Reddit( client_id="bqa8wgD7IjEhQw", client_secret="e-nSLf7A6gP1U4XOU4K15BoArM8", password="******", username="******", user_agent="training", ) #Client details for SpeechToText API speech_to_text = SpeechToTextV1( iam_apikey="2GLz9FSNQKpCutmiRtpVMG3AJQPh_DItUe89rTwy4QJC", url="https://gateway-lon.watsonplatform.net/speech-to-text/api")
def build_ibm(username):
    """Run IBM Personality Insights over a user's stored tweets and save one
    Profile row per Big Five trait.

    Args:
        username: the Tweet/Profile owner's username.

    Returns:
        list[Profile]: the saved Profile objects, one per trait.

    Raises:
        Exception: if Watson returns a trait name outside the Big Five.
    """
    # Watson trait name -> (attr_name, positive label, negative label,
    # category_desc).  attr_categories is always "<negative>\n<positive>".
    # "Emotional range" is renamed to be more similar to other Big Five names.
    trait_meta = {
        'Emotional range': ('neuroticism', 'Neurotic', 'Not Neurotic',
                            'Less Neurotic\nMore Neurotic'),
        'Openness': ('openness', 'Open', 'Not Open',
                     'Less Open\nMore Open'),
        'Conscientiousness': ('conscientiousness', 'Conscientious',
                              'Not Conscientious',
                              'Less Conscientious\nMore Conscientious'),
        'Extraversion': ('extraversion', 'Extraverted', 'Introverted',
                         'Introverted\nExtraverted'),
        'Agreeableness': ('agreeableness', 'Agreeable', 'Not Agreeable',
                          'Less Agreeable\nMore Agreeable'),
    }

    # Prepare authentication.
    # BUG FIX: the original read `iam_apikey=,` — a syntax error left where
    # a key was meant to be filled in.  Supply the key below.
    personality_insights = PersonalityInsightsV3(
        version='2017-10-13',
        iam_apikey='',  # TODO: add your own key here
        url='https://gateway-lon.watsonplatform.net/personality-insights/api'
    )

    # Read tweets: all of the user's active tweets, newline-joined.
    tweets = Tweet.objects.filter(username=username, is_active=True)
    tweets = '\n'.join(x.tweet for x in tweets)

    profile = personality_insights.profile(
        tweets,
        'application/json',
        consumption_preferences=True,
        raw_scores=True,
        csv_headers=True
    ).get_result()

    # Save profile results based on this
    traits = []
    for trait in profile['personality']:
        watson_name = trait['name']
        if watson_name not in trait_meta:
            raise Exception("Unexpected name %s" % watson_name)
        attr, positive, negative, category_desc = trait_meta[watson_name]
        percentile = float(trait['percentile'])
        # Above the median percentile counts as exhibiting the trait.
        prediction = positive if percentile > 0.5 else negative
        p = Profile(username=username,
                    attr_name=attr,
                    classifier='ibm',
                    predicted_class=prediction,
                    attr_categories=negative + '\n' + positive,
                    category_desc=category_desc,
                    attr_values=trait['percentile'],
                    confidence_str=min(100, int(percentile * 100)),
                    explanations='',
                    global_explanations='',
                    is_current=True,
                    class_confidence=trait['raw_score'],
                    is_sensitive=False)
        p.save()
        traits.append(p)
    return traits
# Indonesian -> English translator used downstream.
translator = Translate(from_lang='id', to_lang='en')
counter = 0  # request counter, mutated by get_url_content below

# COS Instance (IBM Cloud Object Storage)
cos = ibm_boto3.resource(
    "s3",
    ibm_api_key_id=COS_API_KEY_ID,
    ibm_auth_endpoint=COS_AUTH_ENDPOINT,
    config=Config(signature_version="oauth"),
    endpoint_url=COS_ENDPOINT,
    ibm_service_instance_id=COS_RESOURCE_CRN,
)

# Personality Insight Authenticator
authenticator = IAMAuthenticator(PERSONALITY_INSIGHT_AUTHENTICATOR)
personality_insights = PersonalityInsightsV3(version='2017-10-13',
                                             authenticator=authenticator)
personality_insights.set_service_url(PERSONALITY_INSIGHT_SERVICE_URL)

# Discovery Authenticator (note: `authenticator` is rebound here)
authenticator = IAMAuthenticator(DISCOVERY_AUTHENTICATOR)
DiscoveryService = DiscoveryV1(version='2019-11-22',
                               authenticator=authenticator)
DiscoveryService.set_service_url(DISCOVERY_SERVICE_URL)


def get_url_content(url):
    """Fetch the content of *url* with a custom User-Agent.
    (Definition continues beyond this view.)"""
    global counter
    headers = {'User-Agent': USER_AGENT}
    session = requests.Session()
from ibm_watson import PersonalityInsightsV3

# NOTE(review): iam_apikey is the deprecated pre-IAMAuthenticator auth style
# and the key is hard-coded — rotate it and load from configuration.
personality_insights = PersonalityInsightsV3(
    version='2017-10-13',
    iam_apikey='W73kz6O3XR1pkIQVn2RYbrrtIU2o0IvNYuqiMICwSwro')

# BUG FIX: the original used open(...).read(), leaking the file handle;
# `with` guarantees it is closed.
with open("personality.txt") as profile_file:
    profile_text = profile_file.read()

profile = personality_insights.profile(profile_text, "text/plain").get_result()

needs = profile["needs"]
values = profile["values"]
personality = profile["personality"]


def print_traits(traits_category_name, traits):
    """Print each trait's name and percentile (as a percentage) for one
    trait category, followed by a blank line."""
    print(traits_category_name + ":")
    for trait in traits:
        print(trait["name"] + ": {:.3f}%".format(trait["percentile"] * 100))
    print("\n")


print_traits("Needs", needs)
print_traits("Values", values)
print_traits("Personality", personality)
import os
from os.path import join
from ibm_watson import PersonalityInsightsV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv, find_dotenv

# #Load env variables from the SDK credentials file next to this script.
dir_path = os.path.dirname(os.path.realpath(__file__))
load_dotenv(dotenv_path=dir_path + '/ibm-credentials.env')
API_KEY = os.getenv('PERSONALITY_INSIGHTS_APIKEY')
API_URL = os.getenv('PERSONALITY_INSIGHTS_URL')

#Authentication with Watson servers
authenticator = IAMAuthenticator(API_KEY)
personality_insights = PersonalityInsightsV3(version='2018-08-01',
                                             authenticator=authenticator)
personality_insights.set_service_url(API_URL)

# create directory for saving results
if os.path.exists(dir_path + '/personality_results/') == False:
    os.makedirs(dir_path + '/personality_results/')

#list the inputs
json_folder = [f for f in os.listdir(dir_path + '/character_jsons/')]

#Read and analyze JSON file for each character
# (the profile() call continues beyond this view)
for json_file in json_folder:
    with open(dir_path + '/character_jsons/' + json_file) as profile_json:
        profile = personality_insights.profile(
            profile_json.read(),
            'application/json',
#! /usr/bin/env python import praw import prawcore import sys import path import os import json from ibm_watson import PersonalityInsightsV3 personality_insights = PersonalityInsightsV3( version='2017-10-13', iam_apikey='', url='https://gateway.watsonplatform.net/personality-insights/api') def traits(persona, user): personality = persona["personality"] needs = persona["needs"] print("From the BIG 5 traits they have:\n") for i in range(len(personality)): big5TraitName = personality[i]["name"] childTraits = personality[i]["children"] if personality[i]["percentile"] > 0.7: print("They have Big 5 trait " + big5TraitName + ", based on their comments") else: print("They haven't shown Big 5 trait " + big5TraitName +
from ibm_watson import PersonalityInsightsV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json

# SECURITY(review): live API key committed in source — rotate/externalize.
authenticator = IAMAuthenticator('yLcjP_vAobDEORUcO86S1BttbZVclO3IQzFlIEnIaPKL')
# NOTE(review): Watson version strings are documented as YYYY-MM-DD;
# '22-05-2020' looks day-first — confirm against the service ('2020-05-22').
personality_insights = PersonalityInsightsV3(
    version='22-05-2020',
    authenticator=authenticator
)
# Sample input text (two raw-string chunks concatenated below).
samp = R'I don’t like personality tests! Whether they reveal something right or wrong about you, I don’t want to reduce people’s understanding of me to four letters, or five numbers, or a few signs. I could write many pages about how this slapdash way of “understanding” a person might lead to discrimination at work, misguided judgment, or violations of one’s privacy, but that’s not quite the point of this blogpost — though it is the motivation behind writing it. Here, I will focus on one specific new form of personality testing — one that relies on machine learning. I’m referring to the IBM Watson product called Personality Insights. According to IBM’s website, the tool “uses linguistic analytics to infer individuals’ intrinsic personality characteristics, including Big Five [or O.C.E.A.N], Needs, and Values, from digital communications such as email, text messages, tweets, and forum posts.”'
samp2 = R' In addition, Personality Insights shows your consumption habits and “temporal behavior” (if the input text is timestamped). Let me show you what this means. I fed the tool with my Twitter feed and received this nice visualization of the tool’s output, supposedly showing my personality characteristics, consumer needs, and values:If you look into the output file (here), you can see that, according to the tool, I am more likely to “to be influenced by online ads when making product purchases.” Also, I am more likely to be concerned about the environment and to like documentaries, and am less likely to like musical movies (🤚🏽objection: one of my favorite shows these days is Crazy Ex-Girlfriend)'
samp+=samp2
personality_insights.set_service_url('https://api.eu-gb.personality-insights.watson.cloud.ibm.com/instances/3be001b5-9f13-46ec-bb07-286e363d8ba1')
# Profile the combined sample as plain text.
profile = personality_insights.profile(
    samp,
    'application/json',
    content_type='text/plain',
).get_result()
print(profile)
# Collect {trait_name, percentile} per Big Five trait.
res =[]
for i in profile['personality']:
    pseudo_dict={}
    pseudo_dict['trait_name'] = i['name']
    print(i['name'])
    pseudo_dict['percentile'] = i['percentile']
    res.append(pseudo_dict)
# Credentials: prefer the local module, fall back to env vars on Heroku.
try:
    from githavior.credentials import NLU_KEY, PERSONALITY_INSIGHTS_KEY
except ModuleNotFoundError:
    # For Heroku
    NLU_KEY = os.environ['NLU_KEY']
    PERSONALITY_INSIGHTS_KEY = os.environ['PERSONALITY_INSIGHTS_KEY']

from githavior.fetch_repos import get_commits, get_pr_issues_body, get_avatar

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

# NOTE(review): username/password is the legacy Watson auth style; the
# masked username placeholder must be replaced with a real value.
personality_service = PersonalityInsightsV3(
    version='2017-10-13',
    url='https://gateway-lon.watsonplatform.net/personality-insights/api',
    username='******',
    password=PERSONALITY_INSIGHTS_KEY)
naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    url=
    'https://gateway-lon.watsonplatform.net/natural-language-understanding/api',
    username='******',
    password=NLU_KEY)

# Word-cloud stop words (STOPWORDS imported elsewhere in the file).
stopwords = set(STOPWORDS)
stopwords.add("said")


# (route handler continues beyond this view)
@app.route('/', methods=['GET'])