# Analyze a news article with Watson Natural Language Understanding:
# extract the top keywords (with sentiment and emotion), then print a
# short summary line for each keyword.
import json
from io import BytesIO
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions

# NOTE(review): '{apikey}' and '{url}' are placeholders — substitute real
# service credentials before running.
authenticator = IAMAuthenticator('{apikey}')
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2020-08-01', authenticator=authenticator)
natural_language_understanding.set_service_url('{url}')

# Let the service fetch and analyze the article directly from its URL;
# limit=2 keeps only the two strongest keywords.
response = natural_language_understanding.analyze(
    url='https://www.bloomberg.com/news/articles/2020-12-03/asia-stocks-set-to-slip-dollar-extends-decline-markets-wrap?srnd=premium-middle-east',
    features=Features(keywords=KeywordsOptions(
        sentiment=True, emotion=True, limit=2))).get_result()

print(json.dumps(response, indent=2))

# Billed character count reported by the service.
txt_char = response['usage']['text_characters']
print(f'The Text Character is : {txt_char}')

for t in response['keywords']:
    text = t['text']
    print(f'The Text is : {text}')
    sentiments = t['sentiment']['label']
    # FIX: corrected the misspelled output message ("Sentemint" -> "Sentiment").
    print(f'The Sentiment is : {sentiments}')
    scores = t['sentiment']['score']
    print(f'The Score is : {scores}')
# The Feeling
# Watson NLU + plotting setup for a keywords-frequency graph, followed by a
# listing of the sample class CSV files that will be processed later.
import os  # FIX: os.scandir() is used below but `os` was never imported
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd


# these functions will be needed for the keywords frequency graph
def sorting_on_count(a):
    # Sort key: keyword occurrence count.
    return a['count']


def sorting_on_sentiment(a):
    # Sort key: keyword sentiment score.
    return a['sentiment']


# set up services
# NOTE(review): IAMAuthenticator / NaturalLanguageUnderstandingV1 are not
# imported in this chunk — presumably imported earlier in the file; confirm.
# WARNING: hard-coded API key — move to an environment variable.
authenticator = IAMAuthenticator('rcXs4WvKrUtItOK0P1fv0mGGAC5ZfRth0ZNQWM17NIUT')
NLU = NaturalLanguageUnderstandingV1(version='2020-08-01',
                                     authenticator=authenticator)
NLU.set_service_url(
    "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/2acea4e4-9e41-400b-b6a1"
    "-a27c3c7c608c")

input_files = []
classes_listed = []
# Course types/subtypes to be filtered out of the NLU results downstream.
illegal_types = ["Major", "AcademicStanding", "Quantity"]
illegal_subtypes = ["Freshman", "Sophomore", "Junior", "Senior", "Student", "Faculty"]

# used to get input files for later on
with os.scandir('./Sample_Class_CSV_Files') as it:
    for entry in it:
        print(entry.name)
# pip install --upgrade ibm-watson --> install it to use this file from ibm_watson import VisualRecognitionV3 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator import urllib.request import json from pandas.io.json import json_normalize # Paste your API key for IBM Watson Visual Recognition below: my_apikey = 'JiFBdOBY-EE_rukAZCmoZ7kNt65sDNzq0R8qYOtQSmyw' authenticator = IAMAuthenticator(my_apikey) visrec = VisualRecognitionV3('2018-03-19', authenticator=authenticator) # Downloading Beagle dataset urllib.request.urlretrieve( "http://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/CV0101/Dataset/Beagle.zip", "beagle.zip") # Downloading Husky dataset urllib.request.urlretrieve( "http://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/CV0101/Dataset/Husky.zip", "husky.zip") # Downloading Golden Retriever dataset urllib.request.urlretrieve( "http://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/CV0101/Dataset/GoldenRetriever.zip", "goldenretriever.zip" ) # note that we should remove any hyphens from the zip file name with open('beagle.zip', 'rb') as beagle, \
#Watson Time import json from ibm_watson import DiscoveryV1 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator authenticator = IAMAuthenticator('7AQhKTuRCG_tEM68269k1zZiQFQbovXHGNIX_Q1kxyWT') discovery = DiscoveryV1( version='2019-04-30', authenticator=authenticator ) discovery.set_service_url('https://gateway-syd.watsonplatform.net/discovery/api') with open('./test-doc1.html') as fileinfo: add_doc = discovery.add_document( '{environment_id}', '{collection_id}', file=fileinfo).get_result() print(json.dumps(add_doc, indent=2)) curl -X POST -u "apikey:CUjh24sJMFvIO7WeXPUSWH5do9XqKBWfbuY88w7XK_f9" \ -H "Content-Type: application/json" \ -d "{\"name\":\"my-first-environment\",\"description\":\"exploring environments\"}" \ "https://gateway-syd.watsonplatform.net/discovery/api/v1/environments?version=2019-04-30" curl -X POST -u "apikey":"CUjh24sJMFvIO7WeXPUSWH5do9XqKBWfbuY88w7XK_f9" -H "Content-Type: application/json" -d '{ "name": "my_environment", "description": "My environment"
CHUNK = 1024 # Note: It will discard if the websocket client can't consumme fast enough # So, increase the max size as per your choice BUF_MAX_SIZE = CHUNK * 10 # Buffer to store audio q = Queue(maxsize=int(round(BUF_MAX_SIZE / CHUNK))) # Create an instance of AudioSource audio_source = AudioSource(q, True, True) ############################################### #### Prepare Speech to Text Service ######## ############################################### # initialize speech to text service authenticator = IAMAuthenticator( 'yTSSJ5GSmGhgIA95KnVPDf61KSZinztq909UBMfoqh7l') speech_to_text = SpeechToTextV1(authenticator=authenticator) speech_to_text.set_service_url( "https://api.us-east.speech-to-text.watson.cloud.ibm.com/instances/77c94867-643f-431b-a593-0bc775c18bb7" ) actions = [] def main(): global actions bullet_state = "ready" # initialize the pygame pygame.init() # create the screen screen = pygame.display.set_mode((800, 600)) # width , height or x,y axis
encoding='utf-8')) track_ids = [] song_names = [] all_features = [] all_harmonies = [] labels = [] all_lyric_object = [] all_lyric_tones = [] # MusixMatch API musixmatch = Musixmatch('3dbbf7d593f0f05ab045dac7b015c430') # Tone Analyzer API authenticator = IAMAuthenticator( 'R7Ja2rP0jp6LucFzOl5-4xbMSVSX5Fci8wc63J0O5-l3') tone_analyzer = ToneAnalyzerV3(version='2017-09-21', authenticator=authenticator) tone_analyzer.set_service_url( 'https://api.us-south.tone-analyzer.watson.cloud.ibm.com/instances/b8f00a45-63d1-4bb9-b1a0-1c2e6bc3e4ca' ) if token: for obs in track_info: try: labels.append(obs[2]) sp = spotipy.Spotify(auth=token) results = sp.search(q=obs[0] + ' ' + obs[1], limit=1) lyrics = musixmatch.matcher_lyrics_get(obs[1], obs[0])
# Tone-driven music player: a Watson Tone Analyzer client plus a class that
# maps a detected tone to a music file (truncated in this view).
import json
import random
from ibm_watson import ToneAnalyzerV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os
import pygame
import time

# NOTE(review): hard-coded API key — should come from configuration.
authenticator = IAMAuthenticator(
    '9e2rg8T3by_sBIu3SbyCFWZkQLzCHGBfZUHYx0s4U8E3')
tone_analyzer = ToneAnalyzerV3(version='2017-09-21',
                               authenticator=authenticator)
tone_analyzer.set_service_url(
    'https://api.eu-gb.tone-analyzer.watson.cloud.ibm.com/instances/3806638c-85e1-4670-9013-4e303338e90a'
)

# Project root: parent of this file's directory.
root_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

pygame.mixer.init()  # Initialize the mixer module.
tone = ""
flag = False


class ToneMusic:
    # Selects a music track for the current tone.
    def run(self):
        # NOTE(review): `tone = self` binds the instance itself, so the
        # comparison below can never equal "confident" — this looks like it
        # was meant to read the module-level `tone`; confirm against the
        # full file before changing.
        tone = self
        if tone == "confident":
            path = os.path.join(root_dir, "AI_game", "music", "normal.wav")
Generating captcha mp3 audio files. python Scripts/generate_audio.py --count 10 --symbols Symbols/symbols.txt --length 8 --tts gtts --targetdir C:\\Users\\SIDDHARTHA\\Downloads\\captchas\\audio\\gtts """ from gtts import gTTS import tts.sapi import os import random import argparse from progressbar import ProgressBar from os.path import join, dirname from ibm_watson import TextToSpeechV1 from ibm_watson.websocket import SynthesizeCallback from ibm_cloud_sdk_core.authenticators import IAMAuthenticator authenticator = IAMAuthenticator('2SSH7ZDGn788ZPiu6IZVkAn8DaTuOEy9zr0och7yYAj3') IBMservice = TextToSpeechV1(authenticator=authenticator) IBMservice.set_service_url('https://gateway-lon.watsonplatform.net/text-to-speech/api') voices = IBMservice.list_voices().get_result() def numbersAreTogether(text): for i in range(0,len(text)-1): if text[i].isdigit() and text[i+1].isdigit(): return True return False def formRandomText(length,symbols): s='' for i in range(length): chr = str(symbols[random.randint(0,len(symbols)-1)]) if len(s)>0:
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from playsound import playsound

# Watson Text-to-Speech demo: synthesize a short greeting, write the audio
# to an mp3 file, then play it back.
authenticator = IAMAuthenticator(
    'fumBqyCTaCbMHNOsJDHaFn2eIkGDO2P9UbEiAIFd1HLE')
text_to_speech = TextToSpeechV1(authenticator=authenticator)
text_to_speech.set_service_url(
    'https://api.eu-gb.text-to-speech.watson.cloud.ibm.com/instances/13787e5d-dfd8-4822-92e2-f657bd5e2ba2'
)

with open('Hello_world.mp3', 'wb') as audio_file:
    # Request the synthesis and pull the raw audio bytes out of the result.
    synthesis = text_to_speech.synthesize('Hello how r u',
                                          voice='en-US_AllisonV3Voice',
                                          accept='audio/mp3').get_result()
    audio_file.write(synthesis.content)

playsound('Hello_world.mp3')
#encoding: utf-8
# Watson Speech-to-Text setup: authenticate, point the client at the
# service URL, and read a local WAV file for recognition.
import json
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# NOTE(review): hard-coded API key — load from configuration instead.
authenticator = IAMAuthenticator(
    'I2ZXOmDQ-z-e8ekV1xuDWU5ay8IMn_cZsD2q-5t9bArR')
speech_to_text = SpeechToTextV1(authenticator=authenticator)
speech_to_text.set_service_url(
    'https://stream.watsonplatform.net/speech-to-text/api')

# Earlier experiments (listing models, fetching the zh-CN narrowband /
# broadband models) were commented out in the original and are omitted here.

# FIX: the original `data = open('output.wav', 'rb').read()` leaked the file
# handle — a context manager closes it deterministically.
with open('output.wav', 'rb') as audio_file:
    data = audio_file.read()
#https://cloud.ibm.com/apidocs/speech-to-text?code=python from ibm_watson import SpeechToTextV1 import json from ibm_cloud_sdk_core.authenticators import IAMAuthenticator #The service endpoint is based on the location of the service instance, we store the information in the variable URL. #To find out which URL to use, view the service credentials. url_s2t = "https://api.us-south.speech-to-text.watson.cloud.ibm.com/instances/2344fe0f-e3d2-49ab-862b-57ec806a39c3" #retrieved from IBM Watson Service Page #You require an API key, and you can obtain the key on the Dashboard . iam_apikey_s2t = "JSwdI01S9dV9--To9WPI0-_m18fmg4vZ4pNgz-1X4Unk" #retrieved from IBM Watson Service Page #You create a Speech To Text Adapter object the parameters are the endpoint and API key. authenticator = IAMAuthenticator(iam_apikey_s2t) s2t = SpeechToTextV1(authenticator=authenticator) s2t.set_service_url(url_s2t) #Code from Juptyer Notebook #!wget -O PolynomialRegressionandPipelines.mp3 https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/PolynomialRegressionandPipelines.mp3 #We have the path of the wav file we would like to convert to text filename = 'PolynomialRegressionandPipelines.mp3' #We create the file object wav with the wav file using open ; we set the mode to "rb" , #this is similar to read mode, but it ensures the file is in binary mode. #We use the method recognize to return the recognized text. #The parameter audio is the file object wav, the parameter content_type is the format of the audio file. with open(filename, mode="rb") as wav: response = s2t.recognize(audio=wav, content_type='audio/mp3')
# Wire up the Twitter API (tweepy) and the Watson NLU / Tone Analyzer
# clients from credentials loaded earlier in the file (ckey/csecret/atoken/
# asecret, nlu_api_*, tone_analyzer_api_*).
# WARNING: the debug lines below print API keys and secrets to stderr —
# remove or redact before running anywhere non-local.
if debug: print("Twitter access details: key: %s, secret: %s, token: %s, secret: %s" %(ckey, csecret, atoken, asecret), file=sys.stderr)
if debug: print("NLU access details: key: %s, url: %s" %(nlu_api_key, nlu_api_url), file=sys.stderr)
if debug: print("Tone access details: key: %s, url: %s" %(tone_analyzer_api_key, tone_analyzer_api_url), file=sys.stderr)

## Connect to twitter API
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
api = tweepy.API(auth)

## Connect to IBM Watson NLU API
nlu_authenticator = IAMAuthenticator(nlu_api_key)
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2020-08-01',
    authenticator=nlu_authenticator
)
natural_language_understanding.set_service_url(nlu_api_url)

## Connect to IBM Watson tone analyzer API
ta_authenticator = IAMAuthenticator(tone_analyzer_api_key)
tone_analyzer = ToneAnalyzerV3(
    version='2017-09-21',
    authenticator=ta_authenticator
)
tone_analyzer.set_service_url(tone_analyzer_api_url)
import json from ibm_watson import NaturalLanguageUnderstandingV1 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator from ibm_watson.natural_language_understanding_v1 import Features, SentimentOptions, EntitiesOptions import nltk from nltk.tokenize.toktok import ToktokTokenizer from nltk.corpus import stopwords from nltk.stem.snowball import SnowballStemmer import re import ntpath import json import tqdm authenticator = IAMAuthenticator( 'P6Y6IwvlXMt50mYGmmFXUsbWdgoihk07pc_1WnxQ0h64') natural_language_understanding = NaturalLanguageUnderstandingV1( version='2020-08-01', authenticator=authenticator) natural_language_understanding.set_service_url( 'https://api.eu-de.natural-language-understanding.watson.cloud.ibm.com/instances/2fec67fb-e992-4f60-8403-f6f41aad1346' ) def path_leaf(path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def gen_dict_extract(key, var): if hasattr(var, 'items'):
conn = ibm_db.connect(dsn, "", "") except: pass with open('watson-assistant-credentials.json', 'r') as credentialsFile: credentials = json.loads(credentialsFile.read()) apikey = credentials.get('apikey') url = credentials.get('url') assistantid = credentials.get('assistant-id') ######################### # Watson Assistant Authentication ######################### authenticator = IAMAuthenticator(apikey) assistant = AssistantV2(version='2020-04-01', authenticator=authenticator) assistant.set_service_url(url) ######################### # Watson Assistant Sessions ######################### def createSession(): global sessionid session = assistant.create_session(assistantid).get_result() sessionid = session.get('session_id') print('New Session created ID: ', sessionid)
# Watson NLU sentiment analysis over the first line of a ride-reviews file.
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, SentimentOptions

# NOTE(review): hard-coded API key — move to configuration.
authenticator = IAMAuthenticator(
    'PtJi8o9UyO8koiyqOJ-JOQPDFURQI9H8lKI8WzQxHc6Y')
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', authenticator=authenticator)
natural_language_understanding.set_service_url(
    'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/17e005b5-ebde-489d-8483-60fbd1ea1cfd'
)

# FIX: the original `f = open(...)` never closed the file — use a context
# manager. Only the first line is analyzed, matching the original readline().
with open("Uber_Ride_Reviews742" + ".txt", "r") as f:
    text = f.readline()

# Document-level sentiment for the English text.
sentiment_analysis = natural_language_understanding.analyze(
    text=text,
    features=Features(sentiment=SentimentOptions()),
    language="en").get_result()

print(json.dumps(sentiment_analysis, indent=2))
sttService = SpeechToTextV1() response = sttService.recognize( audio=request.get_data(cache=False), content_type='audio/wav', timestamps=True, word_confidence=True, smart_formatting=True).get_result() # Ask user to repeat if STT can't transcribe the speech if len(response['results']) < 1: return Response(mimetype='plain/text', response="Sorry, didn't get that. please try again!") text_output = response['results'][0]['alternatives'][0]['transcript'] text_output = text_output.strip() return Response(response=text_output, mimetype='plain/text') port = os.environ.get("PORT") or os.environ.get("VCAP_APP_PORT") or 5000 if __name__ == "__main__": load_dotenv() # SDK is currently confused. Only sees 'conversation' for CloudFoundry. #authenticator = (get_authenticator_from_environment('assistant') or # get_authenticator_from_environment('conversation')) authenticator = IAMAuthenticator('4WDcBZjc8BoExiNOb1u6TaPPP64ZxhMGyeEX2ca4j7g4') assistant = AssistantV1(version="2019-11-06", authenticator=authenticator) workspace_id = assistant_setup.init_skill(assistant) socketio.run(app, host='0.0.0.0', port=int(port))
if len(opcao_com_cor) == 0: return 0, opcao_sem_cor else: return len(opcao_com_cor), opcao_com_cor """# Watson""" ASSISTANT_APIKEY = '6SbPNEcnalIurb5LSlOnqr-DkdZBbWAZugC_JFkYFzUS' ASSISTANT_URL = 'https://api.us-south.assistant.watson.cloud.ibm.com/instances/7f639ca4-251a-4a66-b21f-b020a9c92ce3' ASSISTANT_ID = 'e0482367-a817-420e-b5a3-a27dd800aa0f' from ibm_watson import AssistantV2 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator authenticator = IAMAuthenticator(ASSISTANT_APIKEY) assistant = AssistantV2( version='2020-04-01', authenticator=authenticator ) assistant.set_service_url(ASSISTANT_URL) VISUAL_RECOGNITION_APIKEY = "w3ZEWWwtmKkhjT8cCvmb7Tl7BA8XzEFS-2kYZEhaoG8u" VISUAL_RECOGNITION_URL = 'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/d247f614-69ad-410e-992f-c69df43501b0' CLASSIFIER_ID = 'DefaultCustomModel_962446588' from ibm_watson import VisualRecognitionV3 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator authenticator = IAMAuthenticator(VISUAL_RECOGNITION_APIKEY)
def Service(apikey: str, url: str) -> watson.AssistantV1:
    """Build a Watson AssistantV1 client for the given API key and endpoint."""
    iam_auth = IAMAuthenticator(apikey)
    client = watson.AssistantV1(version=VERSION, authenticator=iam_auth)
    client.set_service_url(url)
    return client
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions

# Service endpoint and API key ('yourAPi' is a placeholder to be replaced).
url_api = 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/8358a9cb-b7e2-46a1-832f-7452ff63d082'
api_key = 'yourAPi'

# Sample Portuguese news text to analyze (kept verbatim — runtime data).
analise_text = '''Desde o início, a diretoria do Nacional não se mostrou disposta a fazer negócio com o Palmeiras. O principal argumento dos colombianos era a pendência que ainda existe da contratação do atacante Borja, em 2017, fato que é contestado pelo clube brasileiro. Em fevereiro, o presidente Juan David Pérez deu uma entrevista declarando que não venderia Muñoz ao Palmeiras por conta desse motivo. Ele chegou a dizer que "aqui não se castra um cachorro duas vezes", em referência ao imbróglio envolvendo a negociação de Borja. A transferência para o Genk foi confirmada pelos dois clubes envolvidos. Muñoz ainda fará exames médicos, quando houver permissão por conta da pandemia do coronavírus, antes de fechar o contrato. Ele era o capitão do Atlético Nacional. Com isso, o Palmeiras segue com Marcos Rocha e Mayke como opções para a lateral direita no elenco.'''

# All authentication and requests are done through the IBM SDK.
# For details see the Watson NLU documentation:
# link => https://cloud.ibm.com/apidocs/natural-language-understanding?code=python#sentiment
# NOTE(review): NaturalLanguageUnderstandingV1 is not imported in this chunk
# — presumably imported earlier in the file; confirm.
auth = IAMAuthenticator(f'{api_key}')
nlu = NaturalLanguageUnderstandingV1(version='2019-07-12', authenticator=auth)
nlu.set_service_url(f'{url_api}')

# Extract up to 5 entities and 10 keywords, each with emotion and sentiment.
response = nlu.analyze(text=f'{analise_text}',
                       features=Features(
                           entities=EntitiesOptions(emotion=True, sentiment=True, limit=5),
                           keywords=KeywordsOptions(emotion=True, sentiment=True, limit=10))).get_result()

# The API call returns a Python dict instead of JSON, so the original code
# first converts the dict to a JSON string with dumps()
# and then converts that JSON back to a Python object with loads().
# Load environment variables from dotenv import load_dotenv import os load_dotenv() from flask import Flask, request, Response # import IBM Watson SDK from ibm_watson import DiscoveryV1 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator import json # Authenticate with the service authenticator = IAMAuthenticator(os.getenv("DISCOVERY_APIKEY")) discovery = DiscoveryV1(version=os.getenv("DISCOVERY_VERSION"), authenticator=authenticator) discovery.set_service_url(os.getenv("DISCOVERY_URL")) # Set up flask server app = Flask("query-service") app.config["DEBUG"] = True # Expose endpoint for query @app.route("/search", methods=["POST"]) def start_query(): try: input_request = request.get_json() query = input_request['query'] resp = {} if query is not None and len(query) > 0:
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator from ibm_watson.natural_language_understanding_v1 import ( Features, CategoriesOptions, KeywordsOptions, ConceptsOptions, ) # load the .env file containing your environment variables for the required load_dotenv(find_dotenv()) """ Initialize NLU Instance with Environment Varibles Stored in .env """ authenticator = IAMAuthenticator(os.environ.get('NATURAL_LANGUAGE_UNDERSTANDING_APIKEY')) natural_language_understanding = NaturalLanguageUnderstandingV1( version="2019-07-12", authenticator=authenticator ) natural_language_understanding.set_service_url(os.environ.get("NATURAL_LANGUAGE_UNDERSTANDING_URL")) """ INPUT: filepath to an input csv file OUTPUT: csv file that contains courses, descriptions, keywords, concepts and subjects Notes: - Make sure that your input file mirrors the formatting of the input file in ./data/discovery-nlu/input/ElementarySchoolClasses.csv """ def extractEntities(input_filepath, output_filepath, course_level):
import json
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Create a Watson Assistant session and print the service's reply as JSON.
authenticator = IAMAuthenticator('a4AqChKZqBJAKxuSH28TtAYQe0iuy2ve1gikX0ZQjdJX')
assistant = AssistantV2(
    version='2020-04-01',
    authenticator=authenticator
)
assistant.set_service_url(
    'https://api.eu-gb.assistant.watson.cloud.ibm.com/instances/d914b609-5302-4173-8376-eec616e99f8f')

# Open a session against the configured assistant.
session_request = assistant.create_session(
    assistant_id='f464c096-684c-4a7e-9de2-d8d8a5748a16')
response = session_request.get_result()

print(json.dumps(response, indent=2))
tag.decompose() ### remove common English stop words and non-alphanumeric characters text = soup.get_text(strip=True).replace(".", " ").replace(",", " ").replace( "is ", " ").replace("a ", " ").replace("the ", " ") print(len(text)) ### remove digits text = re.sub("^\d+\s|\s\d+\s|\s\d+$", " ", text) print(len(text)) text_to_send = " ".join(text.split()) print(len(text_to_send)) authenticator = IAMAuthenticator('API.key') natural_language_understanding = NaturalLanguageUnderstandingV1( version='2019-07-12', authenticator=authenticator) natural_language_understanding.set_service_url('url') print('calling IBM server.....') response = natural_language_understanding.analyze( text=text_to_send, #url='http://emotionanalysis.s3-website-ap-southeast-2.amazonaws.com/', features=Features( #entities=EntitiesOptions(emotion=True, sentiment=True, limit=2), emotion=EmotionOptions(document=True), sentiment=SentimentOptions(document=True)), language='en',
import json
from ibm_watson import VisualRecognitionV4
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Kick off training for a Visual Recognition custom collection and dump the
# service's reply.
authenticator = IAMAuthenticator(
    '-L_ISbMRp4t_bFoGMrQIeTG2n9Qy3pdd1QmKQxGF3al2')
visual_recognition = VisualRecognitionV4(version='2019-02-11',
                                         authenticator=authenticator)
visual_recognition.set_service_url(
    'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/d9acfb96-cfe0-48b5-9859-e26d687fa469'
)

training_request = visual_recognition.train(
    collection_id='d7169bfd-fa76-4d4f-b96a-05e56218631b')
result = training_request.get_result()

print(json.dumps(result, indent=2))
# Transcribe BotAssistent.wav with Watson Speech-to-Text and print the
# first alternative's transcript.
import json
from os.path import join, dirname
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# NOTE(review): hard-coded API key — move to configuration.
authenticator = IAMAuthenticator(
    'VIHZWLSfwy4ysLXTbGDvBXjM0wexJ8hbV76lxcRRhukA')
speech_to_text = SpeechToTextV1(authenticator=authenticator)
speech_to_text.set_service_url(
    'https://api.eu-gb.speech-to-text.watson.cloud.ibm.com/instances/75456471-35c6-4f8d-83f7-77634afd496c'
)

# The wav file lives next to this script.
# FIX: dropped the redundant './.' path component from the original join();
# the resulting path is the same file.
with open(join(dirname(__file__), 'BotAssistent.wav'), 'rb') as audio_file:
    speech_recognition_results = speech_to_text.recognize(
        audio=audio_file, content_type='audio/wav').get_result()

print(json.dumps(speech_recognition_results, indent=2))

# First alternative of the first result; raises IndexError if nothing was
# recognized (same as the original behavior).
word = speech_recognition_results["results"][0]["alternatives"][0][
    "transcript"]
print(word)
import json
from ibm_watson import VisualRecognitionV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Load service credentials from mykey.json, then classify a test image with
# the default Visual Recognition classifier.
with open('mykey.json') as json_file:
    mykey = json.load(json_file)

authenticator = IAMAuthenticator(mykey["key"])
visual_recognition = VisualRecognitionV3(version='2018-03-19',
                                         authenticator=authenticator)
visual_recognition.set_service_url(mykey["url"])

# SSL verification stays enabled (the disable_SSL_verification call and the
# 'explicit' classifier variant were commented out in the original).

with open('../../../datasets/imagens/lions/imagem_test1.jpg',
          'rb') as one_image_file:
    # Keep only classes scoring at least 0.6.
    classes = visual_recognition.classify(
        images_file=one_image_file,
        threshold=0.6,
        classifier_ids='default').get_result()

print(json.dumps(classes, indent=2))
from ibm_watson import LanguageTranslatorV3, ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 \
    import Features,\
    CategoriesOptions,\
    ConceptsOptions,\
    EntitiesOptions,\
    KeywordsOptions,\
    RelationsOptions,\
    EmotionOptions

# Flask app plus three Watson clients: NLU, Discovery and Language
# Translator. The 'APIKEY do seu serviço' / 'url do seu serviço' strings are
# user-facing placeholders ("your service's API key / URL") to be replaced
# with real credentials.
app = Flask(__name__)
app.config.from_object(__name__)
port = int(os.getenv('PORT', 3000))

# Natural Language Understanding client.
authenticatorNLU = IAMAuthenticator('APIKEY do seu serviço')
nlu = NaturalLanguageUnderstandingV1(version='2019-07-12',
                                     authenticator=authenticatorNLU)
nlu.set_service_url('url do seu serviço')

# Discovery client.
authenticatorDiscovery = IAMAuthenticator('APIKEY do seu serviço')
discovery = DiscoveryV1(version='2019-07-12',
                        authenticator=authenticatorDiscovery)
discovery.set_service_url('url do seu serviço')

# Language Translator client.
authenticatorTranslator = IAMAuthenticator('APIKEY do seu serviço')
lt = LanguageTranslatorV3(version='2018-05-01',
                          authenticator=authenticatorTranslator)
lt.set_service_url('url do seu serviço')
import json import os from os.path import join from ibm_watson import PersonalityInsightsV3 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator from dotenv import load_dotenv, find_dotenv # #Load env variables dir_path = os.path.dirname(os.path.realpath(__file__)) load_dotenv(dotenv_path=dir_path + '/ibm-credentials.env') API_KEY = os.getenv('PERSONALITY_INSIGHTS_APIKEY') API_URL = os.getenv('PERSONALITY_INSIGHTS_URL') #Authentication with Watson servers authenticator = IAMAuthenticator(API_KEY) personality_insights = PersonalityInsightsV3(version='2018-08-01', authenticator=authenticator) personality_insights.set_service_url(API_URL) # create directory for saving results if os.path.exists(dir_path + '/personality_results/') == False: os.makedirs(dir_path + '/personality_results/') #list the inputs json_folder = [f for f in os.listdir(dir_path + '/character_jsons/')] #Read and analyze JSON file for each character for json_file in json_folder: with open(dir_path + '/character_jsons/' + json_file) as profile_json: profile = personality_insights.profile( profile_json.read(),
# Application bootstrap: logger, Flask app, and (unless requests are proxied)
# a Watson Assistant client configured from `settings`.
# NOTE(review): Flask / IAMAuthenticator / AssistantV2 are not imported in
# this chunk — presumably imported earlier in the file; confirm.
import settings
import sessions
import action_handler
import traceback

# Configure Logger
LOGGER = settings.get_logger("main")

# Initialize flask
APP = Flask(__name__)
BOT_ID = settings.BOT_ID

# WA is an AssistantV2 client when we talk to Watson directly, otherwise an
# empty dict placeholder (proxy mode).
WA = {}
if not settings.CALL_PROXY:
    authenticator = IAMAuthenticator(settings.WA_IAM_KEY)
    WA = AssistantV2(
        version=settings.WA_VERSION,
        authenticator=authenticator
    )
    # NOTE(review): the collapsed source is ambiguous about whether this
    # opt-out check is nested here or at top level; nested is assumed since
    # a dict WA has no set_default_headers — confirm against the original.
    if settings.WA_OPT_OUT:
        # Ask Watson not to learn from this traffic.
        WA.set_default_headers({'x-watson-learning-opt-out': "true"})

THREADS = {}


def check_auth(headers):
    """Ensures API key is in header when required"""
    auth = headers.get("X-Api-Key")
    return auth == settings.API_KEY
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Spanish Text-to-Speech demo: look up a pronunciation, list the available
# voices, and synthesize a message to osi.mp3.
mensaje = """
Osita bebé, Osita bebé bebé. Osita bebé, e qué la osita e una bebé """

api = IAMAuthenticator("pkvNYLc_ww9nTHZSRq1nbAvEPHXpC5AJPsHWQzDl-taA")
text_2_speech = TextToSpeechV1(authenticator=api)
text_2_speech.set_service_url(
    "https://api.us-south.text-to-speech.watson.cloud.ibm.com/instances/716a8c7f-1668-48bf-afe0-f904d8595e5c"
)

# IBM SPR pronunciation of "IEEE" in the Spanish Enrique voice.
pronunciacion = text_2_speech.get_pronunciation(
    text='IEEE', voice='es-ES_EnriqueV3Voice', format='ibm').get_result()
voices = text_2_speech.list_voices().get_result()

with open("osi.mp3", "wb") as audiofile:
    synthesized = text_2_speech.synthesize(
        mensaje, voice='es-ES_EnriqueV3Voice',
        accept="audio/mp3").get_result()
    audiofile.write(synthesized.content)