Code Example #1
File: textRobot.py Project: marcos-s1/Bots
async def fethWatsonAndReturnKeywords(doc):
    # User authentication
    with open(r'C:\Users\Marcos\Documents\Credenciais\credenciais.json') as json_file:
        dados = json.load(json_file)
        watson_apiKey = dados["Watson_apikey"]
        watson_URL = dados["Watson_url"]
    authenticator = IAMAuthenticator(watson_apiKey)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12',
        authenticator=authenticator
    )
    natural_language_understanding.set_service_url(watson_URL)
    #result = random.sample(range(0,len(doc)), 7)
    keyInformation = []
    for index in range(7):
        response = natural_language_understanding.analyze(
            text=doc[index]['Sentence'],
            features=Features(keywords=KeywordsOptions(sentiment=False, emotion=False, limit=10)),
            language='pt').get_result()
        clean_dictionary = cleanResponse(response)
        info = dict(doc[index], **clean_dictionary)
        keyInformation.append(info)
    return keyInformation
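The function above calls a cleanResponse helper that is not shown in the snippet. A minimal sketch of what such a helper might do, assuming it simply flattens the keyword list out of the raw NLU response (the project's real helper may differ):

def cleanResponse(response):
    # Hypothetical reconstruction: keep only the keyword strings from the raw response.
    return {'Keywords': [kw['text'] for kw in response.get('keywords', [])]}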
Code Example #2
    def run(self):
        c = 0
        authenticator = IAMAuthenticator(
            'EKLHgx0kP2YPgbXIuLhb5LZAvepAjmPitUSpsJoxIGB2')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2020-08-01', authenticator=authenticator)

        # IBM Service URL for your selected region in IBM Cloud
        natural_language_understanding.set_service_url(
            'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/e3f72bb8-213a-482b-84b3-d0b8d0086624'
        )
        with open(os.path.join('Clean_data',
                               'Cleaned_annotation.txt')) as infile:
            for line in infile:
                # Strip only the trailing newline instead of always dropping the last character
                sentence = line.rstrip('\n')
                response = natural_language_understanding.analyze(
                    text=sentence,
                    features=Features(sentiment=SentimentOptions(
                        targets=[sentence])),
                    language='en').get_result()

                if os.path.exists('temp.json'):
                    with open('temp.json') as data_file:
                        data = json.load(data_file)

                    # convert data to a list if it is a single object
                    if isinstance(data, dict):
                        data = [data]

                    # append the new item to the data list
                    data.append(response)

                    # write the list back to the file
                    with open('temp.json', 'w') as outfile:
                        json.dump(data, outfile)

                else:
                    with open('temp.json', 'w') as temp_file:
                        json.dump(response, temp_file)
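One note on the pattern above: rewriting temp.json on every iteration re-reads and re-serializes the whole list each time. A sketch of a JSON Lines alternative (the temp.jsonl file name is an assumption, not from the original):

# Append one JSON object per line; earlier results never need to be rewritten.
with open('temp.jsonl', 'a') as outfile:
    outfile.write(json.dumps(response) + '\n')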
Code Example #3
def retornaKeyword(paragrafo, authenticator):
    '''
    For each paragraph, analyzes the text
    and extracts keywords, which are used
    to search for images.
    (Applies a cutoff based on keyword
    relevance; if the relevance is lower,
    returns None.)
    '''
    natural_language_understanding = NaturalLanguageUnderstandingV1(
      version='2019-07-12',
      authenticator=authenticator
    )
    natural_language_understanding.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/99046149-caff-413c-b9c4-12866481c76f')
    # Uses:
    mostraDatetime()
    print(f'ANALYZING TEXT AND EXTRACTING KEYWORDS FROM "{paragrafo[:20]}..." ...')

    try:
        response = natural_language_understanding.analyze(
            text=paragrafo,
            features=Features(
                keywords=KeywordsOptions(limit=1))).get_result()

        keywords = response['keywords']

        mostraDatetime()
        print('TEXT ANALYZED AND KEYWORDS EXTRACTED :)')
        if keywords[0]['relevance'] >= nota_de_corte_para_imagens:
            return keywords[0]
        else:
            return None
    except Exception:
        mostraDatetime()
        print('AN ERROR OCCURRED WHILE ANALYZING THE TEXT; IT MAY BE TOO SHORT')
        return None
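nota_de_corte_para_imagens is a module-level cutoff defined elsewhere in this project; a hypothetical definition for context (the 0.7 value is an assumption):

# Hypothetical module-level configuration; the project defines the real value elsewhere.
nota_de_corte_para_imagens = 0.7  # minimum keyword relevance to trigger an image search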
Code Example #4
def AnalyzeText(text_to_analyze):
    # Authenticate with IBM Watson
    authenticator = IAMAuthenticator('YOUR AUTHENTICATION STRING FROM IBM CLOUD')
    service = NaturalLanguageUnderstandingV1(version='2019-07-12',
                                             authenticator=authenticator)
    service.set_service_url(
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    # Get the analysis
    dict_object = {}

    try:
        response = service.analyze(text=text_to_analyze,
                                   features=Features(
                                       entities=EntitiesOptions(),
                                       keywords=KeywordsOptions(),
                                   )).get_result()

        return response
    except ApiException as e:
        dict_object["Error"] = e.message
        return dict_object
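An illustrative call (the sample sentence is invented for this sketch):

result = AnalyzeText('IBM opened a research lab in Nairobi in 2013.')
print(result.get('Error', result))  # prints the error message or the full analysis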
Code Example #5
def Watson_categories(article):

    # If service instance provides API key authentication
    service = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        ## url is optional, and defaults to the URL below. Use the correct URL for your region.
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api',
        iam_apikey='VP6Axcp-Hfx_NMyaQpg-imNVEiwyw6E8rznikt1Virxg')

    # service = NaturalLanguageUnderstandingV1(
    #     version='2018-03-16',
    #     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    #     # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
    #     username='******',
    #     password='******')

    response = service.analyze(
        #text='In the rugged Colorado Desert of California, there lies buried a treasure ship sailed there hundreds of years ago by either Viking or Spanish explorers. Some say this is legend; others insist it is fact. A few have even claimed to have seen the ship, its wooden remains poking through the sand like the skeleton of a prehistoric beast. Among those who say they’ve come close to the ship is small-town librarian Myrtle Botts. In 1933, she was hiking with her husband in the Anza-Borrego Desert, not far from the border with Mexico. It was early March, so the desert would have been in bloom, its washed-out yellows and grays beaten back by the riotous invasion of wildflowers. Those wildflowers were what brought the Bottses to the desert, and they ended up near a tiny settlement called Agua Caliente. Surrounding place names reflected the strangeness and severity of the land: Moonlight Canyon, Hellhole Canyon, Indian Gorge. To enter the desert is to succumb to the unknowable. One morning, a prospector appeared in the couple’s camp with news far more astonishing than a new species of desert flora: He’d found a ship lodged in the rocky face of Canebrake Canyon. The vessel was made of wood, and there was a serpentine figure carved into its prow. There were also impressions on its flanks where shields had been attached—all the hallmarks of a Viking craft. Recounting the episode later, Botts said she and her husband saw the ship but couldn’t reach it, so they vowed to return the following day, better prepared for a rugged hike. That wasn’t to be, because, several hours later, there was a 6.4 magnitude earthquake in the waters off Huntington Beach, in Southern California. Botts claimed it dislodged rocks that buried her Viking ship, which she never saw again.There are reasons to doubt her story, yet it is only one of many about sightings of the desert ship. By the time Myrtle and her husband had set out to explore, amid the blooming poppies and evening primrose, the story of the lost desert ship was already about 60 years old. By the time I heard it, while working on a story about desert conservation, it had been nearly a century and a half since explorer Albert S. Evans had published the first account. Traveling to San Bernardino, Evans came into a valley that was “the grim and silent ghost of a dead sea,” presumably Lake Cahuilla. “The moon threw a track of shimmering light,” he wrote, directly upon “the wreck of a gallant ship, which may have gone down there centuries ago.” The route Evans took came nowhere near Canebrake Canyon, and the ship Evans claimed to see was Spanish, not Norse. Others have also seen this vessel, but much farther south, in Baja California, Mexico. Like all great legends, the desert ship is immune to its contradictions: It is fake news for the romantic soul, offering passage into some ancient American dreamtime when blood and gold were the main currencies of civic life. The legend does seem, prima facie, bonkers: a craft loaded with untold riches, sailed by early-European explorers into a vast lake that once stretched over much of inland Southern California, then run aground, abandoned by its crew and covered over by centuries of sand and rock and creosote bush as that lake dried out…and now it lies a few feet below the surface, in sight of the chicken-wire fence at the back of the Desert Dunes motel, $58 a night and HBO in most rooms. Totally insane, right? Let us slink back to our cubicles and never speak of the desert ship again. 
        # Let us only believe that which is shared with us on Facebook. Let us banish forever all traces of wonder from our lives. Yet there are believers who insist that, using recent advances in archaeology, the ship can be found. They point, for example, to a wooden sloop from the 1770s unearthed during excavations at the World Trade Center site in lower Manhattan, or the more than 40 ships, dating back perhaps 800 years, discovered in the Black Sea earlier this year.',
        text=article,
        features=Features(entities=EntitiesOptions(),
                          categories=CategoriesOptions())).get_result()

    return json.dumps(response, indent=2)
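Example #5 passes iam_apikey and url straight to the constructor, which is the older SDK style. A sketch of the equivalent setup in the authenticator style used by the newer examples on this page ('YOUR-API-KEY' is a placeholder):

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('YOUR-API-KEY')  # placeholder credential
service = NaturalLanguageUnderstandingV1(version='2018-03-16',
                                         authenticator=authenticator)
service.set_service_url('https://gateway.watsonplatform.net/natural-language-understanding/api')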
Code Example #6
'''
print(json.dumps(response, indent=2))
'''
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions
import ast
import streamingcollectingtweet as streaming
import collecting_tweet
#import createintent
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='18f34dtBi11noHcG4UES1unofJeGWrewAolawgJQudp9',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api'
)


def jsonformat(input_json):
    # # Transform json input to python objects
    # input_dict = json.loads(input_json)

    # # Filter python objects with list comprehensions
    # output_dict = [x for x in input_dict if ['keywords'] == '#StCatharines']

    # # Transform python object back into json
    # output_json = json.dumps(output_dict)

    # # Show json
    pass  # the body above is entirely commented out; pass keeps the function valid
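Judging by its commented-out body, jsonformat was meant to filter analysis results. A hypothetical completion under that reading (the _sketch suffix marks it as a guess; the '#StCatharines' comparison comes from the comments, and json is imported above):

def jsonformat_sketch(input_json):
    # Transform the JSON input into Python objects
    input_dict = json.loads(input_json)
    # Keep entries whose 'keywords' field matches the target hashtag (assumed intent)
    output_dict = [x for x in input_dict if x.get('keywords') == '#StCatharines']
    # Transform back into JSON and return it
    return json.dumps(output_dict)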
Code Example #7
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import *

# Create a connection to the NLU service
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='w02JlNpjSbT6OBJSjT1dd3Fe4ebXETJz4yp0etEEFmrU',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api'
)

# Invoke NLU to analyze the text at the specified URL
response = natural_language_understanding.analyze(
    url="https://en.wikipedia.org/wiki/SpaceX",  # URL of the page to analyze
    features=Features(  # Indicate what features to look for
        categories=CategoriesOptions(limit=4),  # Look for up to 4 categories
        concepts=ConceptsOptions(limit=10)  # Look for up to 10 concepts
    )).get_result()  # Get the results of the analysis

print()
print(
    "=======================================================================")
print()
print(json.dumps(response, indent=2))
Code Example #8
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import (
    Features,
    EntitiesOptions,
    KeywordsOptions,
)

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version="2018-11-16",
    iam_apikey="wrSPdAvw2f00HR0RrzDePmEbbqKckfMOwZuJiMwWk9sW",
    url="https://gateway.watsonplatform.net/natural-language-understanding/api",
)

response = natural_language_understanding.analyze(
    text="We work directly with our designers and suppliers, and sell direct to you, which means quality, exclusive products, at a price anyone can afford.",
    # "We have no water or are food and my children are starving and very unhappy and stressed.. please help us."
    # "We can't wait that much longer",
    features=Features(
        entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
        keywords=KeywordsOptions(emotion=True, sentiment=True, limit=2),
    ),
).get_result()

print(json.dumps(response, indent=2))
Code Example #9
File: extract.py Project: sambbhavgarg/Healthcare
    def authenticate(self, API_KEY, URL):
        authenticator = IAMAuthenticator(API_KEY)
        service = NaturalLanguageUnderstandingV1(version='2018-03-16',
                                                 authenticator=authenticator)
        service.set_service_url(URL)
        return service
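Illustrative usage; the extractor instance and the environment variable names are assumptions for this sketch:

import os

# `extractor` stands in for an instance of the class that owns authenticate().
service = extractor.authenticate(os.environ['NLU_APIKEY'], os.environ['NLU_URL'])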
Code Example #10
def main(args):

    # Parse incoming request headers
    _c_type, p_dict = parse_header(args['__ow_headers']['content-type'])

    # Decode body (base64)
    decoded_string = b64decode(args['__ow_body'])

    # Set Headers for multipart_data parsing
    p_dict['boundary'] = bytes(p_dict['boundary'], "utf-8")
    p_dict['CONTENT-LENGTH'] = len(decoded_string)

    # Parse incoming request data
    multipart_data = parse_multipart(BytesIO(decoded_string), p_dict)

    text = multipart_data.get('text')

    if text is not None:
        text = text[0]
    else:

        # Build a flac file from the stream of bytes
        with open("audio_sample.flac", 'wb') as fo:
            fo.write(multipart_data.get('audio')[0])

        # Basic Authentication with Watson STT API
        stt_authenticator = BasicAuthenticator(
            'apikey', '9kesqvRxa7lKR3WUkcR81zFXfhrJXXYl3dzobsEWEfCM')

        # Construct a Watson STT client with the authentication object
        stt = SpeechToTextV1(authenticator=stt_authenticator)

        # Set the URL endpoint for your Watson STT client
        stt.set_service_url(
            'https://api.us-south.speech-to-text.watson.cloud.ibm.com')

        # Read audio file and call Watson STT API:
        with open(
                os.path.join(os.path.dirname(__file__), './.',
                             'audio_sample.flac'), 'rb') as audio_file:
            # Transcribe the audio.flac with Watson STT
            # Recognize method API reference:
            # https://cloud.ibm.com/apidocs/speech-to-text?code=python#recognize
            stt_result = stt.recognize(
                audio=audio_file,
                content_type='audio/flac',
                model='pt-BR_BroadbandModel').get_result()

        # Print STT API call results
        print(json.dumps(stt_result, indent=2))

        # Return a dictionary with the transcribed text
        transcript = stt_result['results'][0]['alternatives'][0]['transcript']
        text = transcript

    # NLU service
    nlu_apikey = "rv_tVs7qTDhdlEtEwcY-D11AGDdI6VVkVh8RaZj1jwQ-"
    nlu_service_url = "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com"
    nlu_entity_model = "187ea1d8-ea11-4285-b96e-32db2996a9aa"

    # Create an authenticator
    nlu_authenticator = IAMAuthenticator(apikey=nlu_apikey)

    # Create the service with that authenticator
    nlu_service = NaturalLanguageUnderstandingV1(
        version='2018-03-16', authenticator=nlu_authenticator)

    # Set the service URL
    nlu_service.set_service_url(nlu_service_url)

    # The analyze method does all the work
    nlu_response = nlu_service.analyze(
        text=text,
        features=Features(
            entities=EntitiesOptions(model=nlu_entity_model, sentiment=True)),
        language='pt').get_result()

    return {
        'result':
        json.dumps(nlu_response['entities'][0], indent=2, ensure_ascii=False)
    }
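The handler serializes only the first detected entity (entities[0]). A small helper sketch that returns them all instead (the name format_entities is invented for this sketch):

def format_entities(nlu_response):
    # Sketch: expose every detected entity, not just the first one.
    return {
        'result': json.dumps(nlu_response['entities'], indent=2, ensure_ascii=False)
    }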
Code Example #11
    def test_missing_credentials(self):
        with pytest.raises(ValueError):
            NaturalLanguageUnderstandingV1(version='2016-01-23')
        with pytest.raises(ValueError):
            NaturalLanguageUnderstandingV1(version='2016-01-23')
Code Example #12
import requests
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, SentimentOptions, EmotionOptions

#keys
newskey = "4430c65cad4d4b8b9a5660b18c1d0dfe"  #powered by newsapi.org
watsonkey = "g-YqWDDroCfNkNDv0RbMhdNNxKUHNeflvivnFqw29V3c"
watsonurl = "https://gateway-lon.watsonplatform.net/natural-language-understanding/api"

#functions
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', iam_apikey=watsonkey, url=watsonurl)

#search news parameters
country = ""
category = "business"  #business entertainment general health science sports technology
q = "bitcoin"
sources = ""  #or see https://newsapi.org/sources
searchin = "everything"  # "top-headlines" or "everything"
maxresults = 4


def search():
    if searchin == "everything":
        if category:  # was 'category == True', which is never true for a string
            newsurl = ('https://newsapi.org/v2/' + searchin + "?category=" +
                       category + "&q=" + q + "&apiKey=" + newskey +
                       "&pageSize=100")
        else:
            newsurl = ('https://newsapi.org/v2/' + searchin + "?sources=" +
                       sources + "&q=" + q + "&apiKey=" + newskey +
Code Example #13
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, CategoriesOptions

NLU_KEY = "7AGk8HyuLvEairikmsN1hkUwo9Xz7TzmB4ggaPxUOtvd"
NLU_URL = "https://gateway.watsonplatform.net/natural-language-understanding/api"
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16', iam_apikey=NLU_KEY, url=NLU_URL)

response = natural_language_understanding.analyze(
    text="""
       Hola amigo, se que me necesitas""",
    language="es",
    features=Features(categories=CategoriesOptions(limit=5))).get_result()

print(json.dumps(response, indent=2))
Code Example #14
from __future__ import print_function
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions

# If service instance provides API key authentication
service = NaturalLanguageUnderstandingV1(
    version='2018-03-16',
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    url=
    'https://gateway-fra.watsonplatform.net/natural-language-understanding/api',
    iam_apikey='YOUR-API-KEY')

# service = NaturalLanguageUnderstandingV1(
#     version='2018-03-16',
#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
#     # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
#     username='******',
#     password='******')

response = service.analyze(
    text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
    'Superman fears not Banner, but Wayne.',
    features=Features(entities=EntitiesOptions(),
                      keywords=KeywordsOptions())).get_result()

print(json.dumps(response, indent=2))
Code Example #15
from __future__ import print_function
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions

# If service instance provides API key authentication
service = NaturalLanguageUnderstandingV1(
    version='2018-03-16',
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    url='https://gateway.watsonplatform.net/natural-language-understanding/api',
    iam_apikey='DBxOesEcwYTQK9-dvcaxTwBICWk0s3RwwEW6m-2eppDn')

# service = NaturalLanguageUnderstandingV1(
#     version='2018-03-16',
#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
#     # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
#     username='******',
#     password='******')

response = service.analyze(
    text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
    'Superman fears not Banner, but Wayne.',
    features=Features(entities=EntitiesOptions(),
                      keywords=KeywordsOptions())).get_result()

print(json.dumps(response, indent=2))
Code Example #16
def home(request):
    try:
        authenticator = IAMAuthenticator(
            '-GEDGacgnI36ctk77Aa4X5k3PAXBA_AaRQIxp6G71sOP')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/b61e5fb9-726b-4cba-8b4b-12f1403ed4a1'
        )

        # ii = "Hello, I'm having a problem with your service. Nothing is working well. The service here is very bad. I am really very upset. I was expecting better than that. And my service has been stopped since yesterday. I have been suffering from this problem for a long time and cannot find a solution. The service here is bad most of times. Why you do not solve these problems. Some had left your service for this reason. The network is weak all the time, and it stops at the call. why this happen!? I wait. I'm fed up with complaining from the service."
        # ii = "Hello, I need some help. I've subscribed to some news services and want to cancel them.They were not helpful with me plus they used a lot of balance. I feel bad because I used this service. Please remove it and try to improve these services. It has more harm than good. I hope to improve some services and offer some offers soon. I have another problem. My service has been disabled since yesterday. I have been suffering from this problem for a different times and cannot find a solution. It affects my work and communication in some important times."
        ii = request.POST['text']
        response1 = natural_language_understanding.analyze(
            text=ii,
            features=Features(emotion=EmotionOptions(
                targets=[ii.split()[1]]))).get_result()

        response2 = natural_language_understanding.analyze(
            text=ii,
            features=Features(sentiment=SentimentOptions(
                targets=[ii.split()[1]]))).get_result()
        global sad, joy, fear, disgust, anger, sentiment_label, sentiment
        sad = response1['emotion']['document']['emotion']['sadness']
        joy = response1['emotion']['document']['emotion']['joy']
        fear = response1['emotion']['document']['emotion']['fear']
        disgust = response1['emotion']['document']['emotion']['disgust']
        anger = response1['emotion']['document']['emotion']['anger']
        sentiment_label = response2['sentiment']['document']['label']
        sentiment = response2['sentiment']['document']['score']

        ####################################################################

        data = pd.read_csv(
            "/Users/Ameen/Desktop/CV-projects/emotions/emotions/loyalty/dataset/final_dataset.csv"
        )
        X_train, X_test, y_train, y_test = train_test_split(
            data[["sadness", "joy", "fear", "disgust", 'anger', 'score']],
            data["label_state"],
            test_size=0.4)
        lsvm = LinearSVC()
        prid = lsvm.fit(X_train, y_train)
        accuracy = lsvm.score(X_test, y_test)
        # print(accuracy)
        out = lsvm.predict(X_test)
        from sklearn.metrics import classification_report
        # print(classification_report(out, y_test))
        lls = [sad, joy, fear, disgust, anger, sentiment]
        predict = lsvm.predict([lls])
        ss = predict
        if predict == [0]:
            predict = "leave"
        else:
            predict = "stay"

        form = file_form()
        context = {
            'sad': sad,
            'joy': joy,
            'fear': fear,
            'disgust': disgust,
            'anger': anger,
            'sentiment_label': sentiment_label,
            'sentiment': sentiment,
            'predict': predict,
            'form': form
        }
        return render(request, 'temp.html', context)
    except Exception as e:
        form = file_form()
        messages.error(request, e, extra_tags='error')
        return render(request, 'temp.html', {'form': form})
Code Example #17
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, MetadataOptions, ConceptsOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Authentication via IAM
authenticator = IAMAuthenticator('fvqo2-7Ubmnf5rmImg7ycQkj3IW9Op2J3bQtsCmqLqf_')
service = NaturalLanguageUnderstandingV1(
      version='2018-03-16',
      authenticator=authenticator)
service.set_service_url('https://gateway.watsonplatform.net/natural-language-understanding/api')

# Authentication via external config like VCAP_SERVICES
# service = NaturalLanguageUnderstandingV1(
#    version='2018-03-16')
# service.set_service_url('https://gateway.watsonplatform.net/natural-language-understanding/api')

response = service.analyze(
    url='https://en.wikipedia.org/wiki/Fortnite',
    features=Features(metadata=MetadataOptions()),
    return_analyzed_text=True).get_result()


print(json.dumps(response, indent=2))
print(response['analyzed_text'])

Code Example #18
File: NLU.py Project: Sailer43/CSE5914Project
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12',
    iam_apikey='k7068psCXo7VgHmODIQnaL9N0j4zxD7NdnLi4GtJhjkE',
    url=
    'https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
)


def nlu(text):  # parameter renamed from 'str' to avoid shadowing the built-in
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2))).get_result()

    return {
        "entities": response.get("entities"),
        "keywords": response.get("keywords")
    }
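An illustrative call (the sample sentence is invented for this sketch):

print(nlu('I love the new design, but the late delivery was disappointing.'))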
Code Example #19
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, EmotionOptions

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12',
    iam_apikey='1erf8hkp6tJpTAVzh8_F7HmyRTWG6Rn9A567j5lyAcaM',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api'
)
##rachel
response = natural_language_understanding.analyze(
    text=
    'nothing happen really making together looking around trying you.\\n talking friends anything coming him.\\n probably everything too.\\n really excuse rachel alright forget actually something sounds course okay.\\n started wanted little thought this.\\n supposed okay.\\n getting sorry.\\n happened laughing apartment starts matter listen monica know.\\n chandler right.\\n yeah.\\n birthday pheebs please people everybody phoebe crying amazing anyway believe person picture stupid already thanks thinking remember almost married somebody someone here.\\n phoebe better another telling points out.\\n that.\\n pretty minute things beautiful laughs always enough couple wedding coffee yknow everyone saying thank totally oh.\\n taking second father seeing though entering called tomorrow office leaves bedroom friend monica listens joshua',
    features=Features(emotion=EmotionOptions())).get_result()

print(json.dumps(response, indent=2))

##ross
response = natural_language_understanding.analyze(
    text=
    'amazing that.\\n anything alright please looking start rachel everything minute wanted another taking actually thought always talking nothing happened okay.\\n friend chandler really this.\\n saying thing happen people totally coming little tomorrow someone making laugh second together married okay.\\n course stupid couple probably something monica laughing believe leaf really everyone birthday point thank listens already remember sound better pheebs you.\\n telling seeing around beautiful thinking pretty getting trying yeah.\\n phoebe out.\\n him.\\n entering here.\\n somebody excuse anyway supposed everybody know enough coffee listen monica apartment called matter sorry.\\n started wedding thanks father cry almost friend phoebe forget though too.\\n know.\\n picture right.\\n bedroom person oh.\\n office',
    features=Features(emotion=EmotionOptions())).get_result()

print(json.dumps(response, indent=2))

##monica
response = natural_language_understanding.analyze(
    text=
    'everything stupid minute phoebe someone him.\\n this.\\n you.\\n believe rachel somebody chandler monica people person tomorrow anything really friend listen everyone everybody around looking thanks thank seeing thing pretty getting something entering called always actually coming please friend remember laughing that.\\n listens alright leaf nothing start another started picture point supposed birthday phoebe thought right.\\n happen course sound trying wedding making couple really laugh totally pheebs better talking sorry.\\n enough little forget already almost cry probably yknow taking bedroom apartment here.\\n happened amazing thinking beautiful too.\\n okay.\\n know.\\n wanted matter out.\\n though father yeah.\\n okay.\\n together anyway saying second excuse married monica oh.\\n telling office coffee joshua',
    features=Features(emotion=EmotionOptions())).get_result()
Code Example #20
import csv
import urllib.request
import os
import tweepy
from imageai.Detection import ObjectDetection
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, CategoriesOptions, SentimentOptions
from textblob import TextBlob
from spacy.pipeline import EntityRuler
import spacy
from TaxonomySearcher import TaxonomySearcher

service = NaturalLanguageUnderstandingV1(
    version='2018-03-16',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api',
    iam_apikey='3lu4pe1GCX_ey2JIq5fTh5MjccYo0peTs8PgUCOW6Jw8')

file = open('user-id-targetTweet-PastTweets.csv', 'r', newline='')
writefile = open('user-id-sentiment-category_and_score', 'a', newline='')
writer = csv.writer(writefile)

reader = csv.reader(file)
big_list = list(reader)
csvRow2 = [""]
csvRow3 = [""]
consumer_key = "uqKb1h9prIwbAVCqocBuqInFs"
consumer_secret = "EXlWGr7VFTGJ00116M25mDWyNveORVkHVPGXHaAOsg1lwFUQn8"
access_token = "2388347288-uEH2UbQnr2uZYCZDuvh93wD8UHZ3PMB15diH9tK"
access_token_secret = "RCXSN3rj4m04ECekNo3DnF2u7B4G7AJauZXs3DmbX14dc"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
Code Example #21
    def test_analyze_throws(self):
        authenticator = BasicAuthenticator('username', 'password')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             authenticator=authenticator)
        with pytest.raises(ValueError):
            nlu.analyze(None, text="this will not work")
Code Example #22
import numpy
import string
from watson_developer_cloud import ToneAnalyzerV3

import paralleldots as pd

from deepsegment import DeepSegment

from ibm_watson import NaturalLanguageUnderstandingV1

from ibm_watson.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12',
    iam_apikey='oRW-WiI73HQMQxq0mVZnPJzN3UFwX4-9oD-XpjLjqUNi',
    url=
    'https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
)

segmenter = DeepSegment('en')

pd.set_api_key("Mf5Rgw0kBSWSNThFQxKYbEQvPgKgrexUKqPEPDMwGkM")

tone_analyzer = ToneAnalyzerV3(
    version='2017-09-21',
    iam_apikey='E8dobLcUUvh7NZU6MzpFv-GDUiIuEmOV43vQIWSNO0tE',
    url='https://gateway-wdc.watsonplatform.net/tone-analyzer/api')

import sys
import nltk
from youtube_transcript_api import YouTubeTranscriptApi
Code Example #23
import ast
import os

import operator
from functools import reduce
from io import StringIO
import numpy as np
from os.path import join, dirname
import requests
import re
import nltk
from nltk import word_tokenize, sent_tokenize, ne_chunk
from flask import Flask
from ibm_watson import NaturalLanguageUnderstandingV1

app = Flask(__name__)

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-08-14', )


@app.route("/")
def indexpage():
    news_details = dict()
    result = '{"name":"Bank EWS","children":[{"name":"Management Default","children":[]},{"name":"Management Change","children":[]},{"name":"Credit Rating","children":[]},{"name":"Financial Analysis","children":[]},{"name":"Capital Adequacy","children":[]},{"name":"Legal Action","children":[]},{"name":"Strike","children":[]},{"name":"Loan Servicing","children":[]},{"name":"Share Price Deviation","children":[]},{"name":"Auditors Change","children":[]},{"name":"Terminal Disablement","children":[]},{"name":"Security Related","children":[]}]}'

    config_file = open("config.json", "r")
    config = config_file.read()

    news_details, rel = discover_news_related_entities('director resign',
                                                       'Management Change',
                                                       news_details, result,
                                                       config, 'NO', 'NO')
    news_details, rel = discover_news_related_entities('loan default',
Code Example #24
import os
import re
from PIL import Image
from os import path
from wordcloud import WordCloud
import matplotlib.pyplot as plt

import matplotlib.cm as cm
import numpy as np
plt.rcParams.update({'font.size': 16})

"""IBM Natural Language Understanding API Credentials"""
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='JxvnihDojci02S25uZkusc79w8tkmJxEuohB4ooGBMdz',
    url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
)

"""Computation using Natural Language API"""
def compute_all_features(text):
    response = natural_language_understanding.analyze(
                    text=text,
                    features=Features(entities=EntitiesOptions(),
                                      keywords=KeywordsOptions(sentiment=True, emotion=True),
                                      semantic_roles=SemanticRolesOptions(keywords=True,entities=True),
                                      relations=RelationsOptions()),
                    language='en').get_result()
    key_sent = {}
    for words in response['keywords']:
        key_sent[words['text']] = words['sentiment']['score']
Code Example #25
    if not submission.stickied:
        df["ups"].append(submission.ups)
        df["downs"].append(submission.downs)
        df["author"].append(submission.author.name)
        df["body"].append(submission.selftext)
        df["time"].append(get_date(submission.created_utc))
        df["title"].append(submission.title)
        df["score"].append(submission.score)
        df["comms_num"].append(submission.num_comments)
        numData += 1
#data_df = cudf.DataFrame(df)
#print(data_df.to_pandas().to_csv(keyword+'/'+keyword+'.csv', index=False))

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='tjuSDKzozH6xkacB9sj9o1Ke-TiRClcnpVTGvisJMtwo',
    url=
    'https://gateway-tok.watsonplatform.net/natural-language-understanding/api'
)

df_nlu = {
    "time": [],
    "score": [],
    "sentiment": [],
    "sadness": [],
    "joy": [],
    "fear": [],
    "disgust": [],
    "anger": []
}

i = 0
Code Example #26
import json
import argparse
import io
import os

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12',
    iam_apikey='***',
    url=
    'https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
)

response = natural_language_understanding.analyze(
    text=
    "Here’s a story you won’t see on CNN: President Trump's ambassador to the Bahamas Papa Doug Manchester has pledged $1 million of his own money for hurricane Dorian relief efforts. God bless this man. God bless this administration. God bless the Bahamas. RT!",
    features=Features(entities=EntitiesOptions(emotion=True,
                                               sentiment=True,
                                               limit=20),
                      keywords=KeywordsOptions(emotion=True,
                                               sentiment=True,
                                               limit=2))).get_result()

print(json.dumps(response, indent=2))
Code Example #27
import tweepy
import TwitterKey  #Confidential files
import datetime
import mysql.connector
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, EmotionOptions, SentimentOptions

# connection of IBM Watson Studio
authenticator = IAMAuthenticator(
    '3YVTTrpXHhpeouEsvNLJoQhRJm20aHGzCiS5oMnl0Ijr')
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', authenticator=authenticator)

natural_language_understanding.set_service_url(
    'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/93a3cf90-92c7-4d86-99aa-762e9b1dffb5'
)

# Credentials of twitter
consumerKey = TwitterKey.twitter_key["consumerKey"]
consumerSecret = TwitterKey.twitter_key["consumerSecret"]
accessKey = TwitterKey.twitter_key["accessKey"]
accessSecret = TwitterKey.twitter_key["accessSecret"]

# Authenticate Object
authenticate = tweepy.OAuthHandler(consumerKey, consumerSecret)
authenticate.set_access_token(accessKey, accessSecret)
api = tweepy.API(authenticate, wait_on_rate_limit=True)

# dictionary of longitude,latitude,radius of cities
cities = {
Code Example #28
import io # for reading file
import json # for printing json

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions

natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', # what are the possible 'versions'?
    iam_apikey='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api'
)
file_name = 'in.txt'
with io.open(file_name, 'r') as in_file:
    text = in_file.read()

response = natural_language_understanding.analyze(
    text=text,
    features=Features(
        keywords=KeywordsOptions()
    )
).get_result()

# need all this encode decode stuff because otherwise dumps() prints \u4e0b\u95a2
print(json.dumps(response, indent=2, ensure_ascii=False).encode('utf8').decode())
Code Example #29
import json
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions

# changing system path to import the config module
import os, sys
sys.path.insert(1, os.getcwd())
from config import config_nlu_api, config_nlu_api_url

api = config_nlu_api
api_url = config_nlu_api_url

# print(api)
# print(api_url)

authenticator = IAMAuthenticator(api)
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2020-08-01', authenticator=authenticator)

natural_language_understanding.set_service_url(api_url)

path_to_file = './entries_txt/20201126_TTG1188_otter.ai.txt'

with open(path_to_file) as f:
    contents = f.readlines()

response = natural_language_understanding.analyze(
    # url='www.ibm.com',
    text=' '.join(contents),
    features=Features(keywords=KeywordsOptions(
        sentiment=True, emotion=True, limit=2))).get_result()

print(json.dumps(response, indent=2))
Code Example #30
import json
from django import forms
from blog.models import Post

from ibm_watson import NaturalLanguageUnderstandingV1
#from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions, EntitiesOptions, CategoriesOptions, EmotionOptions, SentimentOptions

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('')
service = NaturalLanguageUnderstandingV1(version='2019-07-12',
                                         authenticator=authenticator)


class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = (
            'title',
            'text',
        )


class Analise():
    def watson(input_text):
        response = service.analyze(
            text=input_text,
            features=Features(sentiment=SentimentOptions(),
                              keywords=KeywordsOptions())).get_result()
        print(json.dumps(response, indent=2))
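As written, watson prints the analysis but returns None; a one-line follow-up sketch (an addition, not part of the original) so callers such as a Django view can use the result:

        return response  # sketch: hand the analysis back to the caller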