Example #1
 def calculate_extensions(self):
     return  # skipped for now: the NLU/NLT calls below cost money that we don't have
     tweet_text = self.tweet.text
     response = "Languange not supported"
     try:
         if self.tweet.lang == 'ar':
             response = NLU.analyze(text=tweet_text, features=Features(categories=CategoriesOptions(limit=1)),
                                    language='ar').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             translated = NLT.translate(text=tweet_text, model_id='ar-en', source='ar', target='en').get_result()
             translated = translated['translations'][0]['translation']
             response = NLU.analyze(text=translated, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       ), language='en').get_result()
             self.extract_englishonly_catagories(response)
         elif self.tweet.lang == 'en':
             response = NLU.analyze(text=tweet_text, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       categories=CategoriesOptions(limit=1),
                                                                       ), language='en').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             self.extract_englishonly_catagories(response)
     except ApiException as ex:
         print("error in calculate_AI_things")
         print(exc)
         return
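Example #1 assumes module-level `NLU` (Natural Language Understanding) and `NLT` (Language Translator) clients plus the feature-option imports. A minimal setup sketch, assuming the ibm-watson 4.x SDK; the credentials and regional URLs below are placeholders to substitute with your own:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import ApiException, LanguageTranslatorV3, NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import (
    CategoriesOptions, ConceptsOptions, EntitiesOptions, Features, KeywordsOptions)

# Placeholder credentials -- replace with your own API keys and service URLs.
NLU = NaturalLanguageUnderstandingV1(
    version='2019-07-12', authenticator=IAMAuthenticator('YOUR_NLU_APIKEY'))
NLU.set_service_url(
    'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')

NLT = LanguageTranslatorV3(
    version='2018-05-01', authenticator=IAMAuthenticator('YOUR_TRANSLATOR_APIKEY'))
NLT.set_service_url('https://api.us-south.language-translator.watson.cloud.ibm.com')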
Example #2
 def get_api_response(self):
     if self.API_options == "aspects":
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(entities=EntitiesOptions(emotion=True,
                                                        sentiment=True,
                                                        limit=10000),
                               keywords=KeywordsOptions(
                                   emotion=True,
                                   sentiment=True,
                                   limit=10000))).get_result()
     elif self.API_options == "overall":
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(sentiment=SentimentOptions(
                 document=True))).get_result()
     else:
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(
                 entities=EntitiesOptions(emotion=True,
                                          sentiment=True,
                                          limit=10000),
                 keywords=KeywordsOptions(emotion=True,
                                          sentiment=True,
                                          limit=10000),
                 sentiment=SentimentOptions(document=True))).get_result()
Example #3
def getFeatFromText(text):
    authenticator = IAMAuthenticator(
        'YOUR_IAM_APIKEY')  # hard-coded key removed; supply your own credential
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(
        'https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )

    # response = natural_language_understanding.analyze(
    #     text=text,
    #     features=Features(keywords=KeywordsOptions(sentiment=True, emotion=True, limit=5))).get_result()

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(keywords=KeywordsOptions(
            emotion=True, sentiment=False, limit=6))).get_result()

    print(json.dumps(response, indent=2))

    jsonResponse = response['keywords']

    list_keywords = []

    for jso in jsonResponse:
        if jso['relevance'] > 0.5:
            list_keywords.append(EntityProb(jso['text'], jso['relevance']))

    print([obj.getEntity() for obj in list_keywords])

    return list_keywords
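getFeatFromText wraps each keyword in an EntityProb, which is not defined in the snippet. A plausible minimal definition, purely an assumption for illustration:

class EntityProb:
    """Hypothetical container pairing a keyword with its relevance score."""

    def __init__(self, entity, prob):
        self.entity = entity
        self.prob = prob

    def getEntity(self):
        return self.entity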
Example #4
    def select_relevant_sentences(lista_de_sentenças):
        service = NaturalLanguageUnderstandingV1(
            version=Watson_credentials['version'],
            url=Watson_credentials['url'],
            iam_apikey=Watson_credentials['apiKey']
        )  # Watson authentication for the natural-language-understanding service

        sentenças_selecionadas = []

        for sentença in lista_de_sentenças:
            response = service.analyze(
                text=sentença,
                features=Features(keywords=KeywordsOptions()),
                language='en').get_result()

            for key_words in response['keywords']:

                if len(sentenças_selecionadas) < 10:

                    if key_words['relevance'] > 0.97:
                        sentenças_selecionadas.append([sentença, key_words])
                        break  # stop iterating over the key_words


            if len(sentenças_selecionadas) >= 10:
                break  # stop iterating over the sentences

        return sentenças_selecionadas
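Example #4 builds the client with the pre-4.0 SDK keywords (url=, iam_apikey=), which ibm-watson 4.x removed. An equivalent construction for the current SDK, assuming Watson_credentials holds the same fields:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1

service = NaturalLanguageUnderstandingV1(
    version=Watson_credentials['version'],
    authenticator=IAMAuthenticator(Watson_credentials['apiKey']))
service.set_service_url(Watson_credentials['url'])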
Example #5
def sentimentKeywordsWatson(file):

    df = pd.read_csv(file, sep=';')
    df['score'] = None
    df['sentiment'] = None
    df['keywords'] = None

    authenticator = IAMAuthenticator(config('apikey'))
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(config('url'))

    for i in range(len(df)):
        print('Applying AI... fetching keywords for row {}'.format(i))
        tweet = df.loc[i, 'text_sanitized']

        try:
            response = natural_language_understanding.analyze(
                text=tweet,
                features=Features(keywords=KeywordsOptions(
                    limit=1))).get_result()

            # get_result() already returns a dict, so no JSON round-trip is needed
            keywords = [kw['text'].lower() for kw in response['keywords']]

            # .at stores the list in a single object-dtype cell; .loc would
            # try to broadcast the list and raise a ValueError
            df.at[i, 'keywords'] = keywords

        except Exception:
            pass  # skip tweets the API cannot analyze

    for i in range(len(df)):
        print('Applying AI... computing score and sentiment for row {}'.format(i))
        text_sanitized = df.loc[i, 'text_sanitized']

        try:
            response = natural_language_understanding.analyze(
                text=text_sanitized,
                features=Features(sentiment=SentimentOptions())).get_result()

            df.loc[i, 'score'] = response['sentiment']['document']['score']
            df.loc[i, 'sentiment'] = response['sentiment']['document']['label']

        except Exception:
            pass  # leave score/sentiment empty when the API call fails

    file_name = 'dataSet_final.csv'
    df.to_csv(file_name, sep=';')

    print('File {} generated successfully!'.format(file_name))
Example #6
def main():

    # Example service setup
    server = 'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com'
    IMAGE_API_KEY = os.getenv('NARURALANALYSERAPI')
    naturalLanguageAnalyser = NaturalLanguageUnderstandingV1(
        version='2018-03-19', authenticator=IAMAuthenticator(IMAGE_API_KEY))
    naturalLanguageAnalyser.set_service_url(server)

    #Example text
    text = 'Team, I know that times are tough! Product '\
           'sales have been disappointing for the past three '\
           'quarters. We have a competitive product, but we '\
           'need to do a better job of selling it!'

    response = naturalLanguageAnalyser.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(mentions=True,
                                                   emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          emotion=EmotionOptions(),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          relations=RelationsOptions(),
                          syntax=SyntaxOptions(sentences=True))).get_result()
    print("Start")
    print(json.dumps(response, indent=2))
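The syntax feature in main() only requests sentence boundaries. Token-level output (lemmas, parts of speech) is available through SyntaxOptionsTokens; a sketch, reusing the same client and text:

from ibm_watson.natural_language_understanding_v1 import (
    Features, SyntaxOptions, SyntaxOptionsTokens)

response = naturalLanguageAnalyser.analyze(
    text=text,
    features=Features(syntax=SyntaxOptions(
        sentences=True,
        tokens=SyntaxOptionsTokens(lemma=True, part_of_speech=True)))).get_result()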
Example #7
def callback():
    global call_results
    add_ons = json.loads(request.values['AddOns'])

    if 'ibm_watson_speechtotext' not in add_ons['results']:
        return 'Add Watson Speech to Text add-on in your Twilio console'

    payload_url = add_ons["results"]["ibm_watson_speechtotext"]["payload"][0]["url"]

    account_sid = twilio_account_sid
    auth_token = twilio_auth_token

    resp = requests.get(payload_url, auth=(account_sid, auth_token)).json()
    results = resp['results'][0]['results']

    # build a list, not a map iterator: the original iterator was exhausted by
    # the first join, so the function returned an empty string
    transcripts = [res['alternatives'][0]['transcript'] for res in results]

    call_results = ''.join(transcripts)
    response = natural_language_understanding.analyze(
                text=call_results,
                features=Features(
                    entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
                    keywords=KeywordsOptions(emotion=True, sentiment=True,
                                             limit=2))).get_result()
    print(json.dumps(response, indent=2))

    return call_results
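callback() reads Flask's request and returns a response body, so it is presumably a view behind a Twilio recording webhook; it also assumes module-level natural_language_understanding, twilio_account_sid, and twilio_auth_token objects. A minimal wiring sketch (the route path is an assumption):

from flask import Flask, request  # `request` is what callback() reads

app = Flask(__name__)

# Hypothetical wiring: point the Twilio recording callback at this URL.
app.add_url_rule('/callback', 'callback', callback, methods=['POST'])

if __name__ == '__main__':
    app.run(port=5000)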
Example #8
def analyzeSampleMessages_Default(questions_problems_text, nlu):
    results_list = []
    for message in questions_problems_text:
        result = nlu.analyze(
            text=message,
            features=Features(
                keywords=KeywordsOptions(),
                semantic_roles=SemanticRolesOptions())).get_result()
        actions_arr = []
        keywords_arr = []
        for keyword in result["keywords"]:
            keywords_arr.append(keyword["text"])
        if ("semantic_roles" in result):
            for semantic_result in result["semantic_roles"]:
                if ("action" in semantic_result):
                    actions_arr.append(semantic_result["action"]["normalized"])
        results_list.append({
            "header":
            "-------------------------------------------------------------",
            "message": message,
            "actions": actions_arr,
            "keywords": keywords_arr,
            "spacer": ""
        })
    return results_list
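A usage sketch for analyzeSampleMessages_Default, assuming an authenticated nlu client like the ones built in the other examples:

messages = [
    'My laptop will not boot after the latest update.',
    'How do I reset my account password?',
]
for entry in analyzeSampleMessages_Default(messages, nlu):
    print(entry['header'])
    print(entry['message'])
    print('actions:', entry['actions'])
    print('keywords:', entry['keywords'])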
Example #9
def extract(data):
    natural_language_understanding.set_service_url(service_url)
    response = natural_language_understanding.analyze(
        text=data,
        features=Features(keywords=KeywordsOptions(
            sentiment=True, emotion=False, limit=25))).get_result()
    return response
Example #10
def extractEntities(input_filepath, output_filepath):
    df = pd.read_csv(input_filepath)
    (rows, _) = df.shape
    for idx in range(0, rows, 1):
        hotline_url = df["URL"][idx]
        nlu_categories = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(categories=CategoriesOptions())).get_result()
        nlu_keywords = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(
                keywords=KeywordsOptions(sentiment=True, emotion=True)),
        ).get_result()
        nlu_concepts = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(concepts=ConceptsOptions())).get_result()
        categories_list = list(
            map(lambda x: x["label"], nlu_categories["categories"]))
        keywords_list = list(map(lambda x: x["text"],
                                 nlu_keywords["keywords"]))
        concepts_list = list(map(lambda x: x["text"],
                                 nlu_concepts["concepts"]))
        categories_list_extracted = list(
            map(lambda x: x.split("/")[1:], categories_list))
        categories_list_flattened = list(
            set(list(itertools.chain(*categories_list_extracted))))
        # If there are not enough concepts add keywords to the list
        if len(concepts_list) < 3:
            concepts_list = concepts_list + keywords_list
        df["Concepts"][idx] = concepts_list
        df["Subject"][idx] = categories_list_flattened
    df.to_csv(output_filepath, index=False)
    return df
Example #11
def nlp_watson(url):
    print(url)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='',
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    try:
        response = natural_language_understanding.analyze(
            url=url,
            features=Features(categories=CategoriesOptions(limit=15),
                              concepts=ConceptsOptions(limit=10),
                              entities=EntitiesOptions(sentiment=True,
                                                       limit=20),
                              keywords=KeywordsOptions(sentiment=True,
                                                       emotion=True,
                                                       limit=5),
                              metadata=MetadataOptions())
            #relations=RelationsOptions()),
        ).get_result()

        data = json.dumps(response, indent=2)
        # new = json.loads(response)
        # print(data)
        db = client.asalvo
        news = db.news
        new_id = news.insert_one(response).inserted_id
        # print(new_id)
    except Exception:
        print('Error occurred')

    return 0
Example #12
 def watson(input_text):
     response = service.analyze(
         text=input_text,
         features=Features(sentiment=SentimentOptions(),
                           keywords=KeywordsOptions())).get_result()
     print(json.dumps(response, indent=2))
     return json.dumps(response, indent=2)
Example #13
 def test_analyze(self):
     response = self.natural_language_understanding.analyze(
         text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
         'Superman fears not Banner, but Wayne.',
         features=Features(entities=EntitiesOptions(),
                           keywords=KeywordsOptions())).get_result()
     assert response is not None
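The test assumes a self.natural_language_understanding fixture. A minimal unittest setup sketch, with a placeholder API key (a real suite would read the credential from the environment):

import unittest

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1


class TestAnalyze(unittest.TestCase):
    def setUp(self):
        authenticator = IAMAuthenticator('YOUR_APIKEY')  # placeholder
        self.natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)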
Example #14
def demo():

    global call_results
    data = natural_language_understanding.analyze(
        text=call_results,
        features=Features(
            entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
            keywords=KeywordsOptions(emotion=True, sentiment=True,
                                     limit=2))).get_result()
    callData = {'incident_no': 201907061, 'name': [], 'location': [], 'disaster_type': [], 'sentiment': None, 'remarks': []}

    for entity in data['entities']:
        if entity['type'] == "Person":
            callData["name"].append(entity['text'])
        elif entity['type'] == "Quantity":
            callData["remarks"].append(entity['text'])
        else:
            callData["location"].append(entity['text'])

    sentiment = -1  # hard-coded placeholder, so the branch below always reports "horrified"

    if sentiment < 0:
        callData["sentiment"] = "horrified"
    else:
        callData["sentiment"] = "scared"

    keywordList = []
    for keyword in data['keywords']:
        keywordList.append(keyword['text'])

    keywordList = ''.join(keywordList)
    keywordList = keywordList.replace('HESITATION ', '')
    callData['remarks'] = keywordList

    return callData
Example #15
    def get_keywords_from_sentences(self, sentences):
        response = self.nlu.analyze(
            text=sentences,
            features=Features(keywords=KeywordsOptions())).get_result()

        keywords_list = [keywords['text'] for keywords in response['keywords']]
        return keywords_list
Example #16
def watsonNlpApiCall(txtOrUrl: str, isText: bool):
    if isText:
        return getWatsonServiceInstance(0).analyze(
            text=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
    else:
        return getWatsonServiceInstance(0).analyze(
            url=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
Example #17
 def get_keywords(self, raw_feedback):
     response = self.nlu.analyze(
         return_analyzed_text=True,
         text=raw_feedback.review_text,
         features=Features(keywords=KeywordsOptions(sentiment=True, limit=5))).get_result()
     keywords_array = []
     for keyword in response['keywords']:
         keywords_array.append(keywordx(keyword['text'], keyword['sentiment']['score'], keyword['relevance']))
     return af(raw_feedback.review_text, keywords_array)
Example #18
def analyze_text(text):
    return natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2))).get_result()
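A usage sketch for analyze_text, assuming the module-level natural_language_understanding client used throughout these examples:

result = analyze_text('I love the new interface, but checkout keeps crashing.')
for entity in result.get('entities', []):
    print('entity:', entity['text'], entity.get('sentiment', {}).get('score'))
for keyword in result.get('keywords', []):
    print('keyword:', keyword['text'], keyword.get('sentiment', {}).get('score'))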
Example #19
def getWatsonNLP(text):

    response = watsonService.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          sentiment=SentimentOptions())).get_result()

    return response
Example #20
 def extract(filename, row):
     reviews = row["Review"]
     user_contrib = row["Contribution"]
     hotel_name = row["Hotel Name"]
     recency = row["Recency"]
     for sent in nlp(str(reviews)).sents:
         sent_lemma = lemmatize(sent, nlp)
         try:
             response = natural_language_understanding.analyze(
                 text=str(sent_lemma),
                 features=Features(keywords=KeywordsOptions(
                     sentiment=True, limit=10))).get_result()
         except Exception as e:
             # print(e,sent_lemma)
             continue
         for i in response["keywords"]:
             keyword = i["text"]
             sentiment = i["sentiment"]["score"]
             if sentiment >= 0:  # Skip sentences which have positive sentiment
                 continue
             category = which_bucket(keyword, nlp, domain)
             # and (not connection[category]["Review"].str.contains(sent).any()):
             if category:
                 response_emo = natural_language_understanding.analyze(
                     text=str(sent_lemma),
                     features=Features(emotion=EmotionOptions(
                         targets=[keyword]))).get_result()
                 emotion = response_emo["emotion"]["targets"][0]["emotion"]
                 joy = emotion["joy"]
                 sadness = emotion["sadness"]
                 anger = emotion["anger"]
                 disgust = emotion["disgust"]
                 fear = emotion["fear"]
                 try:
                     connection[category] = connection[category].append(
                         {
                             "Hotel Name": hotel_name,
                             "Review": str(sent),
                             "Review_Lemma": sent_lemma,
                             "Keyword": keyword,
                             "Sentiment": sentiment,
                             "User Contribution": user_contrib,
                             "Recency": recency,
                             "joy": joy,
                             "sadness": sadness,
                             "anger": anger,
                             "disgust": disgust,
                             "fear": fear
                         },
                         ignore_index=True)
                 except Exception:
                     # print("Error")
                     pass
Example #21
def analyze(natural_language_understanding, input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(emotion=EmotionOptions(),
                          categories=CategoriesOptions(limit=3),
                          concepts=ConceptsOptions(limit=3),
                          keywords=KeywordsOptions(limit=2))).get_result()

    return response
Example #22
def fn_ibm_nlu(analyze_text, reqd_limit=20):
    response = natural_language_understanding.analyze(
        text=analyze_text,
        features=Features(
            #entities=EntitiesOptions(emotion=False, sentiment=False, limit=reqd_limit),
            keywords=KeywordsOptions(emotion=False,
                                     sentiment=False,
                                     limit=reqd_limit))).get_result()
    return response
Example #23
def NLU(df1):
    # Entity values
    df1['INTENTS_NLU'] = ''
    df1['ENTITIES_NLU'] = ''
    df1['ENTITIES_SPACY'] = ''
    # Fetch the most important keywords for each intent using NLU (limit 3)
    authenticator = IAMAuthenticator(
        'YOUR_IAM_APIKEY')  # hard-coded key removed; supply your own credential
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(
        'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/f9f3fbd1-3de7-44e6-a53a-dc93992d6627'
    )
    for i in range(0, len(df1)):
        try:
            response = natural_language_understanding.analyze(
                text=df1['INTENTEXAMPLES'][i],
                features=Features(keywords=KeywordsOptions(
                    sentiment=False, emotion=False, limit=3))).get_result()
            if response['keywords'] != []:
                if len(response['keywords']) == 1:
                    df1.at[i, 'INTENTS_NLU'] = response['keywords'][0]['text']
                elif len(response['keywords']) == 2:
                    a = response['keywords'][0]['text']
                    b = response['keywords'][1]['text']
                    df1.at[i, 'INTENTS_NLU'] = (a + '_' + b)
                else:
                    a = response['keywords'][0]['text']
                    b = response['keywords'][1]['text']
                    c = response['keywords'][2]['text']
                    df1.at[i, 'INTENTS_NLU'] = (a + '_' + b + '_' + c)
            else:
                df1.at[i, 'INTENTS_NLU'] = ''
        except Exception:
            df1.at[i, 'INTENTS_NLU'] = ''
    # Fetching entities from NLU
    for i in range(0, len(df1)):
        try:
            response = natural_language_understanding.analyze(
                text=df1['INTENTEXAMPLES'][i],
                features=Features(entities=EntitiesOptions(
                    sentiment=False, limit=2))).get_result()
            #    features=Features(concepts=ConceptsOptions(limit=3))).get_result()
            if response['entities'] == []:
                df1.at[i, 'ENTITIES_NLU'] = ''
            else:
                df1.at[i, 'ENTITIES_NLU'] = response['entities'][0]['text']
        except Exception:
            df1.at[i, 'ENTITIES_NLU'] = ''
    return df1
Example #24
def textanalysis(tweet):
	response = natural_language_understanding.analyze(
		text=tweet,
		features=Features(
			entities=EntitiesOptions(emotion=True, sentiment=True, limit=4),
			keywords=KeywordsOptions(emotion=True, sentiment=True,
									 limit=4))).get_result()

	jsonformat(json.dumps(response, indent=2))
	print(json.dumps(response, indent=2))
Example #25
def get_keywords(sentence):
    """Fetches the keywords of the given sentence using IBM Watson Natural Language Understanding API"""
    keywords = []
    response = nlu.analyze(
        text=sentence,
        language="en",
        features=Features(keywords=KeywordsOptions())).get_result()
    for keyword_obj in response['keywords']:
        keywords.append(keyword_obj["text"].lower())
    return separate_elements(keywords)
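get_keywords ends by calling separate_elements, which the snippet never defines. A plausible reading is that it splits multi-word keyword phrases into individual, deduplicated words; a sketch of such a helper, purely an assumption:

def separate_elements(keywords):
    """Hypothetical helper: split multi-word phrases into unique words,
    preserving first-seen order."""
    seen = []
    for phrase in keywords:
        for word in phrase.split():
            if word not in seen:
                seen.append(word)
    return seen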
Example #26
    def ask_watson(self):
        input_text = self.cleaned_data['comment']

        response = service.analyze(
            text=input_text,
            features=Features(entities=EntitiesOptions(),
                              keywords=KeywordsOptions())).get_result()

        #response = service.analyze(text="isso eh um teste", features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result()
        return json.dumps(response, indent=2)
Example #27
def twitter_crawler():
    pre_time = datetime.datetime.now() + datetime.timedelta(days=-7)
    pre_time = str(pre_time)
    send_data = {}
    end_data2 = []
    cursor = tweepy.Cursor(
        api.search,
        q='코로나',  # "corona" -- the search keyword
        since=pre_time[:11],  # only tweets written after this date (one week ago)
        count=2  # number of tweets to return per page
    ).items(100)  # cap at 100 tweets
    for i, tweet in enumerate(cursor):
        #print("{}: {}".format(i, tweet.text))
        send_data['location'] = "Unknown"
        send_data['categorized'] = ""
        send_data['score'] = 0

        # Each feature needs its matching options class; the original passed
        # EntitiesOptions for categories and semantic_roles as well
        response = natural_language_understanding.analyze(
            text=tweet.text,
            features=Features(entities=EntitiesOptions(emotion=False),
                              categories=CategoriesOptions(),
                              semantic_roles=SemanticRolesOptions(),
                              keywords=KeywordsOptions(
                                  emotion=False,
                                  sentiment=False,
                              ))).get_result()

        # `entity` instead of `re` (which shadows the stdlib module); stop at
        # the first location so later entities don't reset it to "Unknown"
        for entity in response['entities']:
            if entity['type'] == "Location":
                send_data['location'] = entity['text']
                break

        for re1 in response['categories']:
            send_data['categorized'] = re1['label']
            send_data['score'] = re1['score']

        send_data['author'] = tweet.author.name
        send_data['title'] = tweet.author.id
        send_data['contents'] = tweet.text
        send_data['created'] = tweet.created_at
        send_data['published'] = datetime.datetime.now()
        send_data['imageurl'] = ""
        try:
            send_data['imageurl'] = tweet.entities['media'][0]['media_url']
        except (NameError, KeyError):
            #we dont want to have any entries without the media_url so lets do nothing
            pass
        dictionary_copy = send_data.copy()
        end_data2.append(dictionary_copy)

    return end_data2
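Example #27 assumes module-level api (Tweepy) and natural_language_understanding clients. A setup sketch for the Twitter side, assuming tweepy 3.x (where api.search still exists) and placeholder credentials:

import tweepy

# Placeholder credentials -- substitute your own Twitter API keys.
auth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
api = tweepy.API(auth, wait_on_rate_limit=True)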
Example #28
def analyzeFrame(text):
    try:
        return nlu.analyze(text=text,
                           features=Features(
                               categories=CategoriesOptions(limit=3),
                               concepts=ConceptsOptions(limit=3),
                               entities=EntitiesOptions(limit=5),
                               keywords=KeywordsOptions(limit=10),
                               relations=RelationsOptions())).get_result()
    except Exception as err:  # ApiException subclasses Exception
        return {'err': True, 'errMsg': str(err)}
Example #29
    def nlp_understanding(self, text=None, url=None):
        service = NaturalLanguageUnderstandingV1(version='2018-03-16',
                                                 authenticator=self.NLP_AUTH)

        service.set_service_url(
            'https://gateway.watsonplatform.net/natural-language-understanding/api'
        )

        if url:
            response = service.analyze(
                url=url,
                features=Features(entities=EntitiesOptions(),
                                  keywords=KeywordsOptions())).get_result()
        elif text:
            response = service.analyze(
                text=text,
                features=Features(entities=EntitiesOptions(),
                                  keywords=KeywordsOptions())).get_result()
        else:
            raise ValueError('Provide either text or url to analyze')

        return response
Example #30
def sentiment_and_keyword(st, service=service):
    """
    Função para fazer requisição com o servidor IBM e obter keywords e sentimento do texto a ser analisado
    return: JSON com análises do texto informado
    """
    return (service.analyze(text=st,
                            features=Features(
                                keywords=KeywordsOptions(sentiment=True,
                                                         emotion=True,
                                                         limit=3),
                                sentiment=SentimentOptions())).get_result())
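A usage sketch for sentiment_and_keyword, assuming service was constructed like the clients in the earlier examples:

result = sentiment_and_keyword('The delivery was late, but support resolved the issue quickly.')
print(result['sentiment']['document']['label'],
      result['sentiment']['document']['score'])
for kw in result['keywords']:
    print(kw['text'], kw['relevance'])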