def test_detect_language(self):
        """Detect the language of a single English document and verify the result."""
        creds = CognitiveServicesCredentials(self.settings.CS_SUBSCRIPTION_KEY)
        client = TextAnalyticsClient(
            endpoint="https://westcentralus.api.cognitive.microsoft.com",
            credentials=creds,
        )
        docs = [{
            'id': 1,
            'text': 'I had a wonderful experience! The rooms were wonderful and the staff was helpful.'
        }]
        response = client.detect_language(documents=docs)

        self.assertEqual(response.documents[0].detected_languages[0].name, "English")
예제 #2
0
def sentiment(subscription_key):
    """Run sentiment analysis over a small multi-language batch and print each score."""
    creds = CognitiveServicesCredentials(subscription_key)
    endpoint = "https://{}.api.cognitive.microsoft.com".format(
        TEXTANALYTICS_LOCATION)
    client = TextAnalyticsClient(endpoint=endpoint, credentials=creds)

    try:
        docs = [
            {"id": "1", "language": "en", "text": "I had the best day of my life."},
            {"id": "2", "language": "en", "text": "This was a waste of my time. The speaker put me to sleep."},
            {"id": "3", "language": "es", "text": "No tengo dinero ni nada que dar..."},
            {"id": "4", "language": "it", "text": "L'hotel veneziano era meraviglioso. È un bellissimo pezzo di architettura."},
        ]

        response = client.sentiment(documents=docs)
        for doc in response.documents:
            print("Document Id: ", doc.id, ", Sentiment Score: ",
                  "{:.2f}".format(doc.score))

    except Exception as err:
        print("Encountered exception. {}".format(err))
예제 #3
0
    def analyze_sentiment(self):
        """Analyze sentiment for a batch of documents and print overall and
        per-sentence sentiment with confidence scores.

        Bug fix: the score format strings used ``{0:.3f}`` for all three
        placeholders, so the positive score was printed for positive, neutral
        and negative alike; the placeholders now consume the three format
        arguments in order.
        """
        # [START batch_analyze_sentiment]
        from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint,
                                                    credential=self.key)
        documents = [
            "I had the best day of my life.",
            "This was a waste of my time. The speaker put me to sleep.",
            "No tengo dinero ni nada que dar...",
            "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
        ]

        result = text_analytics_client.analyze_sentiment(documents)
        docs = [doc for doc in result if not doc.is_error]

        for idx, doc in enumerate(docs):
            print("Document text: {}".format(documents[idx]))
            print("Overall sentiment: {}".format(doc.sentiment))
            # [END batch_analyze_sentiment]
            print(
                "Overall scores: positive={:.3f}; neutral={:.3f}; negative={:.3f} \n"
                .format(
                    doc.document_scores['positive'],
                    doc.document_scores['neutral'],
                    doc.document_scores['negative'],
                ))
            # Inner index renamed so it no longer shadows the outer loop's idx.
            for sent_idx, sentence in enumerate(doc.sentences):
                print("Sentence {} sentiment: {}".format(
                    sent_idx + 1, sentence.sentiment))
                print(
                    "Sentence score: positive={:.3f}; neutral={:.3f}; negative={:.3f}"
                    .format(
                        sentence.sentence_scores['positive'],
                        sentence.sentence_scores['neutral'],
                        sentence.sentence_scores['negative'],
                    ))
                print("Offset: {}".format(sentence.offset))
                print("Length: {}\n".format(sentence.length))
            print("------------------------------------")
예제 #4
0
    def authentication_with_azure_active_directory(self):
        """DefaultAzureCredential will use the values from the environment
        variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
        """
        # [START create_ta_client_with_aad]
        from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
        from azure.identity import DefaultAzureCredential

        endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
        credential = DefaultAzureCredential()

        text_analytics_client = TextAnalyticsClient(endpoint,
                                                    credential=credential)
        # [END create_ta_client_with_aad]

        # Smoke-test the AAD-authenticated client with one document.
        docs = ["I need to take my cat to the veterinarian."]
        result = text_analytics_client.detect_languages(docs)

        detected = result[0].detected_languages[0]
        print("Language detected: {}".format(detected.name))
        print("Confidence score: {}".format(detected.score))
예제 #5
0
def sentiment(subscription_key, text):
    """Sentiment.
    Scores close to 1 indicate positive sentiment, while scores close to 0 indicate negative sentiment.
    """
    creds = CognitiveServicesCredentials(subscription_key)
    endpoint = "https://{}.api.cognitive.microsoft.com".format(
        TEXTANALYTICS_LOCATION)
    client = TextAnalyticsClient(endpoint=endpoint, credentials=creds)

    try:
        batch = [{"id": "1", "language": "en", "text": text}]

        response = client.sentiment(documents=batch)
        for doc in response.documents:
            print("Document Id: ", doc.id, ", Sentiment Score: ",
                  "{:.2f}".format(doc.score))

    except Exception as err:
        print("Encountered exception. {}".format(err))
    def detect_languages(self):
        """Detect the language of each document in a multilingual batch and
        print either the detection details or the error, per document."""
        # [START batch_detect_languages]
        from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
        client = TextAnalyticsClient(endpoint=self.endpoint, credential=self.key)
        documents = [
            "This document is written in English.",
            "Este es un document escrito en Español.",
            "这是一个用中文写的文件",
            "Dies ist ein Dokument in englischer Sprache.",
            "Detta är ett dokument skrivet på engelska."
        ]

        result = client.detect_languages(documents)

        for position, doc in enumerate(result):
            if doc.is_error:
                print(doc.id, doc.error)
            else:
                primary = doc.detected_languages[0]
                print("Document text: {}".format(documents[position]))
                print("Language detected: {}".format(primary.name))
                print("ISO6391 name: {}".format(primary.iso6391_name))
                print("Confidence score: {}\n".format(primary.score))
def sentiment(subscription_key):
    """Sentiment.

    Scores close to 1 indicate positive sentiment, while scores close to 0 indicate negative sentiment.

    Bug fix: the result line printed "sentimet" instead of "sentiment".
    """
    endpoint = "https://{}.api.cognitive.microsoft.com".format(TEXTANALYTICS_LOCATION)
    client = TextAnalyticsClient(endpoint=endpoint, credentials=CognitiveServicesCredentials(subscription_key))

    try:
        documents = [{
            'language': 'en',
            'id': 0,
            'text': "I had the best day of my life."
        }, {
            'language': 'en',
            'id': 1,
            'text': "This was a waste of my time. The speaker put me to sleep."
        }, {
            'language': 'es',
            'id': 2,
            'text': "No tengo dinero ni nada que dar..."
        }, {
            'language': 'it',
            'id': 3,
            'text': "L'hotel veneziano era meraviglioso. È un bellissimo pezzo di architettura."
        }]

        # Echo what is being submitted so failures can be traced to a document.
        for document in documents:
            print("Asking sentiment on '{}' (id: {})".format(document['text'], document['id']))

        response = client.sentiment(
            documents=documents
        )

        for document in response.documents:
            print("Found out that in document {}, sentiment score is {}:".format(document.id, document.score))

    except Exception as err:
        print("Encountered exception. {}".format(err))
    def test_show_stats_and_model_version(self, resource_group, location, cognitiveservices_account, cognitiveservices_account_key):
        """Verify show_stats=True surfaces the model version and batch statistics
        (one of the five docs is empty, so it is counted as erroneous)."""
        text_analytics = TextAnalyticsClient(cognitiveservices_account, cognitiveservices_account_key)

        docs = [{"id": "56", "text": ":)"},
                {"id": "0", "text": ":("},
                {"id": "22", "text": ""},
                {"id": "19", "text": ":P"},
                {"id": "1", "text": ":D"}]

        def verify_stats(response):
            # Invoked by the client via response_hook with the raw batch response.
            self.assertIsNotNone(response.model_version)
            self.assertIsNotNone(response.raw_response)
            stats = response.statistics
            self.assertEqual(stats.documents_count, 5)
            self.assertEqual(stats.transactions_count, 4)
            self.assertEqual(stats.valid_documents_count, 4)
            self.assertEqual(stats.erroneous_documents_count, 1)

        response = text_analytics.analyze_sentiment(
            docs,
            show_stats=True,
            model_version="latest",
            response_hook=verify_stats,
        )
    def test_successful_detect_language(self, resource_group, location, cognitiveservices_account, cognitiveservices_account_key):
        """Detect languages for four documents and check names, ISO codes and per-doc stats."""
        text_analytics = TextAnalyticsClient(cognitiveservices_account, cognitiveservices_account_key)

        docs = [{"id": "1", "text": "I should take my cat to the veterinarian."},
                {"id": "2", "text": "Este es un document escrito en Español."},
                {"id": "3", "text": "猫は幸せ"},
                {"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]

        response = text_analytics.detect_languages(docs, show_stats=True)

        expected = [("English", "en"),
                    ("Spanish", "es"),
                    ("Japanese", "ja"),
                    ("German", "de")]
        for doc, (name, iso) in zip(response, expected):
            primary = doc.detected_languages[0]
            self.assertEqual(primary.name, name)
            self.assertEqual(primary.iso6391_name, iso)
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)
            self.assertIsNotNone(primary.score)
예제 #10
0
    def test_successful_recognize_pii_entities(self, resource_group, location, cognitiveservices_account, cognitiveservices_account_key):
        """Recognize PII entities in three documents and verify the first hit in each."""
        text_analytics = TextAnalyticsClient(cognitiveservices_account, cognitiveservices_account_key)

        docs = [{"id": "1", "text": "My SSN is 555-55-5555."},
                {"id": "2", "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."},
                {"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]

        response = text_analytics.recognize_pii_entities(docs, show_stats=True)

        expected = [("555-55-5555", "U.S. Social Security Number (SSN)"),
                    ("111000025", "ABA Routing Number"),
                    ("998.214.865-68", "Brazil CPF Number")]
        for doc, (entity_text, entity_type) in zip(response, expected):
            self.assertEqual(doc.entities[0].text, entity_text)
            self.assertEqual(doc.entities[0].type, entity_type)

        for doc in response:
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)
            for entity in doc.entities:
                for attr in ("text", "type", "offset", "length", "score"):
                    self.assertIsNotNone(getattr(entity, attr))
예제 #11
0
def entity_recognition():
    """Recognize named entities in an English and a Spanish document and print
    each entity with its matches.

    Relies on module-level ``subscription_key`` and ``endpoint``.
    """
    credentials = CognitiveServicesCredentials(subscription_key)
    client = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)

    try:
        documents = [
            {"id": "1",
             "language": "en",
             "text": "Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975, to develop and sell BASIC interpreters for the Altair 8800."},
            {"id": "2",
             "language": "es",
             "text": "La sede principal de Microsoft se encuentra en la ciudad de Redmond, a 21 kilómetros de Seattle."},
        ]
        response = client.entities(documents=documents)

        for doc in response.documents:
            print("Document Id: ", doc.id)
            print("\tKey Entities:")
            for entity in doc.entities:
                print("\t\t", "NAME: ", entity.name, "\tType: ", entity.type,
                      "\tSub-type: ", entity.sub_type)
                for match in entity.matches:
                    print("\t\t\tOffset: ", match.offset, "\tLength: ",
                          match.length, "\tScore: ",
                          "{:.2f}".format(match.entity_type_score))

    except Exception as err:
        print("Encountered exception. {}".format(err))
예제 #12
0
def language_extraction(subscription_key):
    """Detect the language of several short strings, printing one line per document."""
    creds = CognitiveServicesCredentials(subscription_key)
    endpoint = "https://{}.api.cognitive.microsoft.com".format(
        TEXTANALYTICS_LOCATION)
    client = TextAnalyticsClient(endpoint=endpoint, credentials=creds)

    try:
        docs = [
            {'id': '1', 'text': 'This is a document written in English.'},
            {'id': '2', 'text': 'Este es un document escrito en Español.'},
            {'id': '3', 'text': '这是一个用中文写的文件'},
        ]
        response = client.detect_language(documents=docs)

        for doc in response.documents:
            print("Document Id: ", doc.id, ", Language: ",
                  doc.detected_languages[0].name)

    except Exception as err:
        print("Encountered exception. {}".format(err))
예제 #13
0
class SentimentAnalyser:
    """Thin wrapper around the Azure Text Analytics sentiment endpoint."""

    # Fallback document-id counter used when the caller does not supply one.
    id = 0

    def __init__(self):
        # Credentials come from the module-level ``azure`` config mapping.
        creds = CognitiveServicesCredentials(azure['api_key_1'])
        self.text_analytics = TextAnalyticsClient(
            endpoint=azure['text_analytics_endpoint'], credentials=creds)

    def analyse(self, text, id=None, language='en'):
        """Return the sentiment score for *text* (auto-assigns an id when omitted)."""
        if id is None:
            id = self.id
            self.id += 1
        document = {
            'id': id,
            'language': language,
            'text': text,
        }
        response = self.text_analytics.sentiment(documents=[document])
        return response.documents[0].score
예제 #14
0
class KeyPhrases:
    """Small helper around the Text Analytics key-phrase endpoint.

    Configuration comes from the module-level ``Constants`` object.
    """

    def __init__(self, language='en'):
        self.subscription_key = Constants.EXT_ANALYTICS_SUBSCRIPTION_KEY
        self.endpoint = Constants.TEXT_ANALYTICS_ENDPOINT
        self.text_analytics_url = Constants.TEXT_ANALYTICS_URL
        print(self.text_analytics_url)

        self.text_analytics = TextAnalyticsClient(
            endpoint=self.text_analytics_url,
            credentials=CognitiveServicesCredentials(self.subscription_key))
        self.docId = 0
        self.language = language

    def lookup(self, lookupstr):
        """Return the key phrases extracted from *lookupstr*."""
        payload = [{
            "id": str(self.docId),
            "language": self.language,
            "text": lookupstr,
        }]
        response = self.text_analytics.key_phrases(documents=payload)
        return response.documents[0].key_phrases
예제 #15
0
def main(documents):
    """Run entity tagging and sentiment analysis over *documents* and dump them as JSON.

    Bug fix: the final dump referenced an undefined name ``document`` (a
    NameError at runtime); it now dumps the ``documents`` that were processed.
    """
    def _require_env(name):
        # Fail fast with a clear message when a required variable is missing.
        if name not in os.environ:
            raise Exception(
                'Please set/export the environment variable: {}'.format(name))
        return os.environ[name]

    subscription_key = _require_env('TEXT_ANALYTICS_SUBSCRIPTION_KEY')
    endpoint = _require_env('TEXT_ANALYTICS_ENDPOINT')

    credentials = CognitiveServicesCredentials(subscription_key)
    text_analytics = TextAnalyticsClient(endpoint=endpoint,
                                         credentials=credentials)
    get_tags(text_analytics, documents)
    get_sentiment(text_analytics, documents)

    print(json.dumps(documents))
    sys.stdout.flush()
예제 #16
0
 def __init__(self):
     """Build an authenticated TextAnalyticsClient from the module-level
     ``azure`` config mapping (defined elsewhere in this file) and store it
     on ``self.text_analytics``.
     """
     subscription_key = azure['api_key_1']
     endpoint = azure['text_analytics_endpoint']
     credentials = CognitiveServicesCredentials(subscription_key)
     self.text_analytics = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)
# Fetch the highest comment id so the batch loop below knows when to stop.
comments_max_id_df = pd.read_sql_query(max_id_query, conn)
max_id = comments_max_id_df['max_id'].max()
print("max_id is " + str(max_id))

# Template for pulling the next batch of unscored reviews in id order.
# NOTE(review): values are spliced in via str.format rather than SQL
# parameters — acceptable only if batch_size/last_id are trusted ints; confirm.
next_batch_query = "select top ({}) \
    CONVERT(nvarchar(12), id) AS [id], \
    'en' AS language, \
    comments AS [text] \
    from dbo.reviews \
    where id >  {} \
    and sentiment_score is null \
    order by id asc"

# Authenticate Text Analytics Client
credentials = CognitiveServicesCredentials(access_key)
text_analytics = TextAnalyticsClient(endpoint=endpoint,
                                     credentials=credentials)

# Walk the reviews table in id-ordered batches until every row is scored.
# (last_id and batch_size are defined elsewhere in this file.)
while (last_id <= max_id):

    print("processing last_id " + str(last_id))

    # Get the next batch of comments as a dataframe with id, language, text
    comments_df = pd.read_sql_query(
        next_batch_query.format(batch_size, last_id), conn)

    # Converts the dataframe to a json string, then to a list of json docs (one item per row)
    documents = json.loads(comments_df.to_json(orient="records"))

    # Call the sentiment cognitive services
    response = text_analytics.sentiment(documents=documents)
    for document in response.documents:
예제 #18
0
import requests
from flask import Blueprint, request, Response
from sqlalchemy import desc, asc
from models import db
from config import getConfig
from models import Channel, User, UserScore, ChannelScore
from datetime import date
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
from flask import jsonify

# Application configuration loaded from the project's config module.
config = getConfig()

# Azure Setup
# Module-level Text Analytics client shared by the route handlers below.
credentials = CognitiveServicesCredentials(config.azure_key)
text_analytics = TextAnalyticsClient(endpoint=config.text_analytics_url,
                                     credentials=credentials)

# Flask blueprint grouping the Slack-facing routes.
slackapi = Blueprint('slackapi', __name__)


@slackapi.route('/overview', methods=["POST"])
def showOverview():
    try:
        users = db.session.query(UserScore).filter(
            UserScore.date == date.today()).order_by(asc(
                UserScore.id)).limit(3).all()
        channels = db.session.query(ChannelScore).filter(
            ChannelScore.date == date.today()).order_by(asc(
                ChannelScore.id)).limit(3).all()

        user_list = ''
# Set this to the region for your Speech resource (for example, westus, eastus, and so on).
speech_region = 'westus'

# Placeholders the user must replace before running this sample.
text_analytics_endpoint = 'PASTE_YOUR_TEXT_ANALYTICS_ENDPOINT_HERE'
text_analytics_subscription_key = 'PASTE_YOUR_TEXT_ANALYTICS_SUBSCRIPTION_KEY_HERE'

# Authenticate, you may need to change the region to your own.
# NOTE(review): speech_subscription_key is not defined in this excerpt —
# presumably set earlier in the file; confirm.
speech_config = speechsdk.SpeechConfig(subscription=speech_subscription_key,
                                       region=speech_region)

# Creates a speech recognizer using a microphone as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)

# Text Analytics client used on the recognized transcript.
text_analytics_client = TextAnalyticsClient(
    text_analytics_endpoint,
    CognitiveServicesCredentials(text_analytics_subscription_key))

# Starts speech recognition, and returns after a single utterance is recognized.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
print('Speak a phrase into your microphone...')
result = speech_recognizer.recognize_once()
print()

# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
    print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
    cancellation_details = result.cancellation_details
예제 #20
0
def analysis(request):
    """Django view: build a technical-analysis snapshot for the requested ticker.

    Pulls daily OHLCV data from Yahoo via pandas-datareader and (further down
    in this function) derives Buy/Sell/Hold signals from a battery of
    indicator helpers.  Only GET requests are handled.
    """
    if request.method == 'GET':
        form = request.GET
        ticker = form.get("Ticker", "0")
        # Resolve the company row and its ticker symbols from the Conames model.
        co = Conames.objects.get(Co_name=ticker).Co_name
        index = Conames.objects.get(Co_name=ticker).pk
        Y = Conames.objects.only('ticker').get(pk=index).ticker
        X = Conames.objects.only('tick').get(pk=index).tick
        tick = X.upper()
        # Daily price history from 2017-01-01 through today.
        start = dt.datetime(2017, 1, 1)
        end = date.today()
        df = web.DataReader(Y, 'yahoo', start, end)
        close = df['Close']
        high = df['High']
        low = df['Low']
        volume = df['Volume']
        # Day-over-day percentage move, formatted for display; only one of
        # diff_green / diff_red is non-empty depending on the sign.
        p = close[-1]
        p1 = close[-2]
        diff = ((p - p1) / p1) * 100
        d = "%.2f" % diff
        if diff > 0:
            diff_green = "+" + d + "%"
        else:
            diff_green = ""

        if diff < 0:
            diff_red = d + "%"
        else:
            diff_red = ""

        price = "%.2f" % p
        def rs():
            """Map the latest RSI(14) value to a trade signal.

            Bug fix: the sell branch was written ``84.9 <= rsi >= 70``, which
            evaluates to ``rsi >= 84.9`` and made the "Strong Sell" branch
            (rsi >= 85) unreachable while leaving 70..84.9 as Hold.  It now
            mirrors the buy side: 70..84.9 -> Sell, >= 85 -> Strong Sell.
            """
            r = ta.rsi(close, n=14, fillna=False)
            rsi = r[-1]
            if rsi <= 15:
                status = "Strong Buy"
            elif 15.1 <= rsi <= 30:
                status = "Buy"
            elif 70 <= rsi <= 84.9:
                status = "Sell"
            elif rsi >= 85:
                status = "Strong Sell"
            else:
                status = "Hold"
            return status

        def ao():
            """Awesome Oscillator signal: near zero, the direction of the last
            10 samples decides; beyond +/-1 the level itself decides.

            NOTE(review): each nested order() returns after inspecting only
            the first adjacent pair (the ``return True`` sits inside the
            loop), so "ascending"/"descending" is judged from A[0] vs A[1]
            alone — confirm whether the whole window was meant to be checked.
            """
            ao = ta.ao(high, low, s=5, len=34, fillna=False)
            AO = ao[-1]
            A = (ao[-10:])
            if 0 >= AO >= -0.5:

                def order():  # For ascending
                    for i in range(len(A) - 1):
                        if A[i] - A[i + 1] > 0:
                            return False
                        return True

                if order():
                    status1 = "Buy"
                else:
                    status1 = "Sell"

            elif 0 <= AO <= 0.5:

                def order():  # For descending
                    for i in range(len(A) - 1):
                        if A[i] - A[i + 1] < 0:
                            return False
                        return True

                if order():
                    status1 = "Sell"
                else:
                    status1 = "Buy"

            elif AO <= -1:
                status1 = "Buy"
            elif AO >= 1:
                status1 = "Sell"
            else:
                status1 = "Hold"
            return status1

        def mf():
            """Money Flow Index signal on the 7-sample average: oversold Buy,
            overbought Sell."""
            mfi = ta.money_flow_index(high, low, close, volume, n=14,
                                      fillna=False)
            avg_mfi = mean(mfi[-7:])
            # The original two Buy thresholds (<=20, <=30) and two Sell
            # thresholds (>=80, >=70) collapse to one test each.
            if avg_mfi <= 30:
                return "Buy"
            if avg_mfi >= 70:
                return "Sell"
            return "Hold"

        def so():
            """Stochastic oscillator signal from the latest %K value."""
            stoch = ta.stoch(high, low, close, n=14, fillna=False)
            latest = stoch[-1]
            if latest <= 30:   # merges the original <=20 and <=30 Buy bands
                return "Buy"
            if latest >= 70:   # merges the original >=80 and >=70 Sell bands
                return "Sell"
            return "Hold"

        def s_o():
            """Stochastic signal-line (%D) thresholds."""
            sos = ta.stoch_signal(high, low, close, n=14, d_n=3, fillna=False)
            latest = sos[-1]
            if latest <= 30:   # merges the original <=20 and <=30 Buy bands
                return "Buy"
            if latest >= 70:   # merges the original >=80 and >=70 Sell bands
                return "Sell"
            return "Hold"

        def tsi():
            """True Strength Index signal.

            Bug fix: the original ``elif TSI <= 5`` swallowed every value
            below 5, so both Sell branches were unreachable (and the final
            guard ``TSI >= -20`` looks like a typo for ``<= -20``).  The
            ladder now mirrors the positive side: >= 5 Buy, -5..5 Hold,
            <= -5 Sell, and always returns a value.
            """
            series = ta.tsi(close, r=25, s=13, fillna=False)
            latest = series[-1]
            if latest >= 5:
                status5 = "Buy"
            elif latest >= -5:
                status5 = "Hold"
            else:
                status5 = "Sell"
            return status5

        def u_o():
            """Ultimate Oscillator signal: Buy when oversold, Sell when overbought."""
            uo = ta.uo(high, low, close, s=7, m=14, len=28,
                       ws=4.0, wm=2.0, wl=1.0, fillna=False)
            latest = uo[-1]
            # Preserve the original bands exactly, including the tiny
            # uncovered gap 10 < UO < 10.1 which falls through to Hold.
            if latest <= 10 or 10.1 <= latest <= 30:
                return "Buy"
            if latest >= 70:   # merges the original 70..90 and >=90 Sell bands
                return "Sell"
            return "Hold"

        def w_r():
            """Williams %R signal (%R ranges from -100 to 0).

            Bug fix: the overbought test was ``0 >= WR >= 20``, which can
            never be true, so the Sell branch was dead code; the overbought
            band for %R is -20..0.

            NOTE(review): Buy then fires for the whole -80..-20 mid-band;
            the conventional oversold Buy band is WR <= -80 — confirm intent.
            """
            wr = ta.wr(high, low, close, lbp=14, fillna=False)
            WR = wr[-1]
            if 0 >= WR >= -20:
                status7 = "Sell"
            elif -80 <= WR:
                status7 = "Buy"
            else:
                status7 = "Hold"
            return status7

        def cm():
            """Chaikin Money Flow signal: positive flow Buy, negative Sell, zero Hold.

            Bug fix: the ``CMF == 0`` Hold branch was unreachable because the
            preceding Buy band was ``0 <= CMF <= 1.5``; zero is now tested
            first.  Both positive bands (>1.5 and 0..1.5) resolved to Buy and
            both negative bands to Sell, so the ladder collapses to a sign test.
            """
            cmf = ta.chaikin_money_flow(high, low, close, volume, n=20,
                                        fillna=False)
            CMF = cmf[-1]
            if CMF == 0:
                return "Hold"
            return "Buy" if CMF > 0 else "Sell"

        def em():
            """Ease-of-Movement signal: strongly positive Buy, strongly negative Sell."""
            emv = ta.ease_of_movement(high, low, close, volume, n=20,
                                      fillna=False)
            latest = emv[-1]
            if latest >= 1.5:
                return "Buy"
            return "Hold" if latest >= -1.5 else "Sell"

        def f_i():
            """Force Index signal by sign.

            Bug fix: ``FI >= 0`` / ``FI <= 0`` both matched zero, so the Hold
            branch was unreachable and FI == 0 reported Buy; zero now Holds.
            """
            fi = ta.force_index(close, volume, n=2, fillna=False)
            FI = fi[-1]
            if FI > 0:
                return "Buy"
            if FI < 0:
                return "Sell"
            return "Hold"

        def nv():
            """Negative Volume Index vs. its 255-period EMA: NVI above EMA is bullish."""
            nvi_series = ta.negative_volume_index(close, volume, fillna=False)
            ema_series = ta.ema_indicator(close, n=255, fillna=False)
            latest_nvi = nvi_series[-1]
            latest_ema = ema_series[-1]
            if latest_nvi > latest_ema:
                return "Buy"
            return "Sell" if latest_nvi < latest_ema else "Hold"

        def ob():
            """On-Balance Volume trend signal over the last 10 samples.

            Bug fix: both branches of the final if/else assigned "Buy"; the
            descending case now returns "Sell", matching the ascending ->
            Buy / descending -> Sell convention used by the other helpers.

            NOTE(review): order() returns after inspecting only the first
            adjacent pair (the ``return True`` is inside the loop) — confirm
            whether the whole window was meant to be checked.
            """
            obv = ta.on_balance_volume(close, volume, fillna=False)
            OBV = obv[-10:]

            def order():  # For ascending
                for i in range(len(OBV) - 1):
                    if OBV[i] - OBV[i + 1] > 0:
                        return False
                    return True

            if order():
                vol_status_obv = "Buy"
            else:
                vol_status_obv = "Sell"
            return vol_status_obv

        def adi():
            """Accumulation/Distribution Index trend signal.

            NOTE(review): only signals when the latest ADI is <= 1000 (the
            rationale for that threshold is not evident here), and order()
            returns after inspecting just the first adjacent pair of the
            7-sample window — confirm both.
            """
            add = ta.acc_dist_index(high, low, close, volume, fillna=False)
            a = add[-1]
            ad = add[-7:]

            if a <= 1000:

                def order():  # For ascending
                    for i in range(len(ad) - 1):
                        if ad[i] - ad[i + 1] > 0:
                            return False
                        return True

                if order():
                    vol_status_add = "Buy"
                else:
                    vol_status_add = "Sell"

            else:
                vol_status_add = "No signal"

            return vol_status_add

        def at():
            """Average True Range volatility-breakout signal vs. its 10-sample mean.

            NOTE(review): ``mean(atr[-10:] - 1.5)`` subtracts 1.5 elementwise
            before averaging, which equals ``mean(atr[-10:]) - 1.5`` for a
            numeric series — presumably the latter was intended; confirm.
            """
            atr = ta.average_true_range(high, low, close, n=14, fillna=False)

            if atr[-1] >= 1.5 + mean(atr[-10:]):
                vot_status_atr = "Buy"
            elif atr[-1] <= mean(atr[-10:] - 1.5):
                vot_status_atr = "Sell"
            else:
                vot_status_atr = "Hold"
            return vot_status_atr

        def bb():
            """Bollinger Band signal: Buy when the close sits closer to the
            lower band than to the upper band, Sell when the reverse holds.

            Cleanup: the hband/lband indicator series and the moving average
            were computed but never used (pure indicator calculations); they
            are no longer computed.
            """
            upper = ta.bollinger_hband(close, n=20, ndev=2, fillna=False)
            lower = ta.bollinger_lband(close, n=20, ndev=2, fillna=False)
            dist_to_upper = upper[-1] - close[-1]
            dist_to_lower = close[-1] - lower[-1]

            if dist_to_upper > dist_to_lower:
                vot_status_bb = "Buy"
            elif dist_to_upper < dist_to_lower:
                vot_status_bb = "Sell"
            else:
                vot_status_bb = "Hold"
            return vot_status_bb

        def dch():
            """Donchian Channel signal: a close at/near the upper band sells,
            at/near the lower band buys.

            Cleanup: the unused hband/lband indicator series (pure indicator
            calculations) are no longer computed.
            """
            upper = ta.donchian_channel_hband(close, n=20, fillna=False)
            lower = ta.donchian_channel_lband(close, n=20, fillna=False)

            if close[-1] == upper[-1]:
                vot_status_dc = "Strong Sell"
            elif upper[-1] > close[-1] > upper[-1] - 2:
                vot_status_dc = "Sell"
            elif lower[-1] == close[-1]:
                vot_status_dc = "Strong Buy"
            elif lower[-1] < close[-1] <= lower[-1] + 2:
                vot_status_dc = "Buy"
            else:
                vot_status_dc = "Hold"
            return vot_status_dc

        def adx():
            """ADX directional signal: +DI above -DI is bullish.

            Cleanup: the ADX-strength series was computed but never used (and
            its local name shadowed this function); it is no longer computed.
            Note the returned labels carry a leading space, preserved from the
            original.
            """
            adx_neg = ta.adx_neg(high, low, close, n=14, fillna=False)
            adx_pos = ta.adx_pos(high, low, close, n=14, fillna=False)

            if adx_pos[-1] > adx_neg[-1]:
                trn_adx_status = " Buy"
            elif adx_pos[-1] < adx_neg[-1]:
                trn_adx_status = " Sell"
            else:
                trn_adx_status = " Hold"
            return trn_adx_status

        def ai():
            """Aroon trend vote: 'Buy' / 'Sell' / 'Hold'.

            Compares the latest Aroon-up value (n=25) against
            Aroon-down: up above down votes Buy, below votes Sell,
            equal votes Hold. Reads `close` and `ta` from the
            enclosing scope.
            """
            down = ta.aroon_down(close, n=25, fillna=False)[-1]
            up = ta.aroon_up(close, n=25, fillna=False)[-1]
            if up == down:
                return "Hold"
            return "Buy" if up > down else "Sell"

        def c():
            """CCI trend vote: 'Buy' / 'Sell' / 'Hold'.

            Buckets the latest Commodity Channel Index value (n=20,
            c=0.015) into a vote. Reads `high`, `low`, `close` and `ta`
            from the enclosing scope.
            """
            latest = ta.cci(high, low, close, n=20, c=0.015,
                            fillna=False)[-1]

            # Same bucket boundaries as before, checked in the same
            # order (the ranges leave tiny float gaps, e.g.
            # 50 < latest < 50.1, which fall through to the final Buy).
            if 0 <= latest <= 50:
                return "Buy"
            if 50.1 <= latest <= 100:
                return "Hold"
            if latest >= 100.1:
                return "Sell"
            if -50 <= latest <= 0:
                return "Sell"
            if -100 <= latest <= -50.1:
                return "Hold"
            return "Buy"

        def dpo():
            """Detrended Price Oscillator vote: 'Buy' / 'Sell'.

            Reads `close` and `ta` from the enclosing scope.

            Note: the original had an unreachable else-"Hold" branch —
            `do >= 0` and `do <= 0` together cover every number, so
            "Hold" could never be returned. The dead branch is removed;
            the observable behavior (>= 0 -> Buy, < 0 -> Sell) is
            unchanged.
            """
            latest = ta.dpo(close, n=20, fillna=False)[-1]
            return "Buy" if latest >= 0 else "Sell"

        def ema():
            """EMA trend vote: 'Buy' / 'Sell' / 'Hold'.

            Uses the 12-period EMA: whether price sits above or below
            it, combined with whether the last 7 EMA values are
            non-decreasing. Reads `close` and `ta` from the enclosing
            scope.

            Bug fixes vs. the original:
            * the inner order() had `return True` INSIDE the loop, so
              the "ascending" test only ever looked at the first pair
              of values; the return is moved after the loop so the
              whole window counts;
            * the duplicated order() definition is hoisted out of the
              two branches;
            * when em[-1] == close[-1] the original fell through and
              returned None (silently ignored by the vote counters);
              "Hold" is returned instead.
            """
            em = ta.ema_indicator(close, n=12, fillna=False)
            window = em[-7:]

            def non_decreasing():
                # True when the last 7 EMA values never step down.
                for i in range(len(window) - 1):
                    if window[i] - window[i + 1] > 0:
                        return False
                return True

            if em[-1] < close[-1]:
                return "Sell" if non_decreasing() else "Buy"
            if em[-1] > close[-1]:
                return "Buy" if non_decreasing() else "Sell"
            return "Hold"

        def ich():
            """Ichimoku trend vote: 'Buy' / 'Sell' / 'Hold'.

            Compares the latest senkou span A (9/26) against span B
            (26/52). Reads `high`, `low` and `ta` from the enclosing
            scope.
            """
            span_a = ta.ichimoku_a(high, low, n1=9, n2=26,
                                   visual=False, fillna=False)[-1]
            span_b = ta.ichimoku_b(high, low, n2=26, n3=52,
                                   visual=False, fillna=False)[-1]

            if span_a == span_b:
                return "Hold"
            return "Buy" if span_a > span_b else "Sell"

        def kst():
            """Know-Sure-Thing trend vote: 'Buy' / 'Sell' / 'Hold'.

            Compares the KST line against its 9-period signal line.
            Reads `close` and `ta` from the enclosing scope.
            """
            periods = dict(r1=10, r2=15, r3=20, r4=30,
                           n1=10, n2=10, n3=10, n4=15)
            line = ta.kst(close, fillna=False, **periods)
            sig = ta.kst_sig(close, nsig=9, fillna=False, **periods)

            if line[-1] < sig[-1]:
                return "Sell"
            if line[-1] > sig[-1]:
                return "Buy"
            return "Hold"

        def macd():
            """MACD trend vote: 'Buy' / 'Sell' / 'Hold'.

            Compares the MACD line (12/26) against its 9-period signal
            line. Reads `close` and `ta` from the enclosing scope.
            """
            line = ta.macd(close, n_fast=12, n_slow=26, fillna=False)[-1]
            sig = ta.macd_signal(close, n_fast=12, n_slow=26,
                                 n_sign=9, fillna=False)[-1]

            if line == sig:
                return "Hold"
            return "Buy" if line > sig else "Sell"

        # --- News lookup (Bing News Search) for the analysed ticker ---
        # SECURITY NOTE(review): subscription key is hard-coded in
        # source; it should come from settings / environment variables.
        subscription_key = "d225b3f12aab446aa34af931359edbe0"
        search_term = ticker

        client = NewsSearchAPI(CognitiveServicesCredentials(subscription_key))
        news_result = client.news.search(query=search_term, market="en-US")

        if news_result.value:

            # Descriptions of the first four articles feed the
            # sentiment analysis below.
            # NOTE(review): if fewer than 4 results come back, the
            # value[1..3] lookups raise IndexError — confirm upstream.
            first_news_result = news_result.value[0]
            data = format(first_news_result.description)

            sec_news_result = news_result.value[1]
            data1 = format(sec_news_result.description)

            third_news_result = news_result.value[2]
            data2 = format(third_news_result.description)

            fourth_news_result = news_result.value[3]
            data3 = format(fourth_news_result.description)
            # print("news name: {}".format(first_news_result.name))
            #
            # print("news description: {}".format(first_news_result.description))

        else:
            # NOTE(review): this HttpResponse is constructed and then
            # discarded (not returned), and data..data3 stay undefined,
            # so the sentiment block below will raise NameError.
            HttpResponse("NIL")

        # --------------------------------------------

        # --- Sentiment of the four news descriptions (Azure Text
        # Analytics).
        # SECURITY NOTE(review): key and endpoint are hard-coded; move
        # them to configuration.
        TEXT_ANALYTICS_SUBSCRIPTION_KEY = '4d1d3697dc8548b59163e2592b22beb7'
        subscription_key = TEXT_ANALYTICS_SUBSCRIPTION_KEY

        TEXT_ANALYTICS_ENDPOINT = 'https://analytics4sentiment.cognitiveservices.azure.com/'
        endpoint = TEXT_ANALYTICS_ENDPOINT

        credentials = CognitiveServicesCredentials(subscription_key)
        text_analytics_client = TextAnalyticsClient(endpoint=endpoint,
                                                    credentials=credentials)

        client = text_analytics_client

        # One document per news description fetched above.
        # NOTE(review): the fourth id is "4 " with a trailing space.
        documents = [
            {
                "id": "1",
                "language": "en",
                "text": data
            },
            {
                "id": "2",
                "language": "en",
                "text": data1
            },
            {
                "id": "3",
                "language": "en",
                "text": data2
            },
            {
                "id": "4 ",
                "language": "en",
                "text": data3
            },
        ]

        # print(documents)
        response = client.sentiment(documents=documents)
        res = []
        for document in response.documents:
            # document.score is in [0, 1]; 1 = most positive.
            res.append(float(format(document.score)))

        # Mean sentiment as a percentage; assumes exactly 4 documents
        # came back (errored documents would shrink the sum, not the
        # divisor).
        res_mean = (sum(res) / 4) * 100
        # Exactly one of the three strings below is non-empty; the
        # template shows whichever band res_mean falls into.
        if res_mean > 60:
            final_sentiment_buy = "%.1f" % res_mean + "% Positive"
        else:
            final_sentiment_buy = ""

        if res_mean < 40:
            final_sentiment_sell = "%.1f" % res_mean + "% Positive"
        else:
            final_sentiment_sell = ""

        if 40 <= res_mean <= 60:
            final_sentiment_hold = "%.1f" % res_mean + "% Positive"
        else:
            final_sentiment_hold = ""

    def final_signal():
        """Combine the 25 technical-indicator votes with news
        sentiment into a gauge-image path for the template.

        Returns the template-relative path and stores it in the
        module-global `path`.

        Fixes vs. the original: the dead local `signal = ...`
        assignments (never read, and not declared global here) and the
        unused `global data, data1, data2, data3` names are removed;
        the scoring itself is unchanged.
        """
        global path
        tech_signals = [
            rs(),
            ao(),
            mf(),
            so(),
            s_o(),
            tsi(),
            u_o(),
            w_r(),
            cm(),
            em(),
            f_i(),
            nv(),
            ob(),
            adi(),
            at(),
            bb(),
            dch(),
            adx(),
            ai(),
            c(),
            dpo(),
            ema(),
            ich(),
            kst(),
            macd(),
        ]
        # Each Buy vote is worth 4 points, each Hold 2, halved — max 50
        # for 25 indicators; news sentiment contributes up to 50 more.
        buy = tech_signals.count("Buy")
        hold = tech_signals.count("Hold")
        tech_score = ((buy * 4) + (hold * 2)) / 2
        news_score = res_mean / 2
        final_score = tech_score + news_score

        if final_score >= 80:
            path = 'bsh_user/Strong [email protected]'
        elif 55 <= final_score < 80:
            path = 'bsh_user/[email protected]'
        elif 45 <= final_score < 55:
            path = 'bsh_user/[email protected]'
        elif 10 < final_score < 45:
            path = 'bsh_user/[email protected]'
        elif final_score <= 10:
            path = 'bsh_user/Strong [email protected]'
        return path

    def signal():
        """Combine the 25 technical-indicator votes with news
        sentiment into a textual signal: 'Strong Buy' / 'Buy' /
        'Hold' / 'Sell' / 'Strong Sell'.

        WARNING(review): `global signal` makes the assignments below
        rebind the module-global name `signal` — this function clobbers
        ITSELF with a string the first time it runs, so a second call
        raises TypeError ('str' object is not callable). homepage()
        below reuses the same global as a list. Left untouched because
        module state depends on the global; fixing it needs a
        coordinated rename.
        """
        global signal
        tech_signals = [
            rs(),
            ao(),
            mf(),
            so(),
            s_o(),
            tsi(),
            u_o(),
            w_r(),
            cm(),
            em(),
            f_i(),
            nv(),
            ob(),
            adi(),
            at(),
            bb(),
            dch(),
            adx(),
            ai(),
            c(),
            dpo(),
            ema(),
            ich(),
            kst(),
            macd(),
        ]
        # Buy = 4 points, Hold = 2, halved (max 50); news adds up to 50.
        buy = tech_signals.count("Buy")
        hold = tech_signals.count("Hold")
        tech_score = ((buy * 4) + (hold * 2)) / 2
        news_score = res_mean / 2
        final_score = tech_score + news_score
        if final_score >= 80:
            signal = 'Strong Buy'
        elif 55 <= final_score < 80:
            signal = 'Buy'
        elif 45 <= final_score < 55:
            signal = 'Hold'
        elif 10 < final_score < 45:
            signal = 'Sell'
        elif final_score <= 10:
            signal = 'Strong Sell'

        return signal

    # --- NIFTY 50 daily move (price + green/red percentage strings) ---
    dfn = web.DataReader("^NSEI", 'yahoo', start, end)

    closen = dfn['Close']
    pricen1 = closen[-1]
    pricen2 = closen[-2]
    pdiffn = ((pricen1 - pricen2) / pricen2) * 100
    diffn = "%.2f" % pdiffn
    price_nifty = "%.2f" % pricen1
    if pdiffn > 0:
        diffn_green = "+" + diffn + "%"
    else:
        diffn_green = ""

    if pdiffn < 0:
        diffn_red = diffn + "%"
    else:
        diffn_red = ""

    # --- SENSEX daily move. NOTE(review): uses >= / <= (unlike the
    # other three indices), so a 0.00% day fills BOTH strings.
    dfs = web.DataReader("^BSESN", 'yahoo', start, end)
    closes = dfs['Close']
    prices1 = closes[-1]
    prices2 = closes[-2]
    pdiffs = ((prices1 - prices2) / prices2) * 100
    diffs = "%.2f" % pdiffs
    price_sensex = "%.2f" % prices1
    if pdiffs >= 0:
        diffs_green = "+" + diffs + "%"
    else:
        diffs_green = ""

    if pdiffs <= 0:
        diffs_red = diffs + "%"
    else:
        diffs_red = ""

    # --- Vakrangee daily move (the stock this view analyses) ---
    dfnb = web.DataReader("VAKRANGEE.NS", 'yahoo', start, end)
    closenb = dfnb['Close']
    pricenb1 = closenb[-1]
    pricenb2 = closenb[-2]
    pdiffnb = ((pricenb1 - pricenb2) / pricenb2) * 100
    diffnb = "%.2f" % pdiffnb
    if pdiffnb > 0:
        diffnb_green = "+" + diffnb + "%"
    else:
        diffnb_green = ""

    if pdiffnb < 0:
        diffnb_red = diffnb + "%"
    else:
        diffnb_red = ""
    price_nb = "%.2f" % pricenb1

    # Hand everything to the template: market stats, the sentiment
    # strings, and one fresh call per indicator vote (each call
    # recomputes its ta.* series).
    return render(
        request, 'bsh_user/analysis.html', {
            'x': tick,
            'ticker': ticker,
            "diff_green": diff_green,
            'diff_red': diff_red,
            'price': price,
            'price_nifty': price_nifty,
            'diffn_green': diffn_green,
            'diffn_red': diffn_red,
            'price_sensex': price_sensex,
            'diffs_red': diffs_red,
            'diffs_green': diffs_green,
            'price_nb': price_nb,
            'diffnb_green': diffnb_green,
            'diffnb_red': diffnb_red,
            'name': "Vakrangee Ltd.",
            "news_buy": final_sentiment_buy,
            "news_sell": final_sentiment_sell,
            "news_hold": final_sentiment_hold,
            'mom_rsi': rs(),
            'mom_ao': ao(),
            'mom_mf': mf(),
            'mom_so': so(),
            'mom_sos': s_o(),
            'mom_tsi': tsi(),
            'mom_uo': u_o(),
            'mom_wr': w_r(),
            'vol_cm': cm(),
            'vol_em': em(),
            'vol_fi': f_i(),
            'vol_nv': nv(),
            'vol_ob': ob(),
            'vol_adi': adi(),
            'vot_atr': at(),
            'vot_bb': bb(),
            'vot_dch': dch(),
            'trn_adx': adx(),
            'trn_ai': ai(),
            'trn_cci': c(),
            'trn_dpo': dpo(),
            'trn_ema': ema(),
            'trn_ich': ich(),
            'trn_kst': kst(),
            'trn_macd': macd(),
            'score': final_signal(),
            'signal': signal()
        })
예제 #21
0
def homepage(request):
    """Django view for the landing page.

    Shows daily moves for four market indices plus a five-stock table
    where each stock gets a news-sentiment signal from Azure Text
    Analytics.

    Fixes vs. the original:
    * the "Sell" band was written `20 > res_mean > 35`, which is
      impossible (a number cannot be both < 20 and > 35), so scores in
      (20, 35) fell through to "Strong Sell"; the bounds are swapped;
    * the four copy-pasted index computations now share one helper.
    """
    global x, rows, signal
    start = dt.datetime(2017, 1, 1)
    end = date.today()

    def _daily_move(symbol, inclusive=False):
        # Return (last_price_str, gain_str, loss_str) for `symbol`.
        # Exactly one of gain/loss is non-empty — except with
        # inclusive=True, where a 0.00% day fills both (this mirrors
        # the original's >= / <= comparisons for the Sensex).
        frame = web.DataReader(symbol, 'yahoo', start, end)
        series = frame['Close']
        pct = ((series[-1] - series[-2]) / series[-2]) * 100
        pct_str = "%.2f" % pct
        rose = pct >= 0 if inclusive else pct > 0
        fell = pct <= 0 if inclusive else pct < 0
        green = "+" + pct_str + "%" if rose else ""
        red = pct_str + "%" if fell else ""
        return "%.2f" % series[-1], green, red

    price_nifty, diffn_green, diffn_red = _daily_move("^NSEI")
    price_sensex, diffs_green, diffs_red = _daily_move("^BSESN",
                                                       inclusive=True)
    price_nb, diffnb_green, diffnb_red = _daily_move("^NSEBANK")
    price_bm, diffbm_green, diffbm_red = _daily_move("BSE-MIDCAP.BO")

    tickers = ['LT.BO', 'ITC.NS', 'RELIANCE.NS', 'WIPRO.BO', 'TCS.NS']
    stocks = ["Larsen & Toubro", "ITC Limited", "Reliance Industries", "Wipro", "TATA Consultancy Services"]
    closeps = []
    x = []
    price_diff_green = []
    price_diff_red = []
    hpbar = []
    signal = []
    for ticker in tickers:
        # Per-ticker window is shorter than the index window above.
        start = dt.datetime(2019, 1, 1)
        end = date.today()
        df = web.DataReader(ticker, 'yahoo', start, end)
        close = df['Close']
        price_diff = ((close[-1] - close[-2]) / close[-2]) * 100
        if price_diff > 0:
            price_diff_green.append("+" + "%.2f" % price_diff + "%")
        else:
            price_diff_green.append("")

        if price_diff < 0:
            price_diff_red.append("%.2f" % price_diff + "%")
        else:
            price_diff_red.append("")
        closeps.append("%.2f" % close[-1])
        x.append(Homepage_db.objects.only('hco_logo').get(htic_name=ticker).hco_logo)

        # --- News + sentiment for this ticker -------------------------
        # SECURITY NOTE(review): both subscription keys below are
        # hard-coded; they belong in settings/environment variables.
        subscription_key = "d225b3f12aab446aa34af931359edbe0"
        search_term = ticker

        client = NewsSearchAPI(CognitiveServicesCredentials(subscription_key))
        news_result = client.news.search(query=search_term, market="en-us", count=10)

        if news_result.value:
            # Only the top article's description feeds the sentiment
            # call below.
            first_news_result = news_result.value[0]
            data = format(first_news_result.description)
        else:
            # NOTE(review): preserved quirk — the HttpResponse is built
            # and discarded, and `data` keeps the previous iteration's
            # value (NameError on the first pass). Needs a proper fix.
            HttpResponse("NIL")

        TEXT_ANALYTICS_SUBSCRIPTION_KEY = '4d1d3697dc8548b59163e2592b22beb7'
        subscription_key = TEXT_ANALYTICS_SUBSCRIPTION_KEY

        TEXT_ANALYTICS_ENDPOINT = 'https://analytics4sentiment.cognitiveservices.azure.com/'
        endpoint = TEXT_ANALYTICS_ENDPOINT

        credentials = CognitiveServicesCredentials(subscription_key)
        client = TextAnalyticsClient(endpoint=endpoint,
                                     credentials=credentials)

        documents = [
            {"id": "1", "language": "en", "text": data},
        ]

        response = client.sentiment(documents=documents)
        res = [float(format(document.score)) for document in response.documents]

        # Single document, score in [0, 1] -> percentage.
        res_mean = (sum(res) * 100)

        if res_mean > 80:
            hpbar.append("StrongBuy_hpbar.svg")
            signal.append("Strong Buy")
        elif 80 > res_mean > 60:
            hpbar.append("Buy_hpbar.svg")
            signal.append("Buy")
        elif 60 > res_mean > 35:
            hpbar.append("Hold_hpbar.svg")
            signal.append("Hold")
        elif 35 > res_mean > 20:
            # BUG FIX: was `20 > res_mean > 35`, which can never be
            # true, so this Sell branch was unreachable.
            hpbar.append("Sell_hpbar.svg")
            signal.append("Sell")
        else:
            hpbar.append("StrongSell_hpbar.svg")
            signal.append("Strong Sell")

    rows = zip(x, closeps, price_diff_green, price_diff_red, hpbar, signal, stocks)

    return render(request, 'homepage/homepage.html',
                  {"rows": rows, 'price_nifty': price_nifty, 'diffn_green': diffn_green, 'diffn_red': diffn_red,
                   'price_sensex': price_sensex,
                   'diffs_red': diffs_red, 'diffs_green': diffs_green, 'price_nb': price_nb,
                   'diffnb_green': diffnb_green,
                   'diffnb_red': diffnb_red, 'price_bm': price_bm, 'diffbm_green': diffbm_green,
                   'diffbm_red': diffbm_red})
예제 #22
0
def authenticateClient():
    """Build and return a TextAnalyticsClient using the module-level
    `key` and `endpoint`."""
    return TextAnalyticsClient(
        endpoint=endpoint,
        credentials=CognitiveServicesCredentials(key),
    )
#--------------------------------------------------------------------------

from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials

# Key of the Cognitive Services resource in Azure
subscription_key = "-"
# Cognitive Services credentials object
credentials = CognitiveServicesCredentials(subscription_key)

# URL for connecting to the resource; the template is:
# https://<location>.api.cognitive.microsoft.com/
text_analytics_url = "-"

# Cognitive Services client
text_analytics = TextAnalyticsClient(endpoint=text_analytics_url,
                                     credentials=credentials)

################################# DETECT LANGUAGES #################################

# Dummy data: one document per language to exercise detection
documents = [{
    'id': '1',
    'text': 'This is a document written in English.'
}, {
    'id': '2',
    'text': 'Este es un documento escrito en Español.'
}, {
    'id': '3',
    'text': '这是一个用中文写的文件'
}]
예제 #24
0
# fail fast if the subscription-key env var is missing
# NOTE(review): `not x in y` would read better as `x not in y`
if not key_var_name in os.environ:
    raise Exception('Set/export environment variable: {}'.format(key_var_name))
subscription_key = os.environ[key_var_name]

# get endpoint from environment
endpoint_var_name = 'TEXT_ANALYTICS_ENDPOINT'
if not endpoint_var_name in os.environ:
    raise Exception(
        'Set/export environment variable: {}'.format(endpoint_var_name))
endpoint = os.environ[endpoint_var_name]

# set credentials object
credentials = CognitiveServicesCredentials(subscription_key)

# authenticate client
text_analytics = TextAnalyticsClient(endpoint=endpoint,
                                     credentials=credentials)

# get text from file (relative to the working directory)
file_name = 'in.txt'
with io.open(file_name, 'r') as in_file:
    text = in_file.read()

# pack and send for analysis
# https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/language-support
documents = [{'id': '1', 'language': 'en', 'text': text}]
response = text_analytics.key_phrases(documents=documents)

# loop through entities returned from API
for document in response.documents:
    print('Document ID: ', document.id)
    print('\tKey Phrases:')
예제 #25
0
    def get_client_lazy(self):
        """Lazily import the Azure SDK and build a TextAnalyticsClient
        for `self.url`, authenticated with `self.get_secret()`."""
        from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
        from msrest.authentication import CognitiveServicesCredentials

        creds = CognitiveServicesCredentials(self.get_secret())
        return TextAnalyticsClient(self.url, creds)
class TextAnalytics():
    """Thin wrapper around the Azure Text Analytics client.

    The constructor validates the credentials and opens the connection
    to the resource in Azure.

    Parameters:
        SubscriptionKey = key belonging to the Cognitive Services resource
        Location        = resource location, e.g. "eastus" or "westus"
    """
    def __init__(self, SubscriptionKey, Location):
        # NOTE: the original had a no-op `SubscriptionKey =
        # SubscriptionKey` self-assignment here; it bound nothing and
        # was removed.
        TA_URL = "https://{0}.api.cognitive.microsoft.com/".format(Location)

        Credentials = CognitiveServicesCredentials(SubscriptionKey)

        self.Text_Analytics = TextAnalyticsClient(endpoint=TA_URL,
                                                  credentials=Credentials)

    def detectLanguages(self, Documents):
        """Detect the language of each document.

        Parameters:
            Documents = list of objects to analyze; each MUST have the
                        attributes "id" and "text".

        Returns the language-detection result batch from the service.
        """
        return self.Text_Analytics.detect_language(documents=Documents)

    def analyzeSentiment(self, Documents):
        """Score the sentiment of each document.

        Parameters:
            Documents = list of objects to analyze; each MUST have the
                        attributes "id", "language" and "text".

        Returns the sentiment-score result batch from the service.
        """
        return self.Text_Analytics.sentiment(documents=Documents)

    def keyPhrases(self, Documents):
        """Extract key phrases from each document.

        Parameters:
            Documents = list of objects to analyze; each MUST have the
                        attributes "id", "language" and "text".

        Returns the key-phrase result batch from the service.
        """
        return self.Text_Analytics.key_phrases(documents=Documents)

    def identifyEntites(self, Documents):
        """Identify entities in each document.

        (Method name keeps the original "Entites" spelling — callers
        depend on it.)

        Parameters:
            Documents = list of objects to analyze; each MUST have the
                        attributes "id" and "text".

        Returns the entity-recognition result batch from the service.
        """
        return self.Text_Analytics.entities(documents=Documents)
예제 #27
0
# ----------------------------------------------------------------------

# Command line: zero or more words forming the sentence to analyse.
option_parser = argparse.ArgumentParser(add_help=False)

option_parser.add_argument('sentence', nargs="*", help='sentence to analyse')

args = option_parser.parse_args()

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------

key, endpoint = request_priv_info()

credentials = CognitiveServicesCredentials(key)
client = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)

# ------------------------------------------------------------------------
# Helper function
# ------------------------------------------------------------------------

def analyseText(txt):
    documents = [{'id': '1', 'text': txt}]
    response = client.detect_language(documents=documents)

    l = response.documents[0]
    dl = l.detected_languages[0]
    lang = dl.iso6391_name

    documents = [{'id': '1', 'language': lang, 'text': txt}]
예제 #28
0
def authenticateClient():
    """Read the subscription key and endpoint via get_env() and return
    an authenticated TextAnalyticsClient."""
    subscription_key, endpoint = get_env()
    return TextAnalyticsClient(
        endpoint=endpoint,
        credentials=CognitiveServicesCredentials(subscription_key),
    )
예제 #29
0
def analyze(filename: str, cloud: str) -> float:
    """Run a sentiment analysis request on text within a passed filename.

    :param filename: The sentence to be analyzed
    :type filename: str
    :param cloud: The cloud to operate on ("azure" or "google")
    :type cloud: str
    :return: score (None when the cloud is not supported)
    :return type: float

    Changes vs. the original:
    * the sentence-splitting regex is now a raw string — the old plain
      string contained invalid escape sequences (DeprecationWarning,
      slated to become a SyntaxError);
    * the duplicated `if cloud == ...` dispatch (request built in one
      chain, reported in a second) is merged so each branch builds its
      response and returns in one place, and `response` can no longer
      be referenced unbound.
    """

    # get credentials for Google and Azure cloud text services listed in
    # cloudmesh yaml
    credentials_list = get_credentials()

    # add file located in text cache directory to the registry so it can be
    # loaded and passed to services.
    add_file(filename)
    content = load_content(filename)

    if cloud == "azure":
        credentials = CognitiveServicesCredentials(credentials_list[1])
        endpoint = credentials_list[2]
        client = TextAnalyticsClient(endpoint=endpoint,
                                     credentials=credentials)

        # One document per sentence; Azure scores each independently.
        sentences = re.split(r'\.|\?|!', content)
        documents = [{"id": index, "language": "en", "text": sentence}
                     for index, sentence in enumerate(sentences)]
        response = client.sentiment(documents=documents)

        print("------Azure Cognitive Services------")
        for document in response.documents:
            print("Sentence ", document.id, " has a sentiment score of ",
                  "{:.2f}".format(document.score))
            # Preserved quirk: only the FIRST sentence's score is
            # returned (the return sits inside the loop).
            return document.score
    elif cloud == "google":
        client = language.LanguageServiceClient()

        document = types.Document(content=content,
                                  type=enums.Document.Type.PLAIN_TEXT)

        response = client.analyze_sentiment(document=document)

        score = response.document_sentiment.score
        magnitude = response.document_sentiment.magnitude
        print("------Google Natural Language Analysis------")
        for index, sentence in enumerate(response.sentences):
            sentence_sentiment = sentence.sentiment.score
            print('Sentence {} has a sentiment score of {}'.format(
                index, sentence_sentiment))
        print('Overall Sentiment: score of {} with magnitude of {}'.format(
            score, magnitude))
        return score
    else:
        print("Cloud not supported.")
예제 #30
0
import json
import os
import sys
import time
import requests
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
import re

# from pdf_to_png import pdf_to_png

# variables for entity extraction from text
# SECURITY NOTE(review): key and endpoint are hard-coded literals (the
# *_var_name names suggest env-var names, but they hold the values
# themselves); move these to environment variables / configuration.
key_var_name = '389ed828f66c4a42a830c542ff27125f'  # '2f77f291c6e8467c8c694bd80a445f9a'
endpoint_var_name = 'https://text-extract.cognitiveservices.azure.com/'  # 'https://hcdf-form-text.cognitiveservices.azure.com/'
credentials = CognitiveServicesCredentials(key_var_name)
text_analytics = TextAnalyticsClient(endpoint=endpoint_var_name,
                                     credentials=credentials)


def find_pattern(text, patterns):
    """Search `text` for the regex `patterns`.

    Returns the re.Match object for the first occurrence, or False when
    there is no match (preserving the original truthy-match/False
    contract).

    Fix: the original called re.search twice — once for the test and
    once for the return value; the match is now computed a single time.
    """
    match = re.search(patterns, text)
    return match if match else False


def extract_entities(t):
    """Run Azure entity recognition on a single English text `t` using
    the module-level client and return the raw API response."""
    batch = [{"id": "1", "language": "en", "text": t}]
    return text_analytics.entities(documents=batch)

예제 #31
0
# Extract the word bounding boxes and text.
# NOTE(review): `analysis` comes from an earlier OCR call (not visible
# here); presumably the Computer Vision OCR JSON — confirm upstream.
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = []
text_only = []
for line in line_infos:
    for word_metadata in line:
        for word_info in word_metadata["words"]:
            word_infos.append(word_info)
            text_only.append(word_info["text"])

print(text_only)

# Text Analytics
credentials = CognitiveServicesCredentials(subscription_key)
text_analytics_url = "https://westcentralus.api.cognitive.microsoft.com/"
text_analytics = TextAnalyticsClient(endpoint=text_analytics_url,
                                     credentials=credentials)

# NOTE(review): this passes a list of plain strings, whereas the other
# entities() calls in this codebase pass dicts with id/language/text —
# verify the SDK accepts bare strings here.
documents = text_only[:]
response = text_analytics.entities(documents=documents)

for document in response.documents:
    print("Document Id: ", document.id)
    print("\tKey Entities:")
    for entity in document.entities:
        print("\t\t", "NAME: ", entity.name, "\tType: ", entity.type,
              "\tSub-type: ", entity.sub_type)
        for match in entity.matches:
            print("\t\t\tOffset: ", match.offset, "\tLength: ", match.length,
                  "\tScore: ", "{:.2f}".format(match.entity_type_score))

# Display the image and overlay it with the extracted text.