Code Example #1
# This is a method of a transform class (note the self parameter); see the
# sketch below for a likely enclosing context.
def process(self, item):
    # Annotate one record with a document-level sentiment score (-1.0 to 1.0).
    client = language.LanguageServiceClient()
    doc = language.Document(content=item["tweet"],
                            type_=language.Document.Type.PLAIN_TEXT)
    response = client.analyze_sentiment(document=doc)
    item["sentiment"] = response.document_sentiment.score
    return [item]
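The self parameter and single-element list return value suggest this is the process method of an Apache Beam DoFn. A minimal sketch of one possible enclosing context, assuming Beam (the class name and the pipeline are hypothetical):

import apache_beam as beam
from google.cloud import language

class EnrichWithSentiment(beam.DoFn):
    # Hypothetical enclosing class for the process method shown above.
    def process(self, item):
        client = language.LanguageServiceClient()
        doc = language.Document(content=item["tweet"],
                                type_=language.Document.Type.PLAIN_TEXT)
        response = client.analyze_sentiment(document=doc)
        item["sentiment"] = response.document_sentiment.score
        return [item]

with beam.Pipeline() as pipeline:
    (pipeline
     | beam.Create([{"tweet": "What a great launch!"}])
     | beam.ParDo(EnrichWithSentiment())
     | beam.Map(print))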
Code Example #2
from google.cloud import language

def analyze_text_entities(text):
    # Authenticate with an explicit service-account key file.
    client = language.LanguageServiceClient.from_service_account_json(
        "C:\\Users\\Srinu\\Documents\\keys\\nlp.json")
    document = language.Document(content=text,
                                 type_=language.Document.Type.PLAIN_TEXT)

    response = client.analyze_entities(document=document,
                                       encoding_type=language.EncodingType.UTF32)

    # Keep only the entities the API classified as dates.
    result = [entity.name for entity in response.entities
              if entity.type_.name == "DATE"]
    print(result)
    return result
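A hypothetical call, to show the shape of the result; with the filter above, only entities the API tags as DATE survive:

dates = analyze_text_entities("The launch moved from September 1 to next Friday.")
# prints something like ['September 1', 'next Friday']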
Code Example #3
File: parser.py  Project: kristing400/fbhacks
import os

from google.cloud import language

def main(inputText):
    if inputText == "":
        return []
    # Point the client at a credentials file next to this script.
    dirpath = os.path.dirname(os.path.realpath(__file__))
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = dirpath + "/credentials.json"
    # Instantiates a client
    client = language.LanguageServiceClient()

    # Instantiates a plain text document. HTML can be analyzed instead by
    # passing type_=language.Document.Type.HTML.
    document = language.Document(content=inputText,
                                 type_=language.Document.Type.PLAIN_TEXT)

    # Detects entities in the document; they come back sorted by salience.
    entities = client.analyze_entities(document=document).entities

    # Keep at most the ten most salient entities.
    result = []
    maxItems = 10
    for entity in entities[:maxItems]:
        result.append({
            'name': entity.name,
            'type': entity.type_.name,  # e.g. PERSON, LOCATION, ORGANIZATION
            'salience': entity.salience
        })
    return result
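A hypothetical invocation; each element of the returned list is a plain dict, capped at ten entries:

for entity in main("Larry Page and Sergey Brin founded Google."):
    print(entity['name'], entity['type'], round(entity['salience'], 2))
# e.g. Google ORGANIZATION 0.41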
Code Example #4
from google.cloud import language

def analyze(text):
    """Run a sentiment analysis request on the passed text."""
    client = language.LanguageServiceClient()

    document = language.Document(content=text,
                                 type_=language.Document.Type.PLAIN_TEXT)
    annotations = client.analyze_sentiment(request={'document': document})

    # print_result is a helper defined elsewhere in the source project;
    # a sketch of one possible implementation follows this example.
    return print_result(annotations)
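print_result is not shown in this example. A sketch of one plausible implementation, modeled on the official sentiment sample this code resembles (the exact output format is an assumption):

def print_result(annotations):
    # Per-sentence scores first, then the document-level summary.
    score = annotations.document_sentiment.score
    magnitude = annotations.document_sentiment.magnitude
    for index, sentence in enumerate(annotations.sentences):
        sentence_sentiment = sentence.sentiment.score
        print(f"Sentence {index} has a sentiment score of {sentence_sentiment}")
    print(f"Overall Sentiment: score of {score} with magnitude of {magnitude}")
    return 0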
Code Example #5
from google.cloud import language

def analyze_text_sentiment(text):
    # Authenticate with an explicit service-account key file.
    client = language.LanguageServiceClient.from_service_account_json(
        "C:\\Users\\Srinu\\Documents\\keys\\nlp.json")
    document = language.Document(content=text,
                                 type_=language.Document.Type.PLAIN_TEXT)

    response = client.analyze_sentiment(document=document)

    # Rescale score (-1..1) and magnitude (0..inf) by a factor of ten.
    sentiment = response.document_sentiment
    return sentiment.score * 10, sentiment.magnitude * 10
Code Example #6
def wikipedia_route(company):

    # Imports the Google Cloud client library
    from google.cloud import language
    import wikipedia

    # Pull a ten-sentence summary of the company's Wikipedia article.
    result = wikipedia.summary(company, sentences=10)

    client = language.LanguageServiceClient()
    document = language.Document(
        content=result,
        type_=language.Document.Type.PLAIN_TEXT)
    encoding_type = language.EncodingType.UTF8
    entities = client.analyze_entities(
        request={'document': document, 'encoding_type': encoding_type}).entities
    return str(entities)
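The name wikipedia_route hints that this backs a web endpoint. A minimal sketch of wiring it up, assuming Flask (the app object and URL rule are hypothetical):

from flask import Flask

app = Flask(__name__)
app.add_url_rule("/wikipedia/<company>", view_func=wikipedia_route)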
Code Example #7
from google.cloud import language

def analyze_text_sentiment(text):
    client = language.LanguageServiceClient()
    document = language.Document(content=text,
                                 type_=language.Document.Type.PLAIN_TEXT)

    response = client.analyze_sentiment(document=document)

    sentiment = response.document_sentiment
    # Print the score and magnitude as percentages, then return the raw values.
    results = dict(
        text=text,
        score=f"{sentiment.score:.1%}",
        magnitude=f"{sentiment.magnitude:.1%}",
    )
    for k, v in results.items():
        print(f"{k:10}: {v}")
    return (sentiment.score, sentiment.magnitude)
Code Example #8
from google.cloud import language

def analyze_entities(text):
    '''Returns the detected entities along with the parameters associated
    with each entity, such as its type, its salience (relevance to the
    overall text), and the locations in the text that refer to the same
    entity. Entities are returned sorted by salience, highest first.

    :param text: string
    :return: AnalyzeEntitiesResponse
    '''

    client = language.LanguageServiceClient()

    document = language.Document(content=text,
                                 type_=language.Document.Type.PLAIN_TEXT)
    encoding_type = language.EncodingType.UTF8

    response = client.analyze_entities(document=document,
                                       encoding_type=encoding_type)

    return response
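A hypothetical usage; since the entities are already sorted by salience, the first few printed are the most relevant:

response = analyze_entities("Sundar Pichai is the CEO of Google.")
for entity in response.entities:
    print(f"{entity.name} ({entity.type_.name}): {entity.salience:.1%}")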
Code Example #9
def classify_text(article):
    # nl_client is a module-level LanguageServiceClient (see the note below).
    response = nl_client.classify_text(document=language.Document(
        content=article, type_=language.Document.Type.PLAIN_TEXT))
    return response
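nl_client is referenced but never created in this example; presumably it is a module-level client instantiated once, along the lines of:

from google.cloud import language

nl_client = language.LanguageServiceClient()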
Code Example #10
    # Loop body: df, i, client, user, table_id and normalize_space are
    # defined outside this snippet.
    location = str(df.at[i, 'Location'])
    date = str(df.at[i, 'Date'])

    content = normalize_space(content)
    location = normalize_space(location)

    # Strip characters that would break the hand-built SQL string below.
    content = content.replace("'", "-")
    content = content.replace(",", "-")

    location = location.replace("'", "-")
    location = location.replace(",", "-")
    location = location.replace(" ", "_")

    document = language.Document(content=content,
                                 type_=language.Document.Type.PLAIN_TEXT)
    response = client.analyze_sentiment(request={'document': document})

    score_tweet = format(response.document_sentiment.score)
    magnitude_tweet = format(response.document_sentiment.magnitude)

    df.at[i, 'Score'] = score_tweet
    df.at[i, 'Magnit'] = magnitude_tweet
    content = content[:50]

    # String concatenation like this is fragile; see the parameterized
    # alternative sketched below.
    sql = ("INSERT " + table_id + " (number,user,location,text,magnit,score) "
           "VALUES(" + str(i) + ",'" + str(user) + "','" + str(location) +
           "','" + str(content) + "'," + str(magnitude_tweet) + "," +
           str(score_tweet) + ")")
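The hand-assembled INSERT above breaks on stray quotes and invites injection, which is why the snippet has to strip quotes and commas first. A safer sketch using query parameters, assuming table_id names a BigQuery table (the client setup is hypothetical):

from google.cloud import bigquery

bq = bigquery.Client()
job_config = bigquery.QueryJobConfig(query_parameters=[
    bigquery.ScalarQueryParameter("number", "INT64", i),
    bigquery.ScalarQueryParameter("user", "STRING", str(user)),
    bigquery.ScalarQueryParameter("location", "STRING", location),
    bigquery.ScalarQueryParameter("text", "STRING", content),
    bigquery.ScalarQueryParameter("magnit", "FLOAT64", float(magnitude_tweet)),
    bigquery.ScalarQueryParameter("score", "FLOAT64", float(score_tweet)),
])
bq.query(
    "INSERT `" + table_id + "` (number, user, location, text, magnit, score) "
    "VALUES (@number, @user, @location, @text, @magnit, @score)",
    job_config=job_config,
).result()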