Code example #1
File: app.py Project: hpanushan/GCP_NLP_API
def getValue():
    # Read the search keyword from the submitted form
    keyword = str(request.form['filter'])

    # Fetch tweets for the keyword (10 presumably caps the result count)
    obj = StreamingClass(keyword, 10)
    obj.findTweets()

    # Tweets data
    tweetIDList,tweetUserNameList,tweetTextList = obj.getDetails()
    # Convert into a JSON-style data format
    dataList = []
    for i in range(len(tweetIDList)):
        row = {}
        row["id"] = tweetIDList[i]
        row["userName"] = tweetUserNameList[i]
        row["text"] = tweetTextList[i]
        try:
            row["sentiment"] = gcpNLP(tweetTextList[i])
        except Exception:
            # If analysis fails, translate the tweet first and retry
            row["sentiment"] = gcpNLP(langTranslator(tweetTextList[i]))
        dataList.append(row)

    return render_template('index.html', analysis=dataList)
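
Note: gcpNLP and langTranslator are project helpers that are not shown on this page. As a rough, hypothetical sketch (not the project's actual code), gcpNLP likely wraps the google-cloud-language client's analyze_sentiment call; some examples below treat its return value as a Sentiment object with .score and .magnitude, while others unpack a (score, magnitude) pair:

# Hypothetical sketch of gcpNLP; the project's real implementation is not shown.
from google.cloud import language_v1

def gcpNLP(text):
    client = language_v1.LanguageServiceClient()
    document = language_v1.Document(
        content=text, type_=language_v1.Document.Type.PLAIN_TEXT
    )
    # document_sentiment carries .score and .magnitude
    return client.analyze_sentiment(request={"document": document}).document_sentiment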
Code example #2
File: main.py Project: hpanushan/GCP_NLP_API
def main():
    testTweet = "@libertlady @TheGreySonof @sergiolpn @EugeniaRolon_ @Angelalerena Corrección; Foxconn tiene denuncias por trabajo e… https://t.co/XAuqim69GT"
    # Clean the tweet text before analysis
    cleanedTestTweet = cleanTweet(testTweet)
    print(cleanedTestTweet)

    # Compare sentiment of the raw tweet against the cleaned tweet
    # using the Google Cloud Platform NLP model
    testTweetSentiment = gcpNLP(testTweet)
    cleanedTestTweetSentiment = gcpNLP(cleanedTestTweet)

    # Printing the results
    print("testTweetSentiment", testTweetSentiment)
    print("cleanedTestTweetSentiment", cleanedTestTweetSentiment)
Code example #3
def addSentimentProperties(listOfTuples):
    # Add sentiment score and sentiment magnitude into the tuples

    output = []  # Output list
    for row in listOfTuples:
        # Analyse the text field (the last element of the tuple)
        sentiment = gcpNLP(row[-1])
        sentimentScore = sentiment.score
        sentimentMagnitude = sentiment.magnitude

        # Add sentiment score and magnitude into the tuple
        row = row + (sentimentScore, sentimentMagnitude)
        output.append(row)

    return output
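
A quick usage sketch with made-up data (illustrative only; the sentiment values are placeholders):

rows = [("2021-01-01", "1001", "alice", "2002", "I love this!")]
enriched = addSentimentProperties(rows)
# e.g. [("2021-01-01", "1001", "alice", "2002", "I love this!", 0.9, 0.9)]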
Code example #4
File: Main.py Project: hpanushan/NeuralNetwork_Model
def main():
    fileName = "TrainingPhrases.xlsx"
    readObj = ReadExcelFile(fileName)

    # Reading the phrases column
    listOfPhrases = readObj.getColumnValues("Phrase")

    # Pass each phrase to Google, fetch its sentiment parameters,
    # and store the new values in the Excel sheet
    n = len(listOfPhrases)

    # Creating an object to write values in Excel file
    writeObj = WriteExcelFile(fileName)

    for i in range(n):
        sentimentScore, sentimentMagnitude = gcpNLP(listOfPhrases[i])
        # Sentiment score goes into column 3
        writeObj.write(i + 2, 3, str(sentimentScore))
        # Sentiment magnitude goes into column 4
        writeObj.write(i + 2, 4, str(sentimentMagnitude))
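
ReadExcelFile and WriteExcelFile are project helpers that are not shown here. A minimal sketch of WriteExcelFile, assuming it is backed by openpyxl (an assumption; the project may use a different library):

# Hypothetical sketch of WriteExcelFile, assuming openpyxl under the hood.
import openpyxl

class WriteExcelFile:
    def __init__(self, fileName):
        self.fileName = fileName
        self.workbook = openpyxl.load_workbook(fileName)
        self.sheet = self.workbook.active

    def write(self, row, column, value):
        # Write one cell and persist the change to disk
        self.sheet.cell(row=row, column=column, value=value)
        self.workbook.save(self.fileName)

Saving on every write keeps the sketch simple; a real implementation would batch the writes and save once at the end.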
Code example #5
def getTextFromDatabase(dbName, tableName):

    # Connect with Clickhouse
    dbObject = ClickhouseClient("10.0.0.30")

    # Get all table data
    data = dbObject.selectData(dbName, tableName)

    # Add new columns to the existing table (for sentiment score and magnitude)
    dbObject.addColumn(dbName, tableName, "score", "String")
    dbObject.addColumn(dbName, tableName, "magnitude", "String")

    # Read the rows one by one
    for row in data:
        # Get the sentiment of the text field (last column) row by row
        sentiment = gcpNLP(row[-1])
        print(sentiment.score)
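
ClickhouseClient is another project helper that is not shown. A minimal sketch of the methods used here, assuming the clickhouse-driver package (hypothetical; the real class may differ):

# Hypothetical sketch of ClickhouseClient, assuming clickhouse-driver.
from clickhouse_driver import Client

class ClickhouseClient:
    def __init__(self, host):
        self.client = Client(host=host)

    def selectData(self, dbName, tableName):
        return self.client.execute(f"SELECT * FROM {dbName}.{tableName}")

    def addColumn(self, dbName, tableName, columnName, columnType):
        self.client.execute(
            f"ALTER TABLE {dbName}.{tableName} "
            f"ADD COLUMN IF NOT EXISTS {columnName} {columnType}"
        )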
Code example #6
def storeClickhouse(ipAddress, userName, tweetURL, listOfTweetReplyObjects):
    # Creating Clickhouse client object
    dbObject = ClickhouseClient(ipAddress)

    # Build a name for the new table
    tableName = createTableName(userName, tweetURL)

    # Creating list of tuples
    listOfTuples = []

    # Read tweet objects one by one and store them in Clickhouse
    for i in listOfTweetReplyObjects:
        # Filter out required data from the object
        dateTime, userID, userName, textID, text = filterDataFields(i)

        # Save the data into a tuple
        dataTuple = (dateTime, userID, userName, textID, text)

        # Pass the data to Google NLP API and fetch sentiment parameters
        sentiment = gcpNLP(text)
        sentimentScore = sentiment.score
        sentimentMagnitude = sentiment.magnitude

        # Append sentiment properties into dataTuple
        dataTuple = dataTuple + (sentimentScore, sentimentMagnitude)

        # Append to listOfTuples
        listOfTuples.append(dataTuple)

    # Convert list of tuples into list of dict
    listOfDict = getListOfDict([
        'date_time', 'user_id', 'user_name', 'text_id', 'text', 'score',
        'magnitude'
    ], listOfTuples)

    # Create the table in the "twitter" database
    dbObject.createTable("twitter", tableName)

    # Store the data
    dbObject.insertData("twitter", tableName, listOfDict)
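
getListOfDict pairs the column names with each tuple's fields. A one-line sketch consistent with how it is called above (an assumption; the real helper is not shown):

# Hypothetical sketch of getListOfDict, matching its call site above.
def getListOfDict(keys, listOfTuples):
    return [dict(zip(keys, row)) for row in listOfTuples]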
Code example #7
def modelTesting(phrase):

    # Sentiment score and magnitude returned by the Google NLP API
    val1, val2 = gcpNLP(phrase)

    # Trained network parameters: W1 (2x10), W2 (10x1), b1 (1x10), b2 (1x1)
    W1 = np.array([[
        0.7944864, 2.5306618, -4.5559416, 0.3835472, 0.99691343, -16.251022,
        -4.3798785, 8.443347, -2.4882212, 0.18412943
    ],
                   [
                       1.3711807, 2.76434, -2.2028754, 1.7946438, 0.1288933,
                       -13.720011, -4.2971883, -8.429026, -2.3004491,
                       0.82452744
                   ]])

    W2 = np.array([[1.545527], [2.8403213], [4.8268833], [-2.4048],
                   [-1.0374035], [-13.8223915], [-3.170226], [12.88139],
                   [3.4203565], [1.0138328]])

    b1 = np.array([[
        -3.2464652e+00, -4.2906141e+00, 5.2482719e-03, -1.4363248e+00,
        8.6730689e-01, 5.2704124e+00, 4.4298167e+00, 5.3570614e+00,
        -1.5046034e-02, -2.4429324e+00
    ]])

    b2 = np.array([[0.959692]])

    # Inputs
    inputArray = np.array([[val1, val2]])

    # Hidden-layer output (W1 and b1 are already NumPy arrays)
    out1 = sigmoid(inputArray.dot(W1) + b1)

    # Output-layer activation
    out2 = sigmoid(out1.dot(W2) + b2)

    return np.round(out2, 1)[0][0]
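
np is clearly NumPy, but sigmoid is assumed to come from the surrounding module. A minimal sketch of the standard logistic function it presumably matches (an assumption; the project's own definition is not shown):

import numpy as np

# Standard logistic function, assumed to match the project's sigmoid.
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

Taken together, the parameter shapes describe a small 2-10-1 feed-forward network: the two GCP sentiment values pass through a 10-unit hidden layer to a single sigmoid output, which is rounded to one decimal place.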