Example #1
async def chat(context):
    def check(author):
        def inner_check(message):
            # only accept messages from the user who started the chat
            return message.author == author

        return inner_check

    # discord.py v1+: messages are sent through the channel (Client has no say())
    await context.channel.send("Start talking with the bot!")
    mod = n.nlp()

    while True:
        # wait for the next message from the same author
        msg = await client.wait_for('message',
                                    check=check(context.author),
                                    timeout=30)

        inp = msg.content
        if inp.lower() == "quit":
            break

        # vectorise the input and pick the highest-scoring intent tag
        inp_pr = np.array([mod.bag_of_words(inp, mod.words)])
        results = mod.model.predict(inp_pr)

        results_index = np.argmax(results)
        tag = mod.labels[results_index]

        responses = ["Sorry, I did not understand that."]  # fallback if no tag matches
        for tg in mod.raw_data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']

        await context.channel.send("RoBot: " + str(random.choice(responses)))
Example #2
from nltk.corpus import wordnet as wn


def tokens(s):
    """Tokenize s, then extend the token list with WordNet hyponym lemmas."""
    doc = nlp.nlp(s)
    token = [i for i in doc]

    for word in doc:
        # collect the hyponym lemma names for every synset of this word
        hyponym_lemmas = []
        for synset in wn.synsets(str(word)):
            hyponym_lemmas.extend(
                sorted(lemma.name() for hyp in synset.hyponyms()
                       for lemma in hyp.lemmas()))

        # append each hyponym once, preserving order
        for name in hyponym_lemmas:
            if name not in token:
                token.append(name)

    # final de-duplication pass, preserving order
    token1 = []
    for i in token:
        if i not in token1:
            token1.append(i)

    return token1
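A quick usage sketch; it assumes nlp.nlp returns an iterable of tokens and that NLTK's WordNet corpus has been downloaded (nltk.download('wordnet')):

print(tokens("dog"))
# e.g. ['dog', 'puppy', 'corgi', ...] -- the token followed by hyponym lemmas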
Example #3
def dashboard():
    data = request.args.to_dict()
    print "Recieved data"
    companyName = data['companyname']
    cleanedCompanyName = nlp.nlp(companyName)[0].encode("utf-8")
    date = data['date']
    print cleanedCompanyName
    print date
    code = findStock.findCode(cleanedCompanyName)
    page = findStock.findPage(date)
    if code is not None:
        print "Company code found. Sending price data back to client"
        '''
        if (dt.datetime.today().strftime("%Y.%m.%d") == dt.datetime.strptime(date,"%Y.%m.%d").strftime("%Y.%m.%d")):    
            finalData = findStock.todayInfo(code)
            finalData[0]['date'] = dt.datetime.strptime(date,"%Y.%m.%d").strftime("%Y.%m.%d")
            return json.dumps(finalData), 200

        else:
        '''

        finalData = findStock.pastInfo(code, page)
        return finalData, 200
    else:
        print "code not found"
        return "Failure", 404
Example #4
def pickle_texts(outfile, text):
    """ pickle the tens of thousands of inscriptions . . . . """

    doc = nlp(text)

    with open(outfile, 'wb') as f:
        pickle.dump(doc, f)
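Reading the document back is symmetric; a minimal sketch, assuming the file was written by pickle_texts above (the filename is hypothetical):

import pickle

with open('inscriptions.pkl', 'rb') as f:  # hypothetical outfile
    doc = pickle.load(f)
print(doc[:10])  # first ten tokens of the restored Doc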
Example #5
def extract_code(text):
    logging.info("Processing result...")
    time1 = time.time()
    result = nlp(text)
    time2 = time.time()
    logging.info('Processing took %.3f ms' % ((time2 - time1) * 1000))

    return parse_result(result)
Example #6
def plot_wrap():
    show = plotstock.plotgraph()
    sent = nlp()
    showsent = plotstock.plotsent()
    return render_template('plot.html',
                           show=show,
                           sent=sent,
                           showsent=showsent)
Example #7
def text_to_sents():
    text = request.json.get("text", "")
    doc = nlp.nlp(text)

    nlp.all_sents.update({s.text: s for s in doc.sents})

    sents = [s.text for s in doc.sents]

    return jsonify(sents)
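A client-side sketch for exercising text_to_sents, assuming it is registered under a hypothetical /sents route on a local Flask app:

import requests

resp = requests.post("http://localhost:5000/sents",  # hypothetical route
                     json={"text": "First sentence. Second one."})
print(resp.json())  # e.g. ["First sentence.", "Second one."]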
Example #8
def bin_inscriptions(corpus):
    """ put the texts into the docbin """
    doc_bin = DocBin(attrs=["LEMMA", "TAG", "POS", "DEP", "HEAD"],
                     store_user_data=True)
    for c in corpus:
        doc = nlp(c)
        doc_bin.add(doc)

    with open('dbg.bin', 'wb') as f:
        f.write(doc_bin.to_bytes())
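The serialized DocBin can later be restored without re-parsing the corpus; a sketch using spaCy's DocBin API, assuming the same nlp pipeline is loaded:

from spacy.tokens import DocBin

with open('dbg.bin', 'rb') as f:
    doc_bin = DocBin().from_bytes(f.read())
docs = list(doc_bin.get_docs(nlp.vocab))  # Docs carrying the stored attributes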
Example #9
def get_news_sentiment():
    print("Analyzing news' sentiments...")
    df = retrieve_client_news()
    sentiment = []
    for idx, row in df.iterrows():
        if row.Content == '-----':
            sentiment.append('-----')
        else:
            s = SnowNLP(row.Content)
            sentences_sen = []
            for sentence in s.sentences:
                ss = SnowNLP(sentence)
                sentences_sen.append(ss.sentiments)
            mean_score = np.mean(np.array(sentences_sen))
            s = SnowNLP(row.Title)
            score = TITLE_WEIGHT * s.sentiments + CONTENT_WEIGHT * mean_score
            sentiment.append(score)
    df['Sentiment'] = sentiment
    print("Saving results...")
    df.to_csv(mp.DIR_DATA_CUSTOMERS + 'customer_related_news.csv', index=False, encoding='utf_8_sig')
    nlp()
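TITLE_WEIGHT and CONTENT_WEIGHT are module-level constants not shown in this snippet; a plausible definition, assuming the title and content scores are blended as a weighted average (values are illustrative only):

TITLE_WEIGHT = 0.3    # hypothetical; the pair should sum to 1
CONTENT_WEIGHT = 0.7  # so the blended score stays in [0, 1]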
Example #10
    def assess_paragraph_difficulty(self):
        """ see how many words and how many unique words there are """
        assessment = []
        for paragraph in self.text:  # assuming the paragraph list lives on the instance
            doc = nlp(paragraph)
            total_words = 0
            unique_words = []
            unique_lemmata = []
            for token in doc:
                if not token.is_punct:
                    total_words += 1
                    if token.text not in unique_words:
                        unique_words.append(token.text)
                    if token.lemma_ not in unique_lemmata:
                        unique_lemmata.append(token.lemma_)

            assessment.append(f"Total: {total_words}; Unique: {len(unique_words)}; Lemmata: {len(unique_lemmata)}")
            
        for a in assessment:
            print(a)
Example #11
    def formulate_response(self, question):
        grammar = JSGFParser('speech/hark-sphinx/grammar/NielsSebastiaan.gram')
        language_parsing = nlp()
        question = language_parsing.remove_name(language_parsing.remove_opts(question))
        if grammar.findToken(question) is not None:
            # question
            responses = {
                'what time is it': "The current time is: " + time.strftime("%H:%M:%S") + ".",
                'what is the oldest most widely used drug on earth': 'the oldest, most widely used drug on earth is coffee.',
                'who are your creators': 'My creators are Niels and Sebastiaan.'}
            return responses[question]
        else:
            words = question.split(' ')
            if grammar.findTokenVar(words[0]) == '<verb>':
                # command
                question = question.replace(words[0] + " to the ", "")
                if grammar.findTokenVar(question) == '<location>':
                    return "I am moving to " + question + "."
            else:
                # request
                return "I am approaching the dining table."
Example #12
def main_logic():
    temp_text = say_str_queue.get()
    global_fuck = 0  # dummy state variable
    while temp_text is not None:
        func_num, *args = nlp(thu1, temp_text)
        # if global_fuck == 0:
        #     start_obstacle_recognition()  # what is the obstacle
        #     global_fuck = 1
        # elif global_fuck == 1:
        #     start_general_recognition()  # what is ahead
        #     global_fuck = 2
        # elif global_fuck == 2:
        #     start_object_recognition(*args)  # where is the person
        #     global_fuck = 3
        # elif global_fuck == 3:
        #     start_limited_meter_object_recognition(*args)  # what is within one meter
        #     global_fuck = 4
        # else:
        speak_str = direction('南邮广场')
        # if speak_str is not None:
        #     logging.debug('Outdoor navigation announcement: {}'.format(speak_str))
        text_to_audio.put(speak_str)
        # if func_num == 0:
        #     start_object_recognition(*args)
        # elif func_num == 1:
        #     start_general_recognition()
        # elif func_num == 2:
        #     start_indoor_navigation(*args)
        # elif func_num == 3:
        #     start_outdoor_navigation(*args)
        # elif func_num == 4:  # distance-limited image recognition
        #     start_limited_meter_object_recognition(*args)
        # elif func_num == 5:  # obstacle recognition
        #     start_obstacle_recognition()

        temp_text = say_str_queue.get()
Example #13
def analyze():
    return nlp(request.data.decode('utf-8'))
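For context, a handler like this is normally registered on a route; a minimal sketch with a hypothetical Flask app and path:

from flask import Flask, request

app = Flask(__name__)  # hypothetical application object

@app.route("/analyze", methods=["POST"])  # hypothetical path
def analyze():
    return nlp(request.data.decode('utf-8'))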
Example #14
def tokenize_magically(text):
    return [tok.text for tok in nlp(text)]
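A usage sketch, assuming nlp is a loaded spaCy pipeline:

import spacy

nlp = spacy.load("en_core_web_sm")
print(tokenize_magically("Dr. Smith went to Washington."))
# e.g. ['Dr.', 'Smith', 'went', 'to', 'Washington', '.']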
Example #15
import discord
from discord.ext import commands
import os
import asyncio
import nlp as n
import numpy as np
import random

token = "YOUR KEY HERE"

#client = commands.Bot(command_prefix='?', description='A bot that greets the user back.')
client = discord.Client()
mod = n.nlp()
context = {}


@client.event
async def on_ready():
    activity = discord.Game(name="with 3D husbandos! owo")
    await client.change_presence(status=discord.Status.idle, activity=activity)
    print("Logged in as " + client.user.name)
    #servers = list(client.guilds)
    #print("Connected on " + str(len(client.guilds)) + " servers:")
    #for x in range(len(servers)):
    #    print(' ' + servers[x-1].name)


@client.event
async def on_message(message):
    if message.channel.name == "bot_commands":
        if message.author != client.user:
            await chat(message)  # presumably hands off to the chat() coroutine in Example #1
Example #16
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import nlp

if __name__ == "__main__":
    nlp.nlp()
Example #17
#coding: utf-8

#util
from data import *
from read_conf import config
from optparse import OptionParser
import csv
import cPickle as pickle
from operator import itemgetter
from nlp import nlp
from itertools import combinations
import sys

mnlp = nlp()

dp = config("../conf/dp.conf")

#rake
from rake import Rake
rake = Rake()

#nltk
import nltk

#math
import math
from math import log

print "读入数据文件"
f = open(dp["word_tag"],"rb")
word_tag = pickle.load(f)
Example #18
def start(body):  
    # Get the user, or create one if one does not exist.
    cellNumber = body['cellNumber']
    currentUser = users.find_one({'cellNumber' : cellNumber})
    if currentUser is None:
        currentUser = newUser(cellNumber)

    # Load the question text
    question = body['question'].lower()
    
    # Check for 'help'
    if question == 'help':  # question was already lower-cased above
        answer =    "For a specific fact, ask a question like 'what is the population of"\
                    " London'\n" \
                    "For general information, ask for a description with 'describe "\
                    "London'\n" \
                    "If the contents is trimmed and you want more, send 'more'\n" \
                    "If an answer is of low quality, help improve the database by "\
                    "sending 'poor'"
    elif ('rate' in question):
        lastQuestion = currentUser['lastQuestion']
        
        # Establish whether the rating is of the correct form
        splitQuestion = question.split(' ')
        if not (len(splitQuestion) == 2\
        and splitQuestion[0] == 'rate'\
        and splitQuestion[1].isdigit()\
        and int(splitQuestion[1]) in range(1,6)):
            answer = "Feedback unrecognised. Send 'Rate' followed by a quality "\
                "out of 5. E.g, for a bad quality answer, send 'Rate 1' "\
                "or for a good quality answer, send 'Rate 5'"
        
        # Check that the criteria for rating the previous question is good, then process
        elif (lastQuestion['givenProperty'] != None)\
        and (lastQuestion['returnedProperty'] != None)\
        and (lastQuestion['receivedFeedback'] == False)\
        and (lastQuestion['question'] != None)\
        and (lastQuestion['answer'] != None):# TODO - this line and the line above - correct? False?!
            successful = adjustRanking(
                            lastQuestion['question'],
                            lastQuestion['answer'],
                            lastQuestion['givenProperty'],
                            lastQuestion['returnedProperty'],
                            int(splitQuestion[1]))
            if (successful):
                currentUser['lastQuestion']['receivedFeedback'] = True
                updateUser(currentUser)
                answer = "Thank you for your feedback - it has been recorded."
            else:
                answer = "Feedback unrecognised. Send 'Rate' followed by a quality "\
                    "out of 5. E.g, for a bad quality answer, send 'Rate 1' "\
                    "or for a good quality answer, send 'Rate 5'"
        else:
            answer = "Feedback already received or not expected for this question."
    else:
        # Process the natural language in the question
        parsedQuestion = nlp.nlp(question)
        if parsedQuestion['success'] == False:
            answer = 'No answer was found'
            print 'NLP Failed'
        else:
            property = parsedQuestion['property']
            placeDict = parsedQuestion['place']
            wikiPlaceName = placeDict['wikiName']
            realName = placeDict['realName']
            print 'Finding argument ', property, ' on page ', wikiPlaceName
            answer, keyUsed = sourceProcessor.findArgumentOnPage(property,wikiPlaceName)
            updateUserWithLastQuestion(currentUser, wikiPlaceName, property, keyUsed, answer)

    return answer
Example #19
    def __init__(self, string):
        string = toolkit.ensure_unicode(string)
        self._doc = nlp.nlp(string)
        # merge each entity into a single token (older spaCy Span.merge API)
        for ent in reversed(self._doc.ents):
            ent.merge(ent.root.tag_, ent.root.lemma_, ent.label_)
        self.tokens = [MutableToken(self, token.i) for token in self._doc]
Example #20
def server():
    return nlp.nlp(request.forms.get('story'))
Example #21
    c.Username = USERNAME
    c.Limit = 1
    c.Hide_output = True
    c.Store_object = True

    # Run
    twint.run.Search(c)
    print("DONE INITIAL SEARCH")

    # Store output
    tweets = twint.output.tweets_list
    latest_tweet_date = " ".join(tweets[0].datetime.split(" ")[:2])
    #print("Latest tweet: " + tweets[0].tweet)

    # Track the Ticker from the latest tweet
    result_list = nlp.nlp(tweets[0].tweet)

    ticker = None  # avoid a NameError when no words are returned
    for word in result_list:
        ticker = get_ticker(word)  # keeps the last ticker found
        #print("Found ticker: {}".format(ticker))

    q.put(ticker)

    t = PrettyTable(
        ['Tweet                                      ', '  Detected Ticker'])
    t.align['Tweet                                      '] = 'l'
    t.align['  Detected Ticker'] = 'r'
    t.hrules = 1
    print(t)
    t.add_row([tweets[0].tweet, ticker])
    print("\n".join(t.get_string().splitlines()[-2:]))
Example #22
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import nlp

if __name__ == "__main__":
    nlp.nlp()
Example #23
    def train(self, data1, tdata, cdata):
        enl = tdata[0]
        enr = tdata[2]
        anl = tdata[4]
        anr = tdata[6]
        gnl = tdata[8]
        gnr = tdata[10]
        oll = tdata[12]
        olr = tdata[14]
        orl = tdata[16]
        orr = tdata[18]
        y_train = tdata[20]
        y_test = tdata[21]
        ohtr = tdata[22]
        tr_len = tdata[23]
        ohte = tdata[24]
        te_len = tdata[25]

        data = np.argmax(data1, 1)
        src = set(range(31))  # class indices 0 through 30

        for i in range(2000):
            arr = random.sample(range(self.sjnum), self.sjnum - 1)
            a = []
            b = []
            c = []
            d = []
            e = []
            f = []
            a1 = []
            b1 = []
            c1 = []
            d1 = []
            e1 = []
            f1 = []
            g = []
            j = 0
            num = 0
            totacc = 0
            while num < self.batch_size:
                if data[arr[j]] in src:
                    a.append(enl[arr[j]])
                    a1.append(enr[arr[j]])
                    b.append(anl[arr[j]])
                    b1.append(anr[arr[j]])
                    c.append(gnl[arr[j]])
                    c1.append(gnr[arr[j]])
                    d.append(oll[arr[j]])
                    d1.append(olr[arr[j]])
                    e.append(orl[arr[j]])
                    e1.append(orr[arr[j]])
                    f.append(y_train[arr[j]])
                    f1.append(ohtr[arr[j]])
                    g.append(tr_len[arr[j]])
                    num = num + 1
                j = j + 1

            self.sess.run(self.optim, feed_dict={self.emgl: a,
                                                 self.emgr: a1,
                                                 self.accl: b,
                                                 self.accr: b1,
                                                 self.gyrl: c,
                                                 self.gyrr: c1,
                                                 self.oll: d,
                                                 self.olr: d1,
                                                 self.oril: e,
                                                 self.orir: e1,
                                                 self.target: f,
                                                 self.label: f1,
                                                 self.target_len: g,
                                                 self.dropout: 0.5})

            totacc = 0
            a = []
            b = []
            c = []
            f = []
            a1 = []
            b1 = []
            c1 = []
            f1 = []
            d = []
            e = []
            d1 = []
            e1 = []
            aa = np.zeros(36)
            bb = np.zeros(36)
            g = []
            da = []
            num = 0
            znum = 0
            for j in range(self.sjnum):
                if data[j] in src:
                    a.append(enl[j])
                    a1.append(enr[j])
                    b.append(anl[j])
                    b1.append(anr[j])
                    c.append(gnl[j])
                    c1.append(gnr[j])
                    d.append(oll[j])
                    d1.append(olr[j])
                    e.append(orl[j])
                    e1.append(orr[j])
                    f.append(y_train[j])
                    f1.append(ohtr[j])
                    g.append(tr_len[j])
                    da.append(data[j])
                    num = num + 1
                    znum = znum + 1
                if num == self.batch_size:
                    prob, rloss = self.sess.run([self.pprobs, self.ploss], feed_dict={self.emgl: a,
                                                                                     self.emgr: a1,
                                                                                     self.accl: b,
                                                                                     self.accr: b1,
                                                                                     self.gyrl: c,
                                                                                     self.gyrr: c1,
                                                                                     self.oll: d,
                                                                                     self.olr: d1,
                                                                                     self.oril: e,
                                                                                     self.orir: e1,
                                                                                     self.target: f,
                                                                                     self.label: f1,
                                                                                     self.target_len: g,
                                                                                     self.dropout: 0.5})

                    for k in range(self.batch_size):
                        nl = nlp.nlp(prob[k][:self.word_em - 1])
                        c = nl.getans()
                        hb_maxa = self.hb(c, len(c))
                        aq, _, _ = self.lcs(f[k][1:], hb_maxa, g[k] - 1, len(hb_maxa))
                        aa[da[k]] = aa[da[k]] + aq
                        bb[da[k]] = bb[da[k]] + 1
                        totacc = totacc + aq
                    num = 0
                    a = []
                    b = []
                    c = []
                    d = []
                    e = []
                    f = []
                    a1 = []
                    b1 = []
                    c1 = []
                    d1 = []
                    e1 = []
                    f1 = []
                    da = []
                    g = []
            for j in range(36):
                print('seq ', j, "'s acc:", aa[j] / bb[j], ' ', aa[j], ' ', bb[j])
            totacc = totacc / (znum - num)
            print('epoch ', i, '\'s acc', totacc)
            totacc = 0
            totir = 0
            totdr = 0
            for start, end in zip(range(0, self.sdnum, self.batch_size),
                                  range(self.batch_size, self.sdnum + 1, self.batch_size)):
                a = []
                b = []
                c = []
                f = []
                a1 = []
                b1 = []
                c1 = []
                f1 = []
                d = []
                e = []
                d1 = []
                e1 = []
                g = []
                for j in range(start, end):
                    a.append(cdata[0][j])
                    a1.append(cdata[1][j])
                    b.append(cdata[2][j])
                    b1.append(cdata[3][j])
                    c.append(cdata[4][j])
                    c1.append(cdata[5][j])
                    f.append(y_test[j])
                    f1.append(ohte[j])
                    d.append(cdata[6][j])
                    d1.append(cdata[7][j])
                    e.append(cdata[8][j])
                    e1.append(cdata[9][j])
                    g.append(te_len[j])
                prob, rloss= self.sess.run([self.pprobs, self.ploss], feed_dict={self.emgl: a,
                                                                                 self.emgr: a1,
                                                                                 self.accl: b,
                                                                                 self.accr: b1,
                                                                                 self.gyrl: c,
                                                                                 self.gyrr: c1,
                                                                                 self.oll: d,
                                                                                 self.olr: d1,
                                                                                 self.oril: e,
                                                                                 self.orir: e1,
                                                                                 self.target: f,
                                                                                 self.label: f1,
                                                                                 self.target_len: g,
                                                                                 self.dropout: 0.5})

                for k in range(self.batch_size):
                    nl = nlp.nlp(prob[k][:self.word_em - 1])
                    c = nl.getans()
                    hb_maxa = self.hb(c, len(c))
                    aq,isr , idr = self.lcs(f[k][1:], hb_maxa, g[k] - 1, len(hb_maxa))
                    totacc = totacc + aq
                    totir=totir + isr
                    totdr=totdr +idr
            totacc = totacc / end
            totir = totir / end
            totdr = totdr / end
            print('epoch ', i, ' test\'s acc', totacc, ' test\'s ir', totir, ' test\'s dr', totdr)

            if totacc > self.bacc:
                self.bacc = totacc
                self.saver.save(self.sess, "Model_mxpool-ztd/model.ckpt")
            print('newest bacc:', self.bacc)
        return 0
Example #24
from nlp import nlp
from preprocessing import preprocess
from scraper import scrape
import plotly
import plotly.express as px
import plotly.graph_objs as go
import json
import get_db_data

sentiment = nlp()
df_stocks = get_db_data.get_stock_prices(sentiment)
df_sent = get_db_data.get_14_day_sentiment(sentiment)


def plotgraph(dfstocks=df_stocks):
    data = [go.Scatter(x=dfstocks["Date"], y=dfstocks["Close"])]
    graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
    #fig = px.line(dfstocks,x="Date", y="Close")
    return graphJSON


def plotsent(df_sent=df_sent):
    datasent = [go.Scatter(x=df_sent["index"], y=df_sent["sentiment"])]
    graphsentJSON = json.dumps(datasent, cls=plotly.utils.PlotlyJSONEncoder)
    #fig = px.line(dfstocks,x="Date", y="Close")
    return graphsentJSON
Example #25
import translator
import nlp
import composit_image
#import getimage
import getimage_bing

prime_sent=input("input a sentence: ")

nlp_list=nlp.nlp(prime_sent)

print(nlp_list)

noun_list = []

# keep tokens whose POS tag marks them as nouns (Na/Nb/Nc, as in the CKIP tagset)
for key, value in nlp_list[0].items():
    if "Na" in value or "Nb" in value or "Nc" in value:
        noun_list.append(key)

print(noun_list)


trans_list=translator.trans(noun_list)
Example #26
import os
import re
import csv
from itertools import combinations

from read_conf import config  # needed for the config(...) call below

#rake
from rake import Rake
rake = Rake()

#nltk
import nltk
from nltk.util import clean_html
from nltk.util import clean_url

#nlp
from nlp import nlp
mnlp = nlp()

tag_re = re.compile(r"<p>(.+?)</p>", re.DOTALL)

dp = config("../conf/dp.conf")


# This function removes duplicates:
# it reads the titles first, then compares them with the test titles to check for overlaps
def remove_duplicate():
    dup = open(dp["dup_test"], "w")
    other = open(dp["other_test"], "w")
    w_dup = csv.writer(dup)
    w_other = csv.writer(other)

    # read the training file
Example #27
def lemmatize(text):
    tokens = nlp(text)
    return [
        token.lemma_.lower() for token in tokens
        if token.is_alpha and not token.pos_ == "PRON"
    ]
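A usage sketch, assuming nlp is a loaded spaCy pipeline (the pos_ check filters out pronouns):

import spacy

nlp = spacy.load("en_core_web_sm")
print(lemmatize("The cats were running"))
# e.g. ['the', 'cat', 'be', 'run']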
Example #28
# %%
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from nlp import nlp
from collections import Counter
from fuzzywuzzy import fuzz
import Levenshtein as lev
import spacy
Spnlp = spacy.load("en_core_web_sm")
from spacy.matcher import PhraseMatcher
matcher = PhraseMatcher(Spnlp.vocab)
import matplotlib.pyplot as plt
from wordcloud import WordCloud

LangProcessor = nlp()


# %%
#load the job description

with open('identity.txt') as job:
    text = job.read()    


# %%
#load cv

with open('cv') as cv:
    cvtext = cv.read()
Example #29
    def train(self, data1, tdata, cdata):
        enl = tdata[0]
        enr = tdata[2]
        anl = tdata[4]
        anr = tdata[6]
        gnl = tdata[8]
        gnr = tdata[10]
        oll = tdata[12]
        olr = tdata[14]
        orl = tdata[16]
        orr = tdata[18]
        y_train = tdata[20]
        y_test = tdata[21]
        ohtr = tdata[22]
        tr_len = tdata[23]
        ohte = tdata[24]
        te_len = tdata[25]

        data = np.argmax(data1, 1)
        src = set(range(31))  # class indices 0 through 30

        for i in range(2000):
            arr = random.sample(range(self.sjnum), self.sjnum - 1)
            a = []
            b = []
            c = []
            d = []
            e = []
            f = []
            a1 = []
            b1 = []
            c1 = []
            d1 = []
            e1 = []
            f1 = []
            g = []
            j = 0
            num = 0
            totacc = 0
            while num < self.batch_size:
                if data[arr[j]] in src:
                    a.append(enl[arr[j]])
                    a1.append(enr[arr[j]])
                    b.append(anl[arr[j]])
                    b1.append(anr[arr[j]])
                    c.append(gnl[arr[j]])
                    c1.append(gnr[arr[j]])
                    d.append(oll[arr[j]])
                    d1.append(olr[arr[j]])
                    e.append(orl[arr[j]])
                    e1.append(orr[arr[j]])
                    f.append(y_train[arr[j]])
                    f1.append(ohtr[arr[j]])
                    g.append(tr_len[arr[j]])
                    num = num + 1
                j = j + 1

            self.sess.run(self.optim, feed_dict={self.emgl: a,
                                                 self.emgr: a1,
                                                 self.accl: b,
                                                 self.accr: b1,
                                                 self.gyrl: c,
                                                 self.gyrr: c1,
                                                 self.oll: d,
                                                 self.olr: d1,
                                                 self.oril: e,
                                                 self.orir: e1,
                                                 self.target: f,
                                                 self.label: f1,
                                                 self.target_len: g,
                                                 self.dropout: 0.5})

            totacc = 0
            a = []
            b = []
            c = []
            f = []
            a1 = []
            b1 = []
            c1 = []
            f1 = []
            d = []
            e = []
            d1 = []
            e1 = []
            aa = np.zeros(36)
            bb = np.zeros(36)
            g = []
            da = []
            num = 0
            znum = 0
            for j in range(self.sjnum):
                if data[j] in src:
                    a.append(enl[j])
                    a1.append(enr[j])
                    b.append(anl[j])
                    b1.append(anr[j])
                    c.append(gnl[j])
                    c1.append(gnr[j])
                    d.append(oll[j])
                    d1.append(olr[j])
                    e.append(orl[j])
                    e1.append(orr[j])
                    f.append(y_train[j])
                    f1.append(ohtr[j])
                    g.append(tr_len[j])
                    da.append(data[j])
                    num = num + 1
                    znum = znum + 1
                if num == self.batch_size:
                    prob, rloss = self.sess.run([self.pprobs, self.ploss], feed_dict={self.emgl: a,
                                                                                     self.emgr: a1,
                                                                                     self.accl: b,
                                                                                     self.accr: b1,
                                                                                     self.gyrl: c,
                                                                                     self.gyrr: c1,
                                                                                     self.oll: d,
                                                                                     self.olr: d1,
                                                                                     self.oril: e,
                                                                                     self.orir: e1,
                                                                                     self.target: f,
                                                                                     self.label: f1,
                                                                                     self.target_len: g,
                                                                                     self.dropout: 0.5})

                    for k in range(self.batch_size):
                        nl = nlp.nlp(prob[k][:self.word_em - 1])
                        c = nl.getans()
                        hb_maxa = self.hb(c, len(c))
                        aq = self.lcs(f[k][1:], hb_maxa, g[k] - 1, len(hb_maxa))
                        aa[da[k]] = aa[da[k]] + aq
                        bb[da[k]] = bb[da[k]] + 1
                        totacc = totacc + aq
                    num = 0
                    a = []
                    b = []
                    c = []
                    d = []
                    e = []
                    f = []
                    a1 = []
                    b1 = []
                    c1 = []
                    d1 = []
                    e1 = []
                    f1 = []
                    da = []
                    g = []
            for j in range(36):
                print('seq ', j, "'s acc:", aa[j] / bb[j], ' ', aa[j], ' ', bb[j])
            totacc = totacc / (znum - num)
            print('epoch ', i, '\'s acc', totacc)
            totacc = 0
            for start, end in zip(range(0, self.sdnum, self.batch_size),
                                  range(self.batch_size, self.sdnum + 1, self.batch_size)):
                a = []
                b = []
                c = []
                f = []
                a1 = []
                b1 = []
                c1 = []
                f1 = []
                d = []
                e = []
                d1 = []
                e1 = []
                g = []
                for j in range(start, end):
                    a.append(cdata[0][j])
                    a1.append(cdata[1][j])
                    b.append(cdata[2][j])
                    b1.append(cdata[3][j])
                    c.append(cdata[4][j])
                    c1.append(cdata[5][j])
                    f.append(y_test[j])
                    f1.append(ohte[j])
                    d.append(cdata[6][j])
                    d1.append(cdata[7][j])
                    e.append(cdata[8][j])
                    e1.append(cdata[9][j])
                    g.append(te_len[j])
                prob, rloss = self.sess.run([self.pprobs, self.ploss], feed_dict={self.emgl: a,
                                                                                 self.emgr: a1,
                                                                                 self.accl: b,
                                                                                 self.accr: b1,
                                                                                 self.gyrl: c,
                                                                                 self.gyrr: c1,
                                                                                 self.oll: d,
                                                                                 self.olr: d1,
                                                                                 self.oril: e,
                                                                                 self.orir: e1,
                                                                                 self.target: f,
                                                                                 self.label: f1,
                                                                                 self.target_len: g,
                                                                                 self.dropout: 0.5})

                for k in range(self.batch_size):
                    nl = nlp.nlp(prob[k][:self.word_em - 1])
                    c = nl.getans()
                    hb_maxa = self.hb(c, len(c))
                    aq = self.lcs(f[k][1:], hb_maxa, g[k] - 1, len(hb_maxa))
                    totacc = totacc + aq
            totacc = totacc / end
            print('epoch ', i, ' test\'s acc', totacc)
            if totacc > self.bacc:
                self.bacc = totacc
                self.saver.save(self.sess, "Model_biLSTM_fc/model.ckpt")
            print('newest bacc:', self.bacc)
        return 0
Example #30
def single_panel(prime_sent, return_list):

    noun_list = nlp.nlp(prime_sent)

    trans_list = translator.trans(noun_list)

    print(trans_list)

    image_path_list = []
    for word in trans_list:
        if word == "up" or word == "down" or word == "left" or word == "right" or word == "plus":
            path = firebase.get_arrow(word)
        else:
            path = firebase.get_icon(word)

            if (path == None):
                path = getimage_bing.crawler_bing(word)
                firebase.post_address(path, word)

        path = str(path).replace('C:\\Users\\a1235\\Desktop\\P\\', "./")
        path = str(path).replace('\\', "/")
        image_path_list.append(path)

    print(image_path_list)

    # composite_image_path = composit_image.composit_icon(image_path_list)

    # step_panel = Image.new('RGB', (394, 493), (255, 255, 255))

    # step_image = Image.open(composite_image_path)

    # step_border = ImageDraw.Draw(step_panel)
    # step_border.line([(0, 101), (389, 101)], fill=(117, 0, 0), width=5)

    # # set border

    # # x-top
    # step_border.line([(0, 2.5), (394, 2.5)], fill=(117, 0, 0), width=5)

    # # x-bottom
    # step_border.line([(0, 490.5), (394, 490.5)], fill=(117, 0, 0), width=5)

    # # y-left
    # step_border.line([(2.5, 0), (2.5, 493)], fill=(117, 0, 0), width=5)

    # # y-right
    # step_border.line([(391.5, 0), (391.5, 493)], fill=(117, 0, 0), width=5)

    # font = ImageFont.truetype("microblack.ttf", 35)
    # step_text = ImageDraw.Draw(step_panel)
    # step_text.text((5, 5), prime_sent, font=font, fill=(0, 0, 0), align="center")

    # step_panel.paste(step_image, (5, 104))

    # theTime = datetime.datetime.now()
    # str_time = str(theTime).replace(".", "_")
    # str_time = str_time.replace(":", "_")

    # folder_path = "./panel_image/"+str_time

    # if(os.path.exists(folder_path) == False):
    #     os.makedirs(folder_path)

    # image_path = folder_path+"/merge.jpg"
    # step_panel.save(image_path)

    return_list.append(image_path_list)
Example #31
    def test(self,tdata,emgl,emgr,accl,accr,gyrl,gyrr,oril,orir,ol,or1):
        enl = tdata[0]
        enr = tdata[2]
        anl = tdata[4]
        anr = tdata[6]
        gnl = tdata[8]
        gnr = tdata[10]
        oll = tdata[12]
        olr = tdata[14]
        orl = tdata[16]
        orr = tdata[18]
        y_train = tdata[20]
        y_test = tdata[21]
        ohtr = tdata[22]
        tr_len = tdata[23]
        ohte = tdata[24]
        te_len = tdata[25]
        a = []
        b = []
        c = []
        d = []
        e = []
        f = []
        a1 = []
        b1 = []
        c1 = []
        d1 = []
        e1 = []
        f1 = []
        g = []
        j = 0
        num = 0
        totacc = 0
        a.append(emgl)
        a1.append(emgr) 
        b.append(accl)
        b1.append(accr)
        c.append(gyrl)
        c1.append(gyrr)
        d.append(ol)
        d1.append(or1)
        e.append(oril)
        e1.append(orir)
        f.append(y_train[0])
        f1.append(ohtr[0])
        g.append(tr_len[0])        
        for j in range(self.batch_size-1):
            a.append(enl[j])
            a1.append(enr[j])
            b.append(anl[j])
            b1.append(anr[j])
            c.append(gnl[j])
            c1.append(gnr[j])
            d.append(oll[j])
            d1.append(olr[j])
            e.append(orl[j])
            e1.append(orr[j])
            f.append(y_train[j])
            f1.append(ohtr[j])
            g.append(tr_len[j])

        prob,loss=self.sess.run([self.pprobs, self.ploss], feed_dict={self.emgl: a,
                                             self.emgr: a1,
                                             self.accl: b,
                                             self.accr: b1,
                                             self.gyrl: c,
                                             self.gyrr: c1,
                                             self.oll: d,
                                             self.olr: d1,
                                             self.oril: e,
                                             self.orir: e1,
                                             self.target: f,
                                             self.label: f1,
                                             self.target_len: g,
                                             self.dropout: 0.5})
        nl = nlp.nlp(prob[0][:self.word_em - 1])
        c = nl.getans()
        hb_maxa = self.hb(c, len(c))
        result = [mp[a] for a in hb_maxa]
        print('result is:', result)
        return ''.join(result)
Example #32
    file = "output_audio.mp3"
    tts.save(file)
    playsound.playsound(file)
    os.remove(file)


# speech recognition
def get_audio():
    with sr.Microphone() as source:
        audio = r.listen(source)
        voice_data = ''
        try:
            voice_data = r.recognize_google(audio)
        except sr.UnknownValueError:
            pass
        except sr.RequestError:
            print("Sorry, My services are down")

        return voice_data


time.sleep(1)
# calling these functions
login()
ava("How can I help you?")
while True:  # run this loop forever
    voice_data = get_audio()
    nlp(voice_data)

    # print what the user said
    print(voice_data)
Example #33
def parse(m):
    prs = nlp(m)
    return prs['sentences'][0]['parse']
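parse expects a CoreNLP-style JSON response. A sketch of a compatible nlp helper, assuming a Stanford CoreNLP server is running locally (pycorenlp is one client whose annotate call returns this shape):

from pycorenlp import StanfordCoreNLP

corenlp = StanfordCoreNLP('http://localhost:9000')  # assumed server address

def nlp(m):
    # returns {'sentences': [{'parse': ...}, ...]} when outputFormat is json
    return corenlp.annotate(m, properties={'annotators': 'parse',
                                           'outputFormat': 'json'})

print(parse("The quick brown fox jumps over the lazy dog."))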