def getClassificationModel2(email):
    preprocessing_group = identify_email_group(email)
    if preprocessing_group == 'email_group_1':
        tempString = preprocessGroup1(email.text_body)
    elif preprocessing_group == 'email_group_2':
        tempString = preprocessGroup2(email.text_body)
    else:
        return ['not recognized', 0.00]
    # The two groups only differ in preprocessing; classification is identical.
    outputString = tempString[:1000]
    #print(outputString)
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=WATSON_USERNAME,
        password=WATSON_PASSWORD)
    classification = natural_language_classifier.classify(CLASSIFIER_ID_MODEL2, outputString)
    top_classification = classification['top_class']
    top_classification_confidence = 0.00
    for i in classification['classes']:
        if i['class_name'] == top_classification:
            top_classification_confidence = i['confidence']
    return [top_classification, top_classification_confidence]
Example 2
    def nlc(self, nlc_input):
        # try:
        logging.info('entered into nlc function')
        natural_language_classifier = NaturalLanguageClassifierV1(
            username=spark_conf.classifier_input['username'],
            password=spark_conf.classifier_input['password'])
        classes = []
        classifiers = natural_language_classifier.list()
        #print(simplejson.dumps(classifiers, indent=2))

        status = natural_language_classifier.status('359f41x201-nlc-65743')
        #print(simplejson.dumps(status, indent=2))

        #df = pd.read_csv(spark_conf.classifier_input['input_file']+'reviews_%s.txt'% spark_conf.retrieved_time.replace(':','-'),sep='|')
        df = nlc_input.toPandas()
        # df1 = df.'review_text'.unique()
        # print df
        df2 = df.copy(deep=True)

        if (status['status'] == 'Available' and len(df2.review_text) > 0):
            for i in range(0, len(df2.review_text), 1):
                line = df2.review_text[i]
                # print line#ISO-8859-1
                #classes.append(natural_language_classifier.classify('359f41x201-nlc-65743',line.encode("ISO-8859-1")))
                classes.append(
                    natural_language_classifier.classify(
                        '359f41x201-nlc-65743', line))
            with open(
                    spark_conf.file_path['data_update_path'] +
                    'review_{}.json'.format('classifier'), 'w') as f:
                simplejson.dump(classes, f, indent=5)
            return 1
        else:
            logging.info("NO DATA AVAILABLE")
            return 0
Example 3
def get_illness(request):
    natural_language_classifier = NaturalLanguageClassifierV1(username='',
                                                              password='')

    classes = natural_language_classifier.classify('', request)
    print(type(classes))
    return classes['classes']
Example 4
def nlc():
    try:
        logging.info('entered into nlc function')
        natural_language_classifier = NaturalLanguageClassifierV1(
            username='******',
            password='******')
        classes = []
        classifiers = natural_language_classifier.list()
        #print(json.dumps(classifiers, indent=2))
        status = natural_language_classifier.status('359f41x201-nlc-65743')
        #print(json.dumps(status, indent=2))

        df = pd.read_csv(
            paths['output_dir'] +
            'review_text_%s.txt' % retrieved_time.replace(':', '-'),
            # Read as ISO-8859-1 so each review is already a str (the original
            # Python 2 code decoded every line by hand).
            sep='|', encoding='ISO-8859-1')
        if status['status'] == 'Available' and len(df.review_text) > 0:
            for i in range(0, len(df.review_text), 1):
                line = df.review_text[i]
                classes.append(
                    natural_language_classifier.classify(
                        '359f41x201-nlc-65743', line))
            with open('yelp_{}_{}.json'.format('Resto2', 'HOU'), 'w') as f:
                json.dump(classes, f, indent=5)
        else:
            logging.info('No Data Available')
        return 1
    except Exception as e:
        logging.info('error in nlc function  %s' % str(e))
def deleteClassifier(classifier_id, username, password):
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=username,
        password=password)

    classifier_info = natural_language_classifier.delete_classifier(classifier_id)
    return classifier_info
Example 6
def cherk():
    tweet = request.args.get("tweet")
    natural_language_classifier = NaturalLanguageClassifierV1(
        username='******',
        password='******')
    classes = natural_language_classifier.classify('b8f3cex446-nlc-797', tweet)
    print(json.dumps(classes, indent=2))
    def initNLC(self):
        credentials = self.getPixieAppEntity()
        if credentials is None:
            return "<div>You must provide credentials to your Watson NLC Service</div>"

        self.natural_language_classifier = NaturalLanguageClassifierV1(
            username=credentials['username'], password=credentials['password'])
Example 8
def post_nlc():
    input_text = request.form.get('data')

    # Access the NLC service
    natural_language_classifier = NaturalLanguageClassifierV1(username='',
                                                              password='')

    if natural_language_classifier:
        classes = natural_language_classifier.classify('c7fa4ax22-nlc-10554',
                                                       input_text)

    json_file_path = './restaurants.json'
    with open(json_file_path) as json_file:
        data = json.load(json_file)
        restaurants = data['restaurants']

    for restaurant in restaurants:
        if classes['top_class'] in restaurant['class']:
            restaurant_name = restaurant['name']
            restaurant_url = restaurant['url']
            restaurant_image = restaurant['image']
            restaurant_location = restaurant['location']
            restaurant_budget = restaurant['budget']

    response = {
        'restaurant_name': restaurant_name,
        'restaurant_url': restaurant_url,
        'image_url': restaurant_image,
        'input_text': input_text,
        'location': restaurant_location,
        'budget': restaurant_budget,
    }
    return jsonify(response)
def getTrainingStatus(classifier_id, username, password):
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=username,
        password=password)

    classifier_info = natural_language_classifier.get_classifier(classifier_id)
    return classifier_info
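A caller would typically poll this helper until training completes. A minimal sketch, assuming the older dict-returning SDK, the hypothetical WATSON_USERNAME/WATSON_PASSWORD credentials used elsewhere on this page, and an illustrative classifier ID:

import time

classifier_id = '2374f9x68-nlc-2697'  # illustrative ID only

info = getTrainingStatus(classifier_id, WATSON_USERNAME, WATSON_PASSWORD)
while info['status'] == 'Training':
    time.sleep(30)  # training a classifier usually takes several minutes
    info = getTrainingStatus(classifier_id, WATSON_USERNAME, WATSON_PASSWORD)
print(info['status'])  # 'Available' once the classifier is ready to classify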
    def __init__(self, url, username, password, classifier_id):
        self.creds['url'] = url
        self.creds['username'] = username
        self.creds['password'] = password

        self.api_ids['classifier_id'] = classifier_id

        self.nlc = NaturalLanguageClassifierV1(username=self.creds['username'],
                                               password=self.creds['password'])
Example 11
def get_illness(request):
    natural_language_classifier = NaturalLanguageClassifierV1(username='',
                                                              password='')

    classes = natural_language_classifier.classify('', request)

    illness = classes['classes'][0]['class_name']
    confidence = classes['classes'][0]['confidence']

    return illness, confidence
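For reference, the classify() response these examples index into looks roughly like the dict below (all values are illustrative); the classes list is sorted by descending confidence, which is why [0] is the top match:

# Illustrative response shape (the older SDK returns a plain dict):
classes = {
    "classifier_id": "10D41B-nlc-1",
    "text": "I have a headache and a fever",
    "top_class": "influenza",
    "classes": [
        {"class_name": "influenza", "confidence": 0.89},
        {"class_name": "migraine", "confidence": 0.11},
    ],
}
illness = classes['classes'][0]['class_name']     # top-ranked class
confidence = classes['classes'][0]['confidence']  # its confidence score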
Example 12
def nlcQuery(query):
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=cred.NLCUSER, password=cred.NLCPASS)
    #status = natural_language_classifier.status(cred.NLCCLUSTER)

    try:
        classes = natural_language_classifier.classify(cred.NLCCLUSTER, query)
        results = classes['top_class']
    except:
        results = {}
    return results
Example 13
def nlc_0(text):
    # Account information needed to access the NLC service
    natural_language_classifier = NaturalLanguageClassifierV1(
        #username='******',
        #password='******'
        # A new account needs to be created
    )
    #watson ID:ff1c2bx159-nlc-4926
    print('----- Waiting for a response from Watson NLC -----')
    res = natural_language_classifier.classify('cedd09x164-nlc-4477', text)
    ans = res["top_class"]
    return ans
Example 14
def Analyze():
    api_key = "LiI3o53WHaOU02ATKIwKhSQdirvntK1lZUPA6rhdEwCZ"
    workspace_ID = "6deb62x509-nlc-477"
    natural_language_classifier = NaturalLanguageClassifierV1(
        iam_apikey=api_key)
    comment_text = request.form['text']
    result = ""
    if comment_text != "":
        classes = natural_language_classifier.classify(workspace_ID,
                                                       comment_text)
        # classify() returns a DetailedResponse; .result is the parsed JSON body.
        result = classes.result
    return jsonify(result)
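The examples on this page mix two SDK generations: older watson_developer_cloud releases return plain dicts from classify(), while the newer ibm_watson package returns a DetailedResponse whose parsed body is read via .result or .get_result(). A minimal sketch of the IAM-based pattern, with placeholder API key, service URL, and classifier ID:

from ibm_watson import NaturalLanguageClassifierV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder values -- substitute your own service credentials.
authenticator = IAMAuthenticator('your-apikey')
nlc = NaturalLanguageClassifierV1(authenticator=authenticator)
nlc.set_service_url('https://api.us-south.natural-language-classifier.watson.cloud.ibm.com')

response = nlc.classify('your-classifier-id', 'How hot will it be tomorrow?')
result = response.get_result()  # parsed JSON body as a dict
print(result['top_class'])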
def getClassificationModelD2(email):
    tempString = preprocessGroup1(email.text_body)
    outputString = tempString[:1000]
    #print(outputString)
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=WATSON_USERNAME,
        password=WATSON_PASSWORD)
    classification = natural_language_classifier.classify(CLASSIFIER_ID_MODELD2, outputString)
    top_classification = classification['top_class']
    temp_classes = classification['classes']
    for i in temp_classes:
        if i['class_name'] == top_classification:
            top_classification_confidence = i['confidence']
    classification_output = [top_classification, top_classification_confidence]
    return classification_output
Example 16
def classify(classifier_name, sentence):
    classifiers = classifier.list_classifiers_name_id()
    
    # API CALL 
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=nlc_usr, password=nlc_psw)
    classes = natural_language_classifier.classify(classifiers[classifier_name], sentence)

    # Parsing: classify() already returns a dict here, so the original
    # json.dumps / json.loads round-trip is unnecessary.
    answer_class = classes["classes"][0]["class_name"]  # class with the highest confidence
    # print("Actual Class: ",actual_class," ","Response Class: ",answer_class,"\n")
                
    return answer_class
Example 17
def apiNLCTest(comment_text):
    api_key = "LiI3o53WHaOU02ATKIwKhSQdirvntK1lZUPA6rhdEwCZ"
    workspace_ID = "6deb62x509-nlc-477"

    natural_language_classifier = NaturalLanguageClassifierV1(
        iam_apikey=api_key)
    # classifier instance
    response = natural_language_classifier.classify(workspace_ID, comment_text)
    result = []
    response_new = response.result
    if "classes" in response_new.keys():
        for predicted_class in response_new["classes"]:
            result.append(
                [predicted_class['class_name'], predicted_class['confidence']])
        return (result)
def main():
    parser = argparse.ArgumentParser(
        description="Util method for Natural Language Processor")
    parser.add_argument('--trainingData',
                        help='path to data file of training data')

    args = parser.parse_args()
    with open(authFile, 'r') as service_file:
        service_data = json.loads(service_file.read())
    # authorize and get a token
    credentials = service_data['credentials']

    global nlc
    nlc = NaturalLanguageClassifierV1(username=credentials['username'],
                                      password=credentials['password'])

    sendTrainingData(args.trainingData)
Example 19
def classify(classifier_name, sentence):
    classifiers = list_classifiers_name_id()
    
    # API CALL 
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=nlc_usr, password=nlc_psw)
    t = time.perf_counter()  # time.clock() was removed in Python 3.8
    classes = natural_language_classifier.classify(classifiers[classifier_name], sentence)
    t = time.perf_counter() - t
    print('API call time : ', str(t))

    # Parsing: classify() already returns a dict here, so the original
    # json.dumps / json.loads round-trip is unnecessary.
    answer_class = classes["classes"][0]["class_name"]  # class with the highest confidence
                
    return answer_class
Example 20
def watson(
    text
):  # This function determines which school subject is being talked about in the input string.
    """
    Desc:
        An IBM Watson Natural Language Classifier call that determines which school subject is being talked about.
    Takes:
        str text        : The input string that the function classifies.
    Returns:
        str top_class   : The school subject with the highest confidence,
        or None if the text is classified as plain conversation ('keskustelu').
    Notes:
        None
    Raises:
        None
    """
    def getTopClassConfidence(top_class, response):
        """
        Desc:
            Returns the confidence of the parent funtion's top_class str.
        Takes:
            str top_class   : The class with the highes confidence; See the desc of the parent function.
            JSON response   : The JSON object representing the response from the IBM NLC.
        Retunrs:
            float confidence    : The confidence of the top class.
        Note:
            Only works together with the parent funcntion.
        Raises:
            None
        """
        for i in response['classes']:
            if i['class_name'] == top_class:
                return i['confidence']

    natural_language_classifier = NaturalLanguageClassifierV1(
        username=watson_nlc_username, password=watson_nlc_password)

    response = natural_language_classifier.classify(watson_nlc_id, text)
    top_class = response['top_class']
    print(getTopClassConfidence(top_class, response))
    if top_class == 'keskustelu':
        return None
    else:
        return top_class
def Welcome():
    global CLASSIFIER
    
    try:
        global NLC_SERVICE
        NLC_SERVICE = NaturalLanguageClassifierV1(
        username=NLC_USERNAME,
        password=NLC_PASSWORD      
        )
    except:
        NLC_SERVICE = False
    
    if NLC_SERVICE:
        # create classifier if it doesn't exist, format the json
        CLASSIFIER = _create_classifier()
        classifier_info = json.dumps(CLASSIFIER, indent=4)
        # update the UI, but only the classifier info box
        return render_template('index.html', classifier_info=classifier_info, icd_code="", icd_output="", classifier_output="")
    else:
        return render_template('index.html', classifier_info="Please add a _config.py file with your NLC credentials if running locally. "  , icd_code="", icd_output="", classifier_output="")
Example 22
def WatsonStatus():
    retrieve_and_rank = RetrieveAndRankV1(username=cred.RRUSER,
                                          password=cred.RRPASS)
    nlc = NaturalLanguageClassifierV1(username=cred.NLCUSER,
                                      password=cred.NLCPASS)
    status = {}
    status2 = {}
    timeout = eventlet.Timeout(3, True)
    try:
        status2 = nlc.status(cred.NLCCLUSTER)
        status = retrieve_and_rank.get_solr_cluster_status(
            solr_cluster_id=cred.CLUSTERID)
    except:
        status = {
            "solr_cluster_status": "Retrieve and Rank Unable to be Reached."
        }
        status2 = {
            "status_description":
            "Natural Language Classifier Unable to be reached"
        }
    finally:
        timeout.cancel()
    return status["solr_cluster_status"], status2["status_description"]
Example 23
def init_nat_lang_classifier(initialized=False):

    natural_language_classifier = NaturalLanguageClassifierV1(
        username=USERNAME, password=PASSWORD)

    classifiers = natural_language_classifier.list()
    print(json.dumps(classifiers, indent=2))

    if initialized and classifiers:
        return natural_language_classifier, [
            classifier['classifier_id']
            for classifier in classifiers['classifiers']
            if classifier['classifier_id'] == CURRENT_CLASSIFIER
        ][0]

    if not initialized:
        with open(CLASSES_PATH, 'rb') as training_data:
            response = natural_language_classifier.create(
                training_data=training_data, name='symptoms')
            # print(json.dumps(response, indent=2))
            return natural_language_classifier, response['classifier_id']

    return
Example 24
def choose_response(message_json):
    response = ''
    message_text = message_json['text']

    natural_language_classifier = NaturalLanguageClassifierV1(
        username='******',
        password='******')

    classification_response = natural_language_classifier.classify(
        classifier_id, message_text)

    # logging to see how the classifier's working
    print('Input: {}'.format(message_text))
    for c in classification_response['classes']:
        print('Class: {}  Confidence: {}'.format(c['class_name'],
                                                 c['confidence']))
    print('')

    top_class = classification_response['classes'][0]
    if top_class['confidence'] < confidence_limit:
        return response

    name = get_message_subject(message_json)
    print('Subject: {}'.format(name))

    top_class_name = top_class['class_name']
    if top_class_name == 'standings':
        response = get_current_standings()
    elif top_class_name == 'total':
        response = get_total_score(name)
    elif top_class_name == 'week':
        response = get_current_week_score(name)
    elif top_class_name == 'position':
        response = get_current_position(name)

    return response
Example 25
    'Cabin Crew': '6874ebx556-nlc-1066',
    'Airport': '6876e8x557-nlc-1146',
    'Medical': '687c74x560-nlc-1053',
    'Duty Free': '68788bx558-nlc-1120',
    'Security': '687c74x560-nlc-1055',
    'Safety': '68788bx558-nlc-1123',
    'Emirates Skywards': '6f1d8ax561-nlc-950',
    'Product Development': '6f1d8ax561-nlc-951'
}

nlu = NaturalLanguageUnderstandingV1(
    version='2017-02-27',
    username="******",
    password='******')

nlc_p = NaturalLanguageClassifierV1(
    username="******", password='******')

nlc_s = NaturalLanguageClassifierV1(
    username="******", password='******')

print('reading...')
data = pd.read_csv('testv1.csv')


def sentiment(text):
    try:
        response = nlu.analyze(text=text,
                               features=Features(sentiment=SentimentOptions()))

        sentiment = json.dumps(response['sentiment']['document']['label'])
    except Exception as e:
Example 26
import json
# from os.path import join, dirname
from watson_developer_cloud import NaturalLanguageClassifierV1

natural_language_classifier = NaturalLanguageClassifierV1(
    username='******', password='******')

classifiers = natural_language_classifier.list()
print(json.dumps(classifiers, indent=2))

# create a classifier
# with open('../resources/weather_data_train.csv', 'rb') as training_data:
#     print(json.dumps(natural_language_classifier.create(
# training_data=training_data, name='weather'), indent=2))

# replace 2374f9x68-nlc-2697 with your classifier id
status = natural_language_classifier.status('2374f9x68-nlc-2697')
print(json.dumps(status, indent=2))

if status['status'] == 'Available':
    classes = natural_language_classifier.classify(
        '2374f9x68-nlc-2697', 'How hot will it be '
        'tomorrow?')
    print(json.dumps(classes, indent=2))

# delete = natural_language_classifier.remove('2374f9x68-nlc-2697')
# print(json.dumps(delete, indent=2))

# example of raising a WatsonException
# print(json.dumps(
#     natural_language_classifier.create(training_data='', name='weather3'),
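The commented-out create() call above expects CSV training data where each row is a short text followed by its class label(s). A minimal sketch, assuming a hypothetical weather_data_train.csv and the same username/password-style client created at the top of this example:

# weather_data_train.csv holds one "text,class" row per training example, e.g.
#   How hot is it today?,temperature
#   Is it windy outside?,conditions
with open('weather_data_train.csv', 'rb') as training_data:
    created = natural_language_classifier.create(
        training_data=training_data, name='weather')
print(json.dumps(created, indent=2))
# The response includes a classifier_id; poll status() with it until 'Available'.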
Example 27
import os
import numpy as np
from flask import session
from flask import render_template
from flask import request, url_for, make_response, redirect
from watson_developer_cloud import ConversationV1
from os.path import join, dirname
from flask import Flask
from watson_developer_cloud import NaturalLanguageClassifierV1
#import pysolr
#from watson_developer_cloud import RetrieveAndRankV1

conversation = ConversationV1(username='******',
                              password='******',
                              version='2017-02-03')

natural_language_classifier = NaturalLanguageClassifierV1(
    username='******', password='******')

print("inside global application")

#conv_workspace_id = '72e3ba4d-5ca3-4fa4-b696-4b790d55cf5d'
# conv_workspace_id = '5c2446b9-28a3-40f9-906e-b46350f494b3'
conv_workspace_id = '5e685fd6-a971-47f1-af9b-ab409f4c5a36'

app = Flask(__name__, static_url_path='/static')
app.secret_key = os.urandom(24)


@app.route("/")
def get():
    print("inside get")
    session['context'] = {}
Example 28
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import word_tokenize, pos_tag
from nltk.stem.porter import PorterStemmer
import ibm_boto3
from ibm_botocore.client import Config
from watson_developer_cloud import NaturalLanguageClassifierV1

words = set(nltk.corpus.words.words())
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()

token_dict = {}
stemmer = PorterStemmer()

printable = set(string.printable)

natural_language_classifier = NaturalLanguageClassifierV1(
    username='******', password='******')


def download_file_cos(cos_credentials, key):
    auth_endpoint = 'https://iam.bluemix.net/oidc/token'
    _cos = ibm_boto3.client(
        's3',
        ibm_api_key_id=cos_credentials['apikey'],
        ibm_service_instance_id=cos_credentials['resource_instance_id'],
        ibm_auth_endpoint=auth_endpoint,
        config=Config(signature_version='oauth'),
        endpoint_url=cos_credentials['service_endpoint'])
    f = get_item(bucket_name=cos_credentials['BUCKET'],
                 item_name=key,
                 cos=_cos)
    print("=====================*********************")
Example 29
from ibmcloudenv import IBMCloudEnv
from watson_developer_cloud import NaturalLanguageClassifierV1


if IBMCloudEnv.getString('watson_natural_language_classifier_apikey'):
    iam_url = 'https://iam.stage1.bluemix.net/identity/token' if 'staging' in IBMCloudEnv.getString('watson_natural_language_classifier_iam_serviceid_crn') else 'https://iam.bluemix.net/identity/token'
    natural_language_classifier = NaturalLanguageClassifierV1(
        url=IBMCloudEnv.getString('watson_natural_language_classifier_url'),
        iam_api_key=IBMCloudEnv.getString('watson_natural_language_classifier_apikey'),
        iam_url=iam_url)
else:
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=IBMCloudEnv.getString('watson_natural_language_classifier_username'),
        password=IBMCloudEnv.getString('watson_natural_language_classifier_password'))
<% if (bluemix.backendPlatform.toLowerCase() === 'python') { %>
def getService(app):
    return 'watson-natural-language-classifier', natural_language_classifier
<% } else { %>
def getService():
    return 'watson-natural-language-classifier', natural_language_classifier
<% } %>
import sys, logging
import simplejson
import pandas as pd
import numpy as np
# from os.path import join, dirname
from watson_developer_cloud import NaturalLanguageClassifierV1

logging.info('entered into nlc function')
#Intializing IBM Natural Language Classifier
natural_language_classifier = NaturalLanguageClassifierV1(
    username='******', password='******')
classes = []
#Printing out existing classifiers
classifiers = natural_language_classifier.list()
print(simplejson.dumps(classifiers, indent=2))
#Print Status
status = natural_language_classifier.status('359f41x201-nlc-65743')
print(simplejson.dumps(status, indent=2))
#Reading Input Data
df = pd.read_csv(
    # Read as ISO-8859-1 so each review is already a str (no per-line decode needed).
    '/home/bluedata/decisionengine/reviews_2017-08-08 12-55-05.txt', sep='\t',
    encoding='ISO-8859-1')
df2 = df.copy(deep=True)
#if status of the classifier is available and a review exists then extract the review and classify it and dump it into a json
if (status['status'] == 'Available' and len(df2.review_text) > 0):
    for i in range(0, len(df2.review_text), 1):
        line = df2.review_text[i]
        classes.append(
            natural_language_classifier.classify('359f41x201-nlc-65743',
                                                 line))
    with open(
            '/home/bluedata/decisionengine/yelp_{}_{}.json'.format(