Example No. 1
def run_watson_nlu():
    files = glob.glob('work/bug-*.json')
    (user, passwd) = get_auth()
    # Create the NLU client once instead of re-creating it for every file.
    nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
        version='2017-02-27', username=user, password=passwd)
    for fname in files:
        with open(fname) as f:
            LOG.debug("Processing %s" % fname)
            bug = json.loads(f.read())
            num = bug["link"].split("/")[-1]
            with open("work/res-%s.json" % num, "w") as out:
                res = nlu.analyze(text=bug["comments"],
                                  features=[
                                      features.Concepts(),
                                      features.Keywords(),
                                      features.Emotion(),
                                      features.Sentiment(),
                                  ])
                output = {
                    "link": bug["link"],
                    "tags": bug["tags"],
                    "importance": bug["importance"],
                    "length": len(bug["comments"]),
                    "results": res
                }
                out.write(json.dumps(output, indent=4))
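run_watson_nlu() above only assumes a few keys in each work/bug-*.json file. A minimal sketch of one such input record, inferred from the keys the function reads; the URL and values are placeholders, not data from the original project:

example_bug = {
    "link": "https://bugs.example.com/bug/12345",   # only the trailing bug number is used for the output filename
    "comments": "Concatenated comment text to send to Watson NLU",
    "tags": ["ui", "crash"],
    "importance": "High"
}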
Example No. 2
def getData(user, pw, doc):
    # Pass the service URL to the client; assigning it to an unused local has no effect.
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username=user,
        password=pw,
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')
    response = natural_language_understanding.analyze(
        text=doc,
        features=[features.Sentiment(), features.Emotion()])
    return json.dumps(response)
Example No. 3
def nl_processing(reqd_text):
    response = natural_language_understanding.analyze(text=reqd_text,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Emotion(),
                                                          features.Concepts(),
                                                          features.Sentiment()
                                                      ])
    return response
Example No. 4
    def analyse_emotions(self, tweet):
        # print('Analysis Text:',tweet)
        response = natural_language_understanding.analyze(
            text=tweet, features=[Features.Emotion(),
                                  Features.Sentiment()])

        list_emotion = list(
            response['emotion']['document']['emotion'].values())
        list_emotion.append(response['sentiment']['document']['score'])
        print('List emotion is : ', list_emotion)
        return list_emotion
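Each of these snippets indexes the same response layout. A trimmed sketch of what analyze() returns when Emotion() and Sentiment() are requested, with illustrative (not real) scores, matching the keys accessed above:

response = {
    "sentiment": {"document": {"label": "positive", "score": 0.65}},
    "emotion": {"document": {"emotion": {
        "anger": 0.05, "disgust": 0.02, "fear": 0.03, "joy": 0.78, "sadness": 0.08
    }}}
}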
Example No. 5
 def test_html_analyze(self):
     nlu_url = "http://bogus.com/v1/analyze"
     responses.add(responses.POST, nlu_url,
                   body="{\"resulting_key\": true}", status=200,
                   content_type='application/json')
     nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                          url='http://bogus.com',
                                          username='******',
                                          password='******')
     nlu.analyze([features.Sentiment(),
                  features.Emotion(document=False)],
                 html="<span>hello this is a test</span>")
     assert len(responses.calls) == 1
Example No. 6
 def map_feature(name):
     feature_name_mappings = {
         'keywords': features.Keywords(),
         'entities': features.Entities(),
         'concepts': features.Concepts(),
         'categories': features.Categories(),
         'sentiment': features.Sentiment(),
         'emotion': features.Emotion()
     }
     if name in feature_name_mappings:
         return feature_name_mappings[name]
     else:
         print("Invalid feature name")
         return None
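A hypothetical usage sketch for map_feature(): build the feature list for analyze() from user-supplied names and drop anything it rejects. The client name natural_language_understanding and the input text are assumptions borrowed from the surrounding examples:

requested = ['keywords', 'sentiment', 'not-a-feature']
feature_list = [f for f in (map_feature(name) for name in requested) if f is not None]
response = natural_language_understanding.analyze(text="some text to analyze",
                                                  features=feature_list)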
Example No. 7
def nlu(text):
    response = n.analyze(text=text,
                         features=[
                             features.Emotion(),
                             features.Concepts(),
                             features.Categories(),
                             features.Entities(),
                             features.Keywords(),
                             features.SemanticRoles(),
                             features.Relations(),
                             features.Sentiment()
                         ],
                         language='en')
    return json.dumps(response, indent=2)
Example No. 8
def get_text_data(text, language):
    username = os.environ.get("BLUEMIX-NLU-USERNAME")
    password = os.environ.get("BLUEMIX-NLU-PASSWORD")

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version="2017-02-27",
        username=username,
        password=password
    )
    return natural_language_understanding.analyze(
        text=text,
        features=[features.Emotion(), features.Sentiment(), features.Keywords()],
        language=language
    )
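get_text_data() reads its credentials from two hyphenated environment variable names; os.environ.get() silently returns None if they are unset. A hypothetical setup before calling it, with placeholder values:

import os
os.environ["BLUEMIX-NLU-USERNAME"] = "<username>"
os.environ["BLUEMIX-NLU-PASSWORD"] = "<password>"
result = get_text_data("I love this product", "en")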
Example No. 9
 def featureList(self, tags):
     f_list = []
     for tag in tags:
         if tag == "sentiment":
             f_list.append(features.Sentiment())
         elif tag == "categories":
             f_list.append(features.Categories())
         elif tag == "concepts":
             f_list.append(features.Concepts())
         elif tag == "emotion":
             f_list.append(features.Emotion())
         elif tag == "entities":
             f_list.append(features.Entities())
     return f_list
Example No. 10
def _addAnalysis(text, sentimentArray, emotionsDict):
    try:
        result = natural_language_understanding.analyze(
            text=text, features=[Features.Sentiment(),
                                 Features.Emotion()])
        result['emotion']  # raises KeyError if Watson returned no emotion data
    except:
        return
    sentiment_score = result['sentiment']['document']['score']
    sentimentArray.append(sentiment_score)
    emotions = result['emotion']['document']['emotion']
    for emo in emotions:
        if emo not in emotionsDict:
            emotionsDict[emo] = []
        emotionsDict[emo].append(emotions[emo])
Example No. 11
def call_ibm_watson(text):
    username = os.environ['BLUEMIX_USERNAME']
    password = os.environ['BLUEMIX_PASSWORD']
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username=username,
        password=password)

    response = natural_language_understanding.analyze(text=text,
                                                      features=[
                                                          Features.Emotion(),
                                                      ])

    #print(json.dumps(response, indent=2))
    return response
Example No. 12
def sentimentMining():
    score = 0
    totalCount = 0
    negativeCount = 0
    positiveCount = 0
    inputText = ''

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-04-14',
        username='******',
        password='******')

    # the input file to be used for sentiment analysis
    inputFile = ''
    with open(inputFile, 'r') as f:
        reader = csv.reader(f)
        cnt = 0
        for row in reader:
            # skipping the first row which is the heading
            if cnt == 0:
                cnt += 1
            else:
                if len(row) == 0:
                    # skip empty reviews
                    pass
                else:
                    inputText = row[0]
                    cnt += 1
                    totalCount += 1
                    # sentiment analysis of every review
                    response = natural_language_understanding.analyze(
                        text=inputText,
                        features=[features.Sentiment(),
                                  features.Emotion()],
                        language='en')
                    print(totalCount)

                    # counting the number of positive and negative review with the score
                    if response["sentiment"]["document"][
                            "label"] == 'positive':
                        positiveCount += 1
                        score += response["sentiment"]["document"]["score"]
                    elif response["sentiment"]["document"][
                            "label"] == 'negative':
                        negativeCount += 1
                        score += response["sentiment"]["document"]["score"]
    return positiveCount, negativeCount, score, totalCount
Example No. 13
def sentimentAnalysis(text):
    try:
        # Remove unwanted special characters from text
        correct_text = re.sub('[^a-zA-Z0-9 \n\.]', '', text)
        # encoded_text = urllib.quote(text)
        response = natural_language_understanding.analyze(
            text=correct_text,
            features=[features.Emotion(),
                      features.Sentiment()])

        emotion_dict = response['emotion']['document']['emotion']
        overall_sentiment = response['sentiment']['document']['label']

        return overall_sentiment, emotion_dict

    except Exception as e:
        print('Sentiment API error ' + str(e))
Example No. 14
    def report(self, text):
        """
        Returns the Watson Data for a specific text.
        """

        # Real Call
        payload = self.natural_language_understanding.analyze(
            text=text,
            features=[
                features.Entities(),
                features.Keywords(),
                features.Emotion()
            ])

        # Fake Call, since we only have limited access to IBM
        # payload = self.mock_watson(text)

        return payload
Example No. 15
def get_nlu(data):
    response = natural_language_understanding.analyze(
        text=data,
        features=[
            Features.Entities(
                emotion=True,
                sentiment=True,
                limit=2),
            Features.Keywords(
                emotion=True,
                sentiment=True,
                limit=2),
            Features.Emotion(),
        ]
    )
    return response
Example No. 16
def main(params):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=params["username"],
        password=params["password"],
        version=params["version"])
    response = natural_language_understanding.analyze(
        url=params["url"],
        features=[
            Features.Concepts(limit=1),
            Features.Entities(limit=1),
            Features.Keywords(limit=1),
            Features.Categories(),
            Features.Emotion(),
            Features.Sentiment(),
            Features.MetaData(),
            Features.Relations(),
            Features.SemanticRoles(limit=1)
        ])
    return response
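main() reads exactly four keys from params. A hypothetical call with placeholder credentials and URL; the version string mirrors the one used in most of these examples:

params = {
    "username": "<nlu-username>",
    "password": "<nlu-password>",
    "version": "2017-02-27",
    "url": "https://example.com/article-to-analyze"
}
response = main(params)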
Example No. 17
def sentimentAnalysis(text):
    try:
        # Remove unwanted special characters from text
        correct_text=re.sub('[^a-zA-Z0-9 \n\.]', '', text)
        # encoded_text = urllib.quote(text)
        response = natural_language_understanding.analyze(
            text=correct_text,
            features=[features.Emotion(), features.Sentiment()])
        # print text
        # emotion_dict = response['emotion']['document']['emotion']
        overall_sentiment = response['sentiment']['document']['label']

        # print ("The overall sentiment of the text is: "+overall_sentiment)
        # print("The emotional quotient of the text is as follows: ")
        # for key in emotion_dict:
        #     print(key + " : " + str(emotion_dict[key]))
        return overall_sentiment

    except Exception as e:
        print('Sentiment API error ' + str(e))
Example No. 18
def execute_watson_request(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=constants.WATSON_USER,
        password=constants.WATSON_PASS,
        version="2017-02-27")

    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=[
                features.Concepts(),
                features.Categories(),
                features.Emotion(),
                features.Entities(emotion=True, sentiment=True),
                features.Keywords(emotion=True, sentiment=True),
                features.Sentiment()
            ])
        return response
    except WatsonException as error:
        return str(error)
Example No. 19
def execute_watson_request(word):
    in_data = json.load(open('./in/fed_speech.json'))
    speech = in_data['speech'].encode('ascii', 'replace')

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=constants.WATSON_USER,
        password=constants.WATSON_PASS,
        version="2017-02-27")

    try:
        response = natural_language_understanding.analyze(
            text=speech,
            features=[
                features.Emotion(targets=[word]),
                features.Sentiment(targets=[word])
            ])
        return response
    except WatsonException as error:
        print(str(error))
        exit(0)
Example No. 20
    def understand_text(self):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username=nlu_settings.get("username"),
            password=nlu_settings.get("password"),
            version="2017-02-27")

        self.nl_understanding = natural_language_understanding.analyze(
            text=self.converted_text,
            features=[
                Features.Entities(emotion=True, sentiment=True, limit=100),
                Features.Keywords(emotion=True, sentiment=True, limit=100),
                Features.Categories(),
                Features.Concepts(),
                Features.Sentiment(),
                Features.Emotion(),
                #     Features.Feature(),
                #     Features.MetaData(),
                Features.Relations(),
                Features.SemanticRoles(),
            ])

        return self.nl_understanding
Example No. 21
def _addTweetStats(tweet, statList):
    try:
        result = natural_language_understanding.analyze(
            text=tweet['text'],
            features=[Features.Sentiment(),
                      Features.Emotion()])
        result['emotion']  # raises KeyError if Watson returned no emotion data
    except:
        return
    sentiment_score = result['sentiment']['document']['score']
    emotions = result['emotion']['document']['emotion']
    user_id = tweet['user']['id']
    userStats = averageUserStats(user_id)
    tweetObject = []
    tweetObject.append(sentiment_score)
    tweetObject.append(emotions)
    tweetObject.append(tweet['retweet_count'])
    tweetObject.append(tweet['favorite_count'])
    tweetObject.append(tweet['user']['followers_count'])
    tweetObject.append(userStats)
    tweetObject.append(tweet['created_at'])
    statList.append(tweetObject)
Example No. 22
def create_emo(input_text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(
        text=input_text, features=[features.Emotion()])
    # analyze() already returns a dict, so no JSON round-trip is needed
    result = response
    print(result)
    emo_rate = result["emotion"]["document"]["emotion"]
    labels = ['sadness', 'joy', 'fear', 'disgust', 'anger']
    sizes = [
        emo_rate['sadness'], emo_rate['joy'], emo_rate['fear'],
        emo_rate['disgust'], emo_rate['anger']
    ]
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral', 'black']
    patches, texts = plt.pie(sizes, colors=colors, shadow=True, startangle=90)
    plt.legend(patches, labels, loc="best")
    plt.tight_layout()
    plt.savefig('Interviewer/static/Interviewer/userMedia/emo.png')
    plt.close()
Example No. 23
def callNLU(text):
    '''
    Checks what features are enabled, then makes a call to NLU and returns JSON.
    :param text The string containing the information you want to analyse.
    '''
    if text is None or text.strip() == '':
        return {}

    f = []
    if c.getboolean('nlu_feature', 'concepts'): f.append(features.Concepts())
    if c.getboolean('nlu_feature', 'entities'): f.append(features.Entities())
    if c.getboolean('nlu_feature', 'keywords'): f.append(features.Keywords())
    if c.getboolean('nlu_feature', 'categories'):
        f.append(features.Categories())
    if c.getboolean('nlu_feature', 'emotion'): f.append(features.Emotion())
    if c.getboolean('nlu_feature', 'semanticroles'):
        f.append(features.SemanticRoles())
    if c.getboolean('nlu_feature', 'relations'): f.append(features.Relations())
    if c.getboolean('nlu_feature', 'sentiment'): f.append(features.Sentiment())

    r = nlu.analyze(text=text, features=f)

    return r
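callNLU() drives its feature list from a config object c with an [nlu_feature] section; the file itself is not shown in this snippet. A minimal sketch of building an equivalent parser in code, assuming c is a standard ConfigParser (the real project presumably loads it from a file elsewhere):

import configparser

c = configparser.ConfigParser()
c.read_string("""
[nlu_feature]
concepts = true
entities = true
keywords = true
categories = false
emotion = true
semanticroles = false
relations = false
sentiment = true
""")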
Example No. 24
def emotion_analysis(link):

    data = natural_language_understanding.analyze(
        url=link, features=[features.Emotion()])

    def get_emotion(emo):
        return data['emotion']['document']['emotion'][emo]

    emotion = []

    # length_of_list = len(df['claim_course_url'])
    # print (length_of_list)

    print(data)
    # i = 0

    # print(i)

    emotion.append(get_emotion('anger'))
    emotion.append(get_emotion('sadness'))
    emotion.append(get_emotion('disgust'))
    emotion.append(get_emotion('fear'))
    emotion.append(get_emotion('joy'))
    return emotion
Example No. 25
def get_data_from_bluemix(target_url):
    nl_understanding = cache_get(target_url)
    if not nl_understanding:
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username=nlu_settings.get("username"),
            password=nlu_settings.get("password"),
            version="2017-02-27")
        features = [
                Features.Entities(limit=100,emotion=True,sentiment=True),
                Features.Keywords(limit=100,emotion=True,sentiment=True),
                Features.Categories(),
                Features.Concepts(),
                Features.Sentiment(),
                Features.Emotion(),
                #     Features.Feature(),
                #     Features.MetaData(),
                Features.Relations(),
                Features.SemanticRoles(),

            ]
        nl_understanding = None

        for i in range(NUMBEROFTRIES):
            try:
                nl_understanding = natural_language_understanding.analyze(
                    url=target_url,
                    features=features
                )
            except:
                # ignore errors on this attempt and retry (up to NUMBEROFTRIES times)
                pass

            if nl_understanding:
                break
        cache_put(target_url, nl_understanding)

    return nl_understanding
Example No. 26
#==============================================================================
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud import WatsonException
import watson_developer_cloud.natural_language_understanding.features.v1 \
  as Features
  
natural_language_understanding = NaturalLanguageUnderstandingV1(
  username="******",
  password="******",
  version="2017-02-27")
with open('IBMjson', 'w') as ibmoutfile:
    try:
        response = natural_language_understanding.analyze(
            url=qqll,
            features=[
                Features.Emotion(),
                Features.Sentiment(),
                Features.Concepts(limit=1),
                Features.Keywords(limit=1, sentiment=False, emotion=False),
                Features.Categories(),
                Features.Entities(limit=1, sentiment=False, emotion=False),
                Features.MetaData()
            ]
        )
        #print(json.dumps(response, indent=2))
        json.dump(response, ibmoutfile)
    except WatsonException as e:  # This is the correct syntax
        json.dump(qqll, ibmoutfile)
Example No. 27
def handle_message(conversation_client, slack_client, workspace_id, context,
                   message, channel, user):
    """Handler for messages coming from Watson Conversation using context.

        Fields in context will trigger various actions in this application.

        :param str message: text from UI
        :param SlackSender sender: used for send_message, hard-coded as Slack

        :returns: True if UI input is required, False if we want app
         processing and no input
        :rtype: Bool
    """
    global gv_nlu, gv_cortical_client, gv_bot_deafault_channel_name, gv_bot_deafault_channel_id, gv_ai
    url_list = []
    response = ""
    cortical_response_text = ""
    nlu_analyzed_text = ""
    nlu_responce_text = ""
    nlu_keyword = None
    nlu_entities = None
    context = None

    # extract URLs from the message of the post
    url_list = get_urls(slack_client, message)

    if url_list is not None:
        # send the message to the user indicating that the analysis process has started
        slack_client.api_call("chat.postMessage",
                              channel=channel,
                              text="analyzing . . . ",
                              as_user=True)
        for i in range(len(url_list)):
            try:
                # Analyze the URL article using WATSON Natural Language Understanding
                nlu_response = gv_nlu.analyze(url=url_list[i],
                                              return_analyzed_text=True,
                                              features=[
                                                  features.Categories(),
                                                  features.Concepts(),
                                                  features.Emotion(),
                                                  features.Entities(),
                                                  features.Keywords(),
                                                  features.MetaData(),
                                                  features.Relations(),
                                                  features.Sentiment()
                                              ])
                # get information from JSON format resulted by NLU
                nlu_responce_text, nlu_sentiment, nlu_categoties, nlu_entities, nlu_keyword, nlu_concepts, nlu_analyzed_text = convert_nlujson(
                    url_list[i], nlu_response)

            except WatsonException:
                # print(json.dumps(nlu_response, indent=2))
                nlu_responce_text = "Sentiments can not be retrieved from the URL"

            # performs CORTICAL SEMANTIC analysis and returns results as a response text
            cortical_response_text = cortical_analyze(nlu_analyzed_text,
                                                      nlu_keyword,
                                                      nlu_entities)

            # build response text
            title = "\n\n\n ===== Watson Sentiment Analysis =====\n"
            response = response + title + nlu_responce_text + cortical_response_text  ## Uncomment to add URL to the response text <+ url_list[i]>
            i = i + 1
    else:
        response = "No valid URL found !!!"

    return response
Example No. 28
def main():
	with io.open('./Results/CleanedLocatedTweets.csv', 'r', encoding=encoding) as nlpFile:
		inputNLPBuffer = nlpFile.readlines()
		listNLP = []
		for line in inputNLPBuffer:
			listNLP.append(line.split('\t')[0])

	with io.open('./Results/compiledOtherTweets.csv','w',encoding = encoding) as outputFile:
		with io.open('./Results/locatedResultsCompilation.csv','r', encoding=encoding) as inputFile:
			inputNLPBuffer = inputFile.readlines()
			outputFile.write(u'\t'.join(['crimeRelatedentity','TweetNumber', 'TweetID', 'Username', 'Date', 'Concept', 'Latitud', 'Longitud', 'Tweet', 'JoyScore', 'BadFeelingsScore',  '', 'Locations mentioned','\n']))
			for line in inputNLPBuffer[501:]:
				lineList = line.split('\t')
				if not(lineList[0] in listNLP):
					print lineList[0]
					cleanedTweet = ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ",lineList[-1]).split())
					cleanedTweet = cleanedTweet.lower()
					try:
						response = natural_language_understanding.analyze(text = cleanedTweet, features=[Features.Entities(), Features.Keywords(), Features.Emotion()])
					except: 
						print cleanedTweet.lower()
						continue
					emotionBuffer = {}
					for key in response:
						if 'emotion' in response.keys():
							emotionBuffer['anger'] = response['emotion']['document']['emotion']['anger']
							emotionBuffer['joy'] = response['emotion']['document']['emotion']['joy']
							emotionBuffer['sadness'] = response['emotion']['document']['emotion']['sadness']
							emotionBuffer['fear'] = response['emotion']['document']['emotion']['fear']
							emotionBuffer['disgust'] = response['emotion']['document']['emotion']['disgust']
							emotionBuffer['badFeelings'] = (emotionBuffer['anger']+emotionBuffer['sadness']+emotionBuffer['fear']+emotionBuffer['disgust'])/4
					if len(response['entities']) > 0:
						bufferEntityCondition = False
						bufferEntityEntities = [] 
						for elements in response['entities']:
							if elements['type'] == 'Crime':
								bufferEntityCondition = True
								bufferEntityEntities.append(unicode(elements['text'].lower()))
						if bufferEntityCondition:
							bufferEntityEntities.append(unicode(lineList[0]))
							bufferEntityEntities.append(unicode(lineList[1]))
							bufferEntityEntities.append(unicode(lineList[2]))
							bufferEntityEntities.append(unicode(lineList[3]))
							bufferEntityEntities.append(unicode(lineList[6]))
							bufferEntityEntities.append(unicode(lineList[11]))
							bufferEntityEntities.append(unicode(lineList[12]))
							bufferEntityEntities.append(unicode(cleanedTweet))
							bufferEntityEntities.append(unicode(emotionBuffer['joy']))
							bufferEntityEntities.append(unicode(emotionBuffer['badFeelings']))
							bufferEntityEntities.append(unicode(''))
							for elements in response['entities']:
								if elements['type'] == 'Location':
									bufferEntityEntities.append(unicode(elements['text'].lower()))
							bufferEntityEntities.append(u'\n')
							outputFile.write(u'\t'.join(bufferEntityEntities))
Example No. 29
def exe_api(text):

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(
        text=text,
        features=[features.Entities(), features.Keywords(), features.Concepts(), features.Sentiment(), features.Emotion()])

    name = ''
    location = ''
    priority = 'LOW'

    for entity in response['entities']:
        if entity['type'] == 'Person':
            name = entity['text']
        elif entity['type'] == 'Location':
            location = entity['text']

    fear = response['emotion']['document']['emotion']['fear']
    anger = response['emotion']['document']['emotion']['anger']

    if fear >= 0.4 or anger >= 0.4:
        priority = 'HIGH'
    elif fear >= 0.3 or anger >= 0.3:
        priority = 'MEDIUM'


    deportation_count = 0
    visa_count = 0
    greencard_count = 0

    words = [w for w in text.split(' ') if not w in stopwords.words("english")]

    base_words = []

    for word in words:
        word = WordNetLemmatizer().lemmatize(word, 'v')
        base_words.append(word)
        if word in deportation:
            deportation_count += 1
        elif word in visa:
            visa_count += 1
        elif word in greencard:
            greencard_count += 1

    stage1 = {'deportation':deportation_count, 'visa':visa_count, 'greencard':greencard_count}
    stage1_ans = max(stage1, key=stage1.get)

    about = '''{
        "concern": "'''+stage1_ans+'''",
    '''

    if stage1_ans == 'deportation':

        for word in base_words:
            if word in arrest:
                # deportation -> arrested -> offense -> information
                offense = ''
                for w in base_words:
                    if w.lower() in offenses:
                        offense = w
                        break
                if offense == '':
                    keywords = [w['text'] for w in response['keywords']]
                    tags = nltk.pos_tag(keywords)
                    for tag in tags:
                        if 'NN' in tag:
                            offense = tag[0]
                            break

                information = ''
                url = '['
                count = 0


                try:
                    results = search('deportation because of '+offense)['webPages']['value']
                    for result in results:
                        url += '''{"link": "'''+result['url']+'''", "name": "'''+result['name']+'''"},'''
                    url = url[:-1]
                    url += ']'

                    while len(information) < 70:
                        u = results[count]['url']
                        information = exe_summarizer(u)
                        count += 1
                except Exception:
                    information = '''
                    Among other things, the person will become ineligible to. receive asylum, as described in Bars to Receiving Asylum or Refugee Status. He or she may also lose eligibility for a U.S. visa or green card, as described. in Crimes. That Make U.S. Visa or Green Card Applicants Inadmissible. If the person is already in the U.S. with a visa or green card, he or she will likely be ordered removed, as described in Crimes. That Will Make an Immigrant Deportable. And if the person somehow gets as far as submitting an application for U.S. citizenship, the aggravated felony conviction will result in not only denial of that application and permanently barred from U.S. citizenship, but in his or her being placed in removal proceedings. There’s a sort of mismatch, in which state crimes that may sound minor to most people, did not involve violence, and may not even be called felonies are nevertheless viewed as aggravated felonies by federal immigration authorities.
                    '''
                    pass

                about += '''
                        "reason": "arrest",
                        "offense": "'''+offense+'''",
                        "information": "'''+information+'''",
                        "url": '''+url+'''
                }
                '''
                break
            elif word in overstay:
                # deported - > overstay -> visa type
                visa_type = ''
                for word in base_words:
                    if word in type:
                        visa_type = word

                about += '''
                        "reason": "overstay",
                        "type": "'''+visa_type+'''"
                }
                '''
                break
    elif stage1_ans == 'visa':

        for word in base_words:
            if word in visa:
                visa_type = ''
                for word in base_words:
                    if word in type:
                        visa_type = word

                information = ''
                url = '['
                count = 0

                try:
                    results = search(' '.join(text.split(' ')[-4:]))['webPages']['value']
                    for result in results:
                        url += '''{"link": "''' + result['url'] + '''", "name": "''' + result['name'] + '''"},'''
                    url = url[:-1]
                    url += ']'

                    while len(information) < 90:
                        u = results[count]['url']
                        information = exe_summarizer(u)
                        count += 1
                except Exception:
                    information = '''
                        There are various types of nonimmigrant visas for temporary visitors to travel to the U.S., if you are not a U.S. citizen or U.S. lawful permanent resident. It's important to have information about the type of nonimmigrant visa you will need for travel, and the steps required to apply for the visa at a U.S. Embassy or Consulate abroad.
                    '''
                    pass

                about += '''
                        "type": "''' + visa_type + '''",
                        "information": "''' + information + '''",
                        "url": ''' + url + '''
                }
                '''
                break

    elif stage1_ans == 'greencard':
        pass
    else:
        about = '''
            {
                "concern": "general"
            }
        '''

    built_json = ''

    try:
        built_json = json.dumps(json.loads('''
            {
                "name": "'''+name.title()+'''",
                "location": "'''+location+'''",
                "priority": "'''+priority+'''",
                "transcript": "'''+text+'''",
                "about": '''+about+'''
            }
        '''), indent=4)

    except Exception:
        print(name)
        print(location)
        print(priority)
        print(about)

    return built_json
Example No. 30
    print "Make sure you are connected to internet\n"
    print "Enter watson login details:\n"
    print "Username:"******"Password:"******"YOU ARE NOW TALKING TO KARRMA BOT\n\n"
        break
    except WatsonException:
        print "Username or password invalid, try again\n"
    except ConnectionError:
        print "Check your internet connection"
    except NewConnectionError:
        print "Check your internet connection"

######################################## TRAINING THE BOT #####################################

## angry bot
hitler_file = open("hitler.txt", 'r')
conversation_hitler = hitler_file.readlines()
conversation_hitler = [x[1:-3] for x in conversation_hitler]