def test_concepts(self):
    c = features.Concepts()
    assert c.name() == 'concepts'
    assert c.toDict() == {}
    c = features.Concepts(limit=10)
    assert c.name() == 'concepts'
    assert c.toDict() == {'limit': 10}
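The test above exercises only the feature object itself; in practice the Concepts feature is handed to the v1 client's analyze() call, as the examples below do. A minimal sketch of that wiring, with placeholder credentials, version string, and sample text that are not taken from the original:

import watson_developer_cloud
import watson_developer_cloud.natural_language_understanding.features.v1 as features

# Placeholder credentials -- supply real service credentials.
nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
    username='YOUR_USERNAME',
    password='YOUR_PASSWORD',
    version='2017-02-27')

# Concepts(limit=10) serializes to {'limit': 10}, exactly what toDict() returns above.
response = nlu.analyze(text='IBM Watson analyzes unstructured text.',
                       features=[features.Concepts(limit=10)])
print(response['concepts'])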
Example #2
def analyze():
    inputType = request.form['button']
    if inputType == "Text":
        input = request.form['text-analysis']
        concepts = nlu.analyze(text=input, features=[features.Concepts()])
        # categories = nlu.analyze(text=input, features=[features.Categories()])
    else:
        input = request.form['url-analysis']
        concepts = nlu.analyze(url=input.strip(),
                               features=[features.Concepts()])
        # categories = nlu.analyze(url=input.strip(), features=[features.Categories()])

    return jsonify({'concepts': concepts["concepts"]})
Example #3
def concept_recog(path='./test.txt'):
    #with open(path, 'rt', encoding='utf-8') as f:
    #    inputs = f.read()
    with open(path, 'rb') as f:
        inputs = f.read().decode("UTF-8")

    natural_language_understanding = nlu(
        url=("https://gateway.aibril-watson.kr/" +
             "natural-language-understanding/api"),
        username="******",
        password="******",
        version="2017-02-27")

    response = natural_language_understanding.analyze(
        text=inputs,
        features=[
            Features.Concepts(
                # Concepts options
                limit=3)
        ])

    # print(json.dumps(response))
    texts = response['concepts']
    text_list = []
    for text in texts:
        text_list.append(text['text'])
    for text in text_list:
        print(text)
    return ' '.join(text_list)
def run_watson_nlu():
    files = glob.glob('work/bug-*.json')
    (user, passwd) = get_auth()
    for fname in files:
        with open(fname) as f:
            LOG.debug("Processing %s" % fname)
            bug = json.loads(f.read())
            num = bug["link"].split("/")[-1]
            with open("work/res-%s.json" % num, "w") as out:
                nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
                    version='2017-02-27', username=user, password=passwd)
                res = nlu.analyze(text=bug["comments"],
                                  features=[
                                      features.Concepts(),
                                      features.Keywords(),
                                      features.Emotion(),
                                      features.Sentiment(),
                                  ])
                output = {
                    "link": bug["link"],
                    "tags": bug["tags"],
                    "importance": bug["importance"],
                    "length": len(bug["comments"]),
                    "results": res
                }
                out.write(json.dumps(output, indent=4))
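The loop above relies on only a handful of fields in each work/bug-*.json file; a hedged sketch of the record shape it implies, with all values invented for illustration:

# Illustrative shape of a work/bug-*.json record, inferred from the fields read above.
example_bug = {
    "link": "https://bugs.example.org/bug/1234567",   # num is taken from the last path segment
    "tags": ["ui", "regression"],
    "importance": "High",
    "comments": "Long concatenated comment text that gets sent to NLU ..."
}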
Example #5
def processText(fname):
    print('fname', fname)
    in_text = readfile(fname)
    in_text = str(in_text)

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username="******",
        password="******",
        version="2017-02-27")

    response = natural_language_understanding.analyze(
        text=in_text,
        features=[
            Features.Concepts(
                # Concepts options
                limit=50),
            # Features.Keywords(
            #   # Keywords options
            #   # sentiment=True,
            #   # emotion=True,
            #   limit=10
            # ),
            # Features.Entities(
            # )
        ])

    # return jsonify(response)
    return json.dumps(response, indent=2)
Example #6
def analyze(html):
    response = nlu.analyze(
        html=html,
        features=[
            # features.Keywords(limit=32)
            features.Concepts(limit=32)
            # features.Entities(limit=32)
        ],
    )
    return response
Example #7
def nl_processing(reqd_text):
    response = natural_language_understanding.analyze(text=reqd_text,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Emotion(),
                                                          features.Concepts(),
                                                          features.Sentiment()
                                                      ])
    return response
Example #8
def main():
    with open("compost.txt", "r") as f:
        strings = f.read().split()
    while (strings):
        response = NLU.analyze(text=strings.pop(0),
                               features=[
                                   features.Keywords(),
                                   features.Categories(),
                                   features.Concepts()
                               ])
    print(str(response.keywords[0].text))
Example #9
def map_feature(name):
    feature_name_mappings = {
        'keywords': features.Keywords(),
        'entities': features.Entities(),
        'concepts': features.Concepts(),
        'categories': features.Categories(),
        'sentiment': features.Sentiment(),
        'emotion': features.Emotion()
    }
    if name in feature_name_mappings:
        return feature_name_mappings[name]
    else:
        print("Invalid feature name")
        return None
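A possible way to drive map_feature() from user-supplied names; the requested list is an assumption, and invalid names simply print the warning above and are dropped:

# Illustrative usage of map_feature(); 'bogus' triggers the "Invalid feature name" branch.
requested = ['concepts', 'keywords', 'bogus']
selected = [f for f in (map_feature(name) for name in requested) if f is not None]
# selected now holds [features.Concepts(), features.Keywords()] and can be
# passed as features=selected to an NLU analyze() call.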
def nlu(text):
    response = n.analyze(text=text,
                         features=[
                             features.Emotion(),
                             features.Concepts(),
                             features.Categories(),
                             features.Entities(),
                             features.Keywords(),
                             features.SemanticRoles(),
                             features.Relations(),
                             features.Sentiment()
                         ],
                         language='en')
    return json.dumps(response, indent=2)
Example #11
def featureList(self, tags):
    f_list = []
    for tag in tags:
        if tag == "sentiment":
            f_list.append(features.Sentiment())
        elif tag == "categories":
            f_list.append(features.Categories())
        elif tag == "concepts":
            f_list.append(features.Concepts())
        elif tag == "emotion":
            f_list.append(features.Emotion())
        elif tag == "entities":
            f_list.append(features.Entities())
    return f_list
Example #12
def extract_data(text):
    # Use Watson's NLU API to extract the keywords, entities and concepts from a text
    bm_username = "******"
    bm_password = "******"

    nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
        version='2017-02-27', username=bm_username, password=bm_password)
    ents = nlu.analyze(text=text,
                       features=[
                           features.Entities(),
                           features.Keywords(),
                           features.Concepts()
                       ])

    ents["tweet"] = text
    return ents
Example #13
def test_model():

    model = request.forms.get('model')

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username="******",
        password='******')

    response = natural_language_understanding.analyze(text=model,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Concepts()
                                                      ])

    return (json.dumps(response, indent=2))
Example #14
def calc_concepts(text_input):
    import watson_developer_cloud.natural_language_understanding.features.v1 as features

    NLU = NaturalLanguageUnderstandingV1(
        username=os.environ.get("NLU_USERNAME"),
        password=os.environ.get("NLU_PASSWORD"),
        version='2016-05-19')

    #features = ['concepts', 'keywords']
    #tips = 'The IBM Watson™ AlchemyLanguage service is a collection of text analysis functions that derive semantic information from your content. You can input text, HTML, or a public URL and leverage sophisticated natural language processing techniques to get a quick high-level understanding of your content and obtain detailed insights such as sentiment for detected entities and keywords. See a video overview of the service here.'
    r = NLU.analyze(text=text_input, features=[features.Concepts()])
    concepts = r['concepts']
    dict_of_concepts = dict()
    for c in concepts:
        dict_of_concepts[c['text']] = c['relevance']
    #print(dict_of_concepts)
    return dict_of_concepts
def analyze(text, threshold=0.5):
    text = text.encode('ascii', errors='ignore')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(text=text,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Concepts(),
                                                          features.Sentiment()
                                                      ])

    decoder = json.JSONDecoder()

    decoded_response = decoder.decode(json.dumps(response, indent=2))
    language = decoded_response["language"]
    keywords = decoded_response["keywords"]
    entities = decoded_response["entities"]
    concepts = decoded_response["concepts"]
    sentiment = decoded_response["sentiment"]

    keywords = sorted(keywords, key=lambda x: -x['relevance'])
    keywords = [
        keyword for keyword in keywords if keyword['relevance'] >= threshold
    ]
    keywords = [keyword['text'] for keyword in keywords]

    entities = sorted(entities, key=lambda x: -x['relevance'])
    entities = [
        entity for entity in entities if entity['relevance'] >= threshold
    ]
    entities = [(entity['type'], entity['text']) for entity in entities]

    concepts = sorted(concepts, key=lambda x: -x['relevance'])
    concepts = [
        concept for concept in concepts if concept['relevance'] >= threshold
    ]
    concepts = [concept['text'] for concept in concepts]

    sentiment = (sentiment['document']['label'],
                 sentiment['document']['score'])
Example #16
def main(params):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=params["username"],
        password=params["password"],
        version=params["version"])
    response = natural_language_understanding.analyze(
        url=params["url"],
        features=[
            Features.Concepts(limit=1),
            Features.Entities(limit=1),
            Features.Keywords(limit=1),
            Features.Categories(),
            Features.Emotion(),
            Features.Sentiment(),
            Features.MetaData(),
            Features.Relations(),
            Features.SemanticRoles(limit=1)
        ])
    return response
Example #17
def wikiGetConcepts(page):
    wikitext = page.summary
    response = natural_language_understanding.analyze(
        text=wikitext, features=[Features.Concepts()])
    queryTerms = {}
    for concept in response['concepts']:
        if concept['relevance'] > 0.5:
            queryTerms[concept['text'].encode('ascii',
                                              'ignore')] = concept['relevance']
    sortedTerms = sorted(queryTerms, key=queryTerms.get, reverse=True)
    charlen = 0
    index = 0  # guard against an empty concept list
    for i, term in enumerate(sortedTerms):
        charlen += len(term)
        if charlen >= 350:
            index = i
            break
        else:
            index = i
    return sortedTerms[:index]
Example #18
def db_watson_query():
        client = Cloudant(user_name, password, url = url)
        client.connect()
        databaseName = "catalog"
        myDatabase = client[databaseName]
        if myDatabase.exists():
                print("Successfully connected to database {}".format(databaseName))


        result_collection = Result(myDatabase.all_docs, include_docs=True)
        print ("Retrieved full document:\n{0}\n".format(result_collection[0]))

        end_point = '{0}/{1}'.format(url, databaseName + "/_all_docs")
        params = {'include_docs': 'true'}
        response = client.r_session.get(end_point, params=params)

        #connect to Watson NLU
        natural_language_understanding = NaturalLanguageUnderstandingV1(
          username=user_name_watson,
          password=password_watson,
          version="2017-02-27")



        #feed data to watson api
        for i in range(0, 300):
                tmp = result_collection[i][0]['doc']

                response = natural_language_understanding.analyze(
                        text = tmp['description'],
                        features=[
                                Features.Concepts(
                                        # Concepts options
                                        limit=5
                                        )
                                ]
                        )
                jss = json.dumps(response, indent=2)
                parses = parseJson(response)
                mydocument = myDatabase[tmp['_id']]
                mydocument['keywords'] = parses
                mydocument.save()
        client.disconnect()
def execute_watson_request(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=constants.WATSON_USER,
        password=constants.WATSON_PASS,
        version="2017-02-27")

    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=[
                features.Concepts(),
                features.Categories(),
                features.Emotion(),
                features.Entities(emotion=True, sentiment=True),
                features.Keywords(emotion=True, sentiment=True),
                features.Sentiment()
            ])
        return response
    except WatsonException as error:
        return str(error)
Example #20
def get_features(html):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version=BLUEMIX_VERSION,
        username=BLUEMIX_USER_NAME,
        password=BLUEMIX_USER_PASS)

    feature_dict = natural_language_understanding.analyze(
        html=html,
        features=[
            features.Concepts(),  # keep
            # features.Entities(emotion=True, sentiment=True),
            # features.Keywords(emotion=True, sentiment=True),
            # features.Categories(),
            # features.Emotion(document=True),
            # features.MetaData(),
            # features.SemanticRoles(entities=True, keywords=True),
            # features.Relations(),
            # features.Sentiment(document=True)
        ])
    return feature_dict
Example #21
def nlp(input_stuff):
    # Calls NaturalLanguageUnderstandingV1 API
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username="******",  # API Key
        password="******")  # Replace with personal API

    response = natural_language_understanding.analyze(
        text=input_stuff,
        features=[
            features.Concepts(), features.Entities(), features.Keywords(),
            features.Categories(), features.Emotion(), features.Sentiment(),
            features.Relations(), features.SemanticRoles()
        ])
    nlu_data = {
        'sentiment': response["sentiment"],
        'semanticRoles': response["semantic_roles"],
        'concepts': response["concepts"],
        'entities': response["entities"],
        'relations': response["relations"],
        'categories': response["categories"]
    }
    nlu_data = [nlu_data]
    # print(nlu_data)
    return(nlu_data)
Example #22
    def understand_text(self):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username=nlu_settings.get("username"),
            password=nlu_settings.get("password"),
            version="2017-02-27")

        self.nl_understanding = natural_language_understanding.analyze(
            text=self.converted_text,
            features=[
                Features.Entities(emotion=True, sentiment=True, limit=100),
                Features.Keywords(emotion=True, sentiment=True, limit=100),
                Features.Categories(),
                Features.Concepts(),
                Features.Sentiment(),
                Features.Emotion(),
                #     Features.Feature(),
                #     Features.MetaData(),
                Features.Relations(),
                Features.SemanticRoles(),
            ])

        return self.nl_understanding
Example #23
def get_features_prof():
    list_profs = glob.glob(
        "C:\\Users\\Mukund\\Downloads\\findMyAdvisor\\findMyAdvisor\\prof_pages\\*"
    )
    dict_prof_bow = {}
    dict_prof_concept = {}
    for profs in list_profs:

        with open(profs, 'r') as myfile:
            data = myfile.read()
            try:
                response = natural_language_understanding.analyze(
                    text=data,
                    features=[
                        Features.Entities(),
                        Features.Keywords(),
                        Features.Concepts()
                    ])

                keywords = response["keywords"]
                entities = response["entities"]
                concepts = response["concepts"]
                bag_of_words = set()
                concept_set = set()
                for type in [keywords, entities, concepts]:
                    for item in type:
                        bag_of_words.add(item["text"].lower())
                for type in [concepts]:
                    for item in type:
                        concept_set.add(item["text"].lower())

                # Use os.path.basename so the Windows backslash paths from glob are handled correctly.
                dict_prof_bow[os.path.basename(profs)] = ' '.join(
                    list(bag_of_words))
                dict_prof_concept[os.path.basename(profs)] = ' '.join(
                    list(concept_set))
            except Exception:
                continue
    return dict_prof_bow, dict_prof_concept
Example #24
def callNLU(text):
    '''
    Checks which features are enabled, then makes a call to NLU and returns JSON.
    :param text: The string containing the information you want to analyse.
    '''
    if text is None or text.strip() == '':
        return {}

    f = []
    if c.getboolean('nlu_feature', 'concepts'): f.append(features.Concepts())
    if c.getboolean('nlu_feature', 'entities'): f.append(features.Entities())
    if c.getboolean('nlu_feature', 'keywords'): f.append(features.Keywords())
    if c.getboolean('nlu_feature', 'categories'):
        f.append(features.Categories())
    if c.getboolean('nlu_feature', 'emotion'): f.append(features.Emotion())
    if c.getboolean('nlu_feature', 'semanticroles'):
        f.append(features.SemanticRoles())
    if c.getboolean('nlu_feature', 'relations'): f.append(features.Relations())
    if c.getboolean('nlu_feature', 'sentiment'): f.append(features.Sentiment())

    r = nlu.analyze(text=text, features=f)

    return r
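callNLU() reads its switches from a ConfigParser object named c with an [nlu_feature] section; a minimal sketch of that configuration, assuming Python 3's configparser (the file layout is an assumption, but the keys mirror the ones queried above):

import configparser

# Hypothetical configuration matching the getboolean() calls in callNLU().
c = configparser.ConfigParser()
c.read_string("""
[nlu_feature]
concepts = true
entities = true
keywords = true
categories = false
emotion = false
semanticroles = false
relations = false
sentiment = true
""")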
Example #25
def get_features_resume(text):
    data = text
    try:
        response = natural_language_understanding.analyze(
            text=data,
            features=[
                Features.Entities(),
                Features.Keywords(),
                Features.Concepts()
            ])

        keywords = response["keywords"]
        entities = response["entities"]
        concepts = response["concepts"]

        bag_of_words = set()
        for type in [keywords, entities, concepts]:
            for item in type:
                bag_of_words.add(item["text"])

        return ' '.join(list(bag_of_words)).lower()
    except:
        return 'No Resume Found'
Example #26
def get_data_from_bluemix(target_url):
    nl_understanding = cache_get(target_url)
    if not nl_understanding:
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username=nlu_settings.get("username"),
            password=nlu_settings.get("password"),
            version="2017-02-27")
        features = [
                Features.Entities(limit=100, emotion=True, sentiment=True),
                Features.Keywords(limit=100, emotion=True, sentiment=True),
                Features.Categories(),
                Features.Concepts(),
                Features.Sentiment(),
                Features.Emotion(),
                #     Features.Feature(),
                #     Features.MetaData(),
                Features.Relations(),
                Features.SemanticRoles(),

            ]
        nl_understanding = None

        for i in range(NUMBEROFTRIES):
            try:
                nl_understanding = natural_language_understanding.analyze(
                    url=target_url,
                    features=features
                )
            except:
                pass

            if nl_understanding:
                break
        cache_put(target_url, nl_understanding)

    return nl_understanding
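cache_get() and cache_put() are referenced but not shown; a minimal in-memory sketch of what they might look like (a real implementation would presumably persist results somewhere durable):

# Hypothetical in-memory stand-ins for the cache helpers used above.
_NLU_CACHE = {}

def cache_get(key):
    # Return a previously stored analysis for this URL, or None on a miss.
    return _NLU_CACHE.get(key)

def cache_put(key, value):
    # Only cache successful (non-None) analyses.
    if value is not None:
        _NLU_CACHE[key] = value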
Example #27
        if q in line.lower():
            questionToInsert = line.rstrip()
            questions[questionToInsert] = counter
            break

    #When the end of the grouping has been reached, run the APIs
    if (counter % lineCount == 0):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username="******",
            password="******",
            version="2017-02-27")

        response = natural_language_understanding.analyze(
            text=dialogue,
            features=[
                Features.Concepts(),
                #Features.Keywords(),
                Features.Categories()
            ])

        currConcepts = []
        currKeywords = []
        currCategories = []
        for concept in response['concepts']:
            if (concept['text'] not in concepts):
                currConcepts.append(concept['text'])
                concepts[concept['text']] = [counter]
            else:
                concepts[concept['text']].append(counter)
        '''for keyword in response['keywords']:
            if(keyword['text'] not in keywords):
Example #28
print("[SCRAPER] The Star stories downloaded successfully(?).")

feed = r.json()
articles = []

watson_failed = True

for article in feed:
    tags = []
    tags_weighted = []

    if not watson_failed:
        try:
            response = nlu.analyze(text=article['body'],
                                   features=[features.Concepts()])
            for concept in response['concepts'][:5]:
                tags.append(concept['text'])
                tags_weighted.append({concept['text']: concept['relevance']})

        except Exception as e:
            print(e)
            watson_failed = True

    article['sorted_tags'] = list(set(tags))
    article['theme'] = tags_weighted

    articles.append({'node': article})

data = {'nodes': articles, 'tags': []}
Example #29
def handle_message(conversation_client, slack_client, workspace_id, context,
                   message, channel, user):
    """Handler for messages coming from Watson Conversation using context.

        Fields in context will trigger various actions in this application.

        :param str message: text from UI
        :param SlackSender sender: used for send_message, hard-coded as Slack

        :returns: True if UI input is required, False if we want app
         processing and no input
        :rtype: Bool
    """
    global gv_nlu, gv_cortical_client, gv_bot_deafault_channel_name, gv_bot_deafault_channel_id, gv_ai
    url_list = []
    response = ""
    cortical_response_text = ""
    nlu_analyzed_text = ""
    nlu_responce_text = ""
    nlu_keyword = None
    nlu_entities = None
    context = None

    # extract URLs from the message of the post
    url_list = get_urls(slack_client, message)

    if url_list is not None:
        # send a message to the user indicating that the analysis process has started
        slack_client.api_call("chat.postMessage",
                              channel=channel,
                              text="analyzing . . . ",
                              as_user=True)
        for i in range(len(url_list)):
            try:
                # Analyze the URL article using WATSON Natural Language Understanding
                nlu_response = gv_nlu.analyze(url=url_list[i],
                                              return_analyzed_text=True,
                                              features=[
                                                  features.Categories(),
                                                  features.Concepts(),
                                                  features.Emotion(),
                                                  features.Entities(),
                                                  features.Keywords(),
                                                  features.MetaData(),
                                                  features.Relations(),
                                                  features.Sentiment()
                                              ])
                # get information from JSON format resulted by NLU
                nlu_responce_text, nlu_sentiment, nlu_categoties, nlu_entities, nlu_keyword, nlu_concepts, nlu_analyzed_text = convert_nlujson(
                    url_list[i], nlu_response)

            except WatsonException:
                # print(json.dumps(nlu_response, indent=2))
                nlu_responce_text = "Sentiments can not be retrieved from the URL"

            # performs CORTICAL SEMANTIC analysis and returns results as a response text
            cortical_response_text = cortical_analyze(nlu_analyzed_text,
                                                      nlu_keyword,
                                                      nlu_entities)

            # build response text
            title = "\n\n\n ===== Watson Sentiment Analysis =====\n"
            response = response + title + nlu_responce_text + cortical_response_text  ## Uncomment to add URL to the response text <+ url_list[i]>
            i = i + 1
    else:
        response = "No valid URL found !!!"

    return response
Example #30
def exe_api(text):

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(
        text=text,
        features=[features.Entities(), features.Keywords(), features.Concepts(), features.Sentiment(), features.Emotion()])

    name = ''
    location = ''
    priority = 'LOW'

    for entity in response['entities']:
        if entity['type'] == 'Person':
            name = entity['text']
        elif entity['type'] == 'Location':
            location = entity['text']

    fear = response['emotion']['document']['emotion']['fear']
    anger = response['emotion']['document']['emotion']['anger']

    if fear >= 0.4 or anger >= 0.4:
        priority = 'HIGH'
    elif fear >= 0.3 or anger >= 0.3:
        priority = 'MEDIUM'


    deportation_count = 0
    visa_count = 0
    greencard_count = 0

    words = [w for w in text.split(' ') if w not in stopwords.words("english")]

    base_words = []

    for word in words:
        word = WordNetLemmatizer().lemmatize(word, 'v')
        base_words.append(word)
        if word in deportation:
            deportation_count += 1
        elif word in visa:
            visa_count += 1
        elif word in greencard:
            greencard_count += 1

    stage1 = {'deportation':deportation_count, 'visa':visa_count, 'greencard':greencard_count}
    stage1_ans = max(stage1, key=stage1.get)

    about = '''{
        "concern": "'''+stage1_ans+'''",
    '''

    if stage1_ans == 'deportation':

        for word in base_words:
            if word in arrest:
                # deportation -> arrested -> offense -> information
                offense = ''
                for w in base_words:
                    if w.lower() in offenses:
                        offense = w
                        break
                if offense == '':
                    keywords = [w['text'] for w in response['keywords']]
                    tags = nltk.pos_tag(keywords)
                    for tag in tags:
                        if 'NN' in tag:
                            offense = tag[0]
                            break

                information = ''
                url = '['
                count = 0


                try:
                    results = search('deportation because of '+offense)['webPages']['value']
                    for result in results:
                        url += '''{"link": "'''+result['url']+'''", "name": "'''+result['name']+'''"},'''
                    url = url[:-1]
                    url += ']'

                    while len(information) < 70:
                        u = results[count]['url']
                        information = exe_summarizer(u)
                        count += 1
                except Exception:
                    information = '''
                    Among other things, the person will become ineligible to. receive asylum, as described in Bars to Receiving Asylum or Refugee Status. He or she may also lose eligibility for a U.S. visa or green card, as described. in Crimes. That Make U.S. Visa or Green Card Applicants Inadmissible. If the person is already in the U.S. with a visa or green card, he or she will likely be ordered removed, as described in Crimes. That Will Make an Immigrant Deportable. And if the person somehow gets as far as submitting an application for U.S. citizenship, the aggravated felony conviction will result in not only denial of that application and permanently barred from U.S. citizenship, but in his or her being placed in removal proceedings. There’s a sort of mismatch, in which state crimes that may sound minor to most people, did not involve violence, and may not even be called felonies are nevertheless viewed as aggravated felonies by federal immigration authorities.
                    '''
                    pass

                about += '''
                        "reason": "arrest",
                        "offense": "'''+offense+'''",
                        "information": "'''+information+'''",
                        "url": '''+url+'''
                }
                '''
                break
            elif word in overstay:
                # deported - > overstay -> visa type
                visa_type = ''
                for word in base_words:
                    if word in type:
                        visa_type = word

                about += '''
                        "reason": "overstay",
                        "type": "'''+visa_type+'''"
                }
                '''
                break
    elif stage1_ans == 'visa':

        for word in base_words:
            if word in visa:
                visa_type = ''
                for word in base_words:
                    if word in type:
                        visa_type = word

                information = ''
                url = '['
                count = 0

                try:
                    results = search(' '.join(text.split(' ')[-4:]))['webPages']['value']
                    for result in results:
                        url += '''{"link": "''' + result['url'] + '''", "name": "''' + result['name'] + '''"},'''
                    url = url[:-1]
                    url += ']'

                    while len(information) < 90:
                        u = results[count]['url']
                        information = exe_summarizer(u)
                        count += 1
                except Exception:
                    information = '''
                        There are various types of nonimmigrant visas for temporary visitors to travel to the U.S., if you are not a U.S. citizen or U.S. lawful permanent resident. It's important to have information about the type of nonimmigrant visa you will need for travel, and the steps required to apply for the visa at a U.S. Embassy or Consulate abroad.
                    '''
                    pass

                about += '''
                        "type": "''' + visa_type + '''",
                        "information": "''' + information + '''",
                        "url": ''' + url + '''
                }
                '''
                break

    elif stage1_ans == 'greencard':
        pass
    else:
        about = '''
            {
                "concern": "general"
            }
        '''

    built_json = ''

    try:
        built_json = json.dumps(json.loads('''
            {
                "name": "'''+name.title()+'''",
                "location": "'''+location+'''",
                "priority": "'''+priority+'''",
                "transcript": "'''+text+'''",
                "about": '''+about+'''
            }
        '''), indent=4)

    except Exception:
        print(name)
        print(location)
        print(priority)
        print(about)

    return built_json