Example #1
def create_ping_checks_public_aliases_all_entities():
    """Create ping checks on all interfaces for all entities."""
    
    ents = get_entities()['values']
    
    for ent in ents:
        create_ping_checks_all_aliases(ent['id'])
Example #2
def app():
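    """Interactively assemble a conversation-workspace JSON string (name, intents, entities, dialog nodes) and write it to data.json."""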
    file = open('data.json', 'w')
    json_string = '{"name":"'

    conversation_name = raw_input("Enter a name for this conversation: ")
    description = raw_input("Enter a description for this conversation: ")

    json_string += conversation_name
    json_string += '", '
    json_string += '"intents":['
    #Get intent and training data from user
    json_string += get_intent()

    #Use AlchemyAPI to get keywords so that users can get an idea for what kind of entities they should create
    get_analysis = raw_input(
        "Would you like to use a few sentences to help you come up with entities? (Y/N) "
    )
    if get_analysis == 'y' or get_analysis == 'Y':
        get_keywords()

    #Users create entities and values, and then get synonyms to those values
    json_string += '"entities":['
    json_string += get_entities()

    json_string += '"language:":"en","metadata":null,"description":"'
    json_string += description
    json_string += '",'
    json_string += '"dialog_nodes":[]}'

    # json.dumps on an already-assembled JSON string would only re-quote it,
    # so write the string directly and close the file
    file.write(json_string)
    file.close()
Example #3
def entities_example(request):
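    """Django view: extract entities from the submitted sentence and render them along with any URLs found in the metadata."""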
    form = ClassifyForm(request.POST)
    if form.is_valid():
        sentence = form.cleaned_data['sentence']
        entities, metadata = get_entities(sentence)
        urls = separar(get_urls(metadata))
        entities_fixed = separar(juntar(limpieza(entities)))

        args = {'form': form, 'entities': entities_fixed, 'urls': urls}
        return render(request, 'entities_example.html', args)
    else:
        form = ClassifyForm()
        args = {'form': form}
        return render(request, 'entities_example.html', args)
Example #4
def entities(handle, year=None, month=None, day=None):
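    """Render the top tokens, hashtags, users and states found in a candidate's tweets for the given date."""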
    candidate, tweets, date = get_tweets_by_date(handle, year, month, day)
    top_tokens, top_hashtags, top_users, top_states = get_entities(tweets)
    entities = {
        "Top Tokens": top_tokens,
        "Top Hashtags": top_hashtags,
        "Top Users": top_users,
        "Top States": top_states
    }
    return render_template('entities.html',
                           title='Candidate Entities',
                           candidate=candidate,
                           date=date,
                           entities=entities)
Example #5
def main():
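    """Turn a natural-language question (in Spanish) into a database query, using POS tags and extracted entities to pick tables and fields."""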
    mic = r.Recognizer()
    # Here the text is obtained, either from voice or from typed input
    text = "Cuántos departamentos tiene Colombia".lower()
    tokens = text.split(' ')  # tokenize the text
    pln = p.NLP(text)  # process the text
    entities = e.get_entities(text)  # extract the entities
    POS = pln.getPosDoc()  # get the parts of speech
    no_stop = pln.getNoStopToken()  # get the tokens without stop words

    if 'ADJ' in POS:
        # Collect all the words that could be candidates for tables or fields
        for lista in POS['ADJ']:
            POS['NOUN'].append(lista)

    if 'NOUN' in POS:  # if we found candidate words, run the processing
        nouns = POS['NOUN']
        tables = pdb.get_Possible_Table(nouns)  # get possible tables
        fields = pdb.get_Possible_Fields(nouns)  # get possible fields
        # Determine whether this is a special question or not
        special_question = pdb.get_special_question(nouns, entities)
        if not special_question:
            print(fields, entities)
            if fields and entities:
                # After processing the whole sentence, send what was found as a query
                db.consulta(entities[0][1], fields[0], tokens)
            else:
                print("No podemos realizar tu consulta")
        else:
            if c.how_many in tokens:
                size = db.depCol()  # query for special questions
                print("Colombia tiene {} departamentos".format(size))
            else:
                db.depCol()  # query for special questions
    else:
        print("No hemos podido procesar tu frase")
Example #6
def destroy_some_checks_all_entities(type):
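    """Remove every check of the given type from all entities."""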
    # type example: remote.ping
    ents = get_entities()['values']
    
    for ent in ents:
        destroy_all_checks(ent['id'], type)
Example #7
    def parse(self, response):
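        """Split the crawled page into paragraphs, images and videos in reading order and persist them as ScrapedDataItem, ScrapedImageItem and ScrapedVideoItem objects."""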

        # Save all the image links
        Imagenes = response.xpath('//img[not(ancestor::footer) and not(ancestor::header)]/@src').extract()

        Videos = response.xpath('//iframe[not(ancestor::footer) and not(ancestor::header)]/@src').extract()

        # Save the URL of the current page
        url = response.url

        # List of paragraphs, images and text lists
        Lista_completa = response.xpath('//p[not(ancestor::footer) and not(ancestor::*[contains(@class,"nav")]) ]  | //img[not(ancestor::footer) and not(ancestor::header)]/@src | //iframe[not(ancestor::footer) and not(ancestor::header)]/@src | //ul[not(ancestor::footer) and not(@class)]').extract()

        # Save the page title
        titulo = response.xpath('string(//title)').extract()[0]


        Lista_videos_final = []
        Lista_imagenes_final = []
        Lista_informaciones_final = []


        # k: running position counter; l: index into Lista_completa;
        # leer: flag used to skip an element already merged into the previous paragraph
        k = 1
        l = 0
        leer = 1
        for item in Lista_completa:
            if leer == 1:
                if item in Imagenes:
                    link = ajuste_img_src(item,response)
                    width , height = getsizes(link)
                    Lista_imagenes_final.append([link,"imagen",k,titulo,url,width,height])
                    k = k + 1
                elif item in Videos:
                    Lista_videos_final.append([item,"video",k,titulo,url])
                    k = k + 1
                else:
                    soup = BeautifulSoup(item, 'html.parser')
                    texto = soup.get_text()
                    if not(texto == ""):
                        if texto.endswith(":") and (Lista_completa[l + 1] not in Imagenes) and (Lista_completa[l + 1] not in Videos):
                            soup2 = BeautifulSoup(Lista_completa[l + 1], 'html.parser')
                            if soup2.get_text() == "":
                                Lista_informaciones_final.append([texto,"informacion",k,titulo,url])
                            else:
                                Lista_informaciones_final.append([texto + "\n" + soup2.get_text(),"informacion",k,titulo,url])
                            leer = 0
                        else:
                            Lista_informaciones_final.append([texto,"informacion",k,titulo,url])
                        k = k + 1
            else:
                leer = 1
            l = l + 1


        # From here on, the paragraphs, images and videos can be used or stored; they live in the lists
        # Lista_imagenes_final, Lista_informaciones_final and Lista_videos_final.
        # Each element of the paragraph list contains: [text, data type, order, title (topic), page URL]
        # Each element of the image list contains: [image URL, data type, order, title (topic), page URL, width, height]
        # Each element of the video list contains: [video URL, data type, order, title (topic), page URL]

        for img in Lista_imagenes_final:
            ScrapedImageItem(order=img[2],
                             topic=img[3],
                             url=img[4],
                             information=img[0],
                             width=img[5],
                             height=img[6],
                             tags=get_info_photo(img[0])).save()

        for info in Lista_informaciones_final:
            print(info)
            classified = classify('../TextClassifier/classifier_bayes.pickle', info[0])
            entities, meta = get_entities(info[0])
            tag = juntar(limpieza(entities))
            urls = get_urls(meta)
            ScrapedDataItem(order=info[2],
                            topic=info[3],
                            url=info[4],
                            information=info[0],
                            classification=classified,
                            tags=tag,
                            metadata=urls).save()

        for video in Lista_videos_final:
            # Each element of the video list contains: [video URL, data type, order, title (topic), page URL]
            ScrapedVideoItem(order=video[2], topic=video[3], url=video[4], information=video[0]).save()
Example #8
def create_ping_alarms(np_id):
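    """Create a ping alarm for every check on every entity, tied to notification plan np_id."""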
    ents = get_entities()['values']
    for ent in ents:
        for chk in list_checks(ent['id'])['values']:
            create_ping_alarm(ent['id'], chk['id'], np_id)
Example #9
def analyze_text_block(text,
                       sentiment_library="textblob",
                       entity_library="spacy",
                       get_sentiment_per_topic=True):
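    """Run entity, keyword, sentiment and lemma extraction on a text block, optionally averaging sentiment per entity, keyword and lemma over the sentences in which each appears."""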
    text = re.sub(r'\s+', ' ', text)
    text = text.replace("\n", ' ')

    entities_res = entities.get_entities(text, library=entity_library)
    keywords_res = keywords.get_keywords(text)
    sentiment_res = sentiment.get_sentiment(text, library=sentiment_library)
    lemmas_dict = {}

    # Calculate sentiment per lemmas, keywords and entities, by averaging
    # the sentiment for all the sentences that they appear in:
    if get_sentiment_per_topic:
        blob = TextBlob(text)
        for sentence in blob.sentences:
            sentence_score = sentiment.get_sentiment(
                str(sentence), library=sentiment_library)['sentiment.score']

            sentence_lemmas = lemmas.get_lemmas(sentence)

            sentence = str(sentence).lower()

            for lemma in sentence_lemmas:
                lemmatxt = lemma['text']
                if lemmatxt in lemmas_dict.keys():
                    lemmas_dict[lemmatxt]['sentiment.score'].append(
                        sentence_score)
                else:
                    lemmas_dict[lemmatxt] = {
                        'sentiment.score': [sentence_score]
                    }

            for keyword in keywords_res:
                word = keyword['text']
                if word.lower() in sentence:
                    if 'sentiment.score' not in keyword.keys():
                        keyword['sentiment.score'] = []
                    keyword['sentiment.score'].append(sentence_score)

            for entity in entities_res:
                word = entity['text']
                if word.lower() in sentence:
                    if 'sentiment.score' not in entity.keys():
                        entity['sentiment.score'] = []
                    entity['sentiment.score'].append(sentence_score)

        for keyword in keywords_res:
            # WARNING: This is a hack. Happens when we have different libraries not agreeing on sentence boundaries!
            if 'sentiment.score' not in keyword.keys():
                keyword['sentiment.score'] = [sentiment_res['sentiment.score']]

            keyword['num.sentences'] = len(keyword['sentiment.score'])
            keyword['sentiment.score'] = np.mean(keyword['sentiment.score'])

        for entity in entities_res:
            # WARNING: This is a hack. Happens when we have different libraries not agreeing on sentence boundaries!
            if 'sentiment.score' not in entity.keys():
                entity['sentiment.score'] = [sentiment_res['sentiment.score']]

            entity['num.sentences'] = len(entity['sentiment.score'])
            entity['sentiment.score'] = np.mean(entity['sentiment.score'])

        lemmas_res = []
        for lemma in lemmas_dict.keys():
            scores = lemmas_dict[lemma]['sentiment.score']
            lemmas_res.append({
                'text': lemma,
                'num.sentences': len(scores),
                'sentiment.score': np.mean(scores)
            })
    else:
        lemmas_res = lemmas.get_lemmas(text)

    results = {
        'entities': entities_res,
        'sentiment': sentiment_res,
        'keywords': keywords_res,
        'lemmas': lemmas_res
    }

    return results