Example #1
def monkey_learn(texts, article_dicts_list):
    if not texts:
        return None

    ml = MonkeyLearn(cfg.MONKEYLEARN_TOKEN)

    # keyword extraction
    extraction = ml.extractors.extract('ex_y7BPYzNG',
                                       texts,
                                       use_company_names=1,
                                       max_keywords=10)

    # sentiment analysis
    classification = ml.classifiers.classify('cl_MX2qQKNi', texts)

    ml_results = []

    for i in range(0, len(extraction.result)):
        ml_result = {
            "id": article_dicts_list[i]['id'],
            "url": article_dicts_list[i]['url'],
            "classification": classification.result[i],
            "keyword_extraction": extraction.result[i]
        }

        # remove keyword position indices
        for kw in ml_result['keyword_extraction']:
            if kw.get('positions_in_text'):
                del kw['positions_in_text']

        # convert dictionaries to JSON strings
        ml_results.append(json.dumps(ml_result, separators=(',', ': ')))

    # Return a list of JSON strings of results, 1 string per article
    return ml_results
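A minimal offline sketch of how the return value might be consumed (the record below is hypothetical): each element of the returned list is a JSON string, one per article, so callers decode it back with json.loads.

import json

# Hypothetical record in the same shape the function emits, one per article.
record = json.dumps({"id": 1, "url": "https://example.com",
                     "classification": [], "keyword_extraction": []},
                    separators=(',', ': '))
decoded = json.loads(record)
print(decoded["id"], decoded["url"])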
Example #2
def createMasterDict(input_list, model, API_token='<type Your API Key Here>'):
    if model in ["MonkeyLearn", "ML", "ml", "monkeylearn"]:
        ml = MonkeyLearn(API_token)
        model_id = 'ex_YCya9nrn'
        consolidated = []
        j = 1
        for data in input_list:
            features = {}
            features['paragraph'] = data
            features['para_ID'] = j
            j += 1
            result = ml.extractors.extract(model_id, [data])
            extrac = result.body[0]['extractions']
            keywords = []
            for i in range(len(extrac)):
                keywords.append(extrac[i]['parsed_value'])
            features['keywords'] = keywords
            consolidated.append(features)
        return consolidated
    elif model in ["rake", "Rake"]:
        r = Rake()  # Uses English stopwords from NLTK and all punctuation characters.
        consolidated = []
        k = 1
        for para in input_list:
            features_ = {}
            features_['para_ID'] = k
            features_['paragraph'] = para
            k += 1
            r.extract_keywords_from_text(para)
            #print(r.get_ranked_phrases()) # To get keyword phrases ranked highest to lowest.
            features_['keywords'] = r.get_ranked_phrases()
            consolidated.append(features_)
        return consolidated
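A hedged offline usage sketch for the Rake branch above (assumes rake_nltk and the NLTK stopwords corpus are installed; no MonkeyLearn API key is needed on this path). The paragraphs are hypothetical inputs.

paragraphs = [
    "Keyword extraction finds the most relevant terms in a text.",
    "Sentiment analysis classifies text as positive or negative.",
]
master = createMasterDict(paragraphs, model="rake")
for entry in master:
    print(entry["para_ID"], entry["keywords"][:3])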
Example #3
def monkey_learn(titles, urls):
    if not titles or not urls:
        return None

    ml = MonkeyLearn(cfg.MONKEYLEARN_TOKEN)

    # keyword extraction
    extraction = ml.extractors.extract('ex_y7BPYzNG',
                                       titles,
                                       use_company_names=1,
                                       max_keywords=10)

    ml_results = []

    for i in range(0, len(extraction.result)):
        ml_result = {
            "id": str(hashlib.md5(titles[i].encode('utf8')).hexdigest()),
            "url": urls[i],
            "keyword_extraction": extraction.result[i]
        }

        # remove keyword position indices
        for kw in ml_result['keyword_extraction']:
            if kw.get('positions_in_text'):
                del kw['positions_in_text']
        print(ml_result)
        # convert dictionaries to JSON strings
        ml_results.append(json.dumps(ml_result, separators=(',', ': ')))

    # Return a list of JSON strings of results, 1 string per article
    return ml_results
Example #4
def sentiment(s):

    ml = MonkeyLearn('9ad4bee6b5e1a1f32ba54b9c4c68528e5d465442')
    data = [s]
    model_id = 'cl_pi3C7JiL'
    result = ml.classifiers.classify(model_id, data)
    li = result.body

    #li is in the below format:
    #[
    #   {
    #     "text": "This is a great app!",
    #     "external_id": null,
    #     "error": false,
    #     "classifications": [
    #       {
    #         "tag_name": "Positive",
    #         "tag_id": 33767179,
    #         "confidence": 0.998
    #       }
    #     ]
    #   }
    # ]

    li = li[0]['classifications'][0]
    if li['tag_name'] == 'Positive':
        return li['confidence']
    else:
        return 1 - li['confidence']
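A quick offline check of the confidence folding above, with a hypothetical classification dict: 'Positive' passes the confidence through, anything else returns 1 - confidence, so the result reads as the probability that the text is positive.

# Hypothetical classification in the format documented above (no API call).
li = {"tag_name": "Negative", "confidence": 0.9}
score = li["confidence"] if li["tag_name"] == "Positive" else 1 - li["confidence"]
print(score)  # roughly 0.1: a confident Negative maps to a low positive score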
Example #5
def keywords():
    ml = MonkeyLearn('adc2b91f1e63d6140722f8adf37cae5f1610a27c')
    typ = request.form['type']
    model_id = 'ex_YCya9nrn'
    print(typ)
    output = request.form['output']
    print(output)
    print(type(output))
    if typ == "text":
        output = re.sub("[^a-zA-Z.,]", " ", output)
    print(output)

    keywordresult = ml.extractors.extract(model_id, [output])
    print(keywordresult.body)
    a = keywordresult.body[0]
    print(a)
    print(type(a))
    b = a['extractions']
    print(b)
    print(type(b))

    keywords = []

    for i in b:
        keywords.append(i['parsed_value'])

        print(i['parsed_value'])

    print(keywords)

    #return keywords
    return render_template('keywords.html', keyword=keywords)
Example #6
def sentimentsAnalysis():
    columns = defaultdict(list)
    with open('media/recording1/transcript.csv') as f:
        reader = csv.DictReader(f)
        for row in reader:
            for (k, v) in row.items():
                columns[k].append(v)

    positiveCounter = 0
    negativeCounter = 0
    neutralCounter = 0
    ml = MonkeyLearn('ab7ba9286a2c0793d287a35eef87b272db9eac8c')
    model_id = 'cl_pi3C7JiL'

    for s in columns['sentence']:
        li = [s]
        result = ml.classifiers.classify(model_id, li).body
        print(result[0]['classifications'][0]['tag_name'])
        if result[0]['classifications'][0]['tag_name'] == 'Positive':
            positiveCounter += 1
        elif result[0]['classifications'][0]['tag_name'] == 'Neutral':
            neutralCounter += 1
        elif result[0]['classifications'][0]['tag_name'] == 'Negative':
            negativeCounter += 1

    print('Positive', positiveCounter)
    print('Negative', negativeCounter)
    print('Neutral', neutralCounter)

    return f"Sentiments result => Neutral-{neutralCounter} Negative-{negativeCounter} Positive-{positiveCounter}"
Example #7
def app():
    st.title('Named Entity Recognition')
    st.write(
        "Named Entity Recognition is the task of identifying named entities (people, locations, organizations, etc.) in the input text."
    )
    text = st.text_input('Enter the text')
    st.write('or')
    uploaded_file = st.file_uploader("Upload a document", type='txt')
    if uploaded_file is not None:
        uploaded_file.seek(0)
        stringio = StringIO(uploaded_file.read().decode("utf-8"))
        file_text = stringio.read()
        st.write(file_text)

    if st.button('Execute'):
        ml = MonkeyLearn('b5c3a529126d072f0aa6d43003083ffdc3273a7f')
        models = ['ex_SmwSdZ3C', 'ex_vqBQ7V9B', 'ex_A9nCcXfn']
        model_name = ['PERSON', 'LOCATION', 'COMPANY']
        responses = [None for _ in range(len(models))]
        outputs = [None for _ in range(len(models))]
        with st.spinner('Extracting...'):
            if uploaded_file is not None:
                data = file_text
            else:
                data = text
            for idx, model_id in enumerate(models):
                responses[idx] = ml.extractors.extract(model_id, data=[data])
                outputs[idx] = responses[idx].body[0]['extractions']
        for idx, output in enumerate(outputs):
            st.write(model_name[idx], ' :')
            for item in output:
                st.write(item['extracted_text'])
Example #8
def app():
    st.title('Sentiment Analysis')
    st.write(
        'Sentiment Analysis is the task of interpreting and classifying emotions (positive or negative) in the input text.'
    )
    text = st.text_input('Enter the text')
    st.write('or')
    uploaded_file = st.file_uploader("Upload a document", type='txt')
    if uploaded_file is not None:
        uploaded_file.seek(0)
        stringio = StringIO(uploaded_file.read().decode("utf-8"))
        file_text = stringio.read()
        st.write(file_text)

    if st.button('Execute'):
        ml = MonkeyLearn('b5c3a529126d072f0aa6d43003083ffdc3273a7f')
        model_id = 'cl_pi3C7JiL'
        with st.spinner('Classifying...'):
            if uploaded_file is not None:
                response = ml.classifiers.classify(model_id, [file_text])
            else:
                response = ml.classifiers.classify(model_id, [text])
            outputs = response.body[0]['classifications'][0]
        st.write('The model is quite sure the sentence is',
                 outputs['tag_name'], '. (', outputs["confidence"] * 100,
                 ' %)')
Example #9
def sentimentsAnalysis():
    columns = defaultdict(list) 
    with open('media/recording1/transcript.csv') as f:
        reader = csv.DictReader(f)
        for row in reader:
            for (k, v) in row.items():
                columns[k].append(v)

    positiveCounter = 0
    negativeCounter = 0
    neutralCounter = 0

    #ab7ba9286a2c0793d287a35eef87b272db9eac8c
    ml = MonkeyLearn('c9fa1e093134b6bdc811a42f55849bd06a751482')
    model_id = 'cl_pi3C7JiL'
    
    for s in columns['sentence']:
        li = [s]
        result = ml.classifiers.classify(model_id, li).body
        
        if result[0]['classifications'][0]['tag_name'] == 'Positive':
            positiveCounter += 1
        elif result[0]['classifications'][0]['tag_name'] == 'Neutral':
            neutralCounter += 1
        elif result[0]['classifications'][0]['tag_name'] == 'Negative':
            negativeCounter += 1  

    return f'Sentiments result => Neutral- {neutralCounter} Negative- {negativeCounter} Positive- {positiveCounter}'
Example #10
    def sentimentValue(self, tweet):
        ml = MonkeyLearn('aeafaa0aa014ed56b663c49fa20ac81a68f2d194')
        model_id = 'cl_TWmMTdgQ'
        data = []
        data.append(tweet)
        result = ml.classifiers.classify(model_id, data)
        return result.body[0]['classifications'][0]
Example #11
def entity_extract(Id, text, news):
    from monkeylearn import MonkeyLearn

    ml = MonkeyLearn('f61694907b120433ddc66da1880d537c5f9d8f1e')
    text_list = [text]
    module_id = 'ex_isnnZRbS'
    res = ml.extractors.extract(module_id, text_list)
    for row in res.result[0]:
        if not db.session.query(Keyword).filter(
                Keyword.key_name == row['entity']).count():
            key = Keyword(key_name=row["entity"])
            db.session.add(key)
            db.session.commit()
        else:
            key = Keyword.query.filter_by(key_name=row["entity"]).first()

        if news:
            nk = NewsKeyword(news_id=Id, key_id=key.id)
            db.session.add(nk)
            db.session.commit()
        else:
            # if not UserKeyword.query.filter_by(key_id=key.id, user_id=Id).count():  #may not be needed
            uk = UserKeyword(user_id=Id, key_id=key.id, priority=1)
            db.session.add(uk)
            db.session.commit()
Example #12
def request_sentiment(message):
    data = [message]
    ml = MonkeyLearn(API_KEY_MONKEYLEARN)
    model_id = 'cl_pi3C7JiL'
    result = ml.classifiers.classify(model_id, data)
    #result_json = json.loads(result.body)
    sentiment = result.body[0]['classifications'][0]['tag_name']
    return sentiment
Example #13
def get_keywords(data_):
    ml = MonkeyLearn('1c0950b2356a258e00b735ab4002679a9a1f3642')
    model_id = 'ex_YCya9nrn'
    result = ml.extractors.extract(model_id, data_)
    extraction = ''
    for ex in result.body[0]['extractions']:
        extraction += ex['parsed_value'] + " "
    return extraction.strip()
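An offline sketch of the joining step above, using the extraction shape these examples share (the parsed values are hypothetical; no API call):

body = [{"extractions": [{"parsed_value": "keyword extraction"},
                         {"parsed_value": "monkeylearn"}]}]
extraction = ''
for ex in body[0]["extractions"]:
    extraction += ex["parsed_value"] + " "
print(extraction.strip())  # "keyword extraction monkeylearn"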
Example #14
    def __init__(self, data):

        ml = MonkeyLearn(MONKEY_API_KEY)
        model_id = 'cl_pi3C7JiL'
        result = ml.classifiers.classify(model_id, data)
        res = result.body
        res1 = res[0]['classifications'][0]['tag_name']
        res2 = res[0]['classifications'][0]['confidence']
        self.full_res = res1 + ": " + str(res2)
Example #15
def extract_scammer_name(message):
    ml = MonkeyLearn('d31c9827434f85201fc8ba34de9fa26a2ff91936')
    message = re.sub('\n', '', message)
    message = re.sub('\r', '', message)
    data = [message]
    model_id = 'ex_SmwSdZ3C'
    result = ml.extractors.extract(model_id, data)
    scammer_name = result.body[0]["extractions"][0]["parsed_value"]
    return scammer_name
Example #16
def sentiment_analysis_monkeylearn(text):
    ml = MonkeyLearn('1a703adc6e4c0261a67e5e52de43071042af92d6')
    data = list()
    data.append(text)
    model_id = 'cl_pi3C7JiL'
    result = ml.classifiers.classify(model_id, data)
    print(result.body)
    top = result.body[0]['classifications'][0]
    return top['tag_name'], top['confidence']
Example #17
def getVibe(data: list):
    ml = MonkeyLearn('8c2577ec1c3e63771337b22667d9d1c6f36c7675')
    model_id = 'cl_pi3C7JiL'
    result = ml.classifiers.classify(model_id, data)
    vibe = result.body[0].get('classifications')[0].get('tag_name')
    confidence = result.body[0].get('classifications')[0].get('confidence')
    if confidence >= 0.3:
        return vibe
    else:
        return 'Neutral'
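An offline sketch of the thresholding above, with a canned response body in the shape the classify examples show: any top tag under 0.3 confidence is reported as 'Neutral'.

# Hypothetical low-confidence classification (no API call).
body = [{"classifications": [{"tag_name": "Positive", "confidence": 0.25}]}]
top = body[0]["classifications"][0]
vibe = top["tag_name"] if top["confidence"] >= 0.3 else "Neutral"
print(vibe)  # Neutral, because 0.25 < 0.3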
Example #18
    def __init__(self, token, module_id, field_to_classify,
                 field_classification_output, batch_size, use_sandbox):
        self.items = []
        self.token = token
        self.module_id = module_id
        self.ml = MonkeyLearn(token)
        self.field_to_classify = field_to_classify
        self.field_classification_output = field_classification_output
        self.batch_size = batch_size
        self.use_sandbox = use_sandbox
Example #19
def get_latest_messages():
    api = tweepy.API(auth.auth)

    public_tweets = api.home_timeline()
    for tweet in public_tweets:
        ml = MonkeyLearn(monkeylearn_credentials.MONKEY_AUTH_TOKEN)
        data = [tweet.text]
        model_id = 'cl_pi3C7JiL'
        result = ml.classifiers.classify(model_id, data)
        # Note: this returns inside the loop, so only the first tweet is classified.
        return jsonify(result.body), 200
Example #20
def news_with_ticker(request, Ticker):
    url = 'https://finance.yahoo.com/quote/{}'.format(str(Ticker))
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    if soup:
        price = soup.find('table', {'class': 'W(100%)'}).find("tbody")
        price2 = price.find('tr', {'data-reactid': '40'})
        price3 = price.find('tr', {'data-reactid': '45'})
        if price2 is not None and price3 is not None:
            url = ('https://newsapi.org/v2/everything?'
                   'q={}&'
                   'from=2021-05-02&'
                   'sortBy=popularity&'
                   'apiKey=e42cf979dc004ba6abfca80bb7fce05d'.format(
                       str(Ticker)))  # enter api key

            ml = MonkeyLearn(
                "3fa9643fa860ec3e9376994cd1fc534b850d4c7d")  # enter api key
            model_id = 'cl_pi3C7JiL'  # keep model id same
            response = requests.get(url)
            json_dict = (response.json())
            sentiment = []
            data = {}
            data['Details'] = []
            for i in range(10):
                sentiment.append(json_dict['articles'][i]['description'])

            for i in range(len(sentiment)):
                result = ml.classifiers.classify(model_id, [sentiment[i]])
                data['Details'].append({
                    'Date': '2021-04-22',
                    'Tikr': '{}'.format(str(Ticker)),
                    'Text': str(result.body[0]['text']),
                    'Sentiment': str(result.body[0]['classifications'][0]['tag_name']),
                    'Previous Close': str(price2.text.replace("Previous Close", "")),
                    'Open': str(price3.text.replace("Open", ""))
                })

            output = []
            j = 1
            for i in data['Details']:
                output.append({j: i['Text']})
                j += 1

            #response to be returned to frontend -> json format
            #returning as rest_framework response for react JS
            return Response(output)
    else:
        return HttpResponse("Not Found")
Example #21
def tagallfb(request):
    fb = Feedback.objects.all()
    ml = MonkeyLearn('2996cb83b0f2c42cdd8d5785a11d5609b7db736d')
    model_id = 'cl_pi3C7JiL'
    for x in fb:
        data = [x.review]
        result = ml.classifiers.classify(model_id, data)
        x.sentiment = result.body[0]["classifications"][0]["tag_name"]
        print(x.review, ' ', x.sentiment)
        x.save()
    return HttpResponseRedirect('/hc')
Example #22
def lambda_handler(event, context):

    res = requests.get(
        "https://m4fcmxys7i.execute-api.us-west-2.amazonaws.com/default/reddit_parse"
    )
    json_data = json.loads(res.text)

    total_symbols = json_data['total_symbols']
    posts_now = json_data['posts_now']
    posts_hot = json_data['posts_hot']
    posts_top = json_data['posts_top']

    # The parsed post collections are assumed to be row-oriented records;
    # wrap them in DataFrames so concat and the .iloc lookups below work.
    posts = pd.concat([pd.DataFrame(posts_now), pd.DataFrame(posts_hot),
                       pd.DataFrame(posts_top)])
    ##############RECEIVE FROM RES

    ml = MonkeyLearn('f8727eca4adb7979d592c3d905bfba05ff81863a')
    model_id = 'cl_pi3C7JiL'

    stock_sentiment_points = defaultdict(int)

    for i in range(len(posts)):
        curr_title = posts.iloc[i, 0]
        curr_body = posts.iloc[i, 6]
        curr_comments = " ".join(posts.iloc[i, 8])
        for symbol in total_symbols:
            if symbol in curr_title or symbol in curr_body or symbol in curr_comments:
                data = [curr_title + " " + curr_body + " " + curr_comments]
                result = ml.classifiers.classify(model_id, data)
                if result.body[0]["classifications"][0]["tag_name"] == "Positive":
                    stock_sentiment_points[symbol] += 1
                else:
                    stock_sentiment_points[symbol] -= 1
                stock_sentiment_points[symbol] += (curr_title.count("buy")
                                                   + curr_body.count("buy")
                                                   + curr_comments.count("buy"))
                stock_sentiment_points[symbol] -= (curr_title.count("sell")
                                                   + curr_body.count("sell")
                                                   + curr_comments.count("sell"))

    for symbol in total_symbols:
        if symbol not in stock_sentiment_points:
            stock_sentiment_points[symbol] = 0

    for key in stock_sentiment_points.keys():
        pos = "Hold"
        if stock_sentiment_points[key] > 0:
            pos = "Buy"
        elif stock_sentiment_points[key] < 0:
            pos = "Sell"
        # 'table' is assumed to be a boto3 DynamoDB Table defined at module scope.
        table.put_item(Item={'stock': key, 'action': pos})
    response = {'message': 'Item added'}
    return {'statusCode': 200, 'body': response}
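A small offline check of the scoring heuristic above, with the sell counts fixed: plus one for a Positive classification, minus one otherwise, then plus "buy" mentions and minus "sell" mentions across title, body, and comments. The strings are hypothetical.

title, body_text, comments = "GME thoughts", "buy buy", "time to sell"
score = 1  # assume the classifier returned Positive
score += title.count("buy") + body_text.count("buy") + comments.count("buy")
score -= title.count("sell") + body_text.count("sell") + comments.count("sell")
print(score)  # 1 + 2 - 1 = 2 -> "Buy"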
Example #23
def get_keywords_from_description(description: str,
                                  number_keywords=5) -> List[str]:
    ml = MonkeyLearn('c3e523c989d56faa8e968f6881a0269fe6f5dc09')
    model_id_for_key_word_extraction = 'ex_YCya9nrn'

    response_body_str = ml.extractors.extract(
        model_id=model_id_for_key_word_extraction, data=[description]).body

    keywords = []
    for res in response_body_str:
        keywords.extend([kw['parsed_value'] for kw in res['extractions']])
    return keywords[:number_keywords]
Example #24
def CallApis(text):
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "C:/Users/Usuario/SocialHackers/src/claves.json"
    #import os
    #from google.oauth2 import service_account
    texto = str(text)
    print(texto + ' (the text from the official)')
    #gmaps.configure(api_key="AIzaSyDo-i5z39is5UaVtmic0aAdenVlf33fkow")
    #credentials = service_account.Credentials.from_service_account_file("C:/Users/Usuario/claves.json")
    #client = translate.TranslateServiceClient(credentials=credentials)
    ml = MonkeyLearn('04331f53b46d850f750509ec4bf92f17b0df803a')
    translate_client = translate.Client()

    result = translate_client.translate(text, target_language='en')
    print(u"Text: {}".format(result["input"]))
    print(u"Translation: {}".format(result["translatedText"]))
    print(u"Detected source language: {}".format(
        result["detectedSourceLanguage"]))

    texttraducido = result["translatedText"]

    response_type = ml.classifiers.classify(model_id='cl_o46qggZq',
                                            data=[texttraducido])
    response_sentiment = ml.classifiers.classify(model_id='cl_pi3C7JiL',
                                                 data=[texttraducido])
    tag_type = response_type.body[0]['classifications'][0]['tag_name']
    tag_sentiment = response_sentiment.body[0]['classifications'][0][
        'tag_name']
    if tag_type == "Health & Medicine":
        return "Salud"  # Health
    elif tag_type == "Food & Drink":
        return "Comida"  # Food
    elif tag_type == "Environment":
        return "Medio Ambiente"  # Environment
    elif tag_type == "Education":
        return "Educacion"  # Education
    elif tag_type == "Society":
        return "Trabajo"  # Work
    elif tag_sentiment == "Negative":
        return "Psicologia"  # Psychology
    else:
        return "Others"


#fig = gmaps.figure()
#fig

#headers = {"Accept": "application/json", "Content-Type": "application/json"}
#text = { "text": "'+translate+'" }
#url = "https://sentim-api.herokuapp.com/api/v1/"
#response = requests.post(url, data=json.dumps(text), headers=headers)
#print(response)
#print(u"Type: {}".format(response["type"]))
Example #25
def classify_intent(msg_body):
    ml = MonkeyLearn('673dabe257b841679b142da12a8cf7683633e7a8')
    data = [msg_body]
    model_id = 'cl_VGoxtFhL'
    result = ml.classifiers.classify(model_id, data)
    # print(result.body)

    classification_result = result.body[0]['classifications']
    confidence = classification_result[0]['confidence']
    intent = classification_result[0]['tag_name']
    # print("Customer query: ", query)
    # print("Intent: ", classification_result[0]['tag_name'])
    return {'confidence': confidence, 'intent': intent}
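An offline sketch of the parsing step above, using a canned response body in the shape the earlier format comment documents (the tag name here is hypothetical; no API call):

body = [{"classifications": [{"tag_name": "Order Status", "confidence": 0.87}]}]
classification_result = body[0]["classifications"]
print({"confidence": classification_result[0]["confidence"],
       "intent": classification_result[0]["tag_name"]})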
Example #26
def ml():
    ml = MonkeyLearn("f0662751f5792b646356ec5516580c239b71aefb")
    text_list = [
        "Disney Pixar Cars Toon: Mater's Tall tales - Walmart (Wii) Cars Toon: Mater's Tall Tales [Disney Pixar], a Nintendo console exclusive, is a comedy themed mini-game(s) / party game with action racing elements."
    ]
    module_id = 'cl_oFKL5wft'
    res = ml.classifiers.classify(module_id, text_list, sandbox=True)
    list1 = res.result
    # Convert the Python repr of the result (u'' prefixes, single quotes)
    # into a valid JSON string, then parse it back into Python objects.
    str1 = ''.join(str(e) for e in list1)
    str2 = str1.rstrip('\n')
    str3 = str2.replace("u'", "'")
    str4 = str3.replace("'", '"')
    parsed = json.loads(str4)
    return parsed
Example #27
def verifyOTP(request):
    fbid = request.GET['fbid']
    fb = get_object_or_404(Feedback,pk=fbid)
    otp = int(''.join(request.POST['d%d' % i] for i in range(1, 7)))
    if otp == fb.otp:
        fb.verified = True
        ml = MonkeyLearn('2996cb83b0f2c42cdd8d5785a11d5609b7db736d')
        data = [fb.review]
        model_id = 'cl_pi3C7JiL'
        result = ml.classifiers.classify(model_id, data)
        fb.sentiment = result.body[0]["classifications"][0]["tag_name"]
        fb.save()
        return render(request,'healthcenter/thanks.html')
    else:
        return render(request, 'healthcenter/verify.html', {'em': fb.email, 'fbid': fb.id, 'fl': 2})
Example #28
    def get_context_data(self, **kwargs):
        cleanr = re.compile('<.*?>')
        ml = MonkeyLearn('3f5c2d1c62cfd67d36c38b31500b6d3f93fa9d9c')
        model_id = 'cl_pi3C7JiL'
        data = super().get_context_data(**kwargs)
        likes_connected = get_object_or_404(Article, id=self.kwargs['pk'])
        body = [re.sub(cleanr, '', likes_connected.body)]
        response = ml.classifiers.classify(model_id, body)
        liked = False
        if likes_connected.likes.filter(id=self.request.user.id).exists():
            liked = True
        data['number_of_likes'] = likes_connected.number_of_likes()
        data['post_is_liked'] = liked
        data['sentimentalAnanlysis'] = response.body[0]['classifications'][0]['tag_name']
        return data
Example #29
def get_tweets(username):  #This will extract the tweets
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)

    api = tweepy.API(auth)
    number_of_tweets = 1

    tweets = api.user_timeline(screen_name=username)

    temp = []

    tweets_for_csv = [tweet.text for tweet in tweets]
    for j in tweets_for_csv:
        temp.append(j)

    amt = len(temp)  #This will get the total number of tweets retrieved
    rand_num = randrange(amt - 1)  # This will randomly select a tweet from the list
    post = temp[rand_num]

    ml = MonkeyLearn('dd20ef03405ebdd8301a985f30db2b2e0b570d92')
    data = [post]
    model_id = 'cl_4keevyV9'
    result = ml.classifiers.classify(model_id, data)

    # Count how often each response key appears across all classifications.
    counts = Counter(chain.from_iterable(
        i.keys() for i in result.body[0]['classifications']))

    if len(result.body[0]['classifications']) == 0:
        # If the tweet doesn't belong to any category, the system will exit.
        print(post)
        sys.exit("The tweet did not match any existing topics")

    for key, value in counts.items():
        # Every key appears once per classification, so 'value' is the
        # number of classifications returned for the tweet.
        tag_name = value
        break

    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("Tweet:", result.body[0]['text'])
    for i in range(value):
        print("Tag:", result.body[0]['classifications'][i]['tag_name'],
              "\tConfidence:",
              result.body[0]['classifications'][i]['confidence'])
Example #30
def run_pipeline(handle):
    ml = MonkeyLearn('')
    
    data = {
      "twitter_user_name": handle,
      "twitter_access_token_key": '',
      "twitter_consumer_key": '',
      "twitter_consumer_secret": '',
      "twitter_access_token_secret": ''
    }
    
    module_id = 'pi_JJ9JrKvk'
    res = ml.pipelines.run(module_id, data, sandbox=True)
    keyword_array = []
    for kw in res.result['keywords'][0]['keywords']:
        keyword_array.append(kw['keyword'])
    # 'arr_of_arr' is assumed to be a module-level list defined elsewhere.
    arr_of_arr.append(keyword_array)