Example #1
import inspect
import json
import os

from django.shortcuts import render

# Project-local names assumed available at module level: RedditParser,
# sentiment, cmd_subfolder (and the disabled twitter module).

def home(request):
    context = {}
    context['query'] = False
    context['search_query'] = ''

    if request.method == 'GET':
        return render(request, 'hue/home.html', context)

    if 'search_q' in request.POST:
        search_query = request.POST['search_q']
        context['search_query'] = search_query
        print(search_query)
        context['query'] = True

        # Pull matching posts (the Twitter source is disabled here).
        # twitter.twitter_query(search_query)
        r = RedditParser()
        r.reddit_query(search_query, 25, 25)

        # Resolve the datumbox folder relative to this source file.
        path = os.path.realpath(os.path.abspath(os.path.join(
            os.path.split(inspect.getfile(inspect.currentframe()))[0], "datumbox")))
        ifile = cmd_subfolder + '/data.json'  # cmd_subfolder: module-level path, not shown here
        ofile = path + '/sentiment.csv'

        print(ifile)
        print(ofile)

        sentiment.analyze_sentiment(ifile, ofile, 0.1)

        # Run the external similarity tool and load its JSON output.
        path = os.path.realpath(os.path.abspath(os.path.join(
            os.path.split(inspect.getfile(inspect.currentframe()))[0], "semantic-similarity-master")))
        cofile = path + '/reddit_senti.json'
        os.system(path + "/similar" + ' ' + ifile + ' ' + ofile + ' ' + cofile)
        with open(cofile) as data_file:
            data = json.load(data_file)
        context['data'] = json.dumps(data)

    return render(request, 'hue/home.html', context)
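The view treats sentiment.analyze_sentiment(ifile, ofile, 0.1) as a file-to-file pass: it reads the scraped data.json and writes sentiment.csv. The module itself is project-local and not shown; a minimal sketch of what it might look like, using TextBlob as a stand-in scorer and assuming data.json holds a list of objects with a "text" field and that 0.1 is a polarity threshold (all three are assumptions, not the project's actual code):

import csv
import json

from textblob import TextBlob  # stand-in scorer, not the project's datumbox client

def analyze_sentiment(ifile, ofile, threshold):
    """Hypothetical sketch: score each post in ifile, keep strong ones in ofile."""
    with open(ifile) as f:
        posts = json.load(f)  # assumed format: a list of {"text": ...} objects
    with open(ofile, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["text", "polarity"])
        for post in posts:
            polarity = TextBlob(post["text"]).sentiment.polarity  # -1.0 .. 1.0
            if abs(polarity) >= threshold:  # guessed role of the 0.1 argument
                writer.writerow([post["text"], polarity])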
Example #2
import os

# Google Cloud Dialogflow CX client libraries used by this snippet.
from google.cloud.dialogflowcx_v3.services.agents import AgentsClient
from google.cloud.dialogflowcx_v3.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3.types import session

# agent, session_id, language_code, analyze_sentiment and find_emotion_gif
# are assumed to be defined at module level.

def discord_response(user, message_contents):
    """Returns the result of detect intent with texts as inputs.

    Using the same `session_id` between requests allows continuation
    of the conversation."""
    session_path = f"{agent}/sessions/{session_id}"
    client_options = None
    agent_components = AgentsClient.parse_agent_path(agent)
    location_id = agent_components["location"]
    if location_id != "global":
        # Regional agents must be reached through a region-specific endpoint.
        api_endpoint = f"{location_id}-dialogflow.googleapis.com:443"
        print(f"API Endpoint: {api_endpoint}\n")
        client_options = {"api_endpoint": api_endpoint}
    session_client = SessionsClient(client_options=client_options)

    input_string = message_contents
    image_float = analyze_sentiment(user, input_string)  # sentiment score, roughly -1.0 .. 1.0
    text_input = session.TextInput(text=input_string)
    query_input = session.QueryInput(text=text_input, language_code=language_code)
    request = session.DetectIntentRequest(
        session=session_path, query_input=query_input
    )
    response = session_client.detect_intent(request=request)

    response_messages = [
        " ".join(msg.text.text) for msg in response.query_result.response_messages
    ]
    return (os.getcwd() + find_emotion_gif(image_float),
            f"{' '.join(response_messages)}\n")
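discord_response returns a (gif_path, reply_text) pair, which suggests it is called from a bot's message handler. A minimal sketch of such a caller using discord.py (the framework choice is an assumption; only the tuple shape comes from the code above):

import discord

intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

@client.event
async def on_message(message):
    if message.author == client.user:
        return  # ignore the bot's own messages
    gif_path, reply_text = discord_response(str(message.author), message.content)
    # Send the sentiment gif alongside the Dialogflow reply.
    await message.channel.send(reply_text, file=discord.File(gif_path))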
Example #3
# Project-local modules assumed available: mining, summarize, sentiment.
# The original referenced amazon_reviews and amazon_features, which are not
# defined in this scope; the locally computed names are used instead.

def review_highlights_(reviews):
    reviews_texts = " ".join([r['text'] for r in reviews])
    review_sentences = mining.to_sentences(reviews_texts)

    # Pre-process the text and extract candidate features.
    review_words = mining.to_words(reviews_texts)
    review_words = mining.pos_tag(review_words)
    review_words = [w for w in review_words
                    if w[0] not in mining.stopwords()]
    review_bigrams = mining.find_bigram_collocations(review_words)
    review_features = mining.amazon_features_from_collocations(review_bigrams)

    # Find sentences that contain our features, summarize sentence groups
    # and score features for sentiment.
    feature_sentences = {}
    feature_sentiments = {f: 0 for f in review_features}

    for f in review_features:
        sentences = [s for s in review_sentences if f in s]
        cleaned_sentences = [mining.remove_stopwords(mining.to_words(s))
                             for s in sentences]
        # Pick the most central sentence as the feature's highlight.
        central = summarize.rank_by_centrality(cleaned_sentences)
        feature_sentences[f] = sentences[central[0]]
        feature_sentiments[f] += sum(sentiment.analyze_sentiment(s, review_features)[f]
                                     for s in sentences)

    # Build the result for every feature (the original only kept the last one).
    content = {f: (feature_sentences[f], feature_sentiments[f])
               for f in review_features}

    return content
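summarize.rank_by_centrality is project-local; from its use above it takes a list of tokenized sentences and returns sentence indices ordered by how representative they are. One common way to implement that, sketched here with scikit-learn's TF-IDF and cosine similarity (an assumption about the technique, not the project's code):

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def rank_by_centrality(tokenized_sentences):
    """Hypothetical sketch: indices of sentences, most central first."""
    texts = [" ".join(tokens) for tokens in tokenized_sentences]
    tfidf = TfidfVectorizer().fit_transform(texts)
    # A sentence's centrality is its average similarity to all the others.
    centrality = cosine_similarity(tfidf).mean(axis=1)
    return list(np.argsort(centrality)[::-1])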
Example #4
import json
import typing

# Project-local helpers assumed available: load_articles, extract_article_text,
# and analyze_sentiment, whose result's document_sentiment field matches the
# Google Cloud Natural Language API response.

def run_app():
    all_articles = load_articles(
        'generated_data/recent_political_articles.json')
    sentiments: typing.Dict[str, dict] = {}
    print("Examining sentiments...")
    for article in all_articles:
        if article['document_type'] != 'article':
            continue
        # Map an id like "nyt://article/<uuid>" to its local HTML dump.
        article_path = 'generated_data/articles/%s.html' % str(
            article['_id'])[len("nyt://article/"):]
        print("Examining: %s" % article['headline']['main'])
        sentiment = analyze_sentiment(
            extract_article_text(article_path)).document_sentiment
        sentiments[article['_id']] = {
            'score': sentiment.score,          # overall polarity, -1.0 .. 1.0
            'magnitude': sentiment.magnitude   # emotional strength, non-negative
        }
    print("Saving sentiments...")
    with open('generated_data/article_sentiments.json', 'w',
              encoding='utf-8') as sentiments_file:
        json.dump(sentiments,
                  sentiments_file,
                  sort_keys=True,
                  indent=4,
                  ensure_ascii=False)
    print("Done!")
Example #5
# Uses the same Dialogflow CX imports and module-level configuration
# (agent, session_id, language_code) as Example #2.

def detect_intent(user):
    """Returns the result of detect intent with texts as inputs.

    Using the same `session_id` between requests allows continuation
    of the conversation."""
    session_path = f"{agent}/sessions/{session_id}"
    print(f"Session path: {session_path}\n")
    client_options = None
    agent_components = AgentsClient.parse_agent_path(agent)
    location_id = agent_components["location"]
    if location_id != "global":
        api_endpoint = f"{location_id}-dialogflow.googleapis.com:443"
        print(f"API Endpoint: {api_endpoint}\n")
        client_options = {"api_endpoint": api_endpoint}
    session_client = SessionsClient(client_options=client_options)

    input_string = input("Enter your prompt for bitberg")
    while input_string != 'close':
        image_float = analyze_sentiment(user, input_string)
        text_input = session.TextInput(text=input_string)
        query_input = session.QueryInput(text=text_input, language_code=language_code)
        request = session.DetectIntentRequest(
            session=session_path, query_input=query_input
        )
        response = session_client.detect_intent(request=request)

        # Display a gif matching the sentiment bucket.
        # display_gif(find_emotion_gif(image_float))
        if -1.0 < image_float < -0.5:
            __display_gif('angry')
        elif image_float < 0.0:
            __display_gif('sad')
        elif image_float < 0.5:
            __display_gif('bored')
        else:
            __display_gif('happy')

        print("=" * 20)
        print(f"Query text: {response.query_result.text}")
        response_messages = [
            " ".join(msg.text.text) for msg in response.query_result.response_messages
        ]
        print(f"Response text: {' '.join(response_messages)}\n")

        input_string = input()
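Example #2 resolves a gif path with find_emotion_gif, presumably using the same sentiment buckets as the if/elif chain here. A sketch of that helper under that assumption (the returned file names are invented):

def find_emotion_gif(image_float):
    """Hypothetical sketch: map a sentiment score to a relative gif path."""
    if -1.0 < image_float < -0.5:
        emotion = 'angry'
    elif image_float < 0.0:
        emotion = 'sad'
    elif image_float < 0.5:
        emotion = 'bored'
    else:
        emotion = 'happy'
    return f"/gifs/{emotion}.gif"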
Example #6
import requests

# BASE_URL (e.g. "http://localhost:3000", per the commented-out hint in the
# original) and the project-local sentiment module s are assumed to be
# defined at module level.

def addComment(comment):
    # Comments arrive either as raw dicts or as PRAW-style Comment objects;
    # extract the same fields from either shape.
    isDict = type(comment) == dict
    if isDict:
        text = comment['body']
        id = comment['id']
        time = comment['created_utc']
        score = comment['score']
        parent_post = comment['link_id']
        parent_comment = "t_0" if comment[
            'parent_id'] == parent_post else comment['parent_id']
        author = comment['author']
    else:
        text = comment.body
        time = comment.created_utc
        id = comment.id
        score = comment.score
        parent_post = comment.link_id[3:]  # strip the "t3_" kind prefix
        parent_comment = "t_0" if comment.link_id == comment.parent_id else comment.parent_id
        author = comment.author
    # Look up the comment by id first (the response is currently unused).
    r = requests.get(url=BASE_URL + "/id/" + id)
    sentiment = s.analyze_sentiment(text)
    data = {
        "comment_id": id,
        "comment_date": time,
        "ticker": "TEST",
        "parent_post": parent_post,
        "parent_comment": parent_comment,
        "body": text,
        "score": score,
        "sentiment": sentiment,
        "author": author
    }
    r = requests.post(url=BASE_URL + "/comments", data=data)
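The attribute branch (comment.body, comment.link_id, comment.parent_id, ...) matches PRAW's Comment model, so a typical caller would stream live comments and feed each one in. A sketch assuming PRAW; the credentials and subreddit name are placeholders:

import praw

reddit = praw.Reddit(
    client_id="YOUR_CLIENT_ID",        # placeholder credentials
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="sentiment-collector",
)

# Stream new comments and push each one to the comments API.
for comment in reddit.subreddit("wallstreetbets").stream.comments(skip_existing=True):
    addComment(comment)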
Example #7
            f.write(text)


if __name__ == "__main__":

    # download() and convert() are project-local helpers, assumed to fetch the
    # source documents and turn them into the text/*.txt files processed below.
    download()
    convert()

    csv_folder = "csv"

    if not os.path.exists(csv_folder):
        os.makedirs(csv_folder)

    txts = glob.glob("text/*.txt")

    for i, txtfile in enumerate(txts):
        author, results = sentiment.analyze_sentiment(txtfile)

        fname = author.replace(" ", "")  # Remove spaces

        np.savetxt(
            f"{csv_folder}/{fname}.csv",
            results,
            delimiter=",",
            header="window #, polarity, subjectivity",
        )

        print(f"Data saved to {csv_folder}/{fname}.csv [{i+1}/{len(txts)}]")

    print("\nDone!")