Example #1
import praw
import pandas as pd

# client_id, client_secret and the Sentiment class are assumed to be defined elsewhere in this module.


class CommentProcessor:
    """Process subreddit comments to find sentences that mention stock tickers."""
    def __init__(self, subreddit, sentimentInterval, dataSet):
        self.subreddit, self.sentimentInterval = subreddit, sentimentInterval
        self.subInstance = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent="testscript by u/spaceballcookie").subreddit(subreddit)
        self.df = pd.DataFrame(columns=['postID', 'ticker', 'sentiment', 'magnitude','subreddit', 'permalink', 'date', 'body'])
        self.sentiment = Sentiment()
        self.dataSet = dataSet

    def getData(self):
        return self.df

    def parseSubmissions(self, type, args):
        #print('self.subInstance.'+type+'('+','.join(args)+ ')\n')
        for submission in eval('self.subInstance.'+type+'('+','.join(args)+ ')'):
            submission.comments.replace_more(limit=0)
            self.parseComments(submission.comments)

    def parseComments(self, comments):
        for comment in comments:
            #analyzeComment(comment.body)
            if not comment.body.isupper():
                self.sentiment.setWeighting(comment.score)
                self.searchString(comment)
            self.parseComments(comment.replies)

    def searchString(self,comment):
        wordsUsed = []
        for word in comment.body.split():
            if 2 <= len(word) <= 4 and word.isupper():
                if word not in wordsUsed and self.queryContainsWord(word) > 0:
                    wordsUsed.append(word)
                    self.analyzeComment(comment, word)

    def analyzeComment(self, comment, word):
        sentences = comment.body.split('.')
        averageScore = 0
        averageMagnitude = 0
        sentenceCount = 0

        for sentence in sentences:
            sentenceCount += 1
            # The flag marks whether this sentence mentions the ticker
            sentiment_dict = self.sentiment.getSentiment(sentence, word in sentence)
            averageScore += sentiment_dict["Score"]
            averageMagnitude += sentiment_dict["Magnitude"]

        averageScore = averageScore / sentenceCount
        averageMagnitude = averageMagnitude / sentenceCount
        self.df.loc[len(self.df.index)] = [comment.id, word, averageScore, averageMagnitude,
                                           self.subreddit, comment.permalink, comment.created_utc, comment.body]

    def queryContainsWord(self, word):
        return len(self.dataSet.query('Symbol == @word'))
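A note on parseSubmissions: it assembles the PRAW call as a source string and runs it through eval, which breaks on odd input and is unsafe if `type` or `args` ever come from users. A safer sketch of the same dispatch using getattr (an assumption about the calling convention: callers would pass real values instead of source-code fragments):

    def parseSubmissions(self, listing, **kwargs):
        # e.g. listing='top', kwargs={'time_filter': 'day', 'limit': 25}
        # getattr looks up the listing method (subInstance.top, .hot, .new, ...)
        for submission in getattr(self.subInstance, listing)(**kwargs):
            submission.comments.replace_more(limit=0)
            self.parseComments(submission.comments)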
Example #2
from google.cloud import language_v1
from google.api_core import exceptions

import Sentiment


def SA_text(text_content, service_account):
    client = language_v1.LanguageServiceClient.from_service_account_json(
        service_account)
    type_ = language_v1.Document.Type.PLAIN_TEXT
    document = {"content": text_content, "type_": type_}
    encoding_type = language_v1.EncodingType.UTF8

    try:
        response = client.analyze_sentiment(request={
            'document': document,
            'encoding_type': encoding_type
        })
    except exceptions.InvalidArgument as e:
        # The API raises InvalidArgument for malformed or unsupported input
        print('ERROR ANALYSING TEXT USING GOOGLE ANALYSIS:')
        print(e.message)
        return None

    # Score is the overall emotional leaning of the text.
    # Magnitude indicates the overall strength of the emotion.

    score = response.document_sentiment.score
    magnitude = response.document_sentiment.magnitude
    sentiment = Sentiment.Sentiment(score, magnitude)
    print('SA RESULTS TEXT: {}'.format(text_content))
    print(sentiment)

    return sentiment
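SA_text wraps the score/magnitude pair in a Sentiment.Sentiment object that isn't shown in this listing. A minimal sketch of what that wrapper might look like (an assumption, not the original class):

# Sentiment.py: hypothetical sketch of the wrapper used above
class Sentiment:
    def __init__(self, score, magnitude):
        self.score = score          # overall emotional leaning, roughly -1.0 to 1.0
        self.magnitude = magnitude  # overall strength of the emotion, 0.0 and up

    def __str__(self):
        return 'score: {}, magnitude: {}'.format(self.score, self.magnitude)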
Example #3
def data(message):
    # `s` is assumed to be the trained sentiment model; jsonify comes from Flask
    a = s.sentiment(str(message))
    return jsonify({
        "message": message,
        "description": "prediction is either 1 for positive or 0 for negative and the confidence value",
        "prediction": str(a[0]),
        "confidence": str(a[1] * 100)
    })
Example #4
def analyse_sentiment(data, tweets):
    analyser = st.Sentiment(tweets)
    vader_sentiments, vader_score = analyser.check_sentiment_vader()
    vader_data = append_to_data_frame(data, 'Nltk_Sentiment_Score',
                                      vader_score)
    vader_data = append_to_data_frame(vader_data, 'Nltk_Sentiment',
                                      vader_sentiments)

    return vader_data
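The append_to_data_frame helper isn't defined in this snippet; from its use it attaches a named column to the frame and returns the result. A plausible pandas implementation (an assumption, not the original helper):

def append_to_data_frame(data, column_name, values):
    # Attach `values` as a new column and return the updated frame
    data = data.copy()
    data[column_name] = values
    return data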
Example #5
def home():
    if request.method == 'POST':
        data = request.form.get('text')
        # Make prediction

        pred = s.sentiment(data)
        if pred[0] == 1:
            ans = "Positive"
        else:
            ans = "Negative"
        return render_template('index.html', sentiment=ans)
    return render_template('index.html', sentiment='')
Example #6
    def test_count_tags(self):
        # The expected values for assertEqual are taken from sp.dic
        an = AnalisisSentimiento()
        # 13, 16, 18, 19, 66
        tags = ["Posemo", "Negemo", "Anger", "Sad", "Swear"]

        tokens = senti.getTokens("bueno bueno bueno", an.d[0], an.d[1])
        self.assertEqual(an.count_tags(tokens, tags), [3, 0, 0, 0, 0])

        tokens = senti.getTokens("malo malo mala mal", an.d[0], an.d[1])
        self.assertEqual(an.count_tags(tokens, tags), [0, 4, 3, 0, 0])

        tokens = senti.getTokens("Me siento abandonado por la sociedad", an.d[0], an.d[1])
        self.assertEqual(an.count_tags(tokens, tags), [0, 1, 0, 1, 0])

        tokens = senti.getTokens("Puros bastardos bobalicones en este país!", an.d[0], an.d[1])
        self.assertEqual(an.count_tags(tokens, tags), [0, 2, 1, 0, 2])

        # Professor/TA: excuse the language in this test, but I think it's necessary, since Twitter is not a formal setting and some tweets like these will be analyzed.
        # The word "hueón" and its derivatives are so common and so ambivalent in colloquial speech that I only tagged it as Swear.
        tokens = senti.getTokens("Tendrá cara de hueón, pero igual es sensato", an.d[0], an.d[1])
        self.assertEqual(an.count_tags(tokens, tags), [1, 0, 0, 0, 1])
Example #7
def main():
    review = Review("", "", "", "", "", "", "", "", "", "")  # this will call your constructor
    # Get 50 results from the database
    reviews = review.get_reviews("50")
    for a_review in reviews:
        # Construction of the sentiment table
        blob = TextBlob(a_review.text, analyzer=NaiveBayesAnalyzer())
        text_sentiment = blob.sentiment
        # text_sentiment will be either pos (positive) or neg (negative)
        text_sentiment = text_sentiment[SENTIMENT_TYPE]
        # Create a Sentiment object and insert it into the sentiment table
        sentiment = Sentiment(a_review.review_id, a_review.business_id, text_sentiment)
        sentiment.insert()

    # Construction of the review_stats table
    review_stats = Review_stats("", "", "", "", "")
    review_stats.insert()  # insert positive and negative reviews' information into review_stats

    # Construction of the common_phrases table
    business = Business("", "", "", "", "", "", "", "", "", "", "")
    business_ids = business.get_all_business_ids()  # acquire all business_ids from the sentiment table
    word_dictionary = dict()
    for business_id in business_ids:
        review = Review("", "", "", "", "", "", business_id.business_id, "", "", "")
        Reviews = review.get_reviews_by_business_id()  # get all reviews for this business_id
        insert_words(business_id.business_id, Reviews, word_dictionary)  # insert data into the common_phrases table
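SENTIMENT_TYPE isn't defined in this excerpt. TextBlob's NaiveBayesAnalyzer returns a sentiment namedtuple of the form (classification, p_pos, p_neg), so indexing with 0 yields the 'pos'/'neg' label the comment describes. A plausible definition (an assumption):

# Hypothetical constant: index of the classification field in the
# NaiveBayesAnalyzer sentiment namedtuple (classification, p_pos, p_neg)
SENTIMENT_TYPE = 0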
Example #8
def index():
    form = NameForm()
    if form.validate_on_submit():
        score = Sentiment.predict_text(form.name.data)
        if score[0][0] >= 0.5:
            sentiment = 'Negative'
            category = 'danger'
        else:
            sentiment = 'Positive'
            category = 'success'

        flash('Sentiment: ' + sentiment, category)
        session['name'] = form.name.data
        return redirect(url_for('index'))
    return render_template('index.html', form=form, name=session.get('name'))
Example #9
def process_video(frames, pnet, rnet, onet, sentiment_model):

    crops = []
    crop_idcs = []
    new_frames = []
    score = 100
    emotion = 'NA'

    for i, frame in enumerate(frames):

        bbox, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet,
                                          threshold, factor)

        frame = frame.copy()

        try:

            for box in bbox:

                w = box[2] - box[0]
                h = box[3] - box[1]
                # Crop the detected face out of the frame
                crop_frame = frame[int(box[1]):int(box[1] + h),
                                   int(box[0]):int(box[0] + w)]

                # Re-run the sentiment model only on every fifth frame
                if i % 5 == 0:
                    emotion, score = sent.Sentiment_Analysis(
                        crop_frame, sentiment_model)

                frame = cv2.putText(frame, '%s %.3f%%' % (emotion, score),
                                    (int(box[0]), int(box[1] - 5)), font,
                                    fontScale, fontColor, lineType)

                crops.append(crop_frame)
                crop_idcs.append(i)
                frame = cv2.rectangle(frame, (int(box[0]), int(box[1])),
                                      (int(box[0] + w), int(box[1] + h)),
                                      (0, 0, 255), 2)

        except Exception as e:
            print(e)

        new_frames.append(frame)

    return crops, new_frames, crop_idcs
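process_video relies on module-level detection and drawing constants that this listing doesn't show. A plausible set of definitions, using the usual facenet MTCNN defaults (an assumption, not the original values):

import cv2

# Assumed MTCNN parameters (the canonical facenet defaults)
minsize = 20                  # minimum face size in pixels
threshold = [0.6, 0.7, 0.7]   # P-Net / R-Net / O-Net score thresholds
factor = 0.709                # image-pyramid scale factor

# Assumed OpenCV text-drawing settings
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
fontColor = (255, 255, 255)
lineType = 2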
Example #10
    def textRequest(self, text):
        try:
            city = self.city
        except AttributeError:
            city = "new york"
        locationList, locationIndex = insta.searchCity(city, 10)
        print("grams searched: " + str(len(locationList)))
        # Filter out locations whose captions have negative sentiment
        removeList = []
        for locationDict in locationList:
            captions = locationDict['captions']
            if not Sentiment.judgeSetSentiment(captions):
                removeList.append(locationDict)
        print("finished sentiment judging")
        for locationDict in removeList:
            locationList.remove(locationDict)
        # Get the locations with the top three similarity scores
        text = "".join(l for l in text if l not in string.punctuation)
        print(text)
        textList = text.split()
        for locationDict in locationList:
            tags = locationDict['tags']
            for i in range(0, len(tags)):
                tags[i] = tags[i].name.strip("#")
            simScore = wa.getSetsSim(tags, textList)
            locationDict['simScore'] = simScore
        print("finished similarities")
        topThree = getTopLocation(locationList, 3)
        stringList = []
        for locationDict in topThree:
            point = locationDict['location'].point
            lat, lng = point.latitude, point.longitude
            ll = str(lat) + ',' + str(lng)
            searchLLResult, searchUrl = fs.searchLL(lat, lng)
            txt = searchLLResult.name
            if searchUrl == "":
                imageUrl = locationDict['images'][0]['standard_resolution'].url
            else:
                imageUrl = searchUrl
            stringList.append(TRI.join([ll, txt, imageUrl]))
        toSend = STAR.join(['H2'] + stringList)
        print(toSend)
        self.sendStr(toSend)
        print("sent")
Example #12
def data(message):
    a = s.sentiment(str(message))
    if a[0] == 1:
        return jsonify({
            "message": message,
            "response": "The message is positive with a confidence of {}".format(a[1] * 100)
        })
    elif a[0] == 0:
        return jsonify({
            "message": message,
            "response": "The message is negative with a confidence of {}".format(a[1] * 100)
        })
Example #13
    def Analyze(self, tweet):
        """
        Description: From the collected tweets, this method identifies keywords
        for sentiment analysis.
        Preconditions: The string corresponds to a tweet in its correct format, and the tweet
        relates to some candidate in the constituent process.
        Postconditions: A definite sentiment for the tweet is returned with its corresponding factor.
        """
        # `tweet` is the tweet text, as returned by getDB.
        # getDB (Mongo) runs outside; with that data Analyze is called, which internally uses saveDB (Postgres).

        # TODO: handle negation

        # Tables needed in Postgres:
        #   (1) Top 3 tweets per candidate, per emotion (one table, with fields id_tweet (must be bigint!), idCandidato, emocion, tagCount, autor, link, tweet)
        #   (2) Count of analyzed tweets per candidate (one table, with fields idCandidato and count)
        #   (3) Average tagCount per emotion, per candidate (one table, with fields idCandidato, emocion and promedio (should be double))

        # Open the psql connection
        try:
            conn = psycopg2.connect(
                database=self.pos_db, user=self.pos_user, password=self.pos_pass, host=self.pos_host, port=self.pos_port
            )
            cur = conn.cursor()
        except psycopg2.Error:
            return False

        # posemo 13, negemo 16, anger 18, sad 19, swear 66

        # Count the occurrences of the tags in the tweet.
        tags = ["Posemo", "Negemo", "Anger", "Sad", "Swear"]
        tokens = senti.getTokens(tweet, self.d[0], self.d[1])
        tags_count = self.count_tags(tokens, tags)

        # Update the database if necessary.
        self.update_tops(cur, tags, tags_count, tweet)
        self.update_proms(cur, tags, tags_count)

        # Commit the changes and close the connection
        conn.commit()
        cur.close()
        conn.close()

        return True
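count_tags isn't shown here, but the test in Example #6 pins down its contract: given the token list from senti.getTokens and a list of tag names, it returns one occurrence count per tag. A minimal sketch, assuming each token carries the list of LIWC-style categories assigned to its word (the real token structure isn't visible in this listing):

    def count_tags(self, tokens, tags):
        # tokens: assumed iterable of (word, categories) pairs, where
        # categories lists the LIWC-style tags assigned to the word
        counts = [0] * len(tags)
        for _word, categories in tokens:
            for i, tag in enumerate(tags):
                if tag in categories:
                    counts[i] += 1
        return counts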
Example #14
def loadData(fname):
    data = []
    with open(fname) as fin:
        for line in fin:
            try:
                tweet = json.loads(line)
                text = tweet['text']
                tm = tweet['timestamp_ms']
                result = Sentiment.get_sentiment_info(text)
                entry = {}
                entry['text'] = text
                entry['timestamp_ms'] = tm
                entry['sentiment'] = result
                print(text)
                data.append(json.dumps(entry))
            except (ValueError, KeyError):
                continue
    return data
Example #15
def emotion_angry(Sentiment):
    speak("Hey! You seem angry? What's the matter?")
    response = takeCommand().lower()
    Sentiment += (S.get_Sentiment(response) * 5)
    check_Sentiment_level(Sentiment, '*****@*****.**', '*****@*****.**')
    return Sentiment
Example #16
                    images_placeholder: images[batches[i - 1]:batches[i]],
                    phase_train_placeholder: False
                }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                embed_array.extend(embed.tolist())

            np.save('embeddings.npy', embed_array)


if __name__ == '__main__':

    sess = tf.Session()
    #Models
    pnet, rnet, onet = nets(sess, 'models/')
    sentiment_model = sent.Transfer_learning()
    yolo = YOLO()
    Video = 0
    if Video == 1:
        print('Still')
        clip = VideoFileClip('Video/Video_1.mp4')
        fps = clip.fps
        print(fps)
        crops, new_frames, crop_idcs = process_video(
            clip.subclip(0, 10).iter_frames(), pnet, rnet, onet,
            sentiment_model)
        newer_frames = human_tracking(new_frames, yolo)
        np.save('face_crops.npy', crops)
        get_embeddings(crops)
        clip = ImageSequenceClip(new_frames, fps=fps)
        clip.write_videofile('Video_Output/newvideo.mp4', fps=fps)
Example #17
     trends = twitter_streaming.get_trendy(int(woeid))
     Hashtags = [str(x['name']) for x in trends[0]['trends']]
     print("Trends in your country:")
     Hash_tags = ", ".join(Hashtags)
     print(Hash_tags)
 except Exception as e:
     print(
         "Error getting current trends of your country.\nEncountered an exception while getting the trends in your area.\n\t%s"
         % e)
 track, number = str(
     input("Enter track and number of tweets (use \",\" as delimiter):")
 ).split(",")
 tweets, sentiments = twitter_streaming.get_data(track, int(number))
 data = data + tweets
 target = target + sentiments
 tf_idf = Filter.filter(data)
 classifier = Sentiment.learn(tf_idf[:1600000], target[:1600000])
 prediction = Sentiment.predict(tf_idf[1599999:], classifier)
 prediction = [int(x) for x in prediction]
 print("\nTotal tweets analyzed: %d" % len(prediction))
 total_positive = 0
 total_negative = 0
 for i in prediction:
     if int(i) == 0:
         total_negative += 1
     else:
         total_positive += 1
 print("Positive : %.2f%%" % (total_positive / float(len(prediction)) * 100))
 print("Negative : %.2f%%" % (total_negative / float(len(prediction)) * 100))
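Sentiment.learn and Sentiment.predict aren't included in this listing. A minimal sketch of what such a module could look like on top of scikit-learn (an assumption; the original implementation isn't visible here):

# Sentiment.py: hypothetical sketch, not the original module
from sklearn.naive_bayes import MultinomialNB


def learn(features, labels):
    """Fit a classifier on TF-IDF features and their sentiment labels."""
    classifier = MultinomialNB()
    classifier.fit(features, labels)
    return classifier


def predict(features, classifier):
    """Predict sentiment labels for new TF-IDF features."""
    return classifier.predict(features)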
Example #18
import glob
from Sentiment import Sentiment

files = glob.glob('neg/*')

l = []
for f in files:
    S = Sentiment(f)
    l.append(S.average_sentiment())
Example #19
    # Capture frame-by-frame
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    # Draw a rectangle around the faces

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        frame_crop = frame[y:y + h, x:x + w]
        # Perform deep-learning sentiment analysis on the face crop
        # (note: the model is re-created for every detected face here)
        model = sent.Transfer_learning()
        Emotion, percentage = sent.Sentiment_Analysis(frame_crop, model)
        cv2.imshow('Crop', frame_crop)
        print(Emotion)

    # Display the resulting frame
    cv2.imshow('Video', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
Example #20
# Get topic as input
topic = input('Enter topic for analysis: (e.g. microsoft word)\n')

# Use topic as filename prefix
keys = topic.split(' ')
file_name = '_'.join(keys) + '_'

# Fetch tweets related to topic
fetched_tweets = api.search(q=topic, count=1000, lang='en')
tweet_list = Tweet.getTweets(fetched_tweets)

# Set sentiment scores of tweets
Sentiment.setSentiment(tweet_list)
Sentiment.exportCSV(tweet_list, file_name)

# Read sentiment csv and convert it to a data frame
df = pd.read_csv(file_name + 'sentiments.csv')

# Parse content column into a list
tweets = df['content'].tolist()

# Plot most used words
an.plot_most_used_words(tweets, file_name)

# Plot sentiment labels
an.plot_sentiments(df, file_name)

# Create word cloud
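The listing cuts off after this comment; a plausible completion using the wordcloud package (an assumption, since the original word-cloud code isn't shown):

# Hypothetical completion of the truncated step above
from wordcloud import WordCloud
import matplotlib.pyplot as plt

wordcloud = WordCloud(width=800, height=400, background_color='white').generate(' '.join(tweets))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.savefig(file_name + 'wordcloud.png')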
Example #21
def emotion_happy(Sentiment):
    speak("Hey! You seem happy? Anything special?")
    response = takeCommand().lower()
    Sentiment += (S.get_Sentiment(response) * 2)
    return Sentiment
Example #22
    def __init__(self, audio_path):
        self.AUDIO_PATH = audio_path
        self.sentiment = Sentiment.SentimentModel()
        self.speechRecognition = SpeechRecognition.SpeechRecognition()
Example #23
import os

import Filter
import load
import Sentiment
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix


def evaluate_model(target_true, target_predicted):
    print("The confusion matrix is as follows:")
    print(confusion_matrix(target_true, target_predicted))
    print("The classification report is as follows:")
    print(classification_report(target_true, target_predicted))
    print("The accuracy score is {:.2%}".format(accuracy_score(target_true, target_predicted)))


if __name__ == "__main__":
    if not os.path.isfile("sentiments.pickle") or not os.path.isfile("tweets.pickle"):
        datafile = str(input("Enter path of training data (should be in .csv):"))
        # replace 4th parameter with the column number of your tweets
        # replace 5th parameter with the column number of your sentiments
        data, target = load.load_data(datafile, ",", '"', 5, 0)
        load.save_sentiments(target)
        load.save_tweets(data)
    else:
        data = load.load_tweets()
        target = load.load_sentiments()
    tf_idf = Filter.filter(data)
    # Test data is taken to be 40% of the total; the remaining 60% is training data
    data_train, data_test, target_train, target_test = Sentiment.data_generate(tf_idf, 0.4, target)
    classifier = Sentiment.learn(data_train, target_train)
    prediction = Sentiment.predict(data_test, classifier)
    evaluate_model(target_test, prediction)
Example #24
    def test_get_tweet_sentiment(self):
        obj = Sentiment.TwitterClient('@jashwanth0007')
        tweet = "d h nair  RSS and BJP has the DNA of cowards like V D Savarkar  Question them on issues  they run away   Rahul Gandhi"
        x = obj.get_tweet_sentiment(tweet)
        self.assertEqual(x, "neutral")
Example #25
import numpy as np
import Sentiment

review_path, label_path = 'data/reviews.txt', 'data/labels.txt'
seq_len = 200
split_fraction = 0.8

reviews, labels = Sentiment.load_test(review_path, label_path)
# Preprocess Data

preprocessed_text, split_reviews = Sentiment.preprocess_text(reviews)
words = preprocessed_text.split()
reviews_encoded, encoded_vocab = Sentiment.encode_text(preprocessed_text,
                                                       split_reviews)

encoded_labels = Sentiment.encode_labels(labels)
filtered_review_encoded, filtered_label_encoded = Sentiment.outlier_removal(
    reviews_encoded, encoded_labels)
padded_features = Sentiment.pad_features(filtered_review_encoded, seq_len)

# Generate training and test data
split_index = int(len(padded_features) * split_fraction)
train_x, remaining_x = padded_features[:split_index], padded_features[split_index:]
train_y, remaining_y = filtered_label_encoded[:split_index], filtered_label_encoded[split_index:]
test_idx = int(len(remaining_x) * 0.5)
val_x, test_x = remaining_x[:test_idx], remaining_x[test_idx:]
val_y, test_y = remaining_y[:test_idx], remaining_y[test_idx:]
# print out the shapes of your resultant feature data
Example #26
import os

import Filter
import load
import Sentiment

if __name__ == '__main__':
    if not os.path.isfile("sentiments.pickle") or not os.path.isfile(
            "tweets.pickle"):
        datafile = str(input("Enter path of training data (should be in .csv):"))
        #replace 4th parameter with the column number of your tweets
        #replace 5th parameter with the column number of your sentiments
        data, target = load.load_data(datafile, ",", '"', 5, 0)
        load.save_sentiments(target)
        load.save_tweets(data)
    else:
        data = load.load_tweets()
        target = load.load_sentiments()
    data.append(str(input("Enter a tweet:")))
    target.append(0)
    tf_idf = Filter.filter(data)
    classifier = Sentiment.learn(tf_idf[:len(data) - 2], target[:len(target) - 2])
    prediction = Sentiment.predict(tf_idf[len(data) - 1], classifier)
    # Sentiment greater than 2 is positive, less than 2 is negative, and equal to 2 is neutral.
    # Update if you use a different labeling scheme.
    if int(prediction[0]) > 2:
        print("Positive")
    elif int(prediction[0]) < 2:
        print("Negative")
    else:
        print("Neutral")
Example #27
from Sentiment import *
import sys

f = sys.argv[1]
S = Sentiment(f)
Example #28
import queue
import signal

import Broker
import Sentiment

# access_token is assumed to be defined just above this excerpt
access_token_secret = 'vknEzPskeXBMxQhGnSnbaawnn329h0aDM7rEGI1n6TaFx'
consumer_key = 'BYCrKwYk3oFI6r5g3x8mVKTcH'
consumer_secret = 'cpBLW4tqXNGZM43Vpar3H4bUJUIvMEep4NCiulL1oR7rJYVCAQ'

#
# MORE SPECIFIC SETTINGS
#
# Size of the producer/consumer buffer
buffersize = 10
q = queue.Queue(buffersize)


# Set up the Twitter and trading threads
twitter = Sentiment.TwitterThread(q, access_token, access_token_secret,
                                  consumer_key, consumer_secret)
trading = Broker.TradingThread(q)


class ServiceExit(Exception):
    pass

def handler(signum, frame):
    twitter.shutdown()
    trading.shutdown()  # the thread object is named `trading`, not `broker`
    raise ServiceExit


def main():
    # Install the shutdown handler for termination and interrupt signals
    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGINT, handler)