import json

import pika

# `sa` is assumed to be the project's sentiment-analysis helper module.


def sentiment_analytical_processor(unique_id, social_medium, size=10):

    if social_medium == 'twitter':
        connection = pika.BlockingConnection()
        channel = connection.channel()
        tweets = []
        count = 0
        for method_frame, properties, body in channel.consume(unique_id):
            print(type(body))
            tweets.append(json.loads(body)['text'])
            print('body %s' % body)
            print('properties %s' % properties)
            print('method_frame %s' % method_frame)
            count += 1

            # Acknowledge the message
            channel.basic_ack(method_frame.delivery_tag)

            # Escape out of the loop after `size` messages (10 by default)
            if count == size:
                break
        print('tweets string', tweets)
        tweets_str = ', '.join(tweets)
        # Cancel the consumer and return any pending messages
        requeued_messages = channel.cancel()
        print('Requeued %i messages' % requeued_messages)
        data = sa.sentiment(tweets_str)
        print(data)
        return data
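
A minimal smoke test for the consumer above, assuming a RabbitMQ broker on localhost and that `unique_id` names an existing queue. The queue name and tweet texts are hypothetical placeholders; the message shape ({'text': ...}) matches what the loop extracts.

import json

import pika

queue_name = 'job-123'  # hypothetical unique_id / queue name
connection = pika.BlockingConnection()  # defaults to localhost:5672
channel = connection.channel()
channel.queue_declare(queue=queue_name)
for text in ('great product', 'terrible service', 'it is okay'):
    channel.basic_publish(exchange='',
                          routing_key=queue_name,
                          body=json.dumps({'text': text}))
connection.close()

result = sentiment_analytical_processor(queue_name, 'twitter', size=3)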
Example #2
def _calculate_sentiment(post_id, filtered_comments):
    # Concatenate every comment message for the post into one string,
    # then run sentiment analysis and tag the result with the post id.
    full_comment_str = ''
    for j in filtered_comments:
        for comment in j['comments']:
            full_comment_str += ' '
            full_comment_str += comment['message']
    logger.debug(full_comment_str)
    data_ = sa.sentiment(full_comment_str)
    data_['post_id'] = post_id
    analyzed_data.append(data_)  # `analyzed_data` is a list in the enclosing scope
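
For clarity, a sketch of the input shape the helper expects, with made-up values; `sa`, `logger`, and the `analyzed_data` list come from the enclosing scope in the original code.

analyzed_data = []  # collected results, one dict per post
filtered_comments = [
    {'post_id': '123_456',  # hypothetical Facebook post id
     'comments': [{'message': 'love it'}, {'message': 'not for me'}]},
]
_calculate_sentiment('123_456', filtered_comments)
# analyzed_data now holds one sentiment dict tagged with 'post_id'.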
Example #3
def sentiment_analysis(params):

        try:
            tokens = ast.literal_eval(params.token)
        except ValueError:
            tokens = params.token
        source = str(params.source)
        try:
            limit = params.limit
        except AttributeError:
            limit = ''
        try:
            since = params.since
        except AttributeError:
            since = ''
        try:
            until = params.until
        except AttributeError:
            until = ''
        try:
            post_ids = ast.literal_eval(params.post_ids)
        except AttributeError:
            post_ids = None
        page = 'me'
        try:
            page = str(params.page)
        except AttributeError:
            pass
        try:
            hash_tag = str(params.hash_tag)
        except AttributeError:
            hash_tag = ''
        #analyzed_data = 'Incorrect datasource name provided!'
        if source == 'twitter':
            api = SMAuth.tweepy_auth(tokens['consumer_key'], tokens['consumer_secret'], tokens['access_token'], tokens['access_token_secret'])
            data_ = Tw.hashtag_search(api, hash_tag)
            #lsi.initialize_stream(hash_tag, unique_id, tokens) # if already exits do something
            #analyzed_data = smlf.process_social_media_data(unique_id, hash_tag)
            data = sa.sentiment(data_)
            result = cmg.format_response(True, data, 'Data successfully processed!')
            return result

        elif source == 'facebook':
            try:
                data = FB.get_page_posts_comments(tokens, limit, since, until, page, post_ids)
            except ValueError as err:
                data = cmg.format_response(False, err, 'Error validating access token: This may be because the user logged out or may be due to a system error.', sys.exc_info())
                return data

            #full_comment = []
            analyzed_data = []

            def _calculate_sentiment(post_id, filtered_comments):
                # Use the per-thread argument instead of the enclosing
                # `filtered_comments` variable, which is rebound on every
                # loop iteration and would race across threads.
                full_comment_str = ''
                for j in filtered_comments:
                    for comment in j['comments']:
                        full_comment_str += ' '
                        full_comment_str += comment['message']
                logger.debug(full_comment_str)
                data_ = sa.sentiment(full_comment_str)
                data_['post_id'] = post_id
                analyzed_data.append(data_)

            threads = []
            if post_ids is not None:
                for post_id in post_ids:
                    filtered_comments = list(filter(lambda d: d['post_id'] in post_id, data))
                    t = threading.Thread(target=_calculate_sentiment, args=(post_id, filtered_comments))
                    t.start()
                    print("Thread started to calculate sentiment analysis for post_id: {0}".format(post_id))
                    threads.append(t)
                    #full_comment_str.join(full_comment)
                    #analysed_data = sa.sentiment(full_comment_str.join(filtered_comments))
                for t in threads:
                    try:
                        t.join()
                    except Exception as err:
                        print(err)

                data = cmg.format_response(True, analyzed_data, 'Data successfully processed!')
                return data

            else:
                # Accumulate comments from every post before analyzing;
                # resetting full_comment_str inside the loop would keep
                # only the last post's comments.
                full_comment_str = ''
                for post in data:
                    for comments in post['comments']:
                        full_comment_str += ' '
                        full_comment_str += comments['message']
                analysed_data = sa.sentiment(full_comment_str)
                data = cmg.format_response(True, analysed_data, 'Data successfully processed!')
                return data
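
A hedged usage sketch for sentiment_analysis: `params` only needs attribute access, so a SimpleNamespace stands in here. The token keys mirror the tweepy_auth call above; every credential value is a placeholder, and attributes such as limit or post_ids are deliberately omitted to exercise the AttributeError fallbacks.

from types import SimpleNamespace

params = SimpleNamespace(
    token=str({'consumer_key': 'CK', 'consumer_secret': 'CS',
               'access_token': 'AT', 'access_token_secret': 'ATS'}),
    source='twitter',
    hash_tag='#example',
)
result = sentiment_analysis(params)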

def build_bi_partite(params):

        token = ast.literal_eval(params.token)
        #source = str(params.source)
        try:
            limit = params.limit
        except AttributeError:
            limit = ''
        try:
            since = params.since
        except AttributeError:
            since = ''
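
The repeated try/except AttributeError blocks in both functions are a hand-rolled version of getattr with a default; a compact equivalent sketch:

limit = getattr(params, 'limit', '')
since = getattr(params, 'since', '')
until = getattr(params, 'until', '')
page = str(getattr(params, 'page', 'me'))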