Example #1
1
def main():
    """Authenticate against Twitter and consume the user stream indefinitely."""
    options, _ = get_parser()
    handler = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    handler.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    twitter_api = API(auth_handler=handler)
    listener = CustomStreamListener(twitter_api, options)
    user_stream = Stream(handler, listener)
    # No timeout: block on the stream forever.
    user_stream.timeout = None
    user_stream.userstream()
Example #2
0
def main():
    """Open an OAuth-authenticated Twitter user stream and run it forever."""
    parsed = get_parser()
    cli_options = parsed[0]
    oauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    oauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    client = API(auth_handler=oauth)
    tweet_stream = Stream(oauth, CustomStreamListener(client, cli_options))
    tweet_stream.timeout = None  # never give up on the connection
    tweet_stream.userstream()
Example #3
0
def main():
    """Run the Twitter user stream until interrupted with Ctrl-C."""
    try:
        user_stream = Stream(auth, CustomStreamListener())
        user_stream.timeout = None  # keep the connection open indefinitely

        # Start consuming the user stream.
        print("start")
        user_stream.userstream()
    except KeyboardInterrupt:
        print("\nexit: KeyboardInterrupt")
        return
Example #4
0
def home(request):
    """Stream tweets matching the POSTed keyword into Elasticsearch for ~10s,
    then render every stored match into index.html.

    Reads ``request.POST['myword']`` as the search term, indexes located
    tweets (with geocoded coordinates) into the ``tweetmap`` index, and
    returns the accumulated documents as JSON under ``my_data``.
    """
    # Twitter credentials.
    # NOTE(review): placeholders hard-coded in source — these should come
    # from settings or environment variables, never from code.
    ckey = "Twitter_Consumer_Key"
    csecret = "Twitter_Consumer_Secret_Key"
    atoken = "Twitter_Access_Token"
    asecret = "Twitter_Access_Token_Secret_Key"

    # Elasticsearch client against an AWS-hosted domain, SigV4-signed HTTPS.
    host = 'ElasticSearch_Host'  # Create a domain on ElasticSearch Service and add the endpoint here
    awsauth = AWS4Auth('AWS_Access_Key', 'AWS_Access_Secret_Key', 'AWS_Region',
                       'es')
    es = elasticsearch.Elasticsearch(
        hosts=[{
            'host': host,
            'port': 443
        }],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=elasticsearch.connection.RequestsHttpConnection)

    class TweetStreamListener(StreamListener):
        """Indexes located tweets into 'tweetmap' for ``time_limit`` seconds."""

        def __init__(self, time_limit=10):
            self.start_time = time.time()
            self.limit = time_limit

        def on_data(self, data):
            """Index one incoming tweet; return False once time is up."""
            dict_data = json.loads(data)

            # Returning False tells tweepy to disconnect the stream.
            if (time.time() - self.start_time) >= self.limit:
                return False

            if 'user' in dict_data and dict_data['user']['location']:
                try:
                    # Geocode once and reuse: the previous version issued two
                    # identical geocoder requests per tweet (one for lat, one
                    # for lon).
                    latlng = geocoder.google(
                        dict_data['user']['location']).latlng
                    doc = {
                        "author": dict_data["user"]["screen_name"],
                        "date": dict_data["created_at"],
                        "location": {
                            "name": dict_data['user']['location'],
                            "coords": {
                                "lat": latlng[0],
                                "lon": latlng[1]
                            }
                        },
                        "message": dict_data["text"],
                        # `query` is the enclosing view's search term; it is
                        # bound before stream.filter() starts delivering data.
                        "my_id": query
                    }
                    es.index(index="tweetmap", doc_type="tweets", body=doc)
                except Exception:
                    # Best effort: skip tweets that fail geocoding/indexing.
                    # Narrowed from a bare `except:` so KeyboardInterrupt and
                    # SystemExit are no longer swallowed.
                    pass
            # Explicit True (was an implicit None on the no-location path);
            # either way tweepy keeps the stream open.
            return True

        # on failure
        def on_error(self, status):
            print(status)

    # Create the listener and the authenticated stream.
    listener = TweetStreamListener()
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    stream = Stream(auth, listener)
    query = str(request.POST.get('myword'))
    stream.timeout = 10
    try:
        # Track both the bare keyword and its hashtag form.
        stream.filter(track=[query, '#' + query])
    except Exception:
        # Treat any streaming failure (including the listener's False-return
        # disconnect) as the end of collection.
        pass

    # Pull everything indexed under this query back out for rendering.
    pass_list = {'tweet': []}
    res = es.search(size=5000,
                    index="tweetmap",
                    doc_type="tweets",
                    body={"query": {
                        "match": {
                            "my_id": query
                        }
                    }})
    for hit in res['hits']['hits']:
        pass_list['tweet'].append(hit['_source'])
    pass_list_final = json.dumps(pass_list)
    return render(request, "index.html", {"my_data": pass_list_final})
Example #5
0
def gettweet(request):
    """Stream tweets matching the POSTed keyword for ~10s, index each one
    into the 'sentiment' index under a sequential id, then render them all
    into index.php as JSON under ``my_data``.

    NOTE(review): relies on ``host``, ``awsauth``, ``ckey``, ``csecret``,
    ``atoken`` and ``asecret`` being defined at module level — confirm.
    """
    pass_list = {}
    tweets = []
    print("this is getweet")

    # Elasticsearch client against an AWS-hosted domain, SigV4-signed HTTPS.
    es = Elasticsearch(hosts=[{
        'host': host,
        'port': 443
    }],
                       http_auth=awsauth,
                       use_ssl=True,
                       verify_certs=True,
                       connection_class=RequestsHttpConnection)

    query = str(request.POST.get('myword'))

    class TweetStreamListener(StreamListener):
        """Indexes located tweets with sequential ids for ``time_limit`` s."""

        # Count of documents indexed so far; doubles as the next document id.
        i = 0

        def __init__(self, time_limit=10):
            self.start_time = time.time()
            self.limit = time_limit

        def on_data(self, data):
            """Index one incoming tweet; return False once time is up."""
            dict_data = json.loads(data)
            print(self.i)

            # Returning False tells tweepy to disconnect the stream.
            if (time.time() - self.start_time) >= self.limit:
                return False

            if 'user' in dict_data and dict_data['user']['location']:
                try:
                    # Geocode once and reuse: the previous version issued two
                    # identical geocoder requests per tweet.
                    latlng = geocoder.google(
                        dict_data['user']['location']).latlng
                    es.index(index="sentiment",
                             doc_type="test-type",
                             id=self.i,
                             body={
                                 "author": dict_data["user"]["screen_name"],
                                 "date": dict_data["created_at"],
                                 "location": dict_data['user']['location'],
                                 "lat": latlng[0],
                                 "lng": latlng[1],
                                 "message": dict_data["text"]
                             })

                    # Echo the freshly stored document for debugging.
                    print(
                        es.get(index='sentiment',
                               doc_type='test-type',
                               id=self.i))

                    self.i += 1
                except Exception:
                    # Skip tweets that fail geocoding/indexing. Narrowed from
                    # a bare `except:` so Ctrl-C still interrupts the stream.
                    pass
            # Explicit True (was an implicit None on the no-location path);
            # either way tweepy keeps the stream open.
            return True

        # on failure
        def on_error(self, status):
            print(status)

        def on_timeout(self):
            print("Timeout")

    # Create the listener and the authenticated stream.
    listener = TweetStreamListener()
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    stream = Stream(auth, listener)
    stream.timeout = 10

    # Search Twitter for the requested keyword.
    try:
        stream.filter(track=[query])
    except Exception:
        # Any streaming failure (including the listener's False-return
        # disconnect) ends collection; report how many tweets were stored.
        print(listener.i)

    print(listener.i)
    # Fetch every indexed document back by its sequential id.
    for tweetno in range(listener.i):
        tweets.append(
            es.get(index='sentiment', doc_type='test-type', id=tweetno))

    print(tweets)
    pass_list["mytweets"] = tweets
    pass_list_final = json.dumps(pass_list)
    return render(request, "index.php", {"my_data": pass_list_final})