Example #1
#!/usr/bin/env python
# http://oscon.com/oscon2012/public/schedule/detail/24416

# twy_a.py (by Wesley Chun under Apache2 license)
# http://www.apache.org/licenses/LICENSE-2.0.html
from distutils.log import warn as printf
try:
    import twython
except ImportError:
    import twython3k as twython
import settings

printf('\n*** Get user status (authorization required)')
twitter = twython.Twython(
    twitter_token=settings.CONSUMER_KEY,
    twitter_secret=settings.CONSUMER_SECRET,
    oauth_token=settings.ACCESS_TOKEN,
    oauth_token_secret=settings.ACCESS_TOKEN_SECRET,
)
data = twitter.verifyCredentials()
status = data['status']
printf('''
    User: @%s
    Date: %s
    Tweet: %s''' % (data['screen_name'], status['created_at'], status['text']))
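
The settings module imported above is not part of the listing; a minimal placeholder exposing the four names the script reads (values are dummies):

# settings.py (hypothetical) -- fill in your own application credentials
CONSUMER_KEY = 'YOUR_CONSUMER_KEY'
CONSUMER_SECRET = 'YOUR_CONSUMER_SECRET'
ACCESS_TOKEN = 'YOUR_ACCESS_TOKEN'
ACCESS_TOKEN_SECRET = 'YOUR_ACCESS_TOKEN_SECRET'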
Example #2
import twython as T, maccess, time, pickle
import multiprocessing as mp

terms = [
    "#jesus", "#g1", "#prayforthephilippines", "#felipao", "#ReadyForScorch",
    "#obama", "#dilma", "#science", "#god"
]
fnames = [
    "pickleDir/{}".format(term.replace("#", "HASH") + ".pickle")
    for term in terms
]
twitters = []
searches = []
for tw, term in zip(maccess.TW, terms):
    twitter = T.Twython(app_key=tw.tak,
                        app_secret=tw.taks,
                        oauth_token=tw.tat,
                        oauth_token_secret=tw.tats)
    search = twitter.cursor(twitter.search, q=term)
    #search = twitter.search_gen(term)
    twitters.append(twitter)
    searches.append(search)


def rodaSearch(search, fname, output, rr=None):
    if not rr:
        rr = []
    for result in zip(range(2500), search):  # cap at 2500 results per term
        print("result " + fname)
        rr.append(result)
    f = open(fname, "wb")
    rrr = rr[:]
    pickle.dump(rrr, f)  # assumed completion; the source excerpt truncates here
    f.close()
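
multiprocessing is imported but never used in this excerpt; a plausible dispatch, assuming one process per term and a shared queue for the otherwise-unused output argument:

if __name__ == "__main__":
    output = mp.Queue()
    procs = [mp.Process(target=rodaSearch, args=(search, fname, output))
             for search, fname in zip(searches, fnames)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()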
Example #3
import webapp2
import time
import sys
import random
import twython
from twython import Twython, TwythonError
from google.appengine.ext import db

CONSUMER_KEY = '???'
CONSUMER_SECRET = '???'
ACCESS_KEY = '???'
ACCESS_SECRET = '???'

from datetime import date
today = date.today()
future = date(2018, 11, 6)  # midterm election day, matching the tweet text
diff = future - today
NUMBER = diff.days

twitter = twython.Twython(
                          CONSUMER_KEY,
                          CONSUMER_SECRET,
                          ACCESS_KEY,
                          ACCESS_SECRET
                          )

photo = open("%s.jpg" % NUMBER, 'rb')
response = twitter.upload_media(media=photo)
twitter.update_status(
    status="%s days until the United States Midterm Elections on November 6th, 2018. #election2018 #campaign2018 #politics" % NUMBER,
    media_ids=[response['media_id']])

Example #4
#!/usr/bin/python
# coding: utf-8

import os
import twython
import urllib3.contrib.pyopenssl

#urllib3.contrib.pyopenssl.inject_into_urllib3()
api = twython.Twython(os.getenv("CONSUMER_KEY"),
                      os.getenv("CONSUMER_SECRET"),
                      os.getenv("ACCESS_KEY"),
                      os.getenv("ACCESS_SECRET"))

def tweet(s):
    try:
        api.update_status(status=s)
    except twython.TwythonError as e:
        print(e)

if __name__ == '__main__':
    tweet("test message 1")

Example #5
import twython
import sys
import subprocess

app_key = "XXX"
app_secret = "XXX"
oauth_token = "XXX"
oauth_token_secret = "XXX"

client_args = {'verify': False}  # disables SSL certificate verification

twitter = twython.Twython(app_key,
                          app_secret,
                          oauth_token,
                          oauth_token_secret,
                          client_args=client_args)

twitter.verify_credentials()
pic = sys.argv[1]
print(pic)
try:
    f = open(pic, 'rb')
except IOError:
    print("Error: can't find or open file")
else:
    # Tweet the picture
    twitter.update_status_with_media(
        status="jarvis detected motion in workspace2", media=f)

subprocess.call(["mplayer", "/home/jarvis/alarm/audio/pic_posted.m4a"])
Example #6
 def __init__(self) -> None:
     """Initialise the Twython object"""
     self.twitter = twython.Twython(APP_KEY, APP_SECRET)
     self.ids = self.get_tweet_ids()
     self.ids_seen = []
Example #7
 def __init__(self, credentials):
     self.twitter = twython.Twython(*credentials)
Example #8
import os, time
import cPickle as pickle
import twython as Twython
from urllib import quote
from SETTINGS import *

api = Twython.Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
latest_tweet_id = 0

def first_run():
    file_exists = os.path.exists('sav.p')
    if file_exists is False:
        # 'count' is how many tweets back from the most recent tweet
        # will be forwarded to Telegram
        user_timeline = api.get_user_timeline(screen_name=user_name, count=2)
        tweet_id = user_timeline[1]['id']
        file_pickle(tweet_id)
def get_timeline(latest_tweet_id):
    user_timeline = api.get_user_timeline(screen_name=user_name, since_id=latest_tweet_id)
    return user_timeline
def read_latest_id():
    line = file_unpickle()
    if len(str(line)) < 2:
        return 0
    else:
        return line
def send_message(msg):
    msg = quote(msg, safe='')
    # '\&' keeps the shell from treating '&' as a command separator
    link = 'https://api.telegram.org/bot'+telegram_token+'/sendMessage?chat_id=@'+channel_name+'\&text="' + msg + '"'
    os.system('curl '+ link)
   
def file_pickle(var):
    pickle.dump(var, open("sav.p", "wb"))
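
read_latest_id calls file_unpickle, which the listing omits; a minimal counterpart to file_pickle:

def file_unpickle():
    return pickle.load(open("sav.p", "rb"))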
Example #9
    humid_str = "{0:.1f}".format(gsd.sensor_data['humidity'])
    pressure_str = "{0:.1f}".format(gsd.sensor_data['pressure'])

    config_file = ConfigParser.SafeConfigParser()
    config_file_path = path.dirname(
        path.abspath(__file__)) + "/.twitter_config"
    config_file.read(config_file_path)

    consumerKey = config_file.get("settings", "consumerKey")
    consumerSecret = config_file.get("settings", "consumerSecret")
    accessToken = config_file.get("settings", "accessToken")
    accessSecret = config_file.get("settings", "accessSecret")
    replyTarget = config_file.get("settings", "replyTarget")

    api = twython.Twython(app_key=consumerKey,
                          app_secret=consumerSecret,
                          oauth_token=accessToken,
                          oauth_token_secret=accessSecret)

    # "The current temperature is X degrees, humidity Y%, pressure Z hPa"
    tweet_str = "@" + replyTarget + " 現在の温度は" + temp_str + "度 湿度は" + humid_str + "% 気圧は" + pressure_str + "hPa です" + '\r\n'
    if gsd.sensor_data['temp'] > 25:
        tweet_str += "暑いですね" + '\r\n'  # "It's hot, isn't it"

    if gsd.sensor_data['temp'] < 10:
        tweet_str += "寒いですね" + '\r\n'  # "It's cold, isn't it"

    if gsd.sensor_data['humidity'] > 60:
        tweet_str += "むしむししますね" + '\r\n'  # "It's muggy, isn't it"

    try:
        api.update_status(status=tweet_str)
    except twython.TwythonError as e:
        print(e)  # assumed minimal handler; the source excerpt ends here
Example #10
from geopy import Nominatim
import json  # needed for json.loads below
import requests
import twython as tw
from motionless import DecoratedMap, LatLonMarker

WHITE_PAGES_API_KEY = 'ea04b1f16b56a1b1a21b0159b8b1990e'

TWITTER_KEY = 'YABlxDeSUvuJGLcJoGsuFpCvA'
TWITTER_SECRET = 'ALttKP1BNhsRqvGL2PR9mkcGwdgh3gKs05v8pfjJrLElrxjQ8L'
TWITTER_TOKEN = '4840597894-HxW7lZoZQIbmkEHTjfaeb9wCDVZyLNPxAvczMZr'
TWITTER_TOKEN_SECRET = 'ULfOa5W84nJCj5mEvULVqtmTDFWI2x8ooqgvzNonVqhIR'

SEARCH_RADIUS = '100'  # tweet search radius
SEARCH_UNITS = 'mi'

TWITTER = tw.Twython(TWITTER_KEY, TWITTER_SECRET, TWITTER_TOKEN,
                     TWITTER_TOKEN_SECRET)


def query_white_pages(phone_number):
    '''
        Returns the address of an input phone number
    '''
    req = 'https://proapi.whitepages.com/2.1/phone.json?api_key=%s&phone_number=%s' % (
        WHITE_PAGES_API_KEY, phone_number)
    result = requests.get(req)

    asDict = json.loads(result.text)

    locationValues = asDict['results'][0]['best_location']

    locKeys = [
Example #11
 def __init__(self, consumer_key, consumer_secret,
              access_token_key, access_token_secret):
     self.twitter = twython.Twython(consumer_key, consumer_secret,
                                    access_token_key, access_token_secret)
Example #12
import tweetlib
import sys
import os
import twython
import random

api = twython.Twython()

try:
    tweeterfile_loc = sys.argv[1]
    feedslib = sys.argv[2]
except IndexError:
    tweeterfile_loc = 'C:\\Users\\Stoop\\Desktop\\Scriptie\\tweetdata\\twitteraars.txt'
    feedslib = 'C:\\Users\\Stoop\\Desktop\\Scriptie\\tweetdata\\feeds\\'

# Get all tweeters, and shuffle them (so we don't always start with the same)
tweeterfile = open(tweeterfile_loc, 'r')
tweeters = []

for tweeter in tweeterfile:
    tweeters.append(tweeter[:-1])

random.shuffle(tweeters)

# See if they have tweets we don't have (yet)
for tweeter in tweeters[:25]:

    if tweeter == '':
        continue
Example #13
import os
import twython
import nltk
import midiutil

APP_KEY = os.environ['APP_KEY']
APP_SECRET = os.environ['APP_SECRET']
authentication = twython.Twython(APP_KEY, APP_SECRET, oauth_version=2)
ACCESS_TOKEN = authentication.obtain_access_token()
twitter = twython.Twython(APP_KEY, access_token=ACCESS_TOKEN)


def get_pos(text):
    tokens = nltk.word_tokenize(text)
    tagged_tokens = nltk.pos_tag(tokens)
    return tagged_tokens


def clean_tweet(pos):
    cleaned = []
    is_username = False
    for tag in pos:
        print(tag)
        # eliminate emojis and punctuation
        escaped = tag[0].encode('unicode-escape').decode('ascii')
        if ('\\u' not in escaped and '\\U' not in escaped
                and '.' not in tag[1] and ',' not in tag[1]
                and ':' not in tag[1]):
            if '@' in tag[0]:
                # don't add the @ symbol on its own, add it to the username
                is_username = True
            else:
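                # the excerpt truncates here; a plausible completion, assuming
                # the flag reattaches '@' to the username token that follows
                if is_username:
                    cleaned.append('@' + tag[0])
                    is_username = False
                else:
                    cleaned.append(tag[0])
    return cleaned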
Example #14
import os, time
try:
    import cPickle as pickle
except:
    import pickle

import twython as Twython
try:
    from urllib import quote  # Python 2
except ImportError:
    from urllib.parse import quote  # Python 3
from src import setting as st

api = Twython.Twython(st.APP_KEY, st.APP_SECRET, st.OAUTH_TOKEN, st.OAUTH_TOKEN_SECRET)
latest_tweet_id = 0


def first_run():
    file_exists = os.path.exists('sav.p')
    if file_exists is False:
        # 'count' is how many tweets back from the most recent tweet
        # will be forwarded to Telegram
        user_timeline = api.get_user_timeline(screen_name=st.user_name,
                                              count=2)
        tweet_id = user_timeline[1]['id']
        file_pickle(tweet_id)


def get_timeline(latest_tweet_id):
    user_timeline = api.get_user_timeline(screen_name=st.user_name, since_id=latest_tweet_id)
    return user_timeline


def read_latest_id():
    line = file_unpickle()
Example #15
import os
import time
import twython as twy  # needed for twy.Twython below


# Set working directory
os.chdir('/path/to/your/directory/')  # I set my parent directory to a folder containing subfolders for data, figures, and scripts

# Get OAuth credentials.  Need to copy the access token and access token secret as well.  Run this code each time you use the Twitter API.
APP_KEY = 'aWZoLAPZ5aSgImEwvnKG4In1l'
APP_SECRET = 'TWXKRftQs9q5UFkJwrIOJA7muEBLL7L2HATJoZ0V8ANDMZmhBM'
OAUTH_TOKEN = '24574024-xETTs1DL50vFGJbztRlg79s4EMmzlEKIrDgx5q8bz'
OAUTH_TOKEN_SECRET = 'h38UP5vQpktpyiEOv7GBaObdR43zh1lcAvkI4O4WWwo4Y'


# Connect
connection = twy.Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

# How many tweets to return per query
size = 100  # Modify as needed, maximum is 100

# One word
nba_tweets = connection.search(q='nba', count=size)  # Also returns hashtags, and Twitter is not case sensitive.

nba_tweets2 = connection.search(q='nba', count=size)  # Search queries launched almost simultaneously return different results.

# Mentions
obama = connection.search(q='@BarackObama', count=size)

# Multiple words
nba_tweets3 = connection.search(q='nba OR nra', count=size)
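
search returns a parsed JSON dict; the tweets themselves sit under 'statuses'. For example, to pull out just the text:

nba_texts = [tweet['text'] for tweet in nba_tweets['statuses']]
print(nba_texts[:5])  # first five tweet texts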
Example #16
def get_twython_instance(api):
    """ Initialize an instance of Twython with variables needed """

    logger.info("Connecting to Twitter API...")
    return twython.Twython(api['CONSUMER_KEY'], api['CONSUMER_SECRET'],
                           api['ACCESS_TOKEN'], api['ACCESS_TOKEN_SECRET'])
Example #17
 def setup_twitter(self):
     credentials = json.load(open('credentials.json'))
     self.twitter = twython.Twython(credentials["api_key"],
                                    credentials["api_secret"],
                                    credentials["access_token"],
                                    credentials["token_secret"])
Example #18
import random
import sys
import twython

from generate import generate, get_subj_from_wikilink

consumer_key, consumer_secret, access_token, token_secret = sys.argv[1:]
twitter = twython.Twython(consumer_key, consumer_secret, access_token,
        token_secret)

pool = [s.strip() for s in open("pool.txt").readlines()]
if random.randrange(8) > 0:
    subj = get_subj_from_wikilink(
            'http://en.wikipedia.org' + random.choice(pool))
    status = generate(subj)
else:
    status = generate()
twitter.update_status(status=status)
Example #19
def twitter_api(token, token_secret, consumer_key, consumer_secret):
    return twython.Twython(app_key=consumer_key,
                           app_secret=consumer_secret,
                           oauth_token=token,
                           oauth_token_secret=token_secret)
Example #20
                                                'twitter', 'Response: 200 OK'))
        datadog_thread.daemon = True
        datadog_thread.start()

    tweets_read_file = os.path.join(bot.config_path, "Tweets Read.txt")
    if not os.path.isfile(tweets_read_file):
        bot.log.info("Creating: {}".format(tweets_read_file))
        with open(tweets_read_file, 'w') as file:
            file.write('')
        tweets_read = []
    else:
        with open(tweets_read_file, 'r') as file:
            tweets_read = file.read().splitlines()

    api = twython.Twython(twitter_keys['consumer_key'],
                          twitter_keys['consumer_secret'],
                          twitter_keys['access_token'],
                          twitter_keys['access_token_secret'])
    bot.api = api
    read_notifications()
    bot.log.info("Reading Twitter Stream.")
    stream = TwitterStream(twitter_keys['consumer_key'],
                           twitter_keys['consumer_secret'],
                           twitter_keys['access_token'],
                           twitter_keys['access_token_secret'],
                           timeout=120,
                           retry_count=3,
                           retry_in=10)
    stream.statuses.filter(
        track=', '.join([x.lower() for x in bot.settings['twitter_track']]))
Example #21
#!/usr/bin/env python
# http://oscon.com/oscon2012/public/schedule/detail/24416

# twy_u.py (by Wesley Chun under CC-SA3.0 license)
from distutils.log import warn as printf
try:
    import twython
except ImportError:
    import twython3k as twython

TMPL = '''
     User: @%(from_user)s
     Date: %(created_at)s
     Tweet: %(text)s'''

printf('\n*** Search for "python" (authorization NOT required)')
twitter = twython.Twython()
data = twitter.search(q='python')
for tweet in data['results']:
    printf(TMPL % tweet)
Example #22
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import twython, time, sys, json, datetime, argparse

aparser = argparse.ArgumentParser(description='Retrieve media tweets as list of IDs')
aparser.add_argument('-n', '--nb', help='Number of tweets to retrieve', default=50000)
aparser.add_argument('-o', '--outfile', help='Output file', required=True)
args = aparser.parse_args()

# Connect to Twitter
from config import *
twitter = twython.Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
#twitter.verify_credentials()

# Query
query = '#Euro2016 AND since:2016-06-10 AND lang:fr'

# Open output file
outfile = open(args.outfile, 'w')

# Loop to retrieve tweets
nb = 0
nbMax = int(args.nb)
maxid = None
excludeRetweets = False
while nb < nbMax:
    print('--- Retrieved:', nb, '(maxid:', str(maxid), ')')
    try:
        tweets = []
        print('--- Searching')
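        # the excerpt truncates here; a plausible continuation, assuming
        # standard max_id pagination over the v1.1 search API
        if maxid:
            result = twitter.search(q=query, count=100, max_id=maxid)
        else:
            result = twitter.search(q=query, count=100)
        tweets = result['statuses']
        if not tweets:
            break
        for tweet in tweets:
            outfile.write(tweet['id_str'] + '\n')
        nb += len(tweets)
        maxid = tweets[-1]['id'] - 1
    except twython.TwythonRateLimitError:
        time.sleep(60)  # back off when rate limited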
Example #23
# Given a hashtag, get all the tweets, stuff them in a json.

import argparse, csv, collections, json, twython, ConfigParser, time
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', default='config.txt')
parser.add_argument('--hashtag', default='chi2016')
parser.add_argument('--outfile')
args = parser.parse_args()

config = ConfigParser.ConfigParser()
config.read(args.config_file)

# Man, I thought the runaround was easier than this.
twitter = twython.Twython(config.get('twitter', 'app_key'),
                          config.get('twitter', 'app_secret'),
                          oauth_version=2)
ACCESS_TOKEN = twitter.obtain_access_token()

twitter = twython.Twython(config.get('twitter', 'app_key'),
                          access_token=ACCESS_TOKEN,
                          oauth_version=2)

if not args.hashtag.startswith('#'):
    args.hashtag = '#' + args.hashtag

statuses = []
max_id = None
for i in range(10):  # retrieve max 10 * 100 tweets
    if not max_id:
        results = twitter.search(q=args.hashtag, count=100)
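    # the excerpt truncates here; a plausible else branch completing the
    # usual max_id pagination
    else:
        results = twitter.search(q=args.hashtag, count=100, max_id=max_id)
    if not results['statuses']:
        break
    statuses.extend(results['statuses'])
    max_id = results['statuses'][-1]['id'] - 1
    time.sleep(2)  # be gentle with the rate limit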
Example #24
def main(unused):
    twitter = twython.Twython(CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN,
                              OAUTH_TOKEN_SECRET)
    if not twitter:  # note: the constructor does not validate credentials
        logging.fatal('Invalid twitter credentials!')

    if not FLAGS.input_tweet_ids_file:
        logging.fatal('Must specify --input_tweet_ids_file!')

    if not FLAGS.output_tweets_directory:
        logging.fatal('Must specify --output_tweets_directory!')

    if not os.path.isdir(FLAGS.output_tweets_directory):
        os.makedirs(FLAGS.output_tweets_directory)

    # Prevents us from sending too many requests to Twitter too quickly.
    limiter = ratelimiter.RateLimiter(max_calls=FLAGS.rate_limit, period=1.5)

    # Fetches a single Tweet at a time.
    def GetTweet(id):
        with limiter:
            return twitter.show_status(id=id)

    # Fetches up to 100 Tweets at a time.
    def GetTweets(ids):
        if len(ids) > 100:
            logging.fatal('Max 100 ids per batch lookup')

        combined_ids = ','.join(ids)

        with limiter:
            return twitter.lookup_status(id=combined_ids)

    # Maps tweet id to the actual data of the tweet (text, timestamp, etc).
    tweet_id_to_tweet_data = dict()

    # Maps tweet id to which file the actual data is in.
    tweet_id_to_tweet_data_filename = dict()

    tweet_mapping_filename = os.path.join(FLAGS.output_tweets_directory,
                                          'tweet_mapping.json')
    if os.path.exists(tweet_mapping_filename):
        with open(tweet_mapping_filename, 'r') as tweet_mapping_file:
            tweet_id_to_tweet_data_filename = json.load(tweet_mapping_file)

    with open(FLAGS.input_tweet_ids_file, 'r') as input_tweet_ids_file:
        tweet_ids_to_fetch = []
        for tweet_id in input_tweet_ids_file:
            tweet_id = tweet_id.strip()

            # Already fetched this Tweet before, don't do it again.
            if tweet_id in tweet_id_to_tweet_data_filename:
                logging.info('Skipping fetch tweet ' + tweet_id)
                continue

            # Add this Tweet to the batch of Tweets to lookup next.
            tweet_ids_to_fetch.append(tweet_id)

            # Lookup in batches of 100
            if len(tweet_ids_to_fetch) < 100:
                continue

            logging.info('Fetching batch of tweets...')

            while True:
                try:
                    tweet_datas = GetTweets(tweet_ids_to_fetch)
                    for tweet_data in tweet_datas:
                        tweet_id = tweet_data['id_str']
                        tweet_id_to_tweet_data[tweet_id] = tweet_data

                    # Mark that we've already tried to fetch failures.
                    for tweet_id_to_fetch in tweet_ids_to_fetch:
                        if tweet_id_to_fetch not in tweet_id_to_tweet_data:
                            tweet_id_to_tweet_data[tweet_id_to_fetch] = {}

                    tweet_ids_to_fetch = []
                    break
                except twython.TwythonRateLimitError as err:
                    logging.info(str(err) + ' ... trying again')
                    continue
                except twython.TwythonError as err:
                    logging.fatal(err)

            # Dump the Tweets to a file in batches.
            if len(tweet_id_to_tweet_data) >= FLAGS.output_tweets_file_size:
                tweet_data_basename = GetNextBasename(
                    FLAGS.output_tweets_directory)
                tweet_data_filename = os.path.join(
                    FLAGS.output_tweets_directory, tweet_data_basename)
                with open(tweet_data_filename, 'w') as tweet_data_file:
                    json.dump(tweet_id_to_tweet_data, tweet_data_file)

                for tweet_id in tweet_id_to_tweet_data:
                    tweet_id_to_tweet_data_filename[
                        tweet_id] = tweet_data_basename

                with open(tweet_mapping_filename, 'w') as tweet_mapping_file:
                    json.dump(tweet_id_to_tweet_data_filename,
                              tweet_mapping_file)

                tweet_id_to_tweet_data = dict()
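
GetNextBasename is called above but not included in the excerpt; a plausible helper that returns the next unused numbered file name (the name pattern is an assumption):

def GetNextBasename(directory):
    # Hypothetical: pick the first 'tweets-NNNNN.json' not already present.
    n = 0
    while os.path.exists(os.path.join(directory, 'tweets-%05d.json' % n)):
        n += 1
    return 'tweets-%05d.json' % n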
Example #25
from urllib2 import urlopen
from bs4 import BeautifulSoup
import requests
import twython  # needed for twython.Twython below
import pandas as pd  # needed for pd.read_csv below

# GET TWEET
with open("twitter_keys.txt") as f:
    content = f.readlines()

# Twitter API keys go here
CONSUMER_KEY = content[0].rstrip()
CONSUMER_SECRET = content[1].rstrip()

OAUTH_TOKEN = content[2].rstrip()
OAUTH_TOKEN_SECRET = content[3].rstrip()

twitter = twython.Twython(CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN,
                          OAUTH_TOKEN_SECRET)

response = twitter.search(
    q=
    '#firstworldproblems AND [worst OR ruined OR dying OR worse OR hate OR annoying OR pissed OR annoyed OR panic OR suffering OR distraught OR bitch OR damn OR f*****g OR f****d OR hell OR starving OR stupid OR forever]',
    result_type='recent',
    lang='en',
    count=1)

first_tweet = response['statuses'][0]
first_world_tweet = first_tweet.get('text')
target = first_tweet['user']['screen_name']
targetID = first_tweet['id_str']

## use naive bayes classifier to classify the tweet
csvFile = pd.read_csv("Training_test/training.csv",