def submit_twit():
    """Read the seven form fields and launch the Twitter search with them."""
    consumer_key = text_input1.get()
    consumer_secret = text_input2.get()
    access_token = text_input3.get()
    access_secret = text_input4.get()
    latitude = text_input5.get()
    longitude = text_input6.get()
    radius = text_input7.get()
    twitter(consumer_key, consumer_secret, access_token, access_secret,
            latitude, longitude, radius)
def post(self, user_name, project_name):
    """Fetch tweets for a posted query (@user or #hashtag) and store them as CSV in S3.

    Args:
        user_name: owner segment of the bucket path.
        project_name: project segment of the bucket path.

    Returns:
        (dict, int): response payload and HTTP status — 201 on success,
        400 when the Twitter API rejects the query.
    """
    try:
        # config = configparser.ConfigParser()
        # config.read('../grafluent.ini')
        bucket_name = os.getenv('DEFAULT_BUCKET')
        parser = reqparse.RequestParser()
        # FIX: these two add_argument calls were joined by a stray trailing
        # comma into one tuple expression; they are independent statements.
        parser.add_argument(
            'query',
            required=True,
            help='Please provide a valid username of hashtag to search for.',
            trim=True)
        parser.add_argument(
            'num_req',
            required=True,
            type=int,
            help='Please provide the number of requests for Twitter API')
        args = parser.parse_args()
        if re.match(r'^#', args['query']):
            # Hashtag query: file name is the joined, lower-cased tags.
            tweet = twitter(hashtag=args['query'])
            fname = '_'.join(re.findall(r"#(\w+)", tweet.getHashtag()))
            fname = fname.lower()
        else:
            # User query: strip a leading '@' and use the handle.
            tweet = twitter(twitterID=re.sub(r'^@', '', args['query']))
            fname = tweet.getTwitterID().lower()
        tweets = tweet.createDF(args['num_req'])
        df_list = tweets.values.tolist()
        csv = bucket_name.strip() + '/%s/%s/source_documents/twitter_%s.csv' % (
            user_name.strip(), project_name.strip(), fname.strip())
        with s3.open(csv, 'w') as file:
            tweets.to_csv(file)
        return {
            'status': 'Posted new data',
            'file_name': str(fname) + '.csv',
            'data': df_list
        }, 201
    except tweepy.error.TweepError:
        return {'status': 'Please enter valid query'}, 400
def test_twitter_username_with_tweet_number(self, mock_http_get):
    """A username followed by an index returns that specific tweet."""
    mock_http_get.return_value = get_fixture_file_data(self, 'user_loneblockbuster.json')
    expected = (
        u'2018-07-24 19:30:59 \x02loneblockbuster\x02: '
        u'We never would have planted the ferns out '
        u'front if we knew we\'d get so many death threats.'
    )
    actual = twitter('loneblockbuster 10', api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_username_with_tweet_number(self, mock_http_get):
    """A username followed by an index returns that specific tweet."""
    mock_http_get.return_value = get_fixture_file(self, 'user_loneblockbuster.json')
    expected = (
        u'2018-07-24 19:30:59 \x02loneblockbuster\x02: '
        u'We never would have planted the ferns out '
        u'front if we knew we\'d get so many death threats.'
    )
    actual = twitter('loneblockbuster 10', api_key=FAKE_API_KEY)
    assert actual == expected
def getData(self):
    """Sample the moisture sensor forever, tweet when dry, log readings to CSV.

    One iteration: read the ADC over SPI, convert to a percentage, timestamp
    the reading, then either tweet a watering request (when dry) or append a
    row to data.csv. Sleeps 30 minutes between samples either way.
    """
    while True:
        # Preform SPI transaction and store returned bits in 'r'
        self.r = spi.xfer([1, (8 + self.channel) << 4, 0])
        self.adcOut = ((self.r[1] & 3) << 8) + self.r[2]
        # Map raw ADC to a percentage — assumes ~300..1024 raw range; TODO confirm calibration
        self.percent = 100 - int(round((self.adcOut - 300) / 7.24))
        self.date = datetime.datetime.now().strftime("%B %d %Y")
        self.time = datetime.datetime.now().strftime("%H:%M")
        self.weekday = datetime.datetime.today().weekday()
        if self.percent > 70:
            message = "Hey %s, my water level is at %d, please water me" % (mymaintwitter, self.percent)
            twitter.twitter().tweet(message)
            # BUG FIX: the original `continue` jumped straight back to the top,
            # skipping time.sleep(1800) — the loop span at full speed and
            # tweeted continuously while dry. Sleep before skipping the log.
            time.sleep(1800)
            continue
        with open("data.csv", "a") as out:
            fileWriter = csv.writer(out, delimiter=",")
            fileWriter.writerow([self.adcOut, self.percent, self.date, self.time, self.weekday])
        time.sleep(1800)
def test_twitter_username_no_tweet_number(self, mock_http_get):
    """A bare username returns the latest tweet from the fixture timeline."""
    mock_http_get.return_value = get_fixture_file_data(self, 'user_loneblockbuster.json')
    expected = (
        u'2018-08-16 17:38:52 \x02loneblockbuster\x02: '
        u'We had the motto "When you\'re here you\'re '
        u'family" before Olive Garden but like '
        u'everything else it was taken away '
        u'from us.'
    )
    actual = twitter('loneblockbuster', api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_username_no_tweet_number(self, mock_http_get):
    """A bare username returns the latest tweet from the fixture timeline."""
    mock_http_get.return_value = get_fixture_file(self, 'user_loneblockbuster.json')
    expected = (
        u'2018-08-16 17:38:52 \x02loneblockbuster\x02: '
        u'We had the motto "When you\'re here you\'re '
        u'family" before Olive Garden but like '
        u'everything else it was taken away '
        u'from us.'
    )
    actual = twitter('loneblockbuster', api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_username_with_tweet_number(self, mock_http_get):
    """A username followed by an index returns that specific tweet."""
    fixture = get_fixture_file_data(self, "user_loneblockbuster.json")
    mock_http_get.return_value = fixture
    expected = u"2018-07-24 19:30:59 \x02loneblockbuster\x02: " \
               u"We never would have planted the ferns out " \
               u"front if we knew we'd get so many death threats."
    actual = twitter("loneblockbuster 10", api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_hashtag_with_tweet_number(self, mock_http_get):
    """A hashtag followed by an index returns that tweet from the search fixture."""
    mock_http_get.return_value = get_fixture_file(self, 'hashtag_nyc.json')
    expected = (
        u'2018-08-17 20:19:32 \x02Kugey\x02: '
        u'I know for sure that life is beautiful '
        u'around the world... \u2022 \u2022 Here '
        u'is yet another iconic piece of the '
        u'NYC skyline... \u2022 \u2022 #nyc '
        u'#photography #streetphotography '
        u'#urbanphotography\u2026 '
        u'https://t.co/bq9i0FZN89'
    )
    actual = twitter('#NYC 10', api_key=FAKE_API_KEY)
    assert actual == expected
def getData(self):
    """Sample the moisture sensor forever, tweet when dry, log readings to CSV.

    Reads the ADC over SPI, converts the raw value to a percentage, then
    either tweets a watering request (dry) or appends a row to data.csv.
    Sleeps 30 minutes between samples either way.
    """
    while True:
        # Preform SPI transaction and store returned bits in 'r'
        self.r = spi.xfer([1, (8 + self.channel) << 4, 0])
        self.adcOut = ((self.r[1] & 3) << 8) + self.r[2]
        # Map raw ADC to a percentage — assumes ~300..1024 raw range; TODO confirm calibration
        self.percent = 100 - int(round((self.adcOut - 300) / 7.24))
        self.date = datetime.datetime.now().strftime("%B %d %Y")
        self.time = datetime.datetime.now().strftime("%H:%M")
        self.weekday = datetime.datetime.today().weekday()
        if self.percent > 70:
            message = "Hey %s, my water level is at %d, please water me" % (
                mymaintwitter, self.percent)
            twitter.twitter().tweet(message)
            # BUG FIX: the original `continue` skipped time.sleep(1800), so
            # while dry the loop ran flat-out and tweeted continuously.
            # Sleep before skipping the CSV write.
            time.sleep(1800)
            continue
        with open('data.csv', 'a') as out:
            fileWriter = csv.writer(out, delimiter=',')
            fileWriter.writerow([
                self.adcOut, self.percent, self.date, self.time, self.weekday
            ])
        time.sleep(1800)
def test_twitter_hashtag_with_tweet_number(self, mock_http_get):
    """A hashtag followed by an index returns that tweet from the search fixture."""
    mock_http_get.return_value = get_fixture_file_data(self, 'hashtag_nyc.json')
    expected = (
        u'2018-08-17 20:19:32 \x02Kugey\x02: '
        u'I know for sure that life is beautiful '
        u'around the world... \u2022 \u2022 Here '
        u'is yet another iconic piece of the '
        u'NYC skyline... \u2022 \u2022 #nyc '
        u'#photography #streetphotography '
        u'#urbanphotography\u2026 '
        u'https://t.co/bq9i0FZN89'
    )
    actual = twitter('#NYC 10', api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_username_no_tweet_number(self, mock_http_get):
    """A bare username returns the latest tweet from the fixture timeline."""
    fixture = get_fixture_file_data(self, "user_loneblockbuster.json")
    mock_http_get.return_value = fixture
    expected = u"2018-08-16 17:38:52 \x02loneblockbuster\x02: " \
               u"We had the motto \"When you're here you're " \
               u'family" before Olive Garden but like ' \
               u"everything else it was taken away " \
               u"from us."
    actual = twitter("loneblockbuster", api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_hashtag_with_tweet_number(self, mock_http_get):
    """A hashtag followed by an index returns that tweet from the search fixture."""
    fixture = get_fixture_file_data(self, "hashtag_nyc.json")
    mock_http_get.return_value = fixture
    expected = u"2018-08-17 20:19:32 \x02Kugey\x02: " \
               u"I know for sure that life is beautiful " \
               u"around the world... \u2022 \u2022 Here " \
               u"is yet another iconic piece of the " \
               u"NYC skyline... \u2022 \u2022 #nyc " \
               u"#photography #streetphotography " \
               u"#urbanphotography\u2026 " \
               u"https://t.co/bq9i0FZN89"
    actual = twitter("#NYC 10", api_key=FAKE_API_KEY)
    assert actual == expected
def main(argv=None):
    '''App entry point: tweet new RSS entries with bit.ly-shortened links.

    Loads the last-seen timestamp per feed from a pickled history file,
    tweets anything newer, then saves the updated history and exits 0.
    (Python 2 code — ``iteritems`` — left as-is.)
    '''
    # gets a twitter object
    tw = twitter(TWITTER['CONSUMER_KEY'], TWITTER['CONSUMER_SECRET'], \
        TWITTER['ACCESS_TOKEN'], TWITTER['ACCESS_TOKEN_SECRET'])
    # gets a bitly object
    bl = bitly(BITLY['USER'], BITLY['APIKEY'])
    # Load the history; start empty on first run or unreadable file.
    # (narrowed from a bare except, file handle now closed via `with`)
    try:
        with open(HISTORY_FILE, 'rb') as hist_fp:
            history = pickle.load(hist_fp)
    except (IOError, OSError, EOFError, pickle.PickleError):
        history = dict()
    # cycles through the RSSs defined in settings
    for rsskey, rssvalue in RSS.iteritems():
        # gets a feed object
        fd = feeds(rssvalue['RSS'])
        # Last timestamp for this feed; seed from settings when unseen.
        # (narrowed from a bare except: only a missing key is expected here)
        try:
            last_timestamp = history[rsskey]
        except KeyError:
            last_timestamp = (rssvalue['HISTORY'])['INIT_VALUE']
            history[rsskey] = last_timestamp
        # gets the updated feeds
        entries = fd.get_updated_feeds(rssvalue['HISTORY'], last_timestamp)
        # cycles through the feeds, tweeting them
        for feed in entries:
            link = bl.shorten_url(getattr(feed, rssvalue['LINK']))
            tweet = getattr(feed, rssvalue['TEXT'])
            # Reserve room for hashtag, link, separators and a safety margin.
            length = TWITTER['TWEET_LENGTH'] - len(rssvalue['HASHTAG']) \
                - len(link) - 10
            tweet = rssvalue['HASHTAG'] + ' ' + tw.truncate(tweet, length) \
                + ' ' + link
            tw.update_status(tweet, DEBUG)
        # updates the last timestamp
        history[rsskey] = fd.get_last_timestamp()
    # saves the history (file handle now closed via `with`)
    with open(HISTORY_FILE, 'wb') as hist_fp:
        pickle.dump(history, hist_fp)
    sys.exit(0)
def main():
    """Stream tweets for the configured account, run an EMR sentiment job, store results in Mongo."""
    from twitter import twitter
    # twitter setup.
    api_key = os.getenv('API_KEY')
    api_secret = os.getenv('API_SECRET')
    api_token = os.getenv('API_TOKEN')
    api_token_secret = os.getenv('API_TOKEN_SECRET')
    # FIX: renamed so the instance no longer shadows the imported `twitter` class.
    twitter_client = twitter(api_key, api_secret, api_token, api_token_secret)
    # twitter search.
    query = os.environ['TWEETER_ACCOUNT']
    number_of_days = float(os.getenv('DOWNLOAD_TIME'))
    twitter_client.streamingapi(query, 'ficherotemp', number_of_days, s3=False)
    # mongo setup.
    dbobject = MongoClient(os.getenv('MONGODB_HOST'), int(os.getenv('MONGODB_PORT')))
    database = dbobject.hadoop_sentiment_analysis
    collection = database[str(query)]
    with open('ficherotemp.json', 'r') as file:
        # we perform the sentiment_analysis on EMR via mrjob.
        number_of_instances = os.getenv('INSTANCE_TYPE')
        number_of_core_instances = os.getenv('NUM_CORE_INSTANCES')
        mr_job = MRJOB(args=[
            file.name, '-r', 'emr', '--no-output', '-c', './app/mrjob.conf',
            '--instance-type', number_of_instances,
            '--num-core-instances', number_of_core_instances
        ])
        with mr_job.make_runner() as runner:
            runner.run()
            print("Lanzamos el job de map reduce")
            # we store sentiment results in a list of dictionaries.
            results = [{
                i[0]: i[1]
            } for i in mr_job.parse_output(runner.cat_output())]
            if len(results) > 0:
                collection.insert_many(results)
                print('Resultados almacenados en base de datos con exito.')
            else:
                print(
                    'Error al insertar en mongodb. documents must be a non-empty list.'
                )
def test_twitter_hashtag_no_tweet_number(self, mock_http_get, mock_random_randint):
    """A bare hashtag picks a random tweet; pin the RNG and check the pick."""
    mock_http_get.return_value = get_fixture_file(self, 'hashtag_nyc.json')
    # This plugin chooses a random value.
    # I chose this value randomly by rolling a D20.
    mock_random_randint.return_value = 6
    expected = (
        u'2018-08-17 20:19:56 \x02The_Carl_John\x02: '
        u'RT @ItVisn How many records can your '
        u'company afford to lose? https://t.co/iJzFYtJmCh '
        u'Be proactive and Protect Your Business '
        u'#CyberSecurity #DataProtection '
        u'#DataBreaches #SmallBusiness #Tech '
        u'#Data #NYC #Technology #DarkWeb #Ransomware '
        u'#Malware #Phishing #Business '
        u'https://t.co/xAJVRhjOww'
    )
    actual = twitter('#NYC', api_key=FAKE_API_KEY)
    assert actual == expected
def test_twitter_hashtag_no_tweet_number(self, mock_http_get, mock_random_randint):
    """A bare hashtag picks a random tweet; pin the RNG and check the pick."""
    mock_http_get.return_value = get_fixture_file_data(self, 'hashtag_nyc.json')
    # This plugin chooses a random value.
    # I chose this value randomly by rolling a D20.
    mock_random_randint.return_value = 6
    expected = (
        u'2018-08-17 20:19:56 \x02The_Carl_John\x02: '
        u'RT @ItVisn How many records can your '
        u'company afford to lose? https://t.co/iJzFYtJmCh '
        u'Be proactive and Protect Your Business '
        u'#CyberSecurity #DataProtection '
        u'#DataBreaches #SmallBusiness #Tech '
        u'#Data #NYC #Technology #DarkWeb #Ransomware '
        u'#Malware #Phishing #Business '
        u'https://t.co/xAJVRhjOww'
    )
    actual = twitter('#NYC', api_key=FAKE_API_KEY)
    assert actual == expected
def get(self, myHandle):
    """Render a handle's recent tweets into a video and return it as a file response.

    Pipeline: fetch tweets -> render each to an image -> stitch into an mp4,
    each stage running on its own daemon thread connected by queues.
    """
    try:
        # try to get twitter keys from keys file
        t = tw.twitter("keys")
    except Exception:  # narrowed from a bare except: keys file missing/invalid
        # no keys exist, run the stub functions
        resp = noKeys()
        return resp
    f = ff.ffmpeg()  # create an ffmpeg object
    numTweets = 20  # number of tweets to get
    handlesQ = queue.Queue()  # twitter handles in the order the api received them
    tweetsQ = queue.Queue()   # tweets in the order they were tweeted by the handle
    # imagesQ = queue.Queue() # queue to hold tweet images
    handlesQ.put(myHandle)  # add twitter handle to queue
    myPic = t.get_profilePic(myHandle)  # get the handle's profile picture
    myTweets = t.get_tweets(myHandle, numTweets)  # get the handle's tweets
    # thread to get the tweets
    # (daemon=True kwarg replaces the deprecated setDaemon() calls)
    t1 = threading.Thread(name="producer", target=getTweets,
                          args=(tweetsQ, myHandle, myPic, myTweets), daemon=True)
    t1.start()
    # thread to convert tweets to images
    t2 = threading.Thread(name="imageConverter", target=tweetsToPics,
                          args=(tweetsQ, f), daemon=True)
    t2.start()
    # thread to convert the images to video
    t3 = threading.Thread(name="videoCreator", target=videoProcessor,
                          args=(handlesQ, f), daemon=True)
    t3.start()
    handlesQ.join()  # block until the video is created
    myFile = os.getcwd() + '/' + myHandle + '_' + r'twitter_feed.mp4'
    return send_file(myFile)
def handle(): ''' main function with logic and some handlers ''' global GAMEMODE global NOTIFY data = request.get_json() if not data.get('text'): #logic for no message request data.update({"text":""}) #could be different, depend on environment #return 'success' helper = BotHelper(data, TOKEN['access_token']) ''' switcher to activate\deactivate gamemode (input message lenth counter) and conversation with weather notification ''' if '#start' in data['text'].lower(): GAMEMODE.append(data['conversation']['id']) return 'success' elif '#stop' in data['text'].lower(): try: GAMEMODE.remove(data['conversation']['id']) except ValueError: print 'Invalid gamemode action' elif '#notify' in data['text'].lower(): NOTIFY.append(data['conversation']['id']) print '@@@ this conv enabled notification -----> ' + data['conversation']['id'] return 'success' elif '#offnotify' in data['text'].lower(): try: NOTIFY.remove(data['conversation']['id']) except ValueError: print 'Invalid notify mode action' else: pass ''' permanent check of ALL messages ''' if helper.perm(): helper.sender() if data.get('isGroup'): ''' block for Group chat ''' return 'succes' ''' block for private chat ''' #if data['id'][-4:] == '0000' or data.get('membersAdded') or '#init' in data['text'].lower(): if data.get('membersAdded') or '#init' in data['text'].lower(): #bot's greetings helper.add_text('Hello Human (wave)') helper.sender() twitter_start = 'Do you follow my creator\'s twitter? Here is his last tweet: {}. Link: {} \n I can show you ' \ 'last tweet for any of users. Just type me a user name like @slotomania \n' \ 'I can play with you a very simple game. I will count your message length. Cool, yeah? \n ' \ 'Type #start or #stop to start or stop game accordingly.' 
tweet_txt, tweet_link = twitter('ar4i_ua') helper.add_text(twitter_start.format(tweet_txt,tweet_link)) helper.sender() elif data['conversation']['id'] in GAMEMODE: #working on game reply helper.msg_len_updater() helper.sender() else: #reply to recieved valid twitter user name usr = helper.twitter_usr() if usr: tweet_txt, tweet_link = twitter(usr) twitter_res = 'Last tweet of user {}: {} \nLink: {}' helper.add_text(twitter_res.format(usr, tweet_txt, tweet_link)) helper.sender() else: pass return 'success'
# NOTE(review): these two plt calls look like the tail of a plotting helper
# defined above this chunk — their original indentation is ambiguous here.
plt.savefig(file_name)
plt.close()

if __name__ == '__main__':
    # Per-dataset metric accumulators.
    Accuracy = {}
    F1ScoreMacro = {}
    F1ScoreMicro = {}
    # sklearn moved train_test_split from cross_validation to model_selection in 0.18.
    if int((sklearn.__version__).split(".")[1]) < 18:
        from sklearn.cross_validation import train_test_split
    else:
        from sklearn.model_selection import train_test_split
    print('Welcome to the world of high and low dimensions!')
    data_set_name = input('Enter dataset name:')
    print(str(data_set_name), " dataSet:")
    if (data_set_name.lower() == 'twitter'):
        # The twitter pipeline loads its own data internally.
        twitter.twitter()
    else:
        if (data_set_name.lower() == 'dolphin'):
            # Dolphin Data Set
            ip_file_path = 'data/dolphins/'
            ip_file_name = 'dolphins.csv'
            out = 'dolphins'
            ip_label_file_name = 'dolphins_label.csv'
            data_matrix = np.genfromtxt(ip_file_path + ip_file_name, delimiter=' ')
        else:
            # Pubmed Data Set
            ip_file_path = 'data/pubmed/'
            ip_file_name = 'pubmed.csv'
            # NOTE(review): out is set to 'dolphins' on the pubmed path too —
            # presumably should be 'pubmed'; verify before relying on it.
            out = 'dolphins'
            ip_label_file_name = 'pubmed_label.csv'
            data_matrix = np.genfromtxt(ip_file_path + ip_file_name, delimiter=' ')
from threading import Thread
import threading
# Legacy extension import path (flask.ext.*); modern Flask uses flask_socketio.
from flask.ext.socketio import SocketIO, emit, join_room, leave_room, close_room, disconnect
from twitter import twitter
from machine_learning import ML_Model

# Flask + SocketIO application wiring.
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
thread = None
# Twitter public sample stream, English tweets only.
url = "https://stream.twitter.com/1.1/statuses/sample.json?language=en"
parameters = []
twitter = twitter()  # NOTE(review): instance shadows the imported `twitter` name
stop = False
first = True
ml = ML_Model()


def background_thread():
    # Streams tweets from the sample endpoint until `stop` is flipped elsewhere.
    # NOTE(review): the loop body appears truncated in this chunk — only the
    # request line is visible; the processing that uses `count`/`ml` is not shown.
    global stop
    stop = False
    count = 0
    global ml
    while True:
        response = twitter.twitterreq(url, "GET", parameters)
def __init__(self):
    """Initialize the parent class, then attach a fresh twitter client."""
    super().__init__()
    # Single client instance reused by the other methods of this class.
    self.twitter = twitter()
from twitter import twitter
from sys import argv

# Credentials come positionally from the command line:
#   script.py <key> <secret> <token> <token_secret>
api_key = argv[1]
api_secret = argv[2]
api_token = argv[3]
api_token_secret = argv[4]

twitter = twitter(api_key, api_secret, api_token, api_token_secret)

# we are going to retrieve data about #ElClasico.
twitter.streamingapi(
    ['Elclasico', 'elclásico', 'ElClasico', 'elclasico'],
    'elclasico21102017',
    'twitterjavierllorente',
    0.8,
)
import twitter
import weibo
import time
import config

# if __name__ == "main":
# Mirror loop: poll Twitter for fresh posts and republish them to Weibo.
tw = twitter.twitter()
wb = weibo.weibo()
timeGap = config.getConfig("other", "timegap")
while True:
    fresh_tweets = tw.getNew()
    if len(fresh_tweets) > 0:
        print("ready to publish...")
        wb.publishList(fresh_tweets)
    time.sleep(timeGap)
def input_worker(job_id, redis_host):
    """Forever feed items from the twitter source into the hadoopy_rt flow."""
    print('Worker')
    controller = hadoopy_rt.FlowController(job_id, redis_host)
    while True:
        for item in twitter(fruits):
            controller.send(0, item)
def open_url(self):
    """Begin the OAuth flow: fetch a request token, then open the verify URL."""
    self.twitter = twitter.twitter()
    token_pair = self.twitter.get_request_token()
    self.request_token, self.request_token_secret = token_pair
    # open verify url
    self.twitter.get_verifier(self.request_token)
def __init__(self, parent=None):
    """Initialize the Qt widget, attach a twitter client, and build the UI."""
    QtGui.QWidget.__init__(self, parent=parent)
    # Client used by the widget's actions.
    self.twitter = twitter.twitter()
    self.setUI()
import twitter

# Query the (legacy) Twitter search API host for current trending topics.
twitter_search = twitter.twitter(domain="search.twitter.com")
trends = twitter_search.trends()
# Bare expression: evaluates the trend names without assigning them —
# this reads like a REPL/notebook snippet rather than script code.
[trend['name'] for trend in trends['trends']]
def chat(user_inp, *args):
    """Answer one user message: translate FR->EN if needed, then either match an
    intent via the model, show the recent tweet, or respond to sentiment."""
    while True:
        # Get user input
        inp = user_inp
        if not inp:
            return "Please say something!"
        # instantiates translate object
        trans = translate(inp)
        isInFrench = False
        if (trans.detectFrench() == True):
            # if the input is detected to be in french
            isInFrench = True  # variable to check if original input was in french
            inp = trans.translateFRtoEN(inp)  # translate french input to english
        # instantiates sentiment object
        s = sentiment(inp)
        # instantiates twitter object
        twit = twitter(inp)
        # determines if input is not a sentiment.
        if (s.isNotSentiment()):
            # Run every sentence with different synonym combinations till one is recognized
            sentence_list = synonym_sentences(inp)
            for inp in sentence_list:
                # results will hold the predicted value of the tags in
                # correspondence with the user's input
                results = model.predict([l.bag_of_words(inp, words)])[0]
                # Grab the highest result and store it in results_index
                results_index = numpy.argmax(results)
                # Grab the tag belonging to the highest result
                global tag
                tag = labels[results_index]
                # Un-comment the code below to see the probability % of each tag
                # that matches in results, and the tag that has the max probability.
                #print(results)
                #print(tag)
                # Check if the probability is higher than a set amount. We use 0.8
                # here to determine if we want to bot to give a random response
                # or for it to say "it didn't understand"
                if results[results_index] > 0.8:
                    for t in data["intents"]:
                        if t['tag'] == tag:
                            responses = t['responses']
                    if (isInFrench == True):
                        return trans.translateENtoFR(random.choice(responses))  # translate random response to French
                    else:
                        return random.choice(responses)
            # No synonym variant cleared the confidence bar: apologize.
            global others
            others = ["I didn't quite understand", "I failed to understand what you were trying to say!", "Come again?", "Could you please repeat that for me?", "What language is that?"]
            if (isInFrench == True):
                return trans.translateENtoFR(random.choice(others))  # translate random response to French
            else:
                return random.choice(others)
        elif (inp.lower() == 'view recent tweet' or inp.lower() == 'see recent tweet'):
            # if input is 'view recent tweet'
            return twit.retrieve_tweets()  # return the most recent tweet from Justin Trudeau
        else:
            # Determines sentiment value and returns appropriate response.
            sent = s.sentiment_analysis()
            if (isInFrench == True):
                # NOTE(review): `t` here is the leftover intents loop variable
                # (or unbound on this path) — presumably `trans` was intended;
                # verify before relying on the French sentiment reply.
                return t.translateENtoFR(s.sentimentNumber(sent))  # translate sentiment response to French
            else:
                return s.sentimentNumber(sent)
def getTwitterData():
    """Flask endpoint: run the twitter model over the posted JSON payload.

    Returns:
        JSON response of the form {'result': <predicted text>}.
    """
    data = request.get_json()
    tweets = twitter()  # instantiate an object of twitter
    # FIX: dropped `assert isinstance(tweets, twitter)` — trivially true right
    # after construction, and asserts vanish under `python -O` anyway.
    predicted_text = tweets.PredictText(data)
    return jsonify({'result': predicted_text})
# Per-dataset metric accumulators.
F1ScoreMacro = {}
F1ScoreMicro = {}
# endregion
start = time.time()
print('Welcome to the world of high and low dimensions!')
# Take File Path as Command Line Arguments
testfile = sys.argv[2]
testlabelfile = sys.argv[4]
DataSetName = str(sys.argv[6])
if (DataSetName.lower() == 'twitter'):
    # If Data-Set Name is Twitter.
    testMatrix = np.genfromtxt(testfile, delimiter=' ')
    testlabelMatrix = np.genfromtxt(testlabelfile, delimiter=' ')
    twitter.twitter(testMatrix, testlabelMatrix)
else:
    # If Data-Set Name is Dolphin/Pubmed.
    if (DataSetName.lower() == 'dolphin'):
        # region Dolphin Data Set
        inputFilePath = '../data/dolphins/'
        inputFileName = 'dolphins.csv'
        out = 'dolphins'
        inputLabelFileName = 'dolphins_label.csv'
        dataMatrix = np.genfromtxt(inputFilePath + inputFileName, delimiter=' ')
        # endregion
    else:
        # region Pubmed Data Set
        # NOTE(review): this branch is cut off at the end of the chunk —
        # the remaining pubmed assignments are not visible here.
        inputFilePath = '../data/pubmed/'
        inputFileName = 'pubmed.csv'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# /**********************************************************************************
# * _author : Domeniko Gentner
# * _mail : [email protected]
# * _repo : https://git.tuxstash.de/gothseidank/tweet2png
# * _license : This project is under MIT License
# *********************************************************************************/
from twitter import twitter
from sys import exit
from platform import system
import json

# FIX: renamed the instance so it no longer shadows the imported `twitter` class.
client = twitter()
tweet = client.get_latest_tweet()
handle = client.handle()

# Windows writes next to the script; Linux asks the client for the output path.
if system().lower() == "windows":
    with open('latest_tweet.json', 'w') as fp:
        json.dump(tweet, fp, indent=4, sort_keys=True)
if system().lower() == "linux":
    with open(client.output_location(), 'w') as fp:
        json.dump(tweet, fp, indent=4, sort_keys=True)
exit(0)
from zunguang import zunguang

# Run the zunguang crawler from its own working directory first…
zunguang_path = os.path.join(os.path.abspath(".."), "zunguang")
os.chdir(zunguang_path)
zunguang.ZunGuang().main()
# …then fire every site-specific crawler in turn. Each call presumably
# fetches one site — TODO confirm; their definitions are outside this chunk.
ameblo()
bcy()
chang_ba()
fiveSing()
google_plus()
instagram()
keyakizaka46_diary()
lofter()
meipai()
meituzz()
miaopai()
nana_go_go()
nico_nico()
nogizaka46_blog()
tuchong()
tumblr()
twitter()
weibo()
weibo_article()
weishi()
yizhibo()
fkoji()
zunguang()
# tool.shutdown()
elif 'pencil art' in query: speak('Intializing Process For Pencil Art') from pencil_art import pencil_art pencil_art() elif 'vector art' in query: speak('Intializing Process For Vector Art') from vector_art import vector_art vector_art() elif 'screen recorder' in query: speak('Starting Screen recorder') from Screen_Recorder import Screen_Recorder Screen_Recorder() elif 'twitter' in query: speak('Opening twitter') from twitter import twitter twitter() elif 'close' or 'bye' or 'exit' in query: sys.exit() else: query = query speak("searching") try: res = client.query(query) results = next(res.results).text speak(results) print(results) except: results = wikipedia.summary(query) speak('Getting Details From Wikipedia') speak(results) print(results)