def tweet_data():
    """Webhook endpoint: pull the tweet payload fields out of the JSON
    request body, post the tweet, then redirect back to the index page."""
    payload = request.json
    tweet.tweet(payload['data'], payload['columns'], payload['text_inserts'])
    return redirect(url_for("index"))
def main(speech_file):
    """Transcribe *speech_file* with the Google Cloud Speech API and, after
    interactive confirmation, tweet the transcript.

    The process exits at the end either way (the original did the same).
    """
    with open(speech_file, 'rb') as speech:
        # Base64-encode the raw audio so it can travel in the JSON body.
        speech_content = base64.b64encode(speech.read())
    service = get_speech_service()
    service_request = service.speech().syncrecognize(
        body={
            'config': {
                'encoding': 'LINEAR16',   # raw 16-bit signed LE samples
                'sampleRate': 16000,      # 16 kHz
                'languageCode': 'ja_JP',  # a BCP-47 language tag
            },
            'audio': {
                'content': speech_content.decode('UTF-8')
            }
        })
    response = service_request.execute()
    # FIX: execute() already returns a parsed dict; the original's
    # json.dumps()/json.loads() round-trip was a no-op and is removed.
    tweet_text = response['results'][0]['alternatives'][0]['transcript']
    print("以下の内容をツイートしますか?")
    print(tweet_text, " [y/N]")
    user_ans = input()
    if user_ans == 'y':
        tweet.tweet(tweet_text)
    else:
        print("ツイートしませんでした。")
    exit()
def main():
    """Tweet today's Königs and Nanna lunch menus using the shared config."""
    weekday = datetime.datetime.today().weekday()
    config_path = "/home/hugo/lunch_tweeter/tw.yaml"
    tweet.tweet(config_path, get_konigs_menu(weekday))
    tweet.tweet(config_path, get_nanna_menu(weekday))
def POST(self):
    """Handle a talk submission: validate the form, store the talk,
    notify the selection committee by mail, announce it on Twitter,
    and redirect to the new talk's page."""
    i = web.input()
    f = form_talk()
    if not f.validates(i):
        # Re-render the submission form with validation errors.
        return render_template("talks/submit", form=f)
    key = new_talk(i)
    # Mail the committee only when both addresses are configured.
    if config.get('from_address') and config.get('talk_submission_contact'):
        email = render_template("talks/email", i)
        web.sendmail(
            from_address=config.from_address,
            to_address=config.talk_submission_contact,
            subject=web.safestr(email.subject.strip()),
            message=web.safestr(email)
        )
    # Archive the raw submission as JSON, keyed by timestamp.
    dir = config.get("talks_dir", "/tmp/talks")
    write("%s/%s.txt" % (dir, time.time()), simplejson.dumps(i))
    tweet.tweet("talk_template", title=i.title, author=i.authors, url=web.ctx.home + "/" + key)
    add_flash_message("info", "Thanks for submitting your talk. The selection committee will review your talk and get in touch with you shortly.")
    # web.py signals the redirect by raising.
    raise web.seeother("/" + key)
def test_posts_multiple_temperatures(self, mock_twitter_api):
    """Two sensor readings are joined into one status update."""
    api = mock_twitter_api.return_value
    readings = {'Sensor1': 14.0, 'Sensor2': 15.0}
    tweet(readings)
    api.PostUpdate.assert_called_with('Sensor1: 14.0, Sensor2: 15.0')
def post(self):
    """XMPP chat command handler: 'latest' shows the newest message,
    'post: <msg>' stores and tweets a new one, 'help' lists commands."""
    message = xmpp.Message(self.request.POST)
    if message.body.lower() == 'latest':
        message.reply("Latest Message:")
        greetings = db.GqlQuery("SELECT * FROM Messages ORDER BY date DESC LIMIT 1")
        for greeting in greetings:
            message.reply(str(greeting.content))
    elif message.body[0:5].lower() == 'post:':
        greeting = Messages()
        # NOTE(review): slice starts at 6, skipping the character right after
        # 'post:' (presumably a space) and capping the body at 60 chars —
        # confirm a bare 'post:msg' isn't meant to keep its first letter.
        greeting.content = str(message.body[6:66])
        # Location is hard-coded for XMPP posts.
        greeting.city = 'Topeka'
        greeting.state = 'Kansas'
        greeting.country = 'USA'
        greeting.lat = 39.0536
        greeting.lon = -95.6775
        greeting.put()
        message.reply("Thanks for posting " + str(greeting.content) + "!")
        message.reply("Visit http://led.chrjo.com to see your message!")
        tweet.tweet(greeting.content, greeting.city, greeting.state, greeting.country)
    elif message.body.lower() == 'help':
        message.reply("Choose from the following commands:")
        message.reply("Latest - to show latest message")
        message.reply("Post: <message> - to post a message to the sign")
        message.reply("Help - to show this guide")
        message.reply("Also visit http://led.chrjo.com for more features!")
    else:
        message.reply("Greetings! Type help to show possible commands")
def respond(self): super(FamilyOrFriend, self).respond() # Works fine mp3sound = "sounds/beep-01.mp3" # Works fine call(["omxplayer", mp3sound, "-o", "local"]) # Works fine sendmail() # Works fine. tweet() # Works fine. # Todo2 make this work, Ekiga.net (and Twinkle) and the phone-app of linphone os.system("./call.sh") # does not work. Fails to connect to the phone
def post(conf):
    """Pop the next queued status from *conf* and tweet it.

    Does nothing (beyond reporting) when the queue is empty.
    """
    try:
        status = conf.get_queue().pop()
    except IndexError:
        conf.error('The Queue is empty.')
        # FIX: bail out here. The original fell through and called
        # tweet.tweet() with `status` undefined, raising NameError
        # (unless conf.error() itself aborts — confirm its semantics).
        return
    tweet.tweet(conf, status)
    return
def Get(self):
    """Download this feed item's .torrent into the watch directory,
    announcing the start on Twitter and logging any retrieval failure."""
    msg = "Getting: %s " % self.displayName()
    print msg
    logger.info(msg)
    tweet.tweet("START:Download - %s" % self.displayName())
    # The ' rtRSS' suffix marks torrents fetched by this tool.
    filename = os.path.join(os.path.expanduser(TORRENT_DIRECTORY), "%s rtRSS.torrent" % self.displayName())
    try:
        urllib.urlretrieve(self.url, filename)
    except IOError, e:
        # Best-effort: log and continue; a failed fetch is not fatal.
        logger.exception("Error retrieving torrent from: %s at: %s" % (self.url, filename))
def func(): try: r = urllib2.urlopen(url) root = json.loads(r.read()) for data in root['results']: if u'Niantic, Inc.' in data[u'sellerName']: print "Gotcha!" tw.tweet() return print "has not been released yet in Japan" finally: r.close()
def tweet_route():
    """The endpoint for a webhook of sorts that should trigger a post."""
    result = "Tweeted"
    try:
        tweet()
    except Exception as err:
        # TODO: Fix this up. Better error handling.
        print(err)
        result = "There was an error."
    return result
def main():
    """Fetch upcoming events, tweet each one three minutes apart,
    and log any failure instead of crashing."""
    try:
        response = requests.get(URL, params=params)
        events = filter_events(response.json()['events'])
        api = login()
        for event in events:
            tweet(api, event)
            print(f'Tweeted event {event["name"]} successfully!')
            # Pace the posts to avoid rate limits.
            time.sleep(3 * 60)
    except Exception as exc:
        logging.error(exc)
def check():
    """Tweet any of the five newest feed entries not already listed in
    used.txt, recording each newly tweeted id."""
    with open('used.txt') as f:
        # FIX: build the id set once — the original rebuilt it on every
        # loop iteration even though the in-memory contents never change.
        used_ids = {line.split("\n")[0] for line in f.readlines()}
    for entry in NewsFeed.entries[:5]:
        entry_id = entry.id.split("/")[-1]
        if entry_id in used_ids:
            continue
        print("New article!")
        tweet(prprTxt(entry))
        # Append so the id is skipped on the next run.
        with open("used.txt", "a") as f:
            f.writelines(entry_id + "\n")
def tweet(self):
    """Tell the world (test account): post via the tweet module and
    return the URL of the resulting status."""
    import tweet
    poster = tweet.tweet()
    poster.main()
    return "http://twitter.com/#!/squid/status/%s" % (poster.last_tweet["id"],)
def send_post(self, post):
    """Fan a formatted post out to Twitter, Instagram, Facebook and
    Pinterest — unless dry mode is on.

    NOTE(review): `dry` is hard-coded to True, so the network branch is
    currently dead code and only the dry-mode log line runs. Also note
    exit(0) terminates the whole process on every call — confirm both
    are intentional.
    """
    t = tweet.tweet(self.config['twitter'])
    i = myinsta(self.config['instagram'])
    fb = facebk.facebk(self.config['facebook'])
    p = pintrst.mypint(self.config['pinterest'])
    posts = self.format_post(post)
    dry = True
    if t.connected and i.connected and fb.connected and p.connected and dry == False:
        # Twitter
        twit = t.send_post(posts['twitpost'])
        # Insta
        insta = i.send_post(posts['instapost'])
        # Facebook
        facebook = fb.send_post(posts['facebkpost'])
        # Pinsterest
        pinterest = p.send_post(posts['pintpost'])
    elif dry == True:
        logging.info('Dry mode : t=%s i=%s f=%s p=%s' % (t.connected, i.connected, fb.connected, p.connected))
    else:
        logging.info('Connection failed : t=%s i=%s f=%s p=%s' % (t.connected, i.connected, fb.connected, p.connected))
        logging.info('Failed to connect to at least one social service\n\
Please verify credentials configuration in config.json\n\
and rerun this script')
    exit(0)
def tweet(self):
    """Tell the world (test account): force an immediate, verbose post
    and return the URL of the resulting status."""
    import tweet
    poster = tweet.tweet()
    poster.set_options(force=True, verbose=True, lag=0)
    poster.main()
    return "http://twitter.com/#!/squid/status/%s" % (poster.last_tweet["id"],)
def callbackSee(client, userdata, message):
    """MQTT callback: take a picture, run Rekognition label/face detection,
    speak the results via Polly, and optionally tweet the annotated image."""
    print "Topic="+message.topic
    image = camera.takePicture()
    # Rekognition reads the image from S3, so upload first.
    awsUtils.copyLocalFileToS3(image)
    print "Picture uploaded"
    labels = RekognitionApi.detectLabels(reko, image)
    RekognitionUtils.printLabelsInformation(labels)
    faces = RekognitionApi.detectFaces(reko, image)
    # Draw the detected faces onto a copy of the picture.
    newImage, faceCounter = RekognitionUtils.generateOutputImage(image, faces)
    faceMessage, labelMessage = RekognitionUtils.generateMessages(faceCounter, labels)
    print "Face message: " + faceMessage
    print "Label message: " + labelMessage
    PollyApi.speak(polly, faceMessage)
    PollyApi.speak(polly, labelMessage)
    # Only the exact payload "tweet" triggers a post.
    if message.payload == "tweet":
        tweet.tweet(newImage, faceMessage)
        print "Tweet sent"
def tweeter_thread(queue):
    """Worker loop: pop items off *queue* forever and tweet them,
    re-initialising the client and retrying on Twython errors.

    NOTE(review): this busy-spins when the queue yields None, and the
    2-arg branch overwrites tweet()'s return value with True on the very
    next line — confirm both are intentional.
    """
    while True:
        item = queue.pop()
        if item is not None:
            tweeted = False
            # Retry until the item goes out.
            while not tweeted:
                try:
                    if len(item) == 1:
                        # Text-only tweet.
                        tweet(item[0])
                        tweeted = True
                    elif len(item) == 2:
                        # Tweet with a second argument (e.g. media).
                        tweeted = tweet(item[0], item[1])
                        tweeted = True
                except TwythonError as e:
                    print('Twython Error')
                    print(e)
                    # Re-create the client before retrying.
                    init_tweet()
                    tweeted = False
def tweet_learning_blog_url():
    """Tweet every learning-blog URL that has not been tweeted yet,
    scraping each page's title and pausing two hours between posts."""
    HASH_TAG = "\n#プログラミング #インターン\n"
    urls = get_learning_blog_urls()
    for i, url in enumerate(urls):
        if url not in load_tweeted_url():
            soup = BeautifulSoup(request.urlopen(url).read(), "html.parser")
            title = soup.select_one(
                "#Single-content > div > div.single-title > h2")  # noqa #501
            tweet(title.text + "\n\n" + HASH_TAG + url)
            write_tweeted_url(url)
            print(url + "(" + str(i + 1) + "/" + str(len(urls)) + ")")
            # FIX: compare against the number of URLs. The original tested
            # len(url) — the length of the URL *string* — so the loop
            # almost never broke early and slept after the last item too.
            if (i + 1) == len(urls):
                break
            else:
                # Two-hour gap between tweets.
                sleep(7200)
        else:
            continue
def testTweet(self):
    """tweet() should post all three randomized entries, leaving none unfinished."""
    entries = []
    for _ in range(3):
        entry = TEST_ENTRY.copy()
        # Randomize the title so repeated runs never collide.
        entry['title'] = 'FOR TEST ' + hex(random.getrandbits(256)) + ' ' + entry['title']
        entries.append(entry)
    self.assertEqual(0, len(tweet(entries)))
def tweet_():
    """Read the water temperature and light level, tweet a status report,
    then blink the blue LED as confirmation."""
    # Read temp and light level.
    tw_temp_c = temp.read_temp_c()
    tw_light = light.RCtime(22)
    # Load the date and time.
    now = datetime.datetime.now()
    # Format time like bash shell.
    # NOTE(review): 'CDT' is hard-coded regardless of actual timezone/DST — confirm.
    dateF = now.strftime("%a %b %d %H:%M CDT %Y")
    # Set Tweet contents (u'\xB0' is the degree sign).
    tweettext = "The lights are currently " + str(tw_light) + ".\nThe current water temperature is: " + str(tw_temp_c) + u'\xB0' + "C \nPrepared: " + str(dateF)
    # Send the Tweet.
    print tweet.tweet(tweettext.encode('UTF-8'))
    # Blink the Blue LED.
    led.blink(25, 1)
def scoreTweets(self):
    """Score every loaded tweet and collect the scored copies in
    Version_1.scored_tweets."""
    for currentTweet in Version_1.tweets:
        # Gets the score for the current tweet in the Version1.tweets list
        tweetScore = Version_1.getScore(self, currentTweet.getText())
        # Makes a tmp tweet object using the new score from above
        tmpTweet = tweet.tweet(str(tweetScore), currentTweet.id, currentTweet.text)
        # Puts the tmp tweet into the scored_tweets list for later computations
        Version_1.scored_tweets.append(tmpTweet)
def callbackRead(client, userdata, message):
    """MQTT callback: photograph text, OCR it with Rekognition, then
    read / translate / identify its language per the payload command,
    optionally tweeting the result."""
    print "Topic=" + message.topic
    print "Message=" + message.payload
    image = camera.takePicture()
    # Rekognition reads the image from S3, so upload first.
    awsUtils.copyLocalFileToS3(image)
    print "Picture uploaded"
    text = RekognitionApi.detectText(reko, image)
    print text
    if message.payload.startswith("read"):
        PollyApi.speak(polly, text)
    elif message.payload.startswith("translate"):
        src_language_code = ComprehendApi.detectLanguage(comprehend, text)
        # Payload shape: "translate <language> [...]".
        dest_language = message.payload.split(' ')[1]
        dest_language_code = language_info[dest_language]['code']
        voice = language_info[dest_language]['voice']
        print src_language_code, dest_language_code, voice
        if src_language_code == 'en' or dest_language_code == 'en':
            text = TranslateApi.translateText(translate, text, src_language_code, dest_language_code)
        else:
            # Pivot through English when neither side is English.
            text = TranslateApi.translateText(translate, text, src_language_code, 'en')
            text = TranslateApi.translateText(translate, text, 'en', dest_language_code)
        print text
        PollyApi.speak(polly, text, voice=voice)
    elif message.payload.startswith("language"):
        language_code = ComprehendApi.detectLanguage(comprehend, text)
        language = language_name[language_code]
        print language_code, language
        text = "I believe this is " + language
        PollyApi.speak(polly, text)
    else:
        print "Wrong Command, Please Enter Again"
        # Unknown command: skip the tweet step entirely.
        return
    # Any recognized command ending in "tweet" also posts the result.
    if message.payload.endswith("tweet"):
        tweet.tweet(image, text)
        print "Tweet sent"
def jsonin():
    """Scrape the school's news page and, if the newest headline differs
    from the cached one, refresh both JSON caches and tweet the headline."""
    load_url = "https://www.toyota-ct.ac.jp/information/"
    html = requests.get(load_url)
    soup = BeautifulSoup(html.content, "html.parser")
    widget = soup.find(class_="widget_list")
    values = []    # headline texts
    values2 = []   # headline hrefs
    test = {}
    test2 = {}
    name_list = [
        "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8",
        "key9", "key10", "key11", "key12"
    ]
    # If the JSON cache already has data, compare against it.
    # Read the current entries.
    for element in widget.select('li > a'):
        values.append(element.text)
        values2.append(element.get('href'))
    # Comparison step.
    json_open = codecs.open('value.json', 'r', 'utf-8')
    json_data = json.load(json_open)
    if values[0] == json_data["key1"]:
        # Newest headline unchanged: nothing to do.
        pass
    else:
        # Re-read the entries (original comment said 13 times, but the
        # loop — matching name_list — runs 12 times).
        for i in range(12):
            # Convert to dictionaries.
            key = name_list[i]
            test[key] = values[i]
            test2[key] = values2[i]
        # Write the dictionaries back to the JSON cache files.
        f = codecs.open('value.json', 'w', 'utf-8')
        json.dump(test, f, ensure_ascii=False, indent=4)
        f = open('value2.json', 'w')
        json.dump(test2, f, ensure_ascii=False, indent=4)
        # Tweet the new headline.
        tweet.tweet(values[0], values2[0])
def POST(self):
    """Create a new blog post (admin only), store it, tweet the
    announcement, and redirect to the post's page."""
    if not is_admin():
        return render_template("permission_denied", "/blog/new", "Only admin users can create new blog posts.")
    i = web.input()
    f = form_new()
    if not f.validates(i):
        # Re-render the form with validation errors.
        return render_template("blog/new", form=f)
    # Sequential key, e.g. "blog/42".
    key = "blog/%d" % web.ctx.site.seq.next_value("post")
    d = {
        "key": key,
        "type": "post",
        "title": i.title,
        "body": i.body,
        "author": context.user and context.user.key,
        "timestamp": datetime.datetime.utcnow().isoformat()
    }
    web.ctx.site.store[d['key']] = d
    tweet.tweet("blog_template", title=d['title'], url=web.ctx.home + "/" + key)
    # web.py signals the redirect by raising.
    raise web.seeother('/' + key)
def main():
    """Configure rotating-file and console logging, then process the
    completed torrent: tweet the success and copy it into the library."""
    logger.setLevel(LOGLEVEL)
    # ~10 KB per file, keeping 5 rotated backups.
    filehandler = logging.handlers.RotatingFileHandler(os.path.expanduser(LOGFILE), maxBytes=10000, backupCount=5)
    filehandler.setLevel(FILE_LOGLEVEL)
    streamhandler = logging.StreamHandler(sys.stdout)
    streamhandler.setLevel(CONSOLE_LOGLEVEL)
    formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s", "%m/%d/%Y %I:%M:%S %p")
    filehandler.setFormatter(formatter)
    streamhandler.setFormatter(formatter)
    logger.addHandler(filehandler)
    logger.addHandler(streamhandler)
    logger.info("== STARTING ==")
    t = TransmissionTorrent()
    # No torrent name means nothing to process.
    if not t.name:
        return
    tweet.tweet("SUCCESS:Download - %s" % t.name)
    t.createMediaLibraryCopy()
def createMediaLibraryCopy(self):
    """Copy the torrent's media file into the TV or movie library,
    tweeting success or failure. No-op for unrecognized content."""
    dest = None
    category = ""
    if self.isTVShow():
        dest = TV_DIRECTORY
        category = "TV Show"
    elif self.isMovie():
        dest = MOVIE_DIRECTORY
        category = "Movie"
    if not (self.mediaFilePath and dest):
        logger.debug("Not adding %s to media library" % self.name)
        return
    if self.isMovie():
        path, mediaFileName = os.path.split(self.mediaFilePath)
        if not nameIsMovie(mediaFileName):
            logger.debug("This is a movie, but the media file needs to be renamed.")
            # The torrent was identified as a movie, but the mediafile was not.
            # Create a new 'dest' carrying the torrent's name instead.
            base, ext = os.path.splitext(mediaFileName)
            newDestFileName = self.name + ext
            dest = os.path.join(dest, newDestFileName)
    logger.info("Copying %s to %s" % (self.mediaFilePath, dest))
    path, mediaFileName = os.path.split(self.mediaFilePath)
    try:
        shutil.copy(self.mediaFilePath, dest)
    # FIX: narrowed the bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to Exception; also dropped the
    # dead trailing `pass`.
    except Exception:
        logger.exception("Error while copying %s to %s" % (self.mediaFilePath, dest))
        tweet.tweet("ERROR:%s - %s" % (category, mediaFileName))
    else:
        logger.info("Copy complete")
        tweet.tweet("SUCCESS:%s - %s" % (category, mediaFileName))
def tweet_qiita_url():
    """Tweet each tracked user's recent Qiita posts, appending the post's
    own tags as hashtags (within the 144-char budget), with a 30-minute
    pause between tweets."""
    PAGE = "1"
    PAR_PAGE = "10"
    BASE_HASH_TAG = "\n#プログラミング "
    for USER_ID in load_user_ids():
        response = connect_qiita(USER_ID, PAGE, PAR_PAGE)
        blog_data_list = get_blog_data_list(response)
        for i, blog_data in enumerate(blog_data_list):
            msg = blog_data["title"] + "\n\n"
            # FIX: build the hashtag string fresh for every post. The
            # original kept appending to the shared HASH_TAG, so tags from
            # earlier posts (and earlier users) leaked into later tweets.
            hash_tag = BASE_HASH_TAG
            for tags in blog_data["tags"]:
                if not tags["name"] == "プログラミング":
                    # Stop adding tags once the tweet would exceed 144 chars.
                    if len(msg + hash_tag + "#" + tags["name"] + " ") > 144:
                        break
                    else:
                        hash_tag += "#" + tags["name"] + " "
            msg += hash_tag.strip(" ") + "\n" + blog_data["url"]
            tweet(msg)
            print(USER_ID + ": " + blog_data["url"] + "(" + str(i + 1) + "/" + str(len(blog_data_list)) + ")")
            # 30-minute gap between tweets.
            sleep(1800)
def callbackSee(client, userdata, message):
    """MQTT callback: photograph the scene and describe it with either the
    local MXNet model ("mxnet...") or AWS Rekognition ("reko..."),
    speaking the result and optionally tweeting it."""
    print "Topic=" + message.topic
    print "Message=" + message.payload
    image = camera.takePicture()
    if message.payload.startswith("mxnet"):
        # Detect image with MXNet
        mxnetimage = inception.load_image(image)
        prob = inception.predict(mxnetimage, model)
        topN = inception.get_top_categories(prob, synsets)
        print topN
        speech = inception.get_top1_message(topN)
        print speech
        PollyApi.speak(polly, speech)
        if message.payload.endswith("tweet"):
            tweet.tweet(image, speech)
            print "Tweet sent"
    elif message.payload.startswith("reko"):
        # Detect image with Rekognition (reads from S3, so upload first).
        awsUtils.copyLocalFileToS3(image)
        print "Picture uploaded"
        labels = RekognitionApi.detectLabels(reko, image)
        RekognitionUtils.printLabelsInformation(labels)
        faces = RekognitionApi.detectFaces(reko, image)
        celebs = RekognitionApi.detectCelebrities(reko, image)
        # Annotate the picture with the detected faces.
        newImage, faceCounter = RekognitionUtils.generateOutputImage(
            image, faces)
        faceMessage, labelMessage = RekognitionUtils.generateMessages(
            faceCounter, celebs, labels)
        print "Face message: " + faceMessage
        #print "Label message: " + labelMessage
        PollyApi.speak(polly, faceMessage)
        #PollyApi.speak(polly, labelMessage)
        if message.payload.endswith("tweet"):
            tweet.tweet(newImage, faceMessage)
            print "Tweet sent"
    else:
        print "Wrong Command, Please Enter Again"
def user(self):
    """Process every line of this user's file as a tweet, threading the
    word maps through each tweet object and collecting the results."""
    datalines = []
    with open(self.Dir, 'r', encoding='utf-8') as f:
        datalines = f.read().splitlines()
    self.tweetCnt = len(datalines)
    for line in datalines:
        # Each tweet updates the running word maps; carry them forward.
        tw = tweet(line, self.wordMap, self.uniWordMap)
        tw.tweet()
        self.wordMap = tw.wordMap
        self.uniWordMap = tw.uniWordMap
        self.tweets.append(tw)
    # Free the raw lines once processed.
    datalines.clear()
def readTweetsFromExcelFile(self, excelfile):
    """Load tweet rows from *excelfile* into tweet objects and return them
    with their original index inserted.

    NOTE(review): despite the name, this parses the file with
    pd.read_csv, i.e. it expects CSV, not a real Excel workbook — confirm.
    """
    tweets = []
    df = pd.read_csv(excelfile)
    print(df.columns)
    for (i, row) in df.iterrows():
        tweetIn = tweet(
            username=row['username'], text=row['tweet'], date=row['date'],
            retweets=row['retweets_count'], bioguide=row['id__bioguide'],
            legtype=row['type'], state=row['state'], party=row['party']
        )
        tweets.append(tweetIn)
    return self.insertOriginalIndex(tweets)
def loadTweets(self):
    """Load the positive/negative word lists and sample 100 random rows
    from the training CSV into Version_1.tweets."""
    start_time = time.time()
    # Creates the positive word list (FIX: context managers guarantee the
    # word files are closed even on error).
    with open('positive_words.txt', 'r') as positive_tmp:
        for line in positive_tmp:
            Version_1.positive_words.append(line.replace("\n", ""))
    # Creates the negative word list
    with open('negative_words.txt', 'r') as negative_tmp:
        for line in negative_tmp:
            Version_1.negative_words.append(line.replace("\n", ""))
    with open('NLP_Training.csv', 'r') as training_tweets:
        # Loads the CSV -> Reader -> List
        csv_list = list(csv.reader(training_tweets))
    # Prints the time it took to load the CSV
    timetook = time.time() - start_time
    print(f"It took {timetook} seconds to load the files")
    # Number of times to loop
    num_of_tweets = 100
    while num_of_tweets > 0:
        # FIX: bound the index by the actual row count; the hard-coded
        # randint(0, 1600000) raised IndexError on any shorter file.
        random_tweet = csv_list[random.randint(0, len(csv_list) - 1)]
        # Makes a new tweet object and adds it to the list of known tweets
        tweet_obj = tweet.tweet(random_tweet[0], random_tweet[1], random_tweet[2])
        Version_1.tweets.append(tweet_obj)
        num_of_tweets -= 1
def tweet(mode='daily'):
    '''Read ques, tweet a tweet, and write ques.

    Pops the next queued card, posts it (unless run with the 'test' CLI
    argument, which only prints), records the uploaded image URL in the
    CSV "db", and persists the shrunken queue.

    NOTE(review): the inner `tweet(**status)` call resolves to this very
    function (infinite recursion) unless a different `tweet` is in scope
    at call time — confirm which API was intended. `img_path` is also not
    defined locally; presumably a module-level name — verify.
    '''
    with open(que_file, 'rb') as f:
        ques = pickle.load(f)
    # Refill the queue when only one item remains.
    if len(ques) == 1:
        generator.make_que(mode)
    que = ques.popleft()
    status = generator.tweet_generator(*que)
    if sys.argv[1] == 'test':
        # Dry run: show what would have been posted.
        print('status:', status)
    else:
        res = tweet(**status)
        # update db
        uploaded_img_url = res['entities']['media'][0]['url']
        cards = pd.read_csv(db_file)
        filename = basename(img_path)
        cards.loc[cards['filename'] == filename, 'uploaded_img_url'] = uploaded_img_url
        cards.to_csv(db_file, index=False)
    # Persist the queue minus the item just consumed.
    with open(que_file, 'wb') as f:
        pickle.dump(ques, f)
    sched.print_jobs()
def main():
    """Crawl the latest ePrint search results, tweet entries not seen in
    the previous crawl, and persist the crawl state."""
    resp = requests.get(
        'http://eprint.iacr.org/eprint-bin/search.pl?last=31&title=1',
        headers=HTTP_HEADERS
    )
    if resp.status_code != 200:
        msg = 'request failed: ' + str(resp.status_code) \
            + '\n\n' + resp.text
        raise Exception(msg)
    my_parser = EPrintParser()
    curr_list = my_parser.feed(resp.text)
    if curr_list is None \
            or not isinstance(curr_list, list) \
            or len(curr_list) < 20:
        # in case the crawled page has problems
        return
    my_storage = Storage()
    prev_list = my_storage.retrieve()
    if prev_list is None \
            or not isinstance(prev_list, list) \
            or len(prev_list) == 0:
        # First run (or empty state): seed storage, nothing to diff against.
        my_storage.save(curr_list)
    else:
        # Attach both lists to the Sentry context for debugging.
        sentry_client.user_context({
            'prev_list': prev_list,
            'curr_list': curr_list,
        })
        list_updated = [i for i in curr_list if i not in prev_list]
        if len(list_updated):
            # tweet() returns the entries it did not post; they are kept
            # out of the saved state (presumably so they are retried on
            # the next run — confirm).
            list_untweeted = tweet(list_updated)
            list_to_save = [i for i in curr_list if i not in list_untweeted]
            my_storage.save(list_to_save)
def main():
    """Crawl the latest ePrint search results, tweet entries not seen in
    the previous crawl, and persist the crawl state."""
    resp = requests.get(
        'http://eprint.iacr.org/eprint-bin/search.pl?last=31&title=1',
        headers=HTTP_HEADERS)
    if resp.status_code != 200:
        raise Exception('request failed: ' + str(resp.status_code) + '\n\n' + resp.text)
    parser = EPrintParser()
    curr_list = parser.feed(resp.text)
    # in case the crawled page has problems
    crawl_is_bad = (curr_list is None
                    or not isinstance(curr_list, list)
                    or len(curr_list) < 20)
    if crawl_is_bad:
        return
    storage = Storage()
    prev_list = storage.retrieve()
    no_previous_state = (prev_list is None
                         or not isinstance(prev_list, list)
                         or len(prev_list) == 0)
    if no_previous_state:
        storage.save(curr_list)
        return
    sentry_client.user_context({
        'prev_list': prev_list,
        'curr_list': curr_list,
    })
    new_entries = [entry for entry in curr_list if entry not in prev_list]
    if len(new_entries):
        untweeted = tweet(new_entries)
        storage.save([entry for entry in curr_list if entry not in untweeted])
x2 = x1 + 70 + json_response[0]["faceRectangle"]["width"] + 70 y1 = json_response[0]["faceRectangle"]["top"] - 150 y2 = y1 + 150 + json_response[0]["faceRectangle"]["height"] + 70 x1 = x1 if x1 > 0 else 0 y1 = y1 if y1 > 0 else 0 x2 = x2 if x2 < width else width - 1 y2 = y2 if y2 < height else height - 1 #print(x1, x2, y1, y2) cropped_frame = frame[y1:y2, x1:x2] cropped_frame_image = Image.fromarray(cropped_frame) cropped_frame_image.save('crop.png') #tweet.tweet("sad", cropped_frame) #cv2.imshow("cropped", cropped_frame) #cv2.waitKey(0) tweet.tweet( "I'm feeling " + str( (json_response[0]["faceAttributes"]["emotion"]["sadness"]) * 100) + "% sad :(", 'crop.png') openurl.play_cat_video() k = cv2.waitKey(1) count = count + 1 time.sleep(delay) # When everything done, release the capture cam.release() cv2.destroyAllWindows()
nodos.append(negativo.getNumeroNodos()) maximo = sorted(nodos) neutro.setBalance(maximo[2]) positivo.setBalance(maximo[2]) negativo.setBalance(maximo[2]) conn=conex.conexion() sql=" SELECT cuerpo,id_articulo FROM `clasificar_"+str(tabla)+"` " conn.ejecutarSql(sql) resultadotweet=conn.getResultado() tiempo=0 for regtweet in resultadotweet: inicioTweet=time.time() cuerpo=regtweet[0] id=str(regtweet[1]) tweetEvaluar=evaluar.tweet(cuerpo,stpw) palabrasTweet=tweetEvaluar.getPalabrasTweet() evaluacionNeutro=ev.evaluacion(palabrasTweet,neutro,0,analisis,ponderadis,ponderapares) #evaluacionNeutro.debug("neutro") evaluacionPositivo=ev.evaluacion(palabrasTweet,positivo,tweetEvaluar.getEmoticonPos(),analisis,ponderadis,ponderapares) #evaluacionPositivo.debug("positivo") if tweetEvaluar.getValencia() > 5: #print contval #print "ayuda valence positivo: "+ str((valence-5)*(contval-1)/1000) costo=evaluacionPositivo.getCosto()-((tweetEvaluar.getValencia()-5)*(tweetEvaluar.getContVal()-1)/1000) evaluacionPositivo.setCosto(costo) evaluacionNegativo=ev.evaluacion(palabrasTweet,negativo,tweetEvaluar.getEmoticonNeg(),analisis,ponderadis,ponderapares)
# prof研の前回の人数をdbから取得 cur.execute('SELECT curt FROM remain WHERE prof=%s;', (prof, )) c = cur.fetchone()[0] if int(curt) > c: # 増えていた場合 cur.execute('UPDATE remain SET curt=%s WHERE prof=%s;', ( int(curt), prof, )) d = int(curt) - c tw = prof + '研の希望者が増えました.\n現在 ' + curt + ' / ' + cap + '名 (+' + str( d) + '名)' print(tw) tweet(twitter, tw) elif int(curt) < c: # 減っていた場合 cur.execute('UPDATE remain SET curt=%s WHERE prof=%s;', ( int(curt), prof, )) d = c - int(curt) tw = prof + '研の希望者が減りました.\n現在 ' + curt + ' / ' + cap + '名 (-' + str( d) + '名)' print(tw) tweet(twitter, tw) conn.commit() # インターバルを設ける
['e', 'ぁ', 'ぃ', 'ぅ', 'ぇ', 'ぉ', 'a', 'b', 'c', 'd'], ['j', '', '', '', '', '', 'f', 'g', 'h', 'i'], ['o', '', '', '', '', '', 'k', 'l', 'm', 'n'], ['t', '', '', 'っ', '', '', 'p', 'q', 'r', 's'], ['y', '', '', '', '', '', 'u', 'v', 'w', 'x'], ['', '', '', '', '', '', 'z', '', '', ''], ['', '', '', '', '', '', '', '', '', ''], ['小', 'ゃ', '', 'ゅ', '', 'ょ', '', '', ' ', ''], ['', '', '', '', '', '', '', '', '', '']] ツイート文字列 = [] 小文字モード = 'オフ' for イ in range(0, int(len(ツータッチ)), 2): if 文字リスト[int(ツータッチ[イ])][int(ツータッチ[イ + 1])] != '小': if 小文字モード == 'オフ': 一時的な変数 = 文字リスト[int(ツータッチ[イ])][int(ツータッチ[イ + 1])] ツイート文字列.append(一時的な変数) elif 小文字モード == 'オン': 一時的な変数 = 小文字リスト[int(ツータッチ[イ])][int(ツータッチ[イ + 1])] ツイート文字列.append(一時的な変数) else: if 小文字モード == 'オフ': 小文字モード = 'オン' else: 小文字モード = 'オフ' ツイートテキスト = ''.join(ツイート文字列) tweet.tweet(str(ツイートテキスト))
def notification():
    """Handle a Context Broker notification: collect its attributes and
    either answer a 'Give me <attr>' query or publish the free-form
    message decorated with two random attributes, then tweet it."""
    notification = request.json
    # Store all the attributes coming in the notification
    attrs = {}
    msg = ''
    for attr in notification['contextResponses'][0]['contextElement']['attributes']:
        name = attr['name']
        value = pretty(attr['value'])
        if name == 'msg':
            msg = value
        else:
            if name in UNITS:
                unit = UNITS[name]
            else:
                unit = ''
            attrs[name] = value + unit
    if msg == RESET_MSG:
        # The 'reset' message is not an actual one, thus nothing to Publish
        return Response(status=200)
    # We pass to lowercase, to make regex matching simpler, thus 'Give me', 'give me', 'Give Me' are all the same
    lc_msg = msg.lower()
    m = re.match('give me (.*)', lc_msg)
    # If the msg is a "Give me" one, then post the value of the attribute
    if m != None:
        # Take only the first letters of the attribute, thus 'Give me temperature' and 'Give me tem' work the same
        attr = m.group(1)[:3]
        if attr in attrs.keys():
            final_msg = 'Current ' + attr + ' at @FIware stand is ' + attrs[attr] + ' ' + HASHTAG
        else:
            # Unknown attribute: nothing to publish.
            # FIX: the original's Response(200) passed 200 as the response
            # *body*; status=200 matches every other early return here.
            return Response(status=200)
    else:
        # If the msg is a normal one, then pick two attribute at random
        attr1 = random.choice(attrs.keys())
        attr2 = random.choice(attrs.keys())
        while attr1 == attr2:
            attr2 = random.choice(attrs.keys())
        attrs_string = ' ' + attr1 + '=' + attrs[attr1] + ' ' + attr2 + '=' + attrs[attr2] + ' '
        # Truncate the user message so the decorated tweet fits the limit.
        max_user_msg = TWITTER_MAX_LENGTH - len(attrs_string) - len(HASHTAG)
        final_msg = msg[:max_user_msg] + attrs_string + HASHTAG
    # Tweet it!
    tweet(final_msg)
    # Before returning, we 'reset' the message. Otherwise, several consecutive msg with the same text
    # (e.g. 'Give me hum) will be ignored (except the first one)
    reset()
    return Response(status=200)
def test_doesnt_do_anything_if_passed_dict_is_empty(self):
    """tweet() must be a silent no-op for an empty readings dict."""
    tweet({})
try: if html_fn != "none": try: html_result = html_load_gaap.html_load(html_fn, name, cik, file_date, form, symbol, eh_sk) except: pass fulfill_html.fulfill(file_date,0,0,form,company_sk,eh_sk) #Tweet about it if symbol != "NA": try: tweet.tweet(name, exchange, symbol, form) except: "no tweets!" except: print "no html fulfill for you! "+company_sk, name, symbol, form except: pass if form == "10-K" or form == "10-Q": for file in sorted(glob.glob( os.path.join(temp_path, '*[0-9].xml') )):
logger.debug("Sending annual email to %s..." % emailrecipient) #sending annual email to myself to make sure everything is running smoothly sendmail(emailrecipient, emailsubject, emailbody) logger.info("Checking time...") #checktime sleeps until the given hours in the parameters checktime(tweettimes[0],tweettimes[1]) #these are for debugging #print "sleeping" #sleep(30) logger.info("Tweeting...") #emailrecipient is needed because if there is a tweet error, it will send to that email tweet(nextTweet, emailrecipient) logger.info("Adding Tweet to pasttweets.txt...") #adding tweet that was just tweeted to past tweets tweetretriever.writeToPastTweets(nextTweet) logger.info("Checking for new DMs...") #check DMs newDMs = getNewDMs() #set up DMs to be sent in email emailbody = "@youraccount has %s new DMs.\n\n" % (len(newDMs)) for x in newDMs: emailbody += x + "\n" emailbody += "\n"
def tweet_data():
    """Webhook endpoint: tweet the list supplied under 'tweetDict' in the
    JSON request body, then redirect back to the index page."""
    payload = request.json
    tweet.tweet(payload['tweetDict'])
    return redirect(url_for("index"))
def temp_change_tweet(message):
    """Post *message* about a temperature change.

    A 5-minute rate-limit guard exists but is commented out, as in the
    original.
    """
    # if time.time() - last_temp_tweet[0] > 300:
    tweet.tweet(message)
# Twitter API credentials (redacted).
CK = '***'
CS = '***'
AT = '***'
AS = '***'
# Poll the page once a second; tweet whenever the stripped title text
# differs from the one cached in title.txt, then update the cache.
while True :
    text = getText.getText(url, keyList)
    str_text = str(text[0])
    # Strip HTML tags and the surrounding brackets.
    str_text = re.sub('<.*?>', '', str_text)
    # str_text.replace('白', '')
    str_text = str_text.replace('[', '')
    str_text = str_text.replace(']', '')
    # print(str_text)
    textList = []
    textList.append(str_text)
    # Read the previously seen title from the cache file.
    f = open('title.txt', 'r')
    data = f.read()
    f.close()
    if data != textList[0] :
        # New title: tweet it and refresh the cache.
        tweet(CK, CS, AT, AS, textList)
        f = open('title.txt', 'w')
        f.write(textList[0])
        f.close()
        # print(textList[0])
    # else :
    # print('same')
    time.sleep(1)
def job():
    # put code to be run hourly in here
    tweet()
    print("This code is working...")
    # Elapsed-time debug output (time1 is set at module level).
    print(time1 - time())
print "Cutting a sample of %s" % (end - start) sample = src[start:end] lines += sample.split(". ") # now have lines, a list of "sentences" from the samples # of sources. while len(lines) > 0: try: index = randint(0, len(lines) - 1) line = lines[index] final += line lines.remove(line) except UnicodeDecodeError: lines = lines[0:index] + lines[index + 1 :] final_final = "" for c in final: if ord(c) < 127: final_final += c final = final_final print "Final is %s long" % len(final) print final print "Posting Final to twitter..." tweet(final, "willsburroughs", "m0rdecai!")
def post(self):
    """Accept a sign message: sanitize/truncate it, geolocate the sender
    (hard-coding RPI campus ranges), store it, tweet it, and notify via
    Google Talk before redirecting home."""
    greeting = Messages()
    if users.get_current_user():
        greeting.author = users.get_current_user()
    try:
        greet = unicode(self.request.get('content').decode('utf-8'))
    except UnicodeEncodeError:
        # Undecodable input: rickroll-style redirect instead of posting.
        greet = "I don't speaka your language!"
        self.response.out.write('<script type="text/javascript">window.location = "http://www.youtube.com/watch?v=DNT7uZf7lew"</script>')
        return
    if len(greet) < 1:
        greet = "I'm an Idiot!"
    # The sign shows at most 60 characters.
    greeting.content = greet[:60]
    greeting.ipaddr = str(self.request.remote_addr)
    # Disabled per-IP rate limiting kept below for reference (dead string
    # expression, never executed).
    '''ip_greets = db.GqlQuery("SELECT * FROM Messages ORDER BY date DESC LIMIT 1 WHERE ipaddr = '" + greeting.ipaddr + "'")
    for ip_greet in ip_greets:
        try:
            if ip_greet.blocktime >= (greeting.date.minutes - ip_greet.minutes):
                self.redirect('/')
            elif ip_greet.blocktime*2 >= (greeting.date.minutes - ip_greet.minutes):
                greeting.blocktime = ip_greet.blocktime * 2
            else:
                greeting.blocktime = 1
        except:
            greeting.blocktime = 1'''
    greeting.blocktime = 1
    greetings = db.GqlQuery("SELECT * FROM Messages ORDER BY date DESC LIMIT 20")
    found = False
    # RPI campus IP ranges get a fixed location.
    if greeting.ipaddr.startswith("128.113") or greeting.ipaddr.startswith("128.213") or greeting.ipaddr.startswith("129.161") or greeting.ipaddr.startswith("129.5"):
        greeting.city = "RPI"
        greeting.state = "New York"
        greeting.country = "United States"
        greeting.lat = 42.7495
        greeting.lon = -73.5951
    else:
        # Reuse the location of a recent message from the same IP, if any.
        for greet in greetings:
            if greet.ipaddr == greeting.ipaddr:
                greeting.city = greet.city
                greeting.state = greet.state
                greeting.country = greet.country
                try:
                    greeting.lat = float(greet.lat)
                    greeting.lon = float(greet.lon)
                except TypeError:
                    # Fallback coordinates (Atlantic, off the US coast).
                    greeting.lat = 39
                    greeting.lon = -70
                found = True
                break
        if found == False:
            # Otherwise geolocate the IP address.
            location = parse(str(self.request.remote_addr))
            greeting.city = location['city']
            greeting.state = location['state']
            greeting.country = location['country']
            try:
                greeting.lat = float(location['lat'])
                greeting.lon = float(location['lon'])
            except TypeError:
                greeting.lat = 39
                greeting.lon = -70
    greeting.put()
    #Tweet the message
    tweet.tweet(greeting.content, greeting.city, greeting.state, greeting.country)
    #Google Talk to Christian
    user_address = ['*****@*****.**','*****@*****.**']
    msg = "New LED Sign Message: " + str(greeting.content) + " - from " + str(greeting.city)
    for user in user_address:
        chat_message_sent = False
        status_code = xmpp.send_message(user, msg)
    #mail.send_mail(sender="*****@*****.**",to="*****@*****.**",subject="LED",body=str(greeting.content))
    #mail.send_mail(sender="*****@*****.**",to="*****@*****.**",subject="LED",body=str(greeting.content))
    self.redirect('/')
m = re.match(r"([0-9a-f]{2} ){9}t=([+-]?[0-9]+)", line) if m: value = str(float(m.group(2)) / 1000.0) f.close() except (IOError), e: print time.strftime("%x %X"), "Error reading", path, ": ", e return value # define pathes to 1-wire sensor data pathes = ( "/sys/bus/w1/devices/28-00044e7e83ff/w1_slave", "/sys/bus/w1/devices/28-0004632c98ff/w1_slave", "/sys/bus/w1/devices/28-00000609b2f2/w1_slave" ) # read sensor data data = 'N' for path in pathes: temperatures = dict() temperatures['Sensor1'] = read_sensor(path) data += ':' data += read_sensor(path) time.sleep(1) tweet.tweet(temperatures) # insert data into round-robin-database rrdtool.update( "%s/temperature.rrd" % (os.path.dirname(os.path.abspath(__file__))), data)