def post_random_tweet(self):
    session = connect_sql()
    quote = random_quote(session)
    # Mark the quote as used before tweeting so it is not picked again.
    quote.used = True
    session.commit()
    try:
        tweet(quote.to_tweet_string())
    except Exception:
        logging.error("Could not tweet: %s, too long." % quote.to_tweet_string())
    finally:
        session.close()
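# A minimal sketch of the random_quote() helper assumed above, using SQLAlchemy.
# The Quote model, its `used` flag, and ordering by func.random() are assumptions
# inferred from the call site, not the original implementation.
from sqlalchemy import func

def random_quote(session):
    # Pick one not-yet-used quote at random (func.random() works on SQLite/Postgres).
    return session.query(Quote).filter_by(used=False).order_by(func.random()).first()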
def save(self, *args, **kwargs):
    if self.date_published is None:
        # If the date isn't set, it needs to be set now or get_absolute_url() breaks.
        self.date_published = datetime.now()
    if self.is_published and not self.is_modified and self.tweet:
        absolute_url = settings.SITE_URL + self.get_absolute_url()
        utils.tweet(self.title.encode(), absolute_url)
        self.is_modified = True
    super(Post, self).save(*args, **kwargs)
    try:
        ping_google(sitemap_url="/sitemap.xml")
    except Exception:
        # Broad 'except Exception' because we could get a variety
        # of HTTP-related exceptions.
        pass
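# Why date_published must be set before get_absolute_url(): a date-based URL like
# the hypothetical sketch below raises AttributeError on None. The URL name, the
# kwargs, and the slug field are assumptions, not the post's actual routing.
from django.urls import reverse

def get_absolute_url(self):
    # Reverse a date-based detail URL; this fails if self.date_published is None.
    return reverse(
        "post-detail",
        kwargs={
            "year": self.date_published.year,
            "month": self.date_published.month,
            "slug": self.slug,
        },
    )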
def main():
    # One counter slot per teacher entry.
    n = len(data_teacher)
    prev_Y = [0] * n
    now_Y = [0] * n
    diff_Y = [0] * n
    incr_Y = [0] * n
    decr_Y = [0] * n

    # Fetch the previous and the current HTML data.
    prev_student_deploy_list = []
    student_deploy_list = []
    parseHtml(getPrevLog(THIS_DIRECTORY + 'log_student_deploy.txt'), prev_student_deploy_list)
    parseHtml(get_student_deploy.get_html(), student_deploy_list)

    # Count the number of applicants in each snapshot.
    personNumber(prev_student_deploy_list, prev_Y)
    personNumber(student_deploy_list, now_Y)

    # Compute the differences between the previous and current counts.
    setDiff_Y(prev_Y, now_Y, diff_Y)
    setIncrAndDect_Y(diff_Y, incr_Y, decr_Y)

    # Did any value change since the last run?
    flag = changeValue(incr_Y, decr_Y)
    if flag:
        # Debug output for each list.
        print("pre_person :", prev_Y)
        print("now_person :", now_Y)
        print("----> incr :", incr_Y)
        print("----> decr :", decr_Y)

        # Set the graph colors.
        setGraphColor(now_Y)
        # Draw the graph and tweet the saved image.
        savename = drawGraph(prev_Y, now_Y, diff_Y, incr_Y, decr_Y)
        tweet(savename)
    else:
        print("====>( No Change Value )")
    return
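# A minimal sketch of the diff helpers called above; the originals are not shown,
# so these bodies are assumptions inferred from the call sites and names.
def setDiff_Y(prev_Y, now_Y, diff_Y):
    # Element-wise change since the previous snapshot.
    for i in range(len(now_Y)):
        diff_Y[i] = now_Y[i] - prev_Y[i]

def setIncrAndDect_Y(diff_Y, incr_Y, decr_Y):
    # Split each change into its increase and decrease components.
    for i, d in enumerate(diff_Y):
        incr_Y[i] = d if d > 0 else 0
        decr_Y[i] = -d if d < 0 else 0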
def brew(coffee_type, db):
    if bottle.request.headers.environ.get("REMOTE_ADDR") != "127.0.0.1":
        return json_return(403, "Can only start a brew from the monitor host - sorry.")

    data = db.execute("select id, name from coffees")
    coffees = {row["id"]: row["name"] for row in data}
    if coffee_type not in coffees:
        return json_return(403, "Invalid coffee type.")

    row = db.execute("select dts, coffee from raw_log order by dts desc").fetchone()
    now = datetime.datetime.now()
    if row:
        dt = datetime.datetime.strptime(row["dts"], "%Y-%m-%d %H:%M:%S")
        delta = now - dt
        # total_seconds() handles deltas longer than a day; .seconds would wrap.
        if delta.total_seconds() < BREW_TIME:
            if coffee_type != row["coffee"]:
                # Parameterized query (sqlite-style placeholders) instead of
                # string interpolation, to avoid SQL injection.
                db.execute("update raw_log set coffee=? where dts=?", (coffee_type, row["dts"]))
                return json_return(200, "Brew type changed. Estimated completion: %s" % estimate_complete(dt))
            return json_return(200, "Brew already in progress. Estimated completion: %s" % estimate_complete(dt))

    db.execute("insert into raw_log (coffee) values (?)", (coffee_type,))
    eta = estimate_complete(now)
    ret_str = "Brew started: %s. Estimated completion: %s" % (coffees[coffee_type], eta)
    tweet(ret_str)
    publish(
        json.dumps(
            {
                "human": ret_str,
                "type": "start",
                "coffee": coffees[coffee_type],
                "start": now.strftime("%Y%m%d%H%M%S"),
                "estimate": eta.strftime("%Y%m%d%H%M%S"),
            }
        )
    )
    # Fire-and-forget follow-up notification once BREW_TIME has elapsed.
    os.system("(%s/followup.sh %d %s)&" % (os.path.dirname(__file__), BREW_TIME, coffees[coffee_type]))
    return json_return(200, ret_str)
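# A plausible sketch of the estimate_complete() helper used above: completion is
# the start time plus BREW_TIME. This is an assumption inferred from the call
# sites (BREW_TIME is also passed to followup.sh as a sleep delay in seconds),
# not the original code.
import datetime

def estimate_complete(start):
    # BREW_TIME is assumed to be in seconds.
    return start + datetime.timedelta(seconds=BREW_TIME)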
#!/usr/bin/env python
from utils import tweet, publish
from datetime import datetime
from time import sleep
import json
import sys

try:
    cmd = sys.argv.pop(0)
    delay = int(sys.argv.pop(0))
    coffee = sys.argv.pop(0)
except (IndexError, ValueError):
    print("usage: %s delay coffee" % cmd)
    sys.exit(1)

# Wait for the brew to finish, then announce it.
sleep(delay)
msg = "A pot of %s is ready for you. [%s]" % (coffee, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
tweet(msg)
publish(
    json.dumps({"type": "complete", "coffee": coffee, "end": datetime.now().strftime("%Y%m%d%H%M%S"), "human": msg})
)
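# A minimal sketch of the publish() helper imported from utils, assuming a Redis
# pub/sub channel; the transport and the channel name are assumptions, not the
# project's actual implementation.
import redis

def publish(payload):
    # Push the JSON payload to any listening dashboards.
    redis.Redis().publish("coffee", payload)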
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# -- generate data set
file_list = glob.glob('./data/*.json')
raw_datasets = DataProcessor(file_list).generate_datasets()

# -- tokenize
corpus = Corpus(raw_datasets)
datasets = corpus.tokenize_all()
vocab_size = len(corpus.dictionary)

# -- batch data
train_data = utils.div_to_batch(datasets[0], args.batch_size).to(device)
val_data = utils.div_to_batch(datasets[1], args.batch_size).to(device)
test_data = utils.div_to_batch(datasets[2], args.batch_size).to(device)

# -- define model
model = RNN(vocab_size, args.embed_size, args.hidden_size, args.num_layers, args.dropout).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

# -- train and validate model
trainer = Trainer(model, train_data, val_data, test_data, optimizer, criterion, vocab_size, args)
trainer.train()
trainer.test()

# -- generate tweets
utils.tweet(corpus, 100, device)
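# A plausible sketch of utils.div_to_batch(), following the standard "batchify"
# idiom from PyTorch word-language-model examples; the original body is not
# shown, so this is an assumption inferred from the call sites.
def div_to_batch(data, batch_size):
    # Trim off tokens that would not fill a whole column, then reshape so each
    # column of the result is one contiguous stream of the corpus.
    n_batch = data.size(0) // batch_size
    data = data.narrow(0, 0, n_batch * batch_size)
    return data.view(batch_size, -1).t().contiguous()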
header = f'[{store_name}, {store_city}, {store_region}]'
slot_id = f'{header}_{details}'
index.setdefault(store_name, [])
if details not in index[store_name]:
    new_slots += 1
    index[store_name].append(details)
    print(f'TimeSlot Available: {details}')

# tweet or clean up
# if new_slots == 1:
#     # get the most recently added slot
#     details = index[store_name][-1]
#     tweet(header, new_slots, store_city, details)
if new_slots > 0:
    tweet(header, new_slots, store_city)
    write_store(store_city, full_slots)
elif counter == 0:
    write_store(store_city, full_slots)
elif full_slots == 0:
    if read_store(store_city) != 0:
        write_store(store_city, full_slots)
        index[store_name] = []
else:
    # On a non-200 response, re-authenticate.
    print(
        f'Error: {res.status_code} response. Trying to re-authenticate'
    )
    headers = authy.authenticate()
except Exception as e:
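# A minimal sketch of the read_store()/write_store() helpers assumed above,
# persisting the last seen slot count per city to a small JSON file. The file
# name and layout are assumptions, not the scraper's actual storage.
import json
import os

STORE_FILE = 'store_counts.json'  # hypothetical path

def read_store(store_city):
    # Return the last recorded slot count for a city (0 if never seen).
    if not os.path.exists(STORE_FILE):
        return 0
    with open(STORE_FILE) as f:
        return json.load(f).get(store_city, 0)

def write_store(store_city, full_slots):
    # Record the current slot count for a city.
    counts = {}
    if os.path.exists(STORE_FILE):
        with open(STORE_FILE) as f:
            counts = json.load(f)
    counts[store_city] = full_slots
    with open(STORE_FILE, 'w') as f:
        json.dump(counts, f)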