def find_neighbors(NODE_ID, inactive_node):
    logger.critical("Node %s disconnected. Looking up neighbors for recovery", NODE_ID)
    neighbors = inactive_node['neighbors'].copy()
    active_neighbors = []
    for neighbor in neighbors:
        if int(neighbor) in userlist:
            neighbor_node = userlist[int(neighbor)]
            if not neighbor_node['secondary_ip']:
                active_neighbors.append(neighbor)
    if active_neighbors:
        elected_id = active_neighbors[0]
        elected_node = userlist.get(int(elected_id))
        logger.info("Found neighbors: %s. Assigning %s for recovery",
                    active_neighbors, elected_id)
        room = elected_node['sid']
        emit("recover", {
            'disconnected_node': NODE_ID,
            'recovery_node': elected_id,
            'ip': inactive_node['primary_ip'],
            'netmask': inactive_node['primary_netmask'],
            'active_neighbors': active_neighbors
        }, room=room)
    else:
        logger.critical("No active neighbors found")
def on_status(self, status):
    if not status.retweeted and status.user.screen_name != "thedebugducky":
        try:
            status.retweet()
            logger.info(
                f"RT successful from @{status.user.screen_name}. Status ID: {status.id}.")
        except tweepy.TweepError as e:
            if e.args[0][0]['code'] == 327:
                logger.info(
                    f"Tweet from @{status.user.screen_name} already RT'd.")
            else:
                logger.error(f"Error {e} on listener.", exc_info=True)
def create_api():
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth,
                     wait_on_rate_limit=True,
                     wait_on_rate_limit_notify=True)
    try:
        api.verify_credentials()
    except Exception as e:
        logger.error(f"Error {e} creating API.", exc_info=True)
        raise  # without re-raising, the success message below would log even on failure
    logger.info("API successfully created.")
    return api
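# create_api assumes API_KEY, API_SECRET, ACCESS_TOKEN and ACCESS_TOKEN_SECRET
# exist at module scope; reading them from environment variables is one common
# way to supply them (an assumption, not something the source shows):
import os

API_KEY = os.environ["API_KEY"]
API_SECRET = os.environ["API_SECRET"]
ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["ACCESS_TOKEN_SECRET"]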
def job_function():
    for element in SUPPORTED_INSTRUMENTS:
        instrument = element["instrument"]
        period = element["period"]
        point = element["point"]
        last_learn_time = read_time(LAST_LEARN_FILE, instrument, period)
        if ((datetime.now() - last_learn_time).total_seconds() / 60 / 60) > LEARN_PERIOD_HOURS:
            filename = datasets.get_daily_dataset_file(instrument, period)
            if datasets.daily_dataset_exists(instrument, period):
                logger.info("job_function: Start learning %s %s", instrument, period)
                x, y = model.read_data([filename], instrument, period, point)
                model.train_model(x, y, instrument, period, verbose=0)
                write_time(LAST_LEARN_FILE, instrument, period)
            else:
                logger.info("job_function: Daily dataset %s not found. Waiting...",
                            str(filename))
def restore_node(json):
    if json['status']:
        node_id = json['NODE_ID']
        secondary_ip = json['secondary_ip']
        secondary_netmask = json['secondary_netmask']
        userlist[node_id]['secondary_ip'] = secondary_ip
        userlist[node_id]['secondary_netmask'] = secondary_netmask
        logger.info(
            "Recovery Success by node %s with new Virtual IP as: %s. Updating records...",
            node_id, userlist[node_id]['secondary_ip'])
        restored_node = json['restore_node']
        recovery_time_delta = time.time() - recovery_init_time[restored_node]
        logger.warning("Total time taken for restoring IP: %.2f seconds",
                       recovery_time_delta)
        recovery_node_mapper.pop(int(restored_node))
    else:
        logger.critical("IP restoration attempts failed")
def train_model(x, y, instrument, period, verbose=2):
    # Remove any models left over from a previous training run.
    for i in range(MAX_MODELS_COUNT):
        model_file = create_model_filename(instrument, period, index=i)
        if isfile(model_file):
            remove(model_file)
    logger.info("PREVIOUS MODELS REMOVED")
    temp_model_file = create_model_filename(instrument, period, temp=True)
    if isfile(temp_model_file):
        remove(temp_model_file)
    x = np.expand_dims(x, axis=3)
    x_train, x_valid, y_train, y_valid = train_test_split(
        x, y, test_size=0.05, random_state=2020)
    # A first fit establishes the validation-loss bar that kept models must beat.
    history = fit_model(x_train, y_train, x_valid, y_valid, temp_model_file, verbose)
    val_loss_limit = round(min(history.history["val_loss"]), 2) - 0.01
    # Retrain until MAX_MODELS_COUNT models have beaten the bar.
    models_count = 0
    while models_count < MAX_MODELS_COUNT:
        history = fit_model(x_train, y_train, x_valid, y_valid, temp_model_file, verbose)
        for val_loss in history.history["val_loss"]:
            if val_loss < val_loss_limit:
                rename(temp_model_file,
                       create_model_filename(instrument, period, index=models_count))
                models_count += 1
                break
    logger.info("******* MODEL %s %s TRAINED SUCCESS ********", instrument, period)
    K.clear_session()
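# create_model_filename is referenced above and in test_model but not shown in
# the source; a plausible sketch, purely an assumption about its shape:
def create_model_filename(instrument, period, index=0, temp=False):
    # e.g. models/EURUSD_M5_0.h5 or models/EURUSD_M5_temp.h5 (paths hypothetical)
    suffix = "temp" if temp else str(index)
    return "models/{}_{}_{}.h5".format(instrument, period, suffix)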
def append_instrument():
    instrument, period = get_base_params()
    with supported_instruments_lock:
        SUPPORTED_INSTRUMENTS.append({
            "instrument": instrument,
            "period": period,
            "point": request.json["point"]
        })
    logger.info("append_instrument: SUPPORTED_INSTRUMENTS: %s",
                str(SUPPORTED_INSTRUMENTS))
    return jsonify({"status": "success", "desc": "success"})
def remove_instrument():
    instrument, period = get_base_params()
    for_remove = None
    for element in SUPPORTED_INSTRUMENTS:
        if element["instrument"] == instrument and element["period"] == period:
            for_remove = element
    with supported_instruments_lock:
        if for_remove is not None:
            SUPPORTED_INSTRUMENTS.remove(for_remove)
    logger.info("remove_instrument: SUPPORTED_INSTRUMENTS: %s",
                str(SUPPORTED_INSTRUMENTS))
    return jsonify({"status": "success", "desc": "success"})
def datacheck():
    instrument, period = get_base_params()
    last_data_update_time = read_time(LAST_DATA_UPDATE_FILE, instrument, period)
    data_is_actual = ((datetime.now() - last_data_update_time).total_seconds()
                      / 60) <= DATA_UPDATE_PERIOD_MINUTES
    if datasets.daily_dataset_exists(instrument, period) and data_is_actual:
        answer = "NONE"
    else:
        answer = "UPLOAD"
    logger.info("datacheck: %s", str(answer))
    return jsonify({
        "status": "success",
        "desc": "success",
        "answer": answer,
        "train_length": REQUESTED_DATA_LENGTH,
        "predict_length": model.FRAME_LENGTH
    })
def welcome_call(json):
    json['sid'] = request.sid
    connection_id = int(json['NODE_ID'])
    sid_mapper.update({request.sid: connection_id})
    if connection_id in inactive_list.keys():
        # The node was previously marked inactive: treat this as a rejoin.
        inactive_list.pop(connection_id)
        userlist[connection_id] = json
        logger.info('Node {0} has rejoined with session ID: {1}'.format(
            connection_id, request.sid))
        if connection_id in recovery_node_mapper:
            logger.warning('Attempting restoration of IP...')
            recovery_init_time[connection_id] = time.time()
            recovery_node_id = recovery_node_mapper[connection_id]
            recovery_node = userlist[int(recovery_node_id)]
            room = recovery_node['sid']
            emit("restore", {'restore_node': connection_id}, room=room)
        return
    logger.info('Node {0} has joined with session ID: {1}'.format(
        connection_id, request.sid))
    userlist[connection_id] = json
    return
def favorite_and_retweet_user_status(api, user):
    for status in api.user_timeline(screen_name=user, count=1):
        if not status.favorited:
            try:
                status.favorite()
                logger.info(
                    f"@{user}'s latest status has been successfully liked.")
            except Exception as e:
                logger.error(
                    f"{e} while attempting to favorite @{user}'s latest status.",
                    exc_info=True)
        if not status.retweeted:
            try:
                status.retweet()
                logger.info(
                    f"@{user}'s latest status has been successfully retweeted.")
            except Exception as e:
                logger.error(
                    f"{e} while attempting to retweet @{user}'s latest status.",
                    exc_info=True)
def update_node(json):
    if 'secondary_ip' in json:
        # The recovery node reports success: record the virtual IP it now holds.
        node_id = json['NODE_ID']
        secondary_ip = json['secondary_ip']
        secondary_netmask = json['secondary_netmask']
        userlist[node_id]['secondary_ip'] = secondary_ip
        userlist[node_id]['secondary_netmask'] = secondary_netmask
        logger.info(
            "Recovery Success by node %s with new Virtual IP as: %s. Updating records...",
            node_id, userlist[node_id]['secondary_ip'])
        disconnected_node = json['disconnected_node']
        recovery_time_delta = time.time() - recovery_init_time[disconnected_node]
        logger.warning("Total time taken for node recovery: %.2f seconds",
                       recovery_time_delta)
        recovery_node_mapper.update({disconnected_node: node_id})
    else:
        # Recovery failed: drop the failed candidate and hand the job to the
        # next active neighbor, if any remain.
        disconnected_node = json['disconnected_node']
        active_neighbors = json['active_neighbors']
        recovery_ip = json['ip']
        active_neighbors.remove(str(json['recovery_node']))
        logger.warning("Recovery failed. Refined active list is: %s",
                       active_neighbors)
        if active_neighbors:
            recovery_node_id = active_neighbors[0]
            logger.info("Assigning %s for recovery", recovery_node_id)
            recovery_node = userlist[int(recovery_node_id)]
            room = recovery_node['sid']
            emit("recover", {
                'disconnected_node': disconnected_node,
                'recovery_node': recovery_node_id,
                'ip': recovery_ip,
                'netmask': json['netmask'],
                'active_neighbors': active_neighbors
            }, room=room)
        else:
            logger.critical("All failover recovery attempts failed")
def reply_to_mentions(api, since_id):
    recent_since_id = since_id
    replies = [
        "Hi there! The debugging ducky here. How can I help you? 8)",
        "Hiya! How's it going? :)",
        "Did you just call The Debug Ducky?",
        "Hey there! How can I help?",
        "Quack, quack!!!"
    ]
    for status in tweepy.Cursor(api.mentions_timeline, since_id=since_id).items():
        random_idx = randrange(len(replies))
        recent_since_id = max(status.id, recent_since_id)
        if "this is cs50" in status.text.lower():
            logger.info(
                f"Replying 'This is CS50!' to @{status.user.screen_name}...")
            try:
                api.update_status(
                    status=f"This is CS50! @{status.user.screen_name}",
                    in_reply_to_status_id=status.id)
                logger.info(
                    f"Replied 'This is CS50!' to @{status.user.screen_name}.")
            except Exception as e:
                logger.info(f"{e} while replying 'This is CS50!'.")
            return recent_since_id
        elif "hello" in status.text.lower():
            logger.info(f"Replying to @{status.user.screen_name}...")
            try:
                api.update_status(
                    status=f"{replies[random_idx]} @{status.user.screen_name}",
                    in_reply_to_status_id=status.id)
                logger.info(
                    f"Replied {replies[random_idx]} to @{status.user.screen_name}.")
            except Exception as e:
                logger.error(f"{e} while replying.", exc_info=True)
            return recent_since_id
    return recent_since_id
def predict():
    instrument, period = get_base_params()
    last_learn_time = read_time(LAST_LEARN_FILE, instrument, period)
    if ((datetime.now() - last_learn_time).total_seconds() / 60 / 60) > LEARN_PERIOD_HOURS:
        logger.info("predict: model %s %s is outdated", instrument, period)
        answer = "NONE"
    else:
        rates = get_rates()
        logger.info("input data from %s to %s",
                    rates[0][0].strftime("%Y.%m.%d %H:%M:%S"),
                    rates[-1][0].strftime("%Y.%m.%d %H:%M:%S"))
        point = None
        for element in SUPPORTED_INSTRUMENTS:
            if element["instrument"] == instrument and element["period"] == period:
                point = element["point"]
        with predict_lock:
            trend = model.predict_trend(rates[:, [1, 2, 3, 4, 5]],
                                        instrument, period, point)
        if trend == "UP":
            answer = "OP_BUY"
        elif trend == "DOWN":
            answer = "OP_SELL"
        else:
            answer = "NONE"
    logger.info("predict: %s", str(answer))
    return jsonify({"status": "success", "desc": "success", "answer": answer})
# (the opening of this route is truncated in the source; it builds the
# registered_users mapping returned below)
        'active': userlist,
        'inactive': inactive_list,
        'recovery_mapper': recovery_node_mapper
    }
    return jsonify(registered_users)


@app.route('/')
def monitor():
    context = {
        'active': userlist,
        'inactive': inactive_list,
        'recovery_mapper': recovery_node_mapper
    }
    context = jsonify(context)
    return render_template('index.html', context=context)


@app.route('/clear')
def clear_lists():
    flask_logger = logging.getLogger('werkzeug')
    flask_logger.error("Deleting inactive Nodes")
    inactive_list.clear()
    return redirect(url_for("index"))


if __name__ == "__main__":
    gunicorn_logger = logging.getLogger('gunicorn.error')
    logger.handlers = gunicorn_logger.handlers
    logger.info("recovery server active")
    socketio.run(app)
def save_rates(instrument, period, rates):
    logger.info("saved %s rates", str(len(rates)))
    frame = pd.DataFrame(
        data=rates,
        columns=['time', 'open', 'high', 'low', 'close', 'volume'])
    frame.to_csv(get_daily_dataset_file(instrument, period), index=False)
def test_model(data_file, instrument, period, point, plot_results=False):
    scaler_file = create_scaler_filename(instrument, period)
    models = []
    for i in range(MAX_MODELS_COUNT):
        models.append(
            load_model(create_model_filename(instrument, period, index=i),
                       custom_objects={'f1': f1}))
    scaler = joblib.load(scaler_file)
    data = pd.read_csv(data_file)
    data = data.drop(columns=DROP_COLUMNS)
    true_predicts = 0
    false_predicts = 0
    # lb holds one label per row: 1 = UP, 0 = DOWN, 2 = no signal, -1 = skipped.
    lb = np.zeros(len(data))
    lb.fill(-1)
    for i in range(FRAME_LENGTH + 2, len(data)):
        frame = []
        for j in range(i - FRAME_LENGTH - 2, i - 2):
            frame.append(data.iloc[j])
        frame = np.asarray(frame)
        trend = predict_trend(frame, instrument, period, point, models, scaler, False)
        # Closing prices (in points) around the prediction bar.
        close0 = round(data.iloc[i - 3][3] / point)
        close1 = round(data.iloc[i - 2][3] / point)
        close2 = round(data.iloc[i - 1][3] / point)
        close3 = round(data.iloc[i][3] / point)
        if trend == "UP":
            lb[i] = 1
            if close1 - close0 > PRICE_DELTA * 0.5 or \
                    close2 - close0 > PRICE_DELTA * 0.5 or \
                    close3 - close0 > PRICE_DELTA * 0.5:
                true_predicts += 1
            else:
                false_predicts += 1
        elif trend == "DOWN":
            lb[i] = 0
            if close0 - close1 > PRICE_DELTA * 0.5 or \
                    close0 - close2 > PRICE_DELTA * 0.5 or \
                    close0 - close3 > PRICE_DELTA * 0.5:
                true_predicts += 1
            else:
                false_predicts += 1
        else:
            lb[i] = 2
    logger.info("***********************************")
    logger.info("* DATA FILE: " + str(data_file) + " *")
    logger.info("***********************************")
    logger.info("true_predicts: " + str(true_predicts))
    logger.info("false_predicts: " + str(false_predicts))
    logger.info("predicts rate: " +
                str(true_predicts / (1 if false_predicts == 0 else false_predicts)))
    if plot_results:
        plt.figure(figsize=(50, 20))
        for i in range(len(lb)):
            if lb[i] == 0:
                plt.axvline(i, 0, 1.5, color='red')
            elif lb[i] == 1:
                plt.axvline(i, 0, 1.5, color='green')
        plt.plot(data.iloc[:, [3]].values, color='black', marker='o')
        plt.savefig("res.png")
        plt.close()
    return true_predicts, false_predicts
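# Usage sketch for test_model; the dataset path, instrument, period and point
# size below are hypothetical placeholders, not values from the source:
#
#     true_count, false_count = test_model(
#         "datasets/EURUSD_M5.csv", "EURUSD", "M5",
#         point=0.00001, plot_results=True)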
if __name__ == '__main__':
    # TODO: add scheduled execution
    while True:
        try:
            ser = serial.Serial('/dev/ttyAMA0', 115200, timeout=1)
            gsm = GSM(ser)
            mail_sender = MailSender()
            while True:
                msgs = gsm.read_messages()
                gsm.delete_messages()
                for i in decode_encoded_messages(msgs):
                    try:
                        logger.info(f'Received SMS: {i}, writing to sms.db')
                        with open('sms.db', 'a') as f:
                            f.write('%s\n' % i)
                        logger.info('Wrote to db successfully, sending mail')
                        mail_sender.send(
                            'SMS notification',
                            '%s\r\n%s\r\n%s\r\n' % (i[0], i[1], i[2]))
                        sys.stdout.flush()
                        time.sleep(2)
                    except Exception:
                        logger.error(
                            f"Error while forwarding SMS, skipping it: {i}",
                            exc_info=True)
                time.sleep(5)
        # The source's outer try has no visible handler; a minimal except is
        # assumed here so the loop can recover from serial failures.
        except Exception:
            logger.error("GSM loop failed, reconnecting", exc_info=True)
        time.sleep(10)
def main(keywords):
    api = create_api()
    myStreamListener = MyStreamListener(api)
    myStream = tweepy.Stream(api.auth, myStreamListener)
    logger.info("Connecting to Stream...")
    myStream.filter(track=keywords,
                    languages=['en'],
                    is_async=True,
                    stall_warnings=True)
    logger.info("Connected to Stream.")
    since_id = get_latest_mention_id(api) + 1
    while True:
        logger.info("Checking mentions...")
        since_id = reply_to_mentions(api, since_id)
        logger.info("Checking @davidjmalan's tweets...")
        favorite_and_retweet_user_status(api, "davidjmalan")
        logger.info("Checking @cs50's tweets...")
        favorite_and_retweet_user_status(api, "cs50")
        logger.info("Sleeping for 1 hour...")
        sleep(60 * 60)
    logger.info("main has returned.")  # unreachable: the loop above never exits
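# Hypothetical entry point; the tracked keywords are placeholders, not values
# from the source:
if __name__ == "__main__":
    main(["rubber duck", "debugging"])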
def multiply(x, y):
    return x * y


def divide(x, y):
    try:
        result = x / y
    except ZeroDivisionError:
        logger.error("Tried to divide by zero")
    else:
        return result


num1 = 5
num2 = 0

# add() and subtract() are defined earlier in the source.
add_result = add(num1, num2)
sub_result = subtract(num1, num2)
mul_result = multiply(num1, num2)
div_result = divide(num1, num2)

if add_result == 15:
    logger.debug("Addition works perfectly.")
elif add_result < 0:
    # This check must come before the < 15 check, or it can never be reached.
    logger.info("Gosh, you got to test your function.")
elif add_result < 15:
    logger.warning("It's a horrible add function.")
else:
    logger.critical("I have no idea what's going on.")
def __init__(self, first, last):
    self.first = first
    self.last = last
    # Assumes the enclosing Employee class defines a `fullname` property
    # (not shown in this snippet).
    logger.info('Employee created: {}'.format(self.fullname))
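# Every snippet above logs through a module-level `logger` configured
# elsewhere; a minimal sketch of one plausible setup (handler, format and
# file name are assumptions, not from the source):
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('app.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)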