def __init__(self):
    """Initialise the state handler (process-wide singleton).

    The ``loadingdone`` attribute only exists after the first successful
    run, so repeated construction becomes a no-op.

    Fixes: the original used a bare ``except:`` as the singleton guard
    (swallowing any error raised during the check) and compared with
    ``== None`` instead of ``is None``.
    """
    # Singleton guard: attribute exists only after the first initialisation.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    self.openhab = pf_openhab()
    self.name = "state_handler"
    self.database = main_database()
    self.setting_handler = setting_handler()
    self.message_handler = message_handler()
    self.logging = Logging()
    settings_c = Settings()
    self.host = settings_c.get_setting("main", "hostname")
    self.port = settings_c.get_setting("main", "port")
    self.http = urllib3.PoolManager()
    # Named timeouts; -1 presumably means "disabled" — TODO confirm in update_timeout.
    self.timeouts = {}
    self.update_timeout("screensaver", 60)
    self.update_timeout("message", -1)
    self.screentrigger = 0
    self.screen_state = True
    # Album settings are optional: fall back to "none" when missing/invalid.
    try:
        self.im_size = settings_c.get_setting("album", "image_size")
        self.album_url = settings_c.get_setting("album", "random_picture_url")
    except Exception as e:
        self.logging.error("Invalid album settings: " + str(e),
                           location=self.name)
        self.album_url = "none"
    try:
        self.album_info_url = settings_c.get_setting("album",
                                                     "picture_info_url")
        if self.album_info_url is None:
            raise Exception("No data")
    except Exception:
        self.logging.warn("No album info url", location=self.name)
        self.album_info_url = "none"
def __init__(self):
    """Connect to the configured MQTT broker (process-wide singleton).

    Fixes: the original used a bare ``except:`` as the singleton guard;
    this version checks for the marker attribute explicitly.
    """
    # Singleton guard: skip re-initialisation on repeated construction.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    self.started = True
    self.logging = Logging()
    self.name = "mqtt"
    self.settings_c = Settings()
    try:
        self.server = self.settings_c.get_setting("mqtt", "server")
        self.port = int(self.settings_c.get_setting("mqtt", "port"))
        self.timeout = int(self.settings_c.get_setting("mqtt", "timeout"))
    except Exception as e:
        # NOTE(review): execution deliberately continues after this error,
        # so connect() below will fail on the missing attributes — confirm
        # whether aborting here would be preferable.
        self.logging.error("Configuration not correct, check settings",
                           location=self.name)
    self.listeners = []
    self.client = mqtt_cl.Client()
    self.client.on_connect = self.on_connect
    self.client.on_message = self.on_message
    self.client.connect(self.server, self.port, self.timeout)
    # Background network loop thread for the paho client.
    self.client.loop_start()
    self.logging.info("Connected to mqtt broker at: " + self.server,
                      location=self.name)
def __init__(self, sym, s_date, e_date, path=None):
    """Create a loader for *sym* over the window [s_date, e_date].

    When no *path* is given, the symbol's data is fetched immediately
    and cached in ``d_data`` keyed by the symbol's name.
    """
    self.symbol = sym
    self.d_data = {}
    self.log = Logging()
    if not path:
        fetched = Symbol(sym, s_date, e_date)
        self.d_data[fetched.name] = fetched
def __init__(self):
    """Set up the widget handler and resolve the template directory."""
    self.name = "widget_handler"
    self.openhab = pf_openhab()
    self.logging = Logging()
    # Templates live one level above this module's directory.
    here = os.path.dirname(os.path.abspath(__file__))
    self.template_dir = here + "/../templates/"
    # Cache of dynamically imported widget classes, keyed per widget.
    self.imported_widget_classes = {}
def __init__(self, params=None, debug=0):
    """Initialise the run container.

    Args:
        params: optional dict of run parameters (defaults to empty).
        debug: debug verbosity level.

    Fixes: ``params={}`` was a shared mutable default argument, and
    ``self.debug = 0`` silently discarded the *debug* argument.
    """
    # Avoid the shared-mutable-default pitfall; behaviour for callers
    # passing a dict (or nothing) is unchanged.
    self.parameters = {} if params is None else params
    self.debug = debug  # was hard-coded to 0, ignoring the parameter
    self.num_params = len(self.parameters)
    self.info = {}
    self.results = {}
    self.logger = Logging()
    self.processor = Processor()
def __init__(self):
    """Initialise the task scheduler (process-wide singleton).

    Fixes: the original used a bare ``except:`` as the singleton guard.
    """
    # Singleton guard: attribute exists only after the first run.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    self.logging = Logging()
    self.scheduled_tasks = []
    # First log rotation four hours from now, then every 24 hours.
    self.add_task(self.logging, "log_rotate", (time.time() + 4 * 60 * 60),
                  period=24 * 60 * 60)
def __init__(self):
    """Wire the page handler to its collaborating singletons and load
    the host/openHAB connection settings."""
    self.name = "page_handler"
    self.openhab = pf_openhab()
    self.database = main_database()
    self.setting_handler = setting_handler()
    self.message_handler = message_handler()
    self.widgets_handler = widgets_handler()
    self.state_handler = state_handler()
    self.logging = Logging()

    cfg = Settings()
    self.host = cfg.get_setting("main", "hostname")
    self.port = cfg.get_setting("main", "port")
    self.enable_screen_control = cfg.get_setting("main",
                                                 "enable_screen_control")
    self.openhab_host = cfg.get_setting("main", "openhab_ip")
    self.openhab_port = cfg.get_setting("main", "openhab_port")
def __init__(self):
    """Initialise the Wunderground handler (process-wide singleton).

    Fixes: the original used a bare ``except:`` as the singleton guard.
    """
    # Singleton guard: 'doneloading' exists only after the first run.
    if hasattr(self, "doneloading"):
        return
    self.name = "Wunderground"
    self.doneloading = True
    self.settings_c = Settings()
    self.logging = Logging()
    try:
        self.apikey = self.settings_c.get_setting("wunderground", "apikey")
        self.updateperiod = int(
            self.settings_c.get_setting("wunderground", "updateperiod"))
        self.language = self.settings_c.get_setting("wunderground",
                                                    "language")
    except Exception as e:
        self.logging.error("Check your wunderground settings: " + str(e),
                           location="wunderground")
    # Per-location weather caches, filled on demand.
    self.locations = {}
def __init__(self, api_key, location, update=15, language="EN", fetch=False):
    """Prepare the request arguments for one Wunderground location.

    Args:
        api_key: Wunderground API key.
        location: location identifier to query.
        update: refresh period in minutes.
        language: response language code.
        fetch: when True, fetch the data immediately.
    """
    self.args = {
        "apikey": api_key,
        "location": location,
        "language": language,
        "fetch": -1,
        "sub": "fetch",
    }
    self.data_update = 0
    # Stored internally in seconds.
    self.update_time = update * 60
    self.logging = Logging()
    self.name = "Wunderground"
    if fetch:
        self.fetch_data()
def __init__(self):
    """Wire the item handler to its collaborators and load connection
    and feature-flag settings."""
    self.name = "item_handler"
    self.openhab = pf_openhab()
    self.database = main_database()
    self.setting_handler = setting_handler()
    self.page_handler = page_handler()
    self.message_handler = message_handler()
    self.logging = Logging()

    cfg = Settings()
    self.host = cfg.get_setting("main", "hostname")
    self.port = cfg.get_setting("main", "port")
    self.enable_clock = cfg.get_setting("main", "enable_clock")
    self.enable_album = cfg.get_setting("main", "enable_album")
    self.openhab_server = cfg.get_setting("main", "openhab_ip")
    self.openhab_port = cfg.get_setting("main", "openhab_port")

    self.http = urllib3.PoolManager()
    # Mutable runtime state.
    self.timeout_message_popup = 0
    self.saved_chart_periods = {}
    self.screentrigger = 0
def __init__(self, symbol, s_date=None, e_date=None):
    """Create a Symbol wrapper and, when both dates are given, fetch its
    historical data.

    Fixes: the two bare ``except:`` clauses are narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit propagate.
    """
    self.log = Logging()
    self.name = symbol
    self.created = datetime.datetime.utcnow()
    self.log.info("created {}".format(self.name))
    try:
        self.share = Share(symbol)
    except Exception:
        self.log.error("platform is offline or not connecting")
    if s_date and e_date:
        self.begin = s_date
        self.end = e_date
        try:
            # NOTE(review): Share(symbol) is constructed a second time
            # here (original behaviour kept) — confirm whether reusing
            # the first instance is safe.
            self.share = Share(symbol)
            self.data = self.share.get_historical(self.begin, self.end)
            self.log.refresh("{} data collected".format(self.name))
        except Exception:
            self.log.error("platform is offline or not connecting")
def __init__(self):
    """Initialise the openHAB connector (process-wide singleton).

    Fixes: the original used a bare ``except:`` as the singleton guard
    and compared with ``== None`` instead of ``is None``.
    """
    # Singleton guard: attribute exists only after the first run.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    self.logging = Logging()
    settings_c = Settings()
    self.openhab_server = settings_c.get_setting("main", "openhab_ip")
    self.openhab_port = settings_c.get_setting("main", "openhab_port")
    self.host = settings_c.get_setting("main", "hostname")
    self.port = settings_c.get_setting("main", "port")
    self.sitemap_name = settings_c.get_setting("main", "sitemap")
    # Fall back to the default sitemap when none is configured.
    if self.sitemap_name is None:
        self.sitemap_name = "main"
    self.resize_icons = settings_c.get_setting("main", "resize_icons")
    # Icon dimensions in pixels used when resizing.
    self.iconh = 64
    self.iconw = 64
    self.name = "pf_openhab"
    self.http = urllib3.PoolManager()
def __init__(self):
    """Initialise the message handler (process-wide singleton): popup and
    toast state, MQTT message listeners, and one-time welcome messages.

    Fixes: the original used a bare ``except:`` as the singleton guard.
    """
    # Singleton guard: attribute exists only after the first run.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    self.database = main_database()
    self.popupflag = False
    self.popupactive = False
    self.logging = Logging()
    self.setting_handler = setting_handler()
    self.setting_popup_timeout = self.setting_handler.get_setting(
        "message_timeout")
    self.page_handler = page_handler.page_handler()
    # Toast (transient on-screen notification) state.
    self.toast_flag = False
    self.toast_db = []
    self.toast_message = "none"
    self.toast_sender = "none"
    self.toast_received = 0
    self.mqtt = mqtt()
    settings_c = Settings()
    self.mqtt_topics = settings_c.get_setting("messaging",
                                              "mqtt_topics").split(",")
    for topic in self.mqtt_topics:
        self.mqtt.add_listener(topic, self, "received_mqtt_message")
    ##send information messages on first run
    if not self.database.data["settings"]["first_run_messages_flag"]:
        self.database.data["settings"]["first_run_messages_flag"] = True
        message = "On a black screen, clock or photoframe, click on the bottom left corner to return to the main screen. "
        self.database.data["messages"].append([
            time.time(), "HABframe", "Return from screensaver", message, False
        ])
        message = "Thank you for using HABframe!"
        self.database.data["messages"].append([
            time.time(), "HABframe", "welcome message", message, False
        ])
        self.database.save_datafile()
    # Reset the popup flag on every start-up (not only on first run).
    self.del_popup_flag()
def __init__(self):
    """Initialise the pickle-backed main database (singleton); create the
    data file with defaults when it does not exist, then load it.

    Fixes: the original used a bare ``except:`` as the singleton guard;
    the long run of per-key default assignments is now a single dict.
    """
    # Singleton guard: attribute exists only after the first run.
    if hasattr(self, "loadingdone"):
        return
    self.loadingdone = True
    settings_c = settings.Settings()
    self.logging = Logging()
    self.data_location = settings_c.get_setting("data", "location")
    PATH = os.path.dirname(os.path.abspath(__file__))
    self.datafile = PATH + "/../" + self.data_location + "/main_db.pckl"
    my_file = Path(self.datafile)
    if not my_file.is_file():
        # First run: create the file with default settings.
        self.logging.write("Creating database", level=1)
        self.data = {
            "last_saved": time.time(),
            "settings": {
                "main_page": 0,
                "items_per_page": 0,
                "sensors_per_page": 0,
                "screen_timeout": 1,
                "album_timeout": 0,
                "message_timeout": 0,
                "mouse": 0,
                "screen": 0,
                "frame": 0,
                "frame_info": 0,
                "frame_td": 0,
                "frame_display_time": 0,
                "toast_timeout": 0,
                "clock_type": 0,
                "chart_period": 0,
                "first_run_messages_flag": False,
            },
            "messages": [],
        }
        self.save_datafile()
    # Always (re)load from disk so self.data reflects the stored file.
    self.data = self.load_datafile()
import sys import time import datetime from bot import Bot from logger import Logging from parsing import parse # set logging log = Logging() log.sys() # read dotenv variables try: with open('.env', 'r') as handler: lines = [i.strip() for i in handler.readlines()] except FileNotFoundError as e: log.tell(str(e)) sys.exit() env = dict() for line in lines: items = line.split('=') env |= dict([items]) log.tell('Succesfuly read .env file') # init telegram bot try: bot = Bot(env['TELEGRAM_BOT_TOKEN'], env['TELEGRAM_ADMIN_ID'], env['TELEGRAM_CHAT_ID'])
from flask_cors import CORS

# Make the project's classes/ and data/ directories importable; this must
# happen BEFORE the project-local imports just below.
PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(PATH + "/classes")
sys.path.append(PATH + "/classes/general")
sys.path.append(PATH + "/data")
from HABframe_main import habframe_main
from settings import Settings
from logger import Logging

##load settings
settings_class = Settings()
port = settings_class.get_setting("main", "port")
main = habframe_main()
logging = Logging()
# Flask app serving HABframe's templates and static assets; CORS enabled
# for all origins.
app = Flask("HABFrame",
            template_folder=PATH + '/templates',
            static_folder=PATH + "/static")
CORS(app)


@app.route('/', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/<path:path>', methods=['GET', 'POST'])
def catch_all(path):
    """Forward every request path to the HABframe request processor."""
    env = get_env(request)
    # NOTE(review): ``key`` is computed but never used afterwards —
    # confirm whether get_key() has required side effects before removing.
    key = get_key(env)
    response = main.process_request(env)
    return format_response(response)
def main(FLAGS):
    """Train a DQN actor/critic pair on the configured environment.

    Builds the TF1 graph (actor and critic Q-networks, copy ops, loss and
    optimiser), then runs the training loop with epsilon-greedy action
    selection and replay-memory sampling.

    Fixes: checkpoint restore called the undefined name ``save`` — it now
    correctly calls ``saver.restore``.
    """
    logger = Logging()
    nn = network.Network()
    settings = Parameters()
    greed = Epsilon_Greedy()
    replay = Replay()

    # Placeholder for the batch of preprocessed observations.
    X_state = tf.placeholder(tf.float32,
                             shape=[
                                 None, settings.input_height,
                                 settings.input_width,
                                 settings.input_channels
                             ])
    # Actor picks actions; critic provides the training targets.
    actor_q_values, actor_vars = nn.q_network(X_state,
                                              name='q_networks/actor')
    critic_q_values, critic_vars = nn.q_network(X_state,
                                                name='q_networks/critic')
    # Ops copying the critic's weights into the actor.
    copy_ops = [
        actor_var.assign(critic_vars[var_name])
        for var_name, actor_var in actor_vars.items()
    ]
    copy_critic_to_actor = tf.group(*copy_ops)

    # Q-value of the action actually taken.
    X_action = tf.placeholder(tf.int32, shape=[None])
    q_value = tf.reduce_sum(critic_q_values *
                            tf.one_hot(X_action, settings.n_outputs),
                            axis=1,
                            keep_dims=True)

    # Target, cost, optimiser and bookkeeping.
    y = tf.placeholder(tf.float32, shape=[None, 1])
    cost = tf.reduce_mean(tf.square(y - q_value))
    global_step = tf.Variable(0, trainable=False, name='global_step')
    optimizer = tf.train.AdamOptimizer(settings.learning_rate)
    training_op = optimizer.minimize(cost, global_step=global_step)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    logger.info('finished setup')

    with tf.Session() as sess:
        # Resume from a checkpoint when one exists.
        if os.path.isfile(settings.checkpoint_path):
            saver.restore(sess, settings.checkpoint_path)  # was: save.restore (NameError)
        else:
            init.run()
        while True:
            step = global_step.eval()
            if step >= settings.n_steps:
                break
            settings.iteration += 1
            if settings.done:
                logger.info('uh oh hit the done!')
                settings.obs = settings.env.reset()
                # Manufactured 'fast forward' over the episode's first frames.
                for skip in range(settings.skip_start):
                    obs, reward, settings.done, info = settings.env.step(0)
                state = network.preprocess_observation(
                    obs, settings.mspacmancolor)
            # Vector of q values for the current state.
            q_values = actor_q_values.eval(feed_dict={X_state: [state]})
            # Epsilon-greedy choice of the next action.
            action = greed.e_greedy(q_values, step)
            obs, reward, settings.done, info = settings.env.step(action)
            next_state = network.preprocess_observation(
                obs, settings.mspacmancolor)
            # Record the transition in replay memory.
            replay.replay_memory.append(
                (state, action, reward, next_state, 1.0 - settings.done))
            state = next_state
            # Only train every training_interval iterations after warm-up.
            if settings.iteration < settings.training_start or settings.iteration % settings.training_interval != 0:
                continue
            X_state_val, X_action_val, rewards, X_next_state_val, continues = (
                replay.sample_memories(settings.batch_size))
            next_q_values = actor_q_values.eval(
                feed_dict={X_state: X_next_state_val})
            max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
            y_val = rewards + continues * settings.discount_rate * max_next_q_values
            training_op.run(feed_dict={
                X_state: X_state_val,
                X_action: X_action_val,
                y: y_val
            })
            # Periodically sync the actor with the critic and checkpoint.
            if step % settings.copy_steps == 0:
                copy_critic_to_actor.run()
            if step % settings.save_steps == 0:
                logger.info('saving settings of iteration %g' %
                            (settings.iteration))
                logger.results('action, reward, done, info: ')
                print(action, reward, settings.done, info)
                saver.save(sess, settings.checkpoint_path)
        logger.info('saving settings of iteration %g' % (settings.iteration))
        logger.results('action, reward, done, info:')
        print(action, reward, settings.done, info)
        logger.info('program finished')
def __init__(self):
    """Initialise the setting handler (process-wide singleton).

    Builds the table of user-facing settings (label + selectable values,
    with the stored current-value index appended), wires optional MQTT
    remote control, and validates the screen-control configuration.
    """
    # Singleton guard: 'loadingdone' only exists after the first run; the
    # bare except catches the AttributeError on first construction.
    try:
        if self.loadingdone:
            pass
    except:
        self.loadingdone = True
        self.name = "setting_handler"
        self.database = main_database()
        self.logging = Logging()
        self.http = urllib3.PoolManager()
        # Each entry: [display label, list of selectable values]; the
        # index of the current value is appended further below.
        self.settings = {}
        #self.settings["main_page"] = ["Front page", ["OpenHAB", "Weather"]]
        self.settings["screen_timeout"] = [
            "Screen timeout (min)", [1, 3, 5, 10, 15, 30, "off"]
        ]
        self.settings["album_timeout"] = [
            "Album timeout (hr)", [1, 3, 5, 10, 15, 30]
        ]
        self.settings["message_timeout"] = [
            "Message timeout (s)", [10, 30, 60, 120, 300]
        ]
        self.settings["toast_timeout"] = [
            "Info timeout (s)", [7, 10, 15, 20, 30]
        ]
        self.settings["mouse"] = ["Mouse button", ["on", "off"]]
        self.settings["items_per_page"] = [
            "Number of items per page", [6, 8, 9, 12]
        ]
        self.settings["sensors_per_page"] = [
            "Number of sensor items per page", [6, 8, 9, 12]
        ]
        self.settings["screen"] = ["Screen", ["on", "off"]]
        self.settings["frame"] = ["Photo / Clock", ["photoframe", "clock"]]
        self.settings["frame_info"] = [
            "Frame Info", ["none", "load", "album", "both"]
        ]
        self.settings["frame_td"] = [
            "Frame Time/Date", ["none", "clock", "date", "both"]
        ]
        self.settings["frame_display_time"] = [
            "Photo display time", ["short", "medium", "long", "extra long"]
        ]
        self.settings["clock_type"] = ["Clock type", ["digital", "analog"]]
        self.settings["chart_period"] = [
            "Default chart period",
            ["auto", "4 hours", "12 hours", "1 day", "3 days", "1 week"]
        ]
        # Append the stored index of the current value for every setting.
        for key, item in self.settings.items():
            value = self.database.data["settings"][key]
            self.settings[key].append(int(value))
        settings_c = Settings()
        self.enable_screen_control = settings_c.get_setting(
            "main", "enable_screen_control")
        # Optional MQTT remote control of settings; missing configuration
        # is tolerated.
        try:
            topics = settings_c.get_setting(
                "main", "mqtt_control_topic").split(",")
            self.mqtt = mqtt()
            for topic in topics:
                self.mqtt.add_listener(topic, self, "received_mqtt_message")
        except:
            self.logging.warn("Mqtt not configured for handling settings",
                              location="settings_handler")
        # Validate the configured screen-control mode; "url" and "cmd"
        # require extra settings, otherwise fall back to "black"/"off".
        if self.enable_screen_control in ["pi", "black"]:
            pass
        elif self.enable_screen_control == "url":
            try:
                self.screen_control_on_url = settings_c.get_setting(
                    "main", "screen_on_url")
                self.screen_control_off_url = settings_c.get_setting(
                    "main", "screen_off_url")
                self.settings["screen_control_on_url"] = [
                    0, 0, self.screen_control_on_url
                ]
                self.settings["screen_control_off_url"] = [
                    0, 0, self.screen_control_off_url
                ]
            except:
                self.logging.error(
                    "Add settings 'screen_on_url' and 'screen_off_url' for external url screen control",
                    location="settings_handler")
                self.enable_screen_control = "black"
        elif self.enable_screen_control == "cmd":
            try:
                self.screen_control_on_cmd = settings_c.get_setting(
                    "main", "screen_on_cmd")
                self.screen_control_off_cmd = settings_c.get_setting(
                    "main", "screen_off_cmd")
                self.settings["screen_control_on_cmd"] = [
                    0, 0, self.screen_control_on_cmd
                ]
                self.settings["screen_control_off_cmd"] = [
                    0, 0, self.screen_control_off_cmd
                ]
            except:
                self.logging.error(
                    "Add settings 'screen_on_cmd' and 'screen_off_cmd' for external command screen control",
                    location="settings_handler")
                self.enable_screen_control = "black"
        elif self.enable_screen_control != "off":
            self.logging.error(
                "Incorrect screen control enable settings, screen control is off",
                location="settings_handler")
            self.enable_screen_control = "off"
        # Expose the enable flags (read-only entries: no label/choices).
        self.settings["main_enable_clock"] = [
            0, 0, settings_c.get_setting("main", "enable_clock")
        ]
        self.settings["main_enable_album"] = [
            0, 0, settings_c.get_setting("main", "enable_album")
        ]
        # Force the frame mode to whichever feature is actually enabled.
        if settings_c.get_setting("main", "enable_album") == "0":
            self.logging.warn("Album not enabled, setting frame to clock",
                              location="settings_handler")
            self.__set_setting("frame", "clock")
        elif settings_c.get_setting("main", "enable_clock") == "0":
            self.logging.warn(
                "clock not enabled, setting frame to photoframe",
                location="settings_handler")
            self.__set_setting("frame", "photoframe")
        if settings_c.get_setting(
                "main", "enable_clock") == "0" and settings_c.get_setting(
                    "main", "enable_album") == "0":
            self.logging.warn(
                "Album and clock not enabled, turning off screen setting",
                location="settings_handler")
            self.__set_setting(
                "screen", "off"
            )  ##in this case only the screensaver determines if the screen is turned on or off
        self.settings["main_screen_control"] = [
            0, 0, self.enable_screen_control
        ]
import SentimentModel
from logger import Logging
from lib_import import *
from textClassification import allforClassification

# Insert all the sub-folder path under Libs folder into system path so that we can import different modules
# NOTE(review): these sys.path insertions run AFTER the imports above —
# confirm those modules do not depend on the Libs sub-folders.
current_working_path = os.path.dirname(os.path.abspath(__file__))
for directory in os.listdir(os.path.join(current_working_path, "Libs")):
    sys.path.insert(
        0, os.path.join(os.path.join(current_working_path, "Libs"),
                        directory))

#model reload
# Paths of the persisted classifier and its feature set.
file_name = os.path.join(current_working_path, 'scv_model_v3.sav')
feature_name = os.path.join(current_working_path, 'feature3.pkl')

#logger initiate
script_name = os.path.basename(__file__).split(".")[0]
Logger = Logging(current_working_path, script_name, "_log")
Logger.write_separator(2)

#get the output from the remainlist.txt file and use the outputlist as input
'''if we want to use split methods, edit this part'''
#CHANGE BELOW
'''df_remains = pd.read_csv('remainlist.csv')
inputcsvs_all = df_remains.remainlist.tolist()
totalnum = len(inputcsvs_all)
splitnum = int(totalnum/4)
inputcsvs = inputcsvs_all[ : splitnum]'''
#inputcsvs_full = glob.glob(inputParameters.read_directory+'*.csv')
#inputcsvs = [file.split('/')[-1] for file in inputcsvs_full]
#CHANGE ABOVE
# Hard-coded single-file input for this run.
totalnum = 1
inputcsvs = ['163610_163610_ice_markets_2016-02-01_2016-02-29.csv']
'''Decouple the logger: create several independent Logging instances.'''
from logger import Logging
import foo

# Renamed from ``logging`` to avoid shadowing the stdlib ``logging`` module.
main_logging = Logging('main', 'test.log')
logger = main_logging.logger
logger.info('main 收到一个请求')
foo.foo()
print(main_logging.th.baseFilename)
print(logger.name)
from logger import Logging

# Log file location and label; both are suffixed with the IDC identifier.
log_path = './logs/logger_test.log'
log_lable = "logger_test"
idc = "BJ"

log = Logging("%s.%s" % (log_path, idc), "%s.%s" % (log_lable, idc))

# Exercise each basic severity level once.
for level in ("info", "debug", "error", "warning"):
    getattr(log.logger, level)("%s test ok" % level)
#log.logger.log("log test ok")
log.logger.exception("exception test ok")
def __init__(self):
    """Create the object and attach its Logging instance."""
    self.logger = Logging()
from logger import Logging from symbol import Symbol from loader import Loader print "***** logging test *****" l = Logging() l.error("missing symbol") l.info("missing symbol") l.refresh("missing symbol") l.buy("missing symbol") l.profit("missing symbol") l.terminate("missing symbol") print "***** symbol test *****" s = Symbol('AMD') s.market_cap() print s.market_cap s.earnings_per_share() print s.eps print "***** loader test *****" load = Loader('AMD', '2016-11-01', '2016-11-21') amd = load.get_data('AMD') amd.book_value() print amd.book print load.data_to_csv('AMD')
from logger import Logging # NOTE: Logger() 为独立对象。name 参数不能带主程序的 logger name,会产生重复对象。 module_logging = Logging('foo', 'test.log') module_logger = module_logging.logger def foo(): module_logger.info(u'foo.foo 收到一个请求') if __name__ == '__main__': foo()
def __init__(self):
    """Attach the Logging instance used for this object's diagnostics."""
    # Fixed: removed a redundant trailing ``pass`` statement.
    self._logging = Logging()
from __future__ import print_function import argparse import sys import collections import time import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from blackbox import BlackBox from logger import Logging import random FLAGS = None logger = Logging() def deepnn(x): """deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784) for example Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout """ # Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images are # grayscale -- it would be 3 for an RGB image
def main(_):
    """Train a linear model on the credit-card fraud CSV and report a
    metric on a 10k-row test slice (TF1, pandas)."""
    # Import data
    CSV_FILE = '~/store/fraud_data/creditcard.csv'
    YCOL = 'Class'
    logger = Logging()
    proc = Processor()
    #TODO make this test suite
    data = proc.load_csv(CSV_FILE)
    data = proc.normalize_col(data, 'Amount')
    data = data.drop(['Time'], axis=1)
    X = proc.get_xvals(data, YCOL)
    y = proc.get_yvals(data, YCOL)
    #print data.describe()
    # Under-sampled variant of the data set (unused below — kept as-is).
    Xu, yu = proc.under_sample(data, YCOL)
    Xu_train, Xu_test, yu_train, yu_test = proc.cross_validation_sets(
        Xu, yu, .3, 0)
    X_train, X_test, y_train, y_test = proc.cross_validation_sets(X, y, .3, 0)
    # Linear model: 29 input features, single output.
    x = tf.placeholder(tf.float32, [None, 29])
    W = tf.Variable(tf.zeros([29, 1]))
    b = tf.Variable(tf.zeros([1]))
    # NOTE(review): ``y`` is rebound here from the label Series above to
    # the model-output tensor; the labels live on in y_train/y_test.
    y = tf.matmul(x, W) + b
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 1])
    # The raw formulation of cross-entropy,
    #
    # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    # ) reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    #cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    #cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
    # Squared error is used instead of cross-entropy here.
    cross_entropy = tf.reduce_sum(tf.square(tf.subtract(y_, y)))
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(
        cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    y_test = y_test.as_matrix()
    for i in range(20):
        #batch_xs, batch_ys = mnist.train.next_batch(100)
        #batch_xs = X_train
        #batch_ys = y_train.as_matrix()
        sess.run(train_step, feed_dict={x: X_train,
                                        y_: y_train.as_matrix()})
    # Test trained model
    print("[model] training is complete ***************** ")
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # NOTE(review): averaging (correct_prediction - labels) is not a
    # standard accuracy formula — confirm the intended metric.
    accuracy = tf.reduce_mean(
        tf.subtract(tf.cast(correct_prediction, tf.float32),
                    y_test[:10000]))
    print('accuracy: %s' % sess.run(accuracy,
                                    feed_dict={
                                        x: X_test.head(10000),
                                        y_: y_test[:10000]
                                    }))
    #cp = sess.run(tf.cast(correct_prediction, tf.float32), feed_dict={x: X_test.head(10000), y_: y_test[:10000]})
    #lacc = tf.subtract(tf.cast(correct_prediction, tf.float32), y_test[:10000])
    #cp = sess.run(lacc, feed_dict={x: X_test.head(10000), y_ : y_test[:10000]})
    #count = 0
    #for idx, c in enumerate(cp):
    #if c != y_test[idx]:
    ##print(idx, c, y_test[idx])
    #continue
    #else:
    #count +=1
    #print((count/float(10000)))
    sess.close()