def __init__(self, timeout):
    # Local imports keep the networking modules off the module-import path.
    from src.net.loop import EventLoop
    from src.net.connector import Connector
    from src.util.logger import Logger

    # Logging service: one per thread.
    Logger.start_logger_service()

    self.loop = EventLoop(timeout)
    self.tcp_conn = None
    self.connector = Connector(self.loop)
    self.connector.set_new_conn_callback(self.new_connection)
def __init__(self):
    self.LOGGER = Logger(self)
    self.controller = Adafruit_MotorHAT(addr=0x60, i2c_bus=1)
    self.motors = [0, 0, 0, 0]

    # Subscribe to motor commands from ROS.
    rospy.init_node('interface')
    rospy.Subscriber("/motor", String, self.on_motor_callback, queue_size=1)

    # Make sure the motors stop when the script exits.
    atexit.register(self.turn_off_motors)

    # Report the current motor values at 2 Hz until ROS shuts down.
    rate = rospy.Rate(2)
    while not rospy.is_shutdown():
        self.LOGGER.info(str(self.motors))
        rate.sleep()
class LogInterface(object):
    """Thin logging facade over Logger with a per-name auto-step counter.

    Each metric name has its own monotonically increasing step; passing
    step=-1 to any log method uses (and advances) that counter.
    """

    def __init__(self, log_dir):
        self.logger = Logger(log_dir)
        self.registered_models = []  # models whose parameters log_model() dumps
        self.step_dict = {}          # name -> next auto-assigned step

    def __get_step(self, name, step):
        """Return the step to log at.

        When step == -1, return the current counter for `name` and advance
        it; otherwise return `step` unchanged.
        """
        if step == -1:
            if name in self.step_dict:
                currStep = self.step_dict[name]
            else:
                currStep = self.step_dict[name] = 0
            self.step_dict[name] += 1
        else:
            currStep = step
        return currStep

    def log_scalar(self, name, item, step=-1):
        """Log one scalar value under `name`."""
        currStep = self.__get_step(name, step)
        self.logger.scalar_summary(name, item, currStep)

    def log_scalar_dict(self, item_dict, prefix='', step=-1):
        """Log every entry of `item_dict` as '<prefix>_<key>'.

        Bug fix: the resolved step is now passed down to log_scalar.
        Previously the raw `step` (-1) was forwarded, so the auto counter
        was incremented twice per item and every other step was skipped.
        """
        for name, item in item_dict.items():
            currStep = self.__get_step(prefix + '_' + str(name), step)
            self.log_scalar(prefix + '_' + str(name), item, currStep)

    def log_vector(self, name, vector, step=-1):
        """Logs each element of a numpy vector as a separate graph on the
        same plot.
        """
        # The base name's counter fixes one step shared by all elements.
        currStep = self.__get_step(name, step)
        for i, elem in enumerate(np.nditer(vector, flags=["refs_ok"])):
            self.log_scalar(name + "_" + str(i), float(elem), currStep)

    def register_model(self, model):
        """Remember `model` so log_model() dumps its parameter histograms."""
        self.registered_models.append(model)

    def log_model(self):
        """Write weight and gradient histograms for all registered models."""
        def to_np(val):
            return val.data.cpu().numpy()

        for model in self.registered_models:
            for name, value in model.named_parameters():
                name = name.replace('.', '/')
                currStep = self.__get_step(name, -1)
                self.logger.histo_summary(name, to_np(value), currStep)
                # NOTE(review): value.grad is None for params with no
                # backward pass yet — confirm callers only log after one.
                self.logger.histo_summary(name + '/grad', to_np(value.grad), currStep)

    def log_image(self, name, image, step=-1):
        """Log a single image under `name`."""
        currStep = self.__get_step(name, step)
        self.logger.image_summary(name, [image], currStep)

    def log_image_dict(self, image_dict, step=-1):
        """Log every image in `image_dict` under its key."""
        for name, image in image_dict.items():
            self.log_image(name, image, step)
class Driver:
    '''
    Driver class contains method and initialization of commandline arguments
    '''

    # Command-line argument definitions (evaluated at class-creation time).
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', '--f', default="../input/input.txt",
                        help='Input file path')
    parser.add_argument("--topxltv", default=2,
                        help='Finds top x ltv valued customers')
    parser.add_argument('--loglevel', default='INFO',
                        help='Level of logging to use (default:WARNING, \
                        values: CRITICAL, ERROR, WARNING, INFO, DEBUG)')
    parser.add_argument('--output', default='../output/output.txt',
                        help='Output File path')
    args = parser.parse_args()

    if __name__ == '__main__':
        # Build the logger at the requested level.
        logObj = Logger()
        log = logObj.logger(args.loglevel)

        # Read the raw event data from the input file.
        file_process = FileProcessor(log)
        log.info("File Processed")
        data = file_process.read_file(args.file)

        # Feed every event into the data store.
        event = event_mapper.EventMapper(log)
        D = DataStore()
        for e in data:
            event.Ingest(e, D)

        # Top-x customers by LTV; non-positive x falls back to 2.
        x = int(args.topxltv) if int(args.topxltv) > 0 else 2
        write_data = event.TopXSimpleLTVCustomers(x, D)

        # Persist the result.
        file_process.write_file(args.output, write_data)
class Interface:
    """
    Abstract: Simple script to baseline motor execution.
    """

    def __init__(self):
        self.LOGGER = Logger(self)
        self.controller = Adafruit_MotorHAT(addr=0x60, i2c_bus=1)
        self.motors = [0, 0, 0, 0]

        # Initialize motor subscribers
        rospy.init_node('interface')
        rospy.Subscriber("/motor", String, self.on_motor_callback, queue_size=1)

        # Turn off motors when the script exits.
        atexit.register(self.turn_off_motors)

        # Motor tracking: report current values at 2 Hz until shutdown.
        r = rospy.Rate(2)
        while not rospy.is_shutdown():
            self.LOGGER.info(str(self.motors))
            r.sleep()

    def turn_off_motors(self):
        """ Turns off all motors. """
        self.LOGGER.info("Stopping motors...")
        self.turn_motors([0, 0, 0, 0])

    def turn_motors(self, values):
        """ ACTUALLY turns the motors, ramping one unit per tick.

        Bug fix: the length guard used `len(values) is not 4` — an identity
        test against an int literal, which only works by CPython small-int
        caching and is a SyntaxWarning on Python 3.8+. It now uses `!= 4`.
        """
        if self.motors == values or len(values) != 4:
            return
        self.LOGGER.info("Motors turning: " + str(values))
        # NOTE(review): this loop exits as soon as ANY motor reaches its
        # target (conditions joined with `and`), and non-integer targets can
        # never compare equal to the unit-stepped values — confirm the
        # intended stopping condition with the author before changing it.
        while self.motors[0] != values[0] and self.motors[1] != values[1] and self.motors[2] != values[2] and self.motors[3] != values[3]:
            for idx, motor_speed in enumerate(values):
                # Direction follows the sign of the CURRENT speed; BRAKE at 0.
                direction = Adafruit_MotorHAT.FORWARD if self.motors[idx] > 0 else Adafruit_MotorHAT.BACKWARD if self.motors[idx] < 0 else Adafruit_MotorHAT.BRAKE
                self.controller.getMotor(idx + 1).run(direction)
                # Step one unit toward the target, then push the new speed.
                self.motors[idx] += (1 if self.motors[idx] < values[idx] else -1 if self.motors[idx] > values[idx] else 0)
                self.controller.getMotor(idx + 1).setSpeed(int(abs(self.motors[idx])))
                time.sleep(0.001)

    def on_motor_callback(self, value_str_obj):
        """ Parses the comma-separated /motor payload and applies it. """
        values = list(map(lambda x: float(x), value_str_obj.data.replace("\\", "").replace(" ", "").split(",")))
        self.turn_motors(values)
        self.motors = values
def __init__(self):
    # Name the logger after the concrete class of this instance.
    self.logger = Logger(self.__class__.__name__)
class Simulation:
    """Wires up a simulated web stack (database, app servers, load balancer,
    clients) and reports request statistics after a 10-minute simulated run."""

    def __init__(self):
        self.logger = Logger(self.__class__.__name__)

    def start(self, servers_count):
        """Run one simulation with `servers_count` app servers; return the
        average client request time."""
        env = simpy.Environment()
        start_time = env.now

        # Database shared by every server.
        db = Database(env, self.logger, 0, dict(
            max_connections=100
        ))

        # Query mix generated per rendered page.
        query_pattern = (
            dict(type=FullScanQuery, count=lambda: maybe.maybe(0, 1, 90)),
            dict(type=CoveringIndexQuery, count=lambda: random.randint(0, 10)),
            dict(type=IndexQuery, count=lambda: random.randint(0, 2)),
            dict(type=RangeQuery, count=lambda: random.randint(0, 2)),
        )

        # Application servers.
        servers = []
        for idx in range(0, servers_count):
            srv = Server(env, self.logger, idx, dict(
                cores=20,
                db=db,
                render_time=TrTime(30, 100),
                query_pattern=query_pattern,
                db_latency_time=TrTime(20, 30),
                balancer_latency_time=TrTime(10, 20)
            ))
            servers.append(srv)
            srv.start()

        # Load balancer in front of the servers.
        balancer = Balancer(env, self.logger, 0, dict(
            mode=BalanceMode.ROUND_ROBIN,
            servers=servers,
            cache_size=100,
            cache_time=1*30*1000,  # half of minute
            render_time=TrTime(2, 8),
            balance_time=TrTime(1, 2),
            sender_processes=4,
            receiver_processes=4,
            max_clients=100000))
        balancer.start()

        # Page catalogue the clients request from.
        pager = Pager(env, dict(
            dynamic_pages_count=10,
            static_files_count=50,
            static_files_per_page=3
        ))

        # Simulated clients hitting the balancer.
        clients = []
        for idx in range(0, 1000):
            cl = Client(env, self.logger, idx, dict(
                balancer_pipe=balancer.get_clients_pipe(),
                pager=pager,
                type=ClientType.PC,
                guest=maybe.maybe(True, False, 50),
                page_idle_time=TrTime(1*1000, 3*1000),
                request_idle_time=TrTime(0.1*1000, 0.2*1000),
                uplink_speed=TrTime(1, 3),
                downlink_speed=TrTime(10, 30)
            ))
            clients.append(cl)
            cl.start()

        env.run(until=10*60*1000)  # 10 minutes

        # Report statistics.
        self.logger.log(self, "-----------------------")
        self.logger.log(self, "Statistics (for %s servers)" % servers_count)
        self.logger.log(self, "-----------------------")
        elapsed = env.now - start_time
        rps = float(balancer.get_requests_count()) / elapsed * 1000
        self.logger.log(self, "[Balancer] RPS: %f" % rps)
        for srv in servers:
            rps = float(srv.get_requests_count()) / elapsed * 1000
            self.logger.log(self, "[Server %d] RPS: %f" % (srv.id, rps))

        average_request_time = 0
        for cl in clients:
            average_request_time += cl.get_average_request_time()
        average_request_time /= len(clients)
        self.logger.log(self, "Average request time: %f" % average_request_time)
        return average_request_time
from os import environ
from os.path import join, dirname
from dotenv import load_dotenv

# NOTE(review): `sys`, `socket` and `slack` are used below but not imported
# in this chunk — presumably imported earlier in the file; verify.
sys.path.append(join(dirname(__file__), '../..'))

from src.util.logger import Logger

# Load Slack API Token
load_dotenv(join(join(dirname(__file__), '../..'), '.env'))
TOKEN = environ.get("SLACK_API_TOKEN")

# Create Slack client
client = slack.WebClient(token=TOKEN)

LOGGER = Logger()


def get_ip_address():
    """Return the LAN IP address used to reach the internet.

    Fix: the UDP socket is now always closed (the original leaked it).
    Connecting a UDP socket sends no packets; it only binds the outgoing
    interface, which getsockname() then reports.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()


def notify_ip():
    """ Notifies the Slack channel of the car's IP address. """
    ip = get_ip_address()
    LOGGER.info("IP: " + ip)
    # SECURITY(review): this writes the raw API token to the log — confirm
    # that is intentional. str() also avoids a TypeError when the
    # SLACK_API_TOKEN env var is unset (TOKEN is None).
    LOGGER.info("Token: " + str(TOKEN))
    response = client.chat_postMessage(channel='#autonomy-bot',
                                       text="Hi! SSH into me with `ssh pi@" + ip + "`.")
def __init__(self):
    # Tag the logger with this instance so log lines carry its context.
    self.LOGGER = Logger(self)
def run_test():
    """Benchmark the project Logger against the stdlib logging module.

    Fix: use function-call print syntax, valid on both Python 2 and 3
    (the original `print delta` statements were Python-2-only), and
    replace the manual while-counters with `for ... in range(...)`.
    """
    import time
    import logging
    from src.util.logger import Logger

    # --- project Logger: 100k error records ---
    start = time.time()
    logger = Logger(flush_internal=3, buffer_len_bound=20480)
    for i in range(100000):
        logger.error("hello world {}".format(i))
    delta = time.time() - start
    print(delta)

    # Comparison: the efficiency difference is mainly in how often the log
    # file is opened. (translated from the original Chinese comment)
    start = time.time()
    logger_name = 'simple_logger'
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler('test.log')
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s %(thread)d %(levelname)s %(pathname)s:%(lineno)d %(message)s"
        ))
    logger.addHandler(file_handler)
    for i in range(100000):
        logger.error("hello world {}".format(i))
    delta = time.time() - start
    print(delta)

    # TEST version 2.0
    # NOTE(review): LOG is assumed to be a module-level logger defined
    # elsewhere in this file — verify it exists.
    Logger.start_logger_service()
    LOG.error("TEST")
def __init__(self, log_dir):
    # Underlying summary writer plus bookkeeping for models and steps.
    self.logger = Logger(log_dir)
    self.registered_models = []
    self.step_dict = {}
# encoding=utf8
# Manual smoke test for the Timer/TimerQueue machinery.
import sys, os
sys.path.append(os.path.realpath('.'))

if __name__ == '__main__':
    import time
    from src.net.loop import EventLoop
    from src.util.timer import Timer, TimerQueue
    from src.util.logger import Logger

    Logger.start_logger_service()
    timer_queue = TimerQueue(EventLoop(0.01))

    # Fix: function-call print syntax works on both Python 2 and 3
    # (the original `print '...'` statements were Python-2-only).
    def test_func():
        print('hello')

    def test_func1():
        print('hello repeat')

    timer_ins = Timer(0, test_func)
    print(timer_ins.timer_id)
    timer_ins1 = Timer(1, test_func1)
    print(timer_ins1.timer_id)

    timer_queue.add_timer(timer_ins)
    timer_queue.add_timer(timer_ins1)

    # Drive the queue forever; Ctrl-C to stop this smoke test.
    while 1:
        timer_queue.schedule()
        time.sleep(1)