def __init__(self):
    """Bootstrap the global service state for CybroScgiServer.

    Creates the service/access logs, the timezone and status singletons,
    the controller collection, config and transaction pool, and optionally
    spawns debug facilities. All state is published via the ``globals``
    module, so construction order matters.
    """
    # Local imports: deferred until the service object is actually built.
    import config
    import tz_info
    globals.system_log = logger.create("service")
    globals.access_log = logger.create("access")
    globals.system_log.info("*** CybroScgiServer %s started ***"
                            % const.ApplicationVersion)
    globals.tz_info = tz_info.TimezoneInfo()
    globals.sys_status = sys_status.SystemStatus()
    globals.controllers = cybrocontrollers.CybroControllers()
    globals.config = config.GlobalConfig()
    globals.transaction_pool = transaction_pool.TransactionPool()
    # Optional remote Python console for live debugging.
    if sys_config.DebugRConsole:
        from rfoo.utils import rconsole
        rconsole.spawn_server()
        globals.system_log.warning("Debug rconsole server spawned.")
    # Optional TCP log sink used by other components for comm tracing.
    if sys_config.DebugTcpServer:
        import tcp_logger_server
        globals.tcp_log_server = tcp_logger_server.create(
            sys_config.DebugTcpServerPort)
async def create_task(self, to_task):
    """Insert a row into the ``task`` table.

    :param to_task: dict with the values required for the insert
                    (keys: 'date', 'local_date', 'device_id', 'type').
    :return: id of the newly created row, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # _connect_database already logged the failure.
        return False
    try:
        task_id = await conn.fetchval(
            "INSERT INTO task(date, local_date, device_id, type) "
            "VALUES($1, $2, $3, $4) "
            "RETURNING id;",
            to_task['date'],
            to_task['local_date'],
            to_task['device_id'],
            to_task['type'])
        return task_id
    except Exception as e:
        logger.create('Произошла ошибка при создании записи '
                      'в сущности task. Метод create_task', e)
        return False
    finally:
        # Fix: the original closed the connection separately on the success
        # and error paths; 'finally' guarantees it exactly once on both.
        await conn.close()
def init_controller(controller_id):
    """Bootstrap global service state for a single controller.

    Sets up logging, status/config singletons, the UDP proxy, then creates
    the controller, its communication object, and reads its allocation file.

    :param controller_id: NAD of the controller to create and read.
    """
    globals.system_log = logger.create("service")
    globals.access_log = logger.create("access")
    globals.tz_info = tz_info.TimezoneInfo()
    globals.sys_status = sys_status.SystemStatus()
    globals.controllers = cybrocontrollers.CybroControllers()
    globals.config = config.GlobalConfig()
    globals.transaction_pool = transaction_pool.TransactionPool()
    # UDP proxy thread must be running before any controller traffic.
    globals.udp_proxy = udp_proxy.UDPProxy()
    globals.udp_proxy.start()
    global controller
    controller = globals.controllers.create(controller_id, False)
    global cybro_comm
    # Source NAD 1 is the server's own address.
    cybro_comm = cybrocomm.CybroComm(1, controller_id)
    cybro_comm.controller = controller
    cybro_comm.data_received_event = threading.Event()
    global alloce
    # read file alloc always
    controller.read_alloc_file_immediately()
    alloce = alloc.Allocation(controller_id)
    alloce.read()
async def get_devices(self, status_type_device):
    """Fetch the active packaging devices and wrap them in Device objects.

    :param status_type_device: device status:
                               1 - device without receipts,
                               2 - device with receipts.
    :return: list of Device instances, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # Bug fix: the original fell through to 'for row in rows' and
        # raised NameError when the connection could not be established.
        return False
    try:
        rows = await conn.fetch(
            "SELECT id, title FROM polycomm_device "
            "INNER JOIN timestamps ON "
            "polycomm_device.code = CAST(timestamps.devicecode as int) "
            "and timestamps.ready = True "
            "and timestamps.status_type_device = $1;",
            status_type_device)
    except Exception as e:
        logger.create('Произошла ошибка при получении списка активных'
                      ' устройств из базы. Метод get_devices', e)
        return False
    finally:
        await conn.close()
    return [Device(device_id=row['id'], name=row['title']) for row in rows]
async def create_task_to_event(self, to_task_event):
    """Insert a row into the ``task_to_event`` table.

    :param to_task_event: dict with the values required for the insert
                          (keys: 'event_id', 'table_name', 'ord',
                          'parent_id', 'task_id').
    :return: id of the newly created row, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        return False
    try:
        parent_id = await conn.fetchval(
            "INSERT INTO task_to_event(event_id, table_name, ord, "
            "parent_id, created_date, task_id) "
            "VALUES($1, $2, $3, $4, $5, $6) "
            "RETURNING id;",
            to_task_event['event_id'],
            to_task_event['table_name'],
            to_task_event['ord'],
            to_task_event['parent_id'],
            datetime.now(),  # created_date is stamped here, not by caller
            to_task_event['task_id'])
        return parent_id
    except Exception as e:
        logger.create('Произошла ошибка при создании записи в '
                      'сущности task_to_event', e)
        return False
    finally:
        # Fix: single guaranteed close instead of close on each path.
        await conn.close()
def force_check(message):
    """Announce an on-demand check of all services, then run it.

    :param message: incoming chat message (telebot handler signature);
                    not used by the body.
    """
    try:
        bot.send_message(config.GROUP_ID,
                         'Сейчас будет выполнена проверка всех сервисов.'
                         ' Если будут ошибки, я сообщу о них.',
                         parse_mode='Markdown')
    except Exception:
        # Fix: narrowed a bare 'except:' that also swallowed SystemExit
        # and KeyboardInterrupt.
        logger.create('Произошла ошибка при попытке отправки сообщения')
    run_check()
def download_pattern(self, path):
    """Download one remote log file over SFTP into the local logs directory.

    :param path: sequence of remote path components; the last element is
                 reused as the local file name.
    """
    try:
        ssh = self.connect_ssh()
        sftp = ssh.open_sftp()
        sftp.get(os.path.join(*path),
                 os.path.join(os.getcwd(), self.local_logs_directory,
                              path[-1]))
        ssh.close()
    except Exception:
        # Fix: narrowed a bare 'except:'. Note connect_ssh() returns None
        # on failure, which lands here via AttributeError as before.
        logger.create('Произошла ошибка при попытке'
                      ' загрузки файла по SFTP')
async def _connect_database(self):
    """Open an asyncpg connection to the configured PostgreSQL database.

    :return: an open connection on success, False on failure.
    """
    dsn = (f'postgresql://{self.pg_user}:{self.pg_password}'
           f'@{self.pg_host}/{self.pg_db}')
    try:
        return await asyncpg.connect(dsn)
    except Exception as e:
        logger.create('Произошла ошибка при попытке подключения к'
                      ' базе данных. Метод _connect_database', e)
        return False
def check_supervisor_status(self):
    """Alert the group chat when fewer supervisor workers run than configured.

    Compares the worker count reported by the first server against the
    expected list and sends a Markdown warning on mismatch.
    """
    if ConnectToFirstServer().check_supervisor() != len(
            config.SUPERVISOR_LIST):
        try:
            bot.send_message(
                config.GROUP_ID,
                'Наблюдаются проблемы с *supervisor*. Количество'
                ' запущенных воркеров отличается от заданных.',
                parse_mode='Markdown')
        except Exception:
            # Fix: narrowed a bare 'except:'.
            logger.create('Произошла ошибка'
                          ' при попытке отправки сообщения')
def connect_ssh(self):
    """Open an SSH connection using the stored credentials.

    :return: a connected paramiko.SSHClient, or None on failure.
    """
    try:
        ssh = paramiko.SSHClient()
        # NOTE(review): AutoAddPolicy trusts unknown host keys; acceptable
        # only for a controlled environment.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=self.ssh_host, port=self.ssh_port,
                    username=self.username, password=self.password)
    except Exception:
        # Fix: narrowed a bare 'except:'.
        logger.create('Произошла ошибка при попытке'
                      ' соединения с сервером по SSH')
        return
    return ssh
def check_supervisor(self):
    """Count expected supervisor workers present in `service supervisor status`.

    Runs the status command over SSH and counts how many names from
    config.SUPERVISOR_LIST appear in its combined stdout/stderr.

    :return: the count on success, None on failure.
    """
    try:
        ssh = self.connect_ssh()
        self.supervisor_count = 0
        stdin, stdout, stderr = ssh.exec_command(
            'service supervisor status')
        data = (stdout.read() + stderr.read()).decode('utf-8')
        for item in config.SUPERVISOR_LIST:
            if item in data:
                self.supervisor_count += 1
        ssh.close()
        return self.supervisor_count
    except Exception:
        # Fix: narrowed a bare 'except:'. connect_ssh() returning None
        # still lands here (AttributeError), as in the original.
        logger.create('Произошла ошибка при'
                      ' попытке проверки статуса supervisor')
def __init__(self):
    """Set up DB access, session containers, logging and the data-sync helper."""
    self.db = db.create_db_connection()
    self.items = []
    self.session_data = {}
    # Fix: identity comparison for None ('is None', was '== None').
    if globals.system_log is None:
        globals.system_log = logger.create("service")
    self.data_sync = DataSync(self)
def __init__(self):
    """Set up the data-logger thread: logging, DB access, controllers, stats."""
    # Fix: identity comparison for None ('is None', was '== None').
    if globals.system_log is None:
        globals.system_log = logger.create("service")
    self.db = db.create_db_connection()
    self.controllers = controllers.Controllers(self.db)
    self.stats = DataLoggerStats()
    # Set by the owner to request a clean shutdown of the worker loop.
    self.terminate_event = threading.Event()
    threading.Thread.__init__(self)
def __init__(self):
    """Select the DataLogger database engine and prepare the worker thread."""
    # Fix: identity comparison for None ('is None', was '== None').
    if globals.system_log is None:
        globals.system_log = logger.create("service")
    if sys_config.DatabaseEngine == "mysql":
        self.db = db.DBaseMySQL()
    else:
        # An unsupported engine is fatal for the whole service (as before).
        globals.system_log.error("Unknown DataLogger database engine: %s."
                                 % sys_config.DatabaseEngine)
        quit()
    self.terminate_event = threading.Event()
    threading.Thread.__init__(self)
def __init__(self):
    """Initialize the communication daemon thread and connect immediately."""
    self.cybrobase = cybrocomm.CybroBase()
    # Serializes transaction-id allocation across threads.
    self.transaction_id_lock = threading.Lock()
    if sys_config.DebugComm:
        # Only exists when comm debugging is enabled -- guard any access.
        self.comm_debug_log = logger.create("comm")
    self.connect()
    threading.Thread.__init__(self)
    # Daemon thread: does not keep the process alive at shutdown.
    self.daemon = True
async def update_status_and_resolved(self, task_id):
    """Mark a ``task`` row as processed: status = 1, resolved = True.

    Used for events from the 'line_event' list (those with no claims
    against them).

    :param task_id: id of the event row in the task entity.
    """
    conn = await self._connect_database()
    # Silently a no-op when the connection failed (already logged there).
    if conn:
        try:
            await conn.execute("\
                UPDATE task \
                SET status = 1, resolved = True \
                WHERE id = $1;",
                task_id
            )
            # print('task_id status and resolved is updated') #FIXME:
        except Exception as e:
            logger.create('Произошла ошибка при обновлении записи в '
                          'сущности task. Метод update_status_and_resolved', e)
        finally:
            await conn.close()
async def create_polycommissue_event(self, event):
    """Insert an artificial notification row into ``polycommissue``.

    Used for suitcases with event.issue_attrib['type'] in {7, 8, 9}.

    :param event: the suitcase carrying the artificial notification.
    :return: None on success, False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        return False
    try:
        await conn.execute(
            "INSERT INTO polycommissue(id, localdate, device, total, "
            "suitcase, duration, type, date, createtime) "
            "VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9);",
            event.issue_attrib['id'],
            event.issue_attrib['localdate'],
            event.device_id,
            event.issue_attrib['total'],
            event.issue_attrib['suitcase'],
            event.duration,
            event.issue_attrib['type'],
            event.issue_attrib['date'],
            datetime.now())  # createtime stamped at insert time
    except Exception as e:
        logger.create('Произошла ошибка при создании записи '
                      'в сущности polycommissue. '
                      'Метод create_polycommissue_event', e)
        return False
    finally:
        # Fix: single guaranteed close instead of close on each path.
        await conn.close()
async def get_receipts(self, device_id):
    """Fetch unprocessed receipts for a device and wrap them in Receipts.

    :param device_id: id of an active device, as returned by get_devices().
    :return: list of Receipts instances, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # Bug fix: the original fell through to 'for row in rows' and
        # raised NameError when the connection could not be established.
        return False
    try:
        rows = await conn.fetch(
            "SELECT DISTINCT receipts.receipt_id, dateclose, "
            "polycomm_device.id, "
            "receipts.quantitypackageone, receipts.quantitypackagedouble, "
            "receipts.status, receipts.dateclosemoscow "
            "FROM receipts "
            "LEFT JOIN polycomm_device on "
            "CAST(receipts.devicecode as int) = polycomm_device.code "
            "WHERE polycomm_device.id = $1 "
            "and status = 0;",
            device_id)
    except Exception as e:
        logger.create('Произошла ошибка при получении списка '
                      'чеков из базы. Метод get_receipts', e)
        return False
    finally:
        await conn.close()
    return [Receipts(receipt_id=row['receipt_id'],
                     receipts_timestamp=row['dateclose'],
                     device_id=row['id'],
                     quantitypackageone=row['quantitypackageone'],
                     quantitypackagedouble=row['quantitypackagedouble'],
                     status=row['status'],
                     dateclosemoscow=row['dateclosemoscow'])
            for row in rows]
async def get_issue(self, device_id):
    """Fetch unprocessed notifications for a device, wrapped in Issue objects.

    :param device_id: id of an active device, as returned by get_devices().
    :return: list of Issue instances, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # Bug fix: the original fell through to 'for row in rows' and
        # raised NameError when the connection could not be established.
        return False
    try:
        rows = await conn.fetch(
            "SELECT suitcase, localdate, device, polycomm_issue_type.title, "
            "status, polycommissue_id, date "
            "FROM polycommissue "
            "INNER JOIN polycomm_issue_type ON "
            "polycommissue.type = polycomm_issue_type.id "
            "WHERE device = $1 "
            "and status = 0;",
            device_id)
    except Exception as e:
        logger.create('Произошла ошибка при получении списка '
                      'оповещений из базы. Метод get_issue', e)
        return False
    finally:
        await conn.close()
    return [Issue(suitcase_id=row['suitcase'],
                  issue_time=row['localdate'],
                  device_id=row['device'],
                  issue_type=row['title'],
                  status=row['status'],
                  polycommissue_id=row['polycommissue_id'],
                  moscow_date=row['date'])
            for row in rows]
async def get_suitcases(self, device_id):
    """Fetch unprocessed packagings for a device, wrapped in Suitcase objects.

    :param device_id: id of an active device, as returned by get_devices().
    :return: list of Suitcase instances, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # Bug fix: the original fell through to 'for row in rows' and
        # raised NameError when the connection could not be established.
        return False
    try:
        rows = await conn.fetch(
            "SELECT id, dateini_local, local_date, package_type, "
            "polycom_id, totalid, status, duration, date "
            "FROM polycomm_suitcase "
            "WHERE status = 0 and device_id = $1;",
            device_id)
    except Exception as e:
        logger.create('Произошла ошибка при получении списка '
                      'упаковок из базы. Метод get_suitcases', e)
        return False
    finally:
        await conn.close()
    return [Suitcase(suitcase_id=row['id'],
                     suitcase_start=row['dateini_local'],
                     suitcase_finish=row['local_date'],
                     package_type=row['package_type'],
                     polycom_id=row['polycom_id'],
                     totalid=row['totalid'],
                     status=row['status'],
                     duration=row['duration'],
                     moscow_date=row['date'],
                     device_id=device_id)
            for row in rows]
async def get_alarm(self, device_id):
    """Fetch unprocessed alarms for a device, wrapped in Alarm objects.

    :param device_id: id of an active device, as returned by get_devices().
    :return: list of Alarm instances, or False on failure.
    """
    conn = await self._connect_database()
    if not conn:
        # Bug fix: the original fell through to 'for row in rows' and
        # raised NameError when the connection could not be established.
        return False
    try:
        rows = await conn.fetch(
            "SELECT polycommalarm_id, localdate, device, "
            "polycomm_alarm_type.title, polycommalarm.status, date "
            "FROM polycommalarm "
            "INNER JOIN polycomm_alarm_type ON "
            "polycomm_alarm_type.id = polycommalarm.alarmtype "
            "WHERE device = $1 "
            "and status = 0;",
            device_id)
    except Exception as e:
        logger.create('Произошла ошибка при получении списка '
                      'уведомлений из базы. Метод get_alarm', e)
        return False
    finally:
        await conn.close()
    return [Alarm(polycommalarm_id=row['polycommalarm_id'],
                  alarm_time=row['localdate'],
                  alarm_device_id=row['device'],
                  alarm_type=row['title'],
                  status=row['status'],
                  moscow_date=row['date'])
            for row in rows]
def __init__(self, client, config):
    """Wire up the server: diary publishers, loggers and the command set.

    :param client: the chat/client object this server talks through.
    :param config: dict with 'Server Name', 'DBs' and 'Loggers' entries.
    """
    self.client = client
    self.config = config
    self.name = config['Server Name']
    # One diary publisher per configured database, keyed by its db_name.
    self.diaries = {}
    for pub in (diary.publisher(db_cfg) for db_cfg in self.config["DBs"]):
        self.diaries[pub.db_name] = pub
    # One logger per configured log target.
    self.loggers = [logger.create(entry) for entry in config['Loggers']]
    self.command_set = self.generate_commands()
def run(self, serial_command, **args):
    """Dispatch a serial-monitor command to its handler.

    Lazily creates the plugin logger on first use, records the last-used
    settings, and routes ``serial_command`` through ``self.arg_map``.
    """
    if not self.logger:
        settings = sublime.load_settings("serial_monitor.sublime-settings")
        self.logger = logger.create("serial_monitor",
                                    settings.get("log_level"))
    self.last_settings = sublime.load_settings(
        serial_constants.LAST_USED_SETTINGS)
    self.logger.debug("Running command: {}, args: {}".format(serial_command,
                                                             args))
    if serial_command not in self.arg_map:
        self.logger.error("Unknown serial command: {0}".format(
            serial_command))
        return
    handler = self.arg_map[serial_command]
    # Bundle the args into a single settings object for the handler.
    command_args = SerialSettings(handler, **args)
    handler(command_args)
    sublime.save_settings(serial_constants.LAST_USED_SETTINGS)
def check_error(self, count_errors, clean_list_error, name_file):
    """Report log-check results to the group chat, then clean up local logs.

    :param count_errors: number of errors found in the log file.
    :param clean_list_error: 0 means the log received no data at all.
    :param name_file: name of the checked log file (e.g. 'worker.log').
    """
    if count_errors > 0:
        try:
            bot.send_message(
                config.GROUP_ID,
                f'У нас есть некоторое количество(*{count_errors}*) '
                f'проблем в *{name_file}*. Сейчас пришлю лог с ними.',
                parse_mode='Markdown')
            # Bug fix: the original referenced an undefined 'name_log';
            # the parameter is 'name_file'. [:-4] strips the extension.
            with open(
                    os.path.join(os.getcwd(), config.LOCAL_LOGS_DIRECTORY,
                                 name_file)[:-4] + '_errors.log',
                    'r', encoding='utf-8') as file:
                bot.send_document(config.GROUP_ID, file)
        except Exception:
            # Fix: narrowed a bare 'except:'.
            logger.create('Произошла ошибка при'
                          ' попытке отправки сообщения')
    elif clean_list_error == 0:
        try:
            bot.send_message(
                config.GROUP_ID,
                f'В лог *{name_file}* не приходит информация.'
                ' Проверьте, пожалуйста, исправность системы.',
                parse_mode='Markdown')
        except Exception:
            logger.create('Произошла ошибка при'
                          ' попытке отправки сообщения')
    # Always empty the local logs directory afterwards (best-effort).
    try:
        for file in os.listdir(config.LOCAL_LOGS_DIRECTORY):
            os.remove(
                os.path.join(os.getcwd(), config.LOCAL_LOGS_DIRECTORY,
                             file))
    except Exception:
        logger.create('Произошла ошибка при попытке'
                      ' очистки локальной папки с логами')
# Pin evaluation to the second GPU before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import common
import eval_tool
import torch
import torch.nn as nn
import logger
import numpy as np
from dbface import DBFace
from evaluate import evaluation

# create logger
trial_name = "small-H-dense-wide64-UCBA"
jobdir = f"jobs/{trial_name}"
log = logger.create(trial_name, f"{jobdir}/logs/eval.log")

# load and init model: epoch-150 checkpoint, inference mode, on GPU
model = DBFace(has_landmark=True, wide=64, has_ext=True, upmode="UCBA")
model.load(f"{jobdir}/models/150.pth")
model.eval()
model.cuda()

# load dataset
# NOTE(review): per-channel normalization constants -- presumably must
# match the training pipeline; confirm before changing.
mean = [0.408, 0.447, 0.47]
std = [0.289, 0.274, 0.278]
files, anns = zip(
    *common.load_webface("webface/val/label.txt", "webface/WIDER_val/images"))

# forward and summary
prefix = "webface/WIDER_val/images/"
import pymysql

import entity
import logger

log = logger.create()


class DbAccess(object):
    """Minimal data-access layer over the ragnarok MySQL database."""

    def __init__(self, host, user, passwd, db="ragnarok"):
        """Open the MySQL connection (kept for the object's lifetime)."""
        self.conn = pymysql.connect(host=host, user=user, password=passwd,
                                    database=db)

    def get_login_by_account_id(self, account_id):
        """Fetch the login row for an account id.

        :param account_id: the account id to look up.
        :return: an entity.Login, or None when no row matches.
        """
        # Fix: parameterized query instead of string interpolation
        # (prevents SQL injection and quoting bugs).
        query = "SELECT * FROM login WHERE account_id=%s"
        log.debug("Executing SQL query '{}'".format(query))
        # Fix: cursor closed deterministically via context manager.
        with self.conn.cursor(pymysql.cursors.DictCursor) as cur:
            cur.execute(query, (account_id,))
            data = cur.fetchall()
        if len(data) == 0:
            log.debug("Query returned no data")
            return None
        login = entity.Login(**(data[0]))
        log.info("Query fetched login {{ {} }}".format(", ".join(
            "({} = {})".format(k, v)
            for k, v in login.__dict__.items() if type(k) is str)))
        return login
def __init__(self, client_nad, plc_nad):
    """Remember the endpoint addresses and open a per-controller log.

    :param client_nad: network address of the client side.
    :param plc_nad: network address of the PLC; also names the log ("c<nad>").
    """
    self.client_nad = client_nad
    self.plc_nad = plc_nad
    # Samples accumulated for bandwidth statistics.
    self.bandwidth_list = []
    self.log = logger.create("c%d" % plc_nad)
        # Tail of a method whose start is above this excerpt: decode the
        # network output into detections and dump a visualization image.
        outobjs = eval_tool.detect_images_giou_with_netout(
            hm, tlrb, landmark, threshold=0.1, ibatch=0)
        im1 = image.copy()
        for obj in outobjs:
            common.drawbbox(im1, obj)
        common.imwrite(f"{jobdir}/imgs/train_result.jpg", im1)

    def train(self):
        """Run the full training loop with a stepwise learning-rate schedule."""
        # epoch -> learning rate, applied when that epoch begins
        lr_scheduer = {1: 1e-3, 2: 2e-3, 3: 1e-3, 60: 1e-4, 120: 1e-5}

        # train
        self.model.train()
        for epoch in range(self.epochs):
            if epoch in lr_scheduer:
                self.set_lr(lr_scheduer[epoch])
            self.train_epoch(epoch)
            # Checkpoint after every epoch; '.module' unwraps DataParallel.
            file = f"{jobdir}/models/{epoch + 1}.pth"
            common.mkdirs_from_file_path(file)
            torch.save(self.model.module.state_dict(), file)


# Script entry: configure the trial, create its logger, and train.
trial_name = "small-H-dense-wide64-UCBA-keep12-ignoresmall"
jobdir = f"jobs/{trial_name}"
log = logger.create(trial_name, f"{jobdir}/logs/{trial_name}.log")
app = App("webface/train/label.txt", "webface/WIDER_train/images")
app.train()
def __init__(self, config_path="./config/"):
    """Load configuration, set up the client logger and connect.

    :param config_path: directory holding the configuration files.
    """
    super(Subscriber, self).__init__()
    self.config = configuration.load(config_path)
    self.log = logger.create('CLIENT')
    self.connect()
def route_received_data(self, data, address, c):
    """Route one received UDP datagram to its controller. (Python 2 code.)

    Validates the frame, handles relay and push traffic, creates
    controllers on demand, and forwards ordinary frames to the
    controller's comm proxy. Errors are logged, never raised.

    :param data: raw datagram bytes.
    :param address: (ip, port) tuple of the sender.
    :param c: NOTE(review): used only for c.PushRequest(...) below --
        its exact role is not visible in this excerpt; confirm.
    """
    ip = address[0]
    port = address[1]
    # globals.sys_status.udp_rx_count += 1
    try:
        # check_received_frame will raise exception if received frame is invalid
        self.cybrobase.check_received_frame(data)
        try:
            frame = cybrocomm.CommFrame(data)
        except:
            # Unparseable frame: silently dropped.
            return
        if frame.type == 0:
            globals.sys_status.udp_rx_count += 1
        if sys_config.RelayEnable and globals.relay != None and globals.relay.process_relay_message(
                ip, port, data, frame.from_nad, frame.to_nad):
            # it's relay message. processed - leave routing
            return
        controller = globals.controllers.get_by_nad(frame.from_nad)
        if sys_config.DebugComm:
            self.comm_debug_log.info(
                " RX [%d] %s:%d %s"
                % (frame.from_nad, ip, port, binascii.b2a_hex(data)))
        if sys_config.DebugTcpServer:
            globals.tcp_log_server.debug(
                " RX [%d] %s:%d %s"
                % (frame.from_nad, ip, port, binascii.b2a_hex(data)))
        is_push_message = self.cybrobase.is_push_message(frame)
        is_broadcast_message = self.cybrobase.is_broadcast_message(frame)
        # Unknown sender (and not the server itself): create a controller
        # for push traffic, or fall back to the broadcast controller.
        if controller == None and frame.from_nad != 1 and (
                is_push_message or is_broadcast_message == False):
            if is_push_message:
                controller = globals.controllers.create(
                    frame.from_nad, is_push_message)
            elif frame.to_nad == 1:
                controller = globals.broadcastController
                globals.controllersForNadList.append(frame.from_nad)
        if controller != None:
            if is_push_message:
                if not sys_config.PushEnable:
                    return
                globals.sys_status.push_count += 1
                cybro_log = logger.create("c%d" % frame.from_nad)
                cybro_log.info("c%d push from %s:%d." % (frame.from_nad, ip, port))
                print("c%d push from %s:%d." % (frame.from_nad, ip, port))
                # NOTE(review): Python 2 print statement of the
                # PushRequest return value -- looks like debug leftover.
                print c.PushRequest("c%d push from %s:%d." % (frame.from_nad, ip, port))
                if sys_config.DebugTcpServer:
                    globals.tcp_log_server.info("c%d push from %s:%d."
                                                % (frame.from_nad, ip, port))
                controller.set_push_data(ip, int(port))
                controller.set_write_push_ack_request()
                if sys_config.ReadAllocAfterPush:
                    controller.perform_maintenance_read()
                controller.sys_status.bytes_transfered += len(data)
            else:
                # print "rx data %s" % (binascii.b2a_hex(data))
                controller.comm_proxy.on_receive_frame(data)
                # Remember the sender's current address for replies.
                controller.config.ip = ip
                controller.config.port = int(port)
    except Exception, e:
        globals.system_log.error(
            "(UDPProxy::route_received_data(address: %s:%d) Exception) %s"
            % (ip, port, e))
        if sys_config.DebugTcpServer:
            globals.tcp_log_server.error(
                "(UDPProxy::route_received_data(address: %s:%d) Exception) %s"
                % (ip, port, e))
#!/usr/bin/env python from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient from garden_controller import GardenController import logger import json import pushover import time _HOST = "aa40w08kkflrp-ats.iot.eu-west-1.amazonaws.com" _PORT = 443 _ROOT_CA_PATH = "./root-CA.crt" _CLIENT_ID = "garden-controller" _CLIENT_NAME = "Garden Controller" _LOG = logger.create("iot_listener", logger.INFO) logger.create("AWSIoTPythonSDK.core", logger.WARN) class IoTCoreClient: def __init__(self): self._iot = AWSIoTMQTTShadowClient(_CLIENT_ID, useWebsocket=True) self._iot.configureEndpoint(_HOST, _PORT) self._iot.configureCredentials(_ROOT_CA_PATH) self._iot.configureAutoReconnectBackoffTime(1, 32, 20) self._iot.configureConnectDisconnectTimeout(10) self._iot.configureMQTTOperationTimeout(5) self._iot.connect() def create_shadow_handler(self, thing_name, handler): shadow = self._iot.createShadowHandlerWithName(thing_name, True)
def register_logger(self):
    """Create the server-wide logger at the configured log path."""
    self.log = logger.create(name='SERVER',
                             log_path=self.config['log_path'])