from email import charset as Charset
from email.mime.text import MIMEText


def send(to, subject, text, params={}, bcc=[], cc=[]):
    """Queue an HTML email for sending."""
    global emailThread
    Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
    text = "<html>" + text + "</html>"
    msg = MIMEText(text, 'html', 'utf-8')
    msg['Subject'] = subject
    msg['From'] = config.mail_sender()
    if 'Sender' not in msg:
        msg['Sender'] = config.get('return_path')
    msg['To'] = ','.join(to) if isinstance(to, list) else to
    if len(cc) > 0:
        msg['Cc'] = ','.join(cc) if isinstance(cc, list) else cc
    for key, value in params.items():
        msg[key] = value
    send_to = set((to if isinstance(to, list) else [to])
                  + (cc if isinstance(cc, list) else [cc])
                  + (bcc if isinstance(bcc, list) else [bcc]))
    # Put the email into the queue
    with queueLock:
        emailQueue.put(emailData(msg['Sender'], send_to, msg.as_string()))
    if not (emailThread and emailThread.is_alive()):
        emailThread = sendThread()
        emailThread.start()
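# A hypothetical call to send(); every address and header value below is
# illustrative, not taken from the source.
send(
    to=['alice@example.com', 'bob@example.com'],
    subject='Weekly report',
    text='<p>All checks passed.</p>',
    params={'Reply-To': 'noreply@example.com'},
    bcc=['archive@example.com'],
)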
def gui_main(self):
    self.b.update(self.tol_bar.b)
    self.head.set_format(
        ('DBT', 'DBK', 'DBS')[config.getint('Preferens', 'd')])
    self.numstr_var.set('')
    # scl = config.getboolean('Scale', 'amplscl')
    # if scl:
    #     self.board.sclrbar.pack(side=tk.RIGHT, fill=tk.Y, expand=False)  # show the scale
    yscroll = config.getboolean('Scale', 'yscroll')
    if yscroll:
        self.board.sbar.pack(side=tk.LEFT, fill=tk.Y)  # show the scrollbar
    self.board.canv.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)  # pack here
    self.open_port()
    self.init_board()
    path = pathlib.Path(config.get('Dir', 'dirprj'))
    # path = os.path.abspath(path)
    if path.exists():
        self.dbname = path.joinpath(path.name + '.db')  # path + '.db'
        self.tol_bar.set_dir(str(path) + ' ...Галс не выбран')
    else:
        self.tol_bar.set_dir('Проект не выбран!')
        self.tol_bar.pr_name.set(' ?! ')
        self.b['bgals'].config(state='disabled')
from email import charset as Charset
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def _send(to, subject, text, params, bcc, cc, plaintext=None):
    """Send an email (queued for delivery by the background thread)."""
    global emailThread
    sender = config.mail_sender()
    if sender is None:
        logger.get_log().warning(
            f"Skipping sending mail to '{to}' because sender is not set in config")
        return
    Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
    text = "<html>" + text + "</html>"
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    if 'Sender' not in params:
        msg['Sender'] = config.get('return_path')
    if 'Return-Path' not in params:
        msg['Return-Path'] = config.get('return_path')
    msg['To'] = ','.join(to) if isinstance(to, list) else to
    if len(cc) > 0:
        msg['Cc'] = ','.join(cc) if isinstance(cc, list) else cc
    for key, value in params.items():
        msg[key] = value
    if plaintext is not None:
        msg.attach(MIMEText(plaintext, 'plain', 'utf-8'))
    msg.attach(MIMEText(text, 'html', 'utf-8'))
    send_to = set((to if isinstance(to, list) else [to])
                  + (cc if isinstance(cc, list) else [cc])
                  + (bcc if isinstance(bcc, list) else [bcc]))
    # Put the email into the queue
    with queueLock:
        emailQueue.put(emailData(msg['Sender'], send_to, msg.as_string()))
    if not (emailThread and emailThread.is_alive()):
        emailThread = sendThread()
        emailThread.start()
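# Hedged sketch (not from the source): one possible shape of the queue
# machinery that send()/_send() rely on. emailData, emailQueue, queueLock
# and sendThread are referenced above but never defined in these snippets;
# the localhost SMTP endpoint is an assumption.
import queue
import smtplib
import threading
from collections import namedtuple

emailData = namedtuple('emailData', ['sender', 'recipients', 'message'])
emailQueue = queue.Queue()
queueLock = threading.Lock()
emailThread = None


class sendThread(threading.Thread):
    """Drain emailQueue and hand each message to a local SMTP server."""

    def run(self):
        while True:
            try:
                data = emailQueue.get(block=False)
            except queue.Empty:
                # Queue drained; the thread exits, and the senders above
                # restart it via the is_alive() check when new mail arrives.
                break
            with smtplib.SMTP('localhost') as smtp:  # assumed SMTP endpoint
                smtp.sendmail(data.sender, list(data.recipients), data.message)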
def main() -> State:
    """Initial entry point."""
    # This is just to make sure APIC access is functional and credentials work.
    # Actual clients used for the upgrade are initialized as needed.
    client = login_loop_for(120, config)
    if client is None:
        return State.FAIL

    # Pre-change checks
    cli_header("Pre-change snapshot")
    if pre_post.init(timeout=120) == State.FAIL:
        log.error("Failed pre-check collection.")
        return State.FAIL
    cli_header("Pre-change health check")
    if health.run(timeout=120) == State.FAIL:
        log.error("Failed pre-change health check.")
        return State.FAIL

    # Pre-upgrade prep
    cli_header("Configuration backup")
    if upgrade.backup(600) == State.FAIL:
        log.error("Failed configuration backup.")
        return State.FAIL
    cli_header("Tech support")
    if upgrade.tech_support(600) == State.FAIL:
        log.error("Failed collecting tech support.")
        return State.FAIL

    # APIC upgrade
    cli_header("APIC upgrade")
    if upgrade.upgrade_apics(3600) == State.FAIL:
        log.error("Failed upgrading APICs.")
        return State.FAIL
    cli_header("APIC post-upgrade comparison checks")
    if pre_post.run(timeout=3600) == State.FAIL:
        log.error("Failed post-check.")
        return State.FAIL
    cli_header("APIC post-upgrade health checks")
    if health.run(timeout=600) == State.FAIL:
        log.error("Failed health check.")
        return State.FAIL

    # Switch upgrades
    for group in config.get("firmware_groups", []):
        cli_header("Switch upgrade")
        if upgrade.upgrade_switches(group, 3600) == State.FAIL:
            log.error(f"Failed switch upgrade for group {group}.")
            return State.FAIL
        cli_header("Switch post-upgrade comparison checks")
        if pre_post.run(timeout=3600) == State.FAIL:
            log.error("Failed post-check.")
            return State.FAIL
        cli_header("Switch post-upgrade health checks")
        if health.run(timeout=600) == State.FAIL:
            log.error("Failed health check.")
            return State.FAIL

    return State.OK
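# Hedged sketch: State and cli_header are used throughout the upgrade
# snippets but not defined in them; minimal stand-ins might look like this.
from enum import Enum


class State(Enum):
    OK = 0
    FAIL = 1


def cli_header(title: str) -> None:
    """Print a visual separator before each upgrade stage."""
    print(f"\n{'=' * 60}\n{title}\n{'=' * 60}")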
def opendir_gals(self):
    """Open an existing project."""
    path = pathlib.Path(config.get('Dir', 'dirprj')).parent
    # path = config.get('Dir', 'dirprj')
    # path = os.path.dirname(path)
    name = askdirectory(initialdir=path)
    if name:
        # dir_ = os.path.abspath(name)
        self.prepare_gals(name)
        self.tol_bar.set_dir(name + ' ...Галс не выбран')
        return name
def open_port(self):
    """Open the ports."""
    super().open_port()
    port_rep = config.get('Port', 'port_rep')
    baudrate_rep = config.getint('Port', 'baudrate_rep')
    try:
        self.pser.open_port(port_rep)
        self.pser.tty.baudrate = baudrate_rep
    except port_exc:
        self.stbar.set_rep(f'Не открыть порт {port_rep}')
        self.stbar.set_icon_rep(self.img_['networky'])
    if self.pser.is_open():
        self.stbar.set_rep(self.pser.get_port_info('РЕПИТЕР'))
        self.stbar.set_icon_rep(self.img_['networkon'])
def __init__(self, config, triggerHandler):
    super(MagSwitchSensor, self).__init__(config, triggerHandler)
    util.config.validateConfig(config.get('data'), CONFIG_KEYS)
    self.config = config['data']
    self.triggerHandler = triggerHandler
    self.lastTrigger = 0
    self.state = 0
    self.initState()
    if platform == 'rpi':
        self.rpiInitState()
    else:
        self.genericInitState()
def open_port(self):
    """Open the ports."""
    port_pui = config.get('Port', 'port_pui')
    baudrate_pui = config.getint('Port', 'baudrate_pui')
    port_gps = config.get('Port', 'port_gps')
    baudrate_gps = config.getint('Port', 'baudrate_gps')
    try:
        self.ser.open_port(port_pui)
        self.ser.tty.baudrate = baudrate_pui
    except port_exc:
        self.stbar.set_device(f'Не открыть порт {port_pui}')
        self.stbar.set_icon(self.img_['networky'])
    if self.ser.is_open():
        self.stbar.set_device(self.ser.get_port_info())
        self.stbar.set_icon(self.img_['networkon'])
    try:
        self.gser.open_port(port_gps)
        self.gser.tty.baudrate = baudrate_gps
    except port_exc:
        self.stbar.set_gps(f'Не открыть порт {port_gps}')
        self.stbar.set_icon_gps(self.img_['networky'])
    if self.gser.is_open():
        self.stbar.set_gps(self.gser.get_port_info('НАП СНС'))
        self.stbar.set_icon_gps(self.img_['networkon'])
def run(timeout: int = 600) -> State:
    """Run the full upgrade sequence: backup, tech support, APICs, switches."""
    if backup(timeout) == State.FAIL:
        log.error("Failed configuration backup.")
        return State.FAIL
    if tech_support(timeout) == State.FAIL:
        log.error("Failed collecting tech support.")
        return State.FAIL
    if upgrade_apics(timeout) == State.FAIL:
        log.error("Failed upgrading APICs.")
        return State.FAIL
    for group in config.get("firmware_groups", []):
        if upgrade_switches(group, timeout) == State.FAIL:
            log.error(f"Failed switch upgrade for group {group}")
            return State.FAIL
    return State.OK
def gals(self, name):
    """Select a tack (survey line)."""
    path = pathlib.Path(config.get('Dir', 'dirprj'))
    dir_gals = path.joinpath('Исходные данные')  # tacks ('base_data') directory
    # if name in (i.name for i in os.scandir(dir_gals)):
    if name in os.listdir(dir_gals):
        if not box.askyesno(
                '!', 'Файл с таким именем уже существует!\n Переписать файл?'):
            return
    self.file_gals = path.joinpath('Исходные данные', name)  # data file name ('base_data')
    self.tol_bar.set_dir(str(self.file_gals))
    head = [
        'format_', 'glub', 'ampl', 'lenth', 'timdata', 'shir', 'dolg', 'vs',
        'kurs', 'vz', 'zg', 'ku', 'depth', 'rej', 'frek', 'cnt', 'm', 'm_man',
        'color_mm', 'm_avto'
    ]
    for i in range(20):
        head.append(f'g{i}')
        head.append(f'a{i}')
        head.append(f'l{i}')
    with open(self.file_gals, 'w', newline='') as f:  # write the header row to the file
        f_csv = csv.writer(f)
        f_csv.writerow(head)
    fname = pathlib.Path(name).stem
    md5 = hashlib.md5(fname.encode('utf-8')).hexdigest()
    # md5 = hashlib.md5(os.path.splitext(name)[0].encode('utf-8')).hexdigest()
    self.tbname = f'tb_{md5}'  # "tb_" prefix because a table name cannot start with a digit
    try:
        create_table(self.dbname, self.tbname)  # create the table (and, if needed, the database)
    except sqlite3.OperationalError as err:
        if str(err) == f'table {self.tbname} already exists':
            del_table(self.dbname, self.tbname)
        else:
            box.showerror('!', f'Ошибка базы данных!\n{str(err)}')
    req.num = 0
    self.del_metka_man()
    return True
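# Hedged sketch: create_table() and del_table() are called above but not
# shown. Minimal sqlite3 versions might look like this; the column layout is
# purely illustrative (the real schema presumably mirrors the CSV header).
import sqlite3


def create_table(dbname, tbname):
    # Connecting creates the database file if it does not exist yet.
    with sqlite3.connect(dbname) as conn:
        conn.execute(f'CREATE TABLE {tbname} (num INTEGER PRIMARY KEY, data TEXT)')


def del_table(dbname, tbname):
    # Clear the existing table so the tack can be re-recorded.
    with sqlite3.connect(dbname) as conn:
        conn.execute(f'DELETE FROM {tbname}')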
import pika
import requests
import json
import datetime

from util import MyLogger, config, insert_sql

headers = {'Content-type': 'application/json'}

# Connection
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Queue ('eth_hook_excpetion' is the section name as it appears in the config)
queue = config.get('eth_hook_excpetion', 'queue')
channel.queue_declare(queue=queue, durable=True)

logs_directory = config.get('eth_hook_excpetion', 'logs')
category = config.get('eth_hook_excpetion', 'category')


def callback(ch, method, properties, body):
    """
    Called every time there is a new element in the queue (var: queue).
    :param ch:
    :param method:
    :param properties:
    :param body:
    :return:
    """
    try:
        # Logger
import json
import os

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split

import mlflow

from util import (
    DataPrepUtil,
    initialize_column_transformer,
    initialize_pipeline,
    plot_roc_curve,
    plot_learning_curve,
    config,
    record_hyperparameters
)

if __name__ == "__main__":
    # Data cleaning step
    data_prep_util = DataPrepUtil()
    X_train, y_train = data_prep_util.load_train_data(
        config.get('paths').get('train_data'))

    X_train, X_test, y_train, y_test = train_test_split(
        X_train, y_train,
        test_size=config.get('test_size'),
        random_state=config.get('random_state'),
        stratify=y_train
    )

    # Hyperparameter tuning, metric and parameter logging
    mlflow.set_experiment(config.get('mlflow').get('exp_name'))
    RUN_NAME = os.path.basename(__file__).split('.')[0]
    with mlflow.start_run(run_name=RUN_NAME):
        print('Starting run {}...'.format(RUN_NAME))
        mlflow.log_param('test_size', config.get('test_size'))
        pipeline = initialize_pipeline(
            initialize_column_transformer(scale_values=True),
            LogisticRegression(
                solver=config.get('hyperparams').get('log_reg').get('solver'),
                random_state=config.get('random_state'),
import pika
import requests
import json
import datetime

from util import MyLogger, config, send_notification, insert_sql

headers = {'Content-type': 'application/json'}

# Connection
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Queue
queue = config.get('erc_hook_main', 'queue')
channel.queue_declare(queue=queue, durable=True)

logs_directory = config.get('erc_hook_main', 'logs')
category = config.get('erc_hook_main', 'category')
exception_queue = config.get('erc_hook_excpetion', 'queue')


def callback(ch, method, properties, body):
    """
    Called every time there is a new element in the queue (var: queue).
    :param ch:
    :param method:
    :param properties:
    :param body:
    :return:
    """
    try:
def get_prj_name():
    return pathlib.Path(config.get('Dir', 'dirprj')).name
def __init__(self, config):
    super(SMTPReporter, self).__init__(config)
    self.smtpConfig = config['data']
    util.config.validateConfig(config.get('data'), CONFIG_KEYS)
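# Hedged sketch: util.config.validateConfig() is relied on by the
# constructors above but not shown; a minimal version might simply check
# that all required keys are present.
def validateConfig(data, required_keys):
    missing = [key for key in required_keys if key not in (data or {})]
    if missing:
        raise ValueError('Missing config keys: {}'.format(missing))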
import json
import os

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split

import mlflow

from util import (DataPrepUtil, initialize_column_transformer,
                  initialize_pipeline, plot_roc_curve, plot_learning_curve,
                  config, record_hyperparameters)

if __name__ == "__main__":
    # Data cleaning step
    data_prep_util = DataPrepUtil()
    X_train, y_train = data_prep_util.load_train_data(
        config.get('paths').get('train_data'))

    X_train, X_test, y_train, y_test = train_test_split(
        X_train, y_train,
        test_size=config.get('test_size'),
        random_state=config.get('random_state'),
        stratify=y_train)

    # Hyperparameter tuning, metric and parameter logging
    mlflow.set_experiment(config.get('mlflow').get('exp_name'))
    RUN_NAME = os.path.basename(__file__).split('.')[0]
    with mlflow.start_run(run_name=RUN_NAME):
        print('Starting run {}...'.format(RUN_NAME))
        mlflow.log_param('test_size', config.get('test_size'))
        pipeline = initialize_pipeline(
            initialize_column_transformer(scale_values=True),
            RandomForestClassifier(random_state=config.get('random_state')))
        param_grid = {
import datetime

import redis
from apscheduler.schedulers.blocking import BlockingScheduler

from util import insert_sql, increment_sql, rpc_request, update_sql, send_notification, find_sql_join, config, MyLogger

# Redis Connection
pool = redis.ConnectionPool(host=config.get('redis', 'host'),
                            port=int(config.get('redis', 'port')),
                            db=int(config.get('redis', 'db')))
redis_conn = redis.Redis(connection_pool=pool)

# Blockchain Node
url = config.get('node', 'url')
confirmation_threshold = int(config.get('eth', 'confirmations'))

logs_directory = config.get('eth_block', 'logs')
category = config.get('eth_block', 'category')
hook_queue = config.get('eth_hook_main', 'queue')


def block_crawler():
    """
    Block Crawling process
    :return:
    """
    obj_logger = MyLogger(logs_directory, category)
    obj_logger.msg_logger('Getting Block Numbers.....')

    # Get Current Block from RPC
    current_block = int(
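# Hedged sketch: rpc_request() is imported from util but not shown; a minimal
# JSON-RPC 2.0 POST against the configured node (the module-level `url` above)
# might look like this.
import json

import requests


def rpc_request(logger, method, params):
    payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': 1}
    response = requests.post(url, data=json.dumps(payload),
                             headers={'Content-type': 'application/json'})
    logger.msg_logger('RPC call: {}'.format(method))
    return response.json()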
import datetime

import redis
from apscheduler.schedulers.blocking import BlockingScheduler

from util import insert_sql, rpc_request, send_notification, find_sql_join, MyLogger, config

# Redis Connection
pool = redis.ConnectionPool(host=config.get('redis', 'host'),
                            port=int(config.get('redis', 'port')),
                            db=int(config.get('redis', 'db')))
redis_conn = redis.Redis(connection_pool=pool)

# Blockchain Node
logs_directory = config.get('eth_mempool', 'logs')
category = config.get('eth_mempool', 'category')
hook_queue = config.get('eth_hook_main', 'queue')


def mempool_crawler():
    """
    Mempool Process
    :return:
    """
    obj_logger = MyLogger(logs_directory, category)
    obj_logger.msg_logger('#' * 100)
    obj_logger.msg_logger('Getting Mempool Data')

    # Get Mempool Data
    mempool_transaction_data = rpc_request(
        obj_logger, 'eth_getBlockByNumber', ['pending', True]).get(
            'result',
import json
import os

from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, train_test_split

import mlflow

from util import (
    DataPrepUtil,
    initialize_column_transformer,
    initialize_pipeline,
    plot_roc_curve,
    plot_learning_curve,
    config,
    record_hyperparameters
)

if __name__ == "__main__":
    # Data cleaning step
    data_prep_util = DataPrepUtil()
    X_train, y_train = data_prep_util.load_train_data(
        config.get('paths').get('train_data'))

    X_train, X_test, y_train, y_test = train_test_split(
        X_train, y_train,
        test_size=config.get('test_size'),
        random_state=config.get('random_state'),
        stratify=y_train
    )

    # Hyperparameter tuning, metric and parameter logging
    mlflow.set_experiment(config.get('mlflow').get('exp_name'))
    RUN_NAME = os.path.basename(__file__).split('.')[0]
    with mlflow.start_run(run_name=RUN_NAME):
        print('Starting run {}...'.format(RUN_NAME))
        mlflow.log_param('test_size', config.get('test_size'))
        pipeline = initialize_pipeline(
            initialize_column_transformer(scale_values=True),
            KNeighborsClassifier()
        )
        param_grid = {
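            # Hedged continuation (assumption, not the source's actual grid):
            # 'clf__n_neighbors' presumes the classifier step in the pipeline
            # is named 'clf'; the values are illustrative only.
            'clf__n_neighbors': [3, 5, 7],
        }
        # A typical tail for such a run: grid search over the pipeline,
        # then metric logging to MLflow.
        search = GridSearchCV(pipeline, param_grid, cv=5, scoring='roc_auc')
        search.fit(X_train, y_train)
        mlflow.log_metric('best_cv_roc_auc', search.best_score_)
        mlflow.log_metric('test_accuracy',
                          search.best_estimator_.score(X_test, y_test))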
# import tkinter as tk
from ttkthemes import ThemedTk

from util import config

theme = config.get('Theme', 'theme')
application = ThemedTk(theme=theme)
# application = ThemedTk(theme="radiance")
# plastik clearlooks elegance radiance
# arc black blue equilux itft1 keramik kroc
# application.set_theme('arc')
# application = tk.Tk()

import bso  # imported after the themed root window exists


def main():
    # application = tk.Tk()
    application.withdraw()  # hide
    # application.title("БСО (Блок сбора и обработки информации)")
    window = bso.App(application, 1100, 450, "БСО")
    application.protocol("WM_DELETE_WINDOW", window.exit_)
    application.minsize(900, 550)
    # application.wm_state('zoomed')
    application.deiconify()  # show
    application.mainloop()


main()
import json
import datetime

import redis
from apscheduler.schedulers.blocking import BlockingScheduler
import web3

from util import insert_sql, rpc_request, send_notification, find_sql_join, MyLogger, config

# Redis Connection
pool = redis.ConnectionPool(
    host=config.get('redis', 'host'),
    port=int(config.get('redis', 'port')),
    db=int(config.get('redis', 'db'))
)
redis_conn = redis.Redis(connection_pool=pool)

# Blockchain Node
abi_file = config.get('erc20', 'abi')

logs_directory = config.get('erc_mempool', 'logs')
category = config.get('erc_mempool', 'category')
hook_queue = config.get('erc_hook_main', 'queue')


def mempool_crawler():
    """
    Mempool Process
    :return:
    """
    obj_logger = MyLogger(logs_directory, category)
    obj_logger.msg_logger('#' * 100)
    obj_logger.msg_logger('Getting Mempool Data')
def init(timeout: int = 3600) -> State:
    """Always create a new snapshot for the start of the upgrade."""
    if os.path.isfile(config.get("snapshot_file")):
        os.remove(config.get("snapshot_file"))
    return run(timeout=timeout)