class Sql:
    # MySQL helper class
    def __init__(self):
        self.db_conf = Config()
        host = self.db_conf.get_named_key('Mysql-Database', 'host')
        db = self.db_conf.get_named_key('Mysql-Database', 'db')
        user = self.db_conf.get_named_key('Mysql-Database', 'user')
        password = self.db_conf.get_named_key('Mysql-Database', 'password')
        charset = self.db_conf.get_named_key('Mysql-Database', 'charset')
        self.conn = pymysql.connect(host=host, user=user, password=password,
                                    db=db, charset=charset)
        self.cur = self.conn.cursor()

    # Execute a single SQL statement (INSERT, DELETE, UPDATE)
    def execute_sql(self, sql, data):
        self.cur.execute(sql, data)
        self.conn.commit()

    # Query; return the fetched rows (the original committed and discarded the result)
    def search(self, sql):
        self.cur.execute(sql)
        return self.cur.fetchall()

    # Close the database connection
    def close_mysql(self):
        self.cur.close()
        self.conn.close()
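# Hypothetical usage sketch for the Sql helper above; the table and column
# names are made up for illustration, the real schema is not shown:
db = Sql()
db.execute_sql('INSERT INTO users (name) VALUES (%s)', ('alice',))
rows = db.search('SELECT id, name FROM users')
db.close_mysql()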
def __create_driver(self):
    try:
        browser = Config().get_property("parameters", "browser")
    except NonExistedProperty:
        self.log.write_log("Non-existed para: parameters--browser", self.mode)
        return None
    self.log.write_log("Initializing...", self.mode)
    if browser == "phantomjs":
        exec_path = Config().get_property("path", "phantomjs_exec_path")
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        # Set the request header (user agent)
        dcap["phantomjs.page.settings.userAgent"] = \
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " \
            "AppleWebKit/537.36 (KHTML, like Gecko) " \
            "Chrome/58.0.3029.110 Safari/537.36"
        # Do not load images
        dcap["phantomjs.page.settings.loadImages"] = False
        # Create the driver (the most time-consuming step)
        driver = webdriver.PhantomJS(exec_path, desired_capabilities=dcap)
    elif browser == "chrome":
        exec_path = Config().get_property("path", "chrome_driver_path")
        driver = webdriver.Chrome(executable_path=exec_path)
    else:
        self.log.write_log("Invalid browser parameter.", self.mode)
        return None
    return driver
def __init__(self, logger):
    """
    Write log records to a file at the configured path, with the configured
    log level, tagged with the name of the calling module.
    """
    # Read the logging settings from the config file
    cf = Config()
    self.log_dir = cf.get_value("log.conf", "basiclog", "log_dir")
    self.format = cf.get_value("log.conf", "basiclog", "format")
    # Create a logger
    self.logger = logging.getLogger(logger)
    self.logger.setLevel(logging.DEBUG)
    cur_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    package_path = os.path.abspath("..")
    file_path = os.path.join(package_path, self.log_dir)
    file_name = cur_date + ".log"
    log_file = os.path.join(file_path, file_name)
    fh = logging.FileHandler(log_file)
    fh.setLevel(logging.INFO)
    # Create a second handler that writes to the console
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # Define the output format for both handlers
    formatter = logging.Formatter(self.format)
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # Attach the handlers to the logger
    self.logger.addHandler(fh)
    self.logger.addHandler(ch)
def test_should_raise_exception_if_key_not_found() -> None:
    config = Config(os.path.join(os.path.dirname(__file__), '../../app.ini'))
    try:
        config.get_path('no_existing_key')
        fail('it should raise exception')
    except KeyError as e:
        assert_that(e.args[0]).is_equal_to('no_existing_key')
def create_vm(self, arch, vm_name):
    if self.vm_is_exist(vm_name):
        self._logger.error('VM [%s] already exists.' % vm_name)
        return
    if not have_privileges():
        self._logger.error('You need root permissions to create VM [%s]' % vm_name)
        return
    disk_path = self._copy_disk(arch, vm_name)
    kernel_path = Config.get_kernel_path(arch)
    ram = Config.get_ram_size(arch)
    if arch == VMArch.x86_64.value:
        create_cmd = 'virt-install --connect %s --name %s' \
                     ' --ram %s --arch x86_64' \
                     ' --disk %s,bus=virtio,format=raw' \
                     ' --boot kernel=%s,kernel_args="root=/dev/vda console=ttyS0"' \
                     ' --network network=default' \
                     ' --hvm --noautoconsole' \
                     % (self._target, vm_name, ram, disk_path, kernel_path)
    else:
        # Guard against arch values without a build recipe (the original
        # would hit a NameError on create_cmd below)
        self._logger.error('Unsupported architecture [%s]' % arch)
        return
    self._logger.info('creating VM [%s] ...' % vm_name)
    self._logger.debug(create_cmd)
    self._logger.debug(command(create_cmd))
    self._add_libvmi_conf(arch, vm_name)
    return
def get_options(options: Dict):
    # Map 'None' strings to None, otherwise cast to the requested type
    f = lambda k, t: None if options[k] == 'None' else t(options[k])
    op = Config()
    op.add_dict(dict([
        ['inference', bool(options['--inference'])],
        ['algo', str(options['--algorithm'])],
        ['algo_config', f('--config-file', str)],
        ['env', f('--env', str)],
        ['port', int(options['--port'])],
        ['unity', bool(options['--unity'])],
        ['graphic', bool(options['--graphic'])],
        ['name', f('--name', str)],
        ['save_frequency', f('--save-frequency', int)],
        ['models', int(options['--models'])],
        ['store_dir', f('--store-dir', str)],
        ['seed', int(options['--seed'])],
        ['max_step', f('--max-step', int)],
        ['max_episode', f('--max-episode', int)],
        ['sampler', f('--sampler', str)],
        ['load', f('--load', str)],
        ['fill_in', bool(options['--fill-in'])],
        ['prefill_choose', bool(options['--prefill-choose'])],
        ['gym', bool(options['--gym'])],
        ['gym_agents', int(options['--gym-agents'])],
        ['gym_env', str(options['--gym-env'])],
        ['gym_env_seed', int(options['--gym-env-seed'])],
        ['render_episode', f('--render-episode', int)],
        ['info', f('--info', str)]
    ]))
    return op
def main(): """Call when graph_generator starts.""" # Setup signal handlers signal.signal(signal.SIGINT, exit_handler) signal.signal(signal.SIGTERM, exit_handler) # Setup the configuration and mongo connection Config.init_logging(log) mongo = Mongo.connect(log, Config.MONGO_URI) if mongo is None: raise ConnectionError # Create the graph generator gen = GraphGenerator( log, mongo, Config.GENERATOR_K, Config.GENERATOR_ITER, Config.GENERATOR_CUT_D, Config.GENERATOR_SCALE, Config.GENERATOR_DELTA_B, Config.GENERATOR_DELTA_T, ) # Run the graph generator in a loop, run every GRAPH_UPDATE_RATE seconds while 1: gen.run() time.sleep(Config.GENERATOR_RATE)
class HmdClient(object):
    def __init__(self):
        self.conf = Config()
        self.conf.init('brain-ta.conf')
        remote = 'localhost:{0}'.format(self.conf.get('brain-ta.hmd.front.port'))
        channel = grpc.insecure_channel(remote)
        self.stub = hmd_pb2_grpc.HmdClassifierStub(channel)

    def set_model(self, model_name, target_list):
        model = hmd_pb2.HmdModel()
        model.lang = lang_pb2.kor
        model.model = model_name
        rules_list = list()
        for item_dict in target_list:
            category = item_dict['category']
            rule = item_dict['rule']
            category_list = category.split('!@#$')
            hmd_rule = hmd_pb2.HmdRule()
            hmd_rule.rule = rule
            hmd_rule.categories.extend(category_list)
            rules_list.append(hmd_rule)
        model.rules.extend(rules_list)
        self.stub.SetModel(model)
        model_key = hmd_pb2.ModelKey()
        model_key.lang = lang_pb2.kor
        model_key.model = model_name
def config_log(cls):
    cf = Config()
    log_dir = os.path.join(pro_path, cf.get_runtime("log_dir"))
    today = time.strftime("%Y%m%d", time.localtime(time.time()))
    log_file = os.path.join(log_dir, today + ".log")
    # Get the root logger and set its level from the config file
    # (getattr replaces the original eval("logging." + ...))
    cls.logger = logging.getLogger()
    cls.logger.setLevel(getattr(logging, cf.get_runtime("log_level").upper()))
    # Create the individual handlers
    fh = logging.FileHandler(log_file, mode="a")
    ch = logging.StreamHandler()
    # Define the output format
    ft = logging.Formatter(
        "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
    )
    fh.setFormatter(ft)
    ch.setFormatter(ft)
    # Attach the custom handlers to our logger
    cls.logger.addHandler(fh)
    cls.logger.addHandler(ch)
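# Hypothetical usage sketch: config_log looks like a classmethod, so with
# an assumed owning class named 'Log' it could be driven like this:
# Log.config_log()
# Log.logger.info("logging configured from runtime config")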
def test_user_center(self):
    # Set the url
    wallet_list = Config(url_file).get('user_center')
    self.url = wallet_list
    My_http.set_url(self.url)
    print('Step 1: set url: ' + self.url)
    if self.token == '0':
        token = Get_token().get_token()
    else:  # '1' (or anything else): send the request without a token
        token = None
    # Set the headers
    headers = Config().get('headers')
    headers['jcobToken'] = token  # add the token to the headers
    My_http.set_headers(headers)
    print('Step 2: set headers (token etc.)')
    print(headers)
    # Set the params
    params = Config().get('params')
    My_http.set_params(params)
    print('Step 3: set params')
    print(params)
    # Send the request
    self.return_json = My_http.get()
    print(self.return_json.json())
    # Extract the request method, e.g. 'GET' from '<PreparedRequest [GET]>'
    request_repr = str(self.return_json.request)
    method = request_repr[request_repr.find('[') + 1:request_repr.find(']')]
    print("Step 4: send request\n\t\tmethod: " + method)
    # Check the result
    self.check_result()
    print('Step 5: check the result')
def get_buffer(buffer_args: Config):
    if buffer_args.get('buffer_size', 0) <= 0:
        logger.info(
            'This algorithm does not need to specify a data buffer outside the model.'
        )
        return None
    _type = buffer_args.get('type', None)
    if _type == 'ER':
        logger.info('ER')
        from utils.replay_buffer import ExperienceReplay as Buffer
    elif _type == 'PER':
        logger.info('PER')
        from utils.replay_buffer import PrioritizedExperienceReplay as Buffer
    elif _type == 'NstepER':
        logger.info('NstepER')
        from utils.replay_buffer import NStepExperienceReplay as Buffer
    elif _type == 'NstepPER':
        logger.info('NstepPER')
        from utils.replay_buffer import NStepPrioritizedExperienceReplay as Buffer
    elif _type == 'EpisodeER':
        logger.info('EpisodeER')
        from utils.replay_buffer import EpisodeExperienceReplay as Buffer
    else:
        logger.info('On-Policy DataBuffer')
        return None
    return Buffer(batch_size=buffer_args['batch_size'],
                  capacity=buffer_args['buffer_size'],
                  **buffer_args[_type].to_dict)
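# Hypothetical usage sketch: get_buffer expects a Config whose nested
# section (named after 'type') holds the buffer-specific kwargs; the exact
# keys below are illustrative, not the library's documented schema:
# buffer_args = Config()
# buffer_args.add_dict({'type': 'ER', 'batch_size': 32, 'buffer_size': 10000,
#                       'ER': {}})
# buffer = get_buffer(buffer_args)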
def __init__(self):
    self.config = Config()
    self.host = self.config.get('mysqlconf', 'MySqlhost')
    self.user = self.config.get('mysqlconf', 'MySqluser')
    self.password = self.config.get('mysqlconf', 'MySqlpwd')
    self.dbname = self.config.get('mysqlconf', 'MySqldb')
    self.initMysql()
def startWatchdog():
    if not Config.isSetted():
        raise RuntimeError("Server not configured")
    config = Config.config("library-watchdog")
    print("Starting watchdog with config: " + str(config))
    patterns = config["patterns"]
    ignore_patterns = config["ignore-patterns"]
    ignore_directories = config["ignore-directories"]
    case_sensitive = config["case-sensitive"]
    path = config["path"]
    recursively = config["recursively"]
    watchdog_events = PatternMatchingEventHandler(
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=ignore_directories,
        case_sensitive=case_sensitive)
    watchdog_events.on_created = on_created
    watchdog_events.on_deleted = on_deleted
    watchdog_events.on_modified = on_modified
    watchdog_events.on_moved = on_moved
    observer = Observer()
    observer.schedule(event_handler=watchdog_events, recursive=recursively, path=path)
    observer.daemon = True
    observer.start()
    print("Watchdog started...")
    return observer
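# on_created/on_deleted/on_modified/on_moved are referenced above but not
# shown; watchdog passes each handler a single event object, so a minimal
# sketch is:
def on_created(event):
    print("created: " + event.src_path)

def on_deleted(event):
    print("deleted: " + event.src_path)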
def __init__(self, args):
    self.args = args
    self.conf = Config()
    self.conf.init('brain-ta.conf')
    engine = args.engine.lower()
    if engine in ('nlp1', 'nlp2', 'nlp3'):
        # The engine name ends with the engine number used in the config key
        port_key = 'brain-ta.nlp.{0}.kor.port'.format(engine[-1])
        self.remote = 'localhost:{0}'.format(self.conf.get(port_key))
        channel = grpc.insecure_channel(self.remote)
        self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel)
    else:
        print 'Engine does not exist'
        raise Exception('Engine does not exist')
def buildCorrectedData(header, spot_data, gathering_data):
    position = gathering_data[:, 0]
    speed = gathering_data[:, 1]
    # Factor to fix the frequency difference between the XPS and the TDI camera
    alpha = 10000 / header['LineFreq']
    aligned_size = min(int(len(position) / alpha), len(spot_data))
    spot_data = spot_data[1:aligned_size, :]
    # Collect the valid lines and values
    x = []
    y = []
    for line in range(0, len(spot_data)):
        try:
            current_position = getFrequencyAlignedData(line, alpha, position)
            current_speed = getFrequencyAlignedData(line, alpha, speed)
        except IndexError:
            # The position for this line does not exist
            break
        # Only store data within the configured position window; drop outliers,
        # backwards travel and clipped intensities
        if Config.get('FP_START') <= current_position <= Config.get('FP_END') \
                and current_speed > 0 \
                and np.min(spot_data[line, :]) < Config.get('CLAMP_MAX_INTENSITY') \
                and np.max(spot_data[line, :]) >= Config.get('CLAMP_MIN_INTENSITY'):
            x.append(current_position)
            y.append(spot_data[line, :])
    return np.array(x), np.array(y)
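# getFrequencyAlignedData is referenced above but not shown; a minimal
# sketch, assuming it maps a camera line index onto the XPS gathering
# samples via the frequency ratio alpha:
def getFrequencyAlignedData(line, alpha, data):
    return data[int(line * alpha)]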
def main(): """Call when data_collector starts.""" # Setup the configuration and mongo connection Config.init_logging(log) mongo = Mongo.connect(log, Config.MONGO_URI) # Populate database with known berths if not already there if mongo is not None and "BERTHS" not in mongo.collections(): log.info("Loading known berths into database") with open("./berths.json") as berths_file: berths_data = json.load(berths_file) for key, set_data in berths_data.items(): mongo.update("BERTHS", {"NAME": key}, {"$set": set_data}) # Setup the STOMP national rail data feed collector and connect feeds = [] if Config.COLLECTOR_PPM: feeds.append(Feeds.PPM) if Config.COLLECTOR_TD: feeds.append(Feeds.TD) if Config.COLLECTOR_TM: feeds.append(Feeds.TM) collector = STOMPCollector( mongo, feeds, Config.COLLECTOR_ATTEMPTS, Config.COLLECTOR_NR_USER, Config.COLLECTOR_NR_PASS, ) collector.start() # Infinite loop while 1: time.sleep(30)
def run_workers(spawn_worker, get_logger, process_count=0):
    """
    Run workers

    :param spawn_worker: factory that creates a worker process
    :param get_logger: logger that receives the workers' log records
    :param process_count: number of worker processes (0 = auto)
    """
    conf = Config()
    conf.init('biz.conf')
    logq = multiprocessing.Queue()
    if process_count == 0:
        if len(sys.argv) > 1:
            process_count = int(sys.argv[1])
        else:
            process_count = multiprocessing.cpu_count()
    process_list = list()
    for i in range(process_count):
        p = spawn_worker(i, conf, logq)
        p.start()
        process_list.append(p)
    while True:
        try:
            log_level, message = logq.get()
            get_logger.log(log_level, message)
        except Exception:
            break
    # Process the last log records
    time.sleep(0.2)
    while logq.empty() is False:
        log_level, message = logq.get()
        get_logger.log(log_level, message)
    get_logger.info('Waiting for child processes...')
    for p in process_list:
        p.terminate()
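# Hypothetical spawn_worker factory: run_workers expects a callable that
# returns an unstarted process which logs through logq; a minimal sketch:
import logging
import multiprocessing

def spawn_worker(index, conf, logq):
    def work():
        logq.put((logging.INFO, 'worker %d started' % index))
    return multiprocessing.Process(target=work)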
def create_dash(server):
    """Create the Dash instance for this application.

    Args:
        server (flask.Flask): flask application
    Returns:
        dash.Dash: dash application
    """
    app = Dash(
        name=__package__,
        server=server,
        suppress_callback_exceptions=True,
        external_stylesheets=[dbc.themes.LUX],
    )
    # Initialise logging
    Config.init_logging(app.logger)
    server.logger.removeHandler(app.logger.handlers[0])
    # Initialise the mongo database
    app.mongo = Mongo.connect(app.logger, app.server.config["MONGO_URI"])
    # Give the Flask config a default "TITLE" so that Dash settings can be
    # read from anywhere in the project via Flask's 'current_app'
    server.config.setdefault("TITLE", "Dash")
    # Set the app name
    app.title = "thetrains"
    return app
def arg_parser():
    project_name = "%s - %s" % (Config.get_value(EnvType.PROJECT.value),
                                Config.get_value(EnvType.VERSION.value))
    # Create the top-level parser
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version=project_name)
    subparsers = parser.add_subparsers(
        help='You can analyze with running VMs or create VMs')
    # Create the parser for the "run" command
    parser_run = subparsers.add_parser('run', help='Analysis for running VMs')
    parser_run.set_defaults(which='run')
    group_run = parser_run.add_argument_group('run')
    group_run_ex = group_run.add_mutually_exclusive_group()
    group_run_ex.add_argument('-a', '--all', dest='allvm', action='store_true',
                              default=False, help='All VMs')
    group_run_ex.add_argument('-v', '--vm', dest='vm', type=str, action='store',
                              default='', help='The VM you want to analyze')
    # Create the parser for the "create" command
    parser_create = subparsers.add_parser('create', help='Create VMs')
    parser_create.set_defaults(which='create')
    parser_create.add_argument('name', type=str,
                               help='The VM name you want to create')
    parser_create.add_argument('architecture', type=str,
                               help='The VM architecture you want to create')
    parser_create.add_argument('-m', '--amount', type=int, action='store',
                               default=1, help='The number of VMs you want to create')
    args = parser.parse_args()
    if args.which == 'run':
        if not args.allvm and args.vm == '':
            parser_run.print_usage()
            print "%s run: error: you have to select either -a or -v" % __file__
            sys.exit(2)
    elif args.which == 'create':
        if not Config.arch_is_available(args.architecture):
            parser_create.print_usage()
            print "%s create: error: your arch %s is not supported" % (
                __file__, args.architecture)
            sys.exit(2)
    return args
def _create_VirtVM(self, vm):
    vm_arch = self.get_vm_arch(vm)
    name = vm.name()
    virtVM = LibvirtVM(name, vm_arch,
                       kb_to_mb(vm.info()[Const.MEMSIZE]),
                       Config.get_value('username', vm_arch),
                       Config.get_value('password', vm_arch))
    return virtVM
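# kb_to_mb is referenced above but not shown; a one-line sketch of the
# assumed conversion:
def kb_to_mb(kb):
    return kb // 1024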
def __init__(self, test_case=None):
    self.parser = None
    self.config = Config()
    self.driver = None
    if test_case is not None:
        self.browser = test_case.BROWSER
        self.headless = test_case.HEAD_LESS
        logger.config_logger(test_case.__name__, self.browser)
def __init__(self, args):
    self.args = args
    self.conf = Config()
    self.conf.init('brain-ta.conf')
    remote = 'localhost:{0}'.format(self.conf.get('brain-ta.hmd.front.port'))
    channel = grpc.insecure_channel(remote)
    self.stub = hmd_pb2_grpc.HmdClassifierStub(channel)
def __init__(self):
    self.email = Config().get('email')
    self.user = self.email.get('user')
    self.password = self.email.get('password')
    self.host = self.email.get('host')
    self.receivers = self.email.get('receivers')  # a list is supported
    self.subject = self.email.get('subject')
    self.contens = self.email.get('contens')
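# Hypothetical companion send() sketch using smtplib, matching the fields
# read above; the class's real send logic is not shown:
def send(self):
    import smtplib
    from email.mime.text import MIMEText
    msg = MIMEText(self.contens)
    msg['Subject'] = self.subject
    msg['From'] = self.user
    msg['To'] = ', '.join(self.receivers)
    smtp = smtplib.SMTP(self.host)
    smtp.login(self.user, self.password)
    smtp.sendmail(self.user, self.receivers, msg.as_string())
    smtp.quit()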
def __init__(self):
    logger.debug("camera: started")
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.bind((Config.get("CAM_HOST"), Config.get("CAM_PORT")))
    self.socket.listen(10)
    self.program = Neunkdemo()
def test_wallet_list(self):
    # Set the url
    wallet_list = Config(url_file).get('wallet_list')
    self.url = wallet_list.get(self.date_index)
    My_http.set_url(self.url)
    print('Step 1: set url: ' + self.url)
    if self.token == '0':
        token = Get_token().get_token()
    else:  # '1' (or anything else): send the request without a token
        token = None
    # Set the headers
    headers = Config().get('headers')
    headers['jcobToken'] = token
    My_http.set_headers(headers)
    print('Step 2: set headers (token etc.)')
    print(headers)
    # Set the params
    params = Config().get('params')
    My_http.set_params(params)
    print('Step 3: set params')
    print(params)
    if self.method == 'post':
        # Set the data
        data = {
            'dateIndex': self.dateIndex,
            'gameType': self.gameType,
            'qiuFlag': self.qiuFlag
        }
        My_http.set_data(data)
        print(data)
        print('Step 4: set data')
        # Send the request
        self.return_json = My_http.postWithJson()
        print(self.return_json.json())
        # Extract the request method, e.g. 'POST' from '<PreparedRequest [POST]>'
        request_repr = str(self.return_json.request)
        method = request_repr[request_repr.find('[') + 1:request_repr.find(']')]
        print("Step 5: send request\n\t\tmethod: " + method)
        # Check the result
        self.check_result()
        print('Step 6: check the result')
    elif self.method == 'get':
        # Send the request
        self.return_json = My_http.get()
        print(self.return_json.json())
        request_repr = str(self.return_json.request)
        method = request_repr[request_repr.find('[') + 1:request_repr.find(']')]
        print("Step 4: send request\n\t\tmethod: " + method)
        # Check the result
        self.check_result()
        print('Step 5: check the result')
def __init__(self, logger):
    self.conf = Config()
    self.conf.init('biz.conf')
    self.dsn_tns = self.conf.get('oracle.dsn').strip()
    passwd = decrypt_string(self.conf.get('oracle.passwd'))
    self.conn = cx_Oracle.connect(self.conf.get('oracle.user'), passwd, self.dsn_tns)
    self.logger = logger
    self.cursor = self.conn.cursor()
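# Hypothetical usage sketch for the Oracle wrapper above ('OracleDb' is an
# assumed class name; the real one is not shown):
# db = OracleDb(logger)
# db.cursor.execute('SELECT 1 FROM dual')
# db.logger.info(db.cursor.fetchone())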
def __init__(self): """ Init the engine """ # Read the config config = Config(CONFIG_FILE) serial_rfid_device = config.get_value_for("serial_rfid_device") # Define the rfid device self.rfid = RfidSerialMFRC522(serial_rfid_device)
def startWebService(observer=None):
    webservice.config["DEBUG"] = Config.config("web-server")["debug"]
    webservice.config["SECRET_KEY"] = b'_5#y2L"F4Q8z\n\xec]/'
    socketio = SocketIO(webservice)
    socketio.run(webservice, port=Config.config("web-server")["port"], host='0.0.0.0')
    # socketio.run blocks; stop the watchdog once the web service exits
    stopWatchdog(observer)
class NlpClient(object):
    def __init__(self, args):
        self.args = args
        self.conf = Config()
        self.json_printer = JsonPrinter()
        self.conf.init('brain-ta.conf')
        engine = args.engine.lower()
        if engine in ('nlp1', 'nlp2', 'nlp3'):
            # The engine name ends with the engine number used in the config key
            port_key = 'brain-ta.nlp.{0}.kor.port'.format(engine[-1])
            self.remote = 'localhost:{0}'.format(self.conf.get(port_key))
            channel = grpc.insecure_channel(self.remote)
            self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel)
        else:
            print 'Engine does not exist'
            raise Exception('Engine does not exist')

    def analyze(self, target_text):
        in_text = nlp_pb2.InputText()
        try:
            in_text.text = target_text
        except Exception:
            # Fall back to converting EUC-KR input to UTF-8
            target_text = unicode(target_text, 'euc-kr').encode('utf-8')
            in_text.text = target_text
        in_text.lang = lang_pb2.kor
        in_text.split_sentence = True
        in_text.use_tokenizer = False
        in_text.use_space = self.args.space
        in_text.level = 0
        in_text.keyword_frequency_level = 0
        ret = self.stub.Analyze(in_text)
        # To inspect the raw result as JSON:
        # json_text = json_format.MessageToJson(ret, True, True)
        # data = json.loads(json_text)
        # self.json_printer.pprint(data)
        result_list = list()
        for idx in range(len(ret.sentences)):
            nlp_word = str()
            morph_word = str()
            analysis = ret.sentences[idx].morps
            for ana_idx in range(len(analysis)):
                if analysis[ana_idx].type in ['VV', 'VA', 'VX', 'VCP', 'VCN']:
                    # Restore the '다' ending for verb/adjective lemmas
                    nlp_word += ' {0}다'.format(analysis[ana_idx].lemma)
                    morph_word += ' {0}다/{1}'.format(analysis[ana_idx].lemma,
                                                      analysis[ana_idx].type)
                else:
                    nlp_word += ' {0}'.format(analysis[ana_idx].lemma)
                    morph_word += ' {0}/{1}'.format(analysis[ana_idx].lemma,
                                                    analysis[ana_idx].type)
            nlp_sent = nlp_word.encode('utf-8').strip()
            morph_sent = morph_word.encode('utf-8').strip()
            result_list.append((target_text, nlp_sent, morph_sent))
        return result_list
def create_app(filename):
    app = Flask(__name__)
    app.config.update(Config.load(filename))
    db.init_app(app)
    migrate.init_app(app, db)
    logging.config.dictConfig(Config.load("logger"))
    from app.api.v1.test import api_v1
    app.register_blueprint(api_v1)
    app.json_encoder = JSONEncoder
    return app
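# Hypothetical usage sketch (the config filename is illustrative):
if __name__ == '__main__':
    app = create_app('app')
    app.run(host='0.0.0.0', port=5000)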
def __init__(self, logger_name='JMTool Autotest'):
    self.logger = logging.getLogger(logger_name)
    logging.root.setLevel(logging.NOTSET)
    c = Config().get('log')
    # Log file name
    self.log_file_name = time.strftime("%Y-%m-%d-%H-%M-%S") + '_test.log'
    # Number of log files to keep
    self.backup_count = c.get('backup') if c and c.get('backup') else 5
    # Log output levels
    self.console_output_level = c.get('console_level') if c and c.get('console_level') else 'WARNING'
    self.file_output_level = c.get('file_level') if c and c.get('file_level') else 'DEBUG'
    # Log output format
    pattern = c.get('pattern') if c and c.get('pattern') else '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    self.formatter = logging.Formatter(pattern)
def __init__(self): """ Init the engine """ logging.info("Starting RfidDoor") # Read the config logging.info("Read the configuration") config = Config(CONFIG_FILE) status_led_pin = int(config.get_value_for("status_led_pin")) door_relay_pin = int(config.get_value_for("door_relay_pin")) serial_rfid_device = config.get_value_for("serial_rfid_device") smtp_server = config.get_value_for("smtp_server") self.my_email = config.get_value_for("my_email") # Define the status led logging.info("Setup status led") self.status_led = Led("Status led", status_led_pin) # make the led blink 5 times quickly to say it is starting self.status_led.blink_n(5) # Define the rfid device logging.info("Setup rfid device") self.rfid = RfidSerialMFRC522(serial_rfid_device) # make the led blink 3 seconds to say the rfid has been successfully initiated self.status_led.blink(3000) # Define the relay logging.info("Setup relay") self.door_relay = Relay("Door relay", door_relay_pin) # Create the mail sender logging.info("Setup email sender") self.mail_sender = MailSender(smtp_server) # send an email to tell the script is starting (will help to detect some unusual reboot) self.mail_sender.send("Rfid door : starts!", self.my_email, self.my_email, "Rfid door script has just started") # Init the security manager logging.info("Setup the security manager") self.security = Security(CONFIG_FILE)
class ThirdSoft(TweakPage):
    def __init__(self):
        TweakPage.__init__(self,
                           _('Third-Party Software Sources'),
                           _('After every release of Ubuntu there comes a feature freeze.\nThis means only applications with bug-fixes get into the repository.\nBy using third-party DEB repositories, you can always keep up-to-date with the latest version.\nAfter adding these repositories, locate and install them using Add/Remove.'))
        self.__config = Config()
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.pack_start(sw)
        self.treeview = SourcesView()
        self.treeview.connect('sourcechanged', self.colleague_changed)
        self.treeview.selection.connect('changed', self.on_selection_changed)
        self.treeview.set_sensitive(False)
        self.treeview.set_rules_hint(True)
        sw.add(self.treeview)
        self.expander = gtk.Expander(_('Details'))
        self.pack_start(self.expander, False, False, 0)
        self.sourcedetail = SourceDetail()
        self.expander.set_sensitive(False)
        self.expander.add(self.sourcedetail)
        hbox = gtk.HBox(False, 5)
        self.pack_end(hbox, False, False, 0)
        un_lock = PolkitButton()
        un_lock.connect('changed', self.on_polkit_action)
        hbox.pack_end(un_lock, False, False, 0)
        self.refresh_button = gtk.Button(stock=gtk.STOCK_REFRESH)
        self.refresh_button.set_sensitive(False)
        self.refresh_button.connect('clicked', self.on_refresh_button_clicked)
        hbox.pack_end(self.refresh_button, False, False, 0)

    def update_thirdparty(self):
        self.treeview.update_model()

    def on_selection_changed(self, widget):
        model, iter = widget.get_selected()
        if iter is None:
            return
        home = model.get_value(iter, COLUMN_HOME)
        url = model.get_value(iter, COLUMN_URL)
        description = model.get_value(iter, COLUMN_COMMENT)
        self.sourcedetail.set_details(home, url, description)

    def on_polkit_action(self, widget, action):
        if action:
            if proxy.get_proxy():
                self.treeview.set_sensitive(True)
                self.expander.set_sensitive(True)
                WARNING_KEY = '/apps/ubuntu-tweak/disable_thidparty_warning'
                if not self.__config.get_value(WARNING_KEY):
                    dialog = WarningDialog(_('It is a possible security risk to '
                                             'use packages from Third-Party Sources.\n'
                                             'Please be careful and use only sources you trust.'),
                                           buttons=gtk.BUTTONS_OK,
                                           title=_('Warning'))
                    vbox = dialog.get_child()
                    hbox = gtk.HBox()
                    vbox.pack_start(hbox, False, False, 0)
                    checkbutton = GconfCheckButton(_('Never show this dialog'), WARNING_KEY)
                    hbox.pack_end(checkbutton, False, False, 0)
                    hbox.show_all()
                    dialog.run()
                    dialog.destroy()
            else:
                ServerErrorDialog().launch()
        else:
            AuthenticateFailDialog().launch()

    def colleague_changed(self, widget):
        self.refresh_button.set_sensitive(True)
        self.emit('update', 'sourceeditor', 'update_sourceslist')

    def on_refresh_button_clicked(self, widget):
        dialog = UpdateCacheDialog(widget.get_toplevel())
        res = dialog.run()
        proxy.set_list_state('normal')
        widget.set_sensitive(False)
        dialog = QuestionDialog(_('You can install the new applications through Add/Remove.\nDo you want to go now?'),
                                title=_('The software information is up-to-date now'))
        if dialog.run() == gtk.RESPONSE_YES:
            self.emit('update', 'installer', 'deep_update')
            self.emit('call', 'mainwindow', 'select_module', {'name': 'installer'})
        else:
            self.emit('update', 'installer', 'deep_update')
        dialog.destroy()
#!/usr/bin/env python
# encoding: utf-8
import common.db
import sys
from common.config import Config


def initDB(ip, dbname, name, pwd):
    return common.db.Connection(ip, dbname, name, pwd, True, 'utf8mb4')


if __name__ == "__main__":
    COMPANYUSERID = 0
    env = sys.argv[1]
    config = Config(env)
    ip = config.getValue("db", "ip")
    username = config.getValue("db", "username")
    databaseName = config.getValue("db", "database")
    password = config.getValue("db", "password")
    conn = initDB(ip, databaseName, username, password)
    total = 0  # renamed from 'sum' to avoid shadowing the builtin
    revenueTableName = "revenue_record"
    accountTableName = "account"
    RevenueRes = conn.query(
        "SELECT `money`,`out_order_id`,`data_version` FROM `" + revenueTableName
        + "` WHERE `status`=0 AND `to_user_id`=0")
    for data in RevenueRes:
        total = total + data["money"]
    i = 0