def reset_logger_position(loggers, workersDir, start_time, start_rowid):
    for loggerName in loggers:
        _, worker = os.path.splitext(loggerName)

        position_file = os.path.join(workersDir, "%s.position" % worker)

        st_dt = time.strptime(start_time, '%Y-%m-%d %H:%M:%S')
        start_tstamp = int(time.mktime(st_dt))

        line = "%d,%d\n" % (start_tstamp, start_rowid)

        util.write_first_line_nothrow(position_file, line)

        elog.force("%d ('%s'), %d => position file: %s", start_tstamp, start_time, start_rowid, position_file)
    pass
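# A minimal sketch of reading a position file back, assuming the single-line
# "<unix-timestamp>,<rowid>" format written above. read_position is a
# hypothetical helper shown for illustration only, not part of this module:
def read_position(position_file):
    # e.g. "1462204800,0\n" -> (1462204800, 0)
    with open(position_file) as fd:
        tstamp, rowid = fd.readline().strip().split(',')
    return (int(tstamp), int(rowid))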
def startup(loggers, config):
    processes = {}

    # exit signals for the main process
    signal.signal(signal.SIGINT, onSigInt)
    signal.signal(signal.SIGTERM, onSigTerm)

    for loggerClassName in loggers:
        elog.force("create process for logger: %s", loggerClassName)

        p = Process(target=logger_worker, args=(loggers[loggerClassName], config, exit_queue, 0.01))
        p.daemon = True

        processes[loggerClassName] = p
        pass

    run_forever(processes, exit_event)
    pass
def main(parser, appConfig, loggerConfig):
    import utils.logger

    (options, args) = parser.parse_args(args=None, values=None)

    loggerDictConfig = utils.logger.set_logger(loggerConfig, options.log_path, options.log_level)

    elog.force("%s-%s start", APPNAME, APPVER)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    options.artifactName = options.artifactName \
        .replace('${artifactId}', options.artifactId) \
        .replace('-', '_') \
        .replace('.', '_')

    options.artifactRootdir = options.artifactRootdir \
        .replace('${projectRootdir}', appConfig['projectRootdir']) \
        .replace('${artifactId}', options.artifactId) \
        .replace('${artifactName}', options.artifactName)

    if options.create_project:
        util.info2("projectRootdir = '%s'" % appConfig['projectRootdir'])
        util.info2("sb2template = '%s'" % appConfig['sb2template'])

        util.print_options_attrs(options, [
            'groupId', 'artifactId', 'artifactName',
            'artifactVersion', 'artifactDescription', 'artifactRootdir'
        ])

        if util.dir_exists(options.artifactRootdir) and not options.force:
            elog.warn("artifactRootdir already exists. (use '--force' to overwrite it.)")
            sys.exit(-1)
            pass

        create_sb2_project(appConfig, options)

    elog.force("%s-%s exit.", APPNAME, APPVER)
    pass
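# A hypothetical invocation for illustration only; option spellings are
# inferred from the attributes printed above and may differ from the real
# CLI flags:
#   $ python sb2proj.py --create-project --groupId=com.example \
#         --artifactId=demo-app --artifactVersion=1.0.0 --force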
def main(parser, appConfig, loggerConfig):
    import utils.logger

    (options, args) = parser.parse_args(args=None, values=None)

    loggerDictConfig = utils.logger.set_logger(loggerConfig, options.log_path, options.log_level)

    elog.force("%s-%s starting", APPNAME, APPVER)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    util.print_options_attrs(options, ['source_dbicfg', 'dest_sqlfile'])

    absYamlFile = os.path.abspath(options.source_dbicfg)
    absSqlFile = os.path.abspath(options.dest_sqlfile)

    module, _ = os.path.splitext(os.path.basename(absYamlFile))

    util.info2(absYamlFile)
    util.info2(absSqlFile)

    # open and read the yaml config file
    with open(absYamlFile) as fd:
        data = fd.read()

    # load the config; safe_load never executes arbitrary yaml tags, and the
    # result is bound to a name other than the builtin 'dict'
    cfg = yaml.safe_load(data)

    # create the sqlfile
    create_phoenix_cresql(cfg, module.lower(), absSqlFile, options.force, cfg.get('constants'))

    util.warn("NOW YOU CAN USE THE COMMAND BELOW TO CREATE TABLES IN HBASE-PHOENIX!")
    elog.force_clean("  $ sqlline.py zkhost:zkport /path/to/file.cresql")
    util.info("Sample:\n  $ ./sqlline.py localhost:2182 %s" % absSqlFile)
    pass
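# A hypothetical minimal source_dbicfg yaml, for illustration only. This
# function itself reads just the top-level 'constants' key; the table layout
# expected by create_phoenix_cresql is defined elsewhere:
#
#   constants:
#     SALT_BUCKETS: 16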
def logger_worker(loggerSet, config, exit_queue, timeout_ms):
    (loggerModule, loggerConfig) = loggerSet

    loggerClass = loggerModule.create_logger_instance(loggerConfig)

    elog.force("worker process(%d) for %s start ...", os.getpid(), loggerClass.logger_name)

    is_exit, exit_arg = (False, None)

    while not is_exit:
        is_exit, exit_arg = util.is_exit_process(exit_queue, timeout_ms)

        if is_exit:
            # put the EXIT message back so sibling workers can see it too
            exit_queue.put(('EXIT', exit_arg))
            break

        loggerModule.log_messages(loggerClass)
        pass

    # reached after an EXIT message was received
    elog.fatal("worker process exit: %r", exit_arg)
    pass
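# A minimal sketch of what util.is_exit_process is assumed to do: poll the
# shared queue for an ('EXIT', arg) tuple and report whether one arrived.
# Sketch only; the name, the timeout unit, and the module-level Queue/Empty
# imports are assumptions, the real helper lives in util:
def is_exit_process_sketch(exit_queue, timeout_ms):
    try:
        msg, arg = exit_queue.get(block=True, timeout=timeout_ms)
    except Empty:
        # no message within the timeout: keep working
        return (False, None)
    return (msg == 'EXIT', arg)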
def run_forever(processes, exit_event):
    for name, proc in processes.items():
        elog.force("start worker process: %s", name)
        proc.start()
        pass

    # nothing is ever put into idle_queue: the get() below merely blocks the
    # main process for up to 3 seconds per iteration until a signal handler
    # sets exit_event
    idle_queue = Queue(1)

    while not exit_event.isSet():
        try:
            func, arg = idle_queue.get(block=True, timeout=3)
        except Empty:
            pass

    # tell every worker to exit, then wait for all of them
    for name, proc in processes.items():
        exit_queue.put(('EXIT', 'main process exit.'))

    for name, proc in processes.items():
        proc.join()

    elog.force("main process exit.")
    pass
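# A minimal sketch of the signal handlers registered in startup(), assuming
# they simply set the module-level exit_event so the idle loop above stops.
# Sketch only; onSigInt/onSigTerm are defined elsewhere in this module:
def onSigInt_sketch(signo, frame):
    exit_event.set()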
def main(parser):
    (options, args) = parser.parse_args(args=None, values=None)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    if not options.path:
        elog.warn("No path specified. using: -P, --path=PATH")
        exit(-1)

    # absolute path for the config item options.path
    root_path = util.source_abspath(APPFILE, options.path, abspath)

    # build the list of file extensions from the filters
    file_exts = []

    filters = parse_strarr(options.filter)

    for flt in filters:
        if flt.startswith('.'):
            if flt not in file_exts:
                file_exts.append(flt)

        if flt in source_filters_dict.keys():
            for ext in source_filters_dict[flt]:
                if ext not in file_exts:
                    file_exts.append(ext)

    curtime = time.time()

    elog.force("path: %r", root_path)
    elog.force("exts: %r", file_exts)
    elog.force("recursive: %r", options.recursive)
    elog.force("timestamp: %r", curtime)

    if options.author:
        elog.force("author: %r", options.author)

    sweep_path(root_path, file_exts, options.recursive, options.author, curtime)
    pass
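# source_filters_dict (defined elsewhere) is assumed to map a named filter to
# a list of file extensions, e.g. hypothetically:
#   source_filters_dict = { 'c': ['.c', '.h'], 'cpp': ['.cpp', '.cc', '.hpp'] }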
def main(parser):
    (options, args) = parser.parse_args(args=None, values=None)

    # signal sent to the parent when a child process exits
    ## signal.signal(signal.SIGCHLD, util.sig_chld)

    # process exit signals
    signal.signal(signal.SIGINT, util.sig_int)
    signal.signal(signal.SIGTERM, util.sig_term)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    if not options.path:
        options.path = os.getcwd()
        elog.warn("No path specified. using current working dir or using: --path='PATH'")

    if not options.srcs:
        elog.error("No source strings specified. using: --srcs='STRINGS'")
        sys.exit(1)

    if not options.dsts:
        elog.warn("No destination strings specified. using: --dsts='STRINGS'")

    # absolute path for the config item options.path
    root_path = util.source_abspath(APPFILE, options.path, abspath)

    srcs = parse_strarr(options.srcs)
    dsts = parse_strarr(options.dsts)

    elog.force("path: %s", root_path)
    elog.force("sour = %r", srcs)
    elog.force("dest = %r", dsts)

    founds = []
    sweep_dir(root_path, srcs, founds)

    elog.force("Total %d files found", len(founds))

    if len(founds) > 0:
        if options.replace:
            if len(srcs) == len(dsts):
                for pf in founds:
                    ctime, mtime = None, None

                    fts = file_times(pf)
                    if fts:
                        ctime, mtime = fts
                    else:
                        elog.warn("missing file: %s", pf)
                        continue

                    for i in range(0, len(srcs)):
                        srcstr = srcs[i]

                        dststr = None
                        if i < len(dsts):
                            dststr = dsts[i]

                        if dststr:
                            # substitute file times into the destination string
                            ds = dststr.replace('$(mtime)', mtime).replace('$(ctime)', ctime)

                            if options.whole_line:
                                cmd = "sed -i 's/%s.*/%s/g' '%s'" % (srcstr, ds, pf)
                            else:
                                cmd = "sed -i 's/%s/%s/g' '%s'" % (srcstr, ds, pf)

                            elog.debug(cmd)

                            (status, output) = commands.getstatusoutput(cmd)
                            if status != 0:
                                elog.error("failed to run command: \"%s\", output: %r", cmd, output)

                elog.force("Total %d files replaced", len(founds))
            else:
                elog.error("Failed to replace: srcs(%r) mismatched with dsts(%r)", srcs, dsts)
            pass
        else:
            elog.warn("No files to be replaced. using: --replace")
        pass
    pass
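# For example, with --srcs='@version:' --dsts='@version: $(mtime)' and
# --whole-line set, the generated command would look like (values are
# illustrative only):
#   sed -i 's/@version:.*/@version: 2016-05-03 10:00:00/g' 'src/app.c'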
def main(config, parser):
    import utils.logger as logger

    (options, args) = parser.parse_args(args=None, values=None)

    logConfigDict = logger.set_logger(config['logger'], options.log_path, options.log_level)

    loggers = {}

    if config['loggers'] and len(config['loggers']):
        loggers = load_logger_workers('loggers', config['loggers'], {
            'logger_config' : logConfigDict,
            'logger_stash'  : options.logger_stash,
            'batch_rows'    : options.batch_rows,
            'end_time'      : options.end_time,
            'end_rowid'     : options.end_rowid
        })

    if len(loggers) > LOGGER_WORKERS_MAX:
        elog.error("too many logger workers. please increase LOGGER_WORKERS_MAX and try!")
        exit(-1)

    found_workers = list_logger_workers(logConfigDict, config['loggers_abspath'])

    if options.list_logger_workers:
        for logger_worker in found_workers:
            elog.info("found worker: %s (%s/%s.py)", logger_worker, config['loggers_abspath'], logger_worker)

        elog.force("total %d workers: %r", len(found_workers), found_workers)
        return

    if options.add_logger:
        add_logger(config, found_workers)
        return

    if options.remove_logger:
        remove_logger(config, found_workers)
        return

    if len(loggers) == 0 and options.force:
        loggers = load_logger_workers('loggers', found_workers, {
            'logger_config' : logConfigDict,
            'logger_stash'  : options.logger_stash,
            'batch_rows'    : options.batch_rows,
            'end_time'      : options.end_time,
            'end_rowid'     : options.end_rowid
        })

    if options.reset_logger_position:
        if len(loggers):
            reset_logger_position(loggers, config['loggers_abspath'], options.start_time, options.start_rowid)
        else:
            elog.error("--reset-position ignored: logger worker not found. use --force for all.")
        pass

    if options.startup:
        if len(loggers):
            startup(loggers, config)
        else:
            elog.error("--startup ignored: logger worker not found. use --force for all.")
        pass
    pass
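# Hypothetical invocations for illustration only; flag spellings are inferred
# from the option attributes used above and may differ from the real CLI:
#   $ python loggermgr.py --list-logger-workers
#   $ python loggermgr.py --force --startup
#   $ python loggermgr.py --force --reset-logger-position \
#         --start-time='2016-05-03 00:00:00' --start-rowid=0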
def start_run_project(artifactDir):
    # run the generated project with the spring-boot maven plugin
    runcmd = "mvn spring-boot:run"
    elog.force(runcmd)

    os.chdir(artifactDir)
    os.system(runcmd)
    pass