#!/usr/bin/env python # -*- coding:utf-8 -*- import logging from utils import config_util from utils import time_util from utils import file_util conf = config_util.getDict('log-conf') ''' loggername 参数最好指定,如果不指定可能会导致日志重复输出 ''' class logger: def __init__(self, loggername='default'): self.logger = logging.getLogger(loggername) self.logger.setLevel(logging.DEBUG) fmt = logging.Formatter(conf.get('logs.format')) # 设置Console日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(getLevel(conf.get('logs.clevel'))) # 设置文件日志 logsPath = file_util.getHome() + '/logs/aoam.log' fh = logging.FileHandler(filename=sP(logsPath), encoding='utf-8') fh.setFormatter(fmt) fh.setLevel(getLevel(conf.get('logs.flevel'))) self.logger.addHandler(sh)
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re from utils.logger import logger from utils import config_util, exeCmd from utils.environment_util import environment_util env = environment_util() conf = config_util.getDict('flume') log = logger(loggername='flume') ''' 检测索引是否存在,如果集群挂了,是无法返回索引的存在与否 def checkServer(): hostAndPorts = conf.get('hosts') es = Elasticsearch(hostAndPorts) #print(es.indices.exists(index="test")) #填写对应索引 lists = es.cluster.health() logger.info("elasticsearch集群状态:",end="") ES_cluster_status = lists["status"] if ES_cluster_status == "green": logger.info("####集群处于健康状态####") elif ES_cluster_status == "yellow": logger.info("集群处于亚健康状态") elif ES_cluster_status == "red": logger.warn("集群挂了") logger.info("elasticsearch集群节点数:"+lists['number_of_nodes'])
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""Zookeeper service helpers for the aoam ops toolkit."""
import sys
sys.path.append('/zywa/aoam')
import re
from utils.logger import logger
from utils import config_util, exeCmd
from utils.environment_util import environment_util

env = environment_util()
conf = config_util.getDict('zookeeper')
log = logger(loggername='zookeeper')


def getQuorumPeerMain(hostAndPorts):
    """Map each host (taken from 'host:port' strings) to the expected
    Zookeeper process name 'QuorumPeerMain'."""
    return {hostAndPort.split(':')[0]: 'QuorumPeerMain'
            for hostAndPort in hostAndPorts}


def startZk(host, server):
    """Start the Zookeeper service on *host* via an ansible ad-hoc command."""
    ZOOKEEPER_HOME = env.ZOOKEEPER_HOME
    log.warn('开始启动 {host} 节点 {server} 服务'.format(host=host, server=server))
    # Assemble the remote command in one pass; the resulting string is
    # identical to the two-step concatenation it replaces.
    _shell = ('ansible client -l {host} -a "{ZOOKEEPER_HOME}'
              '/bin/zkServer.sh start"').format(
        host=host, ZOOKEEPER_HOME=ZOOKEEPER_HOME)
    exeCmd.run(_shell)
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re from utils.logger import logger from utils import config_util, exeCmd from utils.environment_util import environment_util env = environment_util() conf = config_util.getDict('hive') log = logger(loggername='hive') def getHiveServer2(hostAndPorts): dicts = {} for hostAndPort in hostAndPorts: host = hostAndPort.split(':')[0] dicts[host] = 'org.apache.hive.service.server.HiveServer2' return dicts def getHiveMetaStore(keys, dicts): for key in keys: key = key.split(':')[0] if key in dicts: dicts[key] = dicts[ key] + ',org.apache.hadoop.hive.metastore.HiveMetaStore' else:
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re from utils.logger import logger from utils import config_util, exeCmd from utils.environment_util import environment_util env = environment_util() conf = config_util.getDict('tomcat') log = logger(loggername='tomcat') def getBootstrap(hostAndPorts): dicts = {} proNumDicts = {} proNums = conf.get('tomcat.process.number').split(',') i = 0 for hostAndPort in hostAndPorts: host = hostAndPort.split(':')[0] dicts[host] = 'Bootstrap' proNumDicts[host] = proNums[i] i += 1 return dicts, proNumDicts def startBootstrap(host, server):
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re from utils.logger import logger from utils import config_util, exeCmd from utils.environment_util import environment_util env = environment_util() conf = config_util.getDict('kafka') log = logger(loggername='kafka') def getKafka(hostAndPorts): dicts = {} for hostAndPort in hostAndPorts: host = hostAndPort.split(':')[0] dicts[host] = 'Kafka' return dicts def startKafka(host, server): KAFKA_HOME = env.KAFKA_HOME log.warn('开始启动 {host} 节点 {server} 服务'.format(host=host, server=server)) _shell = 'ansible client -l {host} -a "{KAFKA_HOME}'.format( host=host, KAFKA_HOME=KAFKA_HOME) _shell = _shell + '/{scriptName}"'.format(
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re from utils.logger import logger from utils import config_util, exeCmd from utils.environment_util import environment_util env = environment_util() conf = config_util.getDict('azkaban') log = logger(loggername='azkaban') def getWeb(hostAndPorts): dicts = {} for hostAndPort in hostAndPorts: host = hostAndPort.split(':')[0] dicts[host] = 'AzkabanWebServer' return dicts def getExe(keys, dicts): for key in keys: key = key.split(':')[0] if key in dicts: dicts[key] = dicts[key] + ',AzkabanExecutorServer' else: dicts[key] = 'AzkabanExecutorServer'
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re import os from utils.logger import logger from utils import config_util, exeCmd #from elasticsearch import Elasticsearch conf = config_util.getDict('elasticsearch') log = logger(loggername='elasticsearch') ''' 判断传入的ip等参数是否存在端口 1)存在端口 2)不存在端口 ''' def checkExistPort(hostsOrhosts_Port): hosts = hostsOrhosts_Port[0] if hosts.find(':') != -1: return True else: return False '''
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import sys sys.path.append('/zywa/aoam') import re import os from utils.logger import logger from utils import config_util, exeCmd conf = config_util.getDict('storm') log = logger(loggername='storm') ''' 检测索引是否存在,如果集群挂了,是无法返回索引的存在与否 def checkServer(): hostAndPorts = conf.get('hosts') es = Elasticsearch(hostAndPorts) #print(es.indices.exists(index="test")) #填写对应索引 lists = es.cluster.health() logger.info("elasticsearch集群状态:",end="") ES_cluster_status = lists["status"] if ES_cluster_status == "green": logger.info("####集群处于健康状态####") elif ES_cluster_status == "yellow": logger.info("集群处于亚健康状态") elif ES_cluster_status == "red": logger.warn("集群挂了") logger.info("elasticsearch集群节点数:"+lists['number_of_nodes']) logger.info("elasticsearch集群节点数:"+lists['number_of_data_nodes'])