def mysql_backup():
    """Queue master MySQL instances onto per-backup-server Redis lists.

    Reads every row of `mysqldb` flagged as a master, sets a 15-day binlog
    expiry on each instance, and round-robins the (ip, port) pairs across
    the hosts in BACKUP_SERVERS via Redis lists named 'mysql_backup_<host>'.
    NOTE(review): this module defines mysql_backup twice; the later
    definition is the one bound at import time.
    """
    MYSQL = Mysql.MYSQL(USER,PASSWORD,HOST,PORT,DB)
    # '是' means "yes": select only instances marked as masters.
    cmds = "select ip,port from mysqldb where master='是';"
    results = MYSQL.Run(cmds)
    key = 'mysql_backup'
    # Clear the completion marker left by the previous backup round.
    Redis.delete('finish_backup')
    if results:
        try:
            # Drop stale per-server work queues before refilling them.
            for host in BACKUP_SERVERS:
                Redis.delete('%s_%s'%(key,host))
            i = len(BACKUP_SERVERS)
            for info in results:
                info = [str(m) for m in info]
                # Set binlog expiry time (15 days) on the master itself.
                MHOST,MPORT = info
                MDB = 'mysql'
                MYSQL_SET = Mysql.MYSQL(USER, PASSWORD, MHOST, MPORT, MDB)
                cmds = 'set global expire_logs_days=15;'
                MYSQL_SET.Run(cmds)
                if info[0] not in NOT_BACKUP_MYSQL:
                    # Round-robin: walk BACKUP_SERVERS backwards and wrap
                    # around once index 0 has been used.
                    i = i-1
                    Redis.lpush('%s_%s'%(key,BACKUP_SERVERS[i]),info)
                    if i == 0:
                        i = len(BACKUP_SERVERS)
                MYSQL_SET.Close()
        except Exception as e:
            loging.write(e)
    MYSQL.Close()
def task_tables_info():
    """Rebuild the `tableinfo` summary table from every registered MySQL host.

    For each (ip, port, db-list) row in `mysqldb` (excluding the 172.16.9.*
    subnet) this collects `show table status` per database and inserts one
    row per table (engine, row count, charset, server version) into
    `tableinfo`, logging each insert to /tmp/tables_info.log.
    """
    MYSQL_IDC = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    Table = 'tableinfo'
    log_path = '/tmp/tables_info.log'
    # Rebuild the summary table from scratch on every run.
    MYSQL_IDC.Run("TRUNCATE TABLE %s;" % Table)
    # Bug fix: the original did `map(MYSQL_IDC.Run, cmds)` and then
    # subscripted the result -- under Python 3 `map` returns an iterator
    # and `results[1]` raises TypeError.  Run the two statements directly.
    hosts = MYSQL_IDC.Run("select ip,port,db from mysqldb;")
    for host, port, dbs in hosts:
        try:
            if '172.16.9.' in host:
                continue  # excluded subnet
            MYSQL = Mysql.MYSQL(USER, PASSWORD, host, port, 'mysql')
            try:
                version = MYSQL.Run("show variables like 'version';")
                version = version[0][1] or 'None'
                # Databases for one host are stored '|'-separated.
                for db in dbs.split('|'):
                    # Renamed from `results` to avoid shadowing the outer
                    # loop's source rows.
                    table_rows = MYSQL.Run("show table status from %s;" % db)
                    if not table_rows:
                        continue
                    for table_info in table_rows:
                        Table_Name = table_info[0]
                        Engine = table_info[1] or 'None'
                        Rows = table_info[4] or 0
                        Charset = table_info[14] or 'None'
                        cmd = ("insert into %s (ip,port,database_name,table_name,Engine_name,Rows,Charset,version) "
                               "VALUES ('%s',%i,'%s','%s','%s',%i,'%s','%s');"
                               % (Table, host, int(port), db, Table_Name,
                                  Engine, Rows, Charset, version))
                        loging.write(cmd, log_path=log_path)
                        MYSQL_IDC.Run(cmd)
            finally:
                # Bug fix: close the per-host connection even when a
                # statement in the middle of the loop raises.
                MYSQL.Close()
        except Exception as e:
            loging.write(e, log_path=log_path)
            continue
    MYSQL_IDC.Close()
def mysql_scheduler():
    """Execute today's pending rows from `sql_scheduler` concurrently.

    Selects rows whose status is '未执行' ("not executed") for today's
    date, runs each statement via Mysql.Query_sql in a 10-worker pool, and
    marks the row '已执行' ("executed") with the quote-stripped result.
    NOTE(review): Run_sql calls MYSQL.Run()/Close() after the outer code
    has already closed MYSQL, and closes it once per task; this only works
    if Mysql.MYSQL reconnects or tolerates double-close -- confirm.
    `log_path` is a module-level name not visible in this chunk.
    """
    t = time.strftime('%Y-%m-%d', time.localtime())
    MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    def Run_sql(val):
        # Row layout: id, ip, port, db, <unused col 4>, sql text.
        id, IP, PORT, DB = val[:4]
        CMD = val[5]
        val = Mysql.Query_sql(IP, PORT, DB, CMD)
        if val:
            # Strip single quotes so the result embeds safely in the UPDATE.
            val = str(val).replace("'", '')
        else:
            val = 'None'
        cmd = "update sql_scheduler set status = '已执行' ,results = '%s' where id = '%s';" % ( val, id)
        loging.write(cmd, log_path=log_path)
        MYSQL.Run(cmd)
        MYSQL.Close()
    try:
        cmd = "select * from sql_scheduler where status = '未执行' and time = '%s';" % t
        values = MYSQL.Run(cmd)
        MYSQL.Close()
        if values:
            # Execute concurrently.
            POOLS = Third_pool(10)
            POOLS.map_async(Run_sql, values)
            POOLS.close()
            POOLS.join()
    except Exception as e:
        loging.write(e, log_path=log_path)
def get_twemproxy_redis():
    """Snapshot twemproxy backend pools into the `twemproxyInfo` table.

    Pulls the JSON stats each twemproxy exposes on port 22222, collects
    the intranet (172.16.*) redis backends of every 'redis_*' pool, then
    records each backend's db0 key count.
    NOTE(review): a single recv(86400) assumes the whole stats JSON
    arrives in one read -- a larger payload would be truncated and fail
    json.loads; confirm payload size.
    """
    MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    redis_info = {}
    for twemproxy_ip in TWEMPROXY_HOSTS:
        try:
            sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            sock.connect((twemproxy_ip,22222))
            INFOS = json.loads(sock.recv(86400))
            for key in INFOS:
                # Pool entries are named 'redis_*'; other keys are
                # per-proxy scalar stats.
                if 'redis_' in key:
                    IP_list = []
                    for ip_key in INFOS[key]:
                        if '172.16.' in ip_key:
                            IP_list.append(ip_key.split(':')[0])
                    redis_info[key] = IP_list
        except Exception as e:
            loging.write(e)
            continue
    # Rebuild the snapshot table from scratch.
    cmd = "TRUNCATE TABLE twemproxyInfo;"
    MYSQL.Run(cmd)
    for key in redis_info:
        for ip in redis_info[key]:
            try:
                Redis = redis.StrictRedis(host=ip,port=6379,db=0,socket_timeout=1)
                Keys = Redis.info()['db0']['keys']
                cmd = "insert into twemproxyInfo (serviceGroup,clientIP,clientKeyItems) VALUES('%s','%s','%s');"%(key,ip,Keys)
                MYSQL.Run(cmd)
            except:
                # Best-effort: unreachable or keyless backends are skipped.
                continue
    MYSQL.Close()
def mysql_backup():
    """Queue master MySQL instances onto per-backup-server Redis lists.

    NOTE(review): duplicate of an earlier mysql_backup definition in this
    module (identical logic, reformatted); being defined later, this is
    the binding that survives import.  Reads masters from `mysqldb`, sets
    a 15-day binlog expiry on each, and round-robins (ip, port) pairs
    across BACKUP_SERVERS via 'mysql_backup_<host>' Redis lists.
    """
    MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    # '是' means "yes": only instances marked as masters.
    cmds = "select ip,port from mysqldb where master='是';"
    results = MYSQL.Run(cmds)
    key = 'mysql_backup'
    # Clear the completion marker from the previous round.
    Redis.delete('finish_backup')
    if results:
        try:
            for host in BACKUP_SERVERS:
                Redis.delete('%s_%s' % (key, host))
            i = len(BACKUP_SERVERS)
            for info in results:
                info = [str(m) for m in info]
                # Set binlog expiry time (15 days) on the master itself.
                MHOST, MPORT = info
                MDB = 'mysql'
                MYSQL_SET = Mysql.MYSQL(USER, PASSWORD, MHOST, MPORT, MDB)
                cmds = 'set global expire_logs_days=15;'
                MYSQL_SET.Run(cmds)
                if info[0] not in NOT_BACKUP_MYSQL:
                    # Round-robin assignment with wrap-around at index 0.
                    i = i - 1
                    Redis.lpush('%s_%s' % (key, BACKUP_SERVERS[i]), info)
                    if i == 0:
                        i = len(BACKUP_SERVERS)
                MYSQL_SET.Close()
        except Exception as e:
            loging.write(e)
    MYSQL.Close()
def Run(self, cmd):
    """Execute *cmd* on this connection's cursor and commit.

    Returns the fetched rows as a list on success.  On failure the
    exception object itself is logged and returned (callers in this
    module inspect the return value rather than catching).
    """
    try:
        self.cur.execute(cmd)
        self.cnx.commit()
        # The original guarded the comprehension with a duplicated,
        # per-element `if self.cur if self.cur` clause; a cursor object is
        # always truthy, so a plain pass over the cursor is equivalent.
        return [cu for cu in self.cur]
    except Exception as e:
        # Bug fix: the log call used to sit *after* `return e` and was
        # therefore unreachable dead code; log first, then return.
        loging.write(e)
        return e
def check_ip(*args, **kwargs):
    """Decorator wrapper: allow only intranet (172.16.*) clients.

    `func` is a free variable supplied by the enclosing decorator (not
    visible in this chunk).  Prefers the X-Forwarded-For header (set by
    the fronting proxy) over request.remote_addr.
    NOTE(review): if the header exists but is empty, `ip` is never
    assigned and the membership test below raises NameError -- confirm
    the proxy always populates the header.
    """
    try:
        if request.headers['X-Forwarded-For']:
            ip = request.headers['X-Forwarded-For']
    except:
        # Header missing entirely: fall back to the direct peer address.
        ip = request.remote_addr
    if '172.16.' not in ip:
        loging.write(ip)
        # Rejection page text ("illegal IP access"); runtime string kept
        # verbatim.
        return render_template_string('非法IP地址访问!')
    return func(*args, **kwargs)
def Query_sql(ip, port, db, cmd):
    """Run a single statement against an arbitrary MySQL host.

    Opens a throwaway connection, forces a UTF-8 session, executes *cmd*
    and returns its rows; on any error the exception object is logged and
    returned instead.  The connection is closed in every case.
    """
    conn = MYSQL(user=user, password=password, host=ip, port=port, db=db)
    try:
        # Force a UTF-8 session before the caller's statement runs.
        conn.Run("SET NAMES UTF8")
        outcome = conn.Run(cmd)
    except Exception as err:
        loging.write(err)
        outcome = err
    finally:
        conn.Close()
    return outcome
def LOCK(*args, **kwargs):
    """Decorator wrapper: run *func* only on the host/pid holding the lock.

    `func` is a free variable from the enclosing decorator (outside this
    chunk).  Returns func's result for the lock holder, None otherwise.
    NOTE(review): the handler calls logging.error while the rest of this
    file uses loging.write -- possibly a typo for the project logger.
    """
    try:
        if RC.exists('host_lock') and RC.get('pid_lock'):
            if HOST == RC.get('host_lock') and PID == int(RC.get('pid_lock')):
                loging.write('host:%s pid:%s task:%s run......' % (HOST,PID, func.__name__))
                return func(*args, **kwargs)
            else:
                return None
        return None
    except Exception as e:
        # `if e` is always true for a caught exception object.
        if e:
            logging.error(str(e))
def scheduler_lock():
    """Try to acquire the short-lived (5s) 'scheduler_lock' in Redis.

    If another host already holds the lock, sleep a small random interval
    to de-synchronize competing schedulers, then bail out via
    AssertionError, which the except block logs.
    NOTE(review): indentation reconstructed from a collapsed source line;
    the else is taken to pair with the outer exists() check (acquire when
    absent), so the current holder does not refresh the TTL.  set+expire
    is also not atomic -- a crash between them leaves a lock with no TTL.
    """
    try:
        if RC.exists('scheduler_lock'):
            if ip != RC.get('scheduler_lock'):
                # Random back-off to stagger competing schedulers.
                time.sleep(choice([1,2,3,2,1]))
                raise AssertionError
        else:
            RC.set('scheduler_lock', ip)
            RC.expire('scheduler_lock',5)
    except Exception as e:
        loging.write(e)
def Run(self):
    """Register the one-shot log-analytics jobs with APScheduler.

    Only hosts listed in task_servers schedule anything; each job fires
    once at self.run_date ('date' trigger), and replace_existing=True
    makes re-registration idempotent.
    NOTE(review): this module contains a second, reformatted definition
    of this method elsewhere.
    """
    if HOST in task_servers:
        self.scheduler.add_job(Task.analytics_internet_logs, 'date', run_date=self.run_date,id='analytics_internet_logs', replace_existing=True)
        self.scheduler.add_job(Task.analytics_internet2_logs, 'date', run_date=self.run_date,id='analytics_internet2_logs', replace_existing=True)
        self.scheduler.add_job(Task.analytics_internet3_logs, 'date', run_date=self.run_date,id='analytics_internet3_logs', replace_existing=True)
        self.scheduler.add_job(Task.analytics_intranet_logs, 'date', run_date=self.run_date,id='analytics_intranet_logs',replace_existing=True)
        self.scheduler.add_job(Task.analytics_intranet2_logs, 'date', run_date=self.run_date,id='analytics_intranet2_logs', replace_existing=True)
        self.scheduler.add_job(Task.WAF_logs, 'date', run_date=self.run_date,id='WAF_logs',replace_existing=True)
        self.scheduler.add_job(Task.httpry_logs,'date', run_date=self.run_date,id='httpry_logs', replace_existing=True)
        self.scheduler.start()
        loging.write('Scheduler_run start......')
    else:
        loging.write('%s not in task server list!' %HOST)
def clear_kestrel():
    """Delete drained kestrel queues (kestrel_num == '0') via the memcache protocol.

    Looks up every (ip, port, key) triple marked empty in the `kestrel`
    table and best-effort deletes the key on the corresponding server.
    """
    MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    cmd = "select kestrel_ip,kestrel_port,kestrel_key from kestrel where kestrel_num ='0';"
    results = MYSQL.Run(cmd)
    # Bug fix: the connection used to be closed only when rows came back,
    # leaking it on an empty result set.
    MYSQL.Close()
    if results:
        for ip, port, key in results:
            # Bug fix: the original applied `%` to loging.write()'s return
            # value -- `loging.write('...') % (ip, port, key)` -- which
            # raises TypeError on the first row.  Format the message first.
            loging.write('%s %s %s delete......\n' % (ip, port, key))
            try:
                # Kestrel speaks the memcache protocol; short timeout keeps
                # a dead server from stalling the sweep.
                Kestrel = memcache.Client(['%s:%s' % (ip, port)], debug=0, socket_timeout=1)
                Kestrel.delete(str(key))
            except Exception:
                # Best-effort: skip unreachable servers.
                continue
def Run_sql(val):
    """Execute one queued `sql_scheduler` row and mark it done.

    NOTE(review): `MYSQL` and `log_path` are free variables defined
    elsewhere in the module (this looks like a hoisted copy of the worker
    nested inside mysql_scheduler); each call closes the shared MYSQL
    connection.  The local name `id` shadows the builtin.
    """
    # Row layout: id, ip, port, db, <unused col 4>, sql text.
    id, IP, PORT, DB = val[:4]
    CMD = val[5]
    val = Mysql.Query_sql(IP, PORT, DB, CMD)
    if val:
        # Strip single quotes so the result embeds safely in the UPDATE.
        val = str(val).replace("'", '')
    else:
        val = 'None'
    cmd = "update sql_scheduler set status = '已执行' ,results = '%s' where id = '%s';" % ( val, id)
    loging.write(cmd, log_path=log_path)
    MYSQL.Run(cmd)
    MYSQL.Close()
def op_log(self):
    """Persist one access-log row (date/time/ip/user/action).

    Errors are logged rather than raised; the scoped session is released
    in every case.
    """
    record = db_op.op_log(
        date=self.date,
        time=self.time,
        ip=self.ip,
        user=self.user,
        access=self.action,
    )
    session = db_op.DB.session
    try:
        session.add(record)
        session.commit()
    except Exception as exc:
        loging.write(exc)
    finally:
        session.remove()
def LOCK(*args, **kwargs):
    """Decorator wrapper: run *func* only on the host/pid holding the lock.

    NOTE(review): duplicate of another LOCK wrapper in this module.
    `func` is a free variable from the enclosing decorator (outside this
    chunk).  The handler calls logging.error while the rest of the file
    uses loging.write -- possibly a typo for the project logger.
    """
    try:
        if RC.exists('host_lock') and RC.get('pid_lock'):
            if HOST == RC.get('host_lock') and PID == int( RC.get('pid_lock')):
                loging.write('host:%s pid:%s task:%s run......' % (HOST, PID, func.__name__))
                return func(*args, **kwargs)
            else:
                return None
        return None
    except Exception as e:
        # `if e` is always true for a caught exception object.
        if e:
            logging.error(str(e))
def auto_init_system():
    """Run the init command set over SSH for every host queued on *init_key*.

    Drains the Redis list of pending IPs; for each host, runs CMDS over
    SSH and, when a command produces no stderr, queues a salt-minion key
    cleanup (hostname form S<octet3>-<octet4>.server.baihe) on both salt
    masters.  Failures are best-effort: the host is skipped.
    """
    while True:
        # Bug fix: the original `for ip in Redis.lpop(init_key)` iterated
        # the single popped value character by character (lpop returns one
        # item, not a list) and raised TypeError when the queue was empty.
        # Drain the queue one address at a time instead.
        ip = Redis.lpop(init_key)
        if not ip:
            break
        try:
            ssh = SSH.ssh(username=INIT_USER, ip=ip)
            for cmd in CMDS:
                results = ssh.Run(cmd)
                if not results['stderr']:
                    loging.write('init %s system start ......' % ip)
                    ip_list = ip.split('.')
                    # e.g. 172.16.9.10 -> S9-10.server.baihe
                    hostname = 'S%s-%s.server.baihe' % (ip_list[2], ip_list[3])
                    for h in ('45', '46'):
                        # Queue the minion-key cleanup on both salt masters.
                        Redis.lpush('clear%s_salt-minon' % h, hostname)
            ssh.Close()
        except Exception:
            # Best-effort: move on to the next queued host.
            pass
def auto_init_system():
    """Run the init command set over SSH for hosts queued on *init_key*.

    NOTE(review): duplicate of another auto_init_system definition in this
    module.  Redis.lpop returns a single element (or None), so this
    for-loop iterates the popped string character by character and raises
    TypeError on an empty queue -- almost certainly a bug; the intent
    looks like draining the list one IP at a time.  Confirm against the
    Redis client in use.
    """
    for ip in Redis.lpop(init_key):
        try:
            ssh = SSH.ssh(username=INIT_USER,ip=ip)
            for cmd in CMDS:
                results = ssh.Run(cmd)
                if not results['stderr']:
                    loging.write('init %s system start ......' % ip)
                    ip_list = ip.split('.')
                    # e.g. 172.16.9.10 -> S9-10.server.baihe
                    hostname = 'S%s-%s.server.baihe' % (ip_list[2], ip_list[3])
                    for h in ('45', '46'):
                        # Queue minion-key cleanup on both salt masters.
                        Redis.lpush('clear%s_salt-minon' % h, hostname)
            ssh.Close()
        except:
            pass
def scheduler_lock():
    """Refresh or acquire the host/pid scheduler lock in Redis (30s TTL).

    The current holder refreshes both TTLs; any other caller aborts via
    AssertionError, silenced by the bare except -- AssertionError is used
    purely as local control flow here.
    NOTE(review): only 'host_lock' existence is checked before reading
    'pid_lock'; if pid_lock is missing, int(None) raises TypeError, which
    the bare except also swallows.  set+expire is not atomic.
    """
    try:
        if RC.exists('host_lock'):
            if HOST == RC.get('host_lock') and PID == int(RC.get('pid_lock')):
                # We hold the lock: extend it.
                RC.expire('host_lock',30)
                RC.expire('pid_lock', 30)
                loging.write('lock_info:host>>%s pid>>%s unlock......' % (HOST,PID))
            else:
                raise AssertionError
        else:
            # Lock free: claim it for this host/pid.
            RC.set('host_lock',HOST)
            RC.set('pid_lock',PID)
            RC.expire('host_lock',30)
            RC.expire('pid_lock', 30)
    except:
        pass
def scheduler_lock():
    """Refresh or acquire the host/pid scheduler lock (task servers only).

    Variant of the lock routine gated on membership in task_servers: the
    current holder refreshes the 30s TTLs; any other caller, or a
    non-task host, aborts via AssertionError, which the bare except
    silences (AssertionError is local control flow here).
    NOTE(review): set+expire is not atomic; a crash between the calls
    leaves a lock with no TTL.
    """
    try:
        if HOST in task_servers:
            if RC.exists('host_lock') and RC.get('pid_lock'):
                if HOST == RC.get('host_lock') and PID == int(RC.get('pid_lock')):
                    # We hold the lock: extend it.
                    RC.expire('host_lock',30)
                    RC.expire('pid_lock', 30)
                    loging.write('lock_info:host>>%s pid>>%s unlock......' % (HOST,PID))
                else:
                    raise AssertionError
            else:
                # Lock free: claim it for this host/pid.
                RC.set('host_lock',HOST)
                RC.set('pid_lock',PID)
                RC.expire('host_lock',30)
                RC.expire('pid_lock', 30)
        else:
            raise AssertionError
    except:
        pass
def Run(self):
    """Register the one-shot log-analytics jobs with APScheduler.

    Hosts absent from task_servers only log a notice.  Each job fires
    once at self.run_date ('date' trigger); replace_existing=True makes
    re-registration idempotent.
    """
    if HOST not in task_servers:
        loging.write('%s not in task server list!' % HOST)
        return
    # (job id, callable) pairs -- ids double as the task names.
    jobs = (
        ('analytics_internet_logs', Task.analytics_internet_logs),
        ('analytics_internet2_logs', Task.analytics_internet2_logs),
        ('analytics_internet3_logs', Task.analytics_internet3_logs),
        ('analytics_intranet_logs', Task.analytics_intranet_logs),
        ('analytics_intranet2_logs', Task.analytics_intranet2_logs),
        ('WAF_logs', Task.WAF_logs),
        ('httpry_logs', Task.httpry_logs),
    )
    for job_id, job in jobs:
        self.scheduler.add_job(job, 'date', run_date=self.run_date,
                               id=job_id, replace_existing=True)
    self.scheduler.start()
    loging.write('Scheduler_run start......')
def del_zabbix_events():
    """Delete the 10000 oldest rows from the zabbix `events` table.

    Fetches the candidate eventids over one connection, then deletes them
    with an 8-thread pool using one short-lived connection per row (so
    workers never share a cursor).  Local HOST/PORT/DB deliberately
    shadow the module-level settings to target the zabbix DB.
    NOTE(review): if no rows match, results[-1] raises IndexError, which
    the outer handler swallows and merely logs.
    """
    try:
        HOST = '172.16.4.93'
        PORT = 3306
        DB = 'zabbix'
        MYSQL = Mysql.MYSQL(USER,PASSWORD,HOST,PORT,DB)
        cmd = "select eventid from events order by eventid limit 10000;"
        results = MYSQL.Run(cmd)
        MYSQL.Close()
        def Delete(eventid):
            # eventid is a 1-tuple row from the select above.
            MySql = Mysql.MYSQL(USER,PASSWORD,HOST,PORT,DB)
            cmd = "delete from events where eventid=%i" % int(eventid[0])
            MySql.Run(cmd)
            MySql.Close()
        pool = ThreadPool(8)
        pool.map(Delete, results)
        pool.close()
        pool.join()
        loging.write('del_last_eventid:%s' %results[-1][0])
    except Exception as e:
        loging.write(e)
def del_zabbix_events():
    """Trim the 10000 oldest rows from the zabbix `events` table.

    Reads the candidate eventids over a single connection, then fans the
    deletes out over an 8-thread pool; each worker opens its own
    short-lived connection so no cursor is shared.  Any failure
    (including an empty result set hitting rows[-1]) is logged.
    """
    try:
        # Connection details for the zabbix DB; these local names shadow
        # the module-level HOST/PORT/DB on purpose.
        HOST = '172.16.4.93'
        PORT = 3306
        DB = 'zabbix'
        conn = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
        rows = conn.Run("select eventid from events order by eventid limit 10000;")
        conn.Close()

        def Delete(row):
            # One throwaway connection per row keeps workers isolated.
            worker_conn = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
            worker_conn.Run("delete from events where eventid=%i" % int(row[0]))
            worker_conn.Close()

        workers = ThreadPool(8)
        workers.map(Delete, rows)
        workers.close()
        workers.join()
        loging.write('del_last_eventid:%s' % rows[-1][0])
    except Exception as e:
        loging.write(e)
def op_operation(self):
    """Persist one deploy-operation record and sync project state.

    Inserts a row with this object's deploy metadata; when the new row's
    Type is '线上' ("online/production" -- presumably; verify against the
    UI), all prior rows for the same project whose Type is not '测外'
    are bulk-updated to that Type as well, in the same commit.  Errors
    are logged, and the scoped session is always released.
    """
    db = db_op.op_operation
    try:
        c = db(date=self.date, time=self.time, user=self.user, project=self.project, version=self.version, action=self.action, Type=self.Type, work=self.work, grade=self.grade, changelog=self.changelog)
        db_op.DB.session.add(c)
        if self.Type == '线上':
            # Bulk-update earlier rows of this project (except '测外',
            # "external test") to the production Type.
            db.query.filter( and_(db.project == self.project, db.Type != '测外')).update({db.Type: self.Type})
        db_op.DB.session.commit()
    except Exception as e:
        loging.write(e)
    finally:
        db_op.DB.session.remove()
def check_publish():
    """Warn about projects stuck in gray/external-test state for >4 hours.

    Scans op_operation for today's rows: projects still in '灰度' (gray
    release) past the 4-hour cutoff trigger a warning mail; projects in
    '测外' (external test) with no later '线上' (production) publish are
    looked up in java_list/php_list and logged.
    NOTE(review): indentation reconstructed from a collapsed source line;
    the `pass` + `results.remove(Project)` pair is taken to both live
    inside the `if MYSQL.Run(cmd):` branch (remove projects that did get
    a later production publish) -- confirm.  Removing from `results`
    while iterating it skips elements; the rollback_java/rollback_php
    helpers are defined but never called here, and rollback_java pushes a
    raw dict to Redis where rollback_php pushes str(Info).
    """
    loging.write('check publish......')
    def rollback_java(Project, warname, ServerList):
        # Build the java auto-rollback payload.  NOTE(review): pushed as a
        # raw dict, unlike rollback_php's str(Info) -- confirm the Redis
        # client accepts it.
        information = {}
        information['warname'] = warname
        information['warTagName'] = Project
        information['ServerList'] = ServerList
        information['Action'] = 'publish'
        information['key'] = 'auto_rollback'
        information['Gray'] = False
        information['Type'] = '2'
        information['Way'] = 'SVN'
        Redis.delete('auto_rollback')
        Redis.lpush('publish_java', information)
    def rollback_php(Project, App, ServerList):
        # Build the php auto-rollback payload (serialized with str()).
        Info = {}
        Info['action'] = 'publish'
        Info['key'] = 'auto_rollback'
        Info['path'] = Project
        Info['app'] = App
        Info['sip'] = ServerList
        Info['gray'] = False
        Info['Type'] = '2'
        Info['Way'] = 'SVN'
        Redis.lpush('publish_php', str(Info))
    # Today's date and the 4-hours-ago cutoff time.
    td = time.strftime('%Y-%m-%d',time.localtime())
    tt = (datetime.datetime.now()-datetime.timedelta(hours=4)).strftime('%H:%M:%S')
    MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
    # Projects still in gray release past the cutoff -> warning mail.
    cmd = "SELECT DISTINCT(project) FROM op_operation WHERE TYPE = '灰度' AND DATE = '{0}' AND TIME <= '{1}';".format(td,tt)
    result = MYSQL.Run(cmd)
    if result:
        for Project in result:
            os.system("/bin/tomail [email protected] 灰度发布警告 {0} 项目已经保持灰度状态超过4个时间,请相关开发人员尽快处理!".format(Project[0]))
    # Projects published to external test before the cutoff.
    cmd = "SELECT DISTINCT(project) FROM op_operation WHERE DATE = '{0}' AND TIME < '{1}' AND TYPE = '测外';".format(td,tt)
    results = MYSQL.Run(cmd)
    if results:
        results = [str(re[0]) for re in results]
        for Project in results:
            # Drop projects that later got a production publish today.
            cmd = "SELECT DISTINCT(project) FROM op_operation WHERE DATE = '{0}' AND TIME > '{1}' AND TYPE = '线上' AND project = '{2}' ;".format(td,tt,Project)
            if MYSQL.Run(cmd):
                pass
                # NOTE(review): mutates the list being iterated -- may
                # skip the following element.
                results.remove(Project)
        if results:
            for PRoject in results:
                if '.war.zip' in PRoject:
                    # Java artifact: strip the version suffix from the tag.
                    project = PRoject.split('-')[:-1]
                    project = '-'.join(project)
                    cmd = "SELECT ip,USER FROM java_list WHERE project = '{0}' AND TYPE = '2'".format(project + '.war')
                    ServerList = MYSQL.Run(cmd)
                    cmd = "SELECT project FROM op_operation WHERE TYPE = '线上' AND ACTION <> 'restart' and project like '{0}-%.war.zip' ORDER BY id DESC LIMIT 1;".format(project)
                    loging.write('java:{0}\n'.format(ServerList))
                elif 'tags' in PRoject:
                    # PHP artifact: app name is path segment 3, version is
                    # stripped from the tag path.
                    App = PRoject.split('/')[3]
                    project = PRoject.split('tags')[-1]
                    project = project.replace('/','').split('-')[:-1]
                    project = '-'.join(project)
                    cmd = "SELECT ip,USER FROM php_list WHERE project = '{0}' AND TYPE = '2'".format(App)
                    ServerList = MYSQL.Run(cmd)
                    cmd = "SELECT project FROM op_operation WHERE TYPE = '线上' AND ACTION <> 'restart' and project like '%{0}%' ORDER BY id DESC LIMIT 1;".format(project)
                    loging.write('php:{0}\n'.format(ServerList))
                else:
                    continue
                result = MYSQL.Run(cmd)
                loging.write('project:{0}\n'.format(result))
                #os.system("/bin/tomail [email protected] 测外发布警告 {0} 项目已经保持测外状态超过4个时间,请相关开发人员尽快处理!".format(Project[0]))
    MYSQL.Close()
def redis_cluster_info():
    """Sample the java redis-cluster masters and persist per-minute metrics.

    For every master node: computes deltas (replication offset, keys,
    commands, net in/out) against the previous sample cached in RC,
    writes a row to idc_redis_cluster_info, prunes rows older than 7
    days, and caches a per-master summary hash under
    'redis_cluster_java_info'.
    NOTE(review): assumes each master has a 'slave0' and a 'db0' section;
    HitRate divides by keyspace_hits+keyspace_misses, so an idle node
    raises ZeroDivisionError -- both cases fall into the inner handler
    and skip the node.  Deltas assume a fixed sampling interval
    (commands are divided by 60, i.e. a one-minute cadence).
    """
    try:
        dt = time.strftime('%Y-%m-%d',time.localtime())
        tt = time.strftime('%H:%M:%S',time.localtime())
        # Retention horizon: rows older than 7 days are purged.
        ot = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y-%m-%d')
        RC_JAVA = RedisCluster(startup_nodes=cluster_java_nodes, decode_responses=True)
        results = RC_JAVA.info()
        Redis_Key = 'redis_cluster_java_info'
        for host in results:
            try:
                if results[host]['role'] == 'master':
                    # Per-host cache keys for the previous sample.
                    key_commands = '%s_redis_commands' % host
                    key_offset = '%s_redis_offset' % host
                    key_net_input = '%s_redis_net_input' % host
                    key_net_output = '%s_redis_net_output' % host
                    key_keys = '%s_redis_keys' % host
                    Master_Info = {}
                    Master_Info['maxmemory_policy'] = results[host]['maxmemory_policy']
                    Master_Info['used_memory_human'] = results[host]['used_memory_human']
                    Master_Info['slave_host'] = '%s:%s'%(results[host]['slave0']['ip'],results[host]['slave0']['port'])
                    Master_Info['slave_state'] = results[host]['slave0']['state']
                    Master_Info['rejected_connections'] = results[host]['rejected_connections']
                    Master_Info['redis_version'] = results[host]['redis_version']
                    Master_Info['redis_mode'] = results[host]['redis_mode']
                    Master_Info['uptime_in_days'] = results[host]['uptime_in_days']
                    Master_Info['space_keys'] = results[host]['db0']['keys']
                    # Replication offset delta vs previous sample (0 on the
                    # first run since old == new).
                    old_offset = new_offset = int(results[host]['slave0']['offset'])
                    if RC.exists(key_offset):
                        old_offset = int(RC.get(key_offset))
                    RC.set(key_offset,new_offset)
                    Master_Info['slave_offset'] = new_offset - old_offset
                    # Connected clients.
                    connected_clients = results[host]['connected_clients']
                    # Delta of db0 keys.
                    old_keys = new_keys = int(results[host]['db0']['keys'])
                    if RC.exists(key_keys):
                        old_keys = int(RC.get(key_keys))
                    RC.set(key_keys,int(new_keys))
                    add_keys = new_keys - old_keys
                    # Hit rate (percent).
                    HitRate = int(float(results[host]['keyspace_hits']) / (float(results[host]['keyspace_hits']) + float(results[host]['keyspace_misses'])) * 100)
                    # Commands processed per second (assuming 60s cadence).
                    old_commands = new_commands = int(results[host]['total_commands_processed'])
                    if RC.exists(key_commands):
                        old_commands = int(RC.get(key_commands))
                    RC.set(key_commands,int(new_commands))
                    commands = (new_commands - old_commands)/60
                    # Inbound traffic delta (MB).
                    old_net_input = new_net_input = int(results[host]['total_net_input_bytes'])
                    if RC.exists(key_net_input):
                        old_net_input = int(RC.get(key_net_input))
                    RC.set(key_net_input,int(new_net_input))
                    net_input = (new_net_input - old_net_input)/1024/1024
                    # Outbound traffic delta (MB).
                    old_net_output = new_net_output = int(results[host]['total_net_output_bytes'])
                    if RC.exists(key_net_output):
                        old_net_output = int(RC.get(key_net_output))
                    RC.set(key_net_output,int(new_net_output))
                    net_output = (new_net_output - old_net_output)/1024/1024
                    c = db_idc.idc_redis_cluster_info(getdate =dt,gettime =tt,master=host,add_keys=add_keys, connected_clients=connected_clients, HitRate=HitRate,commands=commands,net_input=net_input,net_output=net_output)
                    db_idc.DB.session.add(c)
                    db_idc.DB.session.commit()
                    # Purge samples past the retention horizon.
                    db = db_idc.idc_redis_cluster_info
                    v = db.query.filter(db.getdate <= ot).all()
                    if v:
                        for c in v:
                            db_idc.DB.session.delete(c)
                        db_idc.DB.session.commit()
                    RC.hset(Redis_Key,host,Master_Info)
            except Exception as e:
                loging.write(e)
                continue
    except Exception as e:
        loging.write(e)
    finally:
        db_idc.DB.session.remove()
def redis_cluster_info():
    """Sample the java redis-cluster masters and persist per-minute metrics.

    NOTE(review): duplicate of another redis_cluster_info definition in
    this module (identical logic, reformatted); being defined later, this
    binding wins at import time.  For each master it computes deltas
    (offset, keys, commands, net in/out) against the previous sample
    cached in RC, inserts a row into idc_redis_cluster_info, prunes rows
    older than 7 days, and caches a summary hash under
    'redis_cluster_java_info'.  Assumes 'slave0'/'db0' exist and that
    keyspace_hits+misses is non-zero; violations hit the inner handler
    and skip the node.
    """
    try:
        dt = time.strftime('%Y-%m-%d', time.localtime())
        tt = time.strftime('%H:%M:%S', time.localtime())
        # Retention horizon: rows older than 7 days are purged.
        ot = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y-%m-%d')
        RC_JAVA = RedisCluster(startup_nodes=cluster_java_nodes, decode_responses=True)
        results = RC_JAVA.info()
        Redis_Key = 'redis_cluster_java_info'
        for host in results:
            try:
                if results[host]['role'] == 'master':
                    # Per-host cache keys for the previous sample.
                    key_commands = '%s_redis_commands' % host
                    key_offset = '%s_redis_offset' % host
                    key_net_input = '%s_redis_net_input' % host
                    key_net_output = '%s_redis_net_output' % host
                    key_keys = '%s_redis_keys' % host
                    Master_Info = {}
                    Master_Info['maxmemory_policy'] = results[host]['maxmemory_policy']
                    Master_Info['used_memory_human'] = results[host]['used_memory_human']
                    Master_Info['slave_host'] = '%s:%s' % (results[host]['slave0']['ip'], results[host]['slave0']['port'])
                    Master_Info['slave_state'] = results[host]['slave0']['state']
                    Master_Info['rejected_connections'] = results[host]['rejected_connections']
                    Master_Info['redis_version'] = results[host]['redis_version']
                    Master_Info['redis_mode'] = results[host]['redis_mode']
                    Master_Info['uptime_in_days'] = results[host]['uptime_in_days']
                    Master_Info['space_keys'] = results[host]['db0']['keys']
                    # Replication offset delta vs previous sample.
                    old_offset = new_offset = int(results[host]['slave0']['offset'])
                    if RC.exists(key_offset):
                        old_offset = int(RC.get(key_offset))
                    RC.set(key_offset, new_offset)
                    Master_Info['slave_offset'] = new_offset - old_offset
                    # Connected clients.
                    connected_clients = results[host]['connected_clients']
                    # Delta of db0 keys.
                    old_keys = new_keys = int(results[host]['db0']['keys'])
                    if RC.exists(key_keys):
                        old_keys = int(RC.get(key_keys))
                    RC.set(key_keys, int(new_keys))
                    add_keys = new_keys - old_keys
                    # Hit rate (percent).
                    HitRate = int(float(results[host]['keyspace_hits']) / (float(results[host]['keyspace_hits']) + float(results[host]['keyspace_misses'])) * 100)
                    # Commands processed per second (assuming 60s cadence).
                    old_commands = new_commands = int(results[host]['total_commands_processed'])
                    if RC.exists(key_commands):
                        old_commands = int(RC.get(key_commands))
                    RC.set(key_commands, int(new_commands))
                    commands = (new_commands - old_commands) / 60
                    # Inbound traffic delta (MB).
                    old_net_input = new_net_input = int(results[host]['total_net_input_bytes'])
                    if RC.exists(key_net_input):
                        old_net_input = int(RC.get(key_net_input))
                    RC.set(key_net_input, int(new_net_input))
                    net_input = (new_net_input - old_net_input) / 1024 / 1024
                    # Outbound traffic delta (MB).
                    old_net_output = new_net_output = int(results[host]['total_net_output_bytes'])
                    if RC.exists(key_net_output):
                        old_net_output = int(RC.get(key_net_output))
                    RC.set(key_net_output, int(new_net_output))
                    net_output = (new_net_output - old_net_output) / 1024 / 1024
                    c = db_idc.idc_redis_cluster_info(
                        getdate=dt, gettime=tt, master=host, add_keys=add_keys,
                        connected_clients=connected_clients, HitRate=HitRate,
                        commands=commands, net_input=net_input, net_output=net_output)
                    db_idc.DB.session.add(c)
                    db_idc.DB.session.commit()
                    # Purge samples past the retention horizon.
                    db = db_idc.idc_redis_cluster_info
                    v = db.query.filter(db.getdate <= ot).all()
                    if v:
                        for c in v:
                            db_idc.DB.session.delete(c)
                        db_idc.DB.session.commit()
                    RC.hset(Redis_Key, host, Master_Info)
            except Exception as e:
                loging.write(e)
                continue
    except Exception as e:
        loging.write(e)
    finally:
        db_idc.DB.session.remove()