def byteify(input, encoding='utf-8'):
    if isinstance(input, dict):
        return {byteify(key): byteify(value)
                for key, value in input.iteritems()}
    elif isinstance(input, list):
        return [byteify(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode(encoding)
    else:
        return input
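# --- Usage sketch (illustrative only; not called anywhere). Python 2 only,
# since byteify relies on iteritems/unicode: json.loads yields unicode keys
# and values, and byteify re-encodes them recursively so downstream Salt and
# Redis calls see plain str.
def _byteify_usage_example():
    import json
    payload = byteify(json.loads(u'{"tgt": ["10.0.0.1"], "fun": "test.ping"}'))
    assert isinstance(payload['fun'], str)
    assert isinstance(payload['tgt'][0], str)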
def startListen(self, opts):
    try:
        subNode = opts['id']
        from salt.newrun import (json, byteify, MessageType)
        redischannel_sub = self.redisInstance.pubsub()
        redischannel_sub.subscribe(self._master_pub_topic.split(','))
        pool = Pool(20000)
        for message in redischannel_sub.listen():
            try:
                messageType = byteify(message)
                if messageType['type'] == 'message':
                    maid_log.info("received master data: %s" % messageType['data'])
                    wrapMesage = json.loads(messageType['data'])
                    # acknowledge the request with a PING on the per-request topic
                    self.redisInstance.publish(
                        wrapMesage['tempTopic'],
                        json.dumps({'type': MessageType.PING, 'sub_ip': subNode},
                                   ensure_ascii=False, encoding='utf-8'))
                    # fork a greenlet to handle the task
                    maid_log.info("fork process for: %s" % wrapMesage)
                    # p = multiprocessing.Process(target=self.run, args=(wrapMesage, subNode, opts))
                    # p.start()
                    g = pool.spawn(self.run, wrapMesage, subNode, opts)
                    # g.join()
                    # NOTE: pool.spawn already starts the greenlet, so this is redundant
                    g.start()
            except Exception:
                maid_log.info(traceback.format_exc())
                # print('traceback.format_exc():\n%s' % traceback.format_exc())
    except Exception:
        maid_log.info(traceback.format_exc())
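# --- Illustrative only: the envelope startListen() expects on the master pub
# topic. The field names are taken from the handlers in run() below; the
# values here are invented.
def _example_wrap_message():
    from salt.newrun import FunctionType
    return {
        'type': FunctionType.SYNC_RUN,                    # selects the branch in run()
        'kwargs': {'tgt': ['10.0.0.1'], 'fun': 'test.ping', 'arg': []},
        'tempTopic': 'uuid-1234_example',                 # per-request reply channel
    }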
def run(self, wrapMessage, subNode, opts):
    try:
        self.local_client = get_local_client(auto_reconnect=True)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return
    error = None
    if 'type' in wrapMessage:
        if wrapMessage['type'] == FunctionType.SALT_CP:
            kwargs = wrapMessage['kwargs']
            # check whether tgt contains an ip list; fntgt is the final list to execute on
            fntgt = []
            tgt_type = 'glob'
            if isinstance(kwargs['tgt'], list):
                tgt_type = 'list'
                localAcceptIpList = self.getAcceptIp()
                fntgt = [ip for ip in kwargs['tgt'] if ip in localAcceptIpList]
                if len(fntgt) == 0:
                    self.redisInstance.publish(
                        wrapMessage['tempTopic'],
                        json.dumps({'type': MessageType.INTERRUPT,
                                    'message': 'Cannot find any matched ip.',
                                    'sub_ip': subNode},
                                   ensure_ascii=False, encoding='utf-8'))
                    return
                else:
                    kwargs['tgt'] = fntgt
            else:
                if kwargs['tgt'] != '*':
                    if not is_valid_ip(kwargs['tgt']):
                        self.redisInstance.publish(
                            wrapMessage['tempTopic'],
                            json.dumps({'type': MessageType.INTERRUPT,
                                        'message': 'Found invalid ip: %s' % kwargs['tgt'],
                                        'sub_ip': subNode},
                                       ensure_ascii=False, encoding='utf-8'))
                        return
            cp_arg = wrapMessage['cp_arg']
            args = [kwargs['tgt'], 'cp.recv', cp_arg, opts['timeout']]
            args = byteify(args)
            args.append(tgt_type)
            print('args: %s' % args)
            cp_result = self.local_client.cmd(*args)
            print('cp_result: %s' % cp_result)
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'cp_result': cp_result, 'dataType': 'SALT_CP'},
                           ensure_ascii=False, encoding='utf-8'))
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
        elif wrapMessage['type'] == FunctionType.FIND_ACCEPT:
            localAcceptIpList = self.getAcceptIp()
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'ip_list': localAcceptIpList, 'dataType': 'ACCEPT_MINION'},
                           ensure_ascii=False, encoding='utf-8'))
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
        elif wrapMessage['type'] == FunctionType.SYNC_RUN:
            kwargs = wrapMessage['kwargs']
            # check whether tgt contains an ip list; fntgt is the final list to execute on
            fntgt = []
            if isinstance(kwargs['tgt'], list):
                localAcceptIpList = self.getAcceptIp()
                fntgt = [ip for ip in kwargs['tgt'] if ip in localAcceptIpList]
                if len(fntgt) == 0:
                    self.redisInstance.publish(
                        wrapMessage['tempTopic'],
                        json.dumps({'type': MessageType.INTERRUPT,
                                    'message': 'Cannot find any matched ip.',
                                    'sub_ip': subNode},
                                   ensure_ascii=False, encoding='utf-8'))
                    return
                else:
                    kwargs['tgt'] = fntgt
            else:
                if kwargs['tgt'] != '*':
                    if not is_valid_ip(kwargs['tgt']):
                        self.redisInstance.publish(
                            wrapMessage['tempTopic'],
                            json.dumps({'type': MessageType.INTERRUPT,
                                        'message': 'Found invalid ip: %s' % kwargs['tgt'],
                                        'sub_ip': subNode},
                                       ensure_ascii=False, encoding='utf-8'))
                        return
            cmd_func = self.local_client.cmd_cli
            maid_log.info('for loop kwargs: %s' % kwargs)
            for full_ret in cmd_func(**kwargs):
                try:
                    import salt.cli.salt
                    client = salt.cli.salt.SaltCMD()
                    ret_, out, retcode = client._format_ret(full_ret)
                    self.redisInstance.publish(
                        wrapMessage['tempTopic'],
                        json.dumps({'ret_': ret_, 'out': out, 'retcode': retcode},
                                   ensure_ascii=False, encoding='utf-8'))
                except Exception:
                    maid_log.info('traceback.format_exc():\n%s' % traceback.format_exc())
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
            maid_log.info('end for loop: %s' % json.dumps(
                {'type': MessageType.WORK, 'result': True, 'error': error, 'sub_ip': subNode},
                ensure_ascii=False, encoding='utf-8'))
        elif wrapMessage['type'] == FunctionType.ASYNC_RUN:
            kwargs = wrapMessage['kwargs']
            # check whether tgt contains an ip list; fntgt is the final list to execute on
            fntgt = []
            if isinstance(kwargs['tgt'], list):
                localAcceptIpList = self.getAcceptIp()
                fntgt = [ip for ip in kwargs['tgt'] if ip in localAcceptIpList]
                if len(fntgt) == 0:
                    self.redisInstance.publish(
                        wrapMessage['tempTopic'],
                        json.dumps({'type': MessageType.INTERRUPT,
                                    'message': 'Cannot find any matched ip.',
                                    'sub_ip': subNode},
                                   ensure_ascii=False, encoding='utf-8'))
                    return
                else:
                    kwargs['tgt'] = fntgt
            else:
                if kwargs['tgt'] != '*':
                    if not is_valid_ip(kwargs['tgt']):
                        self.redisInstance.publish(
                            wrapMessage['tempTopic'],
                            json.dumps({'type': MessageType.INTERRUPT,
                                        'message': 'Found invalid ip: %s' % kwargs['tgt'],
                                        'sub_ip': subNode},
                                       ensure_ascii=False, encoding='utf-8'))
                        return
            jid = self.local_client.cmd_async(**kwargs)
            maid_log.info('Executed command with job ID: {0}'.format(jid))
            # NOTE: keep the jid mapping for 24h only
            self.redisInstance.sadd(wrapMessage['jid'], jid)
            self.redisInstance.expire(wrapMessage['jid'], 86400)
            maid_log.info('Latest jid change for: {0}'.format(jid))
            # self.redisInstance.set('p_s_job_{0}'.format(jid), wrapMessage['jid'])
            # self.redisInstance.expire('p_s_job_{0}'.format(jid), 86400)
            # subCacheKey = 'jobs_subcache_{0}_{1}'.format(wrapMessage['jid'], subNode)
            # maid_log.info('Handle subCacheKey: {0}, {1}'.format(subCacheKey, jid))
            # self.redisInstance.set(subCacheKey, jid)
            # self.redisInstance.expire(subCacheKey, 86400)
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True,
                            'error': error, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
            return
        elif wrapMessage['type'] == FunctionType.LOOKUP_JID:
            kwargs = wrapMessage['kwargs']
            maid_log.info('Lookup job ID: {0}'.format(kwargs['jid']))
            subCacheKey = 'jobs_subcache_{0}_{1}'.format(kwargs['jid'], subNode)
            subCacheValue = self.redisInstance.get(subCacheKey)
            maid_log.info('-------newrun-------subCacheValue-----------: %s' % subCacheValue)
            if subCacheValue:
                import salt.config
                __opts__ = salt.config.client_config(
                    os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
                creds = {'username': SaltStaticConstants.API_USER,
                         'password': SaltStaticConstants.API_PWD,
                         'eauth': SaltStaticConstants.API_EAUTH}
                import salt.auth
                self.auth = salt.auth.Resolver(__opts__)
                tokenObj = self.auth.mk_token(creds)
                maid_log.info('-------newrun-------tokenObj3-----------: %s' % tokenObj)
                lowstate = {'client': 'runner'}
                lowstate.update({'fun': 'jobs.list_job', 'jid': subCacheValue})
                lowstate = [lowstate]
                job_ret_info = list(
                    self.exec_lowstate(token=tokenObj['token'], lowstate=lowstate,
                                       opts=__opts__))
                maid_log.info('-------newrun-------job_ret_info-----------: %s' % job_ret_info)
                self.redisInstance.publish(
                    wrapMessage['tempTopic'],
                    json.dumps({'jobrets': job_ret_info},
                               ensure_ascii=False, encoding='utf-8'))
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True,
                            'error': error, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
            return
        elif wrapMessage['type'] == FunctionType.LIST_JOBS:
            kwargs = wrapMessage['kwargs']
            import salt.config
            import salt.auth  # needed for salt.auth.Resolver below
            __opts__ = salt.config.client_config(
                os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
            creds = {'username': SaltStaticConstants.API_USER,
                     'password': SaltStaticConstants.API_PWD,
                     'eauth': SaltStaticConstants.API_EAUTH}
            self.auth = salt.auth.Resolver(__opts__)
            tokenObj = self.auth.mk_token(creds)
            print('-------newrun-------tokenObj-----------: %s' % tokenObj)
            lowstate = {'client': 'runner'}
            lowstate.update({'fun': 'jobs.list_jobs'})
            lowstate = [lowstate]
            job_ret_info = list(
                self.exec_lowstate(token=tokenObj['token'], lowstate=lowstate,
                                   opts=__opts__))
            print('-------newrun-------list_jobs-----------: %s' % job_ret_info)
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'jobrets': job_ret_info},
                           ensure_ascii=False, encoding='utf-8'))
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.WORK, 'result': True,
                            'error': error, 'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
            return
        else:
            self.redisInstance.publish(
                wrapMessage['tempTopic'],
                json.dumps({'type': MessageType.INTERRUPT,
                            'message': 'Invalid function type: {0}'.format(wrapMessage['type']),
                            'sub_ip': subNode},
                           ensure_ascii=False, encoding='utf-8'))
            return
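# --- Illustrative only: the reply sequence a caller subscribed to
# wrapMessage['tempTopic'] sees from one sub master (values invented):
#
#   {'type': MessageType.PING, 'sub_ip': '10.0.0.2'}                   # ack from startListen
#   {'ret_': {'10.0.0.9': '...'}, 'out': 'nested', 'retcode': 0}       # zero or more data messages
#   {'type': MessageType.WORK, 'result': True, 'sub_ip': '10.0.0.2'}   # terminal marker
#
# The callers below count PING and WORK/INTERRUPT messages per syndic to decide
# when every sub master has finished.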
def apiv2_async(self, *args, **kwargs):
    # '''
    # Execute the salt api v2 async
    # '''
    self.retReturns = {}
    try:
        import salt.client
        import cherrypy
        salt_config = cherrypy.config['saltopts']
        sub_timeout = salt_config['channel_sub_timeout']
        sub_node = ''
        _channel_redis_sentinel = salt_config['channel_redis_sentinel']
        _channel_redis_password = salt_config['channel_redis_password']
        _master_pub_topic = salt_config['id']
        self.bootConfig = {
            '_sub_timeout': sub_timeout,
            '_sub_node': sub_node,
            '_channel_redis_sentinel': _channel_redis_sentinel,
            '_channel_redis_password': _channel_redis_password,
            '_master_pub_topic': _master_pub_topic
        }
        from salt.newrun import (json, byteify, MessageType)
        kwargs = byteify(kwargs)
        self.kwargsFormat(kwargs)
        if isinstance(kwargs['tgt'], str):
            if kwargs['tgt'] != '*':
                tgtTemp = kwargs['tgt']
                kwargs['tgt'] = tgtTemp.split(",")
                if len(kwargs['tgt']) > 1:
                    kwargs['tgt_type'] = 'list'
                else:
                    kwargs['tgt_type'] = 'glob'
                    kwargs['tgt'] = tgtTemp
                passedTgtFilter = self.kwargsTgtFilter(kwargs)
                if not passedTgtFilter:
                    return self.retReturns
            else:
                kwargs['tgt_type'] = 'glob'
        elif isinstance(kwargs['tgt'], list):
            kwargs['tgt_type'] = 'list'
            passedTgtFilter = self.kwargsTgtFilter(kwargs)
            if not passedTgtFilter:
                return self.retReturns
        else:
            print('invalid tgt type: %s' % type(kwargs['tgt']))
            return self.retReturns
        import salt.newrun
        # NOTE: generate a jid for saltx
        jid = salt.utils.jid.gen_jid()
        wrapMesage = {
            'type': salt.newrun.FunctionType.ASYNC_RUN,
            'jid': jid,
            'kwargs': kwargs,
            'tempTopic': str(salt.newrun.uuid.uuid1()) + "_" + jid
        }
        api_log.info('async run with wrapMesage: %s' % wrapMesage)
        from salt.redis.RedisWrapper import Singleton
        redisWrapper = Singleton(**self.bootConfig)
        # NOTE: the cmd must be published after the redis listener is registered,
        # otherwise the ping message would be missed
        redisWrapper.redisInstance.publish(
            redisWrapper.master_pub_topic,
            salt.newrun.json.dumps(wrapMesage))
        redisWrapper.redisInstance.connection_pool.disconnect()
        self.retReturns = {
            "_links": {"jobs": [{"href": ("/jobs/%s" % jid)}]},
            "return": [{
                "jid": ("%s" % jid),
                "minions": ([] if (kwargs['tgt'] == '*') else kwargs['tgt'])
            }]
        }
        return self.retReturns
    except:
        api_log.info(traceback.format_exc())
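# --- Rough shape of the value apiv2_async returns (jid format illustrative):
#
#   {"_links": {"jobs": [{"href": "/jobs/20240101000000000000"}]},
#    "return": [{"jid": "20240101000000000000", "minions": ["10.0.0.1"]}]}
#
# The jid can later be fed back through apiv2_jobs() with
# FunctionType.LOOKUP_JID to collect the job results from every sub master.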
def apiv2_sync(self, *args, **kwargs):
    # '''
    # Execute the salt api v2 (sync)
    # '''
    self.retReturns = {}
    try:
        import salt.client
        import cherrypy
        salt_config = cherrypy.config['saltopts']
        sub_timeout = salt_config['channel_sub_timeout']
        sub_node = ''
        _channel_redis_sentinel = salt_config['channel_redis_sentinel']
        _channel_redis_password = salt_config['channel_redis_password']
        _master_pub_topic = salt_config['id']
        self.bootConfig = {
            '_sub_timeout': sub_timeout,
            '_sub_node': sub_node,
            '_channel_redis_sentinel': _channel_redis_sentinel,
            '_channel_redis_password': _channel_redis_password,
            '_master_pub_topic': _master_pub_topic
        }

        # NOTE: only on the super master: filter out no-response ips when using saltx
        def getPassedIp():
            import numpy
            numpy.warnings.filterwarnings('ignore')
            passed_ip = numpy.loadtxt('/data0/md/ip.md', dtype=numpy.str)
            return passed_ip.tolist()

        from salt.newrun import (json, byteify, MessageType)
        kwargs = byteify(kwargs)
        self.kwargsFormat(kwargs)
        minionLogList = set()
        if isinstance(kwargs['tgt'], str):
            if kwargs['tgt'] != '*':
                tgtTemp = kwargs['tgt']
                kwargs['tgt'] = tgtTemp.split(",")
                if len(kwargs['tgt']) > 1:
                    kwargs['tgt_type'] = 'list'
                    minionLogList = set(kwargs['tgt'])
                else:
                    kwargs['tgt_type'] = 'glob'
                    kwargs['tgt'] = tgtTemp
                    minionLogList.add(kwargs['tgt'])
                passedTgtFilter = self.kwargsTgtFilter(kwargs)
                if not passedTgtFilter:
                    return self.retReturns
            else:
                kwargs['tgt_type'] = 'glob'
        elif isinstance(kwargs['tgt'], list):
            minionLogList = set(kwargs['tgt'])
            kwargs['tgt_type'] = 'list'
            passedTgtFilter = self.kwargsTgtFilter(kwargs)
            if not passedTgtFilter:
                return self.retReturns
        else:
            print('invalid tgt type: %s' % type(kwargs['tgt']))
            return self.retReturns
        import salt.newrun
        wrapMesage = {
            'type': salt.newrun.FunctionType.SYNC_RUN,
            'kwargs': kwargs,
            'tempTopic': str(salt.newrun.uuid.uuid1()) + getRandomSuffix()
        }
        api_log.info(wrapMesage)
        from salt.redis.RedisWrapper import Singleton
        redisWrapper = Singleton(**self.bootConfig)
        selfIp = salt_config['id']
        redisChannel = redisWrapper.redisInstance.pubsub()
        redisChannel.subscribe(wrapMesage['tempTopic'])
        noResponseRet = []
        noConnectRet = []
        emptyRet = []
        # retcodes = []
        comeSubList = getAcceptIp()
        if selfIp in comeSubList:
            comeSubList.remove(selfIp)
        syndic_count = len(comeSubList)
        resultCount = 0
        pingCount = 0
        resultPingSet = set()
        resultExeSet = set()
        executeStart = time.time()
        # NOTE: the cmd must be published after the redis listener is registered,
        # otherwise the ping message would be missed
        redisWrapper.redisInstance.publish(
            redisWrapper.master_pub_topic,
            salt.newrun.json.dumps(wrapMesage))
        for message in redisChannel.listen():
            try:
                messageType = byteify(message)
                if messageType['type'] == 'message':
                    # count a result only when a sub returns execution data
                    resultMessage = messageType['data']
                    try:
                        callResult = json.loads(resultMessage, encoding='utf-8')
                        callResult = byteify(callResult)
                        if isinstance(callResult, dict):
                            if 'type' in callResult:
                                messageType = callResult['type']
                                messageIp = callResult['sub_ip']
                                if messageType == MessageType.PING and messageIp in comeSubList:
                                    resultPingSet.add(messageIp)
                                    pingCount += 1
                                elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                    resultExeSet.add(messageIp)
                                    resultCount += 1
                                    # if messageIp not in readyBackupMaidSet:
                                    #     resultCount += 1
                            else:
                                # skip subs from which no return was received
                                retJsonObj = callResult['ret_']
                                if retJsonObj:
                                    # reset the start time
                                    executeStart = time.time()
                                    if callResult['out'] == 'no_return':
                                        if '[No response]' in json.dumps(retJsonObj):
                                            noResponseRet.append(callResult)
                                        else:
                                            noConnectRet.append(callResult)
                                    else:
                                        for k, v in retJsonObj.items():
                                            minionLogList.discard(k)
                                        if callResult['retcode'] == 0:
                                            tmpRet = retJsonObj
                                            self.retReturns = dict(self.retReturns, **tmpRet)
                                        else:
                                            emptyRet.append(callResult)
                        else:
                            # TODO handle other messages?
                            pass
                    except:
                        resultCount += 1
                        api_log.info(traceback.format_exc())

                    # check sub timeout; break if no node is running any more
                    # api_log.info(resultExeSet.difference(comeSubList))
                    # api_log.info('{}, {}, {}, {}, {}'.format(syndic_count, resultCount, pingCount, resultExeSet, [item for item in resultExeSet if item not in comeSubList]))
                    losePingCount = syndic_count - pingCount
                    runningCount = syndic_count - resultCount - losePingCount
                    if pingCount < syndic_count and runningCount <= 0:
                        if (time.time() - executeStart) > sub_timeout:
                            break
                    elif syndic_count == resultCount and resultCount == pingCount:
                        break
                    elif pingCount == syndic_count and runningCount > 0:
                        if (time.time() - executeStart) > sub_timeout:
                            break
                    elif len(minionLogList) <= 0:
                        break
            except:
                api_log.info(traceback.format_exc())
        redisChannel.unsubscribe(wrapMesage['tempTopic'])
        redisChannel.connection_pool.disconnect()
        # print the error returns at the end
        for result in emptyRet:
            if result['ret_']:
                tmpRet = result['ret_']
                self.retReturns = dict(self.retReturns, **tmpRet)
        for result in noResponseRet:
            if result['ret_']:
                tmpRet = result['ret_']
                self.retReturns = dict(self.retReturns, **tmpRet)
        for result in noConnectRet:
            if result['ret_']:
                tmpRet = result['ret_']
                self.retReturns = dict(self.retReturns, **tmpRet)
        disconnectedSyndic = set(comeSubList).difference(resultPingSet)
        if disconnectedSyndic:
            api_log.info('With disconnected syndic: %s' % list(disconnectedSyndic))
    except:
        api_log.error("sync throw error:")
        api_log.error(traceback.format_exc())
    return self.retReturns
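# --- Illustrative only: the loop-exit bookkeeping above in numbers (invented).
# Suppose 3 syndics are known locally, all 3 answered the PING, and 2 have sent
# their WORK/INTERRUPT marker so far:
#
#   losePingCount = 3 - 3 = 0          # every syndic acknowledged the request
#   runningCount  = 3 - 2 - 0 = 1      # one syndic is still executing
#
# The loop keeps waiting while runningCount > 0 and less than sub_timeout has
# passed since the last received result; it breaks as soon as all counts match
# or every targeted minion has reported (minionLogList is empty).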
def apiv2_jobs(self, fun, timeout=None, full_return=False, **kwargs):
    # '''
    # Execute the salt api v2 lookup job
    # '''
    import salt.newrun
    if fun == salt.newrun.FunctionType.LOOKUP_JID:
        self.retReturns = {
            'info': [{
                'User': salt.newrun.SaltStaticConstants.API_USER,
                'Minions': [],
                'Arguments': [],
                'Result': {},
                'Target': []
            }],
            'return': [{}]
        }
    elif fun == salt.newrun.FunctionType.LIST_JOBS:
        self.retReturns = {'return': []}
    else:
        return {}
    try:
        import salt.client
        import cherrypy
        salt_config = cherrypy.config['saltopts']
        sub_timeout = salt_config['channel_sub_timeout']
        sub_node = ''
        _channel_redis_sentinel = salt_config['channel_redis_sentinel']
        _channel_redis_password = salt_config['channel_redis_password']
        _master_pub_topic = salt_config['id']
        self.bootConfig = {
            '_sub_timeout': sub_timeout,
            '_sub_node': sub_node,
            '_channel_redis_sentinel': _channel_redis_sentinel,
            '_channel_redis_password': _channel_redis_password,
            '_master_pub_topic': _master_pub_topic
        }
        from salt.newrun import (json, byteify, MessageType)
        wrapMesage = {
            'type': fun,
            'kwargs': kwargs,
            'tempTopic': '%s%s%s' % (salt.newrun.TopicType.JOBS,
                                     str(salt.newrun.uuid.uuid1()),
                                     getRandomSuffix())
        }
        api_log.info("jobswrapMesage: %s" % wrapMesage)
        from salt.redis.RedisWrapper import Singleton
        redisWrapper = Singleton(**self.bootConfig)
        selfIp = salt_config['id']
        redisChannel = redisWrapper.redisInstance.pubsub()
        redisChannel.subscribe(wrapMesage['tempTopic'])
        comeSubList = getAcceptIp()
        if selfIp in comeSubList:
            comeSubList.remove(selfIp)
        syndic_count = len(comeSubList)
        resultCount = 0
        pingCount = 0
        resultPingSet = set()
        resultExeSet = set()
        executeStart = time.time()
        # add backup node to resultExeSet
        # backupMaster = salt_config['backup_master']
        # api_log.info("backupMaster: {}".format(backupMaster))
        # backupMasterArray = backupMaster.split(',')
        # backupCount = len(backupMasterArray)
        # for m in backupMasterArray:
        #     resultExeSet.add(m)
        # api_log.info("resultExeSetArray: {}".format(resultExeSet))
        # NOTE: the cmd must be published after the redis listener is registered,
        # otherwise the ping message would be missed
        redisWrapper.redisInstance.publish(
            redisWrapper.master_pub_topic,
            salt.newrun.json.dumps(wrapMesage))
        calLoopCount = 0
        tmpPercentCount = 0
        for message in redisChannel.listen():
            try:
                messageJson = byteify(message)
                if messageJson['type'] == 'message':
                    # count a result only when a sub returns execution data
                    resultMessage = messageJson['data']
                    try:
                        # api_log.info(resultMessage)
                        callResult = json.loads(resultMessage, encoding='utf-8')
                        callResult = byteify(callResult)
                        # NOTE: LIST_JOBS carries no jid, so use .get()
                        api_log.info('callback result for: %s, with: %s, result: %s'
                                     % (kwargs.get('jid'), wrapMesage['tempTopic'], callResult))
                        if isinstance(callResult, dict):
                            if 'type' in callResult:
                                messageType = callResult['type']
                                messageIp = callResult['sub_ip']
                                if messageType == MessageType.PING and messageIp in comeSubList:
                                    resultPingSet.add(messageIp)
                                    pingCount += 1
                                elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                    resultExeSet.add(messageIp)
                                    resultCount += 1
                            else:
                                # skip subs from which no return was received
                                if callResult['jobrets']:
                                    if fun == salt.newrun.FunctionType.LOOKUP_JID:
                                        tmpRet = callResult['jobrets']
                                        retInfo = self.retReturns['info'][0]
                                        if 'Function' in tmpRet[0]:
                                            if tmpRet[0]['Function'] != 'unknown-function':
                                                retInfo['Function'] = tmpRet[0]['Function']
                                        retInfo['jid'] = kwargs['jid']
                                        if 'Target-type' in tmpRet[0]:
                                            retInfo['Target-type'] = tmpRet[0]['Target-type']
                                        if 'StartTime' in tmpRet[0]:
                                            retInfo['StartTime'] = tmpRet[0]['StartTime']
                                        if 'Target' in tmpRet[0]:
                                            if isinstance(tmpRet[0]['Target'], list):
                                                retInfo['Target'] = ",".join(tmpRet[0]['Target'])
                                            else:
                                                if tmpRet[0]['Target'] != 'unknown-target':
                                                    retInfo['Target'] = tmpRet[0]['Target']
                                        if 'Arguments' in tmpRet[0]:
                                            retInfo['Arguments'] = tmpRet[0]['Arguments']
                                        if 'Minions' in tmpRet[0]:
                                            retInfo['Minions'] = retInfo['Minions'] + tmpRet[0]['Minions']
                                        if 'Result' in tmpRet[0]:
                                            retInfo['Result'] = dict(retInfo['Result'],
                                                                     **tmpRet[0]['Result'])
                                            if tmpRet[0]['Result']:
                                                for key, value in tmpRet[0]['Result'].items():
                                                    subRet = {key: value['return']}
                                                    z = self.retReturns['return'][0].copy()
                                                    z.update(subRet)
                                                    self.retReturns['return'][0] = z
                                    elif fun == salt.newrun.FunctionType.LIST_JOBS:
                                        self.retReturns['return'] = (
                                            self.retReturns['return'] + callResult['jobrets'])
                        else:
                            # TODO handle other messages?
                            pass
                    except:
                        resultCount += 1
                        api_log.error(traceback.format_exc())

                    # check sub timeout; break if no node is running any more
                    resultCount = len(resultExeSet)
                    losePingCount = syndic_count - pingCount
                    runningCount = syndic_count - resultCount - losePingCount
                    if syndic_count == pingCount and pingCount == resultCount:
                        break
                    api_log.info('%s, %s, %s, %s, %s'
                                 % (syndic_count, resultCount, pingCount, runningCount,
                                    [i for i in comeSubList if i not in resultExeSet]))
                    if pingCount < syndic_count and runningCount <= 0:
                        if (time.time() - executeStart) > sub_timeout:
                            break
                    # if pingCount == syndic_count:
                    #     nowPercentCount = float(resultCount) / float(syndic_count)
                    #     if nowPercentCount >= 0.9:
                    #         calLoopCount += 1
            except:
                api_log.error(traceback.format_exc())
            # check whether the syndic count has been reached, then break out
            if (resultCount - syndic_count) >= 0:
                break
            api_log.info("calLoopCount: {}".format(calLoopCount))
            if calLoopCount >= 2:
                break
        redisChannel.unsubscribe(wrapMesage['tempTopic'])
        redisChannel.connection_pool.disconnect()
        disconnectedSyndic = set(comeSubList).difference(resultPingSet)
        if disconnectedSyndic:
            api_log.info('With disconnected syndic: %s' % list(disconnectedSyndic))
        # at the end of the process, de-duplicate the target list
        api_log.info("self.retReturns: {}".format(self.retReturns))
        return self.retReturns
    except:
        api_log.error(traceback.format_exc())
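# --- Illustrative only: how the two job queries above are typically driven
# (argument values invented):
#
#   apiv2_jobs(salt.newrun.FunctionType.LOOKUP_JID, jid='20240101000000000000')
#       -> {'info': [{...}], 'return': [{...}]}
#   apiv2_jobs(salt.newrun.FunctionType.LIST_JOBS)
#       -> {'return': [...]}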
def run_oldstyle_v1(self):
    '''
    Make the salt client call in the old-style, all-in-one call method
    '''
    arg = [self._load_files(), self.opts['dest']]
    self.newopt = self.opts
    sub_timeout = self.newopt['channel_sub_timeout']
    if self.opts['timeout'] > sub_timeout:
        sub_timeout = self.opts['timeout']
    self.bootConfig = {
        '_sub_timeout': sub_timeout,
        '_sub_node': '',
        '_channel_redis_sentinel': self.newopt['channel_redis_sentinel'],
        '_channel_redis_password': self.newopt['channel_redis_password'],
        '_master_pub_topic': self.newopt['id']
    }
    import salt.newrun
    clientPub = salt.newrun.MasterPub(**self.bootConfig)
    # read the tgt list from a file
    if self.opts['file_target']:
        try:
            with open(self.newopt['tgt']) as xf:
                xfContent = xf.read().strip("\n").strip(' ')
                if xfContent == '':
                    self.exit(2, 'Found an empty ip list in {0}, please check.\n'.format(
                        self.newopt['tgt']))
                    return
                if ',' in xfContent:
                    self.newopt['tgt'] = xfContent.split(",")
                    self.selected_target_option = 'list'
                elif '\n' in xfContent:
                    self.newopt['tgt'] = xfContent.split("\n")
                    self.selected_target_option = 'list'
                else:
                    print('Found invalid args with -X.')
                    return
        except IOError as exc:
            self.exit(2, '{0}\n'.format(exc))
            return
    from salt.newrun import (json, byteify, MessageType)
    arg = byteify(arg)
    wrapMesage = {
        'type': salt.newrun.FunctionType.SALT_CP,
        'kwargs': self.newopt,
        'cp_arg': arg,
        'tempTopic': ('cp_%s' % str(salt.newrun.uuid.uuid1()))
    }
    redisChannel = clientPub.getRedisInstance().pubsub()
    redisChannel.subscribe(wrapMesage['tempTopic'])
    clientPub.publishToSyndicSub(salt.newrun.json.dumps(wrapMesage))
    comeSubList = clientPub.pullAccept()
    ping1stCount = 0
    work1stcount = 0
    for message in redisChannel.listen():
        try:
            messageJson = byteify(message)
            if messageJson['type'] == 'message':
                resultMessage = messageJson['data']
                try:
                    callResult = json.loads(resultMessage, encoding='utf-8')
                    callResult = byteify(callResult)
                    if isinstance(callResult, dict):
                        if 'type' in callResult:
                            messageType = callResult['type']
                            messageIp = callResult['sub_ip']
                            if messageType == MessageType.PING and messageIp in comeSubList:
                                ping1stCount += 1
                            elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                work1stcount += 1
                                if ping1stCount == work1stcount and work1stcount == len(comeSubList):
                                    break
                            else:
                                print('invalid callresult: %s' % callResult)
                        else:
                            # skip subs from which no return was received
                            cp_result = callResult['cp_result']
                            if cp_result:
                                salt.output.display_output(
                                    cp_result,
                                    self.opts.get('output', 'nested'),
                                    self.opts)
                    # print('callResult: %s' % callResult)
                except:
                    print(traceback.format_exc())
        except:
            print(traceback.format_exc())
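# --- Illustrative only: the target file read when file_target (-X) is set.
# Either a comma-separated or a newline-separated ip list is accepted, e.g.
#
#   10.0.0.1,10.0.0.2,10.0.0.3
#
# or
#
#   10.0.0.1
#   10.0.0.2
#
# A file containing a single address with neither separator falls through to
# the "invalid args" branch above and is rejected.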
def new_run(self):
    # '''
    # Execute the salt command line
    # '''
    import salt.client
    # self.parse_args()
    # print('################:%s' % (self.config['order_masters'] == True))
    signal.signal(signal.SIGINT, self.quit)
    signal.signal(signal.SIGTERM, self.quit)
    if self.config['log_level'] not in ('quiet', ):
        # Setup file logging!
        self.setup_logfile_logger()
        verify_log(self.config)
    try:
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''
        self.local_client = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors,
            auto_reconnect=True)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return
    if self.options.preview_target:
        minion_list = self._preview_target()
        self._output_ret(minion_list, self.config.get('output', 'nested'))
        return
    if self.options.timeout <= 0:
        self.options.timeout = self.local_client.opts['timeout']
    # read the tgt list from a file
    if self.options.file_target:
        try:
            with open(self.config['tgt']) as xf:
                xfContent = xf.read().strip("\n").strip(' ')
                if xfContent == '':
                    self.exit(2, 'Found an empty ip list in {0}, please check.\n'.format(
                        self.config['tgt']))
                    return
                if ',' in xfContent:
                    self.config['tgt'] = xfContent.split(",")
                    self.selected_target_option = 'list'
                elif '\n' in xfContent:
                    self.config['tgt'] = xfContent.split("\n")
                    self.selected_target_option = 'list'
                else:
                    print('Found invalid args with -X.')
                    return
        except IOError as exc:
            self.exit(2, '{0}\n'.format(exc))
            return
    kwargs = {
        'tgt': self.config['tgt'],
        'fun': self.config['fun'],
        'arg': self.config['arg'],
        'timeout': self.options.timeout,
        'show_timeout': self.options.show_timeout,
        'show_jid': self.options.show_jid
    }
    # kwargs = self.config
    # kwargs['timeout'] = self.options.timeout
    # kwargs['show_timeout'] = self.options.show_timeout
    # kwargs['show_jid'] = self.options.show_jid
    kwargs['delimiter'] = self.options.delimiter
    if self.selected_target_option:
        kwargs['tgt_type'] = self.selected_target_option
    else:
        kwargs['tgt_type'] = 'glob'
    if getattr(self.options, 'return'):
        kwargs['ret'] = getattr(self.options, 'return')
    if getattr(self.options, 'return_config'):
        kwargs['ret_config'] = getattr(self.options, 'return_config')
    if getattr(self.options, 'return_kwargs'):
        kwargs['ret_kwargs'] = yamlify_arg(getattr(self.options, 'return_kwargs'))
    if getattr(self.options, 'module_executors'):
        kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors'))
    if getattr(self.options, 'metadata'):
        kwargs['metadata'] = yamlify_arg(getattr(self.options, 'metadata'))
    # If using eauth and a token hasn't already been loaded into
    # kwargs, prompt the user to enter auth credentials
    if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
        # This is expensive. Don't do it unless we need to.
        import salt.auth
        resolver = salt.auth.Resolver(self.config)
        res = resolver.cli(self.options.eauth)
        if self.options.mktoken and res:
            tok = resolver.token_cli(self.options.eauth, res)
            if tok:
                kwargs['token'] = tok.get('token', '')
        if not res:
            sys.stderr.write('ERROR: Authentication failed\n')
            sys.exit(2)
        kwargs.update(res)
        kwargs['eauth'] = self.options.eauth
    self.newopt = self.config
    sub_timeout = self.newopt['channel_sub_timeout']
    if self.options.timeout > sub_timeout:
        sub_timeout = self.options.timeout
    self.bootConfig = {
        '_sub_timeout': sub_timeout,
        '_sub_node': '',
        '_channel_redis_sentinel': self.newopt['channel_redis_sentinel'],
        '_channel_redis_password': self.newopt['channel_redis_password'],
        '_master_pub_topic': self.newopt['id']
    }

    # NOTE: only on the super master: filter out no-response ips when using saltx
    def getPassedIp():
        import numpy
        numpy.warnings.filterwarnings('ignore')
        passed_ip = numpy.loadtxt('/data0/md/ip.md', dtype=numpy.str)
        return passed_ip

    missedList = set()
    runAllminion = False
    if isinstance(kwargs['tgt'], list):
        passed_ip = getPassedIp()
        kwargs['tgt'] = [i for i in kwargs['tgt'] if i not in passed_ip]
        if len(kwargs['tgt']) == 0:
            print_cli('There is nothing in the ip list to apply.')
            return
    else:
        if kwargs['tgt'] != '*':
            passed_ip = getPassedIp()
            if kwargs['tgt'] in passed_ip:
                print_cli('There is nothing in the ip list to apply.')
                return
        else:
            runAllminion = True
    import salt.newrun
    clientPub = salt.newrun.MasterPub(**self.bootConfig)
    if self.config['async']:
        # NOTE: generate a jid for saltx
        jid = salt.utils.jid.gen_jid()
        wrapMesage = {
            'type': salt.newrun.FunctionType.ASYNC_RUN,
            'jid': jid,
            'kwargs': kwargs,
            'tempTopic': str(salt.newrun.uuid.uuid1())
        }
        clientPub.publishToSyndicSub(salt.newrun.json.dumps(wrapMesage))
        print_cli('Executed command with master job ID: {0}'.format(jid))
        return
    else:
        wrapMesage = {
            'type': salt.newrun.FunctionType.SYNC_RUN,
            'kwargs': kwargs,
            'tempTopic': str(salt.newrun.uuid.uuid1())
        }
        batch_hold = 0
        lossSyndic = []
        repeatet = set()
        emptyRet = []
        noResponseRet = []
        noConnectRet = []
        # running set: the batch currently being executed
        global batch_running
        batch_running = set()
        # initial batch ip pool
        batch_init = set()
        comeSubList = clientPub.pullAccept()
        resultPingSet = []
        resultExeSet = []
        global normalsize
        normalsize = 0
        sucset = set()
        debugSet = set()

        def batchExecuteCallback(selfIp, clientPub, redisChannel):
            while len(batch_running) <= 0:
                tmpKwargs = wrapMesage['kwargs']
                try:
                    for i in range(batch_hold):
                        batch_running.add(batch_init.pop())
                    # trigger the sub run
                    tmpKwargs['tgt'] = list(batch_running)
                    wrapMesage['kwargs'] = tmpKwargs
                    batchRun(wrapMesage, selfIp, clientPub, redisChannel)
                except:
                    # batch_init is exhausted: run whatever is left, then stop
                    if len(batch_running) > 0:
                        # trigger the sub run
                        tmpKwargs['tgt'] = list(batch_running)
                        wrapMesage['kwargs'] = tmpKwargs
                        batchRun(wrapMesage, selfIp, clientPub, redisChannel)
                    else:
                        break
            # print the error returns at the end
            for result in emptyRet:
                if result['ret_']:
                    # print to the client console
                    self._output_ret(result['ret_'], result['out'])
            for result in noResponseRet:
                if result['ret_']:
                    for k, v in result['ret_'].items():
                        if k not in sucset:
                            # print to the client console
                            self._output_ret(result['ret_'], result['out'])
            for result in noConnectRet:
                if result['ret_']:
                    for k, v in result['ret_'].items():
                        if k not in sucset:
                            # print to the client console
                            self._output_ret(result['ret_'], result['out'])
            disconnectedSyndic = set(comeSubList).difference(resultPingSet)
            if disconnectedSyndic:
                print_cli('With disconnected syndic: %s' % list(disconnectedSyndic))
            if len(missedList) > 0 or len(lossSyndic) > 0:
                print('missed maids: {}\nmissed minions: {}'.format(
                    ",".join(lossSyndic), ",".join(missedList)))
            if len(repeatet) > 0:
                print('Found some minions that ran repeatedly: {}'.format(repeatet))
            global normalsize
            print('normal size: {}\nmissed size: {}\nempty size: {}'.format(
                normalsize, len(missedList), len(emptyRet)))
            redisChannel.unsubscribe(wrapMesage['tempTopic'])
            redisChannel.connection_pool.disconnect()

        def batchRun(wrapMesage, selfIp, clientPub, redisChannel):
            # NOTE: batch running mode
            # handle the special syndic (ourselves)
            if selfIp in comeSubList:
                comeSubList.remove(selfIp)
            syndic_count = len(comeSubList)
            resultCount = 0
            pingCount = 0
            executeStart = time.time()
            normalDone = False
            # NOTE: the cmd must be published after the redis listener is registered,
            # otherwise the ping message would be missed
            # tmpKwargs1 = wrapMesage['kwargs']
            # batch_running = set(tmpKwargs1['tgt'])
            # print('publish wrapMesage: %s' % wrapMesage)
            clientPub.publishToSyndicSub(salt.newrun.json.dumps(wrapMesage))
            from salt.newrun import (json, byteify, MessageType)
            for message in redisChannel.listen():
                try:
                    messageJson = byteify(message)
                    if messageJson['type'] == 'message':
                        resultMessage = messageJson['data']
                        try:
                            callResult = json.loads(resultMessage, encoding='utf-8')
                            callResult = byteify(callResult)
                            if isinstance(callResult, dict):
                                if 'type' in callResult:
                                    messageType = callResult['type']
                                    messageIp = callResult['sub_ip']
                                    if messageType == MessageType.PING and messageIp in comeSubList:
                                        resultPingSet.append(messageIp)
                                    elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                        resultExeSet.append(messageIp)
                                    else:
                                        main_log.info('invalid callresult: %s' % callResult)
                                else:
                                    # skip subs from which no return was received
                                    retJsonObj = callResult['ret_']
                                    if retJsonObj:
                                        # reset the start time
                                        executeStart = time.time()
                                        for k, v in retJsonObj.items():
                                            # remove from the running/waiting set
                                            batch_running.discard(k)
                                        if callResult['out'] == 'no_return':
                                            if '[No response]' in json.dumps(retJsonObj):
                                                noResponseRet.append(callResult)
                                            else:
                                                noConnectRet.append(callResult)
                                        else:
                                            # remember the ips that succeeded
                                            for k, v in retJsonObj.items():
                                                sucset.add(k)
                                                # NOTE: debug
                                                if k in debugSet:
                                                    repeatet.add(json.dumps(retJsonObj))
                                                else:
                                                    debugSet.add(k)
                                            if callResult['retcode'] == 0:
                                                isnil = False
                                                for k in retJsonObj.keys():
                                                    v = retJsonObj[k]
                                                    if v == "":
                                                        isnil = True
                                                        break
                                                if isnil:
                                                    emptyRet.append(callResult)
                                                else:
                                                    global normalsize
                                                    normalsize += 1
                                                    self._output_ret(callResult['ret_'],
                                                                     callResult['out'])
                                            else:
                                                emptyRet.append(callResult)
                            else:
                                # TODO handle other messages?
                                pass
                        except:
                            resultCount += 1
                            print_cli(traceback.format_exc())
                        pingCount = len(resultPingSet)
                        resultCount = len(resultExeSet)
                        # from collections import Counter
                        # main_log.info("%s, %s, %s, %s" % (pingCount, resultCount, Counter(resultPingSet), Counter(resultExeSet)))
                        # if len(batch_init) <= 0:
                        #     break
                        if len(batch_running) == 0:
                            break
                        if pingCount != resultCount:
                            if (time.time() - executeStart) > sub_timeout:
                                # main_log.info("---T0 stop")
                                break
                except:
                    main_log.info(traceback.format_exc())

        def executeCallback(selfIp):
            redisChannel = clientPub.getRedisInstance().pubsub()
            redisChannel.subscribe(wrapMesage['tempTopic'])
            noResponseRet = []
            noConnectRet = []
            emptyRet = []
            # retcodes = []
            comeSubList = clientPub.pullAccept()
            # handle the special syndic (ourselves)
            if selfIp in comeSubList:
                comeSubList.remove(selfIp)
            syndic_count = len(comeSubList)
            resultCount = 0
            pingCount = 0
            resultPingSet = set()
            sucset = set()
            resultExeSet = set()
            debugSet = set()
            repeatet = set()
            lossSyndic = []
            executeStart = time.time()
            normalDone = False
            # NOTE: the cmd must be published after the redis listener is registered,
            # otherwise the ping message would be missed
            clientPub.publishToSyndicSub(salt.newrun.json.dumps(wrapMesage))
            from salt.newrun import (json, byteify, MessageType)
            normalsize = 0
            for message in redisChannel.listen():
                try:
                    messageJson = byteify(message)
                    if messageJson['type'] == 'message':
                        resultMessage = messageJson['data']
                        try:
                            callResult = json.loads(resultMessage, encoding='utf-8')
                            callResult = byteify(callResult)
                            if isinstance(callResult, dict):
                                if 'type' in callResult:
                                    messageType = callResult['type']
                                    messageIp = callResult['sub_ip']
                                    if messageType == MessageType.PING and messageIp in comeSubList:
                                        resultPingSet.add(messageIp)
                                        pingCount += 1
                                    elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                        # main_log.info('work or interrupt: %s' % (messageIp))
                                        resultExeSet.add(messageIp)
                                        resultCount += 1
                                    else:
                                        main_log.info('invalid callresult: %s' % callResult)
                                else:
                                    # skip subs from which no return was received
                                    retJsonObj = callResult['ret_']
                                    if retJsonObj:
                                        # reset the start time
                                        executeStart = time.time()
                                        if callResult['out'] == 'no_return':
                                            if '[No response]' in json.dumps(retJsonObj):
                                                noResponseRet.append(callResult)
                                            else:
                                                noConnectRet.append(callResult)
                                            # add to the missed list
                                            for k, v in retJsonObj.items():
                                                missedList.add(k)
                                        else:
                                            # remember the ips that succeeded
                                            for k, v in retJsonObj.items():
                                                sucset.add(k)
                                                # NOTE: debug
                                                if k in debugSet:
                                                    repeatet.add(json.dumps(retJsonObj))
                                                else:
                                                    debugSet.add(k)
                                            if callResult['retcode'] == 0:
                                                isnil = False
                                                for k in retJsonObj.keys():
                                                    v = retJsonObj[k]
                                                    if v == "":
                                                        isnil = True
                                                        break
                                                if isnil:
                                                    emptyRet.append(callResult)
                                                else:
                                                    normalsize += 1
                                                    self._output_ret(callResult['ret_'],
                                                                     callResult['out'])
                                            else:
                                                emptyRet.append(callResult)
                            else:
                                # TODO handle other messages?
                                pass
                        except:
                            resultCount += 1
                            print_cli(traceback.format_exc())
                        # check sub timeout; break if no node is running any more
                        # lossSyndic = [item for item in comeSubList if item not in resultExeSet]
                        # print(lossSyndic)
                        losePingCount = syndic_count - pingCount
                        runningCount = syndic_count - resultCount - losePingCount
                        # lossPing = [item for item in comeSubList if item not in resultPingSet]
                        # main_log.info("%s,%s,%s,%s,%s,%s" % (pingCount, syndic_count, runningCount, sub_timeout, executeStart, lossPing))
                        if pingCount < syndic_count and runningCount <= 0:
                            if (time.time() - executeStart) > sub_timeout:
                                main_log.info("---T0 stop")
                                break
                        elif syndic_count == pingCount and runningCount > 0:
                            if (time.time() - executeStart) > sub_timeout:
                                # main_log.info("---T1 stop")
                                break
                        elif syndic_count == pingCount and resultCount == syndic_count:
                            # main_log.info("---T2 stop")
                            break
                except:
                    main_log.info(traceback.format_exc())
            redisChannel.unsubscribe(wrapMesage['tempTopic'])
            redisChannel.connection_pool.disconnect()
            # main_log.info('---T: %s, %s, %s' % (emptyRet, noResponseRet, noConnectRet))
            # print the error returns at the end
            for result in emptyRet:
                if result['ret_']:
                    # print to the client console
                    self._output_ret(result['ret_'], result['out'])
            for result in noResponseRet:
                if result['ret_']:
                    for k, v in result['ret_'].items():
                        if k not in sucset:
                            # print to the client console
                            self._output_ret(result['ret_'], result['out'])
            for result in noConnectRet:
                if result['ret_']:
                    for k, v in result['ret_'].items():
                        if k not in sucset:
                            # print to the client console
                            self._output_ret(result['ret_'], result['out'])
            disconnectedSyndic = set(comeSubList).difference(resultPingSet)
            if disconnectedSyndic:
                print_cli('With disconnected syndic: %s' % list(disconnectedSyndic))
            if len(missedList) > 0 or len(lossSyndic) > 0:
                print('missed maids: {}\nmissed minions: {}'.format(
                    ",".join(lossSyndic), ",".join(missedList)))
            if len(repeatet) > 0:
                print('Found some minions that ran repeatedly: {}'.format(repeatet))
            print('normal size: {}\nmissed size: {}\nempty size: {}'.format(
                normalsize, len(missedList), len(emptyRet)))

        # NOTE: Return code is set here based on if all minions
        # returned 'ok' with a retcode of 0.
        # This is the final point before the 'salt' cmd returns,
        # which is why we set the retcode here.
        # if retcodes.count(0) < len(retcodes):
        #     sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
        #     sys.exit(11)
        if self.options.batch:
            bwait = self.config.get('batch_wait', 0)
            redisChannel = clientPub.getRedisInstance().pubsub()
            percentBatch = 0.0
            try:
                if self.options.batch.endswith('%'):
                    stripBatch = float(self.options.batch.strip('%'))
                    percentBatch = stripBatch / 100
                else:
                    batch_hold = int(self.options.batch)
            except:
                print('An int or a percentage can be used for batch.')
                return
            # find the complete ip list
            if kwargs['tgt'] == '*':
                reGetAllMinionList = []
                wrapFindAcceptMesage = {
                    'type': salt.newrun.FunctionType.FIND_ACCEPT,
                    'tempTopic': ('fa_%s' % str(salt.newrun.uuid.uuid1()))
                }
                redisChannel.subscribe(wrapFindAcceptMesage['tempTopic'])
                clientPub.publishToSyndicSub(salt.newrun.json.dumps(wrapFindAcceptMesage))
                from salt.newrun import (json, byteify, MessageType)
                ping1stCount = 0
                work1stcount = 0
                for message in redisChannel.listen():
                    try:
                        messageJson = byteify(message)
                        if messageJson['type'] == 'message':
                            resultMessage = messageJson['data']
                            try:
                                callResult = json.loads(resultMessage, encoding='utf-8')
                                callResult = byteify(callResult)
                                if isinstance(callResult, dict):
                                    if 'type' in callResult:
                                        messageType = callResult['type']
                                        messageIp = callResult['sub_ip']
                                        if messageType == MessageType.PING and messageIp in comeSubList:
                                            ping1stCount += 1
                                        elif messageType == MessageType.WORK or messageType == MessageType.INTERRUPT:
                                            work1stcount += 1
                                            if ping1stCount == work1stcount and work1stcount == len(comeSubList):
                                                break
                                        else:
                                            main_log.info('invalid callresult: %s' % callResult)
                                    else:
                                        # collect the accepted ip lists reported by each sub
                                        retJsonObj = callResult['ip_list']
                                        if retJsonObj:
                                            reGetAllMinionList = reGetAllMinionList + retJsonObj
                                # print('callResult: %s' % callResult)
                            except:
                                main_log.info(traceback.format_exc())
                    except:
                        main_log.info(traceback.format_exc())
                kwargs['tgt'] = reGetAllMinionList
                batch_init = set(kwargs['tgt'])
                redisChannel.unsubscribe(wrapFindAcceptMesage['tempTopic'])
                redisChannel.connection_pool.disconnect()
            else:
                if kwargs['tgt_type'] == 'glob':
                    batch_init.add(kwargs['tgt'])
                else:
                    batch_init = set(kwargs['tgt'])
            kwargs['tgt_type'] = 'list'
            wrapMesage['kwargs'] = kwargs
            if percentBatch > 0:
                # range() in batchExecuteCallback needs an int
                batch_hold = int(percentBatch * len(batch_init))
            redisChannel.subscribe(wrapMesage['tempTopic'])
            batchExecuteCallback(self.newopt['id'], clientPub, redisChannel)
        else:
            executeCallback(self.newopt['id'])
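# --- Illustrative only: the two accepted forms of the batch option handled
# above. The flag name follows the standard salt CLI (-b/--batch-size) and is
# an assumption here; only self.options.batch is referenced in this module.
#
#   saltx '*' test.ping -b 50       # run on 50 targets at a time (batch_hold = 50)
#   saltx '*' test.ping -b 10%      # run on 10% of the resolved target list at a time
#
# The percentage form is converted to a count only after the full target list
# is known: batch_hold = int(percentBatch * len(batch_init)).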