def _parse_args(self, args):
    """Populate self._args from a comma-separated list of 'key:value' pairs."""
    for entry in args.split(','):
        parts = entry.split(':')
        key, value = parts[0], parts[1]
        LOG.debug('key: %s, value: %s' % (key, value))
        self._args[key] = value
    # let the instance normalize/merge the freshly parsed arguments
    self._unite_args()
def _send2slave(self, content):
    """Send `content` to every slave host concurrently.

    Fix: the original called t.join() immediately after t.start() inside
    the loop, which serialized the sends and defeated the threading.
    Now all threads are started first, then joined.
    """
    threads = []
    for index in range(0, len(self._swarm_list)):
        LOG.info('starting thread %d to deal socket' % (index))
        t = threading.Thread(target=self._send2one,
                             args=(content, self._swarm_list[index],
                                   self._swarm_port_list[index]))
        t.start()
        threads.append(t)
    # wait for all sends to complete
    for t in threads:
        t.join()
def _scan_target(self, target):
    """Resolve `target`; return 'target;' on success, '' if resolution fails.

    Fix: `except Exception, e` is Python-2-only syntax; `e` was unused.
    """
    try:
        LOG.debug('scan target: %s' % target)
        socket.getaddrinfo(target, None)
        return target + ';'
    except Exception:
        # unresolvable / invalid target: treat as not alive
        return ''
def _scan_target(self, target):
    """DNS-resolve `target`: return 'target;' when it resolves, else ''.

    Fix: replaced the Python-2-only `except Exception, e` clause with
    `except Exception:` (the bound name was never used).
    """
    try:
        LOG.debug('scan target: %s' % target)
        socket.getaddrinfo(target, None)
        return target + ';'
    except Exception:
        return ''
def _parse_args(self, args):
    """Parse 'key:value' pairs (comma separated) into the self._args mapping."""
    pairs = [item.split(':') for item in args.split(',')]
    for pair in pairs:
        LOG.debug('key: %s, value: %s' % (pair[0], pair[1]))
        self._args[pair[0]] = pair[1]
    self._unite_args()
def get_result(self):
    """
    Get result from result queue, do task index confirm meanwhile
    Return '' if all tasks have been confirmed
    Raises:
        Queue.Empty: can not get response within timeout
    """
    # Fixes: (1) result was taken as resultl[2], truncating any result that
    # itself contains '|' — rejoin the tail instead (consistent with the
    # sibling implementation); (2) duplicates used to recurse, which could
    # exhaust the stack on a long run of duplicate confirmations — loop.
    while True:
        # check whether all task has been confirmed; if so, return ''
        if self._task_confirm_num == self._cur_task_num:
            return ''
        # may throw Queue.Empty here
        task_result = self._result_queue.get(block=True, timeout=self._timeout)
        resultl = task_result.split('|')
        index = int(resultl[1], 10)
        result = '|'.join(resultl[2:])
        # duplicate confirmation: discard and try to get result again
        if self._task_confirm_list[index] != 0:
            continue
        self._task_confirm_list[index] = 1
        self._task_confirm_num += 1
        LOG.log(REPORT, 'task index:%d result:%s' % (index, result))
        return result
def main():
    """Master entry point: parse config/CLI, wake the swarm, distribute tasks.

    Fix: `print str(e)` is a Python-2-only print statement; the
    parenthesized call form works on both Python 2 and 3.
    """
    args = argparse.Namespace()
    try:
        # get all available modules
        args.modules = get_modules()
        # parse args from cli and configuration file
        # arguments parsed from cli will cover origin arguments in configuration file
        configfile_parse(args)
        cli_parse(args)
        begin_banner()
        init_logger(args.logfile, args.verbose, args.disable_col)
    except SwarmBaseException as e:
        # logger is not initialized yet, fall back to plain print
        print(str(e))
        end_banner()
        return
    # now use logger instead of simple print
    try:
        m = MSwarm(args)
        # wake slaves up now
        m.waken_swarm()
        m.parse_distribute_task()
    except SwarmBaseException as e:
        LOG.critical(str(e))
    finally:
        end_banner()
def get_result(self):
    """
    Get result from result queue, do task index confirm meanwhile
    Return '' if all tasks have been confirmed
    Raises:
        Queue.Empty: can not get response within timeout
    """
    # Fix: duplicates used to recurse into get_result(); a long run of
    # duplicate confirmations could exhaust the call stack. Loop instead.
    while True:
        # check whether all task has been confirmed; if so, return ''
        if self._task_confirm_num == self._cur_task_num:
            return ''
        # may throw Queue.Empty here
        task_result = self._result_queue.get(block=True, timeout=self._timeout)
        resultl = task_result.split('|')
        index = int(resultl[1], 10)
        # rejoin the tail so a result containing '|' survives intact
        result = '|'.join(resultl[2:])
        # duplicate confirmation: discard and wait for the next result
        if self._task_confirm_list[index] != 0:
            continue
        self._task_confirm_list[index] = 1
        self._task_confirm_num += 1
        LOG.debug('get result: %s' % task_result.replace('\n', ' '))
        return result
def _request(self, url, body):
    """Issue one request; return 'url,body;' if any flag appears in the reply, else ''."""
    LOG.debug('request target: ' + url)
    response = self._call_method(url, data=body, headers=self._headers,
                                 cookies=self._cookies)
    text = response.text
    for flag in self._flags:
        if text.find(flag) != -1:
            # first matching flag wins
            return url + ',' + body + ';'
    return ''
def _send2slave(self, content):
    """Push `content` to all slaves in parallel.

    Fix: joining each thread right after starting it made the loop fully
    sequential; threads are now started first and joined afterwards.
    """
    workers = []
    for index in range(0, len(self._swarm_list)):
        LOG.info('starting thread %d to deal socket' % (index))
        t = threading.Thread(target=self._send2one,
                             args=(content, self._swarm_list[index],
                                   self._swarm_port_list[index]))
        t.start()
        workers.append(t)
    for t in workers:
        t.join()
def get_task(self):
    """Take one task off the queue; record its flag/index and return (flag, body)."""
    raw = self._task_queue.get()
    LOG.debug('get task:%s' % raw)
    fields = raw.split('|')
    self._cur_task_flag = fields[0]
    self._cur_task_index = fields[1]
    # everything after the second '|' is the task body (may itself contain '|')
    return self._cur_task_flag, '|'.join(fields[2:])
def get_task(self):
    """Pop the next '|'-framed task: returns (flag, body), stashing flag and index."""
    raw = self._task_queue.get()
    LOG.debug('get task: %s' % raw.replace('\n', ' '))
    pieces = raw.split('|')
    self._cur_task_flag = pieces[0]
    self._cur_task_index = pieces[1]
    # rejoin the tail: the body may legitimately contain '|'
    body = '|'.join(pieces[2:])
    return self._cur_task_flag, body
def _put_task(self, pre_str, task):
    """
    Put task into task queue, update current task list and current
    task number meanwhile
    """
    framed = ":".join([pre_str, str(self._cur_task_num), task])
    LOG.debug('put task into queue:%s' % framed)
    self._task_queue.put(framed)
    self._cur_task_num += 1
    self._cur_task_list.append(framed)
def _send2swarm_r(self, content):
    """Send `content` to every swarm host and collect their replies.

    Returns the list of non-empty responses appended by the worker threads
    (list.append is atomic, so the shared list is safe).
    Fix: the original joined each thread immediately after starting it,
    serializing the whole exchange; start all first, then join.
    """
    ret = []
    threads = []
    for index in range(0, len(self._swarm_list)):
        LOG.info('starting thread %d to deal socket' % (index))
        t = threading.Thread(target=self._send2one_r,
                             args=(content, self._swarm_list[index],
                                   self._args.s_port, ret))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return ret
def put_task(self, pre_str, task):
    """
    Put task into task queue, update current task list and current task
    number meanwhile.
    """
    # frame as prefix|index|body; index doubles as the confirmation slot
    framed = "|".join([pre_str, str(self._cur_task_num), task])
    LOG.debug('put task: %s' % framed.replace('\n', ' '))
    self._task_queue.put(framed)
    self._cur_task_num += 1
    self._cur_task_list.append(framed)
def _put_task(self, pre_str, task):
    """
    Put task into task queue, update current task list and current
    task number meanwhile
    """
    entry = ":".join([pre_str, str(self._cur_task_num), task])
    LOG.debug('put task into queue:%s' % entry)
    self._task_queue.put(entry)
    # bump the counter and remember the framed task for re-queueing
    self._cur_task_num += 1
    self._cur_task_list.append(entry)
def _send2swarm_r(self, content):
    """Broadcast `content` to the swarm concurrently; return collected replies.

    Fix: start+join per loop iteration ran the sockets one at a time;
    threads are now all started before any join.
    """
    LOG.info('connect to swarm...')
    ret = []
    workers = []
    for index in range(0, len(self._swarm_list)):
        t = threading.Thread(target=self._send2one_r,
                             args=(content, self._swarm_list[index],
                                   self._args.s_port, ret))
        t.start()
        workers.append(t)
    for t in workers:
        t.join()
    LOG.info('get %d response from swarm' % len(ret))
    return ret
def _send2swarm_r(self, content):
    """Send `content` to each swarm host in parallel; return their replies.

    Fix: joining inside the start loop defeated the concurrency —
    start every thread first, then join them all.
    """
    ret = []
    threads = []
    for index in range(0, len(self._swarm_list)):
        LOG.info('starting thread %d to deal socket' % (index))
        t = threading.Thread(target=self._send2one_r,
                             args=(content, self._swarm_list[index],
                                   self._args.s_port, ret))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return ret
def _send2swarm_r(self, content):
    """Concurrently deliver `content` to the swarm and gather responses.

    Fix: per-iteration start+join serialized the sends; all threads now
    run before any is joined.
    """
    LOG.info('connect to swarm...')
    ret = []
    pool = []
    for index in range(0, len(self._swarm_list)):
        t = threading.Thread(target=self._send2one_r,
                             args=(content, self._swarm_list[index],
                                   self._args.s_port, ret))
        t.start()
        pool.append(t)
    for t in pool:
        t.join()
    LOG.info('get %d response from swarm' % len(ret))
    return ret
def reorganize_tasks(self):
    """Empty the task queue, then re-queue every unconfirmed task."""
    # first clear tasks in task queue
    while True:
        try:
            self._task_queue.get(block=False)
        except Queue.Empty:
            break
    # put tasks which have not been confirmed again
    for idx, confirmed in enumerate(self._task_confirm_list):
        if confirmed == 0:
            LOG.debug('put task into queue again: %s' % self._cur_task_list[idx])
            self._task_queue.put(self._cur_task_list[idx])
def _parse_args_for_swarm(self):
    """Serialize the options the slaves need into a 'k:v,k:v,...' string."""
    keys = ('m_addr', 'm_port', 'authkey', 'process_num',
            'thread_num', 'domain_timeout')
    s = ''
    for key in keys:
        s = self._put_key_value(s, key, getattr(self._args, key))
    # remove the last ','
    s = s[:-1]
    LOG.debug('args pass to swarm-s: ' + s)
    return s
def _parse_args_for_swarm(self):
    """Pack master address, auth key and worker settings into one arg string."""
    buf = ''
    for name in ('m_addr', 'm_port', 'authkey', 'process_num',
                 'thread_num', 'domain_timeout'):
        buf = self._put_key_value(buf, name, getattr(self._args, name))
    # remove the last ','
    buf = buf[:-1]
    LOG.debug('args pass to swarm-s: ' + buf)
    return buf
def parse_distribute_task(self):
    """Start the manager, expose the queues, and run the domain scan if enabled."""
    self._manager = SwarmManager(address=('', self._args.m_port),
                                 authkey=self._args.authkey)
    self._manager.start()
    self._task_queue = self._manager.get_task_queue()
    self._result_queue = self._manager.get_result_queue()
    LOG.info('begin to parse task...')
    # `== True` kept on purpose: the flag may come from config parsing and
    # might not be a real bool — TODO confirm before simplifying
    if self._args.enable_domain_scan == True:
        self.scan_domain()
    self._shutdown()
def reorganize_tasks(self):
    """Drain the task queue and put back all tasks not yet confirmed."""
    # first clear tasks in task queue
    while True:
        try:
            self._task_queue.get(block=False)
        except Queue.Empty:
            break
    # put tasks which have not been confirmed again
    for pos, flag in enumerate(self._task_confirm_list):
        if flag == 0:
            pending = self._cur_task_list[pos]
            LOG.debug('put task: %s' % pending.replace('\n', ' '))
            self._task_queue.put(pending)
def parse_distribute_task(self):
    """Bring up the queue manager, then dispatch the domain scan when requested."""
    manager = SwarmManager(address=('', self._args.m_port),
                           authkey=self._args.authkey)
    manager.start()
    self._manager = manager
    self._task_queue = manager.get_task_queue()
    self._result_queue = manager.get_result_queue()
    LOG.info('begin to parse task...')
    # explicit `== True` preserved — the value's type is set elsewhere;
    # NOTE(review): confirm it is a real bool before relying on truthiness
    if self._args.enable_domain_scan == True:
        self.scan_domain()
    self._shutdown()
def waken_swarm(self):
    """
    Waken all slave hosts to run swarm-s.py and send args to them.
    Synchronize data if need.
    Fix: log message read '"%s"to swarm' (missing space); the ARGS
    substitution is now computed once instead of twice.
    """
    if self._args.waken_cmd != '':
        cmd = self._args.waken_cmd.replace('ARGS', '-p %d' % self._args.s_port)
        LOG.info('sending waken command "%s" to swarm...' % cmd)
        self._send2slave(cmd)
        # time for slave host to create listen on target port
        time.sleep(1)
    s_args = self._parse_args_for_swarm()
    if self._args.sync_data == True:
        s_args += '__SYNC__'
    else:
        s_args += '__CEND__'
    r = self._send2swarm_r(s_args)
    LOG.info('waken %d slaves in swarm' % (len(r)))
    # do data sync here
    if self._args.sync_data == True:
        LOG.info('begin to synchronize data...')
        self._sync_data()
        LOG.info('data synchronize completed')
def _parse_url(self, dst, src):
    """
    Check wether target url 'dst' is in the same domain(include port) with
    url 'src', and convert url into complete url without params.
    Returns:
        String of complete url with query params if it has.
        if target url is not in the same domain, return '';
    """
    LOG.debug('detecting url: ' + dst)
    s_parsed = urlparse.urlparse(src)
    s_scheme = s_parsed.scheme
    s_netloc = s_parsed.netloc
    s_cur_dir = s_parsed.path
    # strip the filename (or the trailing '/') to get src's current directory
    # NOTE(review): assumes src always has a non-empty path — confirm callers
    if s_cur_dir[-1] != '/':
        s_cur_dir = '/'.join(s_cur_dir.split('/')[:-1])
    else:
        s_cur_dir = s_cur_dir[:-1]
    d_parsed = urlparse.urlparse(dst)
    d_scheme = d_parsed.scheme
    # normalize dst's netloc so it always carries an explicit port
    if d_parsed.netloc.find(':') == -1 and d_parsed.netloc != '':
        if d_scheme == 'http':
            d_netloc = d_parsed.netloc + ':80'
        elif d_scheme == 'https':
            d_netloc = d_parsed.netloc + ':443'
        elif d_scheme == '':
            # scheme-relative url: inherit the default port implied by src's scheme
            d_netloc = d_parsed.netloc + ':80' if s_scheme == 'http' else d_parsed.netloc + ':443'
        else:
            d_netloc = d_parsed.netloc
    else:
        d_netloc = d_parsed.netloc
    # add '/' as prefix if the path does not starts with '/'
    if d_parsed.path != '':
        d_path = '/' + d_parsed.path if d_parsed.path[0] != '/' else d_parsed.path
    else:
        d_path = '/'
    d_query = d_parsed.query
    # if it is a relative url
    if d_netloc == '':
        return urlparse.ParseResult(s_scheme, s_netloc, s_cur_dir + d_path, '', d_query, '').geturl()
    elif d_netloc == s_netloc and (d_scheme == s_scheme or d_scheme == ''):
        return urlparse.ParseResult(s_scheme, s_netloc, d_path, '', d_query, '').geturl()
    else:
        # different host/port or scheme: out of scope
        return ''
def get_do_task(self):
    """Spawn worker processes to pull and execute tasks, then wait for them.

    process_num == 0 means one worker per CPU core.
    Fix: the join loop called `proc.join()` on the *list* (AttributeError);
    each process must be joined individually.
    """
    proc = []
    if self._args['process_num'] == 0:
        count = multiprocessing.cpu_count()
    else:
        count = self._args['process_num']
    for _ in range(count):
        p = multiprocessing.Process(target=self._get_do_task_proc)
        p.start()
        proc.append(p)
    for cur in proc:
        cur.join()
    LOG.debug('task completed')
def get_do_task(self):
    """Start the task-worker processes and block until all of them exit.

    A process_num of 0 falls back to one worker per CPU.
    Fix: `proc.join()` was called on the list of processes, which raises
    AttributeError — join each Process (`cur.join()`) instead.
    """
    proc = []
    workers = (multiprocessing.cpu_count()
               if self._args['process_num'] == 0
               else self._args['process_num'])
    for _ in range(workers):
        p = multiprocessing.Process(target=self._get_do_task_proc)
        p.start()
        proc.append(p)
    for cur in proc:
        cur.join()
    LOG.debug('task completed')
def main():
    """Slave entry point: parse the listen port, then serve tasks from master."""
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('-p', dest='s_port', metavar='LISTEN PORT',
                            type=int, required=True,
                            help="Listen port to receive info from master")
        opts = parser.parse_args()
        init_logger('/var/log/swarm_s.log', True, False)
        slave = SSwarm(opts.s_port)
        # Parse arguments from mswarm
        slave.get_parse_args()
        # Ready to get and exec command from master host
        slave.get_do_task()
    except SwarmBaseException as e:
        LOG.debug(str(e))
        return
def _parse_url(self, dst, src):
    """
    Check wether target url 'dst' is in the same domain(include port) with
    url 'src', and convert url into complete url without params.
    Returns:
        String of complete url with query params if it has.
        if target url is not in the same domain, return '';
    """
    LOG.debug('detecting url: ' + dst)
    s_parsed = urlparse.urlparse(src)
    s_scheme = s_parsed.scheme
    s_netloc = s_parsed.netloc
    s_cur_dir = s_parsed.path
    # derive src's directory: drop the filename, or just the trailing '/'
    # (presumes src's path is non-empty — TODO confirm against callers)
    if s_cur_dir[-1] != '/':
        s_cur_dir = '/'.join(s_cur_dir.split('/')[:-1])
    else:
        s_cur_dir = s_cur_dir[:-1]
    d_parsed = urlparse.urlparse(dst)
    d_scheme = d_parsed.scheme
    # give dst's netloc an explicit port for the same-origin comparison below
    if d_parsed.netloc.find(':') == -1 and d_parsed.netloc != '':
        if d_scheme == 'http':
            d_netloc = d_parsed.netloc + ':80'
        elif d_scheme == 'https':
            d_netloc = d_parsed.netloc + ':443'
        elif d_scheme == '':
            # no scheme on dst: use the default port of src's scheme
            d_netloc = d_parsed.netloc + ':80' if s_scheme == 'http' else d_parsed.netloc + ':443'
        else:
            d_netloc = d_parsed.netloc
    else:
        d_netloc = d_parsed.netloc
    # add '/' as prefix if the path does not starts with '/'
    if d_parsed.path != '':
        d_path = '/' + d_parsed.path if d_parsed.path[0] != '/' else d_parsed.path
    else:
        d_path = '/'
    d_query = d_parsed.query
    # if it is a relative url
    if d_netloc == '':
        return urlparse.ParseResult(s_scheme, s_netloc, s_cur_dir + d_path, '', d_query, '').geturl()
    elif d_netloc == s_netloc and (d_scheme == s_scheme or d_scheme == ''):
        return urlparse.ParseResult(s_scheme, s_netloc, d_path, '', d_query, '').geturl()
    else:
        return ''
def _parse_charset(self):
    """Expand 'a-z'-style ranges in the configured charset and dedupe it.

    Returns the expanded charset with first-occurrence order preserved.
    Raises (after logging) on a malformed charset, e.g. a dangling '-'.
    Fix: `except Exception, e` is Python-2-only syntax and `e` was unused;
    removed the dead commented-out raise.
    """
    try:
        charset = self._args.domain_charset
        # expand every 'x-y' range into the explicit run of characters
        while True:
            index = charset.find('-')
            if index == -1:
                break
            begin_chr = charset[index - 1]
            end_chr = charset[index + 1]
            dst = ''
            for x in range(ord(begin_chr), ord(end_chr) + 1):
                dst += chr(x)
            charset = charset.replace(begin_chr + '-' + end_chr, dst)
        # drop duplicates while keeping first-occurrence order
        ret = ''.join(x for i, x in enumerate(charset) if charset.index(x) == i)
        LOG.debug('charset: %s' % ret)
        return ret
    except Exception:
        LOG.critical('invalid subdomain name charset, or format error')
        raise
def _get_do_task_proc(self):
    """Worker loop: connect to master, load the module, run tasks until '__off__'."""
    self._manager = SSwarmManager(address=(self._args.m_addr, self._args.m_port),
                                  authkey=self._args.authkey)
    LOG.debug('load module: ' + self._args.mod)
    LOG.debug('begin to get and do task...')
    try:
        module = importlib.import_module('modules.' + self._args.mod + '.' + self._args.mod)
    except ImportError:
        raise SwarmModuleException('an error occured when load module:' + self._args.mod)
    # create Slave class of this module
    mod_slave = getattr(module, 'Slave')(self._args)
    while True:
        flag, task = self._manager.get_task()
        if flag == '__off__':
            break
        # else use module to do task
        self._manager.put_result(mod_slave.do_task(task))
def get_do_task(self):
    """Spawn worker processes, start the master-listener thread, wait for workers."""
    if self._args.process_num == 0:
        worker_count = multiprocessing.cpu_count()
    else:
        worker_count = self._args.process_num
    proc = []
    for _ in range(worker_count):
        p = multiprocessing.Process(target=self._get_do_task_proc)
        p.start()
        proc.append(p)
    # start a new thread to listen command from master host;
    # it is a daemon so we need not wait for it to exit
    listener = threading.Thread(target=self._response_master)
    listener.daemon = True
    listener.start()
    for cur in proc:
        cur.join()
    LOG.debug('task completed')
def _send2one(self, content, ip, port):
    """Send `content` to one slave at (ip, port); log failures instead of raising."""
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(self._args.timeout)
        LOG.debug('connect to %s:%d...' % (ip, port))
        conn.connect((ip, port))
        conn.send(content)
        conn.close()
        LOG.debug('connection to %s:%d close' % (ip, port))
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
    except socket.error as arg:
        LOG.error('socket error while connecting to %s:%d errno %d: %s' % (ip, port, arg[0], arg[1]))
def get_do_task(self):
    """Run the configured number of worker processes (0 = per-core) plus the
    daemon thread that answers the master, then join the workers."""
    count = self._args.process_num or multiprocessing.cpu_count()
    proc = []
    for _ in range(count):
        worker = multiprocessing.Process(target=self._get_do_task_proc)
        worker.start()
        proc.append(worker)
    # daemon listener for commands from the master host — no join needed
    t = threading.Thread(target=self._response_master)
    t.daemon = True
    t.start()
    for cur in proc:
        cur.join()
    LOG.debug('task completed')
def _parse_charset(self):
    """Expand every 'x-y' range in the subdomain charset and remove duplicates.

    Returns the expanded, order-preserving charset string; logs and
    re-raises on malformed input.
    Fix: replaced Python-2-only `except Exception, e` (unused `e`) and
    deleted the commented-out raise.
    """
    try:
        charset = self._args.domain_charset
        while True:
            index = charset.find('-')
            if index == -1:
                break
            begin_chr = charset[index - 1]
            end_chr = charset[index + 1]
            dst = ''
            for x in range(ord(begin_chr), ord(end_chr) + 1):
                dst += chr(x)
            charset = charset.replace(begin_chr + '-' + end_chr, dst)
        # dedupe, keeping the first occurrence of each character
        ret = ''.join(x for i, x in enumerate(charset) if charset.index(x) == i)
        LOG.debug('charset: %s' % ret)
        return ret
    except Exception:
        LOG.critical('invalid subdomain name charset, or format error')
        raise
def main():
    """Entry point of the slave process: read the listen port and start serving."""
    try:
        cli = argparse.ArgumentParser()
        cli.add_argument('-p', dest='s_port', metavar='LISTEN PORT', type=int,
                         required=True,
                         help="Listen port to receive info from master")
        parsed = cli.parse_args()
        init_logger('/var/log/swarm_s.log', True, False)
        sswarm = SSwarm(parsed.s_port)
        # Parse arguments from mswarm
        sswarm.get_parse_args()
        # Ready to get and exec command from master host
        sswarm.get_do_task()
    except SwarmBaseException as e:
        LOG.debug(str(e))
        return
def _send2one(self, content, ip, port):
    """One-shot send of `content` to a slave; timeouts and socket errors are logged."""
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(self._args.timeout)
        LOG.debug('connect to %s:%d...' % (ip, port))
        sock.connect((ip, port))
        sock.send(content)
        sock.close()
        LOG.debug('connection to %s:%d close' % (ip, port))
    # timeout must be caught before the more general socket.error
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
    except socket.error as arg:
        LOG.error('socket error while connecting to %s:%d errno %d: %s' % (ip, port, arg[0], arg[1]))
def _get_do_task_proc(self):
    """Worker loop: pull ':'-framed tasks from master and push back results.

    Fix: a task with an unknown flag used to fall through and re-send the
    *previous* iteration's `result` (or raise NameError on the first task);
    unknown flags are now logged and skipped.
    """
    self._manager = SwarmManager(address=(self._args['m_addr'], self._args['m_port']),
                                 authkey=self._args['authkey'])
    self._manager.connect()
    self._task_queue = self._manager.get_task_queue()
    self._result_queue = self._manager.get_result_queue()
    # init scanners and other modules
    self._init_module()
    LOG.debug('begin to get and do task...')
    while True:
        task = self._task_queue.get()
        LOG.debug('get task:%s' % task)
        taskl = task.split(':')
        task_flag = taskl[0]
        task_index = taskl[1]
        if task_flag == '__doms__':
            result = self.do_domain_scan(taskl[2:])
        elif task_flag == '__off__':
            break
        else:
            LOG.warning('unknown task flag:%s' % task_flag)
            continue
        result = ":".join([task_flag, task_index, result])
        LOG.debug('put result:%s' % result)
        self._result_queue.put(result)
def _get_do_task_proc(self):
    """Worker process body: fetch tasks, run the domain scan, return results.

    Fix: when the flag was neither '__doms__' nor '__off__' the loop kept
    the stale `result` from the previous task (NameError on the very first)
    and re-submitted it; such tasks are now logged and skipped.
    """
    self._manager = SwarmManager(address=(self._args['m_addr'], self._args['m_port']),
                                 authkey=self._args['authkey'])
    self._manager.connect()
    self._task_queue = self._manager.get_task_queue()
    self._result_queue = self._manager.get_result_queue()
    # init scanners and other modules
    self._init_module()
    LOG.debug('begin to get and do task...')
    while True:
        task = self._task_queue.get()
        LOG.debug('get task:%s' % task)
        taskl = task.split(':')
        task_flag = taskl[0]
        task_index = taskl[1]
        if task_flag == '__doms__':
            result = self.do_domain_scan(taskl[2:])
        elif task_flag == '__off__':
            break
        else:
            LOG.warning('unknown task flag:%s' % task_flag)
            continue
        result = ":".join([task_flag, task_index, result])
        LOG.debug('put result:%s' % result)
        self._result_queue.put(result)
def _get_do_task_proc(self):
    """Worker body: attach to the master's manager, import the task module's
    Slave class, and execute tasks until the '__off__' flag arrives."""
    self._manager = SSwarmManager(address=(self._args.m_addr, self._args.m_port),
                                  authkey=self._args.authkey)
    LOG.debug('load module: ' + self._args.mod)
    LOG.debug('begin to get and do task...')
    mod_path = 'modules.' + self._args.mod + '.' + self._args.mod
    try:
        module = importlib.import_module(mod_path)
    except ImportError:
        raise SwarmModuleException('an error occured when load module:' + self._args.mod)
    # instantiate the module's Slave with the shared arguments
    slave = getattr(module, 'Slave')(self._args)
    while True:
        flag, task = self._manager.get_task()
        if flag == '__off__':
            break
        result = slave.do_task(task)
        self._manager.put_result(result)
def _receive_master(self):
    """Accept one master connection, read until '__EOF__', ack, return the payload."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # incase 'Address already in use error'
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('', self._s_port))
    LOG.debug('listen on port:%d' % self._s_port)
    listener.listen(1)
    sock, addr = listener.accept()
    LOG.debug('receive from master host...')
    buff = ''
    while True:
        chunk = sock.recv(4096)
        buff += chunk
        if chunk.find('__EOF__') != -1:
            break
    sock.send('ack')
    sock.close()
    listener.close()
    # cut off last __EOF__
    buff = buff[:-7]
    # return to origin args (un-escape embedded terminators)
    return buff.replace('__EOF___', '__EOF__')
def _receive_master(self):
    """Listen once on the slave port, collect the '__EOF__'-terminated blob
    from the master, send an ack and hand back the unescaped payload."""
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # incase 'Address already in use error'
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('', self._s_port))
    LOG.debug('listen on port:%d' % self._s_port)
    srv.listen(1)
    conn, peer = srv.accept()
    LOG.debug('receive from master host...')
    data = ''
    while True:
        part = conn.recv(4096)
        data += part
        if part.find('__EOF__') != -1:
            break
    conn.send('ack')
    conn.close()
    srv.close()
    # cut off last __EOF__
    data = data[:-7]
    # return to origin args
    data = data.replace('__EOF___', '__EOF__')
    return data
def _send2one(self, content, ip, port):
    """Send `content` to one slave; a timeout is logged, not raised.

    Fix: `except socket.timeout, e` is Python-2-only syntax; `e` was unused.
    NOTE(review): only socket.timeout is handled here — a refused
    connection still propagates socket.error to the caller; confirm intent.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self._args.timeout)
        LOG.info('connecting to %s:%d...' % (ip, port))
        s.connect((ip, port))
        s.send(content)
        s.close()
        LOG.info('connection to %s:%d close' % (ip, port))
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
def _send2one(self, content, ip, port):
    """Deliver `content` to (ip, port), swallowing (but logging) timeouts.

    Fix: replaced the Python-2-only `except socket.timeout, e` clause; the
    bound exception was never used.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self._args.timeout)
        LOG.info('connecting to %s:%d...' % (ip, port))
        s.connect((ip, port))
        s.send(content)
        s.close()
        LOG.info('connection to %s:%d close' % (ip, port))
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
def _send2one_r(self, content, ip, port, result):
    """Send EOF-framed `content` to one host and append its reply to `result`.

    `result` is a list shared across threads; list.append is atomic so
    concurrent workers are safe. Returns '' on socket failure.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self._args.timeout)
        LOG.debug('connect to %s:%d...' % (ip, port))
        s.connect((ip, port))
        # escape embedded terminators before appending the real one
        s.send(content.replace('__EOF__', '__EOF___'))
        s.send('__EOF__')
        reply = s.recv(4096)
        if reply != '':
            result.append(reply)
        s.close()
        LOG.debug('connection to %s:%d close' % (ip, port))
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
        return ''
    except socket.error as arg:
        LOG.error('socket error while connecting to %s:%d errno %d: %s' % (ip, port, arg[0], arg[1]))
        return ''
def get_parse_args(self):
    """Receive the arg string from master, parse it, and sync data if asked."""
    raw = self._receive_master()
    # the last 8 bytes carry the sync flag ('__SYNC__' or '__CEND__')
    sync_flag = raw[-8:]
    payload = raw[:-8]
    self._parse_args(payload)
    LOG.debug('complete parsing args')
    if sync_flag == '__SYNC__':
        # do data sync here
        LOG.debug('begin to synchronize data...')
        self._sync_data()
        LOG.debug('data synchronize completed')
def _send2one_r(self, content, ip, port, result):
    """Worker: push `content` (with '__EOF__' framing) to one host, read one
    4096-byte reply and, when non-empty, collect it into the shared list."""
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(self._args.timeout)
        LOG.debug('connect to %s:%d...' % (ip, port))
        conn.connect((ip, port))
        # any literal terminator inside the payload is escaped first
        conn.send(content.replace('__EOF__', '__EOF___'))
        conn.send('__EOF__')
        answer = conn.recv(4096)
        if answer != '':
            result.append(answer)
        conn.close()
        LOG.debug('connection to %s:%d close' % (ip, port))
    except socket.timeout:
        LOG.warning('%s:%d lost response' % (ip, port))
        return ''
    except socket.error as arg:
        LOG.error('socket error while connecting to %s:%d errno %d: %s' % (ip, port, arg[0], arg[1]))
        return ''
def get_parse_args(self):
    """Pull the argument blob from the master, strip and honour the trailing
    8-byte sync flag, then parse the rest."""
    received = self._receive_master()
    flag, body = received[-8:], received[:-8]
    self._parse_args(body)
    LOG.debug('complete parsing args')
    # '__SYNC__' asks the slave to mirror data from the master
    if flag == '__SYNC__':
        LOG.debug('begin to synchronize data...')
        self._sync_data()
        LOG.debug('data synchronize completed')
def __init__(self, args):
    """Store args and build the target and swarm host lists.

    Raises:
        SwarmUseException: if either the target or the swarm list fails to parse.
    """
    self._args = args
    # count of responsive slaves; updated elsewhere (e.g. after waking the swarm)
    self._swarm_num = 0
    try:
        LOG.info('begin to parse target list')
        # parse target list
        self._args.target_list = getlist(args.target, args.target_file)
        LOG.log(REPORT, 'target list parse completed')
    except SwarmBaseException as e:
        raise SwarmUseException('parse target error: ' + str(e))
    try:
        LOG.info('begin to parse swarm list')
        # parse swarm list
        # a non-empty waken command implies per-host ports, so parse those too
        if args.waken_cmd != '':
            self._swarm_list, self._swarm_port_list = getswarmlist(args.swarm, args.swarm_file)
        else:
            self._swarm_list = getlist(args.swarm, args.swarm_file)
        LOG.log(REPORT, 'swarm list parse completed')
    except SwarmBaseException as e:
        raise SwarmUseException('parse swarm error: ' + str(e))
def __init__(self, args):
    """Keep a reference to args and parse both host lists up front.

    Raises:
        SwarmUseException: wrapping any SwarmBaseException from list parsing.
    """
    self._args = args
    # responsive-slave counter, starts unknown
    self._swarm_num = 0
    try:
        LOG.info('begin to parse target list')
        # parse target list
        self._args.target_list = getlist(args.target, args.target_file)
        LOG.log(REPORT, 'target list parse completed')
    except SwarmBaseException as e:
        raise SwarmUseException('parse target error: ' + str(e))
    try:
        LOG.info('begin to parse swarm list')
        # parse swarm list — with ports when a waken command is configured
        if args.waken_cmd != '':
            self._swarm_list, self._swarm_port_list = getswarmlist(
                args.swarm, args.swarm_file)
        else:
            self._swarm_list = getlist(args.swarm, args.swarm_file)
        LOG.log(REPORT, 'swarm list parse completed')
    except SwarmBaseException as e:
        raise SwarmUseException('parse swarm error: ' + str(e))
def waken_swarm(self):
    """
    Waken all slave hosts to run swarm-s.py and send args to them.
    Synchronize data if need.
    Fix: the log message read '"%s"to swarm' (missing space); the ARGS
    substitution is also computed once rather than twice.
    """
    if self._args.waken_cmd != '':
        cmd = self._args.waken_cmd.replace('ARGS', '-p %d' % self._args.s_port)
        LOG.info('sending waken command "%s" to swarm...' % cmd)
        self._send2slave(cmd)
        # time for slave host to create listen on target port
        time.sleep(1)
    s_args = self._parse_args_for_swarm()
    if self._args.sync_data == True:
        s_args += '__SYNC__'
    else:
        s_args += '__CEND__'
    r = self._send2swarm_r(s_args)
    LOG.info('waken %d slaves in swarm' % (len(r)))
    # do data sync here
    if self._args.sync_data == True:
        LOG.info('begin to synchronize data...')
        self._sync_data()
        LOG.info('data synchronize completed')
def getlist(target='', target_file=''):
    """
    Return integrated ip and domain name list from target list and file,
    with network segment parsed.
    Fixes: `except socket.timeout, e` is Python-2-only syntax (`e` unused);
    the file handle leaked if reading raised — `with` now closes it.
    """
    try:
        LOG.info('begin to parse target list')
        iplist = []
        if target != '':
            iplist.extend(target)
        if target_file != '':
            with open(target_file, 'r') as f:
                iplist.extend(f.read().splitlines())
        # parse network segment and check
        iplist = _unite_list(iplist)
        LOG.info('parse completed')
        return iplist
    except socket.timeout:
        LOG.critical('time out when parsing target')
        raise SwarmNetException('time out when parsing target')
def getswarmlist(swarm='', swarm_file=''):
    """
    Return integrated ip and domain name list with port list from swarm
    list and file like (['127.0.0.1','127.0.0.2','github.com'],[80,90,90]).
    Fixes: Python-2-only `except socket.timeout, e` clause (`e` unused);
    the file is now opened via `with` so it cannot leak on a read error.
    """
    try:
        LOG.info('begin to parse swarm list')
        rawlist = []
        if swarm != '':
            rawlist.extend(swarm)
        if swarm_file != '':
            with open(swarm_file, 'r') as f:
                rawlist.extend(f.read().splitlines())
        iplist, portlist = _unite_swarmlist(rawlist)
        LOG.info('parse completed')
        return iplist, portlist
    except socket.timeout:
        LOG.critical('time out when parsing target')
        raise SwarmNetException('time out when parsing target')
def waken_swarm(self):
    """
    Waken all slave hosts to run swarm-s and send args to them.
    Synchronize data if need.
    Raises:
        SwarmNetException: when no slave answers the status probe.
    Fix: the log message read '"%s"to swarm' (missing space); the ARGS
    substitution is computed once.
    """
    if self._args.waken_cmd != '':
        cmd = self._args.waken_cmd.replace('ARGS', '-p %d' % self._args.s_port)
        LOG.info('send waken command "%s" to swarm' % cmd)
        self._send2slave(cmd)
        LOG.log(REPORT, 'sending waken command completed')
    LOG.info('try to detect swarm status')
    # time for slave host to create listen on target port
    time.sleep(2)
    s_args = self._parse_args_for_swarm()
    if self._args.sync_data == True:
        s_args += '__SYNC__'
    else:
        s_args += '__CEND__'
    r = self._send2swarm_r(s_args)
    self._swarm_num = len(r)
    LOG.log(REPORT, 'waken %d slaves in swarm' % self._swarm_num)
    if self._swarm_num == 0:
        raise SwarmNetException('no salve can work now. mission terminates')
    # do data sync here
    if self._args.sync_data == True:
        LOG.info('begin to synchronize data...')
        self._sync_data()
        LOG.info('data synchronize completed')
def parse_distribute_task(self):
    """Validate options, load the task module, then run rounds of task
    decomposition/distribution until the module produces no more subtasks.

    Raises:
        SwarmUseException: invalid option values
        SwarmModuleException: the task module failed to import
        SwarmSlaveException: every slave lost response mid-mission
    """
    # do some common check here
    if self._args.task_granularity < 0 or self._args.task_granularity > 3:
        raise SwarmUseException('invalid task granularity, it should be one number of 1-3')
    if self._args.process_num < 0:
        raise SwarmUseException('process number can not be negative')
    if self._args.thread_num <= 0:
        raise SwarmUseException('thread number should be positive')
    # connect to db server
    LOG.info('try to connect to db server: %s:%d' % (self._args.db_addr, self._args.db_port))
    self._args.db, self._args.coll = init_db(self._args.db_addr, self._args.db_port, self._args.mod)
    LOG.info('Connection to db server completed')
    # start the manager
    self._manager = MSwarmManager(self._args.timeout, address=('', self._args.m_port),
                                  authkey=self._args.authkey)
    try:
        module = importlib.import_module('modules.' + self._args.mod + '.' + self._args.mod)
    except ImportError as e:
        raise SwarmModuleException('an error occured when load module:' + self._args.mod)
    LOG.info('load module: ' + self._args.mod)
    LOG.info('begin to decompose task...')
    mod_master = getattr(module, 'Master')(self._args)
    # begin first round of tasks decomposition and distribution
    roundn = 0
    self._manager.init_task_statistics()
    while True:
        # ask the module for this round's subtasks; empty list ends the mission
        subtaskl = mod_master.generate_subtasks()
        taskn = len(subtaskl)
        if taskn == 0:
            break
        roundn += 1
        LOG.log(REPORT, 'begin round %d' % roundn)
        LOG.info('round %d: put task into queue...' % roundn)
        for cur in subtaskl:
            self._manager.put_task(self._args.mod, cur)
        LOG.log(REPORT, 'round %d: %d tasks have been put into queue' % (roundn, taskn))
        LOG.info('round %d: get result from swarm...' % roundn)
        # get result
        confirmedn = 0
        self._manager.prepare_get_result()
        while True:
            try:
                result = self._manager.get_result()
                # '' means every task of this round has been confirmed
                if result == '':
                    break
                confirmedn += 1
                LOG.log(REPORT, 'round %d: %d/%d tasks have been completed' % (roundn, confirmedn, taskn))
                mod_master.handle_result(result)
            except Queue.Empty as e:
                # check number of slave host, if someone has lost response, reorganize tasks
                # in queue.
                LOG.info('try to detect swarm status')
                r = self._send2swarm_r('ack')
                if len(r) < self._swarm_num:
                    LOG.warning('%d of swarm has lost response. now total swarm:%d'
                                % (self._swarm_num - len(r), len(r)))
                    self._swarm_num = len(r)
                    # if no swarm left
                    if self._swarm_num == 0:
                        raise SwarmSlaveException('no swarm left. task failed')
                    LOG.log(REPORT, 'reorganize tasks in queue...')
                    self._manager.reorganize_tasks()
                    LOG.log(REPORT, 'reorganization completed')
                else:
                    LOG.log(REPORT, 'all swarm works fine. now num: %d' % self._swarm_num)
                # continue
        LOG.log(REPORT, 'round %d over' % roundn)
    LOG.log(REPORT, 'all tasks have been comfirmed')
    # do final report now
    mod_master.report()
    self._shutdown()