Example #1
    def start(self):
        log = self.logger
        log.info("Agent is starting")
        while True:
            if self.is_registered():
                log.debug("Checking if there are any new job orders")
                job_order = self.get_job_order()
                if job_order:
                    log.info("Received job order %s" % json.dumps(job_order, indent=4, sort_keys=True))
                    job = twindb_agent.job.Job(job_order)
                    proc = multiprocessing.Process(target=job.process,
                                                   name="%s-%s" % (job_order["type"], job_order["job_id"]))
                    proc.start()
                    # Dispatcher can't handle parallel jobs. Will wait till job finishes.
                    # After the bug is fixed .join() should be removed
                    # https://bugs.launchpad.net/twindb/+bug/1484342
                    proc.join()

                # Report replication status
                log.debug("Reporting replication status")
                proc = multiprocessing.Process(target=twindb_agent.handlers.report_show_slave_status,
                                               name="report_sss")
                proc.start()

                # Report agent privileges
                log.debug("Reporting agent granted privileges")
                proc = multiprocessing.Process(target=twindb_agent.handlers.report_agent_privileges,
                                               name="report_agent_privileges")
                proc.start()

                # Calling this has the side effect of "joining" any processes which have already finished.
                multiprocessing.active_children()
            else:
                log.warn("This agent(%s) isn't registered" % self.config.server_id)
            time.sleep(self.config.check_period)
Example #2
def stop(processes, stop_event):
  """Stop queuing processes."""
  stop_event.set()
  for p in processes:
    if isinstance(p, mp.Process):
      p.terminate()
  mp.active_children()  # Implicitly joins the terminated processes above.
Example #3
def main(): 
    global badExtensionCounter, failedFlag, pool, failedProcessCounter#, db
    
    sql_setup() # Set-up SQL Database/check to see if exists
    
    # Initiate File Path Handler
    observer = Observer()
    observer.schedule(MyHandler(), path=file_path, recursive=True)
    observer.start()
    
    cpuCount = multiprocessing.cpu_count() # Count all available CPUs
    print "\nTotal CPU Count: %d"%(cpuCount)
    pool = multiprocessing.Pool(4, worker,(processQueue,)) # Create 4 child processes to handle all queued elements
    active = multiprocessing.active_children() # All active child processes
    print "Total number of active child processes: %s\n"%(str(active))
    
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pool.terminate() # Stop all child processes
        pool.join() # Join the processes with parent and terminate
        active = multiprocessing.active_children() # All active child processes, list should be empty at this point.
        print "\nTotal number of active child processes: %s\n"%(str(active))
        shutdown() # Run shutdown sequence        
        observer.stop()
        observer.join()
        sys.exit(1)
Example #4
def start_data_process(config, start_dt_arg, end_dt_arg):
    """Loop through the entity types and perform the main function """
    g_logger.info("Start processing data from %s to %s" %
                  (str(start_dt_arg), str(end_dt_arg)))

    processes = []
    for kind, fetch_intervals in config['kinds'].iteritems():
        # fetch_intervals is an array with format [int, int, bool, bool, str]
        # [Save interval, fetch interval, isMutable, is_ndb, json key]
        interval = dt.timedelta(seconds=int(fetch_intervals[0]))
        fetch_interval = fetch_intervals[1]
        start_dt = start_dt_arg
        end_dt = end_dt_arg
        while start_dt < end_dt:
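            # throttle: only spawn another worker while below the configured process cap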
            if len(active_children()) < config['max_threads']:
                next_dt = min(start_dt + interval, end_dt)
                p = Process(target=fetch_and_process_data,
                    args=(kind, start_dt, next_dt, fetch_interval, config))
                p.start()
                download_params = {"kind": kind, "start_dt": start_dt,
                                   "end_dt": next_dt, "start": time.time()}
                processes.append((p, download_params))
                start_dt = next_dt
            else:
                monitor(config, processes)
            # wait for 2 secs to space out the queries
            time.sleep(2)
    while len(active_children()) > 0:
        monitor(config, processes)
        time.sleep(10)
Example #5
def main_loop(config):
    logger.info(
        u'Run main loop. Worker pool size={}. Sleep time is {}.'.format(
            config.WORKER_POOL_SIZE, config.SLEEP
        ))
    parent_pid = os.getpid()
    while True:
        if check_network_status(config.CHECK_URL, config.HTTP_TIMEOUT):
            required_workers_count = config.WORKER_POOL_SIZE - len(
                active_children())
            if required_workers_count > 0:
                logger.info(
                    'Spawning {} workers'.format(required_workers_count))
                spawn_workers(
                    num=required_workers_count,
                    target=worker,
                    args=(config,),
                    parent_pid=parent_pid
                )
        else:
            logger.critical('Network is down. stopping workers')
            for c in active_children():
                c.terminate()

        sleep(config.SLEEP)
Example #6
File: managers.py Project: vhnuuh/pyutil
    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)
Example #7
def bruteUser(userlist, psize, hosti, pathi, porti, securei, userfound):
    global host
    host = hosti
    global port
    port = porti
    global secure
    secure = securei
    global userout
    userout = userfound
    global path
    path = pathi
    f = open(userout, "w").close()
    usersize = len(userlist)
    # manage pool
    if usersize <= psize:
        chunksize = 1
    else:
        chunksize = (usersize / psize) + (usersize % psize)
    print("Userlist size: %d\tChunk size: %d\tPool size: %d" % (usersize, chunksize, psize))
    print("Bruteforcing usernames")
    pool = Pool(processes=psize)
    for chunk in itertools.izip(grouper(userlist, chunksize)):
        pool.map_async(worker, chunk)
    pool.close()
    try:
        while len(active_children()) > 0:  # how many active children do we have
            sleep(2)
            ignore = active_children()
    except KeyboardInterrupt:
        exit("CTRL^C caught, exiting...\n\n")
    print("Username bruteforce complete")
Example #8
File: crawl.py Project: lifepy/crawler
    def execute(self, *args, **options):
        if options['start-over']:
            init_db_for_crawl(DB_CONN_URL)

        num2crawl = options['count']
        interval = options['interval']
        if options['url']:
            # run once, test mode (no writeback)
            p = StoreDetailCrawler(interval=interval, count=num2crawl, writeback=False, url=options['url'])
            p.start()
            p.join()
        else:
            # run pool (could have pool size of 1)
            pool_size = options['pool-size']
            while (len(multiprocessing.active_children())<pool_size and not completed):
                num = pool_size - len(multiprocessing.active_children())
                for i in range(num):
                    p = StoreDetailCrawler(count=num2crawl, interval=interval)
                    p.start()
                    time.sleep(2)

                while(len(multiprocessing.active_children())>=pool_size):
                    time.sleep(0.5)
                
                print 'COMPLETED: ',completed
Example #9
File: mcp.py Project: bdeeney/rejected
    def stop_processes(self):
        """Iterate through all of the consumer processes shutting them down."""
        self._set_state(self.STATE_SHUTTING_DOWN)
        LOGGER.debug('Stopping consumer processes')
        self._stop_timers()

        active_processes = multiprocessing.active_children()

        # Stop if we have no running consumers
        if not active_processes:
            LOGGER.info('All consumer processes have stopped')
            return self._set_state(self.STATE_STOPPED)

        # Iterate through all of the bindings and try and shutdown processes
        for process in active_processes:
            self._stop_process(process)

        # Wait for them to shutdown cleanly
        time.sleep(2)

        iterations = 0
        while multiprocessing.active_children():
            LOGGER.debug('Waiting on %i active processes to shut down',
                         self.total_process_count)
            time.sleep(1)
            iterations += 1

            # If the shutdown process waited long enough, kill the consumers
            if iterations == self._MAX_SHUTDOWN_WAIT:
                self._kill_processes()
                break

        LOGGER.debug('All consumer processes stopped')
        self._set_state(self.STATE_STOPPED)
Example #10
    def GET(self, *args):
        """
        Inspect the chained requests of all chained campaigns, requires /all
        """
        if not args:
            return dumps({"results" : 'Error: No arguments were given'})
        if args[0] != 'all':
            return dumps({"results" : 'Error: Incorrect argument provided'})

        if len(multiprocessing.active_children()) < 1:  # see if an inspection is already running
            ccid_list = self.listAll()                  # we run only one inspection in the background
            shuffle(ccid_list)
            try:
                p  = multiprocessing.Process(target = self.multiple_inspect,
                        args = (','.join( ccid_list ),))

                p.start()
                return dumps({"results" : True, "message":
                        "Successfully forked inspection to background. PID: %s"
                        % p.pid})

            except Exception as e:
                self.logger.error('Error while forking an inspection')
                self.logger.exception(e)
                return dumps({"results" : False,
                        message : "Failed in forking the process"})
        else:
            return dumps({"results" : True,
                    "message" : "Already running inspection in background. PID: %s"
                            %multiprocessing.active_children()[0].pid})
Example #11
def brutePasses(userlist,passlist,hosti,pathi,porti,securei,psize,loginsi):
	global host
	host = hosti
	global port
	port = porti
	global secure
	secure = securei
	global logins
	logins = loginsi
	global path
	path = pathi
	global usernames
	usernames = userlist
	usersize = len(userlist)
	passsize = len(passlist)
	
	# manage pool
	if (psize == 0):	psize = 5
	if ((usersize*passsize) <= psize):	chunksize = 1
	else:	chunksize = (((usersize*passsize) / psize) + ((usersize*passsize) % psize))
	#print("%s" % ((ceil(float((usersize*passsize)) / psize)) + ((usersize*passsize) % psize)))
	print("Userlist size: %d\tPassword size: %d\tChunk size: %d\tPool size: %d" % (usersize,passsize,chunksize,psize))
	pool = Pool(processes=psize)
	for chunk in itertools.izip(grouper(passlist,chunksize)):
		pool.map_async(worker,chunk)
	pool.close()
	try:
		while len(active_children()) > 0:  # how many active children do we have
			sleep(2)
			ignore = active_children()
	except KeyboardInterrupt:
		exit('CTRL^C caught, exiting...\n\n')
	print("Password bruteforce attempts completed")
    def CheckIfStillUsed(self):
        import time
        if self.LoadItemFromSessionStore('status', 'processID') == None:
            return

        # if a new process ID is in the session data, another process was started and this process was abandoned
        if self.LoadItemFromSessionStore('status', 'processID') != os.getpid() and self.LoadItemFromSessionStore('status', 'processID') != 0:
            print "**** SMLRPP Exiting on process ID, session_status['processID'] = ", self.LoadItemFromSessionStore('status', 'processID'), " os.getpid() = ", os.getpid()
            sys.stdout.flush()

            time.sleep(1.0)
            if self.pool:
                self.pool.close()
                self.pool.join()
                self.pool = None
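            # terminate any worker processes that are still alive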
            for p in multiprocessing.active_children():
                p.terminate()
            os._exit(0) # kills pool processes

        # if the status has not been checked in the past 300 seconds (5 minutes), this process was abandoned
        if (time.time() - self.LoadItemFromSessionStore('status', 'time_of_last_status_check')) > 300:
            print "**** SMLRPP Exiting on time of last status check"
            sys.stdout.flush()

            time.sleep(1.0)
            if self.pool:
                self.pool.close()
                self.pool.join()
                self.pool = None
            for p in multiprocessing.active_children():
                p.terminate()
            os._exit(0) # kills pool processes
Example #13
File: homopolymer.py Project: CFPS/ToolSET
def find(mappe, cpu=6, minLengde=5):
    import os, time
    from multiprocessing import Process, Queue, active_children
    arbejdsliste = os.listdir(mappe)
    while arbejdsliste or len(active_children()):
        while arbejdsliste and len(active_children()) < cpu:
            # spawn a child process for the next file in the work list
            Process(target=child, args=(arbejdsliste.pop(), mappe)).start()
        time.sleep(1)
    print 'Done with the work!'
    return

def child(fil, mappe):
    # load the file (assumed here: one sequence per line)
    filen = open(mappe + '/' + fil)
    udfil = open(mappe +'/homopolymer-'+fil, 'wb')
    for sekvens in filen:
        for base in ('A', 'T', 'C', 'G'):
            ind = 0
            while 1:
                try:
                    # index() returns an offset relative to ind, so convert it to an absolute position
                    homopolymer = ind + sekvens[ind:].index(base)
                    udfil.write('>|' + str(homopolymer) + '|\n' + sekvens[homopolymer-100:homopolymer+100])
                    ind = homopolymer + 100
                except ValueError:
                    break
    udfil.close()
    return

if __name__ == "__main__":
    import sys
    find(sys.argv[1])
    exit()
Example #14
def enqueue(l, c, q):
    plist = []
    l.debug('Spawning enqueue processes')
    for i in xrange(MAXPROCS):
        p = multiprocessing.Process(target=EnqueuePeople, args=(l, c, q,),
                                    name='EeQueue-%d' % i)
        plist.append(p)
        # t.daemon = True
        p.start()  # start without joining here so the workers run in parallel
    l.debug('Done spawning enqueue processes')
    l.debug('plist: %r' % plist)

    while len(multiprocessing.active_children()) > 0:
        l.debug('Waiting for %d enqueue processes'
                % len(multiprocessing.active_children()))
        try:
            l.info('Queue: %d items.' % q.qsize())
            l.debug('plist: %r' % plist)
            sleep(1)
        except KeyboardInterrupt:
            l.critical('Caught ctrl-c. Stopping worker processes.')
            for p in multiprocessing.active_children():
                p.terminate()
            while len(multiprocessing.active_children()) > 0:
                l.info('Waiting for %d remaining processes to die.'
                       % len(multiprocessing.active_children()))
                sleep(1)
            break
    l.debug('Done waiting for enqueue processes')
Example #15
def main():
    options = get_cmd_line_args()
    config = util.load_unstripped_json(options.config)
    #hard code some args
    config['max_threads'] = 2
    config['coordinator_cfg']['control_db'] = "ka_backpopulate_cntrl"
    config["sub_process_time_out"] = 86400*3
    with open(options.file_list) as f:
        file_list = f.readlines()
    processes = []
    for gzfile in file_list:
        while True:
            if len(active_children()) < config['max_threads']:
                g_logger.info("Starting loading %s ...", gzfile)
                p = Process(target=gz_pickle_to_mongo,
                            args=(config, gzfile.strip()))
                processes.append((p, gzfile.strip(), time.time()))
                p.start()
                time.sleep(5)
                break
            else:
                monitor(config, processes)
                time.sleep(10)
    while len(active_children()) > 0:       
        monitor(config, processes)
        time.sleep(10)
Example #16
 def start (self):
     delete_break_file ()
     enddate = self.last_trade_date ()
     startdate = self.get_start_date (enddate, 100)
     # distribute the code task list among the worker processes
     worker_count = mp.cpu_count () * 5
     task_queue = mp.Queue ()
     # create and start the worker processes
     progress = mp.Value ('i', 0)
     workers = []
     for i in xrange (worker_count):
         w = Monitor (task_queue, progress, startdate, enddate)
         workers.append (w)
         w.start ()
     print "start %d workers to run" % len (workers)
     sys.stdout.flush ()
     # add tasks to the task queue
     for code in self.code_list:
         task_queue.put (code)
     for i in xrange (worker_count + 10):
         task_queue.put (None)
     # wait for all work to finish
     while progress.value < len (self.code_list):
         time.sleep (10)
         print "-------->workers(%d) progress:%d => %d <---------" % (len (mp.active_children ()), progress.value, len (self.code_list))
         sys.stdout.flush ()
         if len (mp.active_children ()) <= 0:
             print "All Workers Exits"
             sys.stdout.flush ()
             break
Example #17
    def run_tasks(self, tasks):
        # Join whatever children are still sitting around
        multiprocessing.active_children()

        queue = multiprocessing.Queue()
        execute_ps = multiprocessing.Process(
            target=self._execute, args=[tasks, queue])
        execute_ps.start()

        def generate_response(execute_ps, queue):
            while execute_ps.is_alive():
                try:
                    data = queue.get_nowait()
                    yield data
                except Queue.Empty:
                    time.sleep(1)

            execute_ps.join()

            # suck the last goodness out of the queue before moving on
            while True:
                try:
                    data = queue.get_nowait()
                    yield data
                except Queue.Empty:
                    break

        return execute_ps, threadsafe_iter(
            generate_response(execute_ps, queue))
Example #18
File: listener.py Project: rc1405/Minerva
 def listener(self, pname, recv_data):
     ip, port = pname.split('-')
     #client = pymongo.MongoClient()
     #collection = client.minerva.sensors
     #print('starting receiver')
     KEYFILE = self.config['Event_Receiver']['certs']['private_key']
     CERTFILE = self.config['Event_Receiver']['certs']['server_cert']
     s = socket(AF_INET, SOCK_STREAM)
     s.bind((ip, int(port)))
     s.listen(5)
     s_ssl = ssl.wrap_socket(s, keyfile=KEYFILE, certfile=CERTFILE, server_side=True, ssl_version=ssl.PROTOCOL_SSLv3)
     active_recv = []
     while True:
         try:
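             # join and prune receiver processes that have already exited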
             for p in active_recv[:]:  # iterate over a copy since we remove from the list below
                 if p not in active_children():
                     p.join()
                     active_recv.remove(p)
             if len(active_children()) < int(self.max_threads):
                 #print('accepting connections')
                 c, a = s_ssl.accept()
                 #print('Got connection', c, a)
                 pr = Process(target=recv_data, args=((a[0], c)))
                 pr.start()
                 active_recv.append(pr)
             else:
                 #print('sleeping')
                 time.sleep(.001)
         except Exception as e:
             print('{}: {}'.format(e.__class__.__name__,e))
Example #19
    def shutdown(self):
        BuiltinCore.shutdown(self)
        self.logger.info("Closing RPC command queues")
        self.rpc_q.close()

        def term_children():
            """ Terminate all remaining multiprocessing children. """
            for child in multiprocessing.active_children():
                self.logger.error("Waited %s seconds to shut down %s, "
                                  "terminating" % (self.shutdown_timeout,
                                                   child.name))
                child.terminate()

        timer = threading.Timer(self.shutdown_timeout, term_children)
        timer.start()
        while len(multiprocessing.active_children()):
            self.logger.info("Waiting for %s child(ren): %s" %
                             (len(multiprocessing.active_children()),
                              [c.name
                               for c in multiprocessing.active_children()]))
            time.sleep(1)
        timer.cancel()
        self.logger.info("All children shut down")

        while len(threading.enumerate()) > 1:
            threads = [t for t in threading.enumerate()
                       if t != threading.current_thread()]
            self.logger.info("Waiting for %s thread(s): %s" %
                             (len(threads), [t.name for t in threads]))
            time.sleep(1)
        self.logger.info("Shutdown complete")
Example #20
File: proxy.py Project: eastonqiu/proxy
    def ValidateProxies(self, proxyList):
            
        maxProc = 50
        
        tests = ["http://www.baidu.com"]
    
        result = Queue()
       
        start = time.clock()
        
        for i in proxyList:
            p = Process(target=self.CheckProxy, args=(i, tests, result))
            p.start()  
            
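            # crude throttle: once more than maxProc children are alive, block on the newest one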
            if len(multiprocessing.active_children()) > maxProc:
                #print('active_children: ', multiprocessing.active_children())
                p.join()
            
        while len(multiprocessing.active_children()) > 0:
            time.sleep(3)
        end = time.clock()
        #print("total time for validation:", end - start, "s")
        
        self.pool = []
        
        for i in range(result.qsize()):
            a = result.get()
            self.pool += [Proxy(a[0], a[1])]

        
        print("{0} validated".format(len(self.pool)))
Example #21
    def manager_thread_main(self):
        """ Checks for workers that died unexpectedly and listens to their
            status update messages.
        """
        while True:
            # wait up to 60 seconds
            try:
                command, argument = self.manager_thread_queue.get(True, 60)

                if command == 'quit':
                    break
                elif command == 'start':
                    with self._lock:
                        self.now_building[argument] = True
                elif command == 'end':
                    with self._lock:
                        self.now_building[argument] = False
                else:
                    logger.warn("Unknown command to manager thread: %s" % command)

            except Queue.Empty:
                pass

            # this one is to remove zombie processes
            multiprocessing.active_children()

            with self._lock:
                ids_to_restart = []
                for id, worker in self.workers.items():
                    if not worker.is_alive():
                        ids_to_restart.append(id)

                for id in ids_to_restart:
                    self.restart(id)
Example #22
def brutePlugin(pluginlist,foundplug,hosti,pathi,porti,securei,psize):
	global host
	host = hosti
	global port
	port = porti
	global secure
	secure = securei
	global plugfound
	plugfound = foundplug
	global path
	path = pathi
	f = open(plugfound,'w').close()
	listsize = (len(pluginlist))
	
	# manage pool
	if (psize == 0):	psize = 5
	if (listsize <= psize):	chunksize = 1
	else:	chunksize = ((listsize / psize) + (listsize % psize))
	print("Plugin list size: %d\tChunk size: %d\tPool size: %d" % ((listsize),chunksize,psize))
	print("Plugin bruteforcing started")
	pool = Pool(processes=psize)
	for chunk in itertools.izip(grouper(pluginlist,chunksize)):
		pool.map_async(worker,chunk)
	pool.close()
	try:
		while len(active_children()) > 0:  # how many active children do we have
			sleep(2)
			ignore = active_children()
	except KeyboardInterrupt:
		exit('CTRL^C caught, exiting...\n\n')
	print("Plugin bruteforce complete")
Example #23
def fork_it(args):
    threads = int(args.t)
    childs = int(args.f)
    len_hosts = len(HOSTLIST)

    print "[*] attacking %d target(s)\n" "[*] cracking up to %d hosts parallel\n" "[*] threads per host: %d" % (
        len_hosts,
        childs,
        threads,
    )

    i = 1
    for host in HOSTLIST:
        host = host.replace("\n", "")
        print "[*] performing attacks against %s [%d/%d]" % (host, i, len_hosts)
        hostfork = multiprocessing.Process(target=thread_it, args=(host, args))
        hostfork.start()
        # cap the number of concurrently running child processes
        while len(multiprocessing.active_children()) >= childs:
            time.sleep(0.001)
        time.sleep(0.001)
        i += 1

    # waiting for child processes
    while multiprocessing.active_children():
        time.sleep(1)
Example #24
File: dyskt.py Project: sh1nu11bi/wraith
    def run(self):
        """ start execution """
        # setup signal handlers for pause(s),resume(s),stop
        signal.signal(signal.SIGINT,self.stop)   # CTRL-C and kill -INT stop
        signal.signal(signal.SIGTERM,self.stop)  # kill -TERM stop

        # initialize, quit on failure
        logging.info("**** Starting DySKT %s ****",dyskt.__version__)
        self._create()
        if self.state == DYSKT_INVALID:
            # make sure we do not leave system in corrupt state (i.e. w/o nics)
            self._destroy()
            raise DySKTRuntimeException("DySKT failed to initialize, shutting down")

        # set state to running
        self._state = DYSKT_RUNNING

        # execution loop
        while not self._halt.is_set():
            # get message(s) a tuple: (level,originator,type,message) from children
            rs = [] # hide pycharm unreferenced variable warning
            try:
                rs,_,_ = select.select(self._pConns.values(),[],[],0.5)
            except select.error as e: # hide (4,'Interrupted system call') errors
                if e[0] == 4: continue

            for r in rs:
                try:
                    l,o,t,m = r.recv()
                    if l == 'err':
                        # only process errors encountered during execution
                        if DYSKT_CREATED < self.state < DYSKT_EXITING:
                            # continue w/out collection if it fails
                            if o == 'collection':
                                logging.warning("Collection radio dropped: %s",m)
                                self._pConns['collection'].send('!STOP!')
                                self._pConns['collection'].close()
                                del self._pConns['collection']
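                                # reap the just-stopped collection child (active_children() joins exited children)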
                                mp.active_children()
                                self._cr = None
                            else:
                                logging.error("%s failed (%) %s",o,t,m)
                        else:
                            logging.warning("Uninitiated error (%) %s from %s",t,m,o)
                    elif l == 'warn': logging.warning("%s: (%s) %s",o,t,m)
                    elif l == 'info': logging.info("%s: (%s) %s",o,t,m)
                    elif l == 'cmd':
                        cid,cmd,rdos,ps = self._processcmd(m)
                        if cid is None: continue
                        for rdo in rdos:
                            logging.info("Client sent %s to %s",cmd,rdo)
                            self._pConns[rdo].send('%s:%d:%s' % (cmd,cid,'-'.join(ps)))
                    elif l == 'cmderr':
                        self._pConns['c2c'].send("ERR %d \001%s\001\n" % (t,m))
                    elif l == 'cmdack':
                        self._pConns['c2c'].send("OK %d \001%s\001\n" % (t,m))
                except Exception as e:
                    # blanket catch all
                    logging.error("DySKT failed. (Unknown) %s->%s", type(e),e)
 def Reports_CheckOneSecondSessionUpdates(self, countOfReportsRun, totalNumberOfReportsToBeRun):
     if self.oneSecondTimes != int(time.time()):
         self.CheckIfStillUsed()
         processcountString = '<br><br>Currently using 1 process (the server is busy)'
         if len(multiprocessing.active_children()) > 1:
             processcountString = '<br><br>Currently using ' + str(len(multiprocessing.active_children())) + ' parallel processes'
         self.SaveDictionaryOfItemsToSessionStore('status', {'currentStatus':"Created %s of %s Reports and Graphs %s" % (countOfReportsRun, totalNumberOfReportsToBeRun, processcountString)})
         self.oneSecondTimes = int(time.time())
Example #26
 def tearDown(self):
   # Complain if there are any children left over.
   active_children = multiprocessing.active_children()
   for child in active_children:
     child.Kill(signal.SIGKILL, log_level=logging.WARNING)
     child.join()
   self.assertEqual(multiprocessing.active_children(), [])
   self.assertEqual(active_children, [])
Example #27
    def print_create(self):
        if self.request.method == 'OPTIONS':
            return Response(status=200)

        log.info("[print_create] New print job")
        # delete all child processes that have already terminated
        # but are <defunct>. This is a side_effect of the below function
        multiprocessing.active_children()

        # IE is always URLEncoding the body
        jsonstring = urllib.unquote_plus(self.request.body)

        try:
            spec = json.loads(jsonstring, encoding=self.request.charset)
        except:
            log.error('[print_create] JSON content could not be parsed')
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.debug("*** Traceback:/n{}".format(traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)))
            log.debug("*** Exception:/n{}".format(traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)))
            raise HTTPBadRequest('JSON is empty or content could not be parsed')

        print_temp_dir = self.request.registry.settings['print_temp_dir']

        # Remove older files on the system
        delete_old_files(print_temp_dir)

        scheme = self.request.headers.get('X-Forwarded-Proto',
                                          self.request.scheme)
        print_proxy_url = self.request.registry.settings['print_proxy_url']
        print_server_url = self.request.registry.settings['print_server_url']
        api_url = self.request.registry.settings['api_url']
        headers = dict(self.request.headers)
        headers.pop("Host", headers)

        unique_filename = datetime.datetime.now().strftime("%y%m%d%H%M%S") + str(random.randint(1000, 9999))
        pdf_download_path = '/download/-multi' + unique_filename + '.pdf.printout'

        with open(create_info_file(print_temp_dir, unique_filename), 'w+') as outfile:
            data = {
                "done": False,
                "status": "running",
                "elapsedTime": 1,
                "waitingTime": 0,
                "downloadURL": pdf_download_path
            }
            json.dump(data, outfile)

        info = (spec, print_temp_dir, scheme, api_url, print_proxy_url, print_server_url, headers, unique_filename)

        p = multiprocessing.Process(target=create_and_merge, args=(info,))
        p.start()

        response = {"ref": unique_filename,
                    "statusURL": "/print/print/geoadmin3/status/{}.json".format(unique_filename),
                    "downloadURL": pdf_download_path}

        return response
Example #28
	def _onloop(self):
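		# reap finished children; active_children() joins any that have exited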
		multiprocessing.active_children()

		if len(self.pkgTaskQueue) > 0:
			if self.pkgProcess is not None: # this should never happen, but we'll check anyway
				if not self.pkgProcess.is_alive():
					self.startPackageTask(self.pkgTaskQueue.pop(0))

		return True
Example #29
def main():
    """
    Define script parameters
    """
    LOGLEVEL = logging.DEBUG
    ITEMCOUNT = 100000

    # Set up logging
    logformatter = logging.Formatter('%(asctime)s - %(process)d'
                                     ' - %(levelname)s - %(message)s')

    loghandler = logging.StreamHandler(stderr)
    loghandler.setFormatter(logformatter)
    loghandler.setLevel(LOGLEVEL)

    scriptlogger = logging.getLogger('cassbench')
    scriptlogger.addHandler(loghandler)
    scriptlogger.setLevel(LOGLEVEL)
    scriptlogger.debug('Script logging enabled')

    # Set up variables needed by processes
    iq = Queue()

    # Call function to create, start, and wait for enqueue processes
    # enqueue(scriptlogger, ITEMCOUNT, iq)

    plist = []
    scriptlogger.debug('Spawning enqueue processes')
    for i in xrange(MAXPROCS):
        p = multiprocessing.Process(target=EnqueuePeople, args=(scriptlogger,
                                                                ITEMCOUNT,
                                                                iq,),
                                    name='EeQueue-%d' % i)
        # t.daemon = True
        p.start()
    scriptlogger.debug('Done spawning enqueue processes')

    while len(multiprocessing.active_children()) > 0:
        scriptlogger.debug('Waiting for %d enqueue processes'
                           % len(multiprocessing.active_children()))
        try:
            scriptlogger.info('Queue: %d items.' % iq.qsize())
            sleep(1)
        except KeyboardInterrupt:
            scriptlogger.critical('Caught ctrl-c. Stopping worker processes.')
            for p in multiprocessing.active_children():
                p.terminate()
            while len(multiprocessing.active_children()) > 0:
                scriptlogger.info('Waiting for %d remaining processes to die.'
                                  % len(multiprocessing.active_children()))
                sleep(1)
            break
    scriptlogger.debug('Done waiting for enqueue processes')

    # Call function to create, start, and wait for dequeue processes
    dequeue(scriptlogger, iq)
    scriptlogger.info('Queue: %d items.' % iq.qsize())
Example #30
File: minion.py Project: Adapptor/salt
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the minion
        '''
        context = zmq.Context()
        poller = zmq.Poller()
        socket = context.socket(zmq.SUB)
        socket.setsockopt(zmq.SUBSCRIBE, '')
        if self.opts['sub_timeout']:
            socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        socket.connect(self.master_pub)
        poller.register(socket, zmq.POLLIN)

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        if self.opts['sub_timeout']:
            last = time.time()
            while True:
                socks = dict(poller.poll(self.opts['sub_timeout']))
                if socket in socks and socks[socket] == zmq.POLLIN:
                    payload = self.serial.loads(socket.recv())
                    self._handle_payload(payload)
                    last = time.time()
                if time.time() - last > self.opts['sub_timeout']:
                    # It has been a while since the last command, make sure
                    # the connection is fresh by reconnecting
                    if self.opts['dns_check']:
                        try:
                            # Verify that the dns entry has not changed
                            self.opts['master_ip'] = salt.utils.dns_check(
                                self.opts['master'], safe=True)
                        except SaltClientError:
                            # Failed to update the dns, keep the old addr
                            pass
                    poller.unregister(socket)
                    socket.close()
                    socket = context.socket(zmq.SUB)
                    socket.setsockopt(zmq.SUBSCRIBE, '')
                    socket.setsockopt(zmq.IDENTITY, self.opts['id'])
                    socket.connect(self.master_pub)
                    poller.register(socket, zmq.POLLIN)
                    last = time.time()
                time.sleep(0.05)
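                # clean up finished job processes (active_children() joins them)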
                multiprocessing.active_children()
                self.passive_refresh()
        else:
            while True:
                socks = dict(poller.poll(60))
                if socket in socks and socks[socket] == zmq.POLLIN:
                    payload = self.serial.loads(socket.recv())
                    self._handle_payload(payload)
                    last = time.time()
                time.sleep(0.05)
                multiprocessing.active_children()
                self.passive_refresh()
Example #31
    def __run_multiprocess_test(
        self, stream, manager, ignore_inactive_db_check=False, tracker=None
    ):
        """
        Tests the content of the given file.
        """

        self.print_header()

        finished = False
        index = "funilrys"

        if PyFunceble.CONFIGURATION.db_type == "json":
            manager_data = manager.list()
        else:
            manager_data = None

        minimum_position = tracker.get_position() if tracker else 0
        file_position = 0

        while True:
            while (
                len(active_children()) <= PyFunceble.CONFIGURATION.maximal_processes
                and not self.autosave.is_time_exceed()
            ):
                try:
                    line = next(stream)

                    if isinstance(line, tuple):
                        index, line = line

                    if (
                        tracker
                        and tracker.authorized
                        and file_position < minimum_position
                    ):
                        file_position += len(line)

                        if (
                            self.autosave.authorized
                            or PyFunceble.CONFIGURATION.print_dots
                        ):
                            PyFunceble.LOGGER.info(
                                f"Skipped {line!r}: insufficient position."
                            )
                            print(".", end="")

                        continue

                    subjects = self.get_subjects(line)

                    if isinstance(subjects, list):
                        for subject in subjects:
                            self.__start_process(
                                subject,
                                manager_data,
                                ignore_inactive_db_check=ignore_inactive_db_check,
                            )

                            if index != "funilrys":
                                # An index was given, we remove the index and subject from
                                # the mining database.
                                self.mining.remove(index, subject)
                    else:
                        self.__start_process(
                            subjects,
                            manager_data,
                            ignore_inactive_db_check=ignore_inactive_db_check,
                        )

                        if index != "funilrys":
                            # An index was given, we remove the index and subject from
                            # the mining database.
                            self.mining.remove(index, subjects)

                    if tracker and tracker.authorized:
                        file_position += len(line)

                    continue
                except StopIteration:
                    finished = True
                    break

            self.__check_exception(active_children(), manager_data)

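            # wait while we are at the process cap and "PyF" workers are still running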
            while len(
                active_children()
            ) >= PyFunceble.CONFIGURATION.maximal_processes and "PyF" in " ".join(
                [x.name for x in reversed(active_children())]
            ):
                self.__process_live_merging(finished, manager_data, tracker)
                continue

            if self.__process_live_merging(finished, manager_data, tracker):
                continue

            if self.__process_end_merging(finished, manager_data, tracker):
                break
Example #32
    def construct_and_get_shadow_file(
        self, file_stream, ignore_inactive_db_check=False
    ):
        """
        Provides a path to a file which contain the list to file.

        The idea is to do a comparison between what we already tested
        and what we still have to test.
        """

        def start_process(*args):

            original_config = PyFunceble.CONFIGURATION.copy()
            origin_intern = PyFunceble.INTERN.copy()

            args += (PyFunceble.LOADER,)
            args += (origin_intern,)

            process = OurProcessWrapper(target=self.work_process, args=args)
            process.name = f"PyF shadow {line}"
            process.start()

            PyFunceble.LOADER.config.update(original_config)
            PyFunceble.LOADER.inject_all()
            PyFunceble.INTERN.update(origin_intern)

        if PyFunceble.CONFIGURATION.shadow_file:
            with NamedTemporaryFile(delete=False) as temp_file:
                if self.autosave.authorized or PyFunceble.CONFIGURATION.print_dots:
                    print("")

                finished = False

                while True:
                    while (
                        len(active_children())
                        <= PyFunceble.CONFIGURATION.maximal_processes
                    ):
                        try:
                            line = next(file_stream)

                            start_process(
                                line,
                                temp_file.name,
                                ignore_inactive_db_check,
                                self.autocontinue,
                                self.inactive_db,
                            )

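                            # reap children that have already finished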
                            active_children()
                            continue
                        except StopIteration:
                            finished = True
                            break

                    while len(
                        active_children()
                    ) >= PyFunceble.CONFIGURATION.maximal_processes and "shadow" in " ".join(
                        [x.name for x in reversed(active_children())]
                    ):
                        active_children()

                    if finished:
                        break

                if self.autosave.authorized or PyFunceble.CONFIGURATION.print_dots:
                    print("")

                return temp_file.name
        return file_stream.name
Example #33
)

global_registry().gauge_callback(
    name='python.threads.daemon',
    callback=lambda: sum(1 for thread in threading.enumerate()
                         if thread.isDaemon()),
    label='Daemon thread count',
    description='Number of daemon threads',
    numerator='threads',
)

# ------------------------------------------------------------------------------

global_registry().gauge_callback(
    name='python.multiprocessing.count',
    callback=lambda: len(multiprocessing.active_children()),
    label='Process count',
    description='Number of multiprocessing processes',
    numerator='processes',
)

global_registry().gauge_callback(
    name='python.multiprocessing.active',
    callback=lambda: sum(1 for proc in multiprocessing.active_children()
                         if proc.is_alive()),
    label='Active multiprocessing processes',
    description='Number of active multiprocessing processes',
    numerator='processes',
)

global_registry().gauge_callback(
Example #34
    def learn_model(self,
                    X,
                    types=None,
                    type_hierarchy=None,
                    domains=None,
                    ranges=None):

        train_triples = to_triples(X, order="sop")

        if self.add_types and types is not None:
            types = types.tocoo()
            n_entities = X[0].shape[0]
            rdf_type_id = len(X)
            type_triples = np.array([(types.row[i], types.col[i] + n_entities,
                                      rdf_type_id) for i in range(types.nnz)])
            train_triples = np.vstack((train_triples, type_triples))
            types = types.tocsr()
            self.n_entity += types.shape[1]
            self.n_relations += 1

        self.train_triples = train_triples
        self.train_hr_t = gen_hr_t(train_triples)
        self.train_tr_h = gen_tr_h(train_triples)

        self.create_model()

        train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, \
        train_loss, train_op = train_ops(self, learning_rate=self.lr,
                                         optimizer_str=self.optimizer,
                                         regularizer_weight=self.loss_weight)

        test_input, test_head, test_tail, _, _, triple_score = test_ops(self)
        self.test_input = test_input

        self.sess = tf.Session()
        #tf.initialize_all_variables().run(session=self.sess)
        tf.global_variables_initializer().run(session=self.sess)

        self.saver = tf.train.Saver()

        iter_offset = 1

        total_inst = self.n_train

        # training data generator
        raw_training_data_queue = []
        training_data_queue = []
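        # NOTE: plain lists are not shared across processes; each generator child works on its own copy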
        data_generators = list()
        for i in range(self.n_generator):
            data_generators.append(
                Process(target=data_generator_func,
                        args=(raw_training_data_queue, training_data_queue,
                              self.train_tr_h, self.train_hr_t, self.n_entity,
                              self.neg_weight)))
            data_generators[-1].start()

        for n_iter in range(iter_offset, self.max_iter + 1):
            start_time = timeit.default_timer()
            accu_loss = 0.
            accu_re_loss = 0.
            ninst = 0

            if self.verbose:
                print("initializing raw training data...")
            nbatches_count = 0
            for dat in self.raw_training_data(batch_size=self.batch):
                raw_training_data_queue.append(dat)
                nbatches_count += 1
            if self.verbose:
                print("raw training data initialized.")

            while nbatches_count > 0:
                nbatches_count -= 1

                hr_tlist, hr_tweight, tr_hlist, tr_hweight = training_data_queue[
                    0]
                training_data_queue = training_data_queue[1:]

                l, rl, _ = self.sess.run(
                    [train_loss, self.regularizer_loss, train_op], {
                        train_hrt_input: hr_tlist,
                        train_hrt_weight: hr_tweight,
                        train_trh_input: tr_hlist,
                        train_trh_weight: tr_hweight
                    })

                accu_loss += l
                accu_re_loss += rl
                ninst += len(hr_tlist) + len(tr_hlist)

            if self.verbose:
                print("iter %d avg loss %.5f, time %.3f" %
                      (n_iter, accu_loss / ninst,
                       timeit.default_timer() - start_time))

            if self.save_per and n_iter and n_iter % self.save_per == 0:
                self.save_model(
                    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "ckp/checkpoint-%d.ckp" % n_iter))

        for p in active_children():
            p.terminate()
Example #35
 def child_handler(signal, frame):
     # this is required for the multiprocessing mechanism to work
     multiprocessing.active_children()
Example #36
def run_spades_all(chr_start, chr_end, output_dir, num_of_threads,
                   minicontig_dir):
    num_of_threads = multiprocessing.cpu_count() - 10
    for chr_num in range(chr_start, chr_end + 1):
        in_dir = output_dir + "chr" + str(chr_num) + "_files_cutPBHC/"
        count = 1
        fastq_files_all = sorted(glob.glob(in_dir + "fastq_by_*.fastq"),
                                 key=os.path.getsize,
                                 reverse=True)
        total_num = len(fastq_files_all)
        pool = Pool(num_of_threads)
        out_dir_list = []
        for one_file_fastq in fastq_files_all:
            one_file = one_file_fastq[:-6]
            out_dir = one_file + "_spades_assembly"
            spades_contig_file = out_dir + "/" + "contigs.fasta"
            if os.path.exists(spades_contig_file):
                count += 1
                #print("using existing " + spades_contig_file)
                out_dir_list.append(out_dir)
            else:
                count += 1
                pool.apply_async(use_spades, (one_file_fastq, out_dir, "xin"))
                out_dir_list.append(out_dir)
            if (count - 1) % num_of_threads == 0 or (count - 1) == total_num:
                pool.close()
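                # spin until at most one child remains before joining the pool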
                while len(active_children()) > 1:
                    time.sleep(0.5)
                pool.join()

                # delete temp files
                """
                if out_dir_list != []:
                    pool = Pool(num_of_threads)
                    for one_dir in out_dir_list:
                        pool.apply_async(del_one_dir_1,(one_dir,"xin"))
                    pool.close()
                    while len(active_children()) > 1:
                        time.sleep(0.5)
                    pool.join()
                """

                if (count - 1) == total_num:
                    print("finished chr" + str(chr_num))
                else:
                    pool = Pool(num_of_threads)
                    out_dir_list = []
        output_file = "Aquila_cutPBHC_minicontig" + "_chr" + str(
            chr_num) + ".fasta"
        Concatenate_start(in_dir, minicontig_dir, output_file, "xin")

        # delete assembly files

        time.sleep(5)
        pool = Pool(num_of_threads)
        count = 1
        for one_file_fastq in fastq_files_all:
            one_file = one_file_fastq[:-6]
            out_dir = one_file + "_spades_assembly"
            count += 1
            pool.apply_async(del_one_dir_2, (out_dir, "xin"))
            if (count - 1) % num_of_threads == 0 or (count - 1) == total_num:
                pool.close()
                while len(active_children()) > 1:
                    time.sleep(0.5)
                pool.join()
                if (count - 1) == total_num:
                    print("finished chr" + str(chr_num))
                else:
                    pool = Pool(num_of_threads)
        time.sleep(5)

    print("All Done~")
Example #37
def discover_strategy(block_size,
                      Strategizer,
                      strategies,
                      jobs=1,
                      nsamples=50,
                      threads=1):
    """Discover a strategy using ``Strategizer``

    :param block_size: block size to try
    :param Strategizer: strategizer to use
    :param strategies: strategies for smaller block sizes
    :param jobs: number of jobs to run in parallel
    :param nsamples: number of lattice bases to consider
    :param threads: number of threads to use per job

    """
    connections = []
    processes = []
    k = jobs
    m = nsamples

    strategizer = Strategizer(block_size)

    # everybody is alive in the beginning
    alive = range(m)

    return_queue = Queue()

    for i in range(m):
        manager, worker = Pipe()
        connections.append((manager, worker))
        strategies_ = list(strategies)
        strategies_.append(Strategizer.Strategy(block_size, worker))

        # note: success probability, rerandomisation density etc. can be adapted here
        param = Param(block_size=block_size,
                      strategies=strategies_,
                      flags=BKZ.GH_BND)
        param["threads"] = threads
        process = Process(target=worker_process,
                          args=(2**16 * block_size + i, param, return_queue))
        processes.append(process)

    callback = [None] * m
    for chunk in chunk_iterator(alive, k):
        for i in chunk:
            process = processes[i]
            process.start()
            manager, worker = connections[i]
            worker.close()
            connections[i] = manager

        # wait for `k` responses
        for i in chunk:
            callback[i] = connections[i].recv()

    assert all(callback)  # everybody wants preprocessing parameters

    preproc_params = strategizer(callback)

    callback = callback_roundtrip(alive, k, connections, preproc_params)
    assert all(callback)  # everybody wants pruning parameters

    pruning_params = strategizer(callback)

    callback = callback_roundtrip(alive, k, connections, pruning_params)
    assert not any(callback)  # no more questions

    strategy = Strategy(block_size=block_size,
                        preprocessing_block_sizes=preproc_params,
                        pruning_parameters=pruning_params)

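    # join children that have already terminated (a side effect of active_children())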
    active_children()

    stats = []
    for i in range(m):
        stats.append(return_queue.get())

    return strategy, tuple(stats), tuple(strategizer.queries)
Example #38
# -*- coding: utf-8 -*-
# @Time    : 2019/8/30 11:53
# @Author  : Wang fang chen
# @Email   : [email protected]
# @WeiXin  :w335441537
# @File    : 进程简单使用2.py
# @Software: PyCharm
'''
Use the cpu_count() and active_children() methods to get the number of CPU
cores on the current machine and all of the currently running child processes.
'''
import multiprocessing


def run(num):
    print('Process:%d' % num)


if __name__ == '__main__':
    for i in range(8):
        p = multiprocessing.Process(target=run, args=(i, ))
        p.start()
    print('CPU核心数量:' + str(multiprocessing.cpu_count()))  #查看机器CPU核数
    #目前运行的进程
    for p in multiprocessing.active_children():
        print('子进程名称:' + p.name + '   id:' + str(p.pid))
    print('进程结束!')
Example #39
def load():
    write('\n\n')
    # load libs
    from core.color import finish
    # load all modules in lib/brute, lib/scan, lib/graph
    module_names = load_all_modules()
    graph_names = load_all_graphs()

    # Parse ARGVs
    try:
        parser, options, startup_update_flag = load_all_args(module_names, graph_names)
    except SystemExit:
        finish()
        sys.exit(1)
    # Filling Options
    check_ranges = options.check_ranges
    check_subdomains = options.check_subdomains
    targets = options.targets
    targets_list = options.targets_list
    thread_number = options.thread_number + 1
    thread_number_host = options.thread_number_host
    log_in_file = options.log_in_file
    scan_method = options.scan_method
    exclude_method = options.exclude_method
    users = options.users
    users_list = options.users_list
    passwds = options.passwds
    passwds_list = options.passwds_list
    timeout_sec = options.timeout_sec
    ports = options.ports
    time_sleep = options.time_sleep
    language = options.language
    verbose_level = options.verbose_level
    show_version = options.show_version
    check_update = options.check_update
    socks_proxy = options.socks_proxy
    retries = options.retries
    graph_flag = options.graph_flag
    help_menu_flag = options.help_menu_flag
    ping_flag = options.ping_flag
    methods_args = options.methods_args
    method_args_list = options.method_args_list
    wizard_mode = options.wizard_mode

    # Checking Requirements
    (targets, targets_list, thread_number, thread_number_host,
     log_in_file, scan_method, exclude_method, users, users_list,
     passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level, show_version,
     check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args, method_args_list, wizard_mode) = \
        check_all_required(
            targets, targets_list, thread_number, thread_number_host,
            log_in_file, scan_method, exclude_method, users, users_list,
            passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level, show_version,
            check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args, method_args_list, wizard_mode
        )

    info(messages(language, 0))
    # check for update
    if startup_update_flag is True:
        __version__, __code_name__ = _version_info()
        _check(__version__, __code_name__, language, socks_proxy)

    info(messages(language, 96).format(len(load_all_modules()) - 1 + len(load_all_graphs())))
    suff = now(model="%Y_%m_%d_%H_%M_%S") + ''.join(random.choice(string.ascii_lowercase) for x in
                                                    range(10))
    subs_temp = 'tmp/subs_temp_' + suff
    range_temp = 'tmp/ranges_' + suff
    # Exhaust the analysis() generator once just to count the targets.
    total_targets = -1
    for total_targets, _ in enumerate(
            analysis(targets, check_ranges, check_subdomains, subs_temp, range_temp, log_in_file, time_sleep,
                     language, verbose_level, show_version, check_update, socks_proxy, retries, socks_proxy, True)):
        pass
    total_targets += 1
    total_targets = total_targets * len(scan_method)
    targets = analysis(targets, check_ranges, check_subdomains, subs_temp, range_temp, log_in_file, time_sleep,
                       language, verbose_level, show_version, check_update, socks_proxy, retries, socks_proxy, False)
    trying = 0
    for target in targets:
        for sm in scan_method:
            trying += 1
            p = multiprocessing.Process(target=start_attack, args=(
                str(target).rsplit()[0], trying, total_targets, sm, users, passwds, timeout_sec, thread_number,
                ports, log_in_file, time_sleep, language, verbose_level, show_version, check_update, socks_proxy,
                retries, ping_flag, methods_args))
            p.start()
            # Throttle: wait until fewer than thread_number_host children are alive.
            # active_children() join()s and drops finished processes itself,
            # so no manual is_alive() bookkeeping is needed.
            while len(multiprocessing.active_children()) >= thread_number_host:
                time.sleep(0.01)

    # Wait for every child to finish; Ctrl-C terminates them all instead.
    while True:
        try:
            if not any(p.is_alive() for p in multiprocessing.active_children()):
                break
            time.sleep(0.01)
        except KeyboardInterrupt:
            for process in multiprocessing.active_children():
                process.terminate()
            break
    info(messages(language, 42))
    os.remove(subs_temp)
    os.remove(range_temp)
    info(messages(language, 43))
    sort_logs(log_in_file, language, graph_flag)
    write('\n')
    info(messages(language, 44))
    write('\n\n')
    finish()
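The throttling idiom above generalizes; a hedged sketch (thread_number_host-style limit and the 0.01 s poll interval come from the snippet, the helper name is mine):

import time
import multiprocessing

def wait_for_slot(limit, poll=0.01):
    """Block until fewer than `limit` child processes are alive."""
    # len(active_children()) is exactly the number of live workers,
    # since finished children are join()ed and dropped by the call.
    while len(multiprocessing.active_children()) >= limit:
        time.sleep(poll)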
예제 #40
0
def launcher():
    """Starts eye processes. Hosts the IPC Backbone and Logging functions.

    Reacts to notifications:
       ``launcher_process.should_stop``: Stops the launcher process
       ``eye_process.should_start``: Starts the eye process
    """

    #Reliable msg dispatch to the IPC via push bridge.
    def pull_pub(ipc_pub_url, pull):
        ctx = zmq.Context.instance()
        pub = ctx.socket(zmq.PUB)
        pub.connect(ipc_pub_url)

        while True:
            m = pull.recv_multipart()
            pub.send_multipart(m)

    #The delay proxy handles delayed notifications.
    def delay_proxy(ipc_pub_url, ipc_sub_url):
        ctx = zmq.Context.instance()
        sub = zmq_tools.Msg_Receiver(ctx, ipc_sub_url, ('delayed_notify', ))
        pub = zmq_tools.Msg_Dispatcher(ctx, ipc_pub_url)
        poller = zmq.Poller()
        poller.register(sub.socket, zmq.POLLIN)
        waiting_notifications = {}

        while True:
            if poller.poll(timeout=250):
                #Recv new delayed notification and store it.
                topic, n = sub.recv()
                n['_notify_time_'] = time() + n['delay']
                waiting_notifications[n['subject']] = n
            #When a notification's time has come, pop it from the dict and send it as a notification
            for s, n in list(waiting_notifications.items()):
                if n['_notify_time_'] < time():
                    del n['_notify_time_']
                    del n['delay']
                    del waiting_notifications[s]
                    pub.notify(n)

    #Recv log records from other processes.
    def log_loop(ipc_sub_url, log_level_debug):
        import logging
        #Get the root logger
        logger = logging.getLogger()
        #set log level
        if log_level_debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
        #Stream to file
        fh = logging.FileHandler(os.path.join(user_dir, '{}.log'.format(app)),
                                 mode='w')
        fh.setFormatter(
            logging.Formatter(
                '%(asctime)s - %(processName)s - [%(levelname)s] %(name)s: %(message)s'
            ))
        logger.addHandler(fh)
        #Stream to console.
        ch = logging.StreamHandler()
        ch.setFormatter(
            logging.Formatter(
                '%(processName)s - [%(levelname)s] %(name)s: %(message)s'))
        logger.addHandler(ch)
        # IPC setup to receive log messages. Use zmq_tools.ZMQ_handler to send messages to here.
        sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                     ipc_sub_url,
                                     topics=("logging", ))
        while True:
            topic, msg = sub.recv()
            record = logging.makeLogRecord(msg)
            logger.handle(record)

    ## IPC
    timebase = Value(c_double, 0)
    eyes_are_alive = Value(c_bool, 0), Value(c_bool, 0)

    zmq_ctx = zmq.Context()

    #Let the OS choose the IP and PORT
    ipc_pub_url = 'tcp://*:*'
    ipc_sub_url = 'tcp://*:*'
    ipc_push_url = 'tcp://*:*'

    # Binding IPC Backbone Sockets to URLs.
    # They are used in the threads started below.
    # Using them in the main thread is not allowed.
    xsub_socket = zmq_ctx.socket(zmq.XSUB)
    xsub_socket.bind(ipc_pub_url)
    ipc_pub_url = xsub_socket.last_endpoint.decode('utf8').replace(
        "0.0.0.0", "127.0.0.1")

    xpub_socket = zmq_ctx.socket(zmq.XPUB)
    xpub_socket.bind(ipc_sub_url)
    ipc_sub_url = xpub_socket.last_endpoint.decode('utf8').replace(
        "0.0.0.0", "127.0.0.1")

    pull_socket = zmq_ctx.socket(zmq.PULL)
    pull_socket.bind(ipc_push_url)
    ipc_push_url = pull_socket.last_endpoint.decode('utf8').replace(
        "0.0.0.0", "127.0.0.1")

    # Starting communication threads:
    # A ZMQ Proxy Device serves as our IPC Backbone
    ipc_backbone_thread = Thread(target=zmq.proxy,
                                 args=(xsub_socket, xpub_socket))
    ipc_backbone_thread.daemon = True
    ipc_backbone_thread.start()

    pull_pub = Thread(target=pull_pub, args=(ipc_pub_url, pull_socket))
    pull_pub.daemon = True
    pull_pub.start()

    log_thread = Thread(target=log_loop,
                        args=(ipc_sub_url, 'debug' in sys.argv))
    log_thread.daemon = True
    log_thread.start()

    delay_thread = Thread(target=delay_proxy, args=(ipc_push_url, ipc_sub_url))
    delay_thread.daemon = True
    delay_thread.start()

    del xsub_socket, xpub_socket, pull_socket

    topics = ('notify.eye_process.', 'notify.player_process.',
              'notify.world_process.', 'notify.service_process',
              'notify.clear_settings_process.', 'notify.player_drop_process.',
              'notify.launcher_process.', 'notify.meta.should_doc',
              'notify.circle_detector_process.should_start',
              'notify.ipc_startup')
    cmd_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=topics)
    cmd_push = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

    while True:
        # Wait until the subscription is successful
        cmd_push.notify({'subject': 'ipc_startup'})
        if cmd_sub.socket.poll(timeout=50):
            cmd_sub.recv()
            break

    if app == 'service':
        cmd_push.notify({'subject': 'service_process.should_start'})
    elif app == 'capture':
        cmd_push.notify({'subject': 'world_process.should_start'})
    elif app == 'player':
        rec_dir = os.path.expanduser(sys.argv[-1])
        cmd_push.notify({
            'subject': 'player_drop_process.should_start',
            'rec_dir': rec_dir
        })

    with Prevent_Idle_Sleep():
        while True:
            # listen for relevant messages.
            if cmd_sub.socket.poll(timeout=1000):
                topic, n = cmd_sub.recv()
                if "notify.eye_process.should_start" in topic:
                    eye_id = n['eye_id']
                    Process(target=eye,
                            name='eye{}'.format(eye_id),
                            args=(timebase, eyes_are_alive[eye_id],
                                  ipc_pub_url, ipc_sub_url, ipc_push_url,
                                  user_dir, app_version, eye_id,
                                  n.get('overwrite_cap_settings'))).start()
                elif "notify.player_process.should_start" in topic:
                    Process(target=player,
                            name='player',
                            args=(
                                n['rec_dir'],
                                ipc_pub_url,
                                ipc_sub_url,
                                ipc_push_url,
                                user_dir,
                                app_version,
                            )).start()
                elif "notify.world_process.should_start" in topic:
                    Process(target=world,
                            name='world',
                            args=(
                                timebase,
                                eyes_are_alive,
                                ipc_pub_url,
                                ipc_sub_url,
                                ipc_push_url,
                                user_dir,
                                app_version,
                            )).start()
                elif "notify.clear_settings_process.should_start" in topic:
                    Process(target=clear_settings,
                            name='clear_settings',
                            args=(user_dir, )).start()
                elif "notify.service_process.should_start" in topic:
                    Process(target=service,
                            name='service',
                            args=(timebase, eyes_are_alive, ipc_pub_url,
                                  ipc_sub_url, ipc_push_url, user_dir,
                                  app_version)).start()
                elif "notify.player_drop_process.should_start" in topic:
                    Process(target=player_drop,
                            name='player',
                            args=(
                                n['rec_dir'],
                                ipc_pub_url,
                                ipc_sub_url,
                                ipc_push_url,
                                user_dir,
                                app_version,
                            )).start()
                elif "notify.circle_detector_process.should_start" in topic:
                    Process(target=circle_detector,
                            name='circle_detector',
                            args=(ipc_push_url, n['pair_url'],
                                  n['source_path'])).start()
                elif "notify.meta.should_doc" in topic:
                    cmd_push.notify({
                        'subject': 'meta.doc',
                        'actor': 'launcher',
                        'doc': launcher.__doc__
                    })
            else:
                if not active_children():
                    break

        for p in active_children():
            p.join()
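The IPC backbone above boils down to an XSUB/XPUB proxy; a self-contained sketch under the assumption that pyzmq is available (socket and variable names here are illustrative):

import threading
import zmq

ctx = zmq.Context.instance()
xsub = ctx.socket(zmq.XSUB)      # publishers connect here
xsub.bind('tcp://127.0.0.1:*')   # let the OS pick the port
xpub = ctx.socket(zmq.XPUB)      # subscribers connect here
xpub.bind('tcp://127.0.0.1:*')
pub_url = xsub.last_endpoint.decode('utf8')
sub_url = xpub.last_endpoint.decode('utf8')

# zmq.proxy() blocks, so the backbone runs on a daemon thread.
threading.Thread(target=zmq.proxy, args=(xsub, xpub), daemon=True).start()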
예제 #41
0
    # p.close()
    # p.join()
    # print("All subprocesses done.")

    # subprocess: run an external command in a child process
    # print('$ nslookup www.python.org')
    # r = subprocess.call(['nslookup', 'www.python.org'])
    # print('Exit code:', r)
    # Inter-process communication using a Queue
    # q = Queue()
    # pw = Process(target=write,args=(q,))
    # pr = Process(target=read,args=(q,))
    #
    # pw.start()
    # pr.start()
    #
    # pw.join()

    # The pr process loops forever and can't be join()ed, so terminate it
    # pr.terminate()

    for i in range(5):
        p = Process(target=process, args=(i, ))
        p.start()

    print('CPU number:' + str(cpu_count()))
    for p in active_children():
        print('Child process name: ' + p.name + ' id: ' + str(p.pid))

    print('Process Ended')
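The commented-out Queue demo refers to write() and read() helpers that the snippet does not show; a plausible reconstruction (my assumption, not the author's code) is:

from multiprocessing import Queue
import time

def write(q):
    for value in ['A', 'B', 'C']:
        print('Put %s to queue' % value)
        q.put(value)
        time.sleep(0.5)

def read(q):
    while True:  # endless loop, hence the terminate() note above
        value = q.get(True)
        print('Get %s from queue' % value)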
예제 #42
0
    def loop(self):
        # q = Queue()

        d = {
            'version': 1,
            'formatters': {
                'detailed': {
                    'class':
                    'logging.Formatter',
                    'format':
                    '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
                }
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'level': 'INFO',
                },
                'file': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'foofile': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-foo.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'errors': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-errors.log',
                    'mode': 'w',
                    'level': 'ERROR',
                    'formatter': 'detailed',
                },
            },
            'loggers': {
                'foo': {
                    'handlers': ['foofile']
                }
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console', 'file', 'errors']
            },
        }
        # logging.config.dictConfig(d)

        self.start()

        try:
            # logger = logging
            while self.event.is_set():
                time.sleep(1)
                alive = mp.active_children()
                print('-' * 30)
                print('Alive processes:', len(alive))
                for p in alive:
                    print('  ', p.name, p.pid)
                # debugging info here via print or logging or webpage
                # record = q.get()
                # if record:
                #     logger.handle(record)
        except (KeyboardInterrupt, SystemExit) as e:
            err = 'ctrl-C' if isinstance(e, KeyboardInterrupt) else 'exit'
            print('\n>> Received {}\n'.format(err))
            # clear the run flag so the worker loops exit
            self.event.clear()

        finally:
            self.end()
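The commented-out record queue hints at the standard multiprocessing logging pattern; a minimal runnable sketch (my assumption of the intent, using logging.handlers rather than the snippet's dictConfig):

import logging
import logging.handlers
import multiprocessing as mp

def worker(log_q):
    root = logging.getLogger()
    root.addHandler(logging.handlers.QueueHandler(log_q))  # ship records out
    root.setLevel(logging.DEBUG)
    logging.getLogger('foo').info('hello from %s', mp.current_process().name)

if __name__ == '__main__':
    log_q = mp.Queue()
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(
        '%(asctime)s %(processName)-10s %(name)s %(levelname)s %(message)s'))
    listener = logging.handlers.QueueListener(log_q, console)
    listener.start()                      # drains log_q on a helper thread
    procs = [mp.Process(target=worker, args=(log_q,)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    listener.stop()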
예제 #43
0
async def consumer_handler(websocket, path):
    global q
    global playback
    global path_icarous
    logger = logging.getLogger()

    while True:
        message = await websocket.recv()
        logger.info('SERVER: Input Message: ' + str(message))
        # parse the message
        message = message.split(' ')
        # print(message)
        # look for a message to start a new instance of icarous
        if 'NEW_AIRCRAFT' in message:
            ac = int(message[3])
            user = um.getUser(websocket)
            user.addUserAircraft(ac)
            m = manager.list([message])
            p = addProcess(to_ic, (q, m))
            m_lists.append([ac, m])
            processes.append([ac, p])
            p.start()
            logger.info('SERVER: Starting New Aircraft {}'.format(processes))

        elif 'SHUTDOWN' in message:
            logger.info('SERVER: Active children: {}'.format(
                multiprocessing.active_children()))
            if playback:
                playback = False
            # get the ac
            ac = int(message[3])
            print('SHUT_DOWN function', message)
            q.put('{"AIRCRAFT":' + str(ac) + ', "name":"SHUT_DOWN"}')
            # get the list to add it to
            m = [x[1] for x in m_lists if x[0] == ac]
            if (len(m) > 0):
                m_lists.remove([x for x in m_lists if x[0] == ac][0])
                m1 = m[0]
                m1.append(message)
                # find the process
                p = [x for x in processes if x[0] == ac]
                processes.remove(p[0])
                p = p[0][1]
                p.join(timeout=5)
                logger.info('SERVER: Process exit code:{}'.format(p.exitcode))
                if p.exitcode is None:
                    p.terminate()
                    q.close()
                    q.join_thread()
                    q = Queue()

                # remove the list
                del m
                logger.info('SERVER: Active children: {}'.format(
                    multiprocessing.active_children()))

        elif 'CHECK_PATH' in message:
            complete_path = os.path.join(os.path.expanduser('~'),
                                         message[1][1:], 'exe/cpu1/core-cpu1')
            print(complete_path)

            try:
                f = open(complete_path, 'r')
                print('valid')
                q.put(
                    '{"name":"PATH_ICAROUS", "type":"PASS", "I":"VALID PATH"}')
                path_icarous = complete_path
            except Exception as e:
                print('check path error', e)
                q.put(
                    '{"name":"PATH_ICAROUS", "type":"FAIL", "I":"INVALID PATH:"}'
                )

        elif 'CHECK_PATH_A' in message:
            complete_path = os.path.join(os.path.expanduser('~'),
                                         message[1][1:],
                                         'Tools/autotest/sim_vehicle.py')
            print(complete_path)
            try:
                f = open(complete_path, 'r')
                print('valid')
                q.put(
                    '{"name":"PATH_ARDUPILOT", "type":"PASS", "I":"VALID PATH"}'
                )
            except Exception as e:
                print('check path error', e)
                q.put(
                    '{"name":"PATH_ARDUPILOT", "type":"FAIL", "I":"INVALID PATH:"}'
                )

        elif 'AIRCRAFT' in message:
            if message[1] != 'None' and not playback:

                if 'HITL_DISCONNECT' in message:

                    ac = int(message[3])
                    print('HITL_DISCONNECT function', message)
                    q.put('{"AIRCRAFT":' + message[3] +
                          ', "name":"SHUT_DOWN"}')
                    # get the list to add it to
                    m = [x[1] for x in m_lists if x[0] == ac]
                    try:
                        m_lists.remove([x for x in m_lists if x[0] == ac][0])
                    except IndexError:
                        print('HITL Disconnect Error')
                        logger.error(
                            'SERVER: AC not found in list. Unable to shut down.',
                            exc_info=True)
                    m1 = m[0]
                    m1.append(message)
                    # find the process and remove it
                    p = [x for x in processes if x[0] == ac]
                    processes.remove(p[0])
                    p = p[0][1]

                    # give icarous time to shut down
                    time.sleep(2)
                    p.join(timeout=5)
                    logger.info('SERVER: Process exit code:{}'.format(
                        p.exitcode))
                    if p.exitcode is None:
                        p.terminate()
                        q.close()
                        q.join_thread()
                        q = Queue()
                    del m
                    logger.info('SERVER: Active children: {}'.format(
                        multiprocessing.active_children()))

                else:
                    ac = int(message[1])
                    # get the list to add it to
                    m = [x[1] for x in m_lists if x[0] == ac]
                    try:
                        m = m[0]
                        m.append(message)
                    except Exception as e:
                        print('could not append message', e)
                        print('playback', playback)

            elif 'HITL' in message:
                print(message)
                ac = int(message[3])
                # setup the new process
                user = um.getUser(websocket)
                user.addUserAircraft(ac)
                m = manager.list([message])
                p = addProcess(to_ic, (q, m))
                m_lists.append([ac, m])
                print('m lists', m_lists)
                processes.append([ac, p])
                print('processes', processes)
                p.start()
                logger.info('SERVER: Starting HITL {}'.format(processes))

            elif 'READ_USER_SETTINGS' in message:
                settings = UM.readUserSettings()
                q.put('{"name":"USER_SETTINGS"' + settings + '}')
                logger.info('SERVER: Read User Settings {}'.format(settings))

            elif 'SAVE_USER_SETTINGS' in message:
                msg = UM.saveUserSettings(message)
                q.put('{"name":"USER_SETTINGS_SAVED", "INFO":"' + msg + '"}')
                logger.info('SERVER: Saved User Settings {}'.format(message))

            elif 'RESET_USER_SETTINGS' in message:
                settings = UM.readUserSettings(0)
                q.put('{"name":"USER_SETTINGS_RESET"' + settings + '}')
                logger.info('SERVER: Reset User Settings {}'.format(settings))

            elif 'PLAYBACK' in message:

                if 'START' in message:
                    playback = True
                    # setup the new process
                    user = um.getUser(websocket)
                    user.addUserAircraft(-1)
                    m = manager.list([message])
                    p = addProcess(to_ic, (q, m))
                    m_lists.append([-1, m])
                    processes.append([-1, p])
                    p.start()
                    logger.info(
                        'SERVER: Starting Playback {}'.format(processes))

                else:
                    #  pass the message
                    m.append(message)

            else:
                logger.info(
                    'SERVER: Ignoring unassigned aircraft messages for now. {}'
                    .format(message))

        elif 'ADD_TRAFFIC' in message:
            for m in m_lists:
                x = ['AIRCRAFT '] + [m[0]] + message
                m[1].append(x)

        elif 'REMOVE_TRAFFIC' in message:
            for m in m_lists:
                x = ['AIRCRAFT '] + [m[0]] + message
                m[1].append(x)

        else:
            logger.info('SERVER: Undefined Input Message: {}'.format(message))
예제 #44
0
def myexcepthook(exctype, value, tb):
    for p in active_children():
        print(f"terminating {p}")
        p.terminate()
    sys.__excepthook__(exctype, value, tb)
    sys.exit(-1)
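Usage sketch (not shown in the snippet): install the hook so any uncaught exception in the main process tears down the children:

import sys
sys.excepthook = myexcepthook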
예제 #45
0
	name = splited_line[name_index]
	URL = splited_line[URL_index]
	ID = splited_line[ID_index]
	FACE_rect = splited_line[FACE_rect_index]
	rect_array = FACE_rect.split(',')
	
	if name != compare_name:
		compare_name = name
		try:
			os.makedirs("images/" + name)
		except OSError:
			pass  # directory already exists

	img_name = build_std_path(name,ID)
	img_path = "images/" + compare_name +"/"+ img_name + ".jpg"

	if __name__ == '__main__':
		while len(multiprocessing.active_children()) > MAX_OF_THREADS:
			pass

		p = my_process(target=get_img_from_URL,args=(URL,ID,img_path,rect_array,))
		p.start()

		p.time_out(delay)
	else:
		print "Process ERROR!"
	counter += 1

print "100%"

file.close()
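my_process.time_out() is not defined in the snippet; one plausible implementation (my assumption) is join-with-timeout followed by terminate:

def time_out(p, delay):
    p.join(delay)       # wait at most `delay` seconds
    if p.is_alive():
        p.terminate()   # kill the straggler
        p.join()        # reap it so it doesn't linger as a zombie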
예제 #46
0
        'login': login,
        'password': password,
        'max_mark': b1,
        'min_mark': b2
    })

    pr_status = dict()
    for i, tasks in enumerate(check_lists):
        name = f'Proc-{i}'
        multiprocessing.Process(target=worker,
                                args=(tasks, shared_data, queue),
                                name=name).start()
        pr_status[name] = ['init', 0, len(tasks)]

    no_grade = []  # links that must be graded manually
    # ">1" presumably allows for a Manager helper process, which also
    # shows up in active_children()
    while (len(multiprocessing.active_children()) > 1) or (not queue.empty()):
        if queue.empty():
            continue  # busy-wait until a worker posts a status update
        resp = queue.get()
        if (resp['status'] == 0) or (resp['status'] == 1):
            pr_status[resp['process']][1] += 1
            pr_status[resp['process']][
                0] = f"{pr_status[resp['process']][1]}/{pr_status[resp['process']][2]}"
            if resp['status'] == 1:
                no_grade.append(resp['message'])
        elif resp['status'] == 5:
            pr_status[resp['process']][0] = 'start'
        elif resp['status'] == 6:
            pr_status[resp['process']][0] = 'login'
        elif resp['status'] == 7:
            pr_status[
예제 #47
0
                'exclude': exc_dirs,
                'qtt': qtt,
                'mqi': mqi
            }
        else:
            exc_dirs = map(lambda x: '^' + os.path.join(src_dir, x) + '.*',
                           tmp_exclude_dirs)
            sections[section] = {
                'src': src_dir,
                'dsts': dst_dirs,
                'include': [src_dir],
                'exclude': exc_dirs,
                'qtt': qtt,
                'mqi': mqi
            }

    return sections


if __name__ == "__main__":
    sinkronconf = read_config()

    for section in sinkronconf:
        # each section takes one process
        sectionjob = multiprocessing.Process(target=section_job,
                                             args=(sinkronconf[section], ))
        sectionjob.start()

    while multiprocessing.active_children():
        time.sleep(1)
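An equivalent, poll-free wait (a sketch; safe here because no new children are spawned after this point):

    for child in multiprocessing.active_children():
        child.join()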
예제 #48
0
def run_master(stats, args, pipes):
    # for filtering duplicate results (found near-simultaneously by 2+ children)
    # and spurious results (if using improved-implies and a child reaches a point that
    # suddenly becomes blocked by new blocking clauses, it could return that incorrectly
    # as an MUS or MCS)
    # Need to parse the constraint set (again!) just to get n for the map formula...
    csolver = setup_csolver(args, seed=None)
    msolver = mapsolvers.MinisatMapSolver(csolver.n)
    # Old way: results = set()

    remaining = args.limit

    while multiprocessing.active_children() and pipes:
        ready, _, _ = select.select(pipes, [], [])
        with stats.time('hubcomms'):
            for receiver in ready:
                while receiver.poll():
                    try:
                        # get a result
                        result = receiver.recv()
                    except EOFError:
                        # Sometimes a closed pipe will still trigger ready and .poll(),
                        # but it then throws an EOFError on .recv().  Handle that here.
                        pipes.remove(receiver)
                        break

                    if result[0] == 'done':
                        # "done" indicates the child process has finished its work,
                        # but enumeration may not be complete (if the child was only
                        # enumerating MCSes, e.g.)
                        if args.verbose > 1:
                            print("Child (%s) sent 'done'." % receiver)
                        # Terminate the child process.
                        receiver.send('terminate')
                        # Remove it from the list of active pipes
                        pipes.remove(receiver)

                    elif result[0] == 'complete':
                        # "complete" indicates the child process has completed enumeration,
                        # with everything blocked.  Everything can be stopped at this point.
                        if args.verbose > 1:
                            print("Child (%s) sent 'complete'." % receiver)

                        # TODO: print children's results, but differentiate somehow...
                        #if args.stats:
                        #    # Print received stats
                        #    at_exit(result[1])

                        # End / cleanup all children
                        for pipe in pipes:
                            pipe.send('terminate')
                        # Exit main process
                        sys.exit(0)

                    else:
                        assert result[0] in ['U', 'S']
                        # filter out duplicate / spurious results
                        with stats.time('msolver'):
                            if not msolver.check_seed(result[1]):
                                if args.verbose > 1:
                                    print(
                                        "Child (%s) sent duplicate (len: %d)" %
                                        (receiver, len(result[1])))
                                if result[0] == 'U':
                                    stats.increment_counter("duplicate MUS")
                                else:
                                    stats.increment_counter("duplicate MSS")

                                # already found/reported/explored
                                continue

                        with stats.time('msolver_block'):
                            if result[0] == 'U':
                                msolver.block_up(result[1])
                            elif result[0] == 'S':
                                msolver.block_down(result[1])

                        # Old way to check duplicates:
                        #res_set = frozenset(result[1])
                        #res_set = ",".join(str(x) for x in result[1])
                        #if res_set in results:
                        #    continue

                        #results.add(res_set)

                        print_result(result, args, stats, csolver.n)

                        if remaining:
                            remaining -= 1
                            if remaining == 0:
                                sys.stderr.write("Result limit reached.\n")
                                # End / cleanup all children
                                for pipe in pipes:
                                    pipe.send('terminate')
                                # Exit main process
                                sys.exit(0)

                        if not args.comms_disable:
                            # send it to all children *other* than the one we got it from
                            for other in pipes:
                                if other != receiver:
                                    other.send(result)
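A stripped-down sketch of the hub pattern above, assuming POSIX (where multiprocessing Connection objects are selectable); the names and the toy payload are mine:

import multiprocessing
import select

def child(conn, ident):
    conn.send(('S', [ident]))   # a toy result
    conn.send(('done', None))
    conn.close()

if __name__ == '__main__':
    pipes = []
    for i in range(4):
        master_end, worker_end = multiprocessing.Pipe()
        multiprocessing.Process(target=child, args=(worker_end, i)).start()
        worker_end.close()      # the master keeps only its own end
        pipes.append(master_end)
    while multiprocessing.active_children() and pipes:
        ready, _, _ = select.select(pipes, [], [])
        for receiver in ready:
            try:
                result = receiver.recv()
            except EOFError:    # a closed pipe can still select() as ready
                pipes.remove(receiver)
                continue
            if result[0] == 'done':
                pipes.remove(receiver)
            else:
                print('got', result)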