Example #1
def checkmultiprocess(ipqueue,cacheResult):
    if ipqueue.qsize() == 0:
        return
    processlist = []
    "如果ip数小于512,只使用一个子进程,否则则使用指定进程数,每个进程处理平均值的数量ip"
    max_threads = g_maxthreads
    maxprocess = g_useprocess
    if ipqueue.qsize() < g_maxthreads:
        max_threads = ipqueue.qsize()
        maxprocess = 1
    else:
        max_threads = (ipqueue.qsize() + g_useprocess) / g_useprocess
        if max_threads > g_maxthreads:
            max_threads = g_maxthreads
    #multiprocessing.log_to_stderr(logging.DEBUG)
    for i in xrange(0,maxprocess):
        p = Process(target=callsingleprocess,args=(ipqueue,cacheResult,max_threads))
        p.daemon = True
        processlist.append(p)
        p.start()
    
    try:
        for p in processlist:
            p.join()
    except KeyboardInterrupt:
        PRINT("need wait all process end...")
        for p in processlist:
            if p.is_alive():
                p.terminate()  
Example #2
def main():
    d = Manager().dict()

    a = Process(target=processT, args=(1,d))
    b = Process(target=processT, args=(2,d))

    a.daemon = False
    b.daemon = False

    a.start()
    b.start()


    time.sleep(2)
    d[1]=False
    print d
    time.sleep(5)

    d[1]=True

    print d

    time.sleep(2)
    d[2] = False
    print d
    time.sleep(5)

    d[1] = False
Example #3
 def run(self):
     '''
     Process jobs in parallel.
     jobs: an iterator or generator
     '''
     workers = []
     
     # add jobs to the queue
     worker = Process(target = ADD_JOB,args=(self.job_queue,))
     worker.daemon = True
     worker.start()
     workers.append(worker)
         
     for i in range(self.process_num):
         worker = Process(target = manual_function,args=(self.function,self.job_queue, self.result_queue))
         worker.daemon = True
         worker.start()
         workers.append(worker)
         
     #map(lambda worker:worker.start(),workers)
    
     try:
         map(lambda worker:worker.join(),workers)
     except KeyboardInterrupt:
         print 'parent received ctrl-c'
         map(lambda worker:worker.terminate(),workers)
         map(lambda worker:worker.join(),workers)
def serve():
    global serving

    if serving:
        return

    serving = True

    def _serve(dir, port, bad_ssl_cert=False):
        base_dir = os.path.join('tests', dir)
        os.chdir(base_dir)
        server = HTTPServer(('', port), SimpleHTTPRequestHandler)
        if bad_ssl_cert:
            server.socket = ssl.wrap_socket(server.socket,
                                            server_side=True,
                                            certfile=os.path.join(
                                                '..', 'cert.pem')
                                            )
        server.serve_forever()

    proc_site = Process(target=_serve, args=('site', 8000))
    proc_site.daemon = True
    proc_site.start()

    proc_external_site = Process(target=_serve, args=('external-site', 8001))
    proc_external_site.daemon = True
    proc_external_site.start()

    proc_bad_ssl = Process(target=_serve, args=('one-page-site', 8002, True))
    proc_bad_ssl.daemon = True
    proc_bad_ssl.start()
Example #5
def main():
    logging.basicConfig(level=logging.DEBUG)
    event_queue = Queue()
    led_control_queue = Queue()

    monitor = Process(target=button_monitor.run, args=(event_queue,))
    monitor.daemon = True
    monitor.start()

    lcon = Process(target=led_controller.run, args=(
        event_queue, led_control_queue))
    lcon.daemon = True
    lcon.start()

    eloop = Process(target=dispatcher.run, args=({
        'events': event_queue,
        'led_control': led_control_queue,
    },))
    eloop.daemon = True
    eloop.start()

    try:
        monitor.join()
        eloop.join()
        lcon.join()
    except KeyboardInterrupt:
        pass

    logging.info('Successfully quit')
Example #6
def mp_extract(jobs, nWorkers):

    procs = []

    ctarget = len(jobs)
    count = Value("i", 0)

    q_read = Queue(5)
    q_work = Queue()

    # start the reading process
    p = Process(target=read, args=[jobs, q_read])
    p.daemon = True
    p.start()

    # start the worker processes
    for i in range(nWorkers):
        p = Process(target=work, args=[q_read, q_work, count, ctarget])
        p.daemon = True
        p.start()
        procs.append(p)

    # start the saver process
    p = Process(target=save, args=[q_work, ctarget])
    p.daemon = True
    p.start()
    p.join()

    for p in procs:
        p.join()
def p3_add():
    conn = Connection()
    db = conn.wc
    InvertHour = returnInvertedHour(HOUR)
    for lang in LANGLIST:
        if WEEKDAY == '5':
            HOURDAYDB = str(lang) + '_hitshourlydaily'
            db[HOURDAYDB].update({str(InvertHour): {'$exists': True}}, {'$set': {str(InvertHour): 0}}, False,
                                 {'multi': True})
        ruFILE1 = "/tmp/" + str(lang) + "_action/q1_pagecounts.*"
        ruFILE2 = "/tmp/" + str(lang) + "_action/q2_pagecounts.*"
        ruFILE3 = "/tmp/" + str(lang) + "_action/q3_pagecounts.*"
        ruFILE4 = "/tmp/" + str(lang) + "_action/q4_pagecounts.*"

        t = Process(target=UpdateHits, args=(ruFILE1, HOUR, DAY, MONTH, YEAR, lang))
        u = Process(target=UpdateHits, args=(ruFILE2, HOUR, DAY, MONTH, YEAR, lang))
        v = Process(target=UpdateHits, args=(ruFILE3, HOUR, DAY, MONTH, YEAR, lang))
        w = Process(target=UpdateHits, args=(ruFILE4, HOUR, DAY, MONTH, YEAR, lang))

        t.daemon = True
        u.daemon = True
        v.daemon = True
        w.daemon = True

        t.start()
        u.start()
        v.start()
        w.start()

        t.join()
        u.join()
        v.join()
        w.join()
Example #8
    def setup(self):
        create_link('dummyX', 'dummy')
        t_url = 'unix://\0%s' % (uuid.uuid4())
        p_url = 'unix://\0%s' % (uuid.uuid4())
        self.connect = Event()
        self.release = Event()

        target = Process(target=_run_remote_uplink,
                         args=(t_url, self.connect, self.release))
        target.daemon = True
        target.start()
        self.connect.wait()
        self.connect.clear()

        proxy = Process(target=_run_remote_uplink,
                        args=(p_url, self.connect, self.release))
        proxy.daemon = True
        proxy.start()
        self.connect.wait()
        self.connect.clear()

        self.ip = IPRoute(do_connect=False)
        link, proxy = self.ip.connect(p_url)
        self.ip.register('bala', proxy)
        link, host = self.ip.connect(t_url, addr=proxy)
        service = self.ip.discover(self.ip.default_target, addr=host)

        self.ip.default_peer = host
        self.ip.default_dport = service

        self.dev = self.ip.link_lookup(ifname='dummyX')
    def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
        """
        Starts a writer process, a verifier process, a queue to track writes,
        and a queue to track successful verifications (which are rewrite candidates).

        wait_for_rowcount provides a number of rows to write before unblocking and continuing.

        Returns the writer process, verifier process, and the to_verify_queue.
        """
        # queue of writes to be verified
        to_verify_queue = Queue()
        # queue of verified writes, which are update candidates
        verification_done_queue = Queue(maxsize=500)

        writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
        # daemon subprocesses are killed automagically when the parent process exits
        writer.daemon = True
        self.subprocs.append(writer)
        writer.start()

        if wait_for_rowcount > 0:
            self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)

        verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
        # daemon subprocesses are killed automagically when the parent process exits
        verifier.daemon = True
        self.subprocs.append(verifier)
        verifier.start()

        return writer, verifier, to_verify_queue
Example #10
def main():
	
	inifile = ConfigParser.SafeConfigParser()
	inifile.read("polecolor.ini")
	teamcolor = str(inifile.get("team", "color"))

	abe = absclass.AbsEncoder()
	abe.SetOffset()

	pole = poleclass.Pole("polecolor.ini")
	q_turret = Queue(maxsize = 1)
	q_debug = Queue(maxsize = 5)

	p_send = Process(target = sendData, args = (q_turret, ))
	p_debug = Process(target = debug, args = (q_debug, ))
	p_send.daemon = True
	p_debug.daemon = True
	p_debug.start()
	p_send.start()

	while True:
		while gpio.input(19):
			pass
			#q_debug.put(["not auto"])

		for i in [3, 2, 1, 3, 2, 1]:
			autoFire(abe, pole, q_debug, q_turret, teamcolor, i)
			time.sleep(5)

		q_turret.put("notauto")
		while not gpio.input(19):
			q_debug.put(["back please"])
    def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
        """
        Starts a counter incrementer process, a verifier process, a queue to track writes,
        and a queue to track successful verifications (which are re-increment candidates).

        Returns the writer process, verifier process, and the to_verify_queue.
        """
        # queue of writes to be verified
        to_verify_queue = Queue()
        # queue of verified writes, which are update candidates
        verification_done_queue = Queue(maxsize=500)

        incrementer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
        # daemon subprocesses are killed automagically when the parent process exits
        incrementer.daemon = True
        self.subprocs.append(incrementer)
        incrementer.start()

        if wait_for_rowcount > 0:
            self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)

        count_verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
        # daemon subprocesses are killed automagically when the parent process exits
        count_verifier.daemon = True
        self.subprocs.append(count_verifier)
        count_verifier.start()

        return incrementer, count_verifier, to_verify_queue
def parallel_precompute(global_conf_file=None):
    # Define queues
    queueIn = Queue(nb_workers+2)
    queueOut = Queue(nb_workers+8)
    queueProducer = Queue()
    queueFinalizer = Queue()
    queueConsumer = Queue(nb_workers)

    # Start finalizer
    t = Process(target=finalizer, args=(global_conf_file, queueOut, queueFinalizer))
    t.daemon = True
    t.start()
    # Start consumers
    for i in range(nb_workers):
        t = Process(target=consumer, args=(global_conf_file, queueIn, queueOut, queueConsumer))
        t.daemon = True
        t.start()
    # Start producer
    t = Process(target=producer, args=(global_conf_file, queueIn, queueProducer))
    t.daemon = True
    t.start()

    # Wait for everything to be started properly
    producerOK = queueProducer.get()
    finalizerOK = queueFinalizer.get()
    for i in range(nb_workers):
        consumerOK = queueConsumer.get()
    print "[parallel_precompute: log] All workers are ready."
    sys.stdout.flush()
    # Wait for everything to be finished
    finalizerEnded = queueFinalizer.get()
    print "[parallel_precompute: log] Done at {}".format(get_now())
    return
def setup():
    r = requests.get('http://localhost:8888/sign_in?ocupus_orchestrator')

    peers = dict()

    connected = r.text.split("\n")

    my_info = connected[0].split(",")
    my_id = int(my_info[1])

    messages = Queue()

    initial_peer_id = -1

    for l in connected[1:]:
        info = l.split(",")
        if len(info) > 1:
            if info[0] == "receiver":
                initial_peer_id = int(info[1])

    t = threading.Thread(target=hanging_get, args = (my_id, messages, initial_peer_id))
    t.daemon = True
    t.start()
    print "Started hanging get thread"    
    p = Process(target=from_remote_server, args=(5554,messages))
    p.daemon = True
    p.start()

    sysproc = Process(target=system_utilities.power_control_listener)
    sysproc.daemon = True
    sysproc.start()

    print "Started xmq process"
Example #14
    def run(self):
        tsdb_type = config.get('TSDB', 'tsdtype')
        if tsdb_type in ('OddEye', 'InfluxDB', 'KairosDB', 'OpenTSDB'):
            def run_normal():
                while True:
                    run_scripts()
                    run_shell_scripts()
                    time.sleep(cron_interval)

            def run_cache():
                while True:
                    upload_cache()
                    time.sleep(cron_interval)

            from multiprocessing import Process


            p1 = Process(target=run_normal)
            p1.daemon = True
            p1.start()

            p2 = Process(target=run_cache)  # pass the callable itself; calling run_cache() here would loop forever before the Process is created
            if not p2.is_alive():
                p2.daemon = True
                p2.start()
                p2.join()
            p1.join()
        else:
            while True:
                run_scripts()
                run_shell_scripts()
                time.sleep(cron_interval)
Example #15
 def spawn_process(self, target, *args):
     """
     :type target: function or class
     """
     p = Process(target=target, args=args)
     p.daemon = True
     if target == worker:
         p.daemon = Conf.DAEMONIZE_WORKERS
         p.timer = args[2]
         self.pool.append(p)
     p.start()
     return p
Example #16
def start_listeners():
    """Start the listener servers."""
    listeners = []
    telnet_server = Process(target=_start_telnet_server)
    telnet_server.daemon = True
    telnet_server.start()
    listeners.append(telnet_server)
    if CLI.args.ws:
        websocket_server = Process(target=_start_websocket_server)
        websocket_server.daemon = True
        websocket_server.start()
        listeners.append(websocket_server)
    return listeners
Example #17
    def start(self):

        self.daemons = []
        if self.configs["no_upstream_all"] or self.configs["no_upstream_eap"]:
            proc = Process(target=self._start, args=(self.configs["phy0"],))
            proc.daemon = True
            proc.start()
            self.daemons.append(proc)
        print self.configs["phy"]
        proc = Process(target=self._start, args=(self.configs["phy"],))
        proc.daemon = True
        proc.start()
        self.daemons.append(proc)
def me_multiprocessing_need_queue():
    # For other multiprocessing usage, see recommending/init_index.py
    # A queue can be used directly to synchronize data
    p_fi = Process(target=me_threading, args=(chr_need_print_2[0],))
    p_fi.daemon = True
    p_fi.start()

    p_se = Process(target=me_threading, args=(chr_need_print_2[1],))
    p_se.daemon = True
    p_se.start()

    p_fi.join()
    p_se.join()
Example #19
def run_processes(target, inqueue, outqueue=None):
    threads = max(1, (NUM_THREADS - 1))
    for n in range(threads):
        if outqueue:
            p = Process(target=target, args=(n, inqueue, outqueue))
        else:
            p = Process(target=target, args=(n, inqueue))
        p.daemon = True
        p.start()
    pp = Process(target=__progress, args=(-1, inqueue))
    pp.daemon = True
    pp.start()
    inqueue.close()
    inqueue.join()
Example #20
def main():
    proxy_queue = Queue()
    proxy_hosts = Queue()

    create_db()
    # query the urls
    DB_CONN = get_conn()
    c = DB_CONN.cursor()
    LazyFW.log(r'''SELECT count(*) as `cnt` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    c.execute(r'''SELECT count(*) as `cnt` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    proxys = c.fetchone()
    c.close()
    if proxys[0] < 10:
        proxy_urls = get_proxy_urls()
        for url in proxy_urls:
            proxy_queue.put_nowait(url)

        workers = []
        for i in range(PROXY_THREAD_FETCH_MAX):
            p = Process(target=worker, args=('fetch_proxy', proxy_queue))
            p.daemon = True
            p.start()
            workers.append(p)

        for p in workers:
            p.join()
    DB_CONN.commit()
    DB_CONN.close()

    # query the data again
    DB_CONN = get_conn()
    LazyFW.log(r'''SELECT `host`,`port` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    c = DB_CONN.cursor()
    c.execute(r'''SELECT `host`,`port` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    for row in c.fetchall():
        proxy_hosts.put_nowait(row)

    c.close()
    DB_CONN.commit()
    DB_CONN.close()

    workers = []
    for i in range(PROXY_THREAD_TEST_PROXY_MAX):
        p = Process(target=worker, args=('proxy_test', proxy_hosts))
        p.daemon = True
        p.start()
        workers.append(p)

    for p in workers:
        p.join()
def read_inst(packet, command):
    command = decrypt_val(command)
    cmd = command.split(' ', 1)
    if(cmd[0] == "run"):
        cmdProc = Process(target=run_cmd, args=(packet, cmd[1],))
        cmdProc.daemon = True
        cmdProc.start()
        # run_cmd(packet, cmd[1])
    elif(cmd[0] == "watch"):
        fileProc = Process(target=watch_dir, args=(packet, cmd[1],))
        fileProc.daemon = True
        fileProc.start()
        # watch_dir(packet, cmd[1])
    else:
        print(cmd)
Example #22
def main():
    rate = Value('d',0.5)
    scale = Value('i',100)

    a = Process(target=processA, args=(rate,scale))
    b = Process(target=processB, args=(rate,scale))

    a.daemon = True
    b.daemon = True

    a.start()
    b.start()

    a.join()
    b.join()
Example #23
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == "":
         QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings("deauth", "select",None,False)
         self.args = str(self.xmlcheck.xmlSettings("mdk3","arguments", None, False))
         if self.deauth_check == "packets_scapy":
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text())))
             print("[*] deauth Attack On:"+self.bssid)
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             if path.isfile(popen('which mdk3').read().split("\n")[0]):
                 self.AttackStatus(True)
                 t = ProcessThread(("mdk3 %s %s %s"%(self.interface,self.args,self.bssid)).split())
                 t.name = "mdk3"
                 threadloading['mdk3'].append(t)
                 t.start()
             else:
                 QMessageBox.information(self,'Error mdk3','mdk3 not installed')
                 set_monitor_mode(self.get_placa.currentText()).setDisable()
    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""

        #this is a shared state that can tell the workers to exit when set as false
        self.isRunning.value = True

        #first bind and listen to the port
        self.serverTransport.listen()

        #fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except (Exception) as x:
                logging.exception(x)

        #wait until the condition is set by stop()

        while True:

            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except (Exception) as x:
                logging.exception(x)

        self.isRunning.value = False
Example #25
def save_log_playback(self, settings):
    """
    Calls :func:`_save_log_playback` via a :py:class:`multiprocessing.Process`
    so it doesn't cause the :py:class:`~tornado.ioloop.IOLoop` to block.
    """
    settings['container'] = self.ws.container
    settings['prefix'] = self.ws.prefix
    settings['user'] = user = self.current_user['upn']
    settings['users_dir'] = os.path.join(self.ws.settings['user_dir'], user)
    settings['gateone_dir'] = GATEONE_DIR
    settings['url_prefix'] = self.ws.settings['url_prefix']
    settings['256_colors'] = get_256_colors(self)
    q = Queue()
    global PROC
    PROC = Process(target=_save_log_playback, args=(q, settings))
    PROC.daemon = True # We don't care if this gets terminated mid-process.
    io_loop = tornado.ioloop.IOLoop.instance()
    def send_message(fd, event):
        """
        Sends the log enumeration result to the client.  Necessary because
        IOLoop doesn't pass anything other than *fd* and *event* when it handles
        file descriptor events.
        """
        io_loop.remove_handler(fd)
        message = q.get()
        self.write_message(message)
    # This is kind of neat:  multiprocessing.Queue() instances have an
    # underlying fd that you can access via the _reader:
    io_loop.add_handler(q._reader.fileno(), send_message, io_loop.READ)
    PROC.start()
    return
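
The comment above relies on an implementation detail: a multiprocessing.Queue exposes the read end of its underlying pipe through the private _reader attribute, so an event loop can wait on that file descriptor instead of blocking on q.get(). A minimal, Unix-oriented sketch of the same trick without tornado (all names below are illustrative):

import select
from multiprocessing import Process, Queue

def child(q):
    # the child pushes one message and exits
    q.put('hello from the child process')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=child, args=(q,))
    p.daemon = True
    p.start()
    # Wait until the queue's underlying pipe becomes readable, then fetch.
    select.select([q._reader.fileno()], [], [])
    print(q.get())
    p.join()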
Example #26
def spawn(script):
    (pipe_in, pipe_out) = Pipe(False)
    p = Process(target=script, args=(kicked, pipe_out))
    p.daemon = True  # If the main process crashes for any reason then kill the child process
    p.start()
    pipe_out.close()
    return (pipe_in, p)
Example #27
def initialize_workers(num_workers, target, daemon=True):
    """
    Initializes the worker processes.
    """
    workers = []
    process = None

    print '> initializing {} workers ..'.format(num_workers),

    for i in range(num_workers):
        try:
            process = Process(target=target, args=(i + 1,))
            process.daemon = daemon
            process.start()
            workers.append(process)

            sys.stdout.write('\r')
            sys.stdout.write('> %s workers initialized' % (i + 1))
            sys.stdout.flush()
            sleep(cfg.settings.general.worker_cooling)

        except RuntimeError:
            pass

    print ' .. ok'
    return workers
Example #28
def run_server():
    global server_process
    os.system("rm -rf /tmp/unittest_herodb")
    os.mkdir("/tmp/unittest_herodb")
    server_process = Process(target=_run_server, args=("/tmp/unittest_herodb", 8081))
    server_process.daemon = True
    server_process.start()
Example #29
def start_workers(config):
    '''
    Picks up all the external system configuration from the config file and starts up as many processes as non-default sections in the config.
    The following elements are required from the default configuration section :
    - solr_url : base url of the solr server.
    - nova_db_server : IP or hostname of the nova controller.
    - nova_db_port : Port of the nova db to which the workers should connect.For nova+mysql this would be 3306.
    - nova_db_creds : credentials in the format user:password
    - amqp_server : IP or hostname of the amqp server. Usually, this is same as the nova controller.
    - amqp_port : Port of the AMQP server. If using RMQ this should be 5672.
    - amqp_creds : credentials in the format user:password
    
    Each non-default section of the config should represent a resource type that this system monitors. Each individual worker corresponds to
    a resource type and is run in a separate python process.
    '''
 
    logUtils.setup_logging(config)
    global _LOGGER
    _LOGGER = logUtils.get_logger(__name__)
    for section in config.sections():
        process = Process(target=worker.run, args=(config, section,))
        process.daemon = True
        process.start()
        _LOGGER.info('Started worker process - ' + str(process.pid))
        _PROCESSES.append(process)
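
As a rough illustration of the configuration layout that the docstring above describes, here is a hypothetical config (host names, credentials and section names are invented) and how ConfigParser exposes it; start_workers() spawns one worker process per non-default section:

from configparser import ConfigParser

SAMPLE = """
[DEFAULT]
solr_url = http://localhost:8983/solr
nova_db_server = nova-controller
nova_db_port = 3306
nova_db_creds = nova:secret
amqp_server = nova-controller
amqp_port = 5672
amqp_creds = guest:guest

[instances]

[hypervisors]
"""

config = ConfigParser()
config.read_string(SAMPLE)
print(config.sections())                    # ['instances', 'hypervisors'] -- one worker process each
print(config.get('instances', 'solr_url'))  # DEFAULT values are visible from every section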
 def start_parser_process(self):
     if self.mp_mode:
         from multiprocessing import Process, Event
     else:
         from multiprocessing.dummy import Process, Event
     waiting_shutdown_event = Event()
     if self.mp_mode:
         bot = self.bot.__class__(
             network_result_queue=self.network_result_queue,
             parser_result_queue=self.parser_result_queue,
             waiting_shutdown_event=waiting_shutdown_event,
             shutdown_event=self.shutdown_event,
             parser_mode=True,
             meta=self.bot.meta)
     else:
         # In non-multiprocess mode we start the `run_process`
         # method in a new semi-process (actually a thread).
         # Because we use `run_process` of the main spider instance,
         # all changes made in handlers are applied to the main
         # spider instance, which allows supporting deprecated
         # spiders that do not know about the multiprocessing mode.
         bot = self.bot
         bot.network_result_queue = self.network_result_queue
         bot.parser_result_queue = self.parser_result_queue
         bot.waiting_shutdown_event = waiting_shutdown_event
         bot.shutdown_event = self.shutdown_event
         bot.meta = self.bot.meta
     proc = Process(target=bot.run_parser)
     if not self.mp_mode:
         proc.daemon = True
     proc.start()
     return waiting_shutdown_event, proc
Example #31
def main():

    # step 1:
    # obtain host name, gpuID
    # create the corresponding folder
    hostName = socket.gethostname()
    foldName = str(hostName) + '-gpu' + str(gpu2Use)
    path = os.getcwd()
    targetDir = path + "/" + foldName

    if not os.path.exists(targetDir):
        try:
            os.mkdir(targetDir)
        except OSError:
            print("creation of %s failed!" % targetDir)

    MAXCORUN = int(args.maxCoRun)  # max jobs per gpu
    RANDSEED = int(args.seed)
    gpuNum = 1

    #logger.debug("MaxCoRun={}\trandseed={}\tsaveFile={}".format(MAXCORUN, RANDSEED, args.ofile))

    #----------------------------------------------------------------------
    # 1) application status table : 5 columns
    #----------------------------------------------------------------------
    #
    #    jobid      gpu     status      starT       endT
    #       0       0           1       1           2
    #       1       1           1       1.3         2.4
    #       2       0           0       -           -
    #       ...
    #----------------------------------------------------------------------
    maxJobs = 10000
    rows, cols = maxJobs, 5  # note: init with a large prefixed table
    d_arr = mp.Array(ctypes.c_double, rows * cols)
    arr = np.frombuffer(d_arr.get_obj())
    AppStat = arr.reshape((rows, cols))

    id2name = {}

    #----------------------------------------------------------------------
    #
    #----------------------------------------------------------------------

    #--------------------------------------------------------------------------
    # input: app, app2dir_dd in app_info.py
    #--------------------------------------------------------------------------
    if len(app) != len(app2dir_dd):
        print "Error: app number wrong, check ../prepare/app_info.py!"
        sys.exit(1)

    # three random sequences
    #app_s1 = genRandSeq(app, seed=RANDSEED) # pi

    apps_num = len(app)
    logger.debug("Total GPU Applications = {}.".format(apps_num))

    #--------------------------------------------------------------------------
    # interference analysis : by run 2 apps concurrently
    #--------------------------------------------------------------------------

    appPool = app

    for idx, app1 in enumerate(appPool):
        ofile = targetDir + '/' + app1 + ".npy"
        #print ofile

        # exclude: rodinia-heartwall, lonestar_sssp, dmr

        if idx >= 0:  # NOTE: modify if program hangs
            #if idx == 0: # NOTE: modify if program hangs
            app_run2_dd = {}
            for a2_idx, app2 in enumerate(appPool):
                if a2_idx >= 0:
                    print("idx = {}({}) : a2_idx={}({})".format(
                        idx, app1, a2_idx, app2))

                    app1_runtime, app2_runtime = [], []

                    # run 3 times, get the fastest
                    for ii in xrange(2):
                        workers = []  # for mp processes

                        # run 2 processes concurrently
                        jobID = 0
                        id2name[jobID] = app1
                        process = Process(target=run_work,
                                          args=(jobID, AppStat,
                                                app2dir_dd[app1]))
                        process.daemon = False
                        workers.append(process)
                        process.start()

                        jobID = 1
                        id2name[jobID] = app2
                        process = Process(target=run_work,
                                          args=(jobID, AppStat,
                                                app2dir_dd[app2]))
                        process.daemon = False
                        workers.append(process)
                        process.start()

                        for p in workers:
                            p.join()

                        total_jobs = 2
                        appRT_dd = getGpuJobTiming(AppStat, total_jobs,
                                                   id2name)
                        #print appRT_dd
                        app1_runtime.append(appRT_dd[app1])
                        app2_runtime.append(appRT_dd[app2])

                    #print app1_runtime
                    app1_best = min(app1_runtime)

                    # NOTE: only remember the app1 runtime  when running with app2
                    app_run2_dd[app2] = app1_best
    code segment, data segment, PCB (process control block)


    2 The two ways to start a process
      (1) p = Process(target=..., args=(...,))
      (2) define your own class that inherits from the Process parent class

    3 Commonly used Process methods
      (1) start()      start a child process
      (2) join()       turn asynchronous into synchronous (the parent process stops at the join call, waits for the child process to finish, then continues executing)
      (3) is_alive()   check whether the process is still alive
      (4) terminate()  kill the process
    4 Commonly used Process attributes
       (1) p.name =        give process p a name
       (2) p.pid           return the pid of process p
       (3) p.daemon = True set process p as a daemon process (True: daemon process, False: normal process)
           Two characteristics of a daemon process:
              a daemon process ends as soon as the parent process's code finishes executing
              a daemon process cannot create child processes of its own
           it must be set before start()



    DOS:              single user, single task
    Windows (early):  single user, multiple tasks
    UNIX:             multiple users, multiple tasks

       What are parallelism and concurrency?
           Parallelism: two or more things execute at the same point in time, truly simultaneously (multiple CPUs)
           Concurrency: two or more things execute within the same time interval; with limited resources they take turns using the CPU
       What are blocking and non-blocking?
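
A minimal, self-contained sketch of the two ways to start a process and of the methods and attributes listed above (the function and class names are made up for illustration):

from multiprocessing import Process
import os
import time

def work(tag):
    # runs in the child process
    print('child', tag, 'pid', os.getpid())
    time.sleep(1)

class MyProcess(Process):
    # way (2): subclass Process and override run()
    def run(self):
        work('subclass')

if __name__ == '__main__':
    p = Process(target=work, args=('target',))   # way (1): pass a target callable
    p.name = 'worker-1'   # optional: give the process a name
    p.daemon = True       # must be set before start(); the child ends with the parent
    p.start()             # start the child process

    q = MyProcess()
    q.start()

    p.join()              # the parent blocks here until the child finishes
    q.join()
    print('p alive?', p.is_alive(), 'p pid:', p.pid)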
Example #33
    def init_schedulers(self):
        children = self.children
        mq = import_item(str(self.mq_class))

        f = self.factory
        ident = f.session.bsession
        # disambiguate url, in case of *
        monitor_url = disambiguate_url(f.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A', b'iopub')
        q.bind_in(f.client_url('iopub'))
        q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
        q.bind_out(f.engine_url('iopub'))
        q.setsockopt_out(zmq.SUBSCRIBE, b'')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')

        q.bind_in(f.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(f.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        q.bind_in(f.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(f.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            scheme = TaskScheduler.scheme_name.default_value
        # Task Queue (in a Process)
        if scheme == 'pure':
            self.log.warn("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(f.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(f.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon = True
            children.append(q)
        elif scheme == 'none':
            self.log.warn("task::using no Task scheduler")

        else:
            self.log.info("task::using Python %s Task scheduler" % scheme)
            sargs = (
                f.client_url('task'),
                f.engine_url('task'),
                monitor_url,
                disambiguate_url(f.client_url('notification')),
                disambiguate_url(f.client_url('registration')),
            )
            kwargs = dict(logname='scheduler',
                          loglevel=self.log_level,
                          log_url=self.log_url,
                          config=dict(self.config))
            if 'Process' in self.mq_class:
                # run the Python scheduler in a Process
                q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
                q.daemon = True
                children.append(q)
            else:
                # single-threaded Controller
                kwargs['in_thread'] = True
                launch_scheduler(*sargs, **kwargs)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)
Example #34
def test_fault_tolerance(fault_db):
    force_kill_spawn_port = 5012
    normal_spawn_port = 5013

    def worker_killer_task(config, master_address):
        from scannerpy import ProtobufGenerator, Config, start_worker
        import time
        import grpc
        import subprocess
        import signal
        import os

        c = Config(None)

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types

        protobufs = ProtobufGenerator(config)

        # Spawn a worker that we will force kill
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(force_kill_spawn_port)
            ],
                                 shell=True,
                                 stdout=fp,
                                 stderr=fp,
                                 preexec_fn=os.setsid)

            # Wait a bit for the worker to do its thing
            time.sleep(10)

            # Force kill worker process to trigger fault tolerance
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.kill()
            p.communicate()

            # Wait for fault tolerance to kick in
            time.sleep(15)

            # Spawn the worker again
            subprocess.call([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(normal_spawn_port)
            ],
                            shell=True)

    master_addr = fault_db._master_address
    killer_process = Process(target=worker_killer_task,
                             args=(fault_db.config, master_addr))
    killer_process.daemon = True
    killer_process.start()

    frame = fault_db.sources.FrameColumn()
    range_frame = fault_db.streams.Range(frame, 0, 20)
    sleep_frame = fault_db.ops.SleepFrame(ignore=range_frame)
    output_op = fault_db.sinks.Column(columns={'dummy': sleep_frame})

    job = Job(op_args={
        frame: fault_db.table('test1').column('frame'),
        output_op: 'test_fault',
    })

    table = fault_db.run(output_op, [job],
                         pipeline_instances_per_node=1,
                         force=True,
                         show_progress=False)
    table = table[0]

    assert len([_ for _ in table.column('dummy').load()]) == 20

    # Shutdown the spawned worker
    channel = grpc.insecure_channel('localhost:' + str(normal_spawn_port),
                                    options=[('grpc.max_message_length',
                                              24499183 * 2)])
    worker = fault_db.protobufs.WorkerStub(channel)

    try:
        worker.Shutdown(fault_db.protobufs.Empty())
    except grpc.RpcError as e:
        status = e.code()
        if status == grpc.StatusCode.UNAVAILABLE:
            print('could not shutdown worker!')
            exit(1)
        else:
            raise ScannerException(
                'Worker errored with status: {}'.format(status))
    killer_process.join()
def create_exp_process(meta_info, expconfig, url, count, no_cache):
    process = Process(target=run_exp,
                      args=(meta_info, expconfig, url, count, no_cache))
    process.daemon = True
    return process
Example #36
 def start_logging_server(cls, filename, port, after_fork_hook):
     process = Process(target=cls.run_in_process,
                       args=(filename, port, after_fork_hook))
     process.daemon = True
     process.start()
     return process
    def start_actors(self):
        def worker(worker_n, sample_queue, command_queue, ret_queue,
                   sucess_queue):
            with tf.device(
                    tf.train.replica_device_setter(cluster=self.cluster)):
                # worker only need old network to act
                with tf.variable_scope('old'):
                    network_io = policy_fn(self.state_size, self.goal_size,
                                           self.model_config)
                    input_state_tensor_old = network_io['input_state_tensor']
                    input_goal_tensor_old = network_io['input_goal_tensor']
                    action_distribution_old = network_io['action_distribution']
                    state_value_old = network_io['state_value']
            gpu_options = tf.GPUOptions(allow_growth=True)
            server = tf.train.Server(
                self.cluster,
                job_name="worker",
                task_index=worker_n,
                config=tf.ConfigProto(gpu_options=gpu_options))
            with tf.Session(target=server.target) as sess:
                # wait for the connection
                print("Worker %d: waiting for cluster connection..." %
                      worker_n)
                while sess.run(tf.report_uninitialized_variables()).size != 0:
                    print("Worker %d: waiting for variable initialization..." %
                          worker_n)
                    time.sleep(1.0)
                print("Worker %d: variables initialized" % worker_n)
                env = self.env_init_fn()
                # wait for command forever
                while True:
                    command = command_queue.get(True)
                    if command == 'sample':
                        local_step = 0
                        sucess = 0
                        samples = []
                        # print "Start Sampling Process  " + str(worker_n)
                        init_state, init_goal = self.env_reset_fn(env)
                        state = init_state
                        goal = init_goal
                        while (local_step < self.max_steps):
                            np_action_mean, np_action_log_std, np_state_value = sess.run(
                                action_distribution_old + [state_value_old],
                                feed_dict={
                                    input_state_tensor_old: [state],
                                    input_goal_tensor_old: [goal]
                                })
                            np_action = np_action_mean[0] + np.exp(
                                np_action_log_std[0]) * np.random.normal(
                                    size=(3))
                            transition = self.env_sample_fn(env, np_action)
                            (reward, next_state, next_goal, done) = transition
                            if reward > 0:
                                sucess = 1
                            np_action_prob = np_gaussion_prob(
                                (np_action_mean, np_action_log_std), np_action)
                            sample = (state, goal, np_action, reward, done,
                                      np_action_prob, np_state_value)
                            samples.append(sample)
                            local_step += 1
                            state = next_state
                            goal = next_goal
                            if done:
                                break
                        # ==============================================================================
                        # All TD(0)
                        advantage_list, estimated_state_value_list = TD0_estimation(
                            samples, self.gamma, self.advantage_offset)
                        # ===============================================================================
                        # samples with advantage and target state_value
                        full_samples = []
                        for i in range(len(samples)):
                            state, goal, np_action, reward, done, np_action_prob, np_state_value = samples[
                                i]
                            full_samples.append(
                                (state, goal, np_action, np_action_prob,
                                 advantage_list[i],
                                 estimated_state_value_list[i]))
                        # ===============================================================================
                        # send results
                        for sample in full_samples:
                            sample_queue.put(sample, True)
                        ret_queue.put(len(full_samples), True)
                        sucess_queue.put(sucess, True)
                    else:
                        if command == 'sample-alt':
                            local_step = 0
                            sucess = 0
                            samples = []
                            gripper_states = []
                            # print "Start Sampling Process  " + str(worker_n)
                            init_state, init_goal = self.env_reset_fn(env)
                            state = init_state
                            goal = init_goal
                            while (local_step < self.max_steps):
                                np_action_mean, np_action_log_std, np_state_value = sess.run(
                                    action_distribution_old +
                                    [state_value_old],
                                    feed_dict={
                                        input_state_tensor_old: [state],
                                        input_goal_tensor_old: [goal]
                                    })
                                np_action = np_action_mean[0] + np.exp(
                                    np_action_log_std[0]) * np.random.normal(
                                        size=(3))
                                # save current gripper state
                                gripper_states.append(get_gripper_state(env))
                                transition = self.env_sample_fn(env, np_action)
                                (reward, next_state, next_goal,
                                 done) = transition
                                if reward > 0:
                                    sucess = 1
                                np_action_prob = np_gaussion_prob(
                                    (np_action_mean, np_action_log_std),
                                    np_action)
                                sample = (state, goal, np_action, reward, done,
                                          np_action_prob, np_state_value)
                                samples.append(sample)
                                local_step += 1
                                state = next_state
                                goal = next_goal
                                if done:
                                    break
                            # ==============================================================================
                            # All TD(0)
                            advantage_list, estimated_state_value_list = TD0_estimation(
                                samples, self.gamma, self.advantage_offset)
                            # ===============================================================================
                            # samples with advantage and target state_value
                            full_samples = []
                            for i in range(len(samples)):
                                state, goal, np_action, reward, done, np_action_prob, np_state_value = samples[
                                    i]
                                full_samples.append(
                                    (state, goal, np_action, np_action_prob,
                                     advantage_list[i],
                                     estimated_state_value_list[i]))

                            # ==================================================
                            # Generate alternative goals for fail cases
                            if sucess < 0.5 and np.random.random(
                            ) < self.alt_success_rate:
                                samples_alt = []
                                is_impossible = False
                                for i in range(len(samples)):
                                    state, goal, np_action, reward, done, np_action_prob, np_state_value = samples[
                                        i]
                                    alt_goal = fake_goal_from_gripper_state(
                                        gripper_states[i], gripper_states[-1])
                                    alt_goal = np.hstack([alt_goal, goal[3:]])
                                    alt_np_action_mean, alt_np_action_log_std, alt_np_state_value = sess.run(
                                        action_distribution_old +
                                        [state_value_old],
                                        feed_dict={
                                            input_state_tensor_old: [state],
                                            input_goal_tensor_old: [alt_goal]
                                        })
                                    alt_np_action_prob = np_gaussion_prob(
                                        (alt_np_action_mean,
                                         alt_np_action_log_std), np_action)
                                    if alt_np_action_prob < 1e-2:
                                        is_impossible = True
                                        break
                                    alt_reward = -1
                                    if i == (len(samples) - 1):
                                        alt_reward = 99
                                    sample_alt = (state, alt_goal, np_action,
                                                  alt_reward, done,
                                                  alt_np_action_prob,
                                                  alt_np_state_value)
                                    samples_alt.append(sample_alt)
                                # All TD(0)
                                if not is_impossible:
                                    alt_advantage_list, alt_estimated_state_value_list = TD0_estimation(
                                        samples_alt, self.gamma,
                                        self.advantage_offset)
                                    # samples with advantage and target state_value
                                    for i in range(len(samples_alt)):
                                        alt_state, alt_goal, alt_np_action, alt_reward, alt_done, alt_np_action_prob, alt_np_state_value = samples_alt[
                                            i]
                                        full_samples.append((
                                            alt_state, alt_goal, alt_np_action,
                                            alt_np_action_prob,
                                            alt_advantage_list[i],
                                            alt_estimated_state_value_list[i]))
                            # ===============================================================================
                            # send results
                            for sample in full_samples:
                                sample_queue.put(sample, True)
                            ret_queue.put(len(full_samples), True)
                            sucess_queue.put(sucess, True)
                        else:
                            if command == 'end':
                                break
            # clean up
            env.close()
            print('Sampling Actor %d ends' % worker_n)
            return 0

        # start workers
        for worker_n in range(self.num_actors):
            process = Process(target=worker,
                              args=(worker_n, self.sample_queue,
                                    self.command_queue, self.ret_queue,
                                    self.sucess_queue))
            process.daemon = True
            process.start()
            self.processes.append(process)
Example #38
def bench(server_func,
          client_func,
          client_count,
          server_kwds=None,
          client_kwds=None,
          client_max=10,
          server_join_timeout=1.0):
    """Bench-test the server_func (with optional keyword args from server_kwds) as a process; will fail
    if one already bound to port.  Creates a thread pool (default 10) of client_func.  Each client
    is supplied a unique number argument, and the supplied client_kwds as keywords, and should
    return 0 on success, !0 on failure.

    Both threading.Thread and multiprocessing.Process work fine for running a bench server.
    However, Thread needs to use the out-of-band means to force server_main termination (since we
    can't terminate a Thread).  This is implemented as a container (eg. dict-based cpppo.apidict)
    containing a done signal.

    """

    # Either multiprocessing.Process or threading.Thread will work as Process for the Server
    from multiprocessing import Process
    #from threading 		import Thread as Process

    # Only multiprocessing.pool.ThreadPool works, as we cannot serialize some client API objects
    from multiprocessing.pool import ThreadPool as Pool
    #from multiprocessing.dummy	import Pool
    #from multiprocessing	import Pool

    log.normal("Server %r startup...", misc.function_name(server_func))
    server = Process(target=server_func, kwargs=server_kwds or {})
    server.daemon = True
    server.start()
    time.sleep(.25)

    try:
        log.normal(
            "Client %r tests begin, over %d clients (up to %d simultaneously)",
            misc.function_name(client_func), client_count, client_max)
        pool = Pool(processes=client_max)
        # Use list comprehension instead of generator, to force start of all asyncs!
        asyncs = [
            pool.apply_async(client_func, args=(i, ), kwds=client_kwds or {})
            for i in range(client_count)
        ]
        log.normal("Client %r started %d times in Pool; harvesting results",
                   misc.function_name(client_func), client_count)

        successes = 0
        for a in asyncs:
            try:
                result = a.get()
                successes += 1 if not result else 0
                if result:
                    log.warning("Client failed w/ non-0 result: %s", result)
            except Exception as exc:
                log.warning("Client failed w/ Exception: %s", exc)

        failures = client_count - successes
        log.normal("Client %r tests done: %d/%d succeeded (%d failures)",
                   misc.function_name(client_func), successes, client_count,
                   failures)
        return failures
    finally:
        # Shut down server; use 'server.control.done = true' to stop server, if
        # available in server_kwds.  If this doesn't work, we can try terminate
        control = server_kwds.get('server', {}).get('control',
                                                    {}) if server_kwds else {}
        if 'done' in control:
            log.normal("Server %r done signalled",
                       misc.function_name(server_func))
            control[
                'done'] = True  # only useful for threading.Thread; Process cannot see this
        if hasattr(server, 'terminate'):
            log.normal("Server %r done via .terminate()",
                       misc.function_name(server_func))
            server.terminate(
            )  # only if using multiprocessing.Process(); Thread doesn't have
        server.join(timeout=server_join_timeout)
        if server.is_alive():
            log.warning("Server %r remains running...",
                        misc.function_name(server_func))
        else:
            log.normal("Server %r stopped.", misc.function_name(server_func))
Example #39
    server.add_handler("ContactService", ContactService(req_context))

    redis_client = redis.StrictRedis("localhost", port=6379)
    while True:
        raw_msg = redis_client.brpop(["contact"], timeout=1)
        if raw_msg:
            (headers, req) = load_msg(raw_msg[1])
            if headers.has_key("reply_to"):
                tls = threading.local()
                # set the headers on the thread local req_context
                req_context.headers = headers
                resp = server.call(req)
                redis_client.lpush(headers["reply_to"],
                                   dump_msg(headers, resp))
                req_context.headers = None


if __name__ == "__main__":
    # In a real system the router and worker would probably be
    # separate processes.  For this demo we're combining them
    # for simplicity
    worker_proc = Process(target=start_worker)
    worker_proc.daemon = True
    worker_proc.start()

    def handler(signum, frame):
        worker_proc.terminate()

    signal.signal(signal.SIGTERM, handler)
    start_router()
Example #40
 def CrawlArticles(self, request, context):
     spids = request.spider[:]
     p = Process(target=crawl_articles, args=(spids, ))
     p.daemon = True
     processes.append(p)
     return spider_pb2.CrawlTaskResult(isrunning=True)
Example #41
def exportBySE(seNames, pathIn, dirOut, pathInfo):
    fin = open(pathIn)
    dCombCount = dict()
    dCombSe = dict()
    dSe = dict()
    nA = 0
    print("Reading...")
    oSeNames = seNames
    if not type(seNames) == set:
        seNames = set(seNames)
    print(oSeNames)
    while True:
        line = fin.readline()
        if line == "":
            break
        parts = line.strip().split("$")
        drugCmb = parts[1]
        ses = parts[-1]
        ses = set(ses.split(","))
        drugs = sorted(drugCmb.split(","))
        if len(drugs) > 20:
            continue

        nA += 1

        for se in oSeNames:

            drugPars = []

            if 2 <= len(drugs) <= 20:
                for i in range(len(drugs)):
                    for j in range(i + 1, len(drugs)):
                        d1 = drugs[i]
                        d2 = drugs[j]
                        pair = "%s,%s" % (d1, d2)
                        drugPars.append(pair)
                        dCombCountx = utils.get_insert_key_dict(
                            dCombCount, se, dict())

                        utils.add_dict_counter(dCombCountx, pair)

            if se in ses:
                utils.add_dict_counter(dSe, se)
                for pair in drugPars:
                    dComSEx = utils.get_insert_key_dict(dCombSe, se, dict())
                    utils.add_dict_counter(dComSEx, pair)

    fin.close()

    print("Cal Contingency table...")
    dContigenTable = dict()

    for se in oSeNames:
        dCombCountx = dCombCount[se]
        dComSEx = utils.get_dict(dCombSe, se, dict())
        nSe = utils.get_dict(dSe, se, 0)
        if nSe == 0:
            continue
        for drugComb, nComb in dCombCountx.items():
            ar = np.zeros((2, 2))
            nCombSe = utils.get_dict(dComSEx, drugComb, 0)
            if nCombSe == 0:
                # print("SKIP")
                continue
            ar[0, 0] = nCombSe
            ar[1, 0] = nComb - nCombSe
            ar[0, 1] = nSe - nCombSe
            ar[1, 1] = nA - (nComb + nSe - nCombSe)
            nName = "%s_%s" % (drugComb, se)
            dContigenTable[nName] = ar

    producers = []
    consumers = []
    queue = Queue(params.K_FOLD)
    counter = Value('i', 0)
    counter2 = Value('i', 0)

    inputList = list(dContigenTable.items())
    nInputList = len(inputList)
    nDPerWorker = int(nInputList / params.N_DATA_WORKER)
    # assert 'g-csf' in allDrugNames
    for i in range(params.N_DATA_WORKER):
        startInd = i * nDPerWorker
        endInd = (i + 1) * nDPerWorker
        endInd = min(endInd, nInputList)
        if i == params.N_DATA_WORKER - 1:
            endInd = nInputList
        data = inputList[startInd:endInd]
        producers.append(Process(target=producer, args=(queue, data)))

    sname = "__".join(list(seNames))
    seNameString = "%s" % hash(sname)

    fFileNameMap = open(pathInfo, "a")
    fFileNameMap.write("%s\t%s\n" % (seNameString, sname))
    fFileNameMap.close()
    fout = open("%s/%s" % (dirOut, seNameString), "w")
    p = Process(target=consumer, args=(queue, counter, counter2, fout, []))
    p.daemon = True
    consumers.append(p)

    print("Start Producers...")
    for p in producers:
        p.start()
    print("Start Consumers...")
    for p in consumers:
        p.start()

    for p in producers:
        p.join()
    print("Finish Producers")

    queue.put(None)

    while True:
        if counter.value == 0:
            time.sleep(0.01)
            continue
        else:
            break
    fout.flush()
    fout.close()
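# The consumer receiving these 2x2 tables is defined elsewhere; assuming it runs an
# independence test, each table built above maps directly onto e.g. SciPy's Fisher
# exact test (toy numbers below):
import numpy as np
from scipy.stats import fisher_exact

# ar[0,0]=pair with SE, ar[1,0]=pair without SE, ar[0,1]=SE without pair, ar[1,1]=neither
ar = np.array([[12.0, 30.0], [45.0, 913.0]])
odds_ratio, p_value = fisher_exact(ar, alternative='greater')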
Exemple #42
0
def start_crawling_articles(output_file: str,
                            temporary: str,
                            num_cores: int,
                            category_list: List[str],
                            start_date: str,
                            end_date: str,
                            date_step: int,
                            max_page: int = 100):
    """Crawl news articles in parallel.

    Arguments:
        output_file (str): Output file path.
        temporary (str): Temporary directory path.
        num_cores (int): The number of processes.
        category_list (list): The list of categories to crawl from.
        start_date (str): Start date string.
        end_date (str): End date string.
        date_step (int): The number of days to skip.
        max_page (int): The maximum pages to crawl.
    """
    date_list = utils.drange(start_date, end_date, date_step)
    total_search = len(date_list) * len(category_list)

    # Prepare multi-processing.
    workers = []
    queue = Queue()
    date_list_chunks = utils.split_list(date_list, chunks=num_cores)

    for i in range(num_cores):
        w = Process(target=_collect_article_urls_worker,
                    args=(queue, category_list, date_list_chunks[i], max_page))
        w.daemon = True
        w.start()

        workers.append(w)

    # Gather article urls from processes.
    article_urls = []
    exit_processes = 0
    tqdm_iter = tqdm.trange(total_search, desc='[*] collect article urls')
    while True:
        batch_article_urls = queue.get()

        if batch_article_urls is None:
            tqdm_iter.update()
        else:
            article_urls += batch_article_urls
            exit_processes += 1

        # Exit for waiting processes.
        if exit_processes == num_cores:
            break
    tqdm_iter.close()

    print(f'[*] successfully collected article urls.'
          f' total articles: {len(article_urls)}')

    # Prepare multi-processing.
    workers = []
    queue = Queue()

    # Create temporary files and split articles into chunks.
    crawled_files = utils.random_filenames(temporary, num_cores)
    article_list_chunks = utils.split_list(article_urls, chunks=num_cores)

    for i in range(num_cores):
        w = Process(target=_crawl_articles_worker,
                    args=(crawled_files[i], article_list_chunks[i], queue))
        w.daemon = True
        w.start()

        workers.append(w)

    # Show crawling progress.
    exit_processes = 0
    for _ in tqdm.trange(len(article_urls), desc='[*] crawl articles'):
        if queue.get() is None:
            exit_processes += 1

        # Exit for waiting processes.
        if exit_processes == num_cores:
            break

    # Merge temporary files into `output_file`.
    print(f'[*] finish crawling articles. merge chunks into [{output_file}].')
    with open(output_file, 'wb') as dst:
        for name in crawled_files:
            with open(name, 'rb') as src:
                shutil.copyfileobj(src, dst)

    # Remove temporary files.
    for name in crawled_files:
        os.remove(name)
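# utils.drange and utils.split_list are project helpers whose implementations are not
# shown; a plausible sketch of the chunking helper, assuming it splits a list into
# `chunks` near-equal parts for the workers, is:
from typing import List, TypeVar

T = TypeVar('T')

def split_list(items: List[T], chunks: int) -> List[List[T]]:
    # round-robin distribution so every worker gets a near-equal share
    return [items[i::chunks] for i in range(chunks)]

# split_list(list(range(10)), 3) -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]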
Exemple #43
0
def main():
    #global app2dir
    #global app2cmd
    global app2metric
    global app2trace

    #-------------------------------------------------------------------------#
    # GPU Job Table
    #-------------------------------------------------------------------------#
    #    jobid           starT       endT   status
    #       0             1           2
    #       1             1.3         2.4
    #       2             -           -
    #       ...
    #----------------------------------------------------------------------
    maxJobs = 10000
    rows, cols = maxJobs, 4  # note: init with a large prefixed table
    d_arr = mp.Array(ctypes.c_double, rows * cols)
    arr = np.frombuffer(d_arr.get_obj())
    GpuJobTable = arr.reshape((rows, cols))

    #===================#
    # 1) read app info
    #===================#
    #app2dir    = np.load('../07_sim_devid/similarity/app2dir_dd.npy').item()
    #app2cmd    = np.load('../07_sim_devid/similarity/app2cmd_dd.npy').item()

    #app2metric = np.load('../07_sim_devid/similarity/app2metric_dd.npy').item()
    #app2trace  = np.load('../07_sim_devid/perfmodel/app2trace_dd.npy').item()

    #print len(app2dir), len(app2cmd), len(app2metric), len(app2trace)

    #app_iobound_dd = np.load('./case_studies/app_iobound_dd.npy').item()

    #=========================================================================#
    # set up the launch order list
    #=========================================================================#
    appsList = get_appinfo('./prepare/app_info_79.bin')
    #print appsList

    app2dir_dd = {}
    for v in appsList:
        app2dir_dd[v[0]] = v[1]

    #print app2dir_dd

    #launch_list = ['cudasdk_concurrentKernels', 'poly_correlation']

    #==========#
    # test 1
    #==========#

    #launch_list = ['cudasdk_concurrentKernels', 'poly_3mm', 'poly_gemm', 'cudasdk_segmentationTreeThrust'] # sim
    #launch_list = ['cudasdk_concurrentKernels', 'cudasdk_segmentationTreeThrust', 'poly_3mm', 'poly_gemm'] # job size

    #launch_list = ['cudasdk_concurrentKernels', 'poly_3mm', 'cudasdk_segmentationTreeThrust', 'cudasdk_MCSingleAsianOptionP'] # sim
    #launch_list = ['cudasdk_concurrentKernels', 'cudasdk_segmentationTreeThrust', 'poly_3mm', 'cudasdk_MCSingleAsianOptionP'] # job size
    #launch_list = ['cudasdk_concurrentKernels', 'cudasdk_segmentationTreeThrust', 'cudasdk_MCSingleAsianOptionP', 'poly_3mm'] # job size  + iobound

    #launch_list = ['cudasdk_concurrentKernels', 'poly_3mm', 'cudasdk_segmentationTreeThrust'] # sim
    #launch_list = ['cudasdk_concurrentKernels', 'cudasdk_segmentationTreeThrust', 'poly_3mm'] # job size

    #for x in launch_list:
    #    print x, app2dir_dd[x]

    #==========#
    # test 2
    #==========#

    #launch_list = ['shoc_lev1reduction', 'poly_3mm', 'poly_gemm', 'cudasdk_segmentationTreeThrust'] # sim
    #launch_list = ['shoc_lev1reduction', 'cudasdk_segmentationTreeThrust', 'poly_3mm', 'poly_gemm'] # jobsize + sim

    #==========#
    # test 3
    #==========#
    #launch_list = ['cudasdk_scan', 'cudasdk_interval', 'cudasdk_MCEstimatePiQ', 'cudasdk_concurrentKernels'] # sim
    #launch_list = ['cudasdk_scan', 'cudasdk_concurrentKernels', 'cudasdk_MCEstimatePiQ','cudasdk_interval'] # threads + sim
    #launch_list = ['cudasdk_scan', 'cudasdk_concurrentKernels', 'cudasdk_interval', 'cudasdk_MCEstimatePiQ'] # threads + sim

    # none of the commented-out candidate lists above is active; pick one so the script runs
    launch_list = ['cudasdk_scan', 'cudasdk_interval', 'cudasdk_MCEstimatePiQ', 'cudasdk_concurrentKernels']

    apps_num = len(launch_list)
    logger.debug("Total GPU Applications = {}.".format(apps_num))

    appQueList = copy.deepcopy(launch_list)  # application running queue

    workers = []  # for mp processes

    #==================================#
    # run the apps in the queue
    #==================================#
    MAXCORUN = 2
    activeJobs = 0
    jobID = -1

    current_jobid_list = []

    for i in xrange(apps_num):
        Dispatch = False

        if activeJobs < MAXCORUN:
            Dispatch = True

        #print("iter {} dispatch={}".format(i, Dispatch))

        if Dispatch:
            activeJobs += 1
            jobID += 1
            current_jobid_list.append(jobID)

            appName = appQueList[i]
            process = Process(target=run_work,
                              args=(jobID, GpuJobTable, appName, app2dir_dd))

            process.daemon = False
            #logger.debug("Start %r", process)
            workers.append(process)
            process.start()

        else:
            # spin
            while True:
                break_loop = False

                current_running_jobs = 0
                jobs2del = []

                for jid in current_jobid_list:
                    if GpuJobTable[jid, 3] == 1:  # check the status, if one is done
                        jobs2del.append(jid)
                        break_loop = True

                if break_loop:
                    activeJobs -= 1

                    # update
                    if jobs2del:
                        for id2del in jobs2del:
                            del_idx = current_jobid_list.index(id2del)
                            del current_jobid_list[del_idx]
                    break

            #------------------------------------
            # after spinning, schedule the work
            #------------------------------------

            #print("iter {}: activeJobs = {}".format(i, activeJobs))
            activeJobs += 1
            jobID += 1
            current_jobid_list.append(jobID)
            #print("iter {}: activeJobs = {}".format(i, activeJobs))

            appName = appQueList[i]
            process = Process(target=run_work,
                              args=(jobID, GpuJobTable, appName, app2dir_dd))

            process.daemon = False
            #logger.debug("Start %r", process)
            workers.append(process)
            process.start()

    #=========================================================================#
    # end of running all the jobs
    #=========================================================================#

    for p in workers:
        p.join()

    total_jobs = jobID + 1
    PrintGpuJobTable(GpuJobTable, total_jobs)

    if total_jobs != apps_num:
        logger.debug("[Warning] job number doesn't match.")
Exemple #44
0
  from urllib.request import urlopen
  import config as conf

  manager = Manager()
  pidstate = manager.dict()
  pidstate['is_awake'] = True
  pidstate['sched_enabled'] = conf.sched_enabled
  pidstate['sleep_time'] = conf.sleep_time
  pidstate['wake_time'] = conf.wake_time
  pidstate['i'] = 0
  pidstate['settemp'] = conf.set_temp
  pidstate['avgpid'] = 0.

  print("Starting Scheduler thread...")
  s = Process(target=scheduler,args=(1,pidstate))
  s.daemon = True
  s.start()

  print("Starting PID thread...")
  p = Process(target=pid_loop,args=(1,pidstate))
  p.daemon = True
  p.start()

  print("Starting HE Control thread...")
  h = Process(target=he_control_loop,args=(1,pidstate))
  h.daemon = True
  h.start()

  print("Starting REST Server thread...")
  r = Process(target=rest_server,args=(1,pidstate))
  r.daemon = True
  r.start()
Exemple #45
0
def spawn_process():
    p = Process(target=write_playback)
    p.daemon = True
    p.start()
    gc.collect()
Exemple #46
0
    def _execute(
        self,
        *,
        use_threads: bool,
        max_workers: int,
        tqdm_kwargs: dict,
        worker_initializer: Callable,
        task: Callable,
        task_arguments: Iterable,
        task_finished: Callable,
    ):
        if use_threads and max_workers == 1:
            with self.pbar_class(**tqdm_kwargs) as pbar:
                for args in task_arguments:
                    result = task(args)
                    task_finished(result, pbar)
            return

        task_arguments = list(task_arguments)
        grouped_args = list(
            zip_longest(*list(split_every(max_workers, task_arguments))))
        if not grouped_args:
            return

        processes = []
        connections = []
        for chunk in grouped_args:
            parent_conn, child_conn = Pipe()

            worker_args = [args for args in chunk if args is not None]
            process = Process(
                target=process_loop,
                args=(
                    child_conn,
                    worker_initializer,
                    logging.getLogger("").level,
                    task,
                    worker_args,
                ),
            )
            process.daemon = True
            processes.append(process)
            connections.append(parent_conn)

        for process in processes:
            process.start()

        with self.pbar_class(**tqdm_kwargs) as pbar:
            while connections:
                for r in wait(connections):
                    try:
                        msg_type, msg = r.recv()
                    except EOFError:
                        connections.remove(r)
                        continue

                    if msg_type == MessageType.result:
                        if task_finished:
                            task_finished(msg, pbar)
                    elif msg_type == 'log':
                        record = msg
                        logger = logging.getLogger(record.name)
                        logger.handle(record)
                    elif msg_type == MessageType.complete:
                        connections.remove(r)
                    elif msg_type == MessageType.exception:
                        for process in processes:
                            process.terminate()
                        raise msg

        for process in processes:
            process.join()
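# process_loop and MessageType are defined elsewhere; read against the dispatch loop
# above, a compatible worker would look roughly like this sketch (forwarding of 'log'
# records through the pipe is omitted):
def process_loop_sketch(conn, worker_initializer, log_level, task, worker_args):
    try:
        if worker_initializer:
            worker_initializer()
        for args in worker_args:
            conn.send((MessageType.result, task(args)))    # one result per task
        conn.send((MessageType.complete, None))            # tells the parent to drop this pipe
    except Exception as exc:
        conn.send((MessageType.exception, exc))            # parent terminates all workers and re-raises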
Exemple #47
0
def create_and_run_exp_process(meta_info, expconfig):
    """Creates the experiment process."""
    process = Process(target=run_exp, args=(meta_info, expconfig, ))
    process.daemon = True
    process.start()
    return process
def main(path_to_imgs_pickle, test_run, download_process_no,
         classification_process_no, output_file, progress_report_interval):
    """
    Sets up and runs the multiprocessing pipeline.

    It consists of the following steps:
    1) Downloading the Inception v3 model if it is not present. This is done by the maybe_download_and_extract() function.

    2) Filling the process-safe queue imgid_urls_queue with Instagram Image IDs and their corresponding url lists from the
     pickle file passed in the path_to_imgs_pickle parameter. The number of key-value pairs (Image ID, url list) loaded is
     limited by the test_run parameter; setting it to 0 or None loads all the available records from the file.

    3) Initialization of the process-safe queues img_filename_queue and result_queue.

    4) Spawning the number of download processes specified in the download_process_no parameter.
    The tuple (None, None) is also appended to imgid_urls_queue download_process_no times to signal the end of
    the queue to the download workers.

    5) Spawning the number of classification processes specified in the classification_process_no parameter.

    6) Spawning a worker that saves results into a file.

    7) Spawning a progress-reporting worker.

    8) Joining the download workers. Once downloading is done, None is appended to img_filename_queue
    classification_process_no times to signal the end of the queue to the classification workers
    (a minimal sketch of this sentinel pattern follows the function below).

    9) Joining the classification workers. Once that is done, None is appended to result_queue to signal the end of the
    queue to the results-saving worker.

    10) Joining the results-saving worker.

    The progress-reporting worker quits automatically with the main (parent) process because it is a daemon process,
    so there is no need to join it.
    :param path_to_imgs_pickle: path to pickle file containing dictionary of Instagram Image IDs as keys and corresponding lists of urls as values
    :param test_run: number of Instagram Image IDs to classify; None or 0 to classify all
    :param download_process_no: number of download processes
    :param classification_process_no: number of classification processes
    :param output_file: filename of the results file to be written to
    :param progress_report_interval: interval how often to write the current progress into stdout in seconds; if None or 0 => no progress reporting
    :return: None
    """
    ctx = multiprocessing.get_context('spawn')

    maybe_download_and_extract()

    with open(path_to_imgs_pickle, 'rb') as handle:
        urldict = pickle.load(handle)

    imgid_urls_queue = ctx.Queue()
    i = 0
    for img_id, url_list in urldict.items():
        i += 1
        imgid_urls_queue.put((img_id, url_list))
        if test_run and i >= test_run:
            break
    imgid_urls_queue_orig_len = imgid_urls_queue.qsize()

    img_filename_queue = ctx.Queue()

    download_p_list = []
    for i in range(1, download_process_no + 1):
        download_p = ctx.Process(target=download_worker,
                                 args=(
                                     imgid_urls_queue,
                                     img_filename_queue,
                                 ))
        download_p_list.append(download_p)
        imgid_urls_queue.put((None, None))
        download_p.start()

    result_queue = ctx.Queue()

    classification_p_list = []
    for i in range(1, classification_process_no + 1):
        classification_p = ctx.Process(target=classification_worker,
                                       args=(
                                           img_filename_queue,
                                           result_queue,
                                       ))
        classification_p_list.append(classification_p)
        classification_p.start()

    save_results_p = ctx.Process(target=save_result_worker,
                                 args=(
                                     result_queue,
                                     output_file,
                                 ))
    save_results_p.start()

    if progress_report_interval:
        # use the same 'spawn' context as the other workers and queues
        progress_reporting_p = ctx.Process(target=progress_reporting_worker,
                                           args=(
                                               imgid_urls_queue,
                                               imgid_urls_queue_orig_len,
                                               img_filename_queue,
                                               result_queue,
                                               progress_report_interval,
                                           ))
        progress_reporting_p.daemon = True
        progress_reporting_p.start()

    for p in download_p_list:
        p.join()

    print('Download done!', flush=True)

    for p in classification_p_list:
        img_filename_queue.put(None)

    for p in classification_p_list:
        p.join()

    print('Classification done!', flush=True)

    result_queue.put(None)

    save_results_p.join()

    print('Done!')
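
# The shutdown choreography in steps 8-9 above is the classic sentinel ("poison pill")
# pattern; stripped of the downloading/classification details, it reduces to this sketch:
def _stage(in_q, out_q):
    while True:
        item = in_q.get()
        if item is None:          # sentinel: upstream is finished
            break
        out_q.put(item * 2)       # stand-in for real work

def _sentinel_demo():
    ctx = multiprocessing.get_context('spawn')
    in_q, out_q = ctx.Queue(), ctx.Queue()
    workers = [ctx.Process(target=_stage, args=(in_q, out_q)) for _ in range(4)]
    for w in workers:
        w.start()
    for item in range(20):
        in_q.put(item)
    for _ in workers:             # one sentinel per worker
        in_q.put(None)
    for w in workers:
        w.join()
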

def task1(name, n):
    print("%s id: %s is running..., parent id is <%s>" %
          (name, os.getpid(), os.getppid()))
    time.sleep(n)
    print("%s id: %s has done===, parent id is <%s>" %
          (name, os.getpid(), os.getppid()))
    # a daemonic child process is not allowed to create its own child processes
    # p = Process(target=time.sleep, args=(2, 3))
    # p.start()


def task2(name, n):
    print("%s id: %s is running..., parent id is <%s>" %
          (name, os.getpid(), os.getppid()))
    time.sleep(n)
    print("%s id: %s has done===, parent id is <%s>" %
          (name, os.getpid(), os.getppid()))


if __name__ == "__main__":
    p_task1 = Process(target=task1, args=("task1", 3))
    p_task2 = Process(target=task2, args=("task2", 3))
    # must be set before p.start(): marking p as a daemon forbids it from creating child processes, and p terminates as soon as the parent's code finishes
    p_task1.daemon = True
    # tasks that are not daemonized keep running after the parent's code finishes
    p_task1.start()
    p_task2.start()
    print("main process %s ......" % os.getpid())
            rpc.update(details=rpc_route['details'],
                       large_image=rpc_route['image'],
                       small_image=large_icon,
                       large_text=rpc_route['details'],
                       start=start_time,
                       state="Act: {}".format(current_act))
        if conn.poll(5):
            received = conn.recv()
            if received[0]:
                current_route = received[0]
            if received[1]:
                current_act = received[1]


if __name__ == '__main__':
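    # Pipe(duplex=False) returns (receive end, send end): the "parent_conn" below is
    # handed to the child process for reading, while the parent writes on child_conn.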
    parent_conn, child_conn = Pipe(duplex=False)
    p = Process(target=rpc_process, args=(
        parent_conn,
        route_dict,
    ))
    p.daemon = True
    p.start()
    print("Available routes")
    for i in route_dict.keys():
        print(i)
    while True:
        route = input("New Route: ")
        act = input("Act?: ")
        child_conn.send((route, act))
        time.sleep(5)
Exemple #51
0
def run_plugin_process(uri, name, subscribe_options, log_level):
    # NOTE: the plugin settings (uri, name, subscribe_options, log_level) are accepted
    # here but not forwarded to run_plugin in the original snippet.
    plugin_process = Process(target=run_plugin, args=())
    plugin_process.daemon = False
    plugin_process.start()
Exemple #52
0
    def make_password_clicked(self):
        self.hide_all_empty_errors()
        check_var = True
        for k, v in self.ledits.items():
            if not v.text():
                k.show()
                check_var = False

        if not tuple(self.ledits.values())[2].text().isnumeric():
            check_var = False

        if not check_var:
            self.all_is_valid = False
        else:
            self.all_is_valid = True

        if self.all_is_valid:
            self.ui.pushButton.hide()
            self.ui.label_5.hide()
            self.ui.label_7.show()
            self.ui.pushButton_3.show()
            self.ui.progressBar.show()

            percent = int(
                self.ui.lineEdit_3.text()) / 10_000 * self.pbar_time / 100

            queue = Queue()
            p = Process(target=hashpass,
                        args=(self.ui.lineEdit.text(),
                              self.ui.lineEdit_2.text(),
                              int(self.ui.lineEdit_3.text()), queue))
            p.daemon = True
            p.start()
            self.active_daemon = p

            for i in range(1, 101):
                if self.generate_stopped:
                    break
                if not queue.qsize():
                    updated_time = time()
                    while time() - updated_time < percent:
                        if self.generate_stopped:
                            break
                        QtCore.QCoreApplication.processEvents()
                    else:
                        self.ui.progressBar.setValue(i)
                else:
                    self.ui.progressBar.setValue(100)
                    break

            if not self.generate_stopped:
                p.join()
                self.ui.progressBar.setValue(0)
                self.ui.label_7.hide()
                self.ui.label_8.show()
                self.ui.lineEdit_4.setText(queue.get())
                self.ui.lineEdit_4.show()
                self.ui.pushButton_2.show()
                self.ui.pushButton_3.hide()
                self.active_daemon = None

            self.generate_stopped = False
Exemple #53
0
def server_stream(dataset_dict=None,
                  stream=None,
                  hwm=10,
                  wrapping_streams=[],
                  port=dict(train=5557, valid=5558, test=5559),
                  **kwargs):
    """This function is the main utility to use fuel.ServerDataStream in conjunction
       with fuel.server. It takes care of launching the subprocess and returns
       the stream needed in the main script.

       This function has two mutually exclusive modes:
       1) dataset_dict is not None := it will start a stream and do all the background work
           to instantiate the dataset object and the appropriate streams (mostly done by create_stream)
       2) stream is not None := it will use this stream as is

       port := port for the fuel server
       hwm := high-water mark
       wrapping_streams (only used if dataset_dict is not None) :=
           You can include a list of simple transformer objects to wrap the main stream you
           wish included in the server call. Their constructors have to be argumentless.
       kwargs := kwargs needed for the Dataset and fuel server setup
    """
    a = dataset_dict is None
    b = stream is None
    assert (a and not b) or (not a and b), "Specify a dataset XOR a stream"

    if stream is None:
        assert isinstance(dataset_dict, dict) and len(dataset_dict.keys()) == 3, \
                "dataset_dict needs to be a dict "+\
                "with keys [dataset (str name or fuel object), split, batch_size]"
        dataset = dataset_dict['dataset']
        split = dataset_dict['split']
        batch_size = dataset_dict['batch_size']

        #TODO: allow more than 1 split!
        assert split in ['train', 'valid', 'test'], \
                "split name error or NotImplemented more than 1 split"

        # auto-assign port depending on the split
        port = port[split] if isinstance(port, dict) else port

        if isinstance(dataset, str):
            sources = kwargs.setdefault('sources', ('features', ))
        else:
            sources = dataset.provides_sources
        p = Process(target=create_stream_and_start_server,
                    name='fuel_server',
                    args=(dataset, split, batch_size, port, hwm,
                          wrapping_streams, kwargs))
    else:
        sources = stream.sources
        p = Process(target=start_fuel_server,
                    name='fuel_server',
                    args=(stream, port, hwm))

    p.daemon = True
    p.start()

    sdstream = ServerDataStream(sources, False, port=port, hwm=hwm)

    return sdstream
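# A hedged usage sketch of mode 1; the dataset name and batch size are placeholders
# that create_stream must know how to resolve:
train_stream = server_stream(
    dataset_dict=dict(dataset='mnist', split='train', batch_size=128),
    hwm=10)
for batch in train_stream.get_epoch_iterator():
    pass  # consume minibatches produced by the fuel server subprocess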
Exemple #54
0
 def start(task, *args):
     process = Process(target=task, args=args)
     process.daemon = True
     process.start()
Exemple #55
0
    def run_program(self, dataset, parameters):
        self.logger.info("Starting run\nParameters:\n{}".format("\n".join(
            ["\t{}: {}".format(k, v) for k, v in parameters.items()])))
        self.logger.info(
            "Distributing load over {} cores".format(NUM_OF_WORKERS))

        kg_i, kg_s = dataset

        # fit model
        t0 = timer()

        # MP manager
        manager = Manager()

        # generate semantic item sets from sampled graph
        si_sets = manager.dict(generate_semantic_item_sets(kg_i))

        # generate common behaviour sets
        work = manager.Queue()
        keys = list(si_sets.keys())
        slices = self.diagonal_matrix_slicer(keys)

        cbs_sets = manager.list()
        pool = []
        for i in range(NUM_OF_WORKERS):
            p = Process(target=generate_common_behaviour_sets,
                        args=(si_sets, cbs_sets, work,
                              parameters["similarity_threshold"]))
            p.daemon = True
            p.start()
            pool.append(p)

        for slce in slices:
            work.put(slce)

        for p in pool:
            work.put(None)

        # join shared variables
        for p in pool:
            p.join()

        # extend common behaviour sets
        cbs_size = 2
        cbs_sets_extended = manager.list(cbs_sets)
        while cbs_size < parameters["max_cbs_size"]:
            func = partial(extend_common_behaviour_sets, cbs_sets_extended,
                           parameters["similarity_threshold"])

            slices = self.diagonal_matrix_slicer(cbs_sets_extended)
            cbs_sets_extention = manager.list()
            with Pool(processes=NUM_OF_WORKERS) as pool:
                it = pool.imap_unordered(func=func, iterable=slices)

                while True:
                    try:
                        cbs_subset = next(it)
                        cbs_sets_extention.extend(cbs_subset)
                    except StopIteration:
                        break

            cbs_sets.extend(cbs_sets_extention)
            cbs_sets_extended = cbs_sets_extention
            cbs_size *= 2

        # generate semantic item sets from sampled graph association rules
        rules = manager.list()
        work = manager.Queue()
        size = max(1, floor(len(cbs_sets) / NUM_OF_WORKERS))
        slices = [slice(i, i + size) for i in range(0, len(cbs_sets), size)]

        pool = []
        for i in range(NUM_OF_WORKERS):
            p = Process(target=generate_semantic_association_rules,
                        args=(kg_i, kg_s, cbs_sets, work, rules,
                              parameters["minimal_local_support"]))
            p.daemon = True
            p.start()
            pool.append(p)

        for slce in slices:
            work.put(slce)

        for p in pool:
            work.put(None)

        # join shared variables
        for p in pool:
            p.join()

        # calculate support and confidence, skip those not meeting minimum requirements
        final_rule_set = manager.list()
        work = manager.Queue()
        size = max(1, floor(len(rules) / NUM_OF_WORKERS))
        slices = [slice(i, i + size) for i in range(0, len(rules), size)]

        pool = []
        for i in range(NUM_OF_WORKERS):
            p = Process(target=evaluate_rules,
                        args=(kg_i, rules, work, final_rule_set,
                              parameters["minimal_support"],
                              parameters["minimal_confidence"]))

            p.daemon = True
            p.start()
            pool.append(p)

        for slce in slices:
            work.put(slce)

        for p in pool:
            work.put(None)

        # join shared variables
        for p in pool:
            p.join()

        # sorting rules on both support and confidence
        final_rule_set.sort(key=itemgetter(2, 1), reverse=True)

        # time took
        t1 = timer()
        dt = t1 - t0
        self.logger.info("Program completed in {:.3f} s".format(dt))
        print("  Program completed in {:.3f} s".format(dt))

        self.logger.info("Found {} rules".format(len(final_rule_set)))
        print("  Found {} rules".format(len(final_rule_set)))
        return final_rule_set
def paramiko_tunnel(lport,
                    rport,
                    server,
                    remoteip='127.0.0.1',
                    keyfile=None,
                    password=None,
                    timeout=60):
    """launch a tunnel with paramiko in a subprocess. This should only be used
    when shell ssh is unavailable (e.g. Windows).

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    If you are familiar with ssh tunnels, this creates the tunnel:

    ssh server -L localhost:lport:remoteip:rport

    keyfile and password may be specified, but ssh config is checked for defaults.


    Parameters
    ----------

    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.

    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing.  This prevents orphaned tunnels from running forever.

    """
    if paramiko is None:
        raise ImportError("Paramiko not available")

    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass("%s's password: " % (server))

    p = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    p.daemon = True
    p.start()
    return p
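# A usage sketch with placeholder values: redirect localhost:5555 to port 5555 on the
# gateway itself, then tear the tunnel down by terminating the subprocess it returns.
tunnel = paramiko_tunnel(5555, 5555, 'user@gateway.example.com', timeout=120)
try:
    pass  # connect clients to localhost:5555 here
finally:
    tunnel.terminate()
    tunnel.join()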
Exemple #57
0
# -*- coding: utf-8 -*-

from multiprocessing import Process
import os
import time


def sleeper(name, seconds):
    print 'starting child process with id: ', os.getpid()
    print 'parent process:', os.getppid()
    print 'sleeping for %s ' % seconds
    time.sleep(seconds)
    print "%s done sleeping" % name


if __name__ == '__main__':
    print "in parent process (id %s)" % os.getpid()
    p = Process(target=sleeper, args=('bob', 5))
    print 'daemon?', p.daemon
    p.daemon = not p.daemon
    print 'daemon?', p.daemon
    p.start()
    print "in parent process after child process start"
    print "parent process about to join child process"
    p.join()
    print "in parent process after child process join"
    print "parent process exiting with id ", os.getpid()
    print "The parent's parent process:", os.getppid()
Exemple #58
0
def sig_handler(signum, frame):
    """ Handle ctrl-c signal """
    print "Close "+__file__
    try:
        web.stop()
    except:
        pass

    sys.exit()

'''
############### main method part #########
'''
if __name__=='__main__':

    #scanning ctrl-c
    signal.signal(signal.SIGINT,sig_handler)
    signal.signal(signal.SIGTERM,sig_handler)

    # start Web server
    p2 = Process(target=web.run, args=(WEB_PORT,))
    p2.daemon = True
    p2.start()

    print('service living,  web: ' + str(WEB_PORT))

    while True:
        time.sleep(10)



Exemple #59
0
# Processes that consume some CPU and that have children
import time
from multiprocessing import Pool, Process, current_process


def f(n):
    time.sleep(n)


def do_sum(li):
    sum(li)
    proc_name = current_process().name
    print(proc_name)


my_list = list(range(150000000))

pool = Pool(3)
pool.map(
    do_sum,
    [my_list[:50000000], my_list[50000000:100000000], my_list[100000000:]])
pool.close()
pool.join()

# Generate system CPU usage
for i in range(1000):
    p = Process(target=f, args=(0.001, ))
    p.daemon = False
    p.start()
    p.join()
Exemple #60
0
            apikey = line.split("#")[0]
            apisecret = line.split("#")[1]
            total_money = line.split("#")[2]
            thread = threading.Thread(target=init_sell,
                                      args=(apikey, apisecret, total_load_coin,
                                            load_money))
            thread.daemon = True
            thread.start()
            local_thread.append(thread)
        for _th in local_thread:
            _th.join()
    while True:
        with open(multi_config_file, "r") as f:
            for line in f.readlines():
                apikey = line.split("#")[0]
                apisecret = line.split("#")[1]
                total_money = line.split("#")[2]
                p1 = Process(target=tick,
                             args=(apikey, apisecret, load_money, load_coin,
                                   load_parition, total_money,
                                   load_bidirection, load_coin_place))
                p1.daemon = True
                p1.start()
                processes.append(p1)
        processes[0].join(timeout=3600)
        for p in processes:
            p.terminate()
        processes = []

#  period_restart()