Example #1
    def run(self, parser=2, downloader=2):
        self._logger.info('Starting image download job')
        start = time.time()

        # manager for multiprocessing
        with Manager() as manager:
            # list of worker processes
            processes = []

            # shared-memory variables
            content_list = manager.list()
            image_list = manager.list()
            count = manager.Value('i', 0)
            lock = manager.Lock()
            feeder_running = manager.Value('i', 1)
            parser_running = manager.Value('i', 1)

            parser_logger = Logger('cybot_parser.log')
            downloader_logger = Logger('cybot_downloader.log')
            main_cookies = self._driver.get_cookies()
            cookie = []

            for c in main_cookies:
                cookie.append({'name': c['name'], 'value': c['value']})

            # create and start the parser processes
            for idx in range(parser):
                parser_instance = Parser(self._chromedriver, cookie,
                                         parser_logger, self._delay)
                parser_process = Process(
                    target=parser_instance.parse,
                    args=(content_list, image_list, feeder_running, parser_running))
                parser_process.name = 'Parser::' + str(idx)
                parser_process.start()
                processes.append(parser_process)
                self._logger.info('Parser', str(idx), 'process started')

            # create and start the downloader processes
            for idx in range(downloader):
                downloader_instance = Downloader(downloader_logger)
                downloader_process = Process(
                    target=downloader_instance.downloader,
                    args=(image_list, count, lock, parser_running))
                downloader_process.name = 'Downloader::' + str(idx)
                downloader_process.start()
                processes.append(downloader_process)
                self._logger.info('Downloader', str(idx), 'process started')

            # start the feeder
            self._logger.info('Starting feeder')
            self.feeder(content_list, feeder_running)

            # wait until the parser and downloader processes have finished
            for p in processes:
                p.join()

            self._logger.info('Elapsed time: {} seconds'.format(
                round(time.time() - start, 2)))
            self._logger.info('Total number of images: {}'.format(count.value))
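Example #1 relies on multiprocessing.Manager proxies (manager.list(), manager.Value(), manager.Lock()) being handed to the parser and downloader processes as shared state. The Parser, Downloader and Logger classes are project-specific; the following is only a minimal, self-contained sketch of the same pattern with trivial stand-ins for them:

import time
from multiprocessing import Manager, Process


def produce(items, running):
    # stand-in for Parser.parse: push work into the shared list
    for i in range(5):
        items.append('image_%d' % i)
        time.sleep(0.1)
    running.value = 0


def consume(items, count, lock):
    # stand-in for Downloader.downloader: drain the shared list
    while len(items) > 0:
        items.pop(0)
        with lock:
            count.value += 1


if __name__ == '__main__':
    with Manager() as manager:
        items = manager.list()
        count = manager.Value('i', 0)
        lock = manager.Lock()
        running = manager.Value('i', 1)

        producer = Process(target=produce, args=(items, running))
        producer.name = 'Parser::0'
        producer.start()
        producer.join()

        consumer = Process(target=consume, args=(items, count, lock))
        consumer.name = 'Downloader::0'
        consumer.start()
        consumer.join()
        print('processed images:', count.value)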
Example #2
def start_services():
    server = ShakecastServer()
    p = Process(target=server.start)
    p.name = 'ShakeCast-Server'
    p.daemon = True
    p.start()

    server = ShakecastWebServer()
    p = Process(target=server.start)
    p.name = 'ShakeCast-Web-Server'
    p.daemon = True
    p.start()
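Both servers in Example #2 are daemonised before start(), so they are shut down automatically when the parent process exits (and a daemonic process cannot spawn children of its own). The ShakeCast classes are external to this snippet; a generic sketch of the same launch pattern:

import time
from multiprocessing import Process


def serve(label):
    # placeholder for ShakecastServer().start / ShakecastWebServer().start
    print('serving', label)


def start_services():
    for label in ('ShakeCast-Server', 'ShakeCast-Web-Server'):
        p = Process(target=serve, args=(label,))
        p.name = label
        p.daemon = True  # must be set before start(); the child dies with the parent
        p.start()


if __name__ == '__main__':
    start_services()
    time.sleep(0.5)  # give the daemons a moment before the parent exits and takes them down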
Example #3
    def run(self):
        # check the Python version - 3.5 works, but only 3.6 or higher is properly tested
        pyversion = sys.version_info
        if pyversion < (3, 5):
            raise Exception(
                'Laniakea-Spark needs Python >= 3.5 to work. Please upgrade your Python version.'
            )
        if pyversion >= (3, 5) and pyversion < (3, 6):
            log.info('Running on Python 3.5 while Python 3.6 is recommended.')
        if not shutil.which('debspawn'):
            log.warning(
                'The "debspawn" tool was not found in PATH, we will not be able to run most actions.'
            )

        self._conf = LocalConfig()
        self._conf.load()

        log.info('Maximum number of parallel jobs: {0}'.format(
            self._conf.max_jobs))

        # initialize workers
        if self._conf.max_jobs == 1:
            # don't use multiprocess when our maximum amount of jobs is just 1
            self.run_worker_process('worker_0')
        else:
            for i in range(0, self._conf.max_jobs):
                worker_name = 'worker_{}'.format(i)
                p = Process(target=self.run_worker_process,
                            args=(worker_name, ))
                p.name = worker_name
                p.start()
Example #4
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == "":
         QMessageBox.information(self, "Target Error",
                                 "Please, first select Target for attack")
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings(
             "deauth", "select", None, False)
         self.args = str(
             self.xmlcheck.xmlSettings("mdk3", "arguments", None, False))
         if self.deauth_check == "packets_scapy":
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker,
                         args=(self.bssid, str(self.input_client.text())))
             print("[*] deauth Attack On:" + self.bssid)
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             if path.isfile(popen('which mdk3').read().split("\n")[0]):
                 self.AttackStatus(True)
                 t = ProcessThread(
                     ("mdk3 %s %s %s" %
                      (self.interface, self.args, self.bssid)).split())
                 t.name = "mdk3"
                 threadloading['mdk3'].append(t)
                 t.start()
             else:
                 QMessageBox.information(self, 'Error mdk3',
                                         'mkd3 not installed')
                 set_monitor_mode(self.get_placa.currentText()).setDisable()
Example #5
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == "":
         QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings("deauth", "select",None,False)
         self.args = str(self.xmlcheck.xmlSettings("mdk3","arguments", None, False))
         if self.deauth_check == "packets_scapy":
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text())))
             print("[*] deauth Attack On:"+self.bssid)
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             if path.isfile(popen('which mdk3').read().split("\n")[0]):
                 self.AttackStatus(True)
                 t = ProcessThread(("mdk3 %s %s %s"%(self.interface,self.args,self.bssid)).split())
                 t.name = "mdk3"
                 threadloading['mdk3'].append(t)
                 t.start()
             else:
                 QMessageBox.information(self,'Error mdk3','mkd3 not installed')
                 set_monitor_mode(self.get_placa.currentText()).setDisable()
Example #6
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == "":
         QMessageBox.information(self, "Target Error",
                                 "Please, first select Target for attack")
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings(
             "deauth", "select", None, False)
         self.args = str(
             self.xmlcheck.xmlSettings("mdk3", "arguments", None, False))
         if self.deauth_check == "packets_scapy":
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker,
                         args=(self.bssid, str(self.input_client.text())))
             print("[*] deauth Attack On:" + self.bssid)
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             self.AttackStatus(True)
             t = ProcessThread(
                 ("mdk3 mon0 %s %s" % (self.args, self.bssid)).split())
             t.name = "mdk3"
             threadloading['mdk3'].append(t)
             t.start()
Example #7
    def __try_start(self):

        def _wrapper(func, queue):
            def _inner(*args, **kwargs):
                try:
                    result = func(*args, **kwargs)
                    queue.put(result)
                finally:
                    queue.close()

            return _inner

        if self.is_closed and not self.always_finish:
            return

        with self.__pending_lock:
            with self.__running_lock:
                while self.has_pending_processes and not self.is_full:
                    # Create a new Process
                    next = self.__pending.pop()
                    q = Queue()
                    p = Process(
                            target=_wrapper(func=next['target'], queue=q),
                            args=next['args'],
                            kwargs=next['kwargs'])

                    if not next['name'] is None:
                        p.name = next['name']

                    self.__running.append({
                        'process': p,
                        'queue': q,
                        'callback': next['callback']
                        })
                    p.start()
Example #8
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == '':
         QMessageBox.information(self, 'Target Error',
                                 'Please, first select Target for attack')
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings(
             'deauth', 'select', None, False)
         self.args = str(
             self.xmlcheck.xmlSettings('mdk3', 'arguments', None, False))
         self.interface = str(
             set_monitor_mode(self.get_placa.currentText()).setEnable())
         if self.deauth_check == 'packets_scapy':
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker,
                         args=(self.bssid, str(self.input_client.text()),
                               self.interface))
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             if path.isfile(popen('which mdk3').read().split("\n")[0]):
                 self.AttackStatus(True)
                 t = ProcessThread(
                     ('mdk3 %s %s %s' %
                      (self.interface, self.args, self.bssid)).split())
                 t.name = 'Thread mdk3'
                 threadloading['mdk3'].append(t)
                 t.start()
             else:
                 QMessageBox.information(self, 'Error mdk3',
                                         'mkd3 not installed')
                 set_monitor_mode(self.get_placa.currentText()).setDisable()
Example #9
    def __try_start(self):
        def _wrapper(func, queue):
            def _inner(*args, **kwargs):
                try:
                    result = func(*args, **kwargs)
                    queue.put(result)
                finally:
                    queue.close()

            return _inner

        if self.is_closed and not self.always_finish:
            return

        with self.__pending_lock:
            with self.__running_lock:
                while self.has_pending_processes and not self.is_full:
                    # Create a new Process
                    next = self.__pending.pop()
                    q = Queue()
                    p = Process(target=_wrapper(func=next['target'], queue=q),
                                args=next['args'],
                                kwargs=next['kwargs'])

                    if not next['name'] is None:
                        p.name = next['name']

                    self.__running.append({
                        'process': p,
                        'queue': q,
                        'callback': next['callback']
                    })
                    p.start()
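The _wrapper/_inner closure in Examples #7 and #9 is how the pool gets a target's return value back: the result is pushed through a per-process Queue. A stripped-down, standalone version of the idea (note that passing a closure as target works with the fork start method, the default on Linux, but is not picklable under spawn):

from multiprocessing import Process, Queue


def _wrapper(func, queue):
    # forward the target's return value through the queue, as _inner does above
    def _inner(*args, **kwargs):
        try:
            queue.put(func(*args, **kwargs))
        finally:
            queue.close()
    return _inner


def add(a, b):
    return a + b


if __name__ == '__main__':
    q = Queue()
    p = Process(target=_wrapper(add, q), args=(2, 3))
    p.name = 'add-worker'
    p.start()
    print(q.get())  # prints 5
    p.join()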
Example #10
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == '':
         QMessageBox.information(self, 'Target Error', 'Please, first select Target for attack')
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings('deauth', 'select',None,False)
         self.args = str(self.xmlcheck.xmlSettings('mdk3','arguments', None, False))
         self.interface = str(set_monitor_mode(self.get_placa.currentText()).setEnable())
         if self.deauth_check == 'packets_scapy':
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker, args=(self.bssid,
             str(self.input_client.text()),self.interface))
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             if path.isfile(popen('which mdk3').read().split("\n")[0]):
                 self.AttackStatus(True)
                 t = ProcessThread(('mdk3 %s %s %s'%(self.interface,self.args,self.bssid)).split())
                 t.name = 'Thread mdk3'
                 threadloading['mdk3'].append(t)
                 t.start()
             else:
                 QMessageBox.information(self,'Error mdk3','mkd3 not installed')
                 set_monitor_mode(self.get_placa.currentText()).setDisable()
Example #11
    async def start_worker(self, index):
        self.log.info('Start block loader worker %s' % index)
        # prepare pipes for communications
        in_reader, in_writer = os.pipe()
        out_reader, out_writer = os.pipe()
        in_reader, out_reader = os.fdopen(in_reader,
                                          'rb'), os.fdopen(out_reader, 'rb')
        in_writer, out_writer = os.fdopen(in_writer,
                                          'wb'), os.fdopen(out_writer, 'wb')

        # create new process
        worker = Process(target=Worker,
                         args=(index, in_reader, in_writer, out_reader,
                               out_writer, self.rpc_url, self.rpc_timeout,
                               self.rpc_batch_limit, self.dsn,
                               self.parent.app_proc_title))
        worker.start()
        in_reader.close()
        out_writer.close()
        # get stream reader
        worker.reader = await self.get_pipe_reader(out_reader)
        worker.writer = await self.get_pipe_writer(in_writer)
        worker.name = str(index)
        self.worker[index] = worker
        self.worker_busy[index] = False
        # start message loop
        self.loop.create_task(self.message_loop(index))
        # block until the worker process exits (or crashes)
        await self.loop.run_in_executor(None, worker.join)
        del self.worker[index]
        self.log.info('Block loader worker %s is stopped' % index)
Example #12
    def run(self):
        """

        """
        try:
            cfg = ConfigParser()
            re = cfg.read(CONFIG_FILE)
            if CONFIG_FILE not in re:
                self.error_parse_config()
        except Exception:
            self.error_parse_config()

        appProcess = list()
        for i in cfg.sections():
            print "Starting push process for App %s" % cfg.get(i, 'app_name')
            p = Process(target=runApp, args=(cfg.getboolean(i, 'app_sandbox'),
                                             cfg.get(i, 'app_cert'),
                                             cfg.get(i, 'app_key'),
                                             cfg.get(i,'driver'),
                                             cfg.get(i, 'queue_host'),
                                             cfg.getint(i,'queue_port'),
                                             cfg.get(i, 'queue_db_name'),
                                             cfg.get(i, 'queue_username'),
                                             cfg.get(i, 'queue_password'),
                                             cfg.get(i, 'app_queue_name'),
                                             cfg.get(i, 'app_name'),
                                             cfg.getboolean(i,'debug'),
                                             cfg.get(i,'feedback_callback'),))
            appProcess.append(p)
            p.name = cfg.get(i, 'app_name')
            p.daemon = True
            p.start()

        for p in appProcess:
            p.join()
Example #13
def launch_stream(path, put_queue):
    # streamer = Process(target=start_video_stream, args=(path, put_queue,)) # function for video stream
    streamer = Process(target=frames_folder, args=(
        path,
        put_queue,
    ))  # function for photo stream
    streamer.name = "VideoStreamer"
    streamer.start()
Example #14
def run_tasks():
    for k,t in __Task_list__.items():
        func = t["func"]    
        intterupt=t["intterupt"]
        p=Process(target=__roll__,args=(func,intterupt))
        p.name=k
        p.daemon=True
        p.start()
        t["Process"]=p
Example #15
def upload(code, filename = "temp.sl", platform = "telosb"):
    with open(filename, "w") as codeFile:
        codeFile.write(code)
    generateMakefile(filename)

    p1 = Process(target = doPopen, args = (["make", platform, "upload"],))
    p1.daemon = True
    p1.name = "Upload thread"
    p1.start()
Example #16
def start_PTZ_thread(lock, camera, pan_amt, tilt_amt, is_system_on_high_alert, cam_name):
	# Initialise threaded PTZ controller
	processPTZ = Process(target=application_helpers.PTZ_task,
						 args=(lock, camera, pan_amt, tilt_amt, is_system_on_high_alert))
	processPTZ.name = "PTZ_Controller_" + cam_name
	processPTZ.daemon = True  # run the watchdog as daemon so it terminates with the main process
	processPTZ.start()

	return processPTZ
Example #17
def upload(code, filename="temp.sl", platform="telosb"):
    with open(filename, "w") as codeFile:
        codeFile.write(code)
    generateMakefile(filename)

    p1 = Process(target=doPopen, args=(["make", platform, "upload"], ))
    p1.daemon = True
    p1.name = "Upload thread"
    p1.start()
Example #18
def init(xmpp_server, jid, password, handle_message, msg_queue, child_conn):
    # Interprocess queue for dispatching xmpp messages


    msg_process = Process(target=message_consumer, args=(xmpp_server, jid, password, handle_message, msg_queue, child_conn))
    msg_process.name = 'msg_process'
    msg_process.daemon = True
    msg_process.start()

    return msg_process
Example #19
 def _repopulate_pool(self) -> None:
     """Bring the number of pool processes up to the specified number,
     for use after reaping workers which have exited.
     """
     for _ in range(self._num_workers - len(self._pool)):
         w = Process(target=self._target,
                     args=self._args,
                     kwargs=self._kwargs)
         self._pool.append(w)
         w.name = w.name.replace('Process', 'PoolWorker')
         w.start()
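The rename in _repopulate_pool works because unnamed processes get default names of the form 'Process-N', so the replace turns them into 'PoolWorker-N'; this is the same trick multiprocessing.pool itself uses for its workers. A minimal illustration:

from multiprocessing import Process


def noop():
    pass


if __name__ == '__main__':
    w = Process(target=noop)
    print(w.name)  # default name, e.g. 'Process-1'
    w.name = w.name.replace('Process', 'PoolWorker')
    print(w.name)  # e.g. 'PoolWorker-1'
    w.start()
    w.join()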
Example #20
 def _setup_main_queue(self):
     self._queue = Queue()
     
     if self._use_processes is True:
         t = Process(target=self._process_queue)
     else:
         t = _Thread(target=self._process_queue)
     t.name = "Queue"
     t.daemon = True
     t.start()
     print "Queue started", t
Example #21
 def _repopulate_pool(self) -> None:
     """Bring the number of pool processes up to the specified number,
     for use after reaping workers which have exited.
     """
     for _ in range(self._num_workers - len(self._pool)):
         w = Process(target=QgsRequestHandler.run, args=(self._router,),
                                kwargs={ 'broadcastaddr': self._broadcastaddr } )
         self._pool.append(w)
         w.name = w.name.replace('Process', 'QgisWorker')
         w.daemon = True
         w.start()
Example #22
 def _repopulate_pool(self) -> None:
     """Bring the number of pool processes up to the specified number,
     for use after reaping workers which have exited.
     """
     for _ in range(self._num_workers - len(self._pool)):
         w = Process(target=worker_handler,
                     args=(self._router, self._broadcastaddr),
                     kwargs=dict(maxcycles=self._maxcycles,
                                 initializer=self._initializer,
                                 initargs=self._initargs))
         self._pool.append(w)
         w.name = w.name.replace('Process', 'PoolWorker')
         w.start()
Example #23
    def flush(self):
        """Call eshelpers.bulk() in a new thread."""
        # if flush() is called for the first time after a fork,
        # self.bulk_send_processes may contain processes that are not our children,
        # so refresh_forked_handler() must be called first
        self.refresh_forked_handler()

        if self._timer is not None and self._timer.is_alive():
            self._timer.cancel()
        self._timer = None

        # join and delete dead bulk_send processes
        alive_processes = []
        for process in self.bulk_send_processes:
            if not process.is_alive():
                process.join()  # blocking w/o timeout
            else:
                alive_processes.append(process)
        self.bulk_send_processes = alive_processes

        if self._buffer:
            try:
                with self._buffer_lock:
                    logs_buffer = self._buffer
                    self._buffer = []
                actions = ({
                    '_index':
                    self._index_name_func.__func__(self.es_index_name),
                    '_type':
                    self.es_doc_type,
                    '_source':
                    log_record
                } for log_record in logs_buffer)
                # call bulk send in a new process
                bulk_send_process = Process(
                    target=eshelpers.bulk,
                    kwargs={
                        # name mangling is used in get_es_client()
                        'client': self._CMRESHandler__get_es_client(),
                        'actions': actions,
                        'stats_only': True
                    })
                bulk_send_process.name = \
                    "ES Logging {} @ {}".format(os.getpid(), time.time())
                bulk_send_process.start()
                # save reference to the child process
                self.bulk_send_processes.append(bulk_send_process)

            except Exception as exception:
                if self.raise_on_indexing_exceptions:
                    raise exception
Example #24
    def __init__(self, url, process_count, location, pages):
        self.url = url
        self.queue = Queue()
        self.process_count = process_count
        self.location = location
        self.pages = pages
        self.sub = url.split('/')[-2]

        self.logger = logging.getLogger('reddigImages')
        self.logger.setLevel(level)
        hdlr = logging.FileHandler('{}/{}_{}.log'.format(
            location, self.sub,
            datetime.datetime.now().strftime('%Y%m%d_%H%M%S')))
        hdlr.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(hdlr)

        self.add_process = Process(target=add_pages,
                                   args=(
                                       self.logger,
                                       self.url,
                                       self.queue,
                                       self.pages,
                                   ))
        self.add_process.name = 'Add Pages'
        self.add_process.daemon = True
        self.add_process.start()

        self.logger.info('Download for %s started!' % url)

        time.sleep(5)
        self.logger.debug('queue empty: %s' % self.queue.empty())

        self.jobs = []
        for i in range(0, process_count):
            job = Process(target=get_data,
                          args=(
                              self.logger,
                              self.url,
                              self.queue,
                              self.location,
                          ))
            job.daemon = True
            job.name = '%s_imgProcess_%s' % (self.sub, i)
            self.jobs.append(job)
            try:
                job.start()
            except OSError, e:
                logger.info('Can\'t start process %s because of %s' %
                            (i, str(e)))
Example #25
def main():
    # pass arguments through args, matched to parameters by position
    #p = Process(target=work,args=(2,'Suchang'))
    # pass arguments through kwargs, matched to parameters by keyword
    #p = Process(target=work,kwargs={'sec':2,'name':'suchang'})
    # combine args and kwargs; note that args must be an iterable (here a 1-tuple)
    p = Process(target=work, args=(2, ), kwargs={'name': 'suchang'})

    p.start()
    # give the process a custom name by assigning to its name attribute
    p.name = 'worker'
    print('Process isalive?:%s' % (p.is_alive()))
    print('Process name:%s' % p.name)
    print('Process PID:%s' % p.pid)
    p.join()
    print('Process isalive?:%s' % (p.is_alive()))
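Example #25 assumes a work(sec, name) function that is not shown. A self-contained version of the same args/kwargs passing, with a guessed body for work():

import time
from multiprocessing import Process


def work(sec, name):
    # hypothetical stand-in for the work() used in Example #25
    time.sleep(sec)
    print('done:', name)


if __name__ == '__main__':
    # positional argument through args (note the trailing comma), keyword through kwargs
    p = Process(target=work, args=(2,), kwargs={'name': 'suchang'})
    p.start()
    p.name = 'worker'  # renaming after start() only changes the attribute, not the OS process
    print('alive?', p.is_alive(), '| name:', p.name, '| pid:', p.pid)
    p.join()
    print('alive?', p.is_alive())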
Example #26
    def add_worker(self):
        wname = "PySplash-%s" % self.count
        w = Process(target=_worker, args=(wname, self.pool_queue, self.args))
        w.name = wname
        
        self.count += 1

        worker = {}
        worker['state'] = WorkerState.STARTING
        worker['last_update'] = time.time()
        worker['w'] = w

        self.workers[w.name] = worker

        self.last_scale = time.time()
        w.start()
Example #27
def test():
    global jobs
    req = request.json

    if req and 'REPO' in req and 'BRANCH' in req and 'ID' in req and \
            any([x for x in config['ARRAYS'] if x['ID'] == req['ID']]):

        # Add a check to make sure we aren't already _running_ a job for this
        # array
        for k, v in jobs.items():
            if v['ID'] == req['ID']:
                # Update status to make sure
                _update_state(k)
                if v['STATUS'] == 'RUNNING':
                    response.status = 412  # Precondition fail
                    return

        # Run the job
        # Build the arguments for the script
        uri = ""
        password = ""
        plug = ""

        for a in config['ARRAYS']:
            if a['ID'] == req['ID']:
                uri = a['URI']
                password = a['PASSWORD']
                plug = a['PLUGIN']
                break

        # When we add rpm builds we will need client to pass which 'type' too
        incoming = ('git', req['REPO'], req['BRANCH'], uri, password)
        job_id = _rs(32)
        p = Process(target=_run_command, args=(job_id, incoming))
        p.name = "|".join(incoming)
        p.start()

        jobs[job_id] = dict(STATUS='RUNNING',
                            PROCESS=p,
                            ID=req['ID'],
                            PLUGIN=plug)
        response.status = 201
        return {"JOB_ID": job_id}
    else:
        response.status = 400
Example #28
def test():
    global jobs
    req = request.json

    if req and 'REPO' in req and 'BRANCH' in req and 'ID' in req and \
            any([x for x in config['ARRAYS'] if x['ID'] == req['ID']]):

        # Add a check to make sure we aren't already _running_ a job for this
        # array
        for k, v in jobs.items():
            if v['ID'] == req['ID']:
                # Update status to make sure
                _update_state(k)
                if v['STATUS'] == 'RUNNING':
                    response.status = 412   # Precondition fail
                    return

        # Run the job
        # Build the arguments for the script
        uri = ""
        password = ""
        plug = ""

        for a in config['ARRAYS']:
            if a['ID'] == req['ID']:
                uri = a['URI']
                password = a['PASSWORD']
                plug = a['PLUGIN']
                break

        # When we add rpm builds we will need client to pass which 'type' too
        incoming = ('git', req['REPO'], req['BRANCH'], uri, password)
        job_id = _rs(32)
        p = Process(target=_run_command, args=(job_id, incoming))
        p.name = "|".join(incoming)
        p.start()

        jobs[job_id] = dict(STATUS='RUNNING',
                            PROCESS=p,
                            ID=req['ID'],
                            PLUGIN=plug)
        response.status = 201
        return {"JOB_ID": job_id}
    else:
        response.status = 400
Example #29
 def startmyloop(s):
     global defaultdevice
     print('startmyloop')
     q = Queue()
     qtoc = Queue()
     p = Process(target=theloop, args=(q, qtoc))
     p.daemon = True
     p.name = 'theloopprocess'
     p.start()
     mycmd = ''
     while mycmd != 'quit':
         qtoc.put(defaultdevice)
         try:
             mycmd = q.get()
             s.testcaseengine.debug(mycmd)
             runcmd(mycmd)
         except KeyboardInterrupt:
             pass
Example #30
    def job_create(repo, branch, array_id):
        global jobs

        testlib.p("Running test for %s %s %s" % (repo, branch, array_id))

        if any([x for x in config['ARRAYS'] if x['ID'] == array_id]):

            # Add a check to make sure we aren't already _running_
            # a job for this array
            for k, v in jobs.items():
                if v['ID'] == array_id:
                    # Update status to make sure
                    _update_state(k)
                    if v['STATUS'] == 'RUNNING':
                        return "", 412, "Job already running on array"

            # Run the job
            # Build the arguments for the script
            uri = ""
            password = ""
            plug = ""

            for a in config['ARRAYS']:
                if a['ID'] == array_id:
                    uri = a['URI']
                    password = a['PASSWORD']
                    plug = a['PLUGIN']
                    break

            # When we add rpm builds we will need client to pass
            # which 'type' too
            incoming = ('git', repo, branch, uri, password)
            job_id = _rs(32)
            p = Process(target=_run_command, args=(job_id, incoming))
            p.name = "|".join(incoming)
            p.start()

            jobs[job_id] = dict(STATUS='RUNNING',
                                PROCESS=p,
                                ID=array_id,
                                PLUGIN=plug)
            return job_id, 201, ""
        else:
            return "", 400, "Invalid array specified!"
Example #31
def copy_files_and_start_sumo_UI(port, number_of_parallel_runs, begin_time,
                                 theta):
    for i in range(0, number_of_parallel_runs):
        # creating new directories and copy base files
        sample = "sample" + str(i)
        path = "C:\Users\Rajitha\Desktop\glosa\glosa" + sample
        os.mkdir(path)
        shutil.copy(r"C:\Users\Rajitha\Desktop\glosa\temp.py", path)
        shutil.copy("C:\Users\Rajitha\Desktop\glosa\example.net.xml", path)
        shutil.copy(r"C:\Users\Rajitha\Desktop\glosa\demo.sumo.cfg", path)

        datafile = r"C:\Users\Rajitha\Desktop\glosa\filename.xml"
        with open(datafile, 'r') as myfile:
            xml = myfile.read()
        xml = """""" + xml + """"""
        xml = xml.format(placeholder="\"" + str(begin_time[i]) + "\"")
        #print xml
        file = open(path + r"\filename.xml", "w")
        file.write(xml)
        file.close()

        os.chdir(path)
        import temp  # importing glosa module for each and every glosa instance

        p = Process(target=temp.starter, args=(i, port, theta[0], theta[1]))
        p.name = sample
        Pros.append(p)
        p.start()
        port = port + 1

    for t in Pros:
        t.join()

    for j in range(0, number_of_parallel_runs):
        altprocess = Pros[j]
        altprocess.terminate()

    file = open("C:\Users\Rajitha\Desktop\glosa\glosasample0\data.txt", "r")
    temp_data = file.read()
    margin_and_delay = temp_data.split(" ")
    margins.append(margin_and_delay[0])
    delays.append(margin_and_delay[1])

    os.chdir("C:\Users\Rajitha\Desktop\glosa")
Example #32
    def run(self):
        filename = os.path.join(self.API.path, Settings.get('blockly_location'))
        host = Settings.get('blockly_host')
        port = int(Settings.get('blockly_port'))
        con1, con2 = Pipe()
        p1 = Process(target = listen, args = (host, port, con2, True))
        p1.daemon = False
        p1.name = "Socket listening thread"

        try:
            p1.start()
            self.printLine("Service started successfully.")
            if p1.is_alive():
                # Damn linux
                if os.name == 'posix':
                    Popen(['xdg-open', filename])
                # other OS
                else:
                    webbrowser.open_new_tab(filename)
            else:
                self.printLine("Failed to open {}:{}, port might be in use.".format(host, port))
            lastSync = time.time() + 20
            self.API.onExit.append(p1.terminate)
            while p1.is_alive() and self.active:
                if con1.poll(0.1):
                    data = con1.recv()
                    if data != "Sync recieved":
                        self.handleRecievedCode(data)
                    lastSync = time.time()
                if time.time() - lastSync > 20:
                    self.printLine("No sync for 20 sec.\nTerminating...")
                    self.API.onExit.remove(p1.terminate)
                    p1.terminate()
                    break
                wx.YieldIfNeeded()
            if p1.is_alive():
                self.printLine("Service terminated successfully.")
                self.API.onExit.remove(p1.terminate)
                p1.terminate()
        except:
            self.printLine("Exception occurred, terminating...")
            if p1.terminate in self.API.onExit:
                self.API.onExit.remove(p1.terminate)
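Example #32 (and Example #37 below) polls one end of a multiprocessing.Pipe for sync messages from the listener process and terminates it after a timeout. The essential pattern, reduced to a runnable sketch with a trivial listener in place of the Blockly socket service:

import time
from multiprocessing import Pipe, Process


def listen(conn):
    # stand-in for the socket listener: send a single sync message and exit
    conn.send('sync')


if __name__ == '__main__':
    con1, con2 = Pipe()
    p1 = Process(target=listen, args=(con2,))
    p1.name = 'Socket listening thread'
    p1.start()

    last_sync = time.time()
    while p1.is_alive() or con1.poll(0):
        if con1.poll(0.1):  # wait up to 100 ms for data
            print('received:', con1.recv())
            last_sync = time.time()
        if time.time() - last_sync > 5:
            p1.terminate()  # give up after 5 seconds without a sync
            break
    p1.join()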
Example #33
 def childPro(self):
     print("守护进程被启动")
     signal.signal(signal.SIGINT, signal.SIG_IGN)  #接收到ctrl+c的信号,这儿默认是忽略
     os.setsid()  #子进程摆脱屏幕控制,成为后台进程
     self.writePidToFile("w", os.getpid())
     while True:
         grandChildProcess = Process(target=self.grandChildPro)
         grandChildProcess.name = "daemonWorkerPro"
         grandChildProcess.start()
         grandPid = grandChildProcess.pid
         print("子进程的pid为:", grandPid)
         grandChildProcess.join()
         if not grandChildProcess.exitcode == None:
             print("子进程被退出,具体接收到的信号为:", grandChildProcess.exitcode)
             pidList = self.readPidFromFile()
             if grandPid in pidList:
                 pidList.remove(grandPid)
             for everyPid in pidList:
                 self.writePidToFile("w", everyPid)
Example #34
    def job_create(repo, branch, array_id):
        global jobs

        testlib.p("Running test for %s %s %s" % (repo, branch, array_id))

        if any([x for x in config["ARRAYS"] if x["ID"] == array_id]):

            # Add a check to make sure we aren't already _running_
            # a job for this array
            for k, v in jobs.items():
                if v["ID"] == array_id:
                    # Update status to make sure
                    _update_state(k)
                    if v["STATUS"] == "RUNNING":
                        return "", 412, "Job already running on array"

            # Run the job
            # Build the arguments for the script
            uri = ""
            password = ""
            plug = ""

            for a in config["ARRAYS"]:
                if a["ID"] == array_id:
                    uri = a["URI"]
                    password = a["PASSWORD"]
                    plug = a["PLUGIN"]
                    break

            # When we add rpm builds we will need client to pass
            # which 'type' too
            incoming = ("git", repo, branch, uri, password)
            job_id = _rs(32)
            p = Process(target=_run_command, args=(job_id, incoming))
            p.name = "|".join(incoming)
            p.start()

            jobs[job_id] = dict(STATUS="RUNNING", PROCESS=p, ID=array_id, PLUGIN=plug)
            return job_id, 201, ""
        else:
            return "", 400, "Invalid array specified!"
Example #35
 def attack_deauth(self):
     global threadloading
     if self.linetarget.text() == "":
         QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
     else:
         self.bssid = str(self.linetarget.text())
         self.deauth_check = self.xmlcheck.xmlSettings("deauth", "select",None,False)
         self.args = str(self.xmlcheck.xmlSettings("mdk3","arguments", None, False))
         if self.deauth_check == "packets_scapy":
             self.AttackStatus(True)
             t = Process(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text())))
             print("[*] deauth Attack On:"+self.bssid)
             threadloading['deauth'].append(t)
             t.daemon = True
             t.start()
         else:
             self.AttackStatus(True)
             t = ProcessThread(("mdk3 mon0 %s %s"%(self.args,self.bssid)).split())
             t.name = "mdk3"
             threadloading['mdk3'].append(t)
             t.start()
Example #36
    def __init__(self):

        newOut = open("/dev/null", "w")
        newIn = open("/dev/null", "r")
        os.dup2(newIn.fileno(), sys.stdin.fileno())
        os.dup2(newOut.fileno(), sys.stdout.fileno())

        self.sem = Semaphore()  # initialise a semaphore so that only one process can access at a time
        argList = sys.argv
        if argList[1] == "start":
            childProcess = Process(target=self.childPro)
            childProcess.name = "daemonParentPro"
            childProcess.start()
        elif argList[1] == "stop":

            pidList = self.readPidFromFile()
            if len(pidList) > 0:
                for pidValue in pidList:
                    cmd = "kill -9 " + str(pidValue)
                    print("执行杀死进程命令", cmd)
                    subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        else:
            print("error command")
Example #37
if __name__ == '__main__':
    try:
        versionFile = os.path.join(pathToMansos, "doc/VERSION")
        if not os.path.exists(pathToMansos) or not os.path.isfile(versionFile):
            print ("MansOS not found in '{}', please edit this file in line 35".format(pathToMansos))
        else:
            f = open(versionFile, "r")
            version = f.readline()
            print ("MansOS version: {}".format(version))
            abs_path = os.path.abspath('.') #Absolute path of current working directory
            filename = "file://"+os.path.join(abs_path, 'blockly/seal/playground-seal.html')

            con1, con2 = Pipe()
            p1 = Process(target = listen, args = (con2,))
            p1.daemon = False
            p1.name = "Socket listening thread"
            p1.start()
            time.sleep(0.1)

            if p1.is_alive():
                print ('Try to open "{}"'.format(filename))
                webbrowser.open(filename)
            else:
                print ("Failed to open {}:{}, port might be in use.".format(host, port))
            lastSync = time.time() + 30
            while p1.is_alive():
                if con1.poll(0.1):
                    con1.recv()
                    lastSync = time.time()
                if time.time() - lastSync > 10:
                    print ("No sync for 10 sec.\nTerminating...")
Example #38
    def create_children(self, names=None, new_names=None, classes=None, start=False, use_cgroup=True):
        """If classes, create children from list of classes.
         Classes can be a list, or a list of (class, kwargs) tuples
         If names, revive named pickles or rerun child if pickle doesn't exist (i.e. save failed).
         If neither, revive all pickled children."""
        names_and_classes = dict()
        if classes is None and names is None:
            names = self.child_serialization_filenames.keys()

        cgroup = None
        if use_cgroup:
            cgroup = self.cgroup

        if classes is not None:
            if type(classes[0]) is not tuple:  # if there's just classes (no kwargs) then convert
                classes = [(x, {}) for x in classes]
            if new_names is None:
                new_names = [self.namegen() for _ in range(len(classes))]
            for name, a_class in zip(new_names, classes):
                names_and_classes[name] = a_class

        if names is not None:
            for name in names:
                exit_code = self.child_processes[name].exitcode
                if exit_code is None:
                    if self.child_processes[name].is_alive():
                        logger.error("%s: Child %s has not terminated",
                                     self.name, name)
                        continue
                    else:
                        logger.error("%s: Child %s has been created but not started",
                                     self.name, name)
                if exit_code < 0:
                    logger.warn("%s: Child %s exited with code %d and I won't restart it",
                                self.name, name, exit_code)
                    continue
                elif exit_code == 1:
                    logger.info("%s: Child %s has a saved state", self.name, name)
                    names_and_classes[name] = (self.child_classes[name], self.child_kwargs[name])
                elif exit_code == 0:
                    logger.warn("%s: Child %s terminated of its own accord and I won't restart it",
                                self.name, name)
                    continue

        # Loop over names and classes, creating children
        for name, (cl, kwargs) in names_and_classes.iteritems():
            child = cl(**kwargs)
            child.name = name
            self.child_classes[name] = cl
            self.child_kwargs[name] = kwargs
            # Create conns
            child.inbox_conn = self.conns_to_children[child.name] = Queue()
            self.conns_from_children[child.name] = child.outbox_conn = Queue()

            # Set save file
            if name in self.child_processes and self.child_processes[name].exitcode == 1:
                # child has been started before
                pickle_file = child.save_file = self.child_serialization_filenames[name]
            else:
                root, ext = os.path.splitext(self.save_file)
                child.save_file = self.child_serialization_filenames[name] = root + '_' + name + ext
                pickle_file = None

            # share communication value
            child.flag = self.child_flags[name] = Value('i', 0)

            # Create process
            p = Process(target=start_communication,
                        kwargs=dict(agent=child, state_filename=pickle_file, cgroup=cgroup,
                                    password=self.password))
            p.name = name
            logger.info("%s: Created child %s", self.name, name)
            self.child_processes[child.name] = p
            self.child_states[name] = 'unstarted'
            del child
            if start:
                p.start()

        self.startable_children.update(names_and_classes.keys())
        return names_and_classes.keys()  # returns list of created child names
Example #39
def a():
    while True:
        pass


import os, time
import signal
from multiprocessing import Process
if __name__ == '__main__':

    p = Process(target=a, )
    p.name = "luxu"
    p.start()

    time.sleep(5)
    os.kill(p.ident, signal.SIGTERM)
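In Example #39, p.ident is simply an alias for p.pid, and on POSIX systems p.terminate() sends the same SIGTERM; afterwards exitcode is the negated signal number (typically -15). The equivalent using terminate():

import time
from multiprocessing import Process


def a():
    while True:
        time.sleep(0.1)


if __name__ == '__main__':
    p = Process(target=a, name='luxu')
    p.start()
    time.sleep(1)
    p.terminate()  # on POSIX this sends SIGTERM, like os.kill(p.pid, signal.SIGTERM)
    p.join()
    print(p.exitcode)  # negated signal number, e.g. -15 after SIGTERM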
Example #40
Processes = []
NumProcesses = 12 # Number of cores available to do the processing

LineToProcess = iRow
# Run until all the threads are done, and there is no pixels to process
while Processes or LineToProcess < eRow:

    # if we aren't using all the processors AND there is interpolations left to
    # compute, then spawn another thread
    if (len(Processes) < NumProcesses) and LineToProcess < eRow:

        p = Process(target = DoClassification, args=[LineToProcess, iCol, eCol])

        p.daemon = True
        p.name = str(LineToProcess)
        p.start()
        Processes.append(p)

        LineToProcess += 1

    # in case that we have the maximum number of threads check
    # if any of them are done.
    else:
        for process in Processes:
            if not process.is_alive():
                Processes.remove(process)
                if int(process.name) % 10 == 0:
                    print process.name, 'processed'

Example #41
def connect(sig, who):
    ## 1 = CONNECT
    ## 2 = WITH SOCKET
    ## 3 = CONNECTED
    ## 4 = RECONNECT
    global q
    global s
    global pMain
    global packet_send
    global packet_recv
    global init_packet_send
    global init_packet_recv
    global sock
    global connected
    debug(2, 'CONNECT', 'CALLED')
    debug(3, 'CONNECT', 'SIGNAL {}'.format(sig))
    debug(3, 'CONNECT', 'WHO {}'.format(who))
    debug(3, 'CONNECT', 'SOCKET {}'.format(sock))
    debug(3, 'CONNECT', 'CONNECTED {}'.format(connected))
    sig = int(sig)
    if sig == int(1) and sock == False:
        try:
            connsock(1, 'CONNECT')
        except KeyboardInterrupt:
            signals(2, 'CONNECT 1', 'SIGINT')
            raise
        except:
            raise
    elif sig == int(2) and sock == True:
        try:
            connsock(2, 'CONNECT')
        except KeyboardInterrupt:
            signals(2, 'CONNECT 2', 'SIGINT')
            raise
        except:
            raise
    elif sig == int(3) and sock == True and connected == True:
        stdout(1, 'Connected to {}.'.format(host))
        packet_recv = int(0)
        packet_send = int(0)
        init_packet_recv = int(0)
        init_packet_send = int(0)
        try:
            log(4, 'Init Starting')
            comms(1, None)
            log(4, 'Start Some Workers For Queue')
            try:
                pool = multiprocessing.Pool(processes=1)
                m = multiprocessing.Manager()
                q = m.Queue()
                workers = pool.apply_async(localqueue, ('GET', None))
            except KeyboardInterrupt:
                signals(2, 'CONNECT 3', 'SIGINT')
                raise
            except:
                print 'unable to start workers'
                p = Process(target=localqueue, args=('GET', None))
                p.name = 'comms2'
                p.start()
                raise
            log(4, 'BUILD CREATED')
            procs(1)
            log(4, 'Init Done')
        except KeyboardInterrupt:
            signals(2, 'Init', 'SIGINT')
            raise
        except:
            raise
        try:
            log(4, 'Main Starting')
            pMain = Process(target=procs, args=('2'))
            pMain.name = 'pMain'
            pMain.start()
            time.sleep(2)
            try:
                pid2 = pMain.pid
                log(4, 'pMain Started With PID {}'.format(pid2))
            except:
                log(4, 'pMain Started')
            log(4, 'Main Done')
        except KeyboardInterrupt:
            signals(2, 'CONNECT pMain', 'SIGINT')
            raise
        except:
            signals(4, 'CONNECT pMain', None)
            raise
        time.sleep(3)
        try:
            stdin()
        except KeyboardInterrupt:
            signals(2, 'CONNECT STDIN', 'SIGINT')
            raise
        except:
            signals(4, 'CONNECT STDIN', None)
            raise
    elif sig == int(4):
        print 'HELLO WORLD LINE 743'
        #connsock(9, 'CONNECT')
        #log(4, 'CONNECTION LOST!')
        sock = False
        connected = False
Example #42
def procs(sig):
    #    print 'procs process id:', os.getpid() ## CHILD OF __MAIN__
    ## 1 = INIT DAEMONS
    ## 2 = CONNECTED DAEMONS
    ## 5 = CHECK PROCS
    ## 9 = DISTROY
    ## a parent pid can only test its children
    global sock
    global connected
    global pData
    global pReport
    global pCheckin
    debug(2, 'PROCS', 'CALLED FROM {}'.format(__name__))
    debug(3, 'PROCS', 'CALLED WITH SIGNAL {}'.format(sig))
    sig = int(sig)
    if sig == int(1):
        try:
            if connected == True and sock == True:
                try:
                    pData = Process(target=data_recv, )
                    pData.name = 'pData'
                    pData.start()
                    try:
                        pid3 = pData.pid
                        log(4, 'pData Started With PID {}'.format(pid3))
                    except:
                        log(4, 'pData Started')
                        pass
                except:
                    log(4, 'pData Refused To Start')
                    raise
            else:
                log(4, 'pData Not Connected')
        except KeyboardInterrupt:
            signals(2, 'PROCS 1', 'SIGINT')
            raise
        except:
            signals(4, 'PROCS 1', 'PDATA REFUSED TO START')
            raise
    elif sig == int(2):
        try:
            if connected == True and sock == True:
                try:
                    pCheckin = Process(target=checkin,
                                       args=('CLIENT', 'PROCS', 'PING',
                                             connected, sock))
                    pCheckin.name = 'pCheckin'
                    pCheckin.start()
                    try:
                        pid4 = pCheckin.pid
                        log(4, 'pCheckin Started With PID {}'.format(pid4))
                    except:
                        log(4, 'pCheckin Started')
                        pass
                except:
                    log(4, 'pCheckin Refused To Start')
                    raise
            else:
                log(4, 'pCheckin Not Connected')
        except KeyboardInterrupt:
            signals(2, 'PROCS 2', 'SIGINT')
            raise
        except:
            signals(4, 'PROCS 2', 'CHECKIN FAILED TO START')
            raise
        try:
            if connected == True and sock == True:
                try:
                    pReport = Process(target=genfakereport, args=())
                    pReport.name = 'pReport'
                    pReport.start()
                    try:
                        pid5 = pReport.pid
                        log(4, 'pReport Started With PID {}'.format(pid5))
                    except:
                        log(4, 'pReport Started')
                        pass
                except:
                    log(4, 'pReport Refused To Start')
                    raise
            else:
                log(4, 'pReport Not Connected')
        except KeyboardInterrupt:
            signals(2, 'PROCS 2', 'SIGINT')
        except:
            signals(4, 'PROCS 2', 'PREPRT FAILED TO START')
            raise
    elif sig == int(5):
        connstatus = 'CONNECTED {} SOCKET {}'.format(connected, sock)
        log(
            4, 'PROCS CALLED FROM {} WITH SIGNAL {} AND {}'.format(
                __name__, sig, connstatus))


#        print (pData, pData.is_alive())
#        print (pReport, pReport.is_alive())
    elif sig == int(9):
        try:
            connstatus = 'CONNECTED {} SOCKET {}'.format(connected, sock)
            log(
                4, 'PROCS CALLED FROM {} WITH SIGNAL {} AND {}'.format(
                    __name__, sig, connstatus))
            log(4, 'START KILLING PIDS')
            pData.terminate()
            pReport.terminate()
            pReport.terminate()
            log(4, 'WHICH PIDS ARE STILL ALIVE?????')
            procs(5)
        except KeyboardInterrupt:
            time.sleep(0.5)
            signals(2, 'PROCS 9', 'SIGINT')
            raise
        except:
            time.sleep(0.5)
            raise
Example #43
from multiprocessing import Process
import time
import os


def func():
    print('this is son', os.getpid())


if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    p.name = 'alex'
    print(p.name, p.pid, p.daemon)
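For comparison with Example #43, the name can also be passed to the Process constructor through the name= keyword instead of being assigned after start():

import os
from multiprocessing import Process


def func():
    print('this is son', os.getpid())


if __name__ == '__main__':
    p = Process(target=func, name='alex')
    p.start()
    print(p.name, p.pid, p.daemon)
    p.join()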
Example #44
# Approx 347k pixels

Processes = []
NumProcesses = 8 # Number of cores available to do the processing

PixelToProcess = 0
# Run until all the threads are done, and there is no pixels to process
while Processes or PixelToProcess < indices_mask[0].shape[0]:
    # if we aren't using all the processors AND there is interpolations left to
    # compute, then spawn another thread
    if ( len( Processes ) < NumProcesses ) and PixelToProcess < indices_mask[0].shape[0]:

        p = Process( target = computeCartesian, args = [ PixelToProcess ])

        p.daemon = True
        p.name = str( PixelToProcess )
        p.start()
        Processes.append(p)

        PixelToProcess += 1

    # in case that we have the maximum number of threads check
    # if any of them are done.
    else:
        for process in Processes:
            if not process.is_alive():
                Processes.remove(process)
                if int(process.name) % 100 == 0:
                    print process.name, 'processed'

# Save output files
Example #45
def main(argv):
    global args

    parser = argparse.ArgumentParser(description=__DESCRIPTION__, 
                            formatter_class=argparse.RawDescriptionHelpFormatter)

    
    parser.add_argument("-r", dest="reftree", 
                        type=str, required=True,
                        help="""Reference tree""")
    
    parser.add_argument("--source_trees", dest="source_trees", 
                        type=str, required = True,
                        help=("A list of *rooted* genetrees, one per line, in the format: TreeID/SeedID [TAB] newick "))
   
    parser.add_argument("--plot_newick", dest="plot_newick", 
                        type=str,
                        help=(""))
    
    parser.add_argument("--spname_delimiter", dest="spname_delimiter", 
                        type=str, default="_",
                        help=("species code delimiter in node names"))
    
    parser.add_argument("--spname_field", dest="spname_field", 
                        type=int, default=-1,
                        help=("position of the species code extracted from node names. -1 = last field"))

    parser.add_argument("--collateral", dest="use_collateral", 
                        action="store_true",
                        help=("If enabled, collateral information will be used as"
                              " equally qualified data. Otherwise, such data will"
                              " be reported separatedly. Use this if your set of"
                              " trees are not overlaping. "))

    parser.add_argument("--skip_dup_detection", dest="skip_dup_detection", 
                        action="store_true",
                        help=('If used, duplications will be expected to be annotated'
                              ' in the source gene trees with the evoltype="D" tag.'
                              ' Otherwise they will be inferred on the fly using'
                              ' the species overlap algorithm.'))

    parser.add_argument("--spoverlap", dest="species_overlap", 
                        type=float, default=0.0,
                        help=("Species overlap cutoff. A number between 0 and 1 "
                        "representing the percentage of species that should be "
                        "shared between two sister partitions to be considered a"
                        " duplication. 0 = any overlap represents a duplication. "))
    
    parser.add_argument("--debug", dest="debug", 
                        action="store_true",
                        help=("generate an image of every input gene tree tree, so the result can be inspected"))

    parser.add_argument("--snapshot_step", dest="snapshot_step", 
                        type=int, default=1000,
                        help=("How many trees should be processed between snapshots dumps?"))

    parser.add_argument("--reftree_constraint", dest="reftree_constraint", 
                        type=str, 
                        help=("A python module from from which a function called "
                              "*is_valid_treeid(treeid, refbranch)* should be importable. "
                              "The function will be used to decide if the info of a given "
                              "source tree is informative or not for each reftree branch. "))
    
    parser.add_argument("-o", dest="output", 
                        type=str, required=True, 
                        help=("output tag name (extensions will be added)"))

    parser.add_argument("--cpu", dest="cpu", 
                        type=int, default=1, 
                        help=("enable parallel computation"))

    parser.add_argument("--img_report", dest="img_report", 
                        action="store_true", 
                        help=("If true, it generates a summary image results with all the computed data"))

    parser.add_argument("--report_supports", dest="report_supports", 
                        action="store_true", 
                        help=("If used, supported ref tree branches are individually reported for each gene tree "))

    
    args = parser.parse_args(argv)
    if args.plot_newick:
        t = Tree(args.plot_newick)
        ts = TreeStyle()
        ts.layout_fn = info_layout
        t.render("tree_analysis.png", tree_style=ts)
        sys.exit(0)
    
    SPNAME_FIELD, SPNAME_DELIMITER = args.spname_field, args.spname_delimiter
    USE_COLLATERAL = args.use_collateral
    DETECT_DUPLICATIONS = not args.skip_dup_detection
    REPORT_PER_TREE_SUPPORTS = args.report_supports
    SP_OVERLAP = args.species_overlap
    DEBUG = args.debug
    IMG_REPORT = args.img_report
    reftree = PhyloTree(args.reftree, sp_naming_function=None)
    for nid, n in enumerate(reftree.traverse()):
        n.add_features(nid = nid)
    REFTREE_SPECIES = set(reftree.get_leaf_names())
    print __DESCRIPTION__

    if REPORT_PER_TREE_SUPPORTS:
        REPORT_SUPPORT_FILE = open("%s.gentree_supports" %args.output, "w")
        print >>REPORT_SUPPORT_FILE, '#'+'\t'.join(map(str, ["treeId", "spCoverage", "mean_support",  "mean_coll_support", "tested_branches", 'tested_coll_branches']))
    
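    # Count the source trees (one newick tree per line in the input file)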
    TOTAL_TREES = int(commands.getoutput("wc -l %s" %args.source_trees).split()[0]) + 1
    print >>sys.stderr, "Processing %d source trees" %TOTAL_TREES
    if args.reftree_constraint:
        import imp
        constraint = imp.load_source('constraint', args.reftree_constraint)
        IS_VALID_TREEID = constraint.is_valid_treeid
    else:
        IS_VALID_TREEID = None
       
    if args.cpu > 1:
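        # In parallel mode, snapshots are dumped from the merge loop below
        # rather than from within each worker.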
        MONITOR_STEP = 0
        #return (informed_branches, dup_per_branch, losses_per_branch, losses_per_dup_branch, refbranch_supports,
        #       coll_dup_per_branch, coll_losses_per_branch, coll_losses_per_dup_branch, coll_refbranch_supports)
        # The output of the process_trees function is 9 dictionaries whose keys are reftree branches
        target_dicts = [{} for x in range(9)] 
        def merge_dict_results(target, source):
            # Merge each of the 9 per-branch result dictionaries coming from a
            # worker (source) into the accumulated results (target).
            def merge_dict(target, source):
                for k, v in source.iteritems():
                    if k not in target:
                        target[k] = v
                    elif isinstance(v, list):
                        target[k].extend(v)
                    elif isinstance(v, set):
                        target[k].update(v)
                    elif isinstance(v, int):
                        target[k] += v
                    else:
                        raise ValueError("Cannot merge results of type %s" % type(v))
            for index in xrange(len(target)):
                merge_dict(target[index], source[index])

        from multiprocessing import Process, Queue
        from Queue import Empty as QueueEmpty
        outputs_queue = Queue()
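        # Split the source trees into contiguous line ranges, one per worker;
        # with fewer trees than requested CPUs, each worker gets a single tree.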
        if TOTAL_TREES > args.cpu:
            trees_per_cpu = TOTAL_TREES / args.cpu
            trees_per_cpu += 1 if TOTAL_TREES % args.cpu else 0
        else:
            trees_per_cpu = 1
            args.cpu = TOTAL_TREES
            
        all_workers = set()
        for cpu_num in xrange(args.cpu):
            sline = (cpu_num*trees_per_cpu)
            eline = (cpu_num*trees_per_cpu) + trees_per_cpu
            data_iter = tree_iterator(args.source_trees,
                                      restrict_species=REFTREE_SPECIES,
                                      start_line=sline,
                                      end_line=eline)
            print >>sys.stderr, "Launching worker %d from %d to %d" %(cpu_num, sline, eline)
            worker = Process(target=run_parallel,
                             args=(cpu_num, outputs_queue, process_trees, data_iter, reftree, trees_per_cpu))
            worker.name = "Worker_%d" %cpu_num
            all_workers.add(worker)
            worker.start()
            
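        # Monitor loop: reap workers that have finished, drain the results queue,
        # merge each partial result into target_dicts and dump a snapshot after every merge.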
        while all_workers:
            # clear done threads
            for w in list(all_workers):
                if not w.is_alive():
                    print >>sys.stderr, "%s thread is done!" %w.name
                    all_workers.discard(w)
            # get and merge results
            while 1:
                try:
                    out = outputs_queue.get(False)
                except QueueEmpty:
                    break
                else:
                    # NOTE: this merge depends on the structure returned by process_trees
                    merge_dict_results(target_dicts, out)
                    # Dump a snapshot
                    dump_results(reftree, *target_dicts)
                time.sleep(0.1)
            if all_workers:
                time.sleep(1)
        # collected data
        (informed_branches, dup_per_branch, losses_per_branch, losses_per_dup_branch, refbranch_supports,
         coll_dup_per_branch, coll_losses_per_branch, coll_losses_per_dup_branch,
         coll_refbranch_supports) = target_dicts
    else:
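        # Single-process mode: iterate over all source trees here, dumping a
        # snapshot every MONITOR_STEP (= --snapshot_step) trees.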
        MONITOR_STEP = args.snapshot_step
        data_iter = tree_iterator(args.source_trees, restrict_species=REFTREE_SPECIES)
        
        (informed_branches, dup_per_branch, losses_per_branch, losses_per_dup_branch, refbranch_supports,
         coll_dup_per_branch, coll_losses_per_branch, coll_losses_per_dup_branch,
         coll_refbranch_supports) = process_trees(data_iter, reftree, TOTAL_TREES)

    if REPORT_PER_TREE_SUPPORTS:
        REPORT_SUPPORT_FILE.close()

    dump_results(reftree, informed_branches, dup_per_branch, losses_per_branch, losses_per_dup_branch, refbranch_supports,
                 coll_dup_per_branch, coll_losses_per_branch, coll_losses_per_dup_branch, coll_refbranch_supports)

    print >>sys.stderr, "Dumping full analysis..."
    # Full dump, including duplication details
    cPickle.dump(reftree, open("%s.pkl"%args.output, "w"))
        sys.exit(1)
    else:
        run = True
        process_list = []
        results = []
        arrays_to_test = test_hardware.TestArrays().providers(sys.argv[1])

        for system in arrays_to_test:
            (u, credentials) = test_hardware.TestArrays.uri_password_get(system)
            name = system['COMPANY']
            ip = system['IP']
            system_id = "%s-%s" % (name, ip)

            p = Process(target=run_test, args=(sys.argv[2], sys.argv[3],
                                               system_id, u, credentials))
            p.name = system_id
            p.start()
            process_list.append(p)

        start = time.time()
        print('Test run started at: %s, time limit is %s minutes' %
              (time.strftime("%c"), str(time_limit_seconds / 60.0)))
        sys.stdout.flush()

        while len(process_list) > 0:
            for p in process_list:
                p.join(1)
                if not p.is_alive():
                    print('%s exited with %s at %s (runtime %s seconds)' %
                          (p.name, str(p.exitcode), time.strftime("%c"),
                           str(time.time() - start)))
Exemple #47
0
 # Read time budget
 with open(os.path.join(input_dir, basename, basename + '_public.info'), 'r') as info_file:
     for line in info_file:
         if line.startswith('time_budget'):
             time_budget = int(line.split('=')[-1])
 # Debug code
 # time_budget = 120
 print('Time budget = %ds' % time_budget)
 root_logger.info('Time budget = %ds, dataset %s', time_budget, basename)
 # Start separate process to analyse file
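 # The remaining time budget, minus 20 seconds of overhead, is passed to the worker.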
 p = Process(target=run_automl, args=(input_dir,
                                      output_dir,
                                      basename,
                                      time_budget - (time.time() - start_time) - 20,
                                      running_on_codalab))
 p.name = 'Manager'
 p.start()
 # Monitor the process, checking to see if it is complete or if total memory usage too high
 while True:
     time.sleep(0.2)
     if p.is_alive():
         available_mem = psutil.virtual_memory().available  # measured in bytes
         print('Available memory = %fMB' % (available_mem / (1024 * 1024)))
         if available_mem < constants.OVERHEAD / 2:
             print('Less than %.1f GB memory available - aborting process' %
                   (constants.OVERHEAD / float(2 ** 30)))
             psutil.Process(pid=p.pid).send_signal(sig=signal.SIGTERM)  # tidy up then die please
             p.join(timeout=10)  # give it a while to respond to signal
             if p.is_alive():
                 util.murder_family(p.pid, killall=True, sig=signal.SIGKILL)
                 p.join()
def run_parallel(args: argparse.Namespace):
    if args.pretend:
        boot_cheribsd.PRETEND = True
    boot_cheribsd.MESSAGE_PREFIX = "\033[0;35m" + "main process: \033[0m"
    if args.parallel_jobs < 1:
        boot_cheribsd.failure("Invalid number of parallel jobs: ",
                              args.parallel_jobs,
                              exit=True)
    boot_cheribsd.success("Running ", args.parallel_jobs, " parallel jobs")
    # Barrier (one party per shard plus the main process) to ensure that all shards have started lit
    mp_barrier = Barrier(parties=args.parallel_jobs + 1, timeout=4 * 60 * 60)
    mp_q = Queue()
    ssh_port_queue = Queue()
    processes = []
    # Extract the kernel + disk image in the main process to avoid race condition:
    kernel_path = boot_cheribsd.maybe_decompress(Path(args.kernel), True, True,
                                                 args) if args.kernel else None
    disk_image_path = boot_cheribsd.maybe_decompress(Path(
        args.disk_image), True, True, args) if args.disk_image else None
    for i in range(args.parallel_jobs):
        shard_num = i + 1
        boot_cheribsd.info(args)
        p = Process(target=run_shard,
                    args=(mp_q, mp_barrier, shard_num, args.parallel_jobs,
                          ssh_port_queue, kernel_path, disk_image_path,
                          args.build_dir))
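        # Record each shard's progress stage; it is inspected later when merging
        # the per-shard JUnit XML results to report shards that did not exit cleanly.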
        p.stage = run_remote_lit_test.MultiprocessStages.FINDING_SSH_PORT
        p.daemon = True  # kill process on parent exit
        p.name = "<LIBCXX test shard " + str(shard_num) + ">"
        p.start()
        processes.append(p)
        atexit.register(p.terminate)
    dump_processes(processes)
    try:
        return run_parallel_impl(args, processes, mp_q, mp_barrier,
                                 ssh_port_queue)
    except BaseException as e:
        boot_cheribsd.info("Got error while running run_parallel_impl (",
                           type(e), "): ", e)
        raise
    finally:
        wait_or_terminate_all_shards(processes, max_time=5, timed_out=False)
        # merge junit xml files
        if args.xunit_output:
            boot_cheribsd.success("Merging JUnit XML outputs")
            result = junitparser.JUnitXml()
            xunit_file = Path(args.xunit_output).absolute()
            dump_processes(processes)
            for i in range(args.parallel_jobs):
                shard_num = i + 1
                shard_file = xunit_file.with_name("shard-" + str(shard_num) +
                                                  "-" + xunit_file.name)
                mp_debug(args, processes[i], processes[i].stage)
                if shard_file.exists():
                    result += junitparser.JUnitXml.fromfile(str(shard_file))
                else:
                    error_msg = "ERROR: could not find JUnit XML " + str(
                        shard_file) + " for shard " + str(shard_num)
                    boot_cheribsd.failure(error_msg, exit=False)
                    error_suite = junitparser.TestSuite(name="failed-shard-" +
                                                        str(shard_num))
                    error_case = junitparser.TestCase(name="cannot-find-file")
                    error_case.classname = "failed-shard-" + str(shard_num)
                    error_case.result = junitparser.Error(message=error_msg)
                    error_suite.add_testcase(error_case)
                    result.add_testsuite(error_suite)
                if processes[
                        i].stage != run_remote_lit_test.MultiprocessStages.EXITED:
                    error_msg = "ERROR: shard " + str(
                        shard_num
                    ) + " did not exit cleanly! Was in stage: " + processes[
                        i].stage.value
                    if hasattr(processes[i], "error_message"):
                        error_msg += "\nError message:\n" + processes[
                            i].error_message
                    error_suite = junitparser.TestSuite(
                        name="bad-exit-shard-" + str(shard_num))
                    error_case = junitparser.TestCase(name="bad-exit-status")
                    error_case.result = junitparser.Error(message=error_msg)
                    error_suite.add_testcase(error_case)
                    result.add_testsuite(error_suite)

            result.update_statistics()
            result.write(str(xunit_file))
            if args.pretend:
                print(xunit_file.read_text())
            boot_cheribsd.success("Done merging JUnit XML outputs into ",
                                  xunit_file)
            print("Duration: ", result.time)
            print("Tests: ", result.tests)
            print("Failures: ", result.failures)
            print("Errors: ", result.errors)
            print("Skipped: ", result.skipped)
Exemple #49
0
def run_experiment_file(filename, plot_override=True, separate_process=False):
    """
    This is intended to be the function that's called to initiate a series of experiments.
    """
    exp = load_experiment_details(filename=filename)
    print("BEGIN EXPERIMENT SPECIFICATIONS")
    print(exp_params_to_str(exp))
    print("END EXPERIMENT SPECIFICATIONS")

    # Set processor affinity: use all CPUs but one, leaving a core free
    p = psutil.Process()
    all_cpus = list(range(psutil.cpu_count() - 1))
    p.cpu_affinity(all_cpus)

    # Set up logging
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    form = logging.Formatter("[%(levelname)s/%(processName)s] %(asctime)s %(message)s")

    # Handler for logging to the console (stdout)
    sh = logging.StreamHandler(stream=sys.stdout)
    sh.setLevel(logging.WARN)  # only warnings and above go to the console
    # sh.addFilter(ProcessFilter())  # filter to show only logs from manager
    sh.setFormatter(form)
    root_logger.addHandler(sh)

    # Handler for logging to file
    util.move_make_file(constants.LOGFILE)
    fh = logging.handlers.RotatingFileHandler(constants.LOGFILE, maxBytes=512 * 1024 * 1024)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(form)
    root_logger.addHandler(fh)

    # Make output dir
    util.move_make(exp["output_dir"])

    # Make score dir and learning curve
    if exp["score_dir"] is not None:
        util.move_make(exp["score_dir"])
        with open(os.path.join(exp["score_dir"], "learning_curve.csv"), "w") as score_file:
            score_file.write("Time,Score\n")

    # Record start time
    open(os.path.join(exp["output_dir"], exp["basename"] + ".firstpost"), "wb").close()

    # Plotting?
    if plot_override is not None:
        exp["plot"] = plot_override

    # Start manager
    mgr = FixedLearnersStackingManager(
        exp["input_dir"],
        exp["output_dir"],
        exp["basename"],
        exp["time_budget"],
        compute_quantum=exp["compute_quantum"],
        plot=exp["plot"],
        overhead_memory=constants.OVERHEAD,
        cgroup_soft_limit=constants.CGROUP_SOFT_LIMIT,
        cgroup_hard_limit=constants.CGROUP_HARD_LIMIT,
        exp=exp,
    )

    if separate_process:

        # Create process
        p = Process(target=agent.start_communication, kwargs=dict(agent=mgr))
        p.name = "manager"
        p.start()

        print("\nPress enter to terminate at any time.\n")
        while True:
            if not p.is_alive():
                break

            # Wait for one second to see if any keyboard input
            i, o, e = select.select([sys.stdin], [], [], 1)
            if i:
                print("\n\nTerminating")
                try:
                    ps = psutil.Process(pid=p.pid)
                    ps.send_signal(signal.SIGTERM)
                    p.join(timeout=5)
                    if p.is_alive():
                        print("Didn't respond to SIGTERM")
                        util.murder_family(pid=p.pid, killall=True, sig=signal.SIGKILL)
                except psutil.NoSuchProcess:
                    pass  # already dead
                break

    else:
        mgr.communicate()
	def __init__(self, logfile=u'default', facedetect=True, eyedetect=True, \
		pupthreshold=50, glintthreshold=200, glintdetect=True, \
		pupsizemode=u'diameter', minfacesize=(30,30), Lexpect=(0.7,0.4), \
		Rexpect=(0.3,0.4), maxpupdist=0.2, maxpupsize=0.3, maxcpu=6, \
		**kwargs):
		
		"""Initialises an EyeTracker class.
		
		Keyword Arguments
		
		logfile		-	A string that indicates the path to the log
						file. An extension will be added
						automatically. Default = 'default'.
		
		facedetect		-	A Boolean that indicates whether face
						detection should be attempted before further
						processing (eye detection, and pupil/glint
						detection). Set this to False if you will
						be using the EyeTracker from close to an
						eye, in which case faces cannot (and need not)
						be detected. Default = True.
		
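		eyedetect		-	A Boolean that indicates whether eye
						detection should be attempted before
						pupil/glint detection. Default = True.
		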
		pupthreshold	-	An integer that indicates what the highest
						luminance value is that is still considered
						to be part of the pupil. This value needs to
						be between 0 and 255. Default = 50.
		
		glintthreshold	-	An integer that indicates what the lowest
						luminance value is that is still considered
						to be part of the glint. This value needs to
						be between 0 and 255. Default = 200.
		
		glintdetect		-	A Boolean that indicates whether the glint
						(the corneal reflection) should also be
						detected. Default = True.
		
		pupsizemode		-	A string that indicates how the pupil size
						should be reported.
						'diameter' reports the width of the rect in
						which the thresholded pupil fits.
						'surface' reports the number of thresholded
						pixels that are assumed to be the pupil.
						Default = 'diameter'.
		
		minfacesize		-	A (w,h) tuple that indicates what size a
						face should minimally be. Default = (30,30)
		
		Lexpect		-	A (x,y) tuple that indicates where the left
						eye is expected to be. Note that the
						coordinates are in relative space, where
						(0,0) is the top-left of the image, (0,1)
						is the bottom-left, and (1,1) is the
						bottom-right. Also note that the left eye is
						likely to be on the right side of the image,
						and the right eye is likely to be in the
						left part of the image. Default = (0.7,0.4)
		
		Rexpect		-	A (x,y) tuple that indicates where the right
						eye is expected to be. Note that the
						coordinates are in relative space, where
						(0,0) is the top-left of the image, (0,1)
						is the bottom-left, and (1,1) is the
						bottom-right. Also note that the left eye is
						likely to be on the right side of the image,
						and the right eye is likely to be in the
						left part of the image. Default = (0.3,0.4)
		
		maxpupdist		-	A float that indicates what the maximal
						allowable distance is between the expected
						eye position, and the position of detected
						potential eye. The maximal distance is
						defined as a proportion of the image height.
						It can also be set to None. Default = (0.2)
		
		maxpupsize		-	A float that indicates what the maximal
						allowable width is of the detected eyes. The
						maximal size is defined as a proportion of
						the image width. It can also be set to None.
						Default = (0.3)

		maxcpu		-	Integer indicating the maximum amount of
						parallel processes that will be doing all
						of the image processing. This happens in
						parallel to speed things up; the processing
						time on one CPU can't keep up with the
						camera's sampling rate. Default = 6.
		"""

		# DEBUG message.
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Initialising a new EyeTracker.")
		
		# GENERAL SETTINGS
		# Face detection yes/no, and from what size.
		self._facedetect = facedetect
		self._minfacesize = minfacesize
		# Eye detection yes/no.
		self._eyedetect = eyedetect
		# Eye detection settings. These are relative positions of where
		# each eye is expected to be in a frame, how far away detected eyes
		# are allowed to be from the expected locations, and how large the
		# detected eyes are allowed to be. (All defined as proportions of
		# the frame's width and height.)
		self._Lexpect = Lexpect
		self._Rexpect = Rexpect
		self._maxpupdist = maxpupdist
		self._maxpupsize = maxpupsize
		# Pupil detection thresholds (dark for pupil, bright for glint),
		# and additional options that determine whether glints should be
		# detected, and how the pupil size should be reported.
		self._pupt = pupthreshold
		self._glit = glintthreshold
		self._glintdetect = glintdetect
		self._pupsizemode = pupsizemode
		
		# ALIVE EVENT
		# This event signals whether the tracker is still alive. It should
		# only be cleared when closing the connection to the tracker!
		self._alive = Event()
		self._alive.set()
		
		# FRAME OBTAINING THREAD
		# Boolean that turns to True when a connection with the source of
		# frames has been established.
		self._connected = False
		# We need a Queue for frames that are generated in the obtainer
		# Thread. The Queue is read out by the parallel processes.
		self._framequeue = Queue()
		# We need a lock to prevent potential simultaneous attempts to
		# access the image source at the same time. This shouldn't actually
		# be possible in the current implementation, but may be added in
		# the future.
		self._sourcelock = Lock()
		# Start the frame obtaining Thread
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting a Thread to obtain frames.")
		self._frame_obtainer_thread = Thread(target=self._frame_obtainer, \
			args=[self._alive, self._framequeue])
		self._frame_obtainer_thread.name = u'frame_obtainer'
		self._frame_obtainer_thread.daemon = True
		self._frame_obtainer_thread.start()

		# PARALLEL PROCESSING
		# We need a Queue for samples that are generated in the parallel
		# processes that are simultaneously processing new frames.
		self._samplequeue = Queue()
		# Check how many CPUs we can use.
		cpus = cpu_count()
		if cpus > maxcpu:
			cpus = maxcpu
		# Start parallel processes to do image processing.
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting %d parallel processes to process frames into samples." \
			% (cpus-1))
		self._frame_processes = []
		for i in range(1, cpus):
			p = Process(target=_frame_processer, \
				args=[self._alive, self._framequeue, self._samplequeue, \
				self._pupt, self._glit, self._facedetect, self._eyedetect, \
				self._minfacesize, self._Lexpect, self._Rexpect, \
				self._maxpupdist, self._maxpupsize, self._glintdetect, \
				self._pupsizemode])
			p.name = u'frame_processor_%d' % (i)
			p.daemon = True
			p.start()
			self._frame_processes.append(p)
		
		# SAMPLE WRITING
		# Variable that keeps track of the latest sample.
		self._latest_sample = [0, numpy.zeros((2,5))*numpy.NaN]
		# Boolean that signals whether the recording Thread should be
		# active or not.
		self._recording = False
		# Lock to prevent simultaneous access to the log file.
		self._loglock = Lock()
		# The log file is an open text file. It will be opened when
		# self._start_recording is called, and it will be closed when
		# self._stop_recording is called. Between calling those two
		# functions, samples will be appended to the log. To prevent
		# samples from being appended to an existing log file, here we
		# open a new logfile with in 'w' mode, thereby erasing any existing
		# content of a previous log file. This means users need to be
		# careful when naming their files, to prevent overwriting.
		self._logfilename = u'%s.tsv' % (logfile)
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Creating new logfile '%s'." \
			% (self._logfilename))
		# Create a header for the log file.
		l = [u'time']
		l.extend([u'Lpx', u'Lpy', u'Lps', u'Lgx', u'Lgy'])
		l.extend([u'Rpx', u'Rpy', u'Rps', u'Rgx', u'Rgy'])
		line = u'\t'.join(map(unicode, l)) + u'\n'
		# Create a new log file.
		self._loglock.acquire(True)
		self._logfile = open(self._logfilename, u'w')
		self._logfile.write(line)
		self._logfile.close()
		self._loglock.release()

		# Start the sample logging Thread
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting a Thread to log samples to file '%s'." \
			% (self._logfilename))
		self._sample_logging_thread = Thread(target=self._sample_logger, \
			args=[self._alive, self._samplequeue])
		self._sample_logging_thread.name = u'sample_logger'
		self._sample_logging_thread.daemon = True
		self._sample_logging_thread.start()
		
		# CUSTOM INITIALISATION
		# Run the custom initialisation procedure.
		self.connect(**kwargs)
Exemple #51
0
            logger.log("Tor connection failed: {}".format(e), 'error')
            time.sleep(5)

    # Awaken the spiders!
    Spiders = []
    Spider_Procs = []

    logger.log('Waking the Spiders...', 'info')
    my_names = []

    # We'll start two processes for every processor.
    count = (cpu_count() * 2)
    for x in range(count):
        spider = Spider()
        spider_proc = Process(target=spider.crawl)
        spider_proc.name = names.get_first_name()
        while spider_proc.name in my_names:
            spider_proc.name = names.get_first_name()
        my_names.append(spider_proc.name)
        Spider_Procs.append(spider_proc)
        Spiders.append(spider)
        spider_proc.start()
        # We make them sleep a second so they don't all go skittering after
        # the same url at the same time.
        time.sleep(1)

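    # Wait for every spider process to finish before cleaning up.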
    for spider_proc in Spider_Procs:
        spider_proc.join()

    try:
        os.unlink('sleep')