Code example #1
class Multiprocess(object):
    # THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
    # A CENTRAL POINT


    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)


    def __enter__(self):
        return self

    #WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
    def __exit__(self, a, b, c):
        try:
            self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
        except Exception, e:
            Log.warning("Problem adding to inbound", e)

        self.join()
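
In this project the Queue is a wrapper whose close() pushes stop markers onto the queue, which is what wakes up workers blocked on inbound.pop(). The standard library's multiprocessing.Queue has no such behaviour, so the usual substitute is one sentinel per worker; a minimal sketch under that assumption (the worker body is illustrative, not this project's):

# Minimal sketch: wake up workers blocked on get() by sending one sentinel per
# worker, then close the queues once everything has been drained.
from multiprocessing import Process, Queue

def worker(inbound, outbound):
    while True:
        item = inbound.get()        # blocks until work or a sentinel arrives
        if item is None:            # sentinel: no more work for this worker
            break
        outbound.put(item * 2)      # placeholder "work"

if __name__ == "__main__":
    inbound, outbound = Queue(), Queue()
    workers = [Process(target=worker, args=(inbound, outbound)) for _ in range(4)]
    for p in workers:
        p.start()
    for i in range(10):
        inbound.put(i)
    for _ in workers:
        inbound.put(None)           # one sentinel per worker
    results = [outbound.get() for _ in range(10)]   # drain before join() to avoid a feeder-thread deadlock
    for p in workers:
        p.join()
    inbound.close()
    outbound.close()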
Code example #2
File: TestQueueBlocking.py Project: sys-git/PyRQ
class TestBlockingMethods(unittest.TestCase):
    def setUp(self):
        self.quiet=True
        self.random = Random()
        self._timers = []
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="test")
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       handlerClazz=Linkage.create(MockHandler),
                                       quiet=self.quiet
#           includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
        self.r.start().waitUntilRunning()
        pass
    def tearDown(self):
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Code example #3
File: multiprocess.py Project: mozilla/esFrontLine
class Multiprocess(object):
    # THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
    # A CENTRAL POINT

    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)

    def __enter__(self):
        return self

    #WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
    def __exit__(self, a, b, c):
        try:
            self.inbound.close()  # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
        except Exception, e:
            Log.warning("Problem adding to inbound", e)

        self.join()
Code example #4
def empty_queue(queue_: Queue) -> None:
    while True:
        try:
            queue_.get(block=False)
        except queue.Empty:
            break

    queue_.close()
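
Draining before close(), as the helper above does, matters because a process that has put items on a multiprocessing.Queue will not exit until its feeder thread has flushed them, so leftover items can make a later join() hang. A hedged sketch of using the helper at shutdown (assuming empty_queue from above is in scope):

from multiprocessing import Queue

q = Queue()
# ... producers and consumers exchange work through q ...
empty_queue(q)     # discard anything still buffered, then close() the queue
q.join_thread()    # legal only after close(): wait for the feeder thread to exit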
Code example #5
File: test.py Project: smihica/py-q4pg
def test_multiprocess_tasks():
    wait_until_convenient()
    TAG = "message_q"
    def fetch_task(queue):
        pid = os.getpid()
        count = 0
        for dq in q.listen(TAG, timeout=1):
            s = { 'pid': pid, 'data': dq }
            if dq:
                count += 1
                queue.put(s)
                sleep(uniform(0.1, 0.5)) # sleep 0.1~0.5 seconds randomly
            elif q.count(TAG) == 0:
                return count # the number of tasks done by this process


    test_items = range(0, 10000) # enqueue 10000 tasks
    for i in test_items:
        q.enqueue(TAG, i + 1)

    while q.count(TAG) != len(test_items): # wait until test data is ready
        wait_until_convenient()

    jobs = []
    wait_until_convenient()
    queue = Queue()
    start = timer()
    num_p = 30 # the number of processes to use
    for i in range(0, num_p):
        job = Process(target=fetch_task, args=(queue,))
        jobs.append(job)
        job.start() # start task process

    remaining = q.count(TAG)
    while remaining > 0: # wait until the queue is consumed completely
        remaining = q.count(TAG)
        sys.stdout.write('\rRunning test_multiprocess_tasks - remaining %5d/%5d' % (remaining, len(test_items),))
        sys.stdout.flush()
        wait_until_convenient()

    processed_data = set()
    qsize = 0
    while not queue.empty():
        item = queue.get()
        data = item.get('data')
        qsize += 1
        assert data not in processed_data, "failed test_multiprocess_tasks - data %s has been processed already" % (data, )
        processed_data.add(item.get('data'))

    queue.close()
    queue.join_thread()
    for j in jobs:
        j.join()

    assert qsize == len(test_items), "failed test_multiprocess_tasks - tasks are not complete %d/%d" % (qsize, len(test_items), )
    end = timer()
    print("\rOK test_multiprocess_tasks - %d done in %5d seconds" % (qsize, end - start))
Code example #6
File: ServerSink.py Project: sys-git/PyRQ
class ServerSink(iMockDebuggerSink):
    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.quiet= quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None
    def start(self):
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("ServerSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "server.sink.started"
    def close(self):
        self._terminate = True
        try:    self.thread.join()
        except: pass
        try:    self._qw.close()
        except: pass
        try:    self._q.close()
        except: pass
        return "server.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(self, *args, **kwargs):
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:   pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception, _e:
                    break
Code example #7
File: TestRRQReader.py Project: sys-git/PyRQ
class TestWithTimeouts(_BaseReader):
    def setUp(self):
        self.logger = self._getLogger()
        self.quiet=True
        self.random = Random()
        self.timerTerminate = 0
        self._timers = []
        self.namespaces = []
        self.dummyQueue = Queue()
        self.iface = PyRQIface(ref="test", quiet=self.quiet, loggingModule=testLoggingModule)
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       handlerClazz=Linkage.create(TimeoutMockHandler),
                                       quiet=self.quiet,
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
    def _createInterface(self):
        namespace = self.iface.create()
        self.iface = PyRQIface(namespace=namespace, ref="test", quiet=self.quiet, loggingModule=testLoggingModule)
        self.namespaces.append(namespace)
        return namespace
    def tearDown(self):
        self.timerTerminate = 1
        time.sleep(2)
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Code example #8
File: TestRRQReader.py Project: sys-git/PyRQ
class TestRRQReader(_BaseReader):
    def setUp(self):
        self.logger = self._getLogger()
        self.quiet = True
        self._queues = []
        self.random = Random()
        self.timerTerminate = 0
        self._timers = []
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="default", loggingModule=testLoggingModule)
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       quiet=self.quiet,
                                       handlerClazz=Linkage.create(MockHandler),
#           includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
        self.r.start().waitUntilRunning()
        pass
    def tearDown(self):
        self.logger.info("------------ TEST END ------------")
        self.timerTerminate = 1
        time.sleep(2)
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Code example #9
File: smp.py Project: jucabot/polymr
class MultiCoreEngine():
    
    _mapred = None
    
    _out_queue = None
    _in_queue = None
    _log_queue = None
        
    _processes = None
    
        
    def __init__(self,mapred):
        self._mapred = mapred
            
    def _start(self,name,cpu, module_name, class_name, params):
        fn = None
        
        self._processes = []
        self._in_queue = Queue()
        self._out_queue = Queue()
        self._log_queue = Queue()
        
        if name == "mapper":
            fn = q_run_mapper
        elif name == "reducer":
            fn = q_run_reducer
        
        for i in range(cpu):
            process = Process(target=fn,args=(module_name, class_name ,params, self._in_queue, self._out_queue, self._log_queue))
            self._processes.append(process)
            process.start()
    
    def _stop(self):
        
        for process in self._processes:
            self._in_queue.put("STOP")
        
        while not self._log_queue.empty():
            print self._log_queue.get()
    
    def _get_data_chunks(self):
        chunks = []
        for process in self._processes:
            chunks.append(self._out_queue.get())
        
        return chunks
    
    def _set_data_chunks(self, chunks):
        
        map(self._in_queue.put,chunks)
        
                        
    def _send_lines(self,lines, cpu, lines_len ):
        line_splits = [lines[i* lines_len / cpu : (i+1)* lines_len / cpu] for i in range(cpu) ]
                    
        for i in range(cpu): 
            self._in_queue.put(line_splits[i])
    
    def _terminate(self):
        for process in self._processes:
            process.join()
            process.terminate()
                
        self._in_queue.close()
        self._out_queue.close()
        self._processes = None
        
    def _force_terminate(self):
        for process in self._processes:
            process.terminate()
            
    def _merge_data(self, data):
       
        self._mapred.data = merge_kv_dict(self._mapred.data,data)
                
    def _merge_reduced_data(self, data):
       
        self._mapred.data_reduced = merge_kv_dict(self._mapred.data_reduced,data)
                
    def _split_data(self, num_splits):
        splits = []
        index = 0
        
        len_data = len(self._mapred.data)
        
        chunk_len = int(math.ceil(len_data / float(num_splits)))
        
        if chunk_len == 0:
            splits.append(self._mapred.data)
        else:        
            for i in range(int(math.ceil(len_data/float(chunk_len)))):
                splits.append({})
                
            for (key, value) in self._mapred.data.items():
                
                i = int(math.floor(index / float(chunk_len)))
                       
                splits[i][key]=value
                
                index = index + 1
        
        return splits
    
    
    def _run_map(self,cpu,cache_line,input_reader ):
        
        self._start("mapper",cpu, self._mapred.__class__.__module__,self._mapred.__class__.__name__ ,self._mapred.params)
    
        try:
            map_len = 0
            lines = []
            lines_len = 0
            f = input_reader.read()
              
            for line in f:
                if  lines_len > 0 and lines_len % cache_line == 0:
                    self._send_lines(lines, cpu, lines_len)        
                    lines = []
                    lines_len = 0
               
                lines.append(line)
                lines_len += 1
                map_len += 1
                 
            input_reader.close()
            
            self._send_lines(lines, cpu, lines_len)
            
            self._stop()
            
            map(self._merge_data, self._get_data_chunks())
                
                
            self._terminate()
            
        
        except Exception,e:
            print "ERROR: Exception while mapping : %s\n%s" % (e,traceback.print_exc())
            self._force_terminate()
             
        return map_len
Code example #10
File: SSH.py Project: sys-git/YATES
class SSHClient(Process):
    TIMEOUT = 10
    PING_RECEIVED = re.compile("1 received")

    def __init__(self, username, password, host, cmdsToSend, port = 22, exitCmd = "exit", timeout = None):
        Process.__init__(self, name = "SSHClient")

        self.logger = LogManager().getLogger('SSHClient-%s' % host)
        self.username = username
        self.password = password
        self.host = host
        self.port = int(port)
        self.cmdsToSend = cmdsToSend if isinstance(cmdsToSend, list) else [cmdsToSend]
        self.exitCmd = exitCmd

        self.queue = Queue()
        self.msg = ""
        self.status = Status.FAILURE
        self.startTime = Value('d', 0.0)
        self.endTime = Value('d', 0.0)
        self.timeout = timeout or SSHClient.TIMEOUT
        self.cmdsSend = False
        self.start()

    def isFinished(self):
        """ True if the process has finished """
        return not self.is_alive()

    def updateOutput(self):
        """
        Update the msg to include the latest
        output from the given commands
        """
        try:
            while True:
                msg = self.queue.get(timeout = 0.5)
                self.msg += msg
        except Empty: pass
        except IOError: pass

        if self.isFinished():
            self.queue.close()
        return self.msg

    def run(self):
        factory = SSHFactory(self)
        factory.protocol = ClientTransport
        reactor.connectTCP(self.host, self.port, factory)

        self.startTime.value = time.time()
        check = task.LoopingCall(self.__ping)
        check.start(2.0)
        reactor.callLater(self.timeout, self.__timeout)
        log.defaultObserver.stop()
        reactor.run()
        self.endTime.value = time.time()
        self.queue.close()
        sys.exit(self.status)

    def __timeout(self):
        """ Timeout checker """
        if self.status != Status.FAILURE:
            return

        self.logger.error('Connection timeout to peer %s:%s'
            %(self.host, self.port))
        reactor.stop()

    def __ping(self):
        with open('/dev/null') as null:
            ping = subprocess.Popen(["ping", "-c1", "-W1", self.host],
                stdout = null, stderr = null)
            ping.wait()

        if ping.returncode != 0 and reactor.running:
            if self.cmdsSend == False:
                self.status = Status.FAILURE
            reactor.stop() 

    def cleanup(self):
        self.queue.close()

    def shutdown(self):
        """ Terminate the SSH process """
        self.terminate()
        self.join()
        self.endTime.value = time.time()
Code example #11
class MVacManager(Manager):
    """
        Multyvac backend.
    """
 

    @contract(num_processes='int')
    def __init__(self, context, cq, num_processes, 
                 recurse=False, 
                 show_output=False,
                 new_process=False,
                 volumes=[],
                 rdb=False,
                 rdb_vol=None,
                 rdb_db=None):
        Manager.__init__(self, context=context, cq=cq, recurse=recurse)
        self.num_processes = num_processes
        self.last_accepted = 0
        self.cleaned = False
        self.show_output = show_output
        self.new_process = new_process
        self.volumes = volumes
        self.rdb = rdb
        self.rdb_db = rdb_db
        self.rdb_vol = rdb_vol
        
    def process_init(self):
        self.event_queue = Queue()
        self.event_queue_name = str(id(self))
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        self.signal_queue = Queue()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')
        for i in range(self.num_processes):
            name = 'w%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name
            self.subs[name] = PmakeSub(name, 
                                       signal_queue=self.signal_queue,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        self.subname2job = {}
        # all are available at the beginning
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes

    def check_any_finished(self):
        # We make a copy because processing is updated during the loop
        try:
            token = self.signal_queue.get(block=False)
        except Empty:
            return False
        #print('received %r' % token)
        job_id = self.subname2job[token]
        self.subs[token].last
        self.check_job_finished(job_id, assume_ready=True)
        return True 
    
    # XXX: boiler plate
    def get_resources_status(self):
        resource_available = {}

        assert len(self.sub_processing) == len(self.processing)

        if not self.sub_available:
            msg = 'already %d nproc' % len(self.sub_processing)
            if self.sub_aborted:
                msg += ' (%d workers aborted)' % len(self.sub_aborted)
            resource_available['nproc'] = (False, msg)
            # this is enough to continue
            return resource_available
        else:
            resource_available['nproc'] = (True, '')

        return resource_available

    @contract(reasons_why_not=dict)
    def can_accept_job(self, reasons_why_not):
        if len(self.sub_available) == 0 and len(self.sub_processing) == 0:
            # all have failed
            msg = 'All workers have aborted.'
            raise MakeHostFailed(msg)

        resources = self.get_resources_status()
        some_missing = False
        for k, v in resources.items():
            if not v[0]:
                some_missing = True
                reasons_why_not[k] = v[1]
        if some_missing:
            return False
        return True

    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert not name in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name
        self.subname2job[name] = job_id

        job = get_job(job_id, self.db)

        if self.rdb:
            f = mvac_job_rdb
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output,
                    self.volumes, self.rdb_vol.name, self.rdb_db, os.getcwd())            
        else:
            if job.needs_context:
                # if self.new_process:
                #     f = parmake_job2_new_process
                #     args = (job_id, self.context)
                # 
                # else:
                f = parmake_job2
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output)
            else:
                f = mvac_job
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output,
                        self.volumes, os.getcwd())
    
        if True:
            async_result = sub.apply_async(f, args)
        else:
            warnings.warn('Debugging synchronously')
            async_result = f(args)
            
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        
        #print('Clean up...') 
        
        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        elegant = False
        # XXX: in practice this never works well
        if elegant:
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)
            
        # XXX: ... so we just kill them mercilessly
        else:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

        self.event_queue.close()
        self.signal_queue.close()
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        del PmakeManager.queues[self.event_queue_name]
        

    # Normal outcomes    
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
Code example #12
File: mvac_manager.py Project: p-muller/compmake
class MVacManager(Manager):
    """
        Multyvac backend.
    """
 

    @contract(num_processes='int')
    def __init__(self, context, cq, num_processes, 
                 recurse=False, 
                 show_output=False,
                 new_process=False,
                 volumes=[],
                 rdb=False,
                 rdb_vol=None,
                 rdb_db=None):
        Manager.__init__(self, context=context, cq=cq, recurse=recurse)
        self.num_processes = num_processes
        self.last_accepted = 0
        self.cleaned = False
        self.show_output = show_output
        self.new_process = new_process
        self.volumes = volumes
        self.rdb = rdb
        self.rdb_db = rdb_db
        self.rdb_vol = rdb_vol
        
    def process_init(self):
        self.event_queue = Queue()
        self.event_queue_name = str(id(self))
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        self.signal_queue = Queue()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')
        for i in range(self.num_processes):
            name = 'w%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name
            self.subs[name] = PmakeSub(name, 
                                       signal_queue=self.signal_queue,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        self.subname2job = {}
        # all are available at the beginning
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes

    def check_any_finished(self):
        # We make a copy because processing is updated during the loop
        try:
            token = self.signal_queue.get(block=False)
        except Empty:
            return False
        #print('received %r' % token)
        job_id = self.subname2job[token]
        self.subs[token].last
        self.check_job_finished(job_id, assume_ready=True)
        return True 
    
    # XXX: boiler plate
    def get_resources_status(self):
        resource_available = {}

        assert len(self.sub_processing) == len(self.processing)

        if not self.sub_available:
            msg = 'already %d nproc' % len(self.sub_processing)
            if self.sub_aborted:
                msg += ' (%d workers aborted)' % len(self.sub_aborted)
            resource_available['nproc'] = (False, msg)
            # this is enough to continue
            return resource_available
        else:
            resource_available['nproc'] = (True, '')

        return resource_available

    @contract(reasons_why_not=dict)
    def can_accept_job(self, reasons_why_not):
        if len(self.sub_available) == 0 and len(self.sub_processing) == 0:
            # all have failed
            msg = 'All workers have aborted.'
            raise MakeHostFailed(msg)

        resources = self.get_resources_status()
        some_missing = False
        for k, v in resources.items():
            if not v[0]:
                some_missing = True
                reasons_why_not[k] = v[1]
        if some_missing:
            return False
        return True

    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert not name in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name
        self.subname2job[name] = job_id

        job = get_job(job_id, self.db)

        if self.rdb:
            f = mvac_job_rdb
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output,
                    self.volumes, self.rdb_vol.name, self.rdb_db, os.getcwd())            
        else:
            if job.needs_context:
                # if self.new_process:
                #     f = parmake_job2_new_process
                #     args = (job_id, self.context)
                # 
                # else:
                f = parmake_job2
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output)
            else:
                f = mvac_job
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output,
                        self.volumes, os.getcwd())
    
        if True:
            async_result = sub.apply_async(f, args)
        else:
            warnings.warn('Debugging synchronously')
            async_result = f(args)
            
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        
        #print('Clean up...') 
        
        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        elegant = False
        # XXX: in practice this never works well
        if elegant:
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)
            
        # XXX: ... so we just kill them mercilessly
        else:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

        self.event_queue.close()
        self.signal_queue.close()
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        del PmakeManager.queues[self.event_queue_name]
        

    # Normal outcomes    
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
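
Each PmakeSub above is handed a signal_queue and a signal_token; when a job finishes, the token is put on the queue and check_any_finished() polls for it with get(block=False). A reduced sketch of that completion-notification pattern (the worker body here is hypothetical, not compmake's):

from multiprocessing import Process, Queue
from queue import Empty

def sub(token, signal_queue):
    # ... do one unit of work, then announce completion with our token ...
    signal_queue.put(token)

if __name__ == "__main__":
    signal_queue = Queue()
    subs = {name: Process(target=sub, args=(name, signal_queue)) for name in ("w00", "w01")}
    for p in subs.values():
        p.start()

    finished = set()
    while len(finished) < len(subs):
        try:
            token = signal_queue.get(block=False)   # poll, as check_any_finished() does
        except Empty:
            continue            # a real manager would do other scheduling work here
        finished.add(token)

    for p in subs.values():
        p.join()
    signal_queue.close()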
Code example #13
class SmtpMessageServer(object):
    """
    This class can start an SMTP debugging server,
    configure LinOTP to talk to it and read the
    results back to the parent tester.

    On open, an SMTP server is set up to listen locally.
    Derived classes can define a hook to set the LinOTP
    configuration to point to this server.

    Example usage:

    with SmtpMessageServer(testcase) as smtp:
        get_otp()
    """
    def __init__(self, testcase, message_timeout):
        self.testcase = testcase

        # We need a minimum version of 2.9.2 to set the SMTP port number, so
        # skip if testing an earlier version
        self.testcase.need_linotp_version('2.9.2')

        self.timeout = message_timeout

        self.set_config = SetConfig(testcase.http_protocol, testcase.http_host,
                                    testcase.http_port, testcase.http_username,
                                    testcase.http_password)

        # We advertise the local SMTP server hostname
        # using the IP address that connects to LinOTP
        self.addr = self._get_local_ip()
        self.msg_payload = None

    def __enter__(self):
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(target=get_otp_mail,
                                    args=(self.smtp_process_queue,
                                          self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()

        return self

    def _do_lintop_config(self):
        parameters = self.get_config_parameters()

        logger.debug("Configuration parameters: %s", parameters)
        result = self.set_config.setConfig(parameters)

        assert result, "It was not possible to set the config. Result:%s" % result

    def get_config_parameters(self):
        # This function can be overridden to provide configuration parameters to configure
        # specific parts of LinOTP
        assert False, "This function should be overridden"

    def get_otp(self):
        messagestr = self.smtp_process_queue.get(True, 10)
        msg = email.message_from_string(messagestr)
        otp = msg.get_payload()

        logger.debug("Received email message payload:%s", otp)

        return otp

    def __exit__(self, *args):
        self.smtp_process_queue.close()
        self.smtp_process.terminate()
        self.smtp_process.join(5)

    def _get_local_ip(self):
        """
        Get the IP address of the interface that connects to
        LinOTP
        """

        with closing(
                socket.create_connection(
                    (self.testcase.http_host, int(self.testcase.http_port)),
                    10)) as s:
            addr = s.getsockname()[0]

        return addr
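
get_otp_mail itself is not shown in this example; what the __enter__/__exit__ pair relies on is a handshake in which the child publishes a value (the SMTP port it bound) through the queue and the parent waits for it with a timeout, then closes the queue and terminates the child on exit. A minimal sketch of that handshake (the child body is hypothetical):

import socket
from multiprocessing import Process, Queue

def child(q):
    s = socket.socket()
    s.bind(("127.0.0.1", 0))                 # let the OS pick a free port
    q.put(s.getsockname()[1])                # report it back to the parent
    s.close()

if __name__ == "__main__":
    q = Queue()
    p = Process(target=child, args=(q,))
    p.start()
    port = q.get(True, 5)                    # same call shape as smtp_process_queue.get(True, 5)
    print("child bound port", port)
    p.join()
    q.close()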
Code example #14
File: ApiBase.py Project: sys-git/epyrpc
class ApiBase(iApi, iIpcTransportDataReceiveListener):
    r"""
    @summary: The base-class to the top-level api object ONLY, not sub-apis.
    """
    DEFAULT_MAX_ASYNC_HANDLERS = 1
    def __init__(self, name, ns="", solicited=True, ignoreUnhandled=False, maxAsync=None):
        super(ApiBase, self).__init__(ns=ns, solicited=solicited, name=name)
        self._setup(ns=self._getNamespace(), solicited=self.solicited, ipc=self.ipc)
        self._dataRxCount = itertools.count(0)
        self._ignoreUnhandled = ignoreUnhandled
        if (maxAsync == None) or (maxAsync < 1):
            maxAsync = ApiBase.DEFAULT_MAX_ASYNC_HANDLERS
        self._maxAsync = maxAsync
        self._q = Queue()
        self._workers = []
        self._createAsyncWorkers()
        self.isAlive = True
    def _createAsyncWorkers(self, start=True):
        #    Create the thread pool to handle the api calls.
        for _ in range(0, self._maxAsync):
            thread = ApiAsyncWorker.create(self._q, self, start=start)
            self._workers.append(thread)
        self._logger.debug("Created workers.")
    def __del__(self):
        self.teardown()
    def teardown(self):
        if self is threading.current_thread(): return 
        if not self.isAlive: return
        self.isAlive = False
        #    Unfortunately we require time to stop the workers.
        self._logger.debug("Stopping async workers...")
        for _ in range(0, self._maxAsync):
            self._q.put(STOP())
        time.sleep(1)
        for worker in self._workers:
            worker.stop()
        for worker in self._workers:
            if worker.isAlive(): worker.join()
        self._workers = []
        self._q.close()
        time.sleep(1)
        del self._q
        self._q = None
        self._logger.debug("Stopped async workers (all daemon anyway).")
        #    Now un-bind our data-receive listener from the IPC:
        if self._ipc != None:
            self._ipc.setTransportDataReceiveListener(self)
        self._ipc = None
    def _newIpc(self):
        super(ApiBase, self)._newIpc()
        #    Now bind our data-receive listener to the IPC:
        self._ipc.setTransportDataReceiveListener(self)
    def transportDataReceive(self, tId, data):
        r"""
        @summary: Data is received that is NOT part of an existing transaction.
        We need to decide what to do with it...
        Recursively ask each of our sub-api's to decode the data and handle it.
        If no one can, then return UnsupportedApiError() (unless we consume it with:
        self._ignoreUnhandled==True).
        The handlers will have previously been set by the controlling entity, ie:
        ExecutionOrganiser, Head.
        This method always returns NoResponseRequired, making the call asynchronous.
        """
        myNsPrefix = self._getNamespacePrefix()
        try:
            count = self._dataRxCount.next()
            if isinstance(data, iApiTransportItem):
                ns = data.ns()
                if self._isInMyNamespace(ns):
                    self._findHandler(ns)
                    args = data.args()
                    kwargs = data.kwargs()
                    synchronous = True
                    self._q.put(KNOWN(ns, tId, synchronous, count, args, kwargs))
                    raise NoResponseRequired(ns)
            else:
                #    Inform our listener about the data that we can't handle:
                handler = self.transportDataReceiveListener
                if handler != None:
                    self._q.put(UNKNOWN(tId, data))
                raise NoResponseRequired(myNsPrefix)
        except UnsupportedApiError, e:
            if self._ignoreUnhandled == False:
                #    Propagate exception directly as before.
                raise
            #    Consume silently:
            self._logger.debug("UnsupportedApiError: %(NS)s" % {"NS":e.ns()})
            raise NoResponseRequired(myNsPrefix, e)
Code example #15
File: pmake_manager.py Project: afcarl/compmake
class PmakeManager(Manager):
    """
        Specialization of Manager for local multiprocessing, using
        an adhoc implementation of "pool" because of bugs of the
        Python 2.7 implementation of pool multiprocessing.
    """

    queues = {}

    @contract(num_processes='int')
    def __init__(self,
                 context,
                 cq,
                 num_processes,
                 recurse=False,
                 new_process=False,
                 show_output=False):
        Manager.__init__(self, context=context, cq=cq, recurse=recurse)
        self.num_processes = num_processes
        self.last_accepted = 0
        self.new_process = new_process
        self.show_output = show_output

        if new_process and show_output:
            msg = ('Compmake does not yet support echoing stdout/stderr '
                   'when jobs are run in a new process.')
            warning(msg)
        self.cleaned = False

    def process_init(self):
        self.event_queue = Queue(1000)
        self.event_queue_name = str(id(self))
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')

        #self.signal_queue = Queue()

        for i in range(self.num_processes):
            name = 'parmake_sub_%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name

            self.subs[name] = PmakeSub(name=name,
                                       signal_queue=None,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        # all are available
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes

    # XXX: boiler plate
    def get_resources_status(self):
        resource_available = {}

        assert len(self.sub_processing) == len(self.processing)

        if not self.sub_available:
            msg = 'already %d processing' % len(self.sub_processing)
            if self.sub_aborted:
                msg += ' (%d workers aborted)' % len(self.sub_aborted)
            resource_available['nproc'] = (False, msg)
            # this is enough to continue
            return resource_available
        else:
            resource_available['nproc'] = (True, '')

        return resource_available

    @contract(reasons_why_not=dict)
    def can_accept_job(self, reasons_why_not):
        if len(self.sub_available) == 0 and len(self.sub_processing) == 0:
            # all have failed
            msg = 'All workers have aborted.'
            raise MakeHostFailed(msg)

        resources = self.get_resources_status()
        some_missing = False
        for k, v in resources.items():
            if not v[0]:
                some_missing = True
                reasons_why_not[k] = v[1]
        if some_missing:
            return False
        return True

    def instance_job(self, job_id):
        publish(self.context,
                'worker-status',
                job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert not name in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name

        if self.new_process:
            f = parmake_job2_new_process
            args = (job_id, self.context)

        else:
            f = parmake_job2
            args = (job_id, self.context, self.event_queue_name,
                    self.show_output)

        async_result = sub.apply_async(f, args)
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        # print('process_finished()')

        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        # XXX: in practice this never works well
        if False:
            # print('joining')
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)

        # XXX: ... so we just kill them mercilessly
        if True:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

                #print('process_finished() finished')

        self.event_queue.close()
        del PmakeManager.queues[self.event_queue_name]

    # Normal outcomes
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
Code example #16
class Channel(object):

    # Maximum size of shared memory array, just a safety check it's not set to something ridiculous
    MAX_BUFFER_SIZE = 256 * 1024 * 1024

    def __init__(self, buffer_size):
        # type: (int) -> None

        self.buffer_size = buffer_size

        if self.buffer_size > Channel.MAX_BUFFER_SIZE:
            raise Exception(
                "{} exceeds allowable size ({}) of shared memory to allocate".
                format(self.buffer_size, Channel.MAX_BUFFER_SIZE))
        else:
            self.__queue = Queue()

            # Create the shared memory array
            self.__shared_array = RawArray(c_char, self.buffer_size)

            # Tracks who has been given permission to write to the shared array
            self.__owner = None

    def put(self, sending_worker_name, packet):
        # type: (str, PacketBase) -> None

        # if sending_worker_name is None:
        #     warnings.warn("Put packet with no sender {}".format(packet))

        packet.set_sender(sending_worker_name)

        if not isinstance(packet, PacketBase):
            raise Exception("Packet {} is not of type {}".format(
                packet, PacketBase))

        self.__pickle_and_put(packet)

    def get(self):
        packet = self.__get_and_unpickle()

        # if packet.sender_name is None:
        #     warnings.warn("Get packet with no sender {}".format(packet))

        return packet

    def __pickle_and_put(self, packet):
        # type: (PacketBase) -> None
        pickled_packet = self.__pickle(packet)
        self.__queue.put(pickled_packet)

    @staticmethod
    def __pickle(packet):
        return cPickle.dumps(packet, cPickle.HIGHEST_PROTOCOL)

    def available(self):
        return self.__owner is None

    def release(self):
        self.__owner = None

    def acquire(self, worker_name):
        if self.__owner is None or self.__owner == worker_name:
            self.__owner = worker_name
        else:
            raise Exception(
                "Shared array is owned by '{}'. '{}' cannot acquire.".format(
                    self.__owner, worker_name))

    def copy_array_to_buffer(self, ndarray):
        # type: (np.ndarray) -> None

        total_bytes = ndarray.nbytes
        if total_bytes > self.buffer_size:
            raise Exception(
                "Numpy array ({} bytes) exceeds capacity of shared memory ({} bytes)."
                .format(total_bytes, self.buffer_size))
        else:
            # Reshape the source array into 1 row, wrap the shared array and copy the ndarray into the wrapper
            ndarray = ndarray.reshape(1, ndarray.shape[0] * ndarray.shape[1])
            shared_array_wrapper = np.frombuffer(self.__shared_array,
                                                 dtype=ndarray.dtype,
                                                 count=ndarray.size)
            shared_array_wrapper[0:ndarray.shape[0] *
                                 ndarray.shape[1]] = ndarray

    def copy_array_from_buffer(self, df_shape, dtype):
        # type: (tuple, str) -> pd.DataFrame

        num_elements = df_shape[0] * df_shape[1]
        num_bytes = num_elements * np.dtype(dtype).itemsize
        if num_bytes > self.buffer_size:
            raise Exception(
                "Array size {} exceeds capacity of shared memory {}.".format(
                    num_bytes, self.buffer_size))
        else:
            # Wrap the shared array, reshape the shared array and reconstruct the dataframe
            shared_array_wrapper = np.frombuffer(self.__shared_array,
                                                 dtype=dtype,
                                                 count=num_elements)
            shaped_array = shared_array_wrapper.reshape(
                df_shape[0], df_shape[1])
            df = pd.DataFrame(shaped_array, copy=False)
            return df

    def __get_and_unpickle(self):
        pickled_msg = self.__queue.get()
        msg = self.__unpickle(pickled_msg)
        return msg

    @staticmethod
    def __unpickle(pickled_msg):
        return cPickle.loads(pickled_msg)

    def close(self):
        self.__queue.close()
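
Channel pairs a Queue (for pickled packets) with a RawArray (for bulk numeric data). A reduced, self-contained sketch of that technique, in which the queue carries only small metadata such as shape and dtype while the array contents travel through shared memory; all names and the toy payload are illustrative:

from ctypes import c_char
from multiprocessing import Process, Queue
from multiprocessing.sharedctypes import RawArray

import numpy as np

def consumer(q, shared):
    shape, dtype = q.get()                   # small metadata arrives via the queue
    count = shape[0] * shape[1]
    view = np.frombuffer(shared, dtype=dtype, count=count).reshape(shape)
    print("consumer sees sum:", view.sum())

if __name__ == "__main__":
    shared = RawArray(c_char, 1024 * 1024)   # bulk data travels via shared memory
    q = Queue()
    p = Process(target=consumer, args=(q, shared))
    p.start()
    data = np.arange(12, dtype=np.float64).reshape(3, 4)
    wrapper = np.frombuffer(shared, dtype=data.dtype, count=data.size)
    wrapper[:] = data.ravel()                # copy the array into the shared buffer
    q.put((data.shape, data.dtype.str))      # tell the consumer how to interpret it
    p.join()
    q.close()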
Code example #17
File: pmake_manager.py Project: p-muller/compmake
class PmakeManager(Manager):
    """
        Specialization of Manager for local multiprocessing, using
        an adhoc implementation of "pool" because of bugs of the
        Python 2.7 implementation of pool multiprocessing.
    """

    queues = {}

    @contract(num_processes='int')
    def __init__(self, context, cq, num_processes, recurse=False,
                 new_process=False,
                 show_output=False):
        Manager.__init__(self, context=context, cq=cq, recurse=recurse)
        self.num_processes = num_processes
        self.last_accepted = 0
        self.new_process = new_process
        self.show_output = show_output

        if new_process and show_output:
            msg = ('Compmake does not yet support echoing stdout/stderr '
                   'when jobs are run in a new process.')
            warning(msg)
        self.cleaned = False

    def process_init(self):
        self.event_queue = Queue(1000)
        self.event_queue_name = str(id(self))
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')
        
        #self.signal_queue = Queue()
        
        for i in range(self.num_processes):
            name = 'parmake_sub_%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name
            
            self.subs[name] = PmakeSub(name=name, 
                                       signal_queue=None, 
                                       signal_token=signal_token, 
                                       write_log=write_log)
        self.job2subname = {}
        # all are available
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes

    # XXX: boiler plate
    def get_resources_status(self):
        resource_available = {}

        assert len(self.sub_processing) == len(self.processing)

        if not self.sub_available:
            msg = 'already %d nproc' % len(self.sub_processing)
            if self.sub_aborted:
                msg += ' (%d workers aborted)' % len(self.sub_aborted)
            resource_available['nproc'] = (False, msg)
            # this is enough to continue
            return resource_available
        else:
            resource_available['nproc'] = (True, '')

        return resource_available

    @contract(reasons_why_not=dict)
    def can_accept_job(self, reasons_why_not):
        if len(self.sub_available) == 0 and len(self.sub_processing) == 0:
            # all have failed
            msg = 'All workers have aborted.'
            raise MakeHostFailed(msg)

        resources = self.get_resources_status()
        some_missing = False
        for k, v in resources.items():
            if not v[0]:
                some_missing = True
                reasons_why_not[k] = v[1]
        if some_missing:
            return False
        return True

    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert not name in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name

        if self.new_process:
            f = parmake_job2_new_process
            args = (job_id, self.context)

        else:
            f = parmake_job2
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output)

        async_result = sub.apply_async(f, args)
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        # print('process_finished()')

        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        # XXX: in practice this never works well
        if False:
            # print('joining')
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)

        # XXX: ... so we just kill them mercilessly
        if True:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

                #print('process_finished() finished')
        
        self.event_queue.close()
        del PmakeManager.queues[self.event_queue_name]
        

    # Normal outcomes    
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
Code example #18
File: FileSink.py Project: sys-git/PyRQ
class FileSink(iMockDebuggerSink):
    def __init__(self, peerName, theTime, filename, quiet):
        self._peerName = peerName
        self._fp = open(filename, "w")
        self._fp.write("File debugger started at: %(T)s for client: %(C)s"%{"T":theTime, "C":peerName})
        self._fp.flush()
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self.quiet=  quiet
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.thread = None
    def start(self):
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("FileSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "file.sink.started"
    def close(self):
        self._terminate = True
        try:    self.thread.join()
        except: pass
        try:    self._fp.close()
        except: pass
        try:    self._q.close()
        except: pass
        self._fp = None
        return "file.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(self, *args, **kwargs):
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:   pass
            else:
                try:
                    (methodName, args, kwargs) = data
                    peerName = args[0]
                    relativeTime = args[1]
                    args = args[2:]
                    ss = ["PEER:", peerName, "REL-TIME:", relativeTime, "METHOD", methodName, "ARGS:", str(args), "KWARGS", str(kwargs)]
                    s = "\n".join(ss)
                except: pass
                else:
                    try:
                        self._fp.write(s)
                    except:
                        break
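
The run() loop above calls get() with a one-second timeout so the thread can re-check self._terminate between attempts instead of blocking forever. The same pattern in isolation, as a minimal sketch (class and names are illustrative):

import threading
import time
from multiprocessing import Queue
from queue import Empty

class Sink:
    def __init__(self):
        self._q = Queue()
        self._terminate = False
        self._thread = threading.Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        while not self._terminate:
            try:
                data = self._q.get(block=True, timeout=1)   # wake at least once a second
            except Empty:
                continue                                    # nothing queued; re-check the flag
            print("handled:", data)

    def put(self, data):
        self._q.put(data)

    def close(self):
        self._terminate = True
        self._thread.join()    # returns within about a second thanks to the get() timeout
        self._q.close()

sink = Sink()
sink.put("hello")
time.sleep(0.2)    # give the worker a moment to pick the item up
sink.close()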
Code example #19
class SmtpMessageServer(object):
    """
    This class can start an SMTP debugging server,
    configure LinOTP to talk to it and read the
    results back to the parent tester.

    On open, an SMTP server is set up to listen locally.
    Derived classes can define a hook to set the LinOTP
    configuration to point to this server.

    Example usage:

    with SmtpMessageServer(testcase) as smtp:
        get_otp()
    """

    def __init__(self, testcase, message_timeout):
        self.testcase = testcase

        # We need a minimum version of 2.9.2 to set the SMTP port number, so
        # skip if testing an earlier version
        self.testcase.need_linotp_version('2.9.2')

        self.timeout = message_timeout

        self.set_config = SetConfig(testcase.http_protocol,
                                    testcase.http_host,
                                    testcase.http_port,
                                    testcase.http_username,
                                    testcase.http_password)

        # We advertise the local SMTP server hostname
        # using the IP address that connects to LinOTP
        self.addr = self._get_local_ip()
        self.msg_payload = None

    def __enter__(self):
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(
            target=get_otp_mail, args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()

        return self

    def _do_lintop_config(self):
        parameters = self.get_config_parameters()

        logger.debug("Configuration parameters: %s", parameters)
        result = self.set_config.setConfig(parameters)

        assert result, "It was not possible to set the config. Result:%s" % result

    def get_config_parameters(self):
        # This function can be overridden to provide configuration parameters to configure
        # specific parts of LinOTP
        assert False, "This function should be overridden"

    def get_otp(self):
        messagestr = self.smtp_process_queue.get(True, 10)
        msg = email.message_from_string(messagestr)
        otp = msg.get_payload()

        logger.debug("Received email message payload:%s", otp)

        return otp

    def __exit__(self, *args):
        self.smtp_process_queue.close()
        self.smtp_process.terminate()
        self.smtp_process.join(5)

    def _get_local_ip(self):
        """
        Get the IP address of the interface that connects to
        LinOTP
        """

        with closing(socket.create_connection((self.testcase.http_host,
                                               int(self.testcase.http_port)),
                                              10)) as s:
            addr = s.getsockname()[0]

        return addr