Example #1
def tempControlProcTest(mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param, conn):
    p = current_process()
    print 'Starting:', p.name, p.pid
    parent_conn_temp, child_conn_temp = Pipe()
    ptemp = Process(name="getrandProc", target=getrandProc, args=(child_conn_temp,))
    #ptemp.daemon = True
    ptemp.start()
    parent_conn_heat, child_conn_heat = Pipe()
    pheat = Process(name="heatProctest", target=heatProctest, args=(cycle_time, duty_cycle, child_conn_heat))
    #pheat.daemon = True
    pheat.start()

    while True:
        if parent_conn_temp.poll():
            randnum = parent_conn_temp.recv()  #non blocking receive
            conn.send([randnum, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param])
        if parent_conn_heat.poll():
            cycle_time, duty_cycle = parent_conn_heat.recv()
            #duty_cycle = on_time/offtime*100.0
            #cycle_time = on_time + off_time
        if conn.poll():
            mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = conn.recv()
            #conn.send([mode, cycle_time, duty_cycle])
            #if mode == "manual":
            parent_conn_heat.send([cycle_time, duty_cycle])
Example #2
class CameraProcess(object):
    END_MESSAGE = 'END'
    def __init__(self):
        self._resolution = Camera.RESOLUTION_LO
        self._main_conn, self._worker_conn = Pipe()

        self._worker = Process(target=cameraWorker, args=((self._main_conn, self._worker_conn),self._resolution,))
        self._worker.daemon = True
        self._worker.start()

    def stop(self):
        self._main_conn.send(CameraProcess.END_MESSAGE)
        while True:
            if self._main_conn.poll(0.1):
                if self._main_conn.recv() == CameraProcess.END_MESSAGE:
                    break
            self._main_conn.send(CameraProcess.END_MESSAGE)
        self._worker.join()

    def update(self):
        if not self._main_conn.poll():
            return
        data = self._main_conn.recv()
        self._y_stereo = data
        EventDispatcher().dispatch_event(YImageEvent(self._y_stereo))
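cameraWorker itself is not shown in this example. A minimal, hypothetical skeleton that matches the argument layout above and honours the END_MESSAGE handshake expected by stop() could look like this (the frame payload and the 0.1 s pacing are placeholders):

import time

def cameraWorker(conns, resolution):
    # Hypothetical worker: conns is the (main, worker) connection pair built in __init__;
    # resolution is accepted but unused in this sketch.
    _, worker_conn = conns
    while True:
        if worker_conn.poll():
            if worker_conn.recv() == CameraProcess.END_MESSAGE:
                worker_conn.send(CameraProcess.END_MESSAGE)  # acknowledge shutdown
                return
        worker_conn.send(b'frame-bytes')  # placeholder for real camera data
        time.sleep(0.1)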
Example #3
def run_approx(nodes, timeout: int):
    start_time = time.process_time()  # Record the starting process CPU time, in seconds
    solution_read, solution_write = Pipe()
    tracepoint_read, tracepoint_write = Pipe()

    trace = Trace()
    solution = None  # make sure 'solution' exists even if no result arrives before the timeout

    # Create a subprocess which will run the algorithm on another core while this process checks time
    p = Process(target=approx, args=(nodes, tracepoint_write, solution_write))
    p.start()  # Start the process

    # Have the thread spin so that it keeps track of time most accurately
    while ((time.process_time() - start_time) < timeout and p.is_alive()):
        if solution_read.poll(0) and tracepoint_read.poll(0):
            # poll() above already confirmed data is available, so this recv() will not block
            solution = solution_read.recv()

            # Receive corresponding tracepoint and append
            trace.add_tracepoint(tracepoint_read.recv())

    p.join(0)  # Join the subprocess which automatically closes the pipes
    if p.is_alive():
        p.terminate()

    return solution, trace
Example #4
def tempControlProcTest(mode, cycle_time, duty_cycle, set_point, k_param,
                        i_param, d_param, conn):

    p = current_process()
    print 'Starting:', p.name, p.pid
    parent_conn_temp, child_conn_temp = Pipe()
    ptemp = Process(name="getrandProc",
                    target=getrandProc,
                    args=(child_conn_temp, ))
    #ptemp.daemon = True
    ptemp.start()
    parent_conn_heat, child_conn_heat = Pipe()
    pheat = Process(name="heatProctest",
                    target=heatProctest,
                    args=(cycle_time, duty_cycle, child_conn_heat))
    #pheat.daemon = True
    pheat.start()

    while (True):
        if parent_conn_temp.poll():
            randnum = parent_conn_temp.recv()  #non blocking receive
            conn.send([
                randnum, mode, cycle_time, duty_cycle, set_point, k_param,
                i_param, d_param
            ])
        if parent_conn_heat.poll():
            cycle_time, duty_cycle = parent_conn_heat.recv()
            #duty_cycle = on_time/offtime*100.0
            #cycle_time = on_time + off_time
        if conn.poll():
            mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = conn.recv(
            )
            #conn.send([mode, cycle_time, duty_cycle])
            #if mode == "manual":
            parent_conn_heat.send([cycle_time, duty_cycle])
Example #5
 def run(self):
     if not self.isRunnable():
         print "Can't run thread '{}'".format(self.name)
         print self.target, self.EVT_ID, self.notifyWindow, self.process
         return
     try:
         parentPipe, childPipe = Pipe()
         #self.process = Process(target = self.target, args = (childPipe, self.args))
         self.process = threading.Thread(target = self.target,
            name = "Serial communication thread",
            kwargs = {"pipe": childPipe,
                      "args": self.args})
         self.process.name = self.name
         self.process.daemon = True
         self.process.start()
         while self.process.isAlive() or parentPipe.poll(0.001):
             if parentPipe.poll(0.001):
                 out = parentPipe.recv()
                 wx.PostEvent(self.notifyWindow, ResultEvent(out, self.EVT_ID))
             if self.stop:
                 if self.stopSignalTime == 0:
                     self.stopSignalTime = time() + 0.5
                     parentPipe.send([False])
                 if time() > self.stopSignalTime:
                     self.process._Thread__stop()
             wx.YieldIfNeeded()
             #sleep(0.01)
     except OSError, e:
             wx.PostEvent(self.notifyWindow, ResultEvent(\
             "Execution failed in thread '{}', message: {}".format(\
                                             self.name, e), self.EVT_ID))
    def add_gzip_upload_process(self, args, kwargs):
        """
        Create and start a gzip process
        Keep track of the total size of files we are gzipping
        :param args: contains info about file
        :param kwargs:
        :return:
        """
        parent_conn, child_conn = Pipe(duplex=False)

        process = Process(name=os.path.basename(args['file']),
                          target=self.gzip_upload_func,
                          args=[child_conn, args['file_part']],
                          kwargs=kwargs)

        setattr(process, 'process_type', 'GZIP')
        setattr(process, 'header', args['header'])
        setattr(process, 'file', args['file'])
        setattr(process, 'file_size', args['file_size'])

        process.start()
        self.gzip_running_size += args['file_size']
        logging.debug("Added Gzip process, running size {}".format(
            self.gzip_running_size))
        child_conn.close()
        try:
            parent_conn.poll()
            started = parent_conn.recv()
            if started:
                logging.debug("GZIP and upload process started on {}".format(
                    args['file_part']))
        except (EOFError, BrokenPipeError):
            pass
        return process, parent_conn
class IPCManagerPipes:
    def __init__(self):
        self.s2r_conn_r_, self.s2r_conn_w_ = Pipe()
        self.r2s_conn_r_, self.r2s_conn_w_ = Pipe()

    # def connect(self):
    #     pass

    # def start(self):
    #     pass

    # def shutdown(self):
    #     pass

    def get_from_sender(self):
        if not self.s2r_conn_r_.poll():
            return None
        return self.s2r_conn_r_.recv()

    def get_from_receiver(self):
        if not self.r2s_conn_r_.poll():
            return None
        return self.r2s_conn_r_.recv()

    def send_to_receiver(self, msg):
        self.s2r_conn_w_.send(msg)

    def send_to_sender(self, msg):
        self.r2s_conn_w_.send(msg)
Example #8
def main():

    parent_conn, child_conn = Pipe()
    #sim.reset()
    #sim.setup()
    sim_proc = Process(target=Simulation, args=(child_conn,GAIT_STEPS*STEP_REPEAT,1111, 'gui'))
    sim_proc.start()
    #joints_seg = parent_conn.recv()
    #print(joints_seg)
    #simulation.setup()
    #stepss = readFile(r'E:\projekty\spider\algorithm\gen\generations-07_18_2018_13_26.txt')
    sleep(2)
    while True:
        for step in stepss:
            print(len(step))
            y = convertToSim(step)
            logger.debug('converted: %s', str(y))
            y = copytoLegs(y)
            logger.debug('copied: %s', str(y))
            y = stepRepeat(y)
            logger.debug('sent: %s', str(y))
            parent_conn.send(y)
            while True:
                if parent_conn.poll():
                    rec = parent_conn.recv()
                    if rec == "simok":
                        logger.debug('simok')
                        break
                    else:
                        logger.debug('wrong data, received %s', str(rec))
                #else:
                    #logger.debug('waiting for simok')

            while True:
                if parent_conn.poll():
                    rec= parent_conn.recv()[-1]
                    logger.debug('data received : size %s %s', len(rec), str(rec))
                    if len(rec)==4:
                        contact, base_pos, base_angle, diff = rec
                    #if len(contact) == 6 and len(base_pos) == 3 and len(base_angle) == 3:
                        parent_conn.send('dataok')
                        logger.debug('dataok')
                        break
                else:
                    logger.debug('waiting for correct data')

            parent_conn.send('reset')
            while True:
                #print('in loop3')
                if parent_conn.poll():
                    rec = parent_conn.recv()
                    if rec == "resok":
                        logger.debug('resok')
                        break
                #else:
                #    logger.debug('waiting for reset confirm')
        #parent_conn.send('reset')


    print("end")
Example #9
def ls2(nodes, timeout: int,
        seed_num):  #Here we will implement the Simulated Annealing Algorithm
    start_time = time.process_time()  # Record the starting process CPU time, in seconds
    solution_read, solution_write = Pipe()
    tracepoint_read, tracepoint_write = Pipe()
    solution = Solution()
    random.seed(seed_num)
    trace = Trace()
    p = Process(target=anneal_route,
                args=(nodes, tracepoint_write, solution_write, seed_num))
    p.start()  # Start the process

    # Have the thread spin so that it keeps track of time most accurately
    while (((time.process_time() - start_time) < timeout) and p.is_alive()):
        if solution_read.poll(0) and tracepoint_read.poll(0):
            # Blocking call which shouldn't block since poll conditions ensure data is available
            solution = solution_read.recv()
            trace.add_tracepoint(tracepoint_read.recv(
            ))  # Receive corresponding tracepoint and append

    p.join(0)  # Join the subprocess which automatically closes the pipes
    if p.is_alive():
        p.terminate()

    return solution, trace
Example #10
def search(processes: list,
           pipe: Pipe,
           start: float = None,
           timeout: float = None) -> tuple:
    while len(active_children()) > 0 and not istimeout(start, timeout):
        if pipe.poll(): return pipe.recv()
    if pipe.poll(timeout=0.0001): return pipe.recv()
    if istimeout(start, timeout): raise Elliot("Timeout reached")
Example #11
def main():
    Base.metadata.create_all(engine)
    ready_msg = "ready"
    logger_queue = Queue(-1)
    page_parent_conn, page_child_conn = Pipe()
    logger_parent_conn, logger_child_conn = Pipe()

    log_process = LogListenerProcess(queue=logger_queue,
                                     pipe=logger_child_conn,
                                     verifying_message=ready_msg,
                                     configure=main_logger_configure)
    log_process.start()
    logger_parent_conn.send(ready_msg)

    get_process = GetKakakuNoteBookPage(lcd_from="10",
                                        lcd_to="16",
                                        configure=queue_logger_configure,
                                        queue=logger_queue,
                                        pipe=page_child_conn,
                                        verifying_message=ready_msg)
    get_process.start()
    page_parent_conn.send(ready_msg)

    queue_logger_configure(logger_queue)
    logger = getLogger(current_process().name)
    logger.info("Preparing PhantomJS ...")

    logger.info("[Start MainProcess]")
    if page_parent_conn.poll(timeout=60) and logger_parent_conn.poll(
            timeout=60):
        assert page_parent_conn.recv(
        ) == ready_msg, "GetKakakuNoteBookPage Process can't start."
        assert logger_parent_conn.recv(
        ) == ready_msg, "LogListener Process can't start."
        logger.debug("All processes ready.")
    else:
        print("[Process start failed]")
        sys.exit(1)

    logger.info("Try to get information from kakaku.com ...")
    while True:
        if page_parent_conn.poll(timeout=60):
            table = GetTable(page_parent_conn.recv())
            for tr in table.all_tr:
                scraped = ScrapeNoteBook(tr)
                add_to_db(scraped, logger=logger)
            page_parent_conn.send("Done")
        else:
            logger.info("[Complete GetKakakuNoteBook Process]")
            break

    logger.info("[End MainProcess]")
    page_parent_conn.send(None)
    logger_queue.put(None)
    log_process.join()
    get_process.join()
    sys.exit()
Example #12
    def blocking_run(self):
        parent_conn, child_conn = Pipe()
        q = Queue()
        self.p = Process(target=job_process,
                         args=(
                             self.job_id,
                             self.job_class,
                             self.parameters,
                             q,
                             child_conn,
                             self.server_url,
                             self.log_filename,
                         ))
        self.p.start()
        while self.p.is_alive():
            while parent_conn.poll():
                self.output_received_from_job(parent_conn.recv())
            time.sleep(1)
        self.p.join()
        try:
            while parent_conn.poll():
                self.output_received_from_job(parent_conn.recv())
        except:
            print('Exception while gathering job process output')

        try:
            if self.terminated:
                # Job was terminated from user request
                self.result = {
                    'job_id': self.job_id,
                    'success': False,
                    'retcode': 1,
                    'exception': 'Terminated by server',
                    'progress': 'terminated'
                }
            else:
                if q.empty():
                    # Job process error, the job should always place its output in the queue
                    self.result = {
                        'job_id': self.job_id,
                        'success': False,
                        'retcode': 1,
                        'exception': 'Job process terminated abnormally',
                        'progress': 'failed'
                    }
                else:
                    # Job terminated normally
                    self.result = q.get()
                    self.result['progress'] = self.status
        except:
            print('Exception while gathering job output')

        parent_conn.close()
        return self.result
Example #13
class Driver(object):
    def __init__(self):
        self._driver_process = None
        self._sig_parent = None
        self._sig_child = None

    def start(self, func, *argv):
        self._sig_parent, self._sig_child = Pipe()
        self._driver_process = Process(target=Driver.run_driver_func,
                                       args=(func, self._sig_child, argv))
        self._driver_process.daemon = True
        self._driver_process.start()

    def terminate(self, timeout=5):
        assert self._driver_process is not None, "It's an error to attempt to \
            terminate a driver before it has been started."

        try:
            self._driver_process.join(timeout)
        except TimeoutError:
            logging.error("The driver was not terminated for some reason "
                          "(exitcode: {}), force to terminate it.".format(
                              self._driver_process.exitcode))
            self._driver_process.terminate()
            time.sleep(0.1)
        finally:
            self._sig_parent.close()
            self._sig_parent = None
            self._sig_child = None
            self._driver_process = None

    def poll(self, timeout=None):
        if self._sig_parent is not None:
            if timeout is not None:
                return self._sig_parent.poll(timeout)
            else:
                return self._sig_parent.poll()
        else:
            return False

    def send(self, msg):
        self._sig_parent.send(msg)

    def recv(self):
        return self._sig_parent.recv()

    @staticmethod
    def run_driver_func(driver_func, signal, *argv):
        try:
            driver_func(signal, *argv[0])
        finally:
            signal.close()
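A possible way to exercise the Driver class above; count_up is an assumed example function, not part of the original code:

def count_up(signal, limit):
    # Hypothetical driver function: stream numbers back through the signal pipe.
    for i in range(limit):
        signal.send(i)

if __name__ == '__main__':
    driver = Driver()
    driver.start(count_up, 5)
    try:
        while driver.poll(1.0):
            print(driver.recv())      # 0, 1, 2, 3, 4
    except EOFError:
        pass                          # the child closed its end after finishing
    driver.terminate()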
Example #14
def timeout(func, args=(), kwargs={}, timeout_duration=10):
    """This function will spawn a thread and run the given function
    using the args, kwargs and return the given default value if the
    timeout_duration is exceeded.
    """

    class InterruptableProcess(Process):

        def __init__(self, pipe):
            Process.__init__(self)
            self.pipe = pipe

        def run(self):
            result = func(*args, **kwargs)
            self.pipe.send((result.installList, result.uninstallList, result.solvable))

    parent_conn, child_conn = Pipe()
    p = InterruptableProcess(child_conn)
    p.start()
    p.join(timeout_duration)
    if p.is_alive():
        p.terminate()
        pass
    if parent_conn.poll():
        return parent_conn.recv()
    else:
        return None
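The same spawn / join-with-timeout / poll pattern can be shown without the dependency-resolver payload above. A self-contained sketch under assumed names (slow_square and the 2-second limit are illustrative, not part of the original):

import time
from multiprocessing import Pipe, Process

def slow_square(x, conn):
    # Hypothetical worker: pretend the computation takes longer than we are willing to wait.
    time.sleep(5)
    conn.send(x * x)

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=slow_square, args=(4, child_conn))
    p.start()
    p.join(2)                  # wait at most 2 seconds
    if p.is_alive():
        p.terminate()          # give up on the worker
    print(parent_conn.recv() if parent_conn.poll() else None)   # None on timeout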
Example #15
 def run(self):
     print self.obj.name() + " thinks..."
     parent_conn, child_conn = Pipe()
     self.process = AsyncRunProcess(self.obj, self.state, child_conn)
     self.process.start()
     while not self.stop and not parent_conn.poll():
         time.sleep(0.1)
     if self.stop:
         self.process.terminate()
         self.process.join()
     elif parent_conn.poll():
         msg, result = parent_conn.recv()
         print "Calculation finished"
         self.process.join()
         self.emit(QtCore.SIGNAL("success"), result)
     self.emit(QtCore.SIGNAL("finished"))
Example #16
class SonicProcess(Process):
    """
    Wrapper class around multiprocessing.Process that captures any exception
    raised while the Process runs.

    This exception (including the traceback) can be logged in the test log to give better insight into why a particular Process failed.
    """
    def __init__(self, *args, **kwargs):
        Process.__init__(self, *args, **kwargs)
        self._pconn, self._cconn = Pipe()
        self._exception = None

    def run(self):
        try:
            Process.run(self)
            self._cconn.send(None)
        except Exception as e:
            tb = traceback.format_exc()
            self._cconn.send((e, tb))
            raise e

    # for wait_procs
    def wait(self, timeout):
        return self.join(timeout=timeout)

    # for wait_procs
    def is_running(self):
        return self.is_alive()

    @property
    def exception(self):
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception
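Assuming the SonicProcess class above is in scope, a possible usage with a made-up failing worker:

def flaky_task():
    # Hypothetical worker that always fails, to show the exception being captured.
    raise RuntimeError("device unreachable")

if __name__ == '__main__':
    proc = SonicProcess(target=flaky_task, name="flaky_task")
    proc.start()
    proc.wait(timeout=10)
    if proc.exception:
        error, tb = proc.exception
        print("process failed with {!r}\n{}".format(error, tb))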
Example #17
    class ActiveConn(object):
        """
        Talks to the specific backend instance associated with a userid
        """
        def __init__(self, userid, modules):
            self.userid = userid
            self.here, self.there = Pipe(duplex=True)
            self.proc = CoqProc()
            self.proc.start(modules)
            self.read()
            logging.debug("Coqtop Process started %s", self.proc)

        def read(self):
            """poll results from the coqtop backend"""
            res = None
            self.proc.run(self.there)
            if self.here.poll():
                res = self.here.recv()
                logging.debug("Received content from process")
            return {'userid': self.userid, 'response': res}

        def send(self, data):
            """send results to our coqtop instance"""
            if self.proc.alive:
                logging.debug("sending stuff")
                self.here.send(data + " ")
                return True
            else:
                return False

        def quit(self):
            if self.proc.alive:
                return self.proc.terminate(True)
Example #19
class CelRecognizerWorker(QObject):
    update = Signal(list)

    def __init__(self):
        super().__init__()
        self.process = None

        self.timer = QTimer()
        self.timer.timeout.connect(self.check_for_results)
        self.timer.setInterval(1000)
        self.timer.start(1000)
    
    def submit(self, cel_path, video_path):
        # Cancel an existing process if it exists
        self.cancel()
        
        # Start a new process to run the CelRecognizer
        self.pipe, child_conn = Pipe()
        self.process = Process(
            target=_recognizer_worker_func,
            args=(child_conn, cel_path, video_path))
        self.process.start()
    
    def check_for_results(self):
        if self.process is not None:
            while self.pipe.poll():
                results = self.pipe.recv()
                self.update.emit(results)

    def cancel(self):
        if self.process is not None:
            self.process.terminate()
            self.process.join()
            self.process.close()
            self.process = None
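The real _recognizer_worker_func is project-specific and not shown here; a hypothetical stand-in that simply feeds the polling loop above might look like this (the (frame_index, score) payload is invented):

import time

def _recognizer_worker_func(conn, cel_path, video_path):
    # Hypothetical stand-in: cel_path and video_path would drive the real recognizer,
    # but are unused here. Emit a growing list of (frame_index, score) pairs so
    # check_for_results() receives something roughly once per second.
    results = []
    for frame_index in range(10):
        time.sleep(1)                       # pretend to analyse a chunk of the video
        results.append((frame_index, 0.0))
        conn.send(list(results))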
Example #20
    def dispatch(self, fcn, timeout=0, params=()):
        """Start a new subrocess.

        This function will detach 'fcn' as a process of its own,
        passing params AND a pipe as arguments to 'fcn'.
        If 'timeout' > 0 this function will block, waiting for the
        detached process to send some data over the pipe.
        If data is written to the pipe in time, the pipe will be returned to the calling process.
        On timeout, the pipe will be closed and 'None' will be returned to the calling process.

        """
        conn, child_conn = Pipe()
        p = Process(target=fcn, args=params + (child_conn,))
        p.start()
        if timeout > 0:
            poll_status = conn.poll(timeout)
            if poll_status == False:
                print "Dispatched function %s did not send anything within the specified timeout (%d s)" % (fcn, timeout)
                # FIXME: How to properly handle this case?
                # - p.terminate() doesn't terminate any subprocesses the p might have spawned
                # - conn.close(), i.e. closing the control channel, is probably better. Make sure p detects and handle it.
                # - send a proper control message (set running to False) should work too.
                # The proper way to implement a client is to handle connection.close() and stop running control message in the same way.
                conn.close()
                conn = None
        return conn
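For illustration only, a target compatible with dispatch() might look like the sketch below: it is called as fcn(*params, child_conn), confirms start-up so the dispatcher's poll(timeout) succeeds, and keeps producing until the dispatcher writes anything back or closes the pipe. The name producer and its payload are assumptions:

import time

def producer(interval, conn):
    # Hypothetical dispatch() target.
    conn.send("started")               # satisfies the dispatcher's poll(timeout)
    while not conn.poll(interval):     # stops once the dispatcher sends something or closes its end
        conn.send(time.time())         # placeholder payload

It could then be started with something like conn = obj.dispatch(producer, timeout=5, params=(1.0,)).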
Example #21
class Simulation:
    """
    Physics simulation
    """
    def __init__(self, team, project):
        logging.debug("Simulation created for team %d project %s" %
                      (team, project))
        self.team = team
        self.project = project

        #TODO: Load code from SVN
        zip = open("robot.zip", "rb").read()
        self.pipe, otherend = Pipe()

        self.sim = Sim(zip, otherend)
        self.sim.start()

    def step(self, commands):
        """
        Step the simulation
        """
        #Send commands
        for command in commands:
            message, args = command[0], command[1:]
            self.pipe.send((message, args))

        #Receive data
        data = []
        while self.pipe.poll():
            data.append(self.pipe.recv())
        return data

    def end(self):
        self.pipe.send(("END", None))
        self.sim.join()
class StubExecuteTestsFunc:
    def __init__(self):
        self.main_conn, self.func_conn = Pipe()
        self._called = self._complete = None
        self.stub_reset()

    def stub_reset(self):
        self._called = self._complete = False

    def stub_complete(self):
        self._complete = True
        self.main_conn.send(StubExecuteTestsFuncConnMessages.COMPLETE)

    def stub_called(self):
        if not self._called and self.main_conn.poll():
            conn_message = self.main_conn.recv()
            if conn_message == StubExecuteTestsFuncConnMessages.CALLED:
                self._called = True
        return self._called

    def __enter__(self):
        self.stub_reset()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stub_complete()

    def __call__(self, *_):
        self._called = True
        self.func_conn.send(StubExecuteTestsFuncConnMessages.CALLED)
        while not self._complete:
            conn_message = self.func_conn.recv()
            if conn_message == StubExecuteTestsFuncConnMessages.COMPLETE:
                self._complete = True
Example #23
class MMLWorker(object):
    """Represents the Daemon's connection to the subprocess"""
    def __init__(self,runner):
        """Creates and initalizes a subprocess and its connections."""
        self.pipe, pipe = Pipe()
        self.proc = Process(target=worker, args=(pipe,runner))
        self.proc.start();
        self.pid = self.proc.pid
    def __del__(self):
        """Ensures the subprocess is correctly garbage collected."""
        self.pipe.close();
        self.proc.terminate();
    def pump(self,block=False):
        """Returns a key,val pair from the subprocess, or None,None."""
        key,val = None,None
        if block:
            (key,val) = self.pipe.recv()
        elif self.pipe.poll():
            (key,val) = self.pipe.recv()
        return key,val
    def stop(self):
        """Sends the stop signal to the subprocess."""
        self.pipe.send(('stop',()))
    def pause(self):
        """Sends the pause signal to the subprocess."""
        self.pipe.send(('pause',()))
    def start(self,program):
        """Sends the start signal to the subprocess."""
        self.pipe.send(('start',(program,)))
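The worker target referenced above is not included in the example; a minimal, hypothetical loop that speaks the same (command, args) protocol and produces the (key, value) pairs pump() expects could be:

def worker(pipe, runner):
    # Hypothetical counterpart to MMLWorker; runner is the project-specific object
    # passed through unchanged and is unused in this sketch.
    program = None
    while True:
        command, args = pipe.recv()
        if command == 'start':
            program = args[0]
            pipe.send(('status', 'started'))
        elif command == 'pause':
            pipe.send(('status', 'paused'))
        elif command == 'stop':
            pipe.send(('status', 'stopped'))
            break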
Example #24
class RPC_Wamp(object):

    def __init__(self):
        self.ip = unicode(CONF.wamp.wamp_ip)
        self.port = unicode(CONF.wamp.wamp_port)
        self.realm = unicode(CONF.wamp.wamp_realm)
        self.server = RPCWampServer(self.ip, self.port, self.realm)
        self.b_a_int, self.b_a_ext = Pipe()
        server_process = multiprocessing.Process(target=reactor.run, args=())
        server_process.start()

    def rpc_call(self, rpc, *args):
        res = ''
        RPCWampClient(
            self.ip, self.port, self.realm, rpc, args, self.b_a_ext)
        client_process = multiprocessing.Process(target=reactor.run, args=())
        client_process.start()

        while True:
            if self.b_a_int.poll():
                res = self.b_a_int.recv()
                client_process.join()
                break
        if res['error'] == 0:
            return res['response']
        else:
            return {'result': 1}
Example #26
    def map(self, function, *function_args):
        lock = Lock()
        main_pipe, remote_pipe = Pipe()
        requests = list(enumerate(function_args))
        works_count = len(function_args)
        results = [self] * works_count
        workers = self._create_workers(function, function_args, remote_pipe,
                                       lock)

        cursor = 0
        while cursor < works_count:
            if results[cursor] is not self:
                yield results[cursor]
                cursor += 1
            if main_pipe.poll():
                response = main_pipe.recv()
                if requests:
                    main_pipe.send(requests.pop(0))
                if response is not True:
                    index, result = response
                    results[index] = result

        for worker in workers:
            worker.join()
        main_pipe.close()
        return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
Example #27
class gui:
    ''' A multithreaded handler for the coincidence-count GUI '''
    def __init__(self):
        ''' Constructor '''
        # Start up the GUI process and build the communication network
        self.pipe, their_pipe = Pipe()
        self.gui = Process(target=gui_head,
                           name='gui_head',
                           args=(their_pipe, ))
        self.gui.start()

    def collect(self):
        ''' Collect all messages and act upon them '''
        messages = []
        while self.pipe.poll(0):
            messages.append(self.pipe.recv())
        return messages

    def send(self, key, value):
        ''' Send a message to the threaded GUI '''
        self.pipe.send((key, value))

    def kill(self):
        ''' Send the message to shut down '''
        self.send('kill', None)
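gui_head is the project's GUI entry point and is not reproduced here; a hypothetical message loop that would satisfy collect(), send() and kill() is sketched below (the 'ack' reply is invented):

def gui_head(pipe):
    # Hypothetical GUI-side loop: consume (key, value) messages, stop on 'kill'.
    while True:
        key, value = pipe.recv()
        if key == 'kill':
            break
        pipe.send((key, 'ack'))   # placeholder reply picked up by collect()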
Example #28
class J1708():
	def __init__(self,uart=None):
		self._sport = None
		if uart is not None:
			self._sport = serial.Serial(port=uart,baudrate=9600,
						    bytesize=serial.EIGHTBITS,
						    stopbits=serial.STOPBITS_ONE)

		self.buslock = Lock()
		self.mypipe, self.otherpipe = Pipe()
		self.r_proc = Process(target=run, args=(self._sport,self.buslock,self.otherpipe))
		self.r_proc.start()


	def read_message(self,timeout=None):
		if not self.mypipe.poll(timeout):
			return None

		retval = self.mypipe.recv()

		return retval

    #currently relying on the read_thread to maintain synchronization
	def send_message(self,msg):
		retval = 0
		thismsg = bytes(msg)
		chksum = struct.pack('b',checksum(thismsg))
		thismsg += chksum
		with self.buslock:
			retval = self._sport.write(thismsg)
			self._sport.flushInput()#solve "echo" problem
		return retval
     
	def __del__(self):
		self.r_proc.terminate()
class MPPlugin(object):
    # this functionality needs to be implemented in the cython parent side
    def __init__(self):
        self.plot_pipe, plotter_pipe = Pipe()
        self.plotter = SimplePlotter(20000.)
        self.plot_process = Process(target=self.plotter,
                                    args=(plotter_pipe, ))
        self.plot_process.daemon = True
        self.plot_process.start()

    def bufferfunction(self, n_arr=None, finished=False):
        # print "entering plot"
        send = self.plot_pipe.send
        if finished:
            send(None)
        else:
            # print "sending data"
            if not n_arr:
                n_arr = np.random.random((11, 1000))
            send({'data': n_arr})

        while 1:
            if not self.plot_pipe.poll():
                break
            e = self.plot_pipe.recv()
            print(e)

    def setparam(self, name, value):
        self.plot_pipe.send({'param': {name: value}})
Example #30
class EmulatorProxy(object):

    width = 128
    height = 64
    device_mode = "1"
    char_width = 6
    char_height = 8
    type = ["char", "b&w-pixel"]

    def __init__(self):
        self.device = type("MockDevice", (), {"mode": "1", "size": (128, 64)})
        self.parent_conn, self.child_conn = Pipe()
        self.proc = Process(target=Emulator, args=(self.child_conn, ))
        self.__base_classes__ = (GraphicalOutputDevice, CharacterOutputDevice)
        self.current_image = None
        self.proc.start()

    def poll_input(self, timeout=1):
        if self.parent_conn.poll(timeout) is True:
            return self.parent_conn.recv()
        return None

    def quit(self):
        DummyCallableRPCObject(self.parent_conn, 'quit')()
        self.proc.join()

    def __getattr__(self, name):
        # Raise an exception if the attribute being called
        # doesn't actually exist on the Emulator object
        getattr(Emulator, name)
        # Otherwise, return an object that imitates the requested
        # attribute of the Emulator - for now, only callables
        # are supported, and you can't get the result of a
        # callable.
        return DummyCallableRPCObject(self.parent_conn, name)
def simulateModel(geometry, model):
    from multiprocessing import Pipe, Process
    from time import sleep
    parent_conn, child_conn = Pipe()
    p = Process(target=_simulateModel, args=(geometry, model, child_conn))
    try:
        p.start()
        while not parent_conn.poll():
            sleep(0.1)
        timeTrace, vTraces, textOutput, err, tb = parent_conn.recv()
        p.join()
    except BaseException:
        if p.is_alive():
            p.terminate()
        timeTrace = []
        vTraces = []
        textOutput = ""
        err = None
        tb = ""
        raise

    if err is not None:
        print(tb)
        raise err
    return timeTrace, vTraces, textOutput
Example #32
class gui:

    """ A multithreaded handler for the coincidence-count GUI """

    def __init__(self):
        """ Constructor """
        # Start up the GUI process and build the communication network
        self.pipe, their_pipe = Pipe()
        self.gui = Process(target=gui_head, name="gui_head", args=(their_pipe,))
        self.gui.start()

    def collect(self):
        """ Collect all messages and act upon them """
        messages = []
        while self.pipe.poll(0):
            messages.append(self.pipe.recv())
        return messages

    def send(self, key, value):
        """ Send a message to the threaded GUI """
        self.pipe.send((key, value))

    def kill(self):
        """ Send the message to shut down """
        self.send("kill", None)
Example #33
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs["_conn"] = conn2
            th = Process(target=partial(fn, best_loss=self._best_loss), args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                self.info("TERMINATING DUE TO TIMEOUT")
                th.terminate()
                th.join()
                fn_rval = "return", {"status": hyperopt.STATUS_FAIL, "failure": "TimeOut"}

            assert fn_rval[0] in ("raise", "return")
            if fn_rval[0] == "raise":
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]["status"] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get("loss"))
                fn_preprocs = fn_rval[1].pop("preprocs")
                fn_classif = fn_rval[1].pop("classifier")
                fn_iters = fn_rval[1].pop("iterations")
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
                    self._best_iters = fn_iters
            return fn_rval[1]
Example #34
    def run(self):
        logging.info('Visualizer thread started')

        parent_end, child_end = Pipe()

        # Sensible default value for max_process
        max_process = 3
        process_count = 0

        while not self.stop or not self.job_backlog.empty():
            while parent_end.poll(0.1):
                (name, counter) = parent_end.recv()
                self.controller.find_trial(name).set_counter_plot(counter)
                process_count -= 1

            if self.job_backlog.empty():
                time.sleep(1)
            elif process_count < max_process:
                process_count += 1

                function, snapshot, trial = self.job_backlog.get_nowait()
                logging.info('Visualizing {}'.format(trial.get_name()))
                p = Process(target=self.render_graph,
                            args=(function, snapshot, trial.get_name(),
                                  child_end))
                p.start()
                self.job_backlog.task_done()

        logging.info('Visualizer Finished')
Example #35
def run_http_server(redirect_uri = None, modify_port = True, port_range = (10000, 10010) ):
    """Returns (modified) redirect_uri"""
    from multiprocessing import Process, Pipe
    from urllib.parse import urlsplit, urlunsplit
    if redirect_uri is None:
        redirect_uri = "http://localhost"
    p = urlsplit(redirect_uri)
    #Ensure hostname is localhost or 127.0.0.1
    if p.hostname != "127.0.0.1" and p.hostname != "localhost":
        raise ValueError("url must have host of 127.0.0.1 or localhost! Got: {}".format(p.hostname))
    if not modify_port:
        if p.port is not None:
            port_range = (int(p.port), int(p.port))
        else:
            port_range = (int(80), int(80))
    parent_port_pipe, child_port_pipe = Pipe()
    parent_pipe, child_pipe = Pipe()
    httpd_p = Process(target = _run_http_server, args = (child_port_pipe, child_pipe, port_range))
    httpd_p.start()
    if parent_port_pipe.poll(3000):
        final_port = parent_port_pipe.recv()
    else:
        raise Exception("Timeout waiting for HTTP server process to start")
    if final_port == 0:
        #Could not find a port
        raise Exception("Could not find open port")
    netloc = "{0}:{1}".format(p.hostname, final_port)
    if p.path:
        path = p.path
    else:
        path = '/'
    p = p._replace(netloc = netloc, path = path)
    return (urlunsplit(p), parent_pipe, httpd_p)
Example #36
class ProcessManager():
    def __init__(self, client_id, debug=False):
        self.client_id = client_id
        self.client_status = Value("i", ClientStatus.IDLE)
        self.parent_conn, self.child_conn = Pipe()
        self.pinger = Pinger(client_id, self.child_conn, self.client_status)
        self.task_worker = TaskWorker(client_id, self.client_status, debug)

    def _are_processes_alive(self, processes):
        for process in processes:
            if not process.is_alive():
                other_processes = list(
                    filter(lambda p: p != process, processes))
                for other_process in other_processes:
                    other_process.stop()

                logging.info("Client is exiting...")
                return False

        return True

    def launch(self):
        self.task_worker.launch()
        self.pinger.launch()

        while True:
            if not self._are_processes_alive([self.task_worker, self.pinger]):
                return

            if self.parent_conn.poll(1):
                self.parent_conn.recv()
                logging.info("Stopping task...")
                self.task_worker.stop()
                self.task_worker.launch()
Example #37
def evaluate_expression(exp):
    '''
    Evaluates given expression.
    '''    
    if not exp:
        return "No expression supplied."
    exp = str(exp)
    
    # Setup evaluation process if it's not present
    global eval_process, eval_pipe_parent, eval_pipe_child
    if not eval_process:
        eval_pipe_parent, eval_pipe_child = Pipe()
        eval_process = Process(name = "seejoo_eval",
                               target = _eval_worker,
                               args = (eval_pipe_child,))
        eval_process.daemon = True
        eval_process.start()
    
    # Push expression through the pipe and wait for result
    eval_pipe_parent.send(exp)
    if eval_pipe_parent.poll(EVAL_TIMEOUT):
        res = str(eval_pipe_parent.recv())
        res = filter(lambda x: ord(x) >= 32, res)   # Sanitize result
        return res        

    # Evaluation timed out; kill the process and return error
    os.kill(eval_process.pid, 9)
    os.waitpid(eval_process.pid, os.WUNTRACED)
    eval_process = None
    return "Operation timed out."
Example #39
class MyThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.pipe_ext, self.pipe_int = Pipe()
            
    def read(self):
        msgs = []
        while self.pipe_ext.poll():
            msgs.append(self.pipe_ext.recv())
            
        return msgs

    def write(self, data):
        self.pipe_ext.send(data)
        
    
    def internal_read(self):
        msgs = []
        while self.pipe_int.poll():
            msgs.append(self.pipe_int.recv())
            
        return msgs

    def internal_write(self, data):
        self.pipe_int.send(data)
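As written the class only wraps the two pipe ends and never overrides run(), so nothing services pipe_int by itself. A hypothetical subclass that echoes messages back, plus a usage sketch (the None shutdown sentinel is an assumption):

import time

class EchoThread(MyThread):
    def run(self):
        # Echo every message from the external side back, until a None sentinel arrives.
        while True:
            msg = self.pipe_int.recv()
            if msg is None:
                break
            self.pipe_int.send(msg)

if __name__ == '__main__':
    t = EchoThread()
    t.start()
    t.write('ping')        # external side
    time.sleep(0.1)        # give the echo a moment to travel back
    print(t.read())        # ['ping']
    t.write(None)          # assumed shutdown sentinel
    t.join()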
Example #40
class Airplay():
	def __init__(self, file):
		self.metadata   = AIRPLAY_DEFAULT
		self.block      = ''
		self.out_pipe, self.in_pipe = Pipe()

		p = Process(target=read_shairport_pipe, args=(file, self.in_pipe,))
		p.start()

	def __repr__(self):
		printout = "metadata:\n"+self.metadata
		# for k,v in self.metadata.items():
		# 		printout += '%12s : %s\n' % (k,v)
		return printout


	def grab(self):
		if self.out_pipe.poll(0):
			s = True
			self.metadata = self.out_pipe.recv()   # prints "[42, None, 'hello']"
		else:
			print "nothing in pipe"
			s = False

		return s
Example #41
def test_contract_manager() -> None:
    aut = AirspaceManager()
    stop_ev = Event()
    conn, manager_conn = Pipe()

    p = Process(target=run_as_process, kwargs={"aut": aut,
                                               "conn": manager_conn,
                                               "stop_ev": stop_ev})
    p.start()
    try:
        uid = 0
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
            elif i % 3 != 2:
                uid = i % 5
                target = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 3, 0.5])),
                    (1.0, Rectangle(mins=[0.5, 0.5, 1.0], maxes=[1.5, 1.5, 1.5]))
                    ])
                conn.send(("request", {"uid": uid, "target": target}))
            else:
                releasable = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 2, 0.5]))
                    ])
                print("Agent " + str(uid) + " release > " + str(releasable))
                conn.send(("release", {"uid": uid, "releasable": releasable}))
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        manager_conn.close()
Example #42
def master():
    results = []
    mcn, scn = Pipe()
    mon = Thread(target=monitor, args=(mcn, results))
    mon.daemon = True
    mon.start()

    proc = Process(target=slave, args=(scn,))
    proc.start()

    mcn.send('1')
    while not results:
        time.sleep(0.01)
    pid = results.pop()
    log.info('pid: %s', pid)

    mcn.send('die')
    try:
        for n in range(7):
            if mcn.poll(1):
                break
            if not proc.is_alive():
                raise Exception('stop')
    except Exception:
        log.error('slave died', exc_info=True)
Example #43
class Aby3Process(Process):
    """
    Extends Process to evaluate a computation party in aby3.
    """
    def __init__(self, *args, **kwargs):
        Process.__init__(self, *args, **kwargs)
        self._pconn, self._cconn = Pipe()
        self._exception = None

    def run(self):
        """
        Override. Send any exceptions raised in
        subprocess to main process.
        """
        try:
            Process.run(self)
            self._cconn.send(None)
        except Exception as e:
            tb = traceback.format_exc()
            self._cconn.send((e, tb))

    @property
    def exception(self):
        """
        Get exception.
        """
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception
Example #45
class EmulatorProxy(object):

    width = 128
    height = 64

    def __init__(self):
        self.device = type("MockDevice", (), {"mode": "1", "size": (128, 64)})
        self.parent_conn, self.child_conn = Pipe()
        self.proc = Process(target=Emulator, args=(self.child_conn, ))
        self.proc.start()

    def poll_input(self, timeout=1):
        if self.parent_conn.poll(timeout) is True:
            return self.parent_conn.recv()
        return None

    def quit(self):
        DummyCallableRPCObject(self.parent_conn, 'quit')()
        self.proc.join()

    def __getattr__(self, name):
        # Raise an exception if the attribute being called
        # Doesn't actually exist on the Emulator object
        getattr(Emulator, name)
        return DummyCallableRPCObject(self.parent_conn, name)
Example #46
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs['_conn'] = conn2
            th = Process(target=fn, args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                print 'TERMINATING DUE TO TIMEOUT'
                th.terminate()
                th.join()
                fn_rval = 'return', {
                    'status': hyperopt.STATUS_FAIL,
                    'failure': 'TimeOut'
                }

            assert fn_rval[0] in ('raise', 'return')
            if fn_rval[0] == 'raise':
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]['status'] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get('loss'))
                fn_preprocs = fn_rval[1].pop('preprocs')
                fn_classif = fn_rval[1].pop('classifier')
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
            return fn_rval[1]
Example #47
    def run(self):
        logging.info('Visualizer thread started')

        parent_end, child_end = Pipe()

        # Sensible default value for max_process
        max_process = 2
        process_count = 0

        while not self.stop or not self.job_backlog.empty():
            while parent_end.poll(0.1):
                parent_end.recv() ## currently not using the info... irrelevant
                
                ## TODO - a signal to notify the viewer that the visualization job has been finished...
                #self.controller.view_update(self)
                process_count -= 1

            if self.job_backlog.empty():
                time.sleep(1)
            elif process_count < max_process:
                process_count += 1
                run_name, function, snapshot = self.job_backlog.get_nowait()
                if not (run_name in self.remove_run_name):
                    logging.info('Added job to visualizer queue: ' + str(run_name))
                    logging.info('No. of jobs in queue: ' + str(process_count))
                    p = Process(target=self.render_graph,
                                args=(function, snapshot, run_name, child_end))
                    p.start()
                
        logging.info('Visualizer Finished')
def fork_get_action(game_state, active_player, time_limit):
    action_queue = Queue()
    listener, client = Pipe()
    active_player.queue = action_queue  # give the agent instance a threadsafe queue

    # comment out these lines for debugging mode
    p = Process(target=_request_action,
                args=(active_player, game_state, time_limit, client))
    p.start()
    p.join(timeout=PROCESS_TIMEOUT)
    if p and p.is_alive(): p.terminate()

    # Uncomment these lines to run in debug mode, which runs the search function in the
    # main process so that debuggers and profilers work properly. NOTE: calls to your
    # search methods will NOT timeout in debug mode; you must be very careful to avoid
    # calls that are not methods of your CustomPlayer class or else your agent may fail
    #
    # from copy import deepcopy
    # active_player.queue = None
    # active_player = deepcopy(active_player)
    # active_player.queue = action_queue
    # _request_action(active_player, game_state, time_limit, client)

    if listener.poll():
        active_player.context = listener.recv()  # preserve any internal state
    while True:  # treat the queue as LIFO
        action = action_queue.get_nowait()  # raises Empty if agent did not respond
        if action_queue.empty(): break
    return action
Example #49
class NodePair:
    def __init__(self,txnode,rxnode):
        self.txNode = txnode
        self.rxNode = rxnode
        self.ptx_conn, self.tx_conn = Pipe(True)
        self.prx_conn, self.rx_conn = Pipe(True)
        self.ptx = Process(target=self.txNode.runNode, args=(self.tx_conn,))
        self.prx = Process(target=self.rxNode.runNode, args=(self.rx_conn,))
        self.ptx.start()
        self.prx.start()

    def CompareResults(self):
        self.prx_conn.send(StatusMessage())
        passedB = False
        ba = StatusMessage()
        if self.prx_conn.poll(1):
            b = self.prx_conn.recv()#clear buffer
            print '%s checksums rx %x calc %x'%(self.rxNode.name, b.status[3], b.status[4])
            passedB = True
        if passedB :
            if b.status[3] == b.status[4]:
                return True
            else:
                return False
        else:
            print 'error found'

    def GetDisplayName(self):
        return self.txNode.name + ' to ' + self.rxNode.name

    def StopNodePair(self):
        self.ptx_conn.send(CloseMessage())
        self.prx_conn.send(CloseMessage())
        self.ptx.join()
        self.ptx_conn.close()
        self.tx_conn.close()
        self.ptx.terminate()
        self.prx.join()
        self.prx_conn.close()
        self.rx_conn.close()
        self.prx.terminate()

    def SetTranmitData(self,txData):
        self.ptx_conn.send(TxDataMessage(txData))
        if self.ptx_conn.poll(2):
            a = self.ptx_conn.recv()#clear buffer
            print 'inside checksum %x'%a.checksum
Example #50
class ProcessIA():
	"""
	Class that launches an AI as a subprocess.
	Also handles communication with the launched AIs through a pipe.
	"""
	def __init__(self, color_and_robot_list):
		self.__color = color_and_robot_list[0]
		if color_and_robot_list[1] is not None:
			self.__bigrobot = color_and_robot_list[1]
			if color_and_robot_list[2] is not None:
				self.__minirobot = color_and_robot_list[2]
			else:
				self.__minirobot = None
		else:
			self.__bigrobot =None
			if color_and_robot_list[2] is not None:
				self.__minirobot = color_and_robot_list[2]
			else:
				self.__minirobot = None
		self.__robots = color_and_robot_list
		self.__hokuyo = Hokuyo(self.__robots[1:])
		self.__communication = Communication(self.__bigrobot, self.__minirobot, self.__hokuyo, self)
		# data communication between the AI and the simulator
		self.__parent_conn, self.__child_conn = Pipe()
		# launch the AI
		if TEST_MODE == False:
			self.__process = Process(target=main.startIa, args=(self.__child_conn,self.__color))
		elif TEST_MODE == True :
			self.__process = Process(target=test.testIa, args=(self.__child_conn,self.__color)) # for tests
		self.__process.start()
		time.sleep(0.1)
		# start the thread that reads the AI data sent back through the pipe
		self.__read_thread = threading.Thread(target=self.__readPipe)
		self.__read_thread.start()

	def writePipe(self, addresse, ordre, args):
		"""
		Sends data to the AI through the pipe
		"""
		self.__parent_conn.send((addresse, ordre, args))

	def __readPipe(self):
		"""
		Reads the data sent back by the AI via the pipe.
		recv is blocking, so this runs in a separate thread.
		"""
		while True:
			if self.__parent_conn.poll():
				message = self.__parent_conn.recv()
				self.__parseDataIa(message)



	def __parseDataIa(self, data):
		"""
		Formate les données IA reçues pour les adapter à la méthode
		sendOrderAPI de Communication
		"""
		self.__communication.orderBalancing(data[0],data[1],data[2])
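
The pipe-plus-reader-thread layout ProcessIA uses (a worker Process on one end, a reading thread on the parent end) can be boiled down to the sketch below. The fake_ia worker and the handle callback are stand-ins invented for illustration; only the standard library is assumed.

import threading
import time
from multiprocessing import Pipe, Process

def fake_ia(conn, color):
    # Stand-in for the real AI entry point: report a few (address, order, args) tuples.
    for i in range(3):
        conn.send((0, 'move', (color, i)))
        time.sleep(0.05)

def handle(message):
    print('received:', message)

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    proc = Process(target=fake_ia, args=(child_conn, 'yellow'))
    proc.start()

    def read_pipe():
        # recv() blocks, so the reads run in their own thread.
        while proc.is_alive() or parent_conn.poll():
            if parent_conn.poll(0.1):
                handle(parent_conn.recv())

    reader = threading.Thread(target=read_pipe)
    reader.start()
    proc.join()
    reader.join()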
Example #51
0
class Flow:
  def __init__ (self, debug_mode=False):
    from multiprocessing import Pipe, Process
    self.motions = []
    self.debug_mode = debug_mode
    self.parent_conn, self.child_conn = Pipe()
    self.p = Process(target=self.do_motion_detect, args=(self.child_conn,))
    self.p.start()

  # Send kill command to child process
  def kill (self):
    self.parent_conn.send('STOP')
    #self.parent_conn.recv()
    self.p.join()

  def _clear_pipe (self):
    while self.parent_conn.poll(0.01):
        reading = self.parent_conn.recv()
        self.motions.append(reading)


  # Checks if pipe has any data
  # If it does return that
  # Else return []
  def get_motion (self):
    self._clear_pipe()
    if len(self.motions) == 0: return None
    else:
        copy = self.motions
        self.motions = []
        return copy

  def do_motion_detect (self, child_conn):
    import os, picamera, subprocess, time
    camera = picamera.PiCamera(framerate=30)

    output = DetectMotion(camera)
    output.set_conn(child_conn)
    while True:
      try:
        #camera.resolution = (640, 480)
        camera.resolution = (200, 150)
        #camera.resolution = (240, 160)
        camera.start_recording('/dev/null', format='h264', motion_output=output)
        camera.wait_recording(0.25)
        #t = int(time.time())
        #name = '{}-image.jpg'.format(t)
        name = 'image.jpg'
        if os.path.exists(name):
            os.remove(name)
        camera.capture(name, use_video_port=True)
        camera.stop_recording()

        if child_conn.poll(0.01):
          s = child_conn.recv()
          if s == 'STOP': break

      except KeyboardInterrupt:
          break
    camera.close()
Example #52
0
def data_handler(spigot_data: SpigotData, lock: threading.Lock):
    """
    Main thread for reading output from spigot IO worker.
    Also interprets data.

    spigot_data - shared SpigotData object
    """
    client, child = Pipe()

    t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
    t.start()

    spigot_data.game.status = SpigotState.RUNNING

    running = True

    while running:
        while t.is_alive():
            if client.poll(0.3):
                buf = client.recv()
                parse_event(spigot_data, buf)
                spigot_data.add_message(buf)
                logging.debug('<<OUTPUT>> {}'.format(buf))

            while not spigot_data.commands.empty():
                command = spigot_data.commands.get()
                client.send(command)
                logging.debug('<<COMMAND>> {}'.format(command))

        # After java process is dead
        spigot_data.status = SpigotState.STOPPED
        spigot_data.add_message(info_message("""The server has stopped. Type 'start' to start. """
                                             """Type 'quit' to close Spigot Monitor"""))  # PEP8ers gonna hate

        spigot_data.game.players = {}  # No players are available on a stopped server...

        while not AUTO_RESTART:
            command = spigot_data.commands.get().strip()  # strip because commands have newline appended
            if command.lower() == 'start':
                t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
                t.start()
                logging.debug('Thread created.')
                break
            elif command.lower() == 'quit' or command.lower() == 'stop':
                message = info_message("KTHXBAI")
                spigot_data.add_message(message)
                logging.debug('Quitting program.')
                break

        if AUTO_RESTART:
            t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
            t.start()
            logging.debug('Thread created.')

        if not t.is_alive():  # thread hasn't started again
            running = False

    print('exiting data_handler loop')
Example #53
0
    def _proxy_loop(self, broker, *args):
        is_debug = partial(log.isEnabledFor, logging.DEBUG)
        pid = os.getpid()
        proc = None
        queue = self.queue

        def stop():
            try:
                if proc is not None:
                    child.send(STOP)
                    child.close()
                    proc.join()
            except Exception:
                log.error('%s failed to stop cleanly',
                    str(self), exc_info=True)
                raise
            else:
                log.debug('terminated %s', str(self))
            finally:
                self.pid = '%s-terminated' % self.pid

        while True:
            if proc is None or not proc.is_alive():
                # start new worker process
                child, parent = Pipe()
                cx = _reduce_connection(parent)  # HACK reduce for pickle
                proc = run_in_subprocess(worker_process, pid, cx, *args)
                self.pid = proc.pid

            task, return_to_pool = queue.get()
            if task == STOP:
                stop()
                break

            try:
                child.send(task)
                while not child.poll(task.heartrate):
                    if not proc.is_alive():
                        broker.task_failed(task)
                        raise Error('unknown cause of death')
                    broker.heartbeat(task)
                (result, status) = child.recv()
                broker.set_result(task, result)
            except Exception:
                log.error('%s died unexpectedly', str(self), exc_info=True)
                child.close()
                proc.stdin.close()
                proc = None
            else:
                if is_debug():
                    log.debug('%s completed task', str(self))
                if status == STOP:
                    child.close()
                    proc.stdin.close()
                    proc = None
            finally:
                return_to_pool(self)
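
The core of the loop above is the heartbeat wait: send the task, then poll the child connection with a bounded timeout, emitting a heartbeat and checking the worker is still alive on every wakeup. A simplified standalone sketch follows; the worker function and the task dictionary are made up, and only the standard library is assumed.

import time
from multiprocessing import Pipe, Process

def worker(conn):
    task = conn.recv()
    time.sleep(task['duration'])          # pretend to work
    conn.send(('done', task['name']))

if __name__ == '__main__':
    child, parent = Pipe()
    proc = Process(target=worker, args=(parent,))
    proc.start()

    task = {'name': 'demo', 'duration': 1.5, 'heartrate': 0.5}
    child.send(task)
    while not child.poll(task['heartrate']):   # wake up once per heartbeat interval
        if not proc.is_alive():
            raise RuntimeError('worker died before finishing the task')
        print('heartbeat: worker still busy')
    print(child.recv())
    proc.join()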
Example #54
0
 def test_multiprocessing_pipe(self):
     parent_conn, child_conn = Pipe()
     p = Process(target=f, args=(child_conn,))
     p.start()
     for _ in range(5):
         while parent_conn.poll():
             print("Got from client", parent_conn.recv_bytes())  # prints "[42, None, 'hello']"
         time.sleep(1)
     parent_conn.send_bytes(b"stop")
     p.join()
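
The worker function f is not shown in this example. Judging from the test's comments, a plausible version keeps sending a raw byte payload and exits when the parent sends b"stop"; the sketch below is an assumption, not the original code.

def f(conn):
    # Keep sending until the parent tells us to stop.
    while True:
        conn.send_bytes(b"[42, None, 'hello']")   # what the parent prints
        if conn.poll(1) and conn.recv_bytes() == b"stop":
            break
    conn.close()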
Example #55
0
class ExternalDataManager(Process):
    def __init__(self, sim, address, port, map_path, free_move_only, hmac_key=HMAC_KEY_DEFAULT):
        Process.__init__(self, name='ExternalDataManager', sim=sim)
        self.sim = sim
        self.conn, child_conn = Pipe()
        self.service = ConnectionService(address, port, child_conn, map_path, free_move_only, hmac_key)
        self.service_process = MultiProcess(target=cherrypy.quickstart, args=(self.service, ))
        #self.service_process.daemon = True
        self.service_process.start()
        self.running = True
        self.free_move_only = free_move_only

    def run(self):
        for pers in self.sim.persons:
            if isinstance(pers, ExternalPerson):
                pers.current_coords = pers.current_coords_free_move
                pers.calculate_duration = pers.calculate_duration_free_move
                if self.free_move_only:
                    self.sim.geo.free_obj.add(pers)
        while self.running:
            sim = self.sim
            geo = self.sim.geo
            while self.conn.poll():
                person_id, node_id_start, node_id_end, x, y, time_received = self.conn.recv()
                #person_id, node_id_start, node_id_end, x, y, time_received, lat, lon = self.conn.recv()
                person = sim.get_person(person_id)
                if person is None:
                    print 'ExternalDataManager received unknown person id ', person_id, '. Discarded'
                    continue
                if not isinstance(person, ExternalPerson):
                    print 'Received ID ', person_id, ' does not belong to external person. Discarded'
                    continue
                person.last_received_coords = [x, y]
                if node_id_start is not None:
                    if person in self.sim.geo.free_obj:
                        print 'Removing person with ID ', person_id, ' from free objects set!'
                        self.sim.geo.free_obj.remove(person)
                    person.new_next_node = geo.way_nodes_by_id[geo.map_osmnodeid_nodeid[node_id_end]]
                    person.new_last_node = geo.way_nodes_by_id[geo.map_osmnodeid_nodeid[node_id_start]]
                    person.need_next_target = True
                else:
                    print 'Free move or no match found; free moving!'
                    self.sim.geo.free_obj.add(person)
                #for m in sim.monitors:
                #    if isinstance(m, SocketPlayerMonitor):
                #        m.add_heatmap_blip(lat, lon, 3, (0.0, 0.0, 1.0, 0.4))
                #lon, lat = utm.utm_to_latlong(x, y, sim.geo.zone)
                #for m in sim.monitors:
                #    if isinstance(m, SocketPlayerMonitor):
                #        m.add_heatmap_blip(lat, lon, 3, (1.0, 0.0, 0.0, 0.4))
                self.interrupt(person)
            yield hold, self, 1

    def shutdown(self):
        self.service_process.terminate()
Example #56
0
class DataPump:

    def __init__(self, fname, t0=0.0):
        """
        """
        self.fname = fname
        self._data_end, self._control_end = Pipe()
        self.process = Process(target=self._read_data, args=())
        self.process.start()
        self._control_end.send((True, t0))
        if self._control_end.poll(5.):
            self.dt = self._control_end.recv()
        else:
            warnings.warn("dt could not be retrived from video, waited 5 s",
                          RuntimeWarning)
            self.dt = np.nan

    def _read_data(self):
        """
        """

        vr = VideoReader(self.fname, color=False)
        self._data_end.send(vr.dt_ns * 1e-9)
        running = True

        while running:

            if self._data_end.poll():   # See if msg sent from get_data
                running, next_t = self._data_end.recv()   # get the time of the frame to read (from get_data)
                if running:
                    data = vr.get_frame(next_t)  # Read video frame
                    curr_t = vr.get_current_position(fmt='time')  # get time of frame from video, should be very close to self.data_t
                    self._data_end.send((data, curr_t))  # Send data via the pipe to get_data

        vr.close()

    def get_data(self, next_t):
        """
        Ask for a future frame and returns the previously asked for.
        """
        # Get previous frame and time of frame via the pipe from self._read_data
        data, curr_t = self._control_end.recv()
        # Tell self._read_data to read a new frame at time next_t
        self._control_end.send((True, next_t))

        return data, curr_t

    def close(self):
        """
        """
        self._control_end.send((False, None))
        self._control_end.close()
        self._data_end.close()
        self.process.join()
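
DataPump's pipe protocol is a ping-pong: get_data first receives the frame requested on the previous call, then requests the frame at next_t. A hypothetical usage loop is sketched below; 'movie.avi' and the step size are made up, VideoReader must be importable for the reader process to work, and dt may be NaN if the 5 s handshake timed out.

pump = DataPump('movie.avi', t0=0.0)
t = 0.0
for _ in range(10):
    frame, frame_t = pump.get_data(t + pump.dt)   # request the next frame, receive the previous one
    print('got frame at', frame_t)
    t += pump.dt
pump.close()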
Example #57
0
class SimpleQueue(object):

    def __init__(self):
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._make_methods()
        return

    def empty(self):
        return not self._reader.poll()

    def __getstate__(self):
        assert_spawning(self)
        return (self._reader,
         self._writer,
         self._rlock,
         self._wlock)

    def __setstate__(self, state):
        self._reader, self._writer, self._rlock, self._wlock = state
        self._make_methods()

    def _make_methods(self):
        recv = self._reader.recv
        racquire, rrelease = self._rlock.acquire, self._rlock.release

        def get():
            racquire()
            try:
                return recv()
            finally:
                rrelease()

        self.get = get
        if self._wlock is None:
            self.put = self._writer.send
        else:
            send = self._writer.send
            wacquire, wrelease = self._wlock.acquire, self._wlock.release

            def put(obj):
                wacquire()
                try:
                    return send(obj)
                finally:
                    wrelease()

            self.put = put
        return
Example #58
0
    def start_kernel(self, kernel_id=None, config=None, resource_limits=None):
        """ A function for starting new kernels by forking.

        :arg str kernel_id: the id of the kernel to be started.
            If no id is passed, a uuid will be generated.
        :arg IPython.config.loader config: kernel configuration.
        :arg dict resource_limits: a dict with keys resource.RLIMIT_*
            (see config_default documentation for explanation of valid options)
            and values of the limit for the given resource to be set in the
            kernel process
        :returns: kernel id and connection information which includes the
            kernel's ip, session key, and shell, heartbeat, stdin, and iopub
            port numbers
        :rtype: dict
        """
        kernel_logger.debug("start_kernel with config %s", config)
        if kernel_id is None:
            kernel_id = str(uuid.uuid4())
        if config is None:
            config = Config({"ip": self.ip})
        if resource_limits is None:
            resource_limits = {}
        config.HistoryManager.enabled = False

        dir = os.path.join(self.dir, kernel_id)
        try:
            os.mkdir(dir)
        except OSError:
            # TODO: take care of race conditions and other problems with us
            # using an 'unclean' directory
            pass
        currdir = os.getcwd()
        os.chdir(dir)

        p, q = Pipe()
        proc = Process(target=self.fork_kernel, args=(config, q, resource_limits))
        proc.start()
        os.chdir(currdir)
        # todo: yield back to the message processing while we wait
        for i in range(5):
            if p.poll(1):
                connection = p.recv()
                p.close()
                self.kernels[kernel_id] = (proc, connection)
                return {"kernel_id": kernel_id, "connection": connection}
            else:
                kernel_logger.info("Kernel %s did not start after %d seconds."
                                   % (kernel_id[:4], i))
        p.close()
        self.kill_process(proc)
        raise KernelError("Kernel start timeout.")
Example #59
0
File: runner.py Project: alvare/mdf
 def start_proc_thread_func():
     # start a child process running a pyro server and get the uri
     parent_conn, child_conn = Pipe()
     process = Process(target=_start_remote_server, args=(sys.argv, child_conn))
     process.daemon = True
     process.start()
     timeout = time.clock() + 60
     while process.is_alive() and time.clock() < timeout:
         if parent_conn.poll(1):
             break
     else:
         raise Exception("failed to start sub-process")
     uri = parent_conn.recv()
     server = Pyro4.Proxy(uri)
     server._pyroOneway.add("shutdown")
     return process, server