Example #1
def checkmultiprocess(ipqueue,cacheResult):
    if ipqueue.qsize() == 0:
        return
    processlist = []
    "如果ip数小于512,只使用一个子进程,否则则使用指定进程数,每个进程处理平均值的数量ip"
    max_threads = g_maxthreads
    maxprocess = g_useprocess
    if ipqueue.qsize() < g_maxthreads:
        max_threads = ipqueue.qsize()
        maxprocess = 1
    else:
        max_threads = (ipqueue.qsize() + g_useprocess) / g_useprocess
        if max_threads > g_maxthreads:
            max_threads = g_maxthreads
    #multiprocessing.log_to_stderr(logging.DEBUG)
    for i in xrange(0,maxprocess):
        p = Process(target=callsingleprocess,args=(ipqueue,cacheResult,max_threads))
        p.daemon = True
        processlist.append(p)
        p.start()
    
    try:
        for p in processlist:
            p.join()
    except KeyboardInterrupt:
        PRINT("need wait all process end...")
        for p in processlist:
            if p.is_alive():
                p.terminate()  
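The shape of this example — size the worker count from the queue, start daemonized children, then join with a KeyboardInterrupt fallback that terminates stragglers — is a reusable pattern. A minimal, self-contained sketch (Python 3; the worker body is a placeholder):

from multiprocessing import Process, Queue
import queue

def drain(work_queue):
    while True:
        try:
            item = work_queue.get_nowait()
        except queue.Empty:
            return
        # ... check one item here ...

def run_workers(work_queue, num_procs):
    procs = [Process(target=drain, args=(work_queue,), daemon=True)
             for _ in range(num_procs)]
    for p in procs:
        p.start()
    try:
        for p in procs:
            p.join()
    except KeyboardInterrupt:
        for p in procs:
            if p.is_alive():
                p.terminate()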
Example #2
def build_rec_multi(options, familylist):
    '''
    Run "build_reconciled_tree" on multiple threads.
    '''
    global q
    for fam in familylist:
        q.put(fam)
    global n

    for i in range(options.threads):
        p = Process(target=build_reconciled_tree, name='%i' % (i+1), args = (options,))
        p.start()
    sleep(options.threads*0.05)
    q.join()
    while n.qsize() > 0:
        for _ in range(10):
            if n.qsize() > 0:
                print n.get(),
        print ""
    sleep(options.threads*0.1)
    if p.is_alive() and q.empty():  # p is only the last process started above
        sleep(options.threads*0.2)
        if p.is_alive() and q.empty():
            p.terminate()
    return None
Example #3
def gentree_multi(options, familylist, gene_sp):
    '''
    Run "pli2tree" on multiple threads.
    '''
    global q
    for fam in familylist:
        q.put(fam)
    global n
    print "# Building tree from multiple alignment for the following families: \n"
    for i in range(options.threads):
        p = Process(target=pli2tree, name='%i' % (i+1), args = (options, gene_sp))
        p.start()
    sleep(options.threads*0.05)
    q.join()
    while n.qsize() > 0:
        for _ in range(10):
            if n.qsize() > 0:
                print n.get(),
        print ""
    sleep(options.threads*0.05)
    if p.is_alive() and q.empty():
        sleep(options.threads*0.2)
        if p.is_alive() and q.empty():
            p.terminate()
    return None
Example #4
def main():
    """
    Creates instances of the above methods and occasionally checks for crashed
    worker processes & relaunches.
    """
    worker_process = list()
    get_update_process = Process(target=get_updates)
    get_update_process.start()
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = Process(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        for index, worker in enumerate(worker_process):
            if not worker.is_alive():
                # replace the dead worker in place instead of deleting from
                # the list while iterating over it
                worker_process[index] = Process(target=process_updates)
                worker_process[index].start()
        if not time_worker.is_alive():
            time_worker = Process(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()
Example #5
    def runFunction(self, func, args, timeout=3600):
        def target(func, args, retQ):
            ret = func(*args)
            retQ.put(ret)

        retQ = Queue()
        process = Process(target=target, args=(func, args, retQ))
        process.start()
        try:
            ret = retQ.get(block=True, timeout=timeout)
        except Empty:
            ret = (-1, "function timeout, killed")
            try:
                if process.is_alive():
                    process.terminate()
                    process.join(2)
                if process.is_alive():
                    os.kill(int(process.pid), signal.SIGKILL)
                    process.join(2)
            except:
                if process.is_alive():
                    try:
                        os.kill(int(process.pid), signal.SIGKILL)
                    except:
                        pass
                    process.join(2)
        return ret
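For reference, the method above assumes Process and Queue from multiprocessing plus the Empty exception (queue.Empty in Python 3, Queue.Empty in Python 2) and the os and signal modules are imported at module level. A hypothetical call, where runner stands for whatever object defines runFunction:

import time

ret = runner.runFunction(time.sleep, (10,), timeout=2)
print(ret)  # (-1, "function timeout, killed") after roughly two seconds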
Example #6
def main(configs, timeout=10 * 60):
    makedirs('data/results', exist_ok=True)
    for config in configs:
        outpath = resultpath(config)
        if path.isfile(outpath):
            continue

        out_queue = Queue()
        def worker():
            result = None
            try:
                result = run_classification(config)
            except BaseException as exc:
                print(traceback.format_exc(), file=stderr)
                result = {'error': repr(exc)}
            out_queue.put(result)
   
        p = Process(target=worker)
        try:
            p.start()
            p.join(timeout)
            if not p.is_alive():
                result = out_queue.get()
            else:
                result = {'error': 'timed out'}
            out_obj = {'config': config,
                       'result': result}
            with gzip.open(outpath, mode='xt') as f:
                json.dump(utils.namedtuples_replaced(out_obj), f, cls=NamedtupleJSONEncoder, indent=4)
        finally:
            if p.is_alive():
                p.terminate()
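One caveat with the join-then-get ordering above: per the multiprocessing documentation, a child that has put a large object on a Queue blocks until the data is flushed to the underlying pipe, so p.join(timeout) can report a timeout even though the worker already finished. A variant that drains the queue first, assuming the same worker and timeout (sketch):

from queue import Empty

p = Process(target=worker)
p.start()
try:
    result = out_queue.get(timeout=timeout)  # drain before joining
except Empty:
    result = {'error': 'timed out'}
p.join(1)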
Example #7
  def run(self):
    timeout = getattr(self, 'timeout', None)

    sleeptime = 5
    p = Process(target=self.run_)
    p.start()
    if not timeout:
      p.join()
      status = 'normal'
    else:
      total_sleep = 0

      while total_sleep < timeout:
        time.sleep(sleeptime)
        total_sleep = total_sleep + sleeptime
        if not p.is_alive():
          p.join()
          status = 'normal'
          break

      if p.is_alive():
        p.terminate()
        status = 'timeout'
      else:
        p.join()
        status = 'normal'

    return status
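The sleep-and-poll loop above re-implements what Process.join(timeout) already provides, at a five-second polling granularity. A behaviorally equivalent sketch of the timed branch:

p = Process(target=self.run_)
p.start()
p.join(timeout)      # returns when the child exits or the timeout elapses
if p.is_alive():
    p.terminate()
    status = 'timeout'
else:
    status = 'normal'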
Example #8
def mixer_and_priority_synthesis(s_ast, synrkt, bugs, mutators, score):
  queue = Queue()

  mixer = Process(target=mixer_synthesis, args=(s_ast, synrkt, bugs,
    mutators, True, queue))

  priority = Process(target=priority_synthesis, args=(s_ast, synrkt, bugs,
    mutators, score, True, queue))

  mixer.start()
  priority.start()

  while True:
    if not queue.empty():

      # terminate all processes
      if mixer.is_alive():
        mixer.terminate()

      if priority.is_alive():
        priority.terminate()

      # display results
      (fixes, synthesizer) = queue.get()
      if fixes:
        for fix in fixes:
          print "At line " + str(fix.lineno) + " and offset " + str(fix.col_offset) 
          print "\t " + SourceVisitor().visit(fix)
      else:
        print "No solution found!"

      print "Winner: " + synthesizer

      break
    time.sleep(1)
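The race above generalizes: start several strategies, take the first result off a shared queue, and terminate the losers. A compact sketch using a blocking get instead of the one-second poll loop (the strategy functions are placeholders that put their result on the queue):

from multiprocessing import Process, Queue

def race(strategies):
    queue = Queue()
    procs = [Process(target=strategy, args=(queue,)) for strategy in strategies]
    for p in procs:
        p.start()
    result = queue.get()     # blocks until the first strategy finishes
    for p in procs:
        if p.is_alive():
            p.terminate()
    return result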
Example #9
def create_tables_force(engine, delay, retries):
    """Create the tables and **KILL ANY BLOCKING PROCESSES**.

    This command will spawn a process to create the new tables in
    order to find out which process is blocking us.  If we didn't do
    this concurrently, then the table creation will have disappeared
    by the time we tried to find its blocker in the postgres backend
    tables.

    """

    logger.info('Running table creator named %s', app_name)
    logger.warning('Running with force=True option %s', app_name)

    from multiprocessing import Process
    p = Process(target=create_graph_tables, args=(engine, delay))
    p.start()
    time.sleep(delay)

    if p.is_alive():
        logger.warning('Table creation blocked!')
        kill_blocking_psql_backend_processes(engine)

        #  Wait some time for table creation to proceed
        time.sleep(4)

    if p.is_alive():
        if retries <= 0:
            raise RuntimeError('Max retries exceeded.')

        logger.warning('Table creation failed, retrying.')
        return create_tables_force(engine, delay, retries-1)
Example #10
    def collect(self, targetdir, timestamp, delta):
        from logging import root
        from multiprocessing import Process
        from os import getpid
        from .. import multiprocessing_logger

        # We want to copy the files in a child process, so in case the filesystem is stuck, we won't get stuck too
        kwargs = dict(targetdir=targetdir, timestamp=timestamp, delta=delta)
        try:
            [logfile_path] = [
                handler.target.baseFilename for handler in root.handlers if self._is_my_kind_of_logging_handler(handler)
            ] or [None]
        except ValueError:
            logfile_path = None
        subprocess = Process(
            target=multiprocessing_logger,
            args=(logfile_path, getpid(), Windows_Event_Logs.collect_process),
            kwargs=kwargs,
        )
        subprocess.start()
        subprocess.join(self.timeout_in_seconds)
        if subprocess.is_alive():
            msg = "Did not finish collecting {!r} within the {} seconds timeout_in_seconds"
            logger.error(msg.format(self, self.timeout_in_seconds))
            subprocess.terminate()
            if subprocess.is_alive():
                logger.error("Subprocess {!r} is stuck".format(subprocess))
            else:
                logger.info("Subprocess {!r} terminated".format(subprocess))
            raise TimeoutError()
        elif subprocess.exitcode:
            logger.error("Subprocess {!r} returned non-zero exit code".format(subprocess))
            raise RuntimeError(subprocess.exitcode)
Example #11
class LiveviewServerProtocol(WebSocketServerProtocol):
    def __init__(self, endpoint_url):
        print("LiveviewServerProtocol init.")
        self.endpoint_url = endpoint_url

    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))
        self.queue = Queue()
        self.process = Process(target=liveview_main, args=(self.endpoint_url, self.queue))
        self.process.start()

    #     @asyncio.coroutine
    def onOpen(self):
        # debug loop: floods the client forever, so everything below it,
        # including the queue-draining code, is unreachable
        while True:
            self.sendMessage("UNCHI".encode("utf-8"), False)
            time.sleep(0.001)
        print("WebSocket connection open.")
        if self.process and self.process.is_alive():
            while True:
                if not self.queue.empty():
                    payload = self.queue.get()
                    print(payload)
                    self.sendMessage(payload)

    #                 yield from asyncio.sleep(0.001)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
        if self.process and self.process.is_alive():
            self.process.terminate()

    # this is hack that enables dynamic protocol initialization
    def __call__(self):
        return self
Example #12
  def run(self, output_prefix, run_dict, cmd_dict):
    self.set_params(run_dict)
    self.cmd_dict = cmd_dict

    timeout = getattr(self, 'timeout', None)

    sleeptime = 5
    p = Process(target=self.run_)
    p.start()
    if not timeout:
      p.join()
      status = 'normal'
    else:
      total_sleep = 0

      while total_sleep < timeout:
        time.sleep(sleeptime)
        total_sleep = total_sleep + sleeptime
        if not p.is_alive():
          p.join()
          status = 'normal'
          break

      if p.is_alive():
        p.terminate()
        status = 'timeout'
      else:
        p.join()
        status = 'normal'

    return status
Example #13
class UDPPipe(Pipe):

    class PipeHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            data = self.request[0]
            print("Got data from " + str(self.client_address) + ": " + str(data))
            socket = self.request[1]
            socket.sendto(data, (self.server.reader_ip, self.server.reader_port))

    def __init__(self, host, port):
        self.server = SocketServer.UDPServer((host, port), UDPPipe.PipeHandler)
        self.server.writer_ip = None
        self.server.reader_ip = None
        self.server.reader_port = None
        self.server_proc = Process(target=self.server.serve_forever)

    @property
    def writer_ip(self):
        return self.server.writer_ip

    @writer_ip.setter
    def writer_ip(self, ip):
        if self.server_proc.is_alive():
            raise Exception('Cannot modify Pipe writer_ip while the server is running')

        self.server.writer_ip = ip

    @property
    def reader_ip(self):
        return self.server.reader_ip

    @reader_ip.setter
    def reader_ip(self, ip):
        if self.server_proc.is_alive():
            raise Exception('Cannot modify Pipe reader_ip while the server is running')

        self.server.reader_ip = ip

    @property
    def reader_port(self):
        return self.server.reader_port

    @reader_port.setter
    def reader_port(self, port):
        if self.server_proc.is_alive():
            raise Exception("Can not modify Pipe reader_port when server is running")

        self.server.reader_port = port

    def run(self):
        if (self.writer_ip is None) or (self.reader_ip is None):
            raise Exception('You must specify writer_ip and reader_ip before running the Pipe')

        self.server_proc.start()

    def stop(self):
        self.server.shutdown()
        self.server.server_close()
        self.server_proc.join()
Example #14
    def test_kills_process(self):

        p = Process(target=time.sleep, args=(100,))
        p.start()
        self.assertTrue(p.is_alive())
        kill_tree(p.pid)
        p.join(1)
        self.assertFalse(p.is_alive())
Example #15
def data_handler(spigot_data: SpigotData, lock: threading.Lock):
    """
    Main thread for reading output from spigot IO worker.
    Also interprets data.

    spigot_data - shared SpigotData object
    """
    client, child = Pipe()

    t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
    t.start()

    spigot_data.game.status = SpigotState.RUNNING

    running = True

    while running:
        while t.is_alive():
            if client.poll(0.3):
                buf = client.recv()
                parse_event(spigot_data, buf)
                spigot_data.add_message(buf)
                logging.debug('<<OUTPUT>> {}'.format(buf))

            while not spigot_data.commands.empty():
                command = spigot_data.commands.get()
                client.send(command)
                logging.debug('<<COMMAND>> {}'.format(command))

        # After java process is dead
        spigot_data.status = SpigotState.STOPPED
        spigot_data.add_message(info_message("""The server has stopped. Type 'start' to start. """
                                             """Type 'quit' to close Spigot Monitor"""))  # PEP8ers gonna hate

        spigot_data.game.players = {}  # No players are available on a stopped server...

        while not AUTO_RESTART:
            command = spigot_data.commands.get().strip()  # strip because commands have newline appended
            if command.lower() == 'start':
                t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
                t.start()
                logging.debug('Process created.')
                break
            elif command.lower() == 'quit' or command.lower() == 'stop':
                message = info_message("KTHXBAI")
                spigot_data.add_message(message)
                logging.debug('Quitting program.')
                break

        if AUTO_RESTART:
            t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
            t.start()
            logging.debug('Process created.')

        if not t.is_alive():  # thread hasn't started again
            running = False

    print('exiting data_handler loop')
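Stripped of the server logic, the Pipe handling in data_handler is a small duplex loop: poll with a timeout, recv what arrived, send what is queued. A minimal sketch of that pattern, with an echo worker standing in for r_w_worker:

from multiprocessing import Pipe, Process

def echo_worker(conn):
    while True:
        msg = conn.recv()          # blocks until the parent sends
        conn.send('echo: ' + msg)

if __name__ == '__main__':
    parent_end, child_end = Pipe()
    worker = Process(target=echo_worker, args=(child_end,), daemon=True)
    worker.start()
    parent_end.send('ping')
    if parent_end.poll(0.3):       # wait up to 300 ms for a reply
        print(parent_end.recv())   # echo: ping
    worker.terminate()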
Example #16
class AlppacaIntegrationTest(object):
    def __init__(self, config):
        self.config = config
        self.mock_job = Process(target=self.run_api_server_mock)
        self.alppaca_job = Process(target=self.run_alppaca)

    def __enter__(self):
        self.mock_job.start()
        # Ensure the mock IMS is fully(!) up and running before starting Alppaca.
        # Otherwise, Alppaca will see the failure and start its backoff
        # behaviour.
        time.sleep(0.5)
        self.alppaca_job.start()
        time.sleep(0.5)
        return self

    def __exit__(self, *args):
        self.mock_job.terminate()
        self.alppaca_job.terminate()

        self.mock_job.join(10)
        self.alppaca_job.join(10)

        mock_alive = self.mock_job.is_alive()
        if mock_alive:
            os.kill(self.mock_job.pid, signal.SIGKILL)

        alppaca_alive = self.alppaca_job.is_alive()
        if alppaca_alive:
            os.kill(self.alppaca_job.pid, signal.SIGKILL)

        if mock_alive or alppaca_alive:
            raise Exception("Processe(s) that ignored SIGTERM: "
                            "API mock server: %s  Alppaca: %s" % (
                            mock_alive, alppaca_alive))

    def run_alppaca(self):
        daemon = AlppacaDaemon(pid_file="not used")
        daemon.config = self.config
        daemon.setup_logging()
        daemon.run()

    def run_api_server_mock(self):
        MockIms().run()

    def test_alppaca_returns_given_role(self):
        url = 'http://{host}:{port}/latest/meta-data/iam/security-credentials/'.format(
            host=self.config['bind_ip'], port=self.config['bind_port'])
        response = requests.get(url)

        assert response.status_code == 200, \
            "Response status code should be 200, was: '{0}'".format(response.status_code)
        assert(response.text == 'test_role'), \
            "Response text should be 'test_role', was: '{0}'".format(response.text)

    def execute(self):
        self.test_alppaca_returns_given_role()
Example #17
def MakeTiles():
    ### STAGE 1 ###
    
    for kapPath in Resources.lstBsbFiles:
        if pleaseContinue:
            
            #we need to wait for this process to finish before moving on
            kapToVrtProc = Process(target=KapToVrt, args=(kapPath,))
            kapToVrtProc.start()
            while kapToVrtProc.is_alive():
                sleep(.1)
            
            vrtPath = kapPath[0:-4] + ".vrt"
            #print vrtPath
            if os.path.isfile(vrtPath):
                
                zxyFullPath = Resources.getTempDir() + "/" + kapPath.split("/")[-1][0:-4] + ".zxy/"
                tileError = True
                
                try:
                    vrtToTilesProc = Process(target=VrtToTiles, args=(Resources.getTempDir(), vrtPath, FindZoom.getKapZoom(kapPath),))
                    vrtToTilesProc.start()
                    while vrtToTilesProc.is_alive():
                        sleep(.1)
                    if os.path.isdir(zxyFullPath):
        #                    if len(os.listdir(zxyFullPath)) > 0:
        #                        tileError = False
                        for subDir in os.listdir(zxyFullPath):
                            if os.path.isdir(zxyFullPath + subDir):
                                tileError = False
                except:
                    print "VrtToTiles failed!"

                if tileError:
                    print "ERROR gdal_tiler failed processing chart: ", kapPath.split("/")[-1]
                    Resources.lstBsbErrorFiles.append(kapPath.split("/")[-1][0:-4])
                ###
                
                os.remove(vrtPath) #clean up vrt files
            else:
                ###map2gdal failed because there is no vrt file...
                print vrtPath
                print "ERROR map2gdal failed processing chart: ", kapPath.split("/")[-1]
                Resources.lstBsbErrorFiles.append(kapPath.split("/")[-1][0:-4])
            
            
#            mp = Process(target=TileMap, args=(kapPath, parent.currentChart, resourcePipeB, signalPipeA))
#            mp.start()
            
            #Event is bound to Step4.nextStage(self, event)
            wx.PostEvent(app, InThreadEvent(advance=False))
    
    #posting this event signals to parent window that the task is finished
    if pleaseContinue:
        #Event is bound to Step4.nextStage(self, event)
        wx.PostEvent(app, InThreadEvent(advance=True))
Example #18
    def test_does_not_fail(self):

        p = Process(target=child_process)
        p.start()
        self.assertTrue(p.is_alive())
        time.sleep(.1)
        kill_tree(p.pid)
        p.join(1)
        self.assertFalse(p.is_alive())
Example #19
def main(argv):
    #specify host name and port number on the command line.
    TCP_IP = sys.argv[1] 
    TCP_PORT = int(sys.argv[2])  
    BUFFER_SIZE = 5000

    #--connect to server--#
    #create sock stream and connect  
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((TCP_IP, TCP_PORT))
    s.settimeout(.2)

    name = getUsersHandle()
    s.sendall(networkKeys.ACTIONS.name + name + "\n")

    ############################################################
    ####  Start curses process #################################
    ############################################################
    cursesEnd, networkEnd = Pipe(duplex=True)
    cursesProcess = Process(target=cursesIO.cursesEngine, args=(cursesEnd,))
    cursesProcess.start()

    while cursesProcess.is_alive():

        #get messages from curses
        if networkEnd.poll():
            msg = networkEnd.recv()
            log("(NET MSG-FROM-CURSES):" + str(type(msg)) + str(msg) + "\n")

            #send curses message to server
            s.sendall(msg + "\n")

        #get update from server
        try:
            updateFromServer = s.recv(BUFFER_SIZE)
            updateFromServer = updateFromServer.decode()
            log("(NET MSG-TO-CURSES):" + updateFromServer + "\n")
            if not updateFromServer:
                networkEnd.send("/DISCONNECTED FROM SERVER\n")
                break
            #send network update to curses
            networkEnd.send(updateFromServer)
        except socket.timeout:
            pass

    cursesProcess.join()  # wait for the curses process without busy-waiting

    ############################################################
    #### curses process has ended ##############################
    ############################################################

    #send termination message s.send(MESSAGE.encode())
    log("(NET): closing connection")
    #close connection
    s.close()
Example #20
  def run(self):
    manager = Manager()
    joystickState = manager.dict()
    joystickStateNum = Value('d', 0.0)
    acousticStateNum = Value('d', 0.0)
    heartbeatValue = Array('i', [0]*3)

    acousticProcess = Process(target=self.acousticPlayer, args=(joystickStateNum, acousticStateNum))
    joystickReaderProcess = Process(target=self.joystickReader, args=(joystickState, joystickStateNum))
    joystickUpdaterProcess = Process(target=self.joystickUpdater, args=(joystickState, joystickStateNum, acousticStateNum, heartbeatValue))

    try:
      # Start the processes
      joystickReaderProcess.start()
      joystickUpdaterProcess.start()
      acousticProcess.start()

      # control heartbeat
      maxBrightness = 50
      increment = 1
      increasing = True
      brightness = 0
      while True:
        # If all processes are running, show blue/green heartbeat
        if acousticProcess.is_alive() and joystickReaderProcess.is_alive() and joystickUpdaterProcess.is_alive():
          heartbeatValue[0] = 0
          heartbeatValue[1] = maxBrightness-brightness
          heartbeatValue[2] = brightness
        # Otherwise show red/orange heartbeat
        else:
          heartbeatValue[0] = 25
          heartbeatValue[1] = 0
          heartbeatValue[2] = brightness/2
        # if joystick updater is running, then it will set the LEDs
        if not joystickUpdaterProcess.is_alive():
          self._leds.setPixel(0, *heartbeatValue)
          self._leds.show()
        
        brightness += increment * (1 if increasing else -1)
        if brightness > maxBrightness-2:
          increasing = False
        if brightness < 2:
          increasing = True
          
        sleep(0.05)
    except:
      pass

    # Something went wrong: kill the process
    acousticProcess.terminate()
    joystickReaderProcess.terminate()
    joystickUpdaterProcess.terminate()
    # show a red LED then exit
    self._leds.clearStrip()
    self._leds.setPixel(0, 25, 0, 0)
    self._leds.show()
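The heartbeat loop above depends on Value and Array giving both processes a view of the same memory. A minimal sketch of that mechanism in isolation (Python 3):

from multiprocessing import Array, Process, Value

def writer(flag, rgb):
    flag.value = 1.0               # visible to the parent
    for i in range(3):
        rgb[i] = 25

if __name__ == '__main__':
    flag = Value('d', 0.0)         # shared double, like acousticStateNum
    rgb = Array('i', [0] * 3)      # shared int array, like heartbeatValue
    p = Process(target=writer, args=(flag, rgb))
    p.start()
    p.join()
    print(flag.value, rgb[:])      # 1.0 [25, 25, 25]

By default both Value and Array are created with a lock, so single reads and writes like these are process-safe.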
Example #21
class Breathe(object):

    def __init__(self):
        GPIO.setmode(GPIO.BCM) # We can also choose the BOARD numbering scheme.
        GPIO.setup(21, GPIO.OUT) # set GPIO 21 as output
        self.light = GPIO.PWM(21, 100) # create object for PWM on port 21 at 100 Hertz
        self.p = Process(target=calm, args=(self.light,))
        self.state = breathe_state(('CALM', 'ERRATIC', 'STOP'))
        self.restart_state = self.state.CALM

    def shutdown(self):
        if (self.p.is_alive()):
            self.p.terminate()
        self.light.stop()
        GPIO.cleanup()
        print("breathing shut down.")
        
    def restart(self):
        print("restarting breathing!")
        if (self.p.is_alive()):
            self.p.terminate()
        self.light.stop()
        GPIO.cleanup()
        
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(21, GPIO.OUT)
        self.light = GPIO.PWM(21, 100)
        self.light.start(0)
        if (self.restart_state == self.state.CALM):
            self.p = Process(target=calm, args=(self.light,))
            self.p.start()
        elif (self.restart_state == self.state.ERRATIC):
            self.p = Process(target=erratic, args=(self.light,))
            self.p.start()
        else:
            self.shutdown()

    def calm(self):
        if (self.p.is_alive()):
            self.p.terminate()
        self.light.stop()
        self.p = Process(target=calm, args=(self.light,))
        self.p.start()
        print("calm breathing")
        
    def erratic(self):
        if (self.p.is_alive()):
            self.p.terminate()
        self.light.stop()
        self.p = Process(target=erratic, args=(self.light,))
        self.p.start()
        print("erratic breathing")

    def set(self, state):
        self.restart_state = state
        print("state has been set:", self.restart_state)
Example #22
def main(main_proc: object, main_cpipe: object):
    """ Listen on main_cpipe for signals.  Depending on what signal is recieved
    it will start new child processes.

    """

    window_dict = {}

    while main_proc.is_alive():
        try:
            signal, data = main_cpipe.recv()
        except KeyboardInterrupt:
            break

        if signal == 'quit':
            break
        if signal == 'refresh':
            # Make sure all children exit.
            logging.info('\n'.join([f'PID: {t.pid} of {t}' for t in active_children()]))
            for pid, proc in window_dict.items():
                logging.info(f'PROCESS: {pid}')
        if signal == 'new-proc':
            proc = Process(target=run_browser, args=(data,))
            proc.start()
            main_cpipe.send(('proc-pid', proc.pid))
            logging.info(f"MAIN_LOOP NEW_PROC: {data}")
            window_dict[proc.pid] = proc
            logging.info(f"child pid: {proc.pid}")
            logging.info(f'window_dict: {window_dict}')

        elif signal == 'terminate':
            proc = window_dict.pop(data, None)
            if proc:
                logging.info(f'Joining pid: {data}')
                proc.join(1)
                if proc.is_alive():
                    logging.info(f"Terminating: {proc}")
                    proc.terminate()

    logging.info("Quitting")

    logging.info(window_dict)
    for pid, proc in window_dict.items():
        logging.info(f"Joining: {proc}")
        proc.join(1)
        if proc.is_alive():
            logging.info(f"Terminating: {proc}")
            proc.terminate()

    # Make sure all children exit.
    active_children()

    return
Example #23
def pfam_scan_multi(options):
    '''
    Run pfam_scan to recover the domains/protein families associated to
    each sequence.
    '''
    print "# Running PfamScan over the input sequences...\n"
    handle = open(options.outdir+"query.fasta", "r")
    n_seq = 0
    for line in handle:
        if line[0] == ">":
            n_seq = n_seq+1
    handle.close()
    if options.force and os.path.exists(options.outdir+"query_temp.pfam"):
        os.remove(options.outdir+"query_temp.pfam")
    if n_seq < 100 or options.threads == 1:
        os.system("perl "+os.path.abspath(options.stdir).replace(" ","\ ")+"" \
            "/pfam_scan.pl -e_dom "+str(options.pcut)+" -e_seq " \
            ""+str(options.pcut)+" -cpu "+str(options.threads)+" -fasta " \
            ""+os.path.abspath(options.outdir).replace(" ","\ ")+"/query.fasta"\
            " -outfile "+os.path.abspath(options.outdir).replace(" ","\ ")+"" \
            "/query_temp0.pfam -d " \
            ""+os.path.abspath(options.dbdir).replace(" ","\ "))
        return 1
    elif n_seq >= 100 or options.threads > 1:
        i, n_threads = split_query_fasta(options, n_seq)
        global q
        num_files = i+1
        if options.force:
            for j in range(num_files):
                if os.path.exists(options.outdir+"query_temp"+str(j)+".pfam"):
                    os.remove(options.outdir+"query_temp"+str(j)+".pfam")
        for j in range(num_files):
            q.put(j)

        sleep(options.threads*0.05)

        for j in range(n_threads):
            p = Process(target=pfam_scan_mp, name='%i' % (j+1), 
                args = (options,))
            p.start()

        sleep(options.threads*0.05)

        q.join()            

        sleep(options.threads*0.05)

        if p.is_alive() and q.empty():
            sleep(options.threads*0.2)
            if p.is_alive() and q.empty():
                p.terminate()

        return num_files
Example #24
def calculator_1_process():

    a = Process(target=prime_calculator, args=(1, 10000))

    start_time = time.time()
    a.start()
    a_boolean = a.is_alive()

    while a_boolean:
        a_boolean = a.is_alive()

    print("--- 1 process took %s seconds --- \n" % (time.time() - start_time))
Example #25
def __tcp_server_routine(server, verbosity=False):

    conn, addr = server.accept()
    p = Process(target=__tcp_server_process, args=(conn, addr, verbosity))
    # ps -ax | grep main.py
    p.start()
    # conn.close()
    print p, p.is_alive()
    time.sleep(2)
    print p, p.is_alive()

    return True
Example #26
    def access_email_inbox(self):
        print("access email functions call begin...")
        p = Process(target=self.my_sub_process)
        p.start()
        print("I am parent %d" % (os.getpid()))
        print("access email functions call end...")
        time.sleep(6)
        if p.is_alive():
            p.terminate()
        print('child process status: %s' % (p.is_alive()))
Example #27
def calculator_2_process():

    a = Process(target=prime_calculator, args=(1, 5000))
    b = Process(target=prime_calculator, args=(5000, 10000))

    start_time = time.time()
    a.start()
    b.start()
    a_boolean, b_boolean = a.is_alive(), b.is_alive()

    while a_boolean or b_boolean:
        a_boolean, b_boolean = a.is_alive(), b.is_alive()

    print("--- 2 process took %s seconds --- \n" % (time.time() - start_time))
Example #28
    def bootstrap(self):
        print_head()
        self.verify_environment()
        self.load_config()
        self.bootstrap_storage_mixin()
        self.bootstrap_plugins()
        self.verify_plugin_settings()

        puts("Bootstrapping complete.")
        puts("\nStarting core processes:")
        # Scheduler
        scheduler_thread = Process(target=self.bootstrap_scheduler)
        # scheduler_thread.daemon = True

        # Bottle
        bottle_thread = Process(target=self.bootstrap_bottle)
        # bottle_thread.daemon = True

        # XMPP Listener
        xmpp_thread = Process(target=self.bootstrap_xmpp)
        # xmpp_thread.daemon = True

        with indent(2):
            try:
                # Start up threads.
                xmpp_thread.start()
                scheduler_thread.start()
                bottle_thread.start()
                errors = self.get_startup_errors()
                if len(errors) > 0:
                    default_room = self.get_room_from_name_or_id(settings.DEFAULT_ROOM)["room_id"]
                    error_message = "FYI, I ran into some problems while starting up:"
                    for err in errors:
                        error_message += "\n%s\n" % err
                    self.send_room_message(default_room, error_message, color="yellow")
                    puts(colored.red(error_message))

                while True:
                    time.sleep(100)
            except (KeyboardInterrupt, SystemExit):
                scheduler_thread.terminate()
                bottle_thread.terminate()
                xmpp_thread.terminate()
                print '\n\nReceived keyboard interrupt, quitting threads.',
                while (scheduler_thread.is_alive() or
                       bottle_thread.is_alive() or
                       xmpp_thread.is_alive()):
                        sys.stdout.write(".")
                        sys.stdout.flush()
                        time.sleep(0.5)
Example #29
def force_create_graph_tables(engine, delay, retries):

    print 'Running table creator named', name
    p = Process(target=create_graph_tables, args=(engine, delay*2))
    p.start()

    time.sleep(1)

    if not p.is_alive():
        print('Process not blocked.')
        return p.join()

    else:
        print('Process is blocked, waiting {} seconds for unlock'
              .format(delay))
        time.sleep(delay)

    # If p has ended, the block was cleared without intervention
    if not p.is_alive():
        print('Process unblocked without killing anything.')
        return p.join()

    # Lookup blocking processes
    blocking = list(engine.execute(text(blocking_SQL), name=name))

    # Check if any high importance process is blocking us
    if is_blocked_by_no_kill(blocking):
        if retries <= 0:
            raise RuntimeError('Max retries exceeded')
        print('Trying again in {} seconds ({} retries remaining)'.format(
            delay, retries))
        time.sleep(delay)
        return force_create_graph_tables(engine, delay, retries-1)

    # Kill blocking processes
    for proc in blocking:
        bd_appname, bd_pid, bing_appname, bing_pid, query = proc

        # Skip other table_creators
        if bing_appname.startswith(name_root):
            continue

        # Kill anything in the way, it was deemed of low importance
        print('Killing blocking backend process: name({})\tpid({}): {}'
              .format(bing_appname, bing_pid, query))
        engine.execute(text('select pg_terminate_backend(:bing_pid);'),
                       bing_pid=bing_pid)

    return p.join()
Example #30
def load_link(browser, link):
    ''' Return true if load successful, false otherwise. '''

    while True:

        p = Process(target=browser_get, args=(browser, link))
        p.start()
        p.join(LOAD_TIME)
        if p.is_alive():
            p.terminate()
        else:
            break

    while True:

        wait_time = READY_TIME
        start_time = time.time()

        ''' Wait for page to have completely loaded. '''
        while True:
            state = browser.execute_script('return document.readyState;')
            if state == 'complete':
                return True
            if time.time() - start_time > wait_time:
                logging.info("Document %s not ready after %ds", link, wait_time)
                break
            time.sleep(1)

        wait_time = wait_time * READY_RATIO
        if wait_time > MAX_READY_TIME * READY_RATIO:
            logging.error("Skipping document %s.  Was never ready.", link)
            return False
        else:
            logging.info("Increasing wait time to %ds", wait_time)
Example #31
class LightBluePebble(object):
    """ a wrapper for LightBlue that provides Serial-style read, write and close"""

    def __init__(self, id, should_pair, debug_protocol=False, connection_process_timeout=60):

        self.mac_address = id
        self.debug_protocol = debug_protocol
        self.should_pair = should_pair

        manager = multiprocessing.Manager()
        self.send_queue = manager.Queue()
        self.rec_queue = manager.Queue()

        self.bt_teardown = multiprocessing.Event()
        self.bt_message_sent = multiprocessing.Event()
        self.bt_connected = multiprocessing.Event()

        self.bt_socket_proc = Process(target=self.run)
        self.bt_socket_proc.daemon = True
        self.bt_socket_proc.start()

        # wait for a successful connection from child process before returning to main process
        self.bt_connected.wait(connection_process_timeout)
        if not self.bt_connected.is_set():
            raise LightBluePebbleError(id, "Connection timed out, LightBlueProcess was provided %d seconds to complete connecting" % connection_process_timeout)

    def write(self, message):
        """ send a message to the LightBlue processs"""
        try:
            self.send_queue.put(message)
            self.bt_message_sent.wait()
        except:
            self.bt_teardown.set()
            if self.debug_protocol:
                log.debug("LightBlue process has shutdown (queue write)")

    def read(self):
        """ read a pebble message from the LightBlue processs"""
        try:
            tup = self.rec_queue.get()
            return tup
        except Queue.Empty:
            return (None, None, None, '')
        except:
            self.bt_teardown.set()
            if self.debug_protocol:
                log.debug("LightBlue process has shutdown (queue read)")
            return (None, None, None, '')

    def close(self):
        """ close the LightBlue connection process"""
        self.bt_teardown.set()

    def is_alive(self):
        return self.bt_socket_proc.is_alive()

    def run(self):
        """ create bluetooth process paired to mac_address, must be run as a process"""
        from lightblue import pair, socket as lb_socket, finddevices, selectdevice

        def autodetect(self):
            list_of_pebbles = list()

            if self.mac_address is not None and len(self.mac_address) == 4:
                # we have the friendly name, let's get the full mac address
                log.warn("Going to get full address for device %s, ensure device is broadcasting." % self.mac_address)
                # scan for active devices
                devices = finddevices(timeout=8)

                for device in devices:
                    if re.search(r'Pebble ' + self.mac_address, device[1], re.IGNORECASE):
                        log.debug("Found Pebble: %s @ %s" % (device[1], device[0]))
                        list_of_pebbles.append(device)

                if len(list_of_pebbles) == 1:
                    return list_of_pebbles[0][0]
                else:
                    raise LightBluePebbleError(self.mac_address, "Failed to find Pebble")
            else:
                # no pebble id was provided... give them the GUI selector
                try:
                    return selectdevice()[0]
                except TypeError:
                    log.warn("failed to select a device in GUI selector")
                    self.mac_address = None

        # notify that the process has started
        log.debug("LightBlue process has started on pid %d" % os.getpid())

        # do we need to autodetect?
        if self.mac_address is None or len(self.mac_address) == 4:
            self.mac_address = autodetect(self)

        # create the bluetooth socket from the mac address
        if self.should_pair and self.mac_address is not None:
            pair(self.mac_address)
        try:
            self._bts = lb_socket()
            self._bts.connect((self.mac_address, 1))  # pebble uses RFCOMM port 1
            self._bts.setblocking(False)
        except:
            raise LightBluePebbleError(self.mac_address, "Failed to connect to Pebble")

        # give them the mac address for using in faster connections
        log.debug("Connection established to " + self.mac_address)

        # Tell our parent that we have a pebble connected now
        self.bt_connected.set()

        send_data = e = None
        while not self.bt_teardown.is_set():
            # send anything in the send queue
            try:
                send_data = self.send_queue.get_nowait()
                self._bts.send(send_data)
                if self.debug_protocol:
                    log.debug("LightBlue Send: %r" % send_data)
                self.bt_message_sent.set()
            except Queue.Empty:
                pass
            except (IOError, EOFError):
                self.bt_teardown.set()
                e = "Queue Error while sending data"

            # if anything is received relay it back
            rec_data = None
            try:
                rec_data = self._bts.recv(4)
            except (socket.timeout, socket.error):
                # Exception raised from timing out on nonblocking
                pass

            if (rec_data is not None) and (len(rec_data) == 4):
                # check the Stream Multiplexing Layer message and get the length of the data to read
                size, endpoint = unpack("!HH", rec_data)
                resp = ''
                while len(resp) < size:
                    try:
                        resp += self._bts.recv(size-len(resp))
                    except (socket.timeout, socket.error):
                        # Exception raised from timing out on nonblocking
                        # TODO: Should probably have some kind of timeout here
                        pass
                try:
                    if self.debug_protocol:
                        log.debug("{}: {} {} ".format(endpoint, resp, rec_data))
                    self.rec_queue.put(("watch", endpoint, resp, rec_data))

                except (IOError, EOFError):
                    self.bt_teardown.set()
                    e = "Queue Error while receiving data"
                if self.debug_protocol:
                    log.debug("LightBlue Read: %r " % resp)

        # just let it die silently whenever the parent dies and it throws an EOFError
        if e is not None and self.debug_protocol:
            raise LightBluePebbleError(self.mac_address, "LightBlue polling loop closed due to " + e)
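The bt_connected handshake — the child signals readiness, the parent waits with a timeout — is the load-bearing part of this class. A minimal sketch of that Event pattern, independent of the Bluetooth details:

from multiprocessing import Event, Process

def child(ready):
    # ... perform slow connection setup here ...
    ready.set()                    # tell the parent we are connected

if __name__ == '__main__':
    ready = Event()
    p = Process(target=child, args=(ready,), daemon=True)
    p.start()
    if not ready.wait(timeout=60): # False if the child never signalled
        raise RuntimeError('connection timed out')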
Example #32
# Using a function as a child-process target
from multiprocessing import Process
from time import sleep

a = 1


def worker(sec, msg):
    # When worker runs as a child process, modifying the global variable a
    # only changes a's value inside the child; the parent is unaffected.
    global a
    a = 1000
    for i in range(3):
        sleep(sec)
        print("the worker msg", msg)
    print(a)

p = Process(name='worker', target=worker,
            args=(2,), kwargs={'msg': 'You are a big man'})

p.start()

# process attributes
print("process name:", p.name)
print("process PID:", p.pid)
print("process alive:", p.is_alive())

p.join()
print('parent:', a)
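A portability note on this example: the isolation of the global a holds because the child runs in its own interpreter state. Under the default fork start method on Unix that state is copied at Process.start(); under spawn (the default on Windows, and on macOS since Python 3.8) the module is re-imported in the child, so module-level code like this needs an if __name__ == '__main__': guard around the Process creation and start to avoid the child re-executing it.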
Example #33
class Device(object):
    JOIN_TIMEOUT = 1.0

    SYNC_TX_CHUNK_SIZE = 0
    CONTINUOUS_TX_CHUNK_SIZE = 0

    DATA_TYPE = np.float32

    class Command(Enum):
        STOP = 0
        SET_FREQUENCY = 1
        SET_SAMPLE_RATE = 2
        SET_BANDWIDTH = 3
        SET_RF_GAIN = 4
        SET_IF_GAIN = 5
        SET_BB_GAIN = 6
        SET_DIRECT_SAMPLING_MODE = 7
        SET_FREQUENCY_CORRECTION = 8
        SET_CHANNEL_INDEX = 9
        SET_ANTENNA_INDEX = 10

    ASYNCHRONOUS = False

    DEVICE_LIB = None
    DEVICE_METHODS = {
        Command.SET_FREQUENCY.name: "set_center_freq",
        Command.SET_SAMPLE_RATE.name: "set_sample_rate",
        Command.SET_BANDWIDTH.name: "set_bandwidth",
        Command.SET_RF_GAIN.name: "set_rf_gain",
        Command.SET_IF_GAIN.name: {"rx": "set_if_rx_gain", "tx": "set_if_tx_gain"},
        Command.SET_BB_GAIN.name: {"rx": "set_baseband_gain"}
    }

    @classmethod
    def get_device_list(cls):
        return []

    @classmethod
    def process_command(cls, command, ctrl_connection, is_tx: bool):
        is_rx = not is_tx
        if command == cls.Command.STOP.name:
            return cls.Command.STOP.name

        tag, value = command

        try:
            if isinstance(cls.DEVICE_METHODS[tag], str):
                method_name = cls.DEVICE_METHODS[tag]
            elif isinstance(cls.DEVICE_METHODS[tag], dict):
                method_name = cls.DEVICE_METHODS[tag]["rx" if is_rx else "tx"]
            else:
                method_name = None
        except KeyError:
            method_name = None

        if method_name:
            try:
                try:
                    check_method_name = cls.DEVICE_METHODS[tag+"_get_allowed_values"]
                    allowed_values = getattr(cls.DEVICE_LIB, check_method_name)()
                    next_allowed = min(allowed_values, key=lambda x: abs(x-value))
                    if value != next_allowed:
                        ctrl_connection.send("{}: {} not in range of supported values. Assuming {}".format(
                            tag, value, next_allowed
                        ))
                        value = next_allowed
                except (KeyError, AttributeError):
                    pass

                ret = getattr(cls.DEVICE_LIB, method_name)(value)
                ctrl_connection.send("{0} to {1}:{2}".format(tag, value, ret))
            except AttributeError as e:
                logger.warning(str(e))

    @classmethod
    def setup_device(cls, ctrl_connection: Connection, device_identifier):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def init_device(cls, ctrl_connection: Connection, is_tx: bool, parameters: OrderedDict) -> bool:
        if cls.setup_device(ctrl_connection, device_identifier=parameters["identifier"]):
            for parameter, value in parameters.items():
                cls.process_command((parameter, value), ctrl_connection, is_tx)
            return True
        else:
            return False

    @classmethod
    def adapt_num_read_samples_to_sample_rate(cls, sample_rate: float):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def shutdown_device(cls, ctrl_connection, is_tx: bool):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def enter_async_receive_mode(cls, data_connection: Connection, ctrl_connection: Connection) -> int:
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def prepare_sync_receive(cls, ctrl_connection: Connection):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def receive_sync(cls, data_conn: Connection):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def enter_async_send_mode(cls, callback: object):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def prepare_sync_send(cls, ctrl_connection: Connection):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def send_sync(cls, data):
        raise NotImplementedError("Overwrite this method in subclass!")

    @classmethod
    def device_receive(cls, data_connection: Connection, ctrl_connection: Connection, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=False, parameters=dev_parameters):
            ctrl_connection.send("failed to start rx mode")
            return False

        try:
            cls.adapt_num_read_samples_to_sample_rate(dev_parameters[cls.Command.SET_SAMPLE_RATE.name])
        except NotImplementedError:
            # Many SDRs like HackRF or AirSpy do not need to calculate SYNC_RX_CHUNK_SIZE
            # as default values are either fine or given by the hardware
            pass

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_receive_mode(data_connection, ctrl_connection)
        else:
            ret = cls.prepare_sync_receive(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start rx mode")
            return False

        exit_requested = False
        ctrl_connection.send("successfully started rx mode")

        while not exit_requested:
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.25)
                except KeyboardInterrupt:
                    pass
            else:
                cls.receive_sync(data_connection)
            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=False)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        cls.shutdown_device(ctrl_connection, is_tx=False)
        data_connection.close()
        ctrl_connection.close()

    @classmethod
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=True, parameters=dev_parameters):
            ctrl_connection.send("failed to start tx mode")
            return False

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            ret = cls.prepare_sync_send(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start tx mode")
            return False

        exit_requested = False
        buffer_size = cls.CONTINUOUS_TX_CHUNK_SIZE if send_config.continuous else cls.SYNC_TX_CHUNK_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        ctrl_connection.send("successfully started tx mode")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    pass
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if not cls.ASYNCHRONOUS:
            # Some Sync send calls (e.g. USRP) are not blocking, so we wait a bit here to ensure
            # that the send buffer on the SDR is cleared
            time.sleep(0.75)

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(cls.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(cls.__name__))

        cls.shutdown_device(ctrl_connection, is_tx=True)
        ctrl_connection.close()

    def __init__(self, center_freq, sample_rate, bandwidth, gain, if_gain=1, baseband_gain=1,
                 resume_on_full_receive_buffer=False):
        super().__init__()

        self.error_not_open = -4242

        self.__bandwidth = bandwidth
        self.__frequency = center_freq
        self.__gain = gain  # = rf_gain
        self.__if_gain = if_gain
        self.__baseband_gain = baseband_gain
        self.__sample_rate = sample_rate

        self.__channel_index = 0
        self.__antenna_index = 0

        self.__freq_correction = 0
        self.__direct_sampling_mode = 0
        self.bandwidth_is_adjustable = True

        self.is_in_spectrum_mode = False
        self.sending_is_continuous = False
        self.continuous_send_ring_buffer = None
        self.num_samples_to_send = None  # None = get automatically. This value needs to be known in continuous send mode
        self._current_sent_sample = Value("L", 0)
        self._current_sending_repeat = Value("L", 0)

        self.success = 0
        self.error_codes = {}
        self.device_messages = []

        self.receive_process_function = self.device_receive
        self.send_process_function = self.device_send

        self.parent_data_conn, self.child_data_conn = Pipe(duplex=False)
        self.parent_ctrl_conn, self.child_ctrl_conn = Pipe()
        self.send_buffer = None
        self.send_buffer_reader = None

        self.device_serial = None
        self.device_number = 0

        self.samples_to_send = np.array([], dtype=self.DATA_TYPE)
        self.sending_repeats = 1  # How often shall the sending sequence be repeated? 0 = forever

        self.resume_on_full_receive_buffer = resume_on_full_receive_buffer  # for Spectrum Analyzer or Protocol Sniffing
        self.current_recv_index = 0
        self.is_receiving = False
        self.is_transmitting = False

        self.device_ip = "192.168.10.2"  # For USRP and RTLSDRTCP

        self.receive_buffer = None

        self.spectrum_x = None
        self.spectrum_y = None

        self.apply_dc_correction = False

    def _start_read_rcv_buffer_thread(self):
        self.read_recv_buffer_thread = threading.Thread(target=self.read_receiving_queue)
        self.read_recv_buffer_thread.daemon = True
        self.read_recv_buffer_thread.start()

    def _start_read_message_thread(self):
        self.read_dev_msg_thread = threading.Thread(target=self.read_device_messages)
        self.read_dev_msg_thread.daemon = True
        self.read_dev_msg_thread.start()

    @property
    def has_multi_device_support(self):
        return False

    @property
    def current_sent_sample(self):
        return self._current_sent_sample.value // 2

    @current_sent_sample.setter
    def current_sent_sample(self, value: int):
        self._current_sent_sample.value = value * 2

    @property
    def current_sending_repeat(self):
        return self._current_sending_repeat.value

    @current_sending_repeat.setter
    def current_sending_repeat(self, value: int):
        self._current_sending_repeat.value = value

    @property
    def device_parameters(self) -> OrderedDict:
        return OrderedDict([(self.Command.SET_FREQUENCY.name, self.frequency),
                            (self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
                            (self.Command.SET_BANDWIDTH.name, self.bandwidth),
                            (self.Command.SET_RF_GAIN.name, self.gain),
                            (self.Command.SET_IF_GAIN.name, self.if_gain),
                            (self.Command.SET_BB_GAIN.name, self.baseband_gain),
                            ("identifier", self.device_serial)])

    @property
    def send_config(self) -> SendConfig:
        if self.num_samples_to_send is None:
            total_samples = len(self.send_buffer)
        else:
            total_samples = 2 * self.num_samples_to_send
        return SendConfig(self.send_buffer, self._current_sent_sample, self._current_sending_repeat,
                          total_samples, self.sending_repeats, continuous=self.sending_is_continuous,
                          iq_to_bytes_method=self.iq_to_bytes,
                          continuous_send_ring_buffer=self.continuous_send_ring_buffer)

    @property
    def receive_process_arguments(self):
        return self.child_data_conn, self.child_ctrl_conn, self.device_parameters

    @property
    def send_process_arguments(self):
        return self.child_ctrl_conn, self.send_config, self.device_parameters

    def init_recv_buffer(self):
        if self.receive_buffer is None:
            num_samples = SettingsProxy.get_receive_buffer_size(self.resume_on_full_receive_buffer,
                                                                self.is_in_spectrum_mode)
            self.receive_buffer = IQArray(None, dtype=self.DATA_TYPE, n=int(num_samples))

    def log_retcode(self, retcode: int, action: str, msg=""):
        msg = str(msg)
        error_code_msg = self.error_codes[retcode] if retcode in self.error_codes else "Error Code: " + str(retcode)

        if retcode == self.success:
            if msg:
                formatted_message = "{0}-{1} ({2}): Success".format(type(self).__name__, action, msg)
            else:
                formatted_message = "{0}-{1}: Success".format(type(self).__name__, action)
            logger.info(formatted_message)
        else:
            if msg:
                formatted_message = "{0}-{1} ({4}): {2} ({3})".format(type(self).__name__, action, error_code_msg,
                                                                      retcode, msg)
            else:
                formatted_message = "{0}-{1}: {2} ({3})".format(type(self).__name__, action, error_code_msg, retcode)
            logger.error(formatted_message)

        self.device_messages.append(formatted_message)

    @property
    def received_data(self):
        return self.receive_buffer[:self.current_recv_index]

    @property
    def sent_data(self):
        return self.samples_to_send[:self.current_sent_sample]

    @property
    def sending_finished(self):
        return self.current_sent_sample == len(self.samples_to_send)

    @property
    def bandwidth(self):
        return self.__bandwidth

    @bandwidth.setter
    def bandwidth(self, value):
        if not self.bandwidth_is_adjustable:
            return

        if value != self.__bandwidth:
            self.__bandwidth = value
            self.set_device_bandwidth(value)

    def set_device_bandwidth(self, bw):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_BANDWIDTH.name, int(bw)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def frequency(self):
        return self.__frequency

    @frequency.setter
    def frequency(self, value):
        if value != self.__frequency:
            self.__frequency = value
            self.set_device_frequency(value)

    def set_device_frequency(self, value):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_FREQUENCY.name, int(value)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def gain(self):
        return self.__gain

    @gain.setter
    def gain(self, value):
        if value != self.__gain:
            self.__gain = value
            self.set_device_gain(value)

    def set_device_gain(self, gain):
        try:
            # Do not cast gain to int here, as it may be float e.g. for normalized USRP gain or LimeSDR gain
            self.parent_ctrl_conn.send((self.Command.SET_RF_GAIN.name, gain))
        except (BrokenPipeError, OSError):
            pass

    @property
    def if_gain(self):
        return self.__if_gain

    @if_gain.setter
    def if_gain(self, value):
        if value != self.__if_gain:
            self.__if_gain = value
            self.set_device_if_gain(value)

    def set_device_if_gain(self, if_gain):
        try:
            # Do not cast gain to int here, as it may be float e.g. for normalized USRP gain or LimeSDR gain
            self.parent_ctrl_conn.send((self.Command.SET_IF_GAIN.name, if_gain))
        except (BrokenPipeError, OSError):
            pass

    @property
    def baseband_gain(self):
        return self.__baseband_gain

    @baseband_gain.setter
    def baseband_gain(self, value):
        if value != self.__baseband_gain:
            self.__baseband_gain = value
            self.set_device_baseband_gain(value)

    def set_device_baseband_gain(self, baseband_gain):
        try:
            # Do not cast gain to int here, as it may be float e.g. for normalized USRP gain or LimeSDR gain
            self.parent_ctrl_conn.send((self.Command.SET_BB_GAIN.name, baseband_gain))
        except (BrokenPipeError, OSError):
            pass

    @property
    def sample_rate(self):
        return self.__sample_rate

    @sample_rate.setter
    def sample_rate(self, value):
        if value != self.__sample_rate:
            self.__sample_rate = value
            self.set_device_sample_rate(value)

    def set_device_sample_rate(self, sample_rate):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_SAMPLE_RATE.name, int(sample_rate)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def channel_index(self) -> int:
        return self.__channel_index

    @channel_index.setter
    def channel_index(self, value: int):
        if value != self.__channel_index:
            self.__channel_index = value
            self.set_device_channel_index(value)

    def set_device_channel_index(self, value):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_CHANNEL_INDEX.name, int(value)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def antenna_index(self):
        return self.__antenna_index

    @antenna_index.setter
    def antenna_index(self, value):
        if value != self.__antenna_index:
            self.__antenna_index = value
            self.set_device_antenna_index(value)

    def set_device_antenna_index(self, value):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_ANTENNA_INDEX.name, int(value)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def freq_correction(self):
        return self.__freq_correction

    @freq_correction.setter
    def freq_correction(self, value):
        if value != self.__freq_correction:
            self.__freq_correction = value
            self.set_device_freq_correction(value)

    def set_device_freq_correction(self, value):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_FREQUENCY_CORRECTION.name, int(value)))
        except (BrokenPipeError, OSError):
            pass

    @property
    def direct_sampling_mode(self):
        return self.__direct_sampling_mode

    @direct_sampling_mode.setter
    def direct_sampling_mode(self, value):
        if value != self.__direct_sampling_mode:
            self.__direct_sampling_mode = value
            self.set_device_direct_sampling_mode(value)

    def set_device_direct_sampling_mode(self, value):
        try:
            self.parent_ctrl_conn.send((self.Command.SET_DIRECT_SAMPLING_MODE.name, int(value)))
        except (BrokenPipeError, OSError):
            pass

    def start_rx_mode(self):
        self.init_recv_buffer()
        self.parent_data_conn, self.child_data_conn = Pipe(duplex=False)
        self.parent_ctrl_conn, self.child_ctrl_conn = Pipe()

        self.is_receiving = True
        logger.info("{0}: Starting RX Mode".format(self.__class__.__name__))
        self.receive_process = Process(target=self.receive_process_function,
                                       args=self.receive_process_arguments)
        self.receive_process.daemon = True
        self._start_read_rcv_buffer_thread()
        self._start_read_message_thread()
        try:
            self.receive_process.start()
        except OSError as e:
            logger.error(repr(e))
            self.device_messages.append(repr(e))

    def stop_rx_mode(self, msg):
        try:
            self.parent_ctrl_conn.send(self.Command.STOP.name)
        except (BrokenPipeError, OSError) as e:
            logger.debug("Closing parent control connection: " + str(e))

        logger.info("{0}: Stopping RX Mode: {1}".format(self.__class__.__name__, msg))

        if hasattr(self, "receive_process") and self.receive_process.is_alive():
            self.receive_process.join(self.JOIN_TIMEOUT)
            if self.receive_process.is_alive():
                logger.warning("{0}: Receive process is still alive, terminating it".format(self.__class__.__name__))
                self.receive_process.terminate()
                self.receive_process.join()

        self.is_receiving = False
        for connection in (self.parent_ctrl_conn, self.parent_data_conn, self.child_ctrl_conn, self.child_data_conn):
            try:
                connection.close()
            except OSError as e:
                logger.exception(e)

    def start_tx_mode(self, samples_to_send: np.ndarray = None, repeats=None, resume=False):
        self.is_transmitting = True
        self.parent_ctrl_conn, self.child_ctrl_conn = Pipe()
        self.init_send_parameters(samples_to_send, repeats, resume=resume)

        logger.info("{0}: Starting TX Mode".format(self.__class__.__name__))

        self.transmit_process = Process(target=self.send_process_function,
                                        args=self.send_process_arguments)

        self.transmit_process.daemon = True
        self._start_read_message_thread()
        self.transmit_process.start()

    def stop_tx_mode(self, msg):
        try:
            self.parent_ctrl_conn.send(self.Command.STOP.name)
        except (BrokenPipeError, OSError) as e:
            logger.debug("Closing parent control connection: " + str(e))

        logger.info("{0}: Stopping TX Mode: {1}".format(self.__class__.__name__, msg))

        if hasattr(self, "transmit_process") and self.transmit_process.is_alive():
            self.transmit_process.join(self.JOIN_TIMEOUT)
            if self.transmit_process.is_alive():
                logger.warning("{0}: Transmit process is still alive, terminating it".format(self.__class__.__name__))
                self.transmit_process.terminate()
                self.transmit_process.join()

        self.is_transmitting = False
        try:
            self.parent_ctrl_conn.close()
        except OSError as e:
            logger.exception(e)

        try:
            self.child_ctrl_conn.close()
        except OSError as e:
            logger.exception(e)

    @staticmethod
    def bytes_to_iq(buffer) -> np.ndarray:
        pass

    @staticmethod
    def iq_to_bytes(complex_samples: np.ndarray):
        pass

    def read_device_messages(self):
        while self.is_receiving or self.is_transmitting:
            try:
                message = self.parent_ctrl_conn.recv()
                try:
                    splitted = message.split(":")
                    action = ":".join(splitted[:-1])
                    return_code = splitted[-1]
                    self.log_retcode(int(return_code), action)
                except ValueError:
                    self.device_messages.append("{0}: {1}".format(self.__class__.__name__, message))
            except (EOFError, UnpicklingError, OSError, ConnectionResetError) as e:
                logger.info("Exiting read device message thread due to " + str(e))
                break
        self.is_transmitting = False
        self.is_receiving = False
        logger.debug("Exiting read device errors thread")

    def read_receiving_queue(self):
        while self.is_receiving:
            try:
                byte_buffer = self.parent_data_conn.recv_bytes()
                samples = self.bytes_to_iq(byte_buffer)
                n_samples = len(samples)
                if n_samples == 0:
                    continue

                if self.apply_dc_correction:
                    samples = samples - np.mean(samples, axis=0)

            except OSError as e:
                logger.exception(e)
                continue
            except EOFError:
                logger.info("EOF Error: Ending receive thread")
                break

            if self.current_recv_index + n_samples >= len(self.receive_buffer):
                if self.resume_on_full_receive_buffer:
                    self.current_recv_index = 0
                    if n_samples >= len(self.receive_buffer):
                        n_samples = len(self.receive_buffer) - 1
                else:
                    self.stop_rx_mode(
                        "Receiving buffer is full {0}/{1}".format(self.current_recv_index + n_samples,
                                                                  len(self.receive_buffer)))
                    return

            self.receive_buffer[self.current_recv_index:self.current_recv_index + n_samples] = samples[:n_samples]
            self.current_recv_index += n_samples

        logger.debug("Exiting read_receive_queue thread.")

    def init_send_parameters(self, samples_to_send: IQArray = None, repeats: int = None, resume=False):
        if samples_to_send is not None:
            if isinstance(samples_to_send, IQArray):
                samples_to_send = samples_to_send.convert_to(self.DATA_TYPE)
            else:
                samples_to_send = IQArray(samples_to_send).convert_to(self.DATA_TYPE)

            self.samples_to_send = samples_to_send
            self.send_buffer = None

        if self.send_buffer is None:
            if isinstance(self.samples_to_send, IQArray):
                self.send_buffer = self.iq_to_bytes(self.samples_to_send.data)
            else:
                self.send_buffer = self.iq_to_bytes(self.samples_to_send)
        elif not resume:
            self.current_sending_repeat = 0

        if repeats is not None:
            self.sending_repeats = repeats
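
The Device class above stores samples as interleaved IQ data: each complex sample occupies two scalar slots in the flat send buffer, which is why current_sent_sample halves the raw counter and send_config doubles num_samples_to_send. A minimal sketch of that convention, with hypothetical helper names iq_to_flat/flat_to_iq that are not part of the class:

import numpy as np

def iq_to_flat(complex_samples):
    # One complex sample (I + jQ) becomes two scalars in the flat buffer.
    flat = np.empty(2 * len(complex_samples), dtype=np.float32)
    flat[0::2] = complex_samples.real  # I components at even indices
    flat[1::2] = complex_samples.imag  # Q components at odd indices
    return flat

def flat_to_iq(flat):
    return flat[0::2] + 1j * flat[1::2]

samples = np.array([1 + 2j, 3 + 4j], dtype=np.complex64)
assert np.allclose(flat_to_iq(iq_to_flat(samples)), samples)
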
Example #34
class Worker(object):
    """This class is used for poller and reactionner to work.
    The worker is a process launch by theses process and read Message in a Queue
    (self.s) (slave)
    They launch the Check and then send the result in the Queue self.m (master)
    they can die if they do not do anything (param timeout)

    """

    _id = 0  # None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _control_q = None

    def __init__(self,
                 _id,
                 slave_q,
                 returns_queue,
                 processes_by_worker,
                 mortal=True,
                 timeout=300,
                 max_plugins_output_length=8192,
                 target=None,
                 loaded_into='unknown',
                 http_daemon=None):
        self._id = self.__class__._id
        self.__class__._id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.slave_q = None
        self.processes_by_worker = processes_by_worker
        self._control_q = Queue()  # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target = self.work
        self._process = Process(target=self._prework,
                                args=(target, slave_q, returns_queue,
                                      self._control_q))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False
        # Keep track of where the worker was launched from (poller or reactionner?)
        self.loaded_into = loaded_into
        if os.name != 'nt':
            self.http_daemon = http_daemon
        else:  # the Windows forker does not like pickling http/lock objects
            self.http_daemon = None

    def _prework(self, real_work, *args):
        """Simply drop the BrokHandler before doing the real_work"""
        for handler in list(logger.handlers):
            if isinstance(handler, BrokHandler):
                logger.info("Cleaning BrokHandler %r from logger.handlers..",
                            handler)
                logger.removeHandler(handler)
        real_work(*args)

    def is_mortal(self):
        """
        Accessor to _mortal attribute

        :return: A boolean indicating if the worker is mortal or not.
        :rtype: bool
        """
        return self._mortal

    def start(self):
        """
        Start the worker. Wrapper for calling start method of the process attribute

        :return: None
        """
        self._process.start()

    def terminate(self):
        """
        Wrapper for calling terminate method of the process attribute
        Also close queues (input and output) and terminate queues thread

        :return: None
        """
        # We can just terminate process, not threads
        self._process.terminate()
        # If we are using a Manager(), the queues
        # do not have these close/join_thread methods
        if hasattr(self._control_q, 'close'):
            self._control_q.close()
            self._control_q.join_thread()
        if hasattr(self.slave_q, 'close'):
            self.slave_q.close()
            self.slave_q.join_thread()

    def join(self, timeout=None):
        """
         Wrapper for calling join method of the process attribute

        :param timeout: time to wait for the process to terminate
        :type timeout: int
        :return: None
        """
        self._process.join(timeout)

    def is_alive(self):
        """
        Wrapper for calling is_alive method of the process attribute

        :return: A boolean indicating if the process is alive
        :rtype: bool
        """
        return self._process.is_alive()

    def is_killable(self):
        """
        Determine whether the process is killable:

        * process is mortal
        * idletime > timeout

        :return: a boolean indicating if it is killable
        :rtype: bool
        """
        return self._mortal and self._idletime > self._timeout

    def add_idletime(self, time):
        """
        Increment idletime

        :param time: time to increment in seconds
        :type time: int
        :return: None
        """
        self._idletime += time

    def reset_idle(self):
        """
        Reset idletime (set to 0)

        :return: None
        """
        self._idletime = 0

    def send_message(self, msg):
        """
        Wrapper for calling put method of the _control_q attribute

        :param msg: the message to put in queue
        :type msg: str
        :return: None
        """
        self._control_q.put(msg)

    def set_zombie(self):
        """
        Set the process as zombie (mortal to False)

        :return:None
        """
        self._mortal = False

    def get_new_checks(self):
        """
        Get new checks if fewer than nb_checks_max are queued.
        If no new checks were fetched and none are in the queue, sleep for 1 sec.
        REF: doc/alignak-action-queues.png (3)

        :return: None
        """
        try:
            while len(self.checks) < self.processes_by_worker:
                # print "I", self._id, "wait for a message"
                msg = self.slave_q.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                # print "I", self._id, "I've got a message!"
        except Empty:
            if len(self.checks) == 0:
                self._idletime += 1
                time.sleep(1)
        # Maybe the Queue() is not available; if so, just return
        # and get back to work later
        except IOError:
            return
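
A minimal sketch of how a poller-like parent might drive the Worker above. Here echo_work and the queue payloads are hypothetical stand-ins for the real work function and Message objects, and the module-level logger and BrokHandler referenced by _prework are assumed to exist as in the original project:

from multiprocessing import Queue

def echo_work(slave_q, returns_queue, control_q):
    # Hypothetical stand-in for Worker.work: consume one item, echo a result.
    returns_queue.put(('done', slave_q.get()))

if __name__ == '__main__':
    slave_q, returns_queue = Queue(), Queue()
    worker = Worker(0, slave_q, returns_queue, processes_by_worker=1,
                    target=echo_work)
    slave_q.put('check-42')
    worker.start()
    print(returns_queue.get())   # ('done', 'check-42')
    worker.join(timeout=5)
    if worker.is_alive():        # still running: force-kill it
        worker.terminate()
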
Example #35
class Controller(tk.Frame):
    def __init__(self, parent, pipe_beacon):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.parent.resizable(width=False, height=False)
        self.parent.iconbitmap(resource_path("assets/satellite.ico"))
        self.parent.title("Dream2space Ground Station")

        # Scan for serial ports
        ports = self.scan_serial_ports()
        self.ports = ports

        # Pipe for beacon
        self.pipe_beacon = pipe_beacon

        # List of pending missions
        self.pending_mission_list = []

        # List of executing missions
        self.current_mission_list = []

        # Put all pages into container
        self.container = tk.Frame(self.parent)
        self.container.grid()
        self.make_start_page()

        # Poll and check for missions to execute
        self.mission_execution_check()

    # Initializing method to create Start Page
    def make_start_page(self):
        self.start = StartPage(self.container, self)

    # Scan Serial ports in PC
    def scan_serial_ports(self):
        ports = []
        if sys.platform.startswith('win'):
            ports = ['COM%s' % (i + 1) for i in range(256)]
        elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
            # this excludes your current terminal "/dev/tty"
            ports = glob.glob('/dev/tty[A-Za-z]*')

        result = []
        for port in ports:
            try:
                s = serial.Serial(port)
                s.close()
                result.append(port)
            except (OSError, serial.SerialException):
                continue

        result.insert(0, " ")

        # In testing, add dummy entries
        if IS_TESTING:
            result.append("COM14")
            result.append("COM15")

        return result

    # Handle transition from Start Page to Main Page
    # Upon pressing button to select Serial ports
    def handle_transition(self):
        # Extract ports selected
        self.port_ttnc = self.start.get_ttnc_port()
        self.port_payload = self.start.get_payload_port()

        # Ports selected are wrong -> Prompt users to reselect
        if self.port_ttnc == self.port_payload or self.port_ttnc == " " or self.port_payload == " ":
            # Same or empty ports selected
            self.start.set_port_warning_message()

        # Ports selected are correct -> Proceed to generate main page
        else:
            # Pass ttnc serial object via pipe to thread
            self.pipe_beacon.send(self.port_ttnc)

            # Erase Start Page
            self.container.grid_forget()
            self.container = tk.Frame(self.parent)
            self.container.pack()

            # Generate Container to store new page
            self.main_page = MainPage(parent=self.container, controller=self, beacon_pipe=self.pipe_beacon,
                                      width=app_param.APP_WIDTH, height=app_param.APP_HEIGHT)

    # Handles Housekeeping Process after button pressed
    def handle_hk_process_start(self):
        if IS_TESTING:
            self.housekeeping_process = Process(target=sample_hk_command_process, daemon=True)  # Testing
        else:
            self.is_hk_process_success = False
            self.prev_file_number = len(os.listdir(
                app_param.HOUSEKEEPING_DATA_FOLDER_FILEPATH))
            self.housekeeping_process = Process(target=process_get_HK_logs, daemon=True,
                                                args=(self.pipe_beacon, self.port_ttnc, ))
        self.housekeeping_process.start()

        # Disable mission and housekeeping commands
        self.main_page.show_disable_command_after_hk_command()

    # Checks regularly if housekeeping process is complete
    def hk_process_checking(self):
        # If process still alive, continue to check
        if self.housekeeping_process.is_alive():
            self.after(100, self.hk_process_checking)

        # If process ended, inform user
        else:
            is_success = False

            if not IS_TESTING:
                # Determine if telecommand obtaining is successful
                curr_number_files = len(os.listdir(
                    app_param.HOUSEKEEPING_DATA_FOLDER_FILEPATH))
                if curr_number_files > self.prev_file_number:
                    self.is_hk_process_success = True
                    self.prev_file_number = curr_number_files
            else:
                self.is_hk_process_success = True

            # Housekeeping data parsing success - open up explorer
            if self.is_hk_process_success:
                path = os.path.relpath(
                    app_param.HOUSEKEEPING_DATA_FOLDER_FILEPATH)
                if sys.platform.startswith('win'):
                    os.startfile(path)
                elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
                    subprocess.check_call(['xdg-open', '--', path])

                # display success message
                is_success = True

            # Housekeeping data parsing failed
            else:
                # display fail message
                is_success = False

            # Update screens
            self.main_page.show_enable_command_after_hk_command()
            self.main_page.show_status_after_hk_command(is_success)

            # Undo flag
            self.is_hk_process_success = False

    # Open mission window
    def open_mission_downlink_command_window(self):
        self.mission_window = MissionWindow(self.parent, self)

    # Handle mission checking and scheduling after submission on the mission window
    def handle_mission_scheduling(self):

        def validate_mission(mission_input, pending_mission_list, current_mission_list):
            # print(mission_input)

            # Validate if (1) mission time is after current time, (2) downlink time after mission time
            is_mission_time_future = mission_input.mission_datetime > datetime.datetime.now()
            is_downlink_after_mission = mission_input.downlink_datetime > mission_input.mission_datetime
            num_mission = len(self.pending_mission_list)

            # Generate a complete list of current and pending missions
            merge_list = pending_mission_list+current_mission_list

            # Validate that (1) the new mission is not within 15 sec of an existing mission
            # and (2) the new downlink is not within 540 sec (9 min) of an existing downlink
            is_new_mission_allowed = True
            for mission in merge_list:
                if abs(mission.mission_datetime - mission_input.mission_datetime).total_seconds() <= 15:
                    is_new_mission_allowed = False
                    break
                if abs(mission.downlink_datetime - mission_input.downlink_datetime).total_seconds() <= 3 * 3 * 60:
                    is_new_mission_allowed = False
                    break

            if is_new_mission_allowed and is_mission_time_future and is_downlink_after_mission and num_mission < 3:
                return True
            else:
                return False

        # Do input validation
        mission = self.mission_window.get_user_mission_input()
        is_valid_input = validate_mission(mission, self.pending_mission_list, self.current_mission_list)

        if is_valid_input:
            # Close top window
            self.mission_window.handle_mission_success()

            # Add into pending mission list
            self.pending_mission_list.append(mission)
            self.pending_mission_list.sort(key=lambda x: x.downlink_datetime)  # Sort on earliest downlink datetime
            # print(self.pending_mission_list)

            # Send CCSDS mission command to Cubesat
            if IS_TESTING:
                self.mission_command_process = Process(target=sample_mission_command_process, daemon=True)  # Testing
            else:
                self.mission_command_process = Process(target=process_send_mission_telecommand, daemon=True, args=(
                    mission, self.pipe_beacon, self.port_ttnc, ))
            self.mission_command_process.start()

            # Update screens
            self.main_page.show_disable_command_after_mission_command()
            self.main_page.update_pending_mission_table(self.pending_mission_list)

        else:
            # Input time is not valid
            num_current_missions = len(self.pending_mission_list)
            self.mission_window.display_error_message(num_current_missions)

    def mission_execution_check(self):
        # print(f"CHECK! {datetime.datetime.now()}")

        num_mission = len(self.pending_mission_list)
        # print(self.pending_mission_list)
        if num_mission != 0:
            # Check top most mission item
            # Start collection process if within 2 minutes of downlink
            earliest_mission = self.pending_mission_list[0]
            upcoming_downlink_datetime = earliest_mission.downlink_datetime

            if upcoming_downlink_datetime - datetime.datetime.now() < datetime.timedelta(seconds=120):
                print(f"less than 2 minutes to mission!!")
                self.current_mission_list.append(earliest_mission)
                del self.pending_mission_list[0]

                if IS_TESTING:
                    self.downlink_process = Process(target=sample_downlink_process, daemon=True)  # Testing
                else:
                    self.downlink_process = Process(
                        target=process_handle_downlink, daemon=True,
                        args=(self.port_payload, earliest_mission.get_mission_name(),
                              earliest_mission.get_mission_datetime_string(),
                              earliest_mission.get_downlink_datetime_string(),))

                self.downlink_process.start()

                # Render on the missions screens
                self.main_page.update_pending_mission_table(self.pending_mission_list)
                self.main_page.update_current_mission_table(self.current_mission_list)

        try:
            if len(self.current_mission_list) != 0 and not self.downlink_process.is_alive():
                del self.current_mission_list[0]
                self.main_page.update_current_mission_table(self.current_mission_list)
        except AttributeError:
            pass

        self.after(app_param.APP_DOWNLINK_PROCESS_CHECK_INTERVAL, self.mission_execution_check)

    # Respond to button pressed when User wishes to view completed Missions/Downlink
    def view_completed_missions(self):

        # If no mission created yet, not records found
        # Show popup warning
        if not os.path.exists(app_param.GROUND_STN_MISSION_LOG_FILEPATH):
            messagebox.showerror(title="Dream2space Ground Station",
                                 message="Mission records not found!\nTry to send a mission command.")

        else:
            if sys.platform.startswith('win'):
                # Replace slash with backslash
                os.startfile(app_param.GROUND_STN_MISSION_LOG_FILEPATH.replace("/", "\\"))

                path = os.path.relpath(app_param.GROUND_STN_MISSION_FOLDER_PATH)
                os.startfile(path)
            else:
                os.startfile(app_param.GROUND_STN_MISSION_LOG_FILEPATH)
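
The rules enforced by validate_mission above boil down to a few datetime comparisons. Below is a standalone sketch of the same checks; is_schedulable and the tuple format of existing are hypothetical, and the pending-list size cap of 3 is omitted:

import datetime

def is_schedulable(mission_dt, downlink_dt, existing, now=None):
    # Rule 1: mission must lie in the future, downlink after the mission.
    now = now or datetime.datetime.now()
    if mission_dt <= now or downlink_dt <= mission_dt:
        return False
    # Rule 2: stay >15 s from other missions, >540 s from other downlinks.
    for other_mission_dt, other_downlink_dt in existing:
        if abs((other_mission_dt - mission_dt).total_seconds()) <= 15:
            return False
        if abs((other_downlink_dt - downlink_dt).total_seconds()) <= 540:
            return False
    return True
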
Example #36

    if cfg.getSetting('enable_plexgdm') == 'True':
        if PlexGDM.Run() > 0:
            param['IP_PMS'] = PlexGDM.getIP_PMS()
            param['Port_PMS'] = PlexGDM.getPort_PMS()
            param['Addr_PMS'] = param['IP_PMS'] + ':' + param['Port_PMS']

    dprint('PlexConnect', 0, "PMS: {0}", param['Addr_PMS'])

    if cfg.getSetting('enable_dnsserver') == 'True':
        p_DNSServer = Process(target=DNSServer.Run,
                              args=(pipe_DNSServer[1], param))
        p_DNSServer.start()

        time.sleep(0.1)
        if not p_DNSServer.is_alive():
            dprint('PlexConnect', 0, "DNSServer not alive. Shutting down.")
            sys.exit(1)

    p_WebServer = Process(target=WebServer.Run,
                          args=(pipe_WebServer[1], param))
    p_WebServer.start()

    time.sleep(0.1)
    if not p_WebServer.is_alive():
        dprint('PlexConnect', 0, "WebServer not alive. Shutting down.")
        if cfg.getSetting('enable_dnsserver') == 'True':
            pipe_DNSServer[0].send('shutdown')
            p_DNSServer.join()
        sys.exit(1)
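
The fragment above uses a common launch pattern: start the child process, sleep briefly, then treat an already-dead process as a failed startup. A generic sketch of that pattern, with a hypothetical run_service worker:

import sys
import time
from multiprocessing import Process

def run_service():
    time.sleep(5)   # stand-in for a long-running server loop

if __name__ == '__main__':
    p = Process(target=run_service)
    p.start()
    time.sleep(0.1)        # give the child a moment to fail on startup errors
    if not p.is_alive():   # it died immediately: abort instead of limping on
        print("service failed to start, shutting down")
        sys.exit(1)
    p.join()
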
Example #37

        #for i in range(top_k_results):
        #   print(labels[top_k_indices[i]], predictions[top_k_indices[i]] / 255.0)
        if detections is not None:
            label = '%s: %d%%' % (labels[predicted_label],
                                  int((detections[predicted_label] / 255.0) *
                                      100))
            cv2.putText(frame, label, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 0), 2)
            frame = cv2.resize(frame, (500, 500))
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) == ord('q'):
                break
            # update the FPS counter
            fps.update()
    else:
        cv2.destroyAllWindows()
        break

# stop the timer and display FPS information
fps.stop()
time.sleep(1)
print(p, p.is_alive())
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
print("process id: ", os.getpid())
print("parent process:", os.getppid())
#p.terminate()
p.kill()
#p.join()
video.release()
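
The fps object in this fragment follows the start/update/stop plus elapsed/fps interface of the imutils-style FPS counter. A minimal self-contained equivalent, offered as a sketch rather than the library's actual implementation:

import datetime

class FPS:
    """Minimal stand-in for the imutils-style FPS counter used above."""
    def __init__(self):
        self._start = self._end = None
        self._frames = 0

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def update(self):
        self._frames += 1  # call once per processed frame

    def stop(self):
        self._end = datetime.datetime.now()

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._frames / self.elapsed()
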
Example #38
"""
1. First create the main process:
    A: create the child process
    B: start the child process

The main process must not die before the child process does:
"""
"""
The child process prints the numbers 1-1000
The main process prints the letter A

Get the pid of each process
"""
import os
from multiprocessing import Process, current_process


def put_num():
    for i in range(1, 1001):  # inclusive of 1000, as the docstring says
        print(i, end=" ")


if __name__ == '__main__':
    print("A")
    print(os.getpid())
    p = Process(target=put_num)
    p.start()
    if p.is_alive():
        print("B")
    print(current_process())
    # p.join()
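
The docstring above stresses that the main process must not die before the child. A compact sketch of why the commented-out join() matters, using a hypothetical chatter worker: a daemon child is killed as soon as the parent exits, so without the join its output may be cut short.

import time
from multiprocessing import Process

def chatter():
    for i in range(5):
        print("child:", i)
        time.sleep(0.2)

if __name__ == '__main__':
    p = Process(target=chatter)
    p.daemon = True   # daemon children are terminated when the parent exits
    p.start()
    # Without this join, the parent may exit first and silently kill the child.
    p.join()
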
Example #39
class Worker:
    """This class is used for poller and reactionner to work.
    The worker is a process launch by theses process and read Message in a Queue
    (self.s) (slave)
    They launch the Check and then send the result in the Queue self.m (master)
    they can die if they do not do anything (param timeout)

    """

    id = 0  # None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _c = None

    def __init__(self, id, s, returns_queue, processes_by_worker,
                 mortal=True, timeout=300, max_plugins_output_length=8192,
                 target=None, loaded_into='unknown', http_daemon=None):
        self.id = self.__class__.id
        self.__class__.id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.s = None
        self.processes_by_worker = processes_by_worker
        self._c = Queue()  # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target = self.work
        self._process = Process(target=target, args=(s, returns_queue, self._c))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False
        # Keep track of where the worker was launched from (poller or reactionner?)
        self.loaded_into = loaded_into
        self.http_daemon = http_daemon
        

    def is_mortal(self):
        return self._mortal

    def start(self):
        self._process.start()

    # Kill the background process
    # AND close the queues (input and output) correctly;
    # each queue owns a thread, so close that too
    def terminate(self):
        # We can just terminate process, not threads
        if not is_android:
            self._process.terminate()
        # If we are using a Manager(), the queues
        # do not have these close/join_thread methods
        if hasattr(self._c, 'close'):
            self._c.close()
            self._c.join_thread()
        if hasattr(self.s, 'close'):
            self.s.close()
            self.s.join_thread()

    def join(self, timeout=None):
        self._process.join(timeout)

    def is_alive(self):
        return self._process.is_alive()

    def is_killable(self):
        return self._mortal and self._idletime > self._timeout

    def add_idletime(self, time):
        self._idletime = self._idletime + time

    def reset_idle(self):
        self._idletime = 0

    def send_message(self, msg):
        self._c.put(msg)

    # A zombie is immortal, so it cannot be killed anymore
    def set_zombie(self):
        self._mortal = False

    # Get new checks if fewer than nb_checks_max are queued
    # If no new checks were fetched and none are in the queue,
    # sleep for 1 sec
    # REF: doc/shinken-action-queues.png (3)
    def get_new_checks(self):
        try:
            while len(self.checks) < self.processes_by_worker:
                #print "I", self.id, "wait for a message"
                msg = self.s.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                #print "I", self.id, "I've got a message!"
        except Empty:
            if len(self.checks) == 0:
                self._idletime = self._idletime + 1
                time.sleep(1)
        # Maybe the Queue() is not available; if so, just return
        # and get back to work later
        except IOError:
            return
Example #40
class camera:
    def __init__(self, flags):
        # flags[0] = quit
        # flags[1] = add user
        # flags[2] = camera on or off
        self.flags = flags
        self.to_scan = 0
        self.db_update = 0
        self.face_thread = None
        self.db_thread = None
        self.db = database.Database('python', 'Hinoob22')
        self.recognizer = recognizer.Recognizer()
        self.faces = set()
        self.faceCascade = cv2.CascadeClassifier(
            "haarcascade_frontalface_default.xml")

    #used for testing
    def display_camera_full(self, dev, data, timestamp):
        data = cv2.resize(data, (1900, 1050))
        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.flags[0] = False

    #used for testing
    def display_camera_half(self, dev, data, timestamp):
        data = cv2.resize(data, (1280, 720))
        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.flags[0] = False

    #used for testing
    def display_camera_fourth(self, dev, data, timestamp):
        if self.to_scan == 120:
            print('flags[1]:' + str(self.flags[1]))
            if self.flags[1] != False:
                print('adding new user')
                rgb = data[:, :, ::-1]
                self.add_new_user(data, rgb, self.recognizer)
            else:
                if self.face_thread != None and self.face_thread.is_alive():
                    self.face_thread.terminate()
                rgb = data[:, :, ::-1]
                self.face_thread = Process(target=self.face_detect,
                                           args=(rgb, self.recognizer))
                self.face_thread.start()
            self.to_scan = 0
        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.flags[0] = False
        self.to_scan += 1

    #main function
    def display_camera_test(self, dev, data, timestamp):
        if self.to_scan == 60:
            print('flags' + str(self.flags))
            if self.flags[1] != False:
                rgb = data[:, :, ::-1]
                self.add_new_user(data, rgb, self.recognizer)
            else:
                if self.face_thread != None and self.face_thread.is_alive():
                    while True:
                        if not self.face_thread.is_alive():
                            break
                rgb = data[:, :, ::-1]
                self.face_thread = threading.Thread(target=self.face_detect,
                                                    args=(data, rgb,
                                                          self.recognizer,
                                                          self.faces))
                self.face_thread.start()
            self.to_scan = 0
        if self.db_update == 480:
            print(self.faces)
            if self.db_thread != None and self.db_thread.is_alive():
                self.db_thread.terminate()
            if self.face_thread != None and self.face_thread.is_alive():
                while True:
                    if not self.face_thread.is_alive():
                        break
            self.db_thread = Process(target=self.update_db,
                                     args=(self.faces, ))
            self.db_thread.start()
            self.db_update = 0
            self.faces = set()
        if self.flags[2] == True:
            data = cv2.flip(data, 1)
            data = cv2.resize(data, (1900, 1050))
            cv2.imshow('RGB', frame_convert2.video_cv(data))
        else:
            data = cv2.resize(data, (1, 1))
            cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(10) == 27:
            self.flags[0] = False
        self.to_scan += 1
        self.db_update += 1

    # freenect body
    def body(self, *args):
        if self.flags[0]:
            cv2.destroyWindow('RGB')
            self.flags = False
            raise freenect.Kill

    # properly initializes cv2 and runs freenect
    def open_camera(self):
        cv2.startWindowThread()
        cv2.namedWindow('RGB')
        cv2.moveWindow('RGB', 0, 0)
        cv2.startWindowThread()
        print('starting freenect')
        freenect.runloop(video=self.display_camera_test, body=self.body)

    # takes a bgr image from the kinect and uses the recognizer to detect and recognize faces.
    def face_detect(self, bgr, image, recognizer, faces):
        gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
        face_locs = self.faceCascade.detectMultiScale(gray,
                                                      scaleFactor=1.1,
                                                      minNeighbors=5,
                                                      minSize=(30, 30))
        locations = [[y, x + w, y + h, x] for (x, y, w, h) in face_locs]
        results = None
        if len(locations) > 0:
            try:
                results = recognizer.recognize_face_loc(image, locations)
            except Exception:
                print('exception occurred in face_detect for camera')
        print(results)
        if results and 'obama' in results:
            print('obama detected')
            #playsound('./obama.mp3')
            #self.engine.say('A P T obama obama obama obama obama obama obama')
            #self.engine.runAndWait()
        if results:
            for k in results:
                faces.add(k)

    # detects and adds a new user face encoding to our dataset of faces
    def add_new_user(self, bgr, image, recognizer):
        gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
        face_locs = self.faceCascade.detectMultiScale(gray,
                                                      scaleFactor=1.1,
                                                      minNeighbors=5,
                                                      minSize=(30, 30))
        locations = [[y, x + w, y + h, x] for (x, y, w, h) in face_locs]
        if len(locations) > 0:
            if (recognizer.detect_and_add(self.flags[1], image,
                                          locations) == True):
                print('user found, changing add user flag to false')
                print('user: ' + str(self.flags[1]))
                self.flags[1] = False

    # adds recognized faces to the attendance database
    def update_db(self, names):
        print('updating db')
        if len(names) > 0:
            db = database.Database('python', 'Hinoob22')
            for k in names:
                print('adding: ' + k)
                db.add_time_now(k)
            db.close()
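
The locations list comprehensions above convert OpenCV's (x, y, w, h) boxes into the (top, right, bottom, left) order expected by face_recognition-style APIs. A tiny sketch of that conversion, with a hypothetical cv_to_trbl helper:

def cv_to_trbl(box):
    # OpenCV's detectMultiScale yields (x, y, w, h) boxes;
    # face_recognition-style APIs expect (top, right, bottom, left).
    x, y, w, h = box
    return (y, x + w, y + h, x)

assert cv_to_trbl((10, 20, 30, 40)) == (20, 40, 60, 10)
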
Example #41
class MainWindow(QMainWindow):
	UseDock=False
	ShowBBMatchTableView=False

	def __init__(self,database_name):
		super(MainWindow,self).__init__()
		self.setWindowTitle("DarunGrim 4")
		self.setWindowIcon(QIcon('DarunGrim.png'))

		self.PerformDiffProcess=None
		self.DatabaseName=database_name

		self.LogDialog=LogTextBoxDialog()
		self.LogDialog.resize(800,600)

		if RedirectStdOutErr:
			self.PHOut=PrintHook(True,func=self.onTextBoxDataReady)
			self.PHOut.Start()

			self.PHErr=PrintHook(False,func=self.onTextBoxDataReady)
			self.PHErr.Start()

		self.NonMaxGeometry=None
		self.DarunGrimEngine=DarunGrimEngine.DarunGrim(start_ida_listener=True)
		self.readSettings()

		# Menu
		self.createActions()
		self.createMenus()

		#Use dock? not yet
		if not self.UseDock:
			bottom_splitter=QSplitter()
			self.GraphSplitter=QSplitter()

		# Functions
		self.FunctionMatchTableView=QTableView()
		vheader=QHeaderView(Qt.Orientation.Vertical)
		vheader.setResizeMode(QHeaderView.ResizeToContents)
		self.FunctionMatchTableView.setVerticalHeader(vheader)
		self.FunctionMatchTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
		self.FunctionMatchTableView.setSortingEnabled(True)
		self.FunctionMatchTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
		
		if self.ShowBBMatchTableView:
			self.BBMatchTableView=QTableView()
			vheader=QHeaderView(Qt.Orientation.Vertical)
			vheader.setResizeMode(QHeaderView.ResizeToContents)
			self.BBMatchTableView.setVerticalHeader(vheader)
			self.BBMatchTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
			self.BBMatchTableView.setSortingEnabled(True)
			self.BBMatchTableView.setSelectionBehavior(QAbstractItemView.SelectRows)

		if self.UseDock:
			dock=QDockWidget("Functions",self)
			dock.setObjectName("Functions")
			dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.FunctionMatchTableView)
			self.addDockWidget(Qt.BottomDockWidgetArea,dock)
		else:
			bottom_splitter.addWidget(self.FunctionMatchTableView)

		# Blocks
		self.BlockTableModel=BlockTable(self,database_name)
		self.BlockTableView=QTableView()
		vheader=QHeaderView(Qt.Orientation.Vertical)
		vheader.setResizeMode(QHeaderView.ResizeToContents)
		self.BlockTableView.setVerticalHeader(vheader)
		self.BlockTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
		self.BlockTableView.setSortingEnabled(True)
		self.BlockTableView.setModel(self.BlockTableModel)
		self.BlockTableView.setSelectionBehavior(QAbstractItemView.SelectRows)

		if self.UseDock:
			dock=QDockWidget("Blocks",self)
			dock.setObjectName("Blocks")
			dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.BlockTableView)
			self.addDockWidget(Qt.BottomDockWidgetArea,dock)		
		else:
			bottom_splitter.addWidget(self.BlockTableView)

		bottom_splitter.setStretchFactor(0,1)
		bottom_splitter.setStretchFactor(1,0)

		# Function Graph
		self.OrigFunctionGraph=MyGraphicsView()
		self.OrigFunctionGraph.setRenderHints(QPainter.Antialiasing)

		if self.UseDock:
			dock=QDockWidget("Orig",self)
			dock.setObjectName("Orig")
			dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.OrigFunctionGraph)
			self.addDockWidget(Qt.TopDockWidgetArea,dock)
		else:
			self.GraphSplitter.addWidget(self.OrigFunctionGraph)

		# Function Graph
		self.PatchedFunctionGraph=MyGraphicsView()
		self.PatchedFunctionGraph.setRenderHints(QPainter.Antialiasing)

		if self.UseDock:
			dock=QDockWidget("Patched",self)
			dock.setObjectName("Patched")
			dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.PatchedFunctionGraph)
			self.addDockWidget(Qt.TopDockWidgetArea,dock)
		else:
			self.GraphSplitter.addWidget(self.PatchedFunctionGraph)

		self.RefreshGraphViews()

		if not self.UseDock:
			virt_splitter=QSplitter()
			virt_splitter.setOrientation(Qt.Vertical)

			virt_splitter.addWidget(self.GraphSplitter)

			if self.ShowBBMatchTableView:
				tab_widget=QTabWidget()
				tab_widget.addTab(bottom_splitter,"Functions..")
				tab_widget.addTab(self.BBMatchTableView,"Basic blocks...")
				virt_splitter.addWidget(tab_widget)
			else:
				virt_splitter.addWidget(bottom_splitter)

			virt_splitter.setStretchFactor(0,1)
			virt_splitter.setStretchFactor(1,0)

			main_widget=QWidget()
			vlayout=QVBoxLayout()
			vlayout.addWidget(virt_splitter)
			main_widget.setLayout(vlayout)
			self.setCentralWidget(main_widget)
			self.show()
		
		self.clearAreas()
		if database_name:
			self.OpenDatabase(database_name)
		self.restoreUI()

	def RefreshGraphViews(self):
		if self.ShowGraphs==True:
			self.OrigFunctionGraph.show()
			self.PatchedFunctionGraph.show()
			self.GraphSplitter.show()
		else:
			self.OrigFunctionGraph.hide()
			self.PatchedFunctionGraph.hide()
			self.GraphSplitter.hide()

	def clearAreas(self):
		self.OrigFunctionGraph.clear()
		self.PatchedFunctionGraph.clear()

		self.FunctionMatchTable=FunctionMatchTable(self)
		self.FunctionMatchTableView.setModel(self.FunctionMatchTable)

		if self.ShowBBMatchTableView:
			self.BBMatchTable=BBMatchTable(self)
			self.BBMatchTableView.setModel(self.BBMatchTable)

		self.BlockTableModel=BlockTable(self)
		self.BlockTableView.setModel(self.BlockTableModel)

	def manageFileStore(self):
		dialog=FileStoreBrowserDialog(database_name=self.FileStoreDatabase, darungrim_storage_dir=self.FileStoreDir)
		dialog.exec_()

	def newFromFileStore(self):
		dialog=NewDiffingFromFileStoreDialog(database_name=self.FileStoreDatabase, darungrim_storage_dir=self.FileStoreDir)
		if dialog.exec_():
			result_filename='%s-%s.dgf' % (dialog.OrigFileSHA1, dialog.PatchedFileSHA1)
			log_filename='%s-%s.log' % (dialog.OrigFileSHA1, dialog.PatchedFileSHA1)

			self.StartPerformDiff(dialog.OrigFilename,
								dialog.PatchedFilename,
								os.path.join(self.DataFilesDir, result_filename),
								os.path.join(self.DataFilesDir, log_filename),
								debug=False
							)

			file_store_database=FileStoreDatabase.Database(self.FileStoreDatabase)
			file_store_database.AddSession(dialog.name_line.text(), dialog.description_line.text(), dialog.OrigFileID, dialog.PatchedFileID, result_filename)

	def openFromFileStore(self):
		dialog=SessionsDialog(database_name=self.FileStoreDatabase)
		if dialog.exec_():
			self.OpenDatabase(os.path.join(self.DataFilesDir, dialog.GetFilename()))
			self.setWindowTitle("DarunGrim 4 %s" % dialog.GetDescription())

	def new(self):
		dialog=NewDiffingDialog()
		if dialog.exec_():
			src_filename = str(dialog.Filenames['Orig'])
			target_filename = str(dialog.Filenames['Patched'])
			result_filename = str(dialog.Filenames['Result'])
			log_filename=result_filename+'.log'
			is_src_target_storage = False

			if src_filename.lower()[-4:]=='.dgf' and target_filename.lower()[-4:]=='.dgf':
				is_src_target_storage=True

			self.StartPerformDiff(
				src_filename,
				target_filename,
				result_filename,
				log_filename,
				is_src_target_storage=is_src_target_storage,
			)

	def reanalyze(self):
		database = DarunGrimDatabase.Database(self.DatabaseName)
		[src_filename,target_filename] = database.GetDGFFileLocations()
		database.Close()
		del database

		result_filename=''
		if self.DatabaseName[-4:].lower()=='.dgf':
			prefix=self.DatabaseName[0:-4]
		else:
			prefix=self.DatabaseName

		i=0
		while True:
			result_filename=prefix+'-%d.dgf' % i
			if not os.path.isfile(result_filename):
				break
			i+=1

		log_filename=result_filename + '.log'

		self.StartPerformDiff(src_filename,
								target_filename,
								str(self.DatabaseName),
								log_filename=log_filename,
								is_src_target_storage=True,
								debug=False)

	def onTextBoxDataReady(self,data):
		if not self.LogDialog.isVisible():
			self.LogDialog.show()
		self.LogDialog.addText(data)

	def onDiffLogReady(self,data):
		if not self.LogDialog.isVisible():
			self.LogDialog.show()
		self.LogDialog.addText(data)

	def PerformDiffCancelled(self):
		if self.PerformDiffProcess!=None:
			self.PerformDiffProcess.terminate()
			self.PerformDiffProcessCancelled=True

	def StartPerformDiff(self,src_filename,target_filename,result_filename,log_filename='',is_src_target_storage=False, debug=False):
		print "Start Diffing Process: %s vs %s -> %s" % (src_filename,target_filename,result_filename)
		self.clearAreas()

		if os.path.isfile(log_filename):
			os.unlink(log_filename)

		try:
			os.makedirs(os.path.dirname(result_filename))
		except OSError:
			pass

		src_ida_log_filename=result_filename+'.src.log'
		target_ida_log_filename=result_filename+'.target.log'

		q=None
		debug=False
		if debug:
			self.PerformDiffProcess=None
			PerformDiffThread(src_filename,target_filename,result_filename,log_level=self.LogLevel,dbg_storage_dir=self.DataFilesDir,is_src_target_storage=is_src_target_storage,src_ida_log_filename = src_ida_log_filename, target_ida_log_filename = target_ida_log_filename, ida_path=self.IDAPath, ida64_path=self.IDA64Path, q=q)
		else:
			q=Queue()
			self.PerformDiffProcess=Process(target=PerformDiffThread,args=(src_filename,target_filename,result_filename,log_filename,self.LogLevel,self.DataFilesDir,is_src_target_storage,src_ida_log_filename,target_ida_log_filename,self.IDAPath,self.IDA64Path,q))
			self.PerformDiffProcess.start()

		self.PerformDiffProcessCancelled=False
		if self.PerformDiffProcess!=None:
			qlog_thread=QueReadThread(q)
			self.LogDialog.SetCancelCallback(self.PerformDiffCancelled)
			self.LogDialog.DisableClose()
			self.LogDialog.show()
			qlog_thread.data_read.connect(self.onDiffLogReady)
			qlog_thread.start()

			log_threads=[]
			for filename in [log_filename,src_ida_log_filename,target_ida_log_filename]:
				log_thread=LogThread(filename)
				log_thread.data_read.connect(self.onDiffLogReady)
				log_thread.start()
				log_threads.append(log_thread)

			while True:
				time.sleep(0.01)
				if not self.PerformDiffProcess.is_alive():
					break

				qApp.processEvents()

			for log_thread in log_threads:
				log_thread.end()
			qlog_thread.end()

			self.LogDialog.EnableClose()

			if not self.PerformDiffProcessCancelled:
				self.LogDialog.addText("Diffing process finished.")
			else:
				self.LogDialog.addText("Diffing process cancelled.")
			self.LogDialog.SetCancelCallback(None)
			self.PerformDiffProcess=None

		if not self.PerformDiffProcessCancelled:
			self.OpenDatabase(result_filename)

	def open(self):
		(filename,filter)=QFileDialog.getOpenFileName(self,"Open...")
		if filename:
			self.clearAreas()
			self.OpenDatabase(filename)

	def OpenFolder(self,folder):
		try:
			subprocess.check_call(['explorer', folder])
		except Exception:
			pass

	def openOriginalFilesLocation(self):
		database = DarunGrimDatabase.Database(self.DatabaseName)
		[src_filename,target_filename]=database.GetFilesLocation()
		self.OpenFolder(os.path.dirname(src_filename))

	def openPatchedFilesLocation(self):
		database = DarunGrimDatabase.Database(self.DatabaseName)
		[src_filename,target_filename]=database.GetFilesLocation()
		self.OpenFolder(os.path.dirname(target_filename))

	def OpenIDA(self,filename):
		ida_filename=filename

		if filename[-4:].lower()!='.idb' and filename[-4:].lower()!='.i64':
			for path in [filename[0:-4] + '.idb', filename[0:-4] + '.i64']:
				if os.path.isfile(path):
					ida_filename=path
					break

		self.DarunGrimEngine.OpenIDA(ida_filename)

	def synchronizeIDA(self):
		if self.DatabaseName:
			database = DarunGrimDatabase.Database(self.DatabaseName)
			[src_filename,target_filename]=database.GetFilesLocation()
		
			self.DarunGrimEngine.SetSourceController(src_filename)
			self.DarunGrimEngine.SetTargetController(target_filename)
			self.OpenIDA(src_filename)
			self.OpenIDA(target_filename)

	def captureWindow(self):
		(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
		if filename:
			pixmap=QPixmap.grabWidget(super(QMainWindow,self))
			pixmap.save(filename,"png")

	def saveOrigGraph(self):
		(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
		if filename:
			self.OrigFunctionGraph.SaveImg(filename)

	def savePatchedGraph(self):
		(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
		if filename:
			self.PatchedFunctionGraph.SaveImg(filename)

	def showLogs(self):
		self.LogDialog.show()

	def toggleShowGraphs(self):
		self.ShowGraphs = not self.ShowGraphs
		self.RefreshGraphViews()

	def toggleSyncrhonizeIDAUponOpening(self):
		self.SyncrhonizeIDAUponOpening = not self.SyncrhonizeIDAUponOpening

	def showConfiguration(self):
		dialog=ConfigurationDialog( file_store_dir=self.FileStoreDir, 
									data_files_dir=self.DataFilesDir,
									ida_path=self.IDAPath,
									ida64_path=self.IDA64Path,
									log_level=self.LogLevel
								)
		if dialog.exec_():
			self.FileStoreDir=dialog.file_store_dir_line.text()
			self.DataFilesDir=dialog.data_files_dir_line.text()
			self.FileStoreDatabase=os.path.join(self.DataFilesDir,'index.db')
			self.IDAPath=dialog.ida_path_line.text()
			self.IDA64Path=dialog.ida64_path_line.text()
			self.DarunGrimEngine.SetIDAPath(self.IDAPath)
			self.DarunGrimEngine.SetIDAPath(self.IDA64Path,True)
			self.LogLevel=int(dialog.log_level_line.text())

	def serverInfo(self):
		dialog=ServerInfoDialog(port=self.DarunGrimEngine.ListeningPort)
		dialog.exec_()

	def toggleStaysOnTop(self):
		self.StaysOnTop = not self.StaysOnTop
		self.hide()
		if self.StaysOnTop:
			self.setWindowFlags(self.windowFlags()|Qt.WindowStaysOnTopHint)
		else:
			self.setWindowFlags(self.windowFlags()& ~Qt.WindowStaysOnTopHint)
		self.show()

	def intallIDAPlugin(self):
		(ret1,message1)=self.DarunGrimEngine.InstallIDAPlugin('DarunGrimPlugin.plw')
		(ret2,message2)=self.DarunGrimEngine.InstallIDAPlugin('DarunGrimPlugin.p64')
		if not ret1 or not ret2:
			msg_box=QMessageBox()
			if message1!=message2:
				message1 += '\n' + message2
			msg_box.setText('Try running the program with Administrator privileges\n' + message1)
			msg_box.exec_()
			return False

		else:
			msg_box=QMessageBox()
			msg_box.setText('Installation successful\n'+message1 + '\n' + message2)
			msg_box.exec_()
			return True

	def createActions(self):
		self.newAct = QAction("New Diffing...",
								self,
								shortcut=QKeySequence.New,
								statusTip="Create new diffing output",
								triggered=self.new
							)

		self.openAct = QAction("Open...",
								self,
								shortcut=QKeySequence.Open,
								statusTip="Open a dgf database",
								triggered=self.open
							)

		self.manageFileStoreAct = QAction("Manage FileStore...",
								self,
								statusTip="Manage FileStore",
								triggered=self.manageFileStore
							)

		self.newFromFileStoreAct = QAction("New Diffing (FileStore)...",
								self,
								statusTip="Create new diffing output",
								triggered=self.newFromFileStore
							)

		self.openFromFileStoreAct = QAction("Open Diffing (FileStore)...",
								self,
								statusTip="Open diffing output",
								triggered=self.openFromFileStore
							)

		self.reanalyzeAct = QAction("Reanalyze...",
								self,
								statusTip="Reanalyze current files",
								triggered=self.reanalyze
							)

		self.synchornizeIDAAct= QAction("Synchronize IDA",
								self,
								statusTip="Synchronize IDA",
								triggered=self.synchronizeIDA
							)

		self.openOriginalFilesLocationAct = QAction("Open Original Files Location",
								self,
								statusTip="Open original file location",
								triggered=self.openOriginalFilesLocation
							)

		self.openPatchedFilesLocationAct = QAction("Open Patched Files Location",
								self,
								statusTip="Open patched file location",
								triggered=self.openPatchedFilesLocation
							)

		self.captureWindowAct = QAction("Capture...",
								self,
								statusTip="Save patched graph",
								triggered=self.captureWindow
							)

		self.saveOrigGraphAct = QAction("Save orig graph...",
								self,
								statusTip="Save original graph",
								triggered=self.saveOrigGraph
							)

		self.savePatchedGraphAct = QAction("Save patched graph...",
								self,
								statusTip="Save patched graph",
								triggered=self.savePatchedGraph
							)

		self.showLogsAct = QAction("Show logs...",
								self,
								statusTip="Show logs",
								triggered=self.showLogs
							)

		self.showGraphsAct = QAction("Show graphs...",
								self,
								statusTip="Show graphs",
								triggered=self.toggleShowGraphs,
								checkable=True
							)
		
		self.showGraphsAct.setChecked(self.ShowGraphs)

		self.syncrhonizeIDAUponOpeningAct = QAction("Synchronize IDA upon opening...",
								self,
								statusTip="Synchronize IDA upon opening",
								triggered=self.toggleSyncrhonizeIDAUponOpening,
								checkable=True
							)
		self.syncrhonizeIDAUponOpeningAct.setChecked(self.SyncrhonizeIDAUponOpening)

		self.configurationAct = QAction("Configuration...",
								self,
								statusTip="Configuration",
								triggered=self.showConfiguration
							)

		self.serverInfoAct = QAction("Server...",
								self,
								statusTip="Server Info",
								triggered=self.serverInfo
							)

		self.staysOnTopAct = QAction("Stays on top...",
								self,
								statusTip="Stays on top",
								triggered=self.toggleStaysOnTop,
								checkable=True
							)
		self.staysOnTopAct.setChecked(self.StaysOnTop)

		self.intallIDAPluginAct = QAction("Install IDA Plugin...",
								self,
								statusTip="Install IDA Plugin...",
								triggered=self.intallIDAPlugin
							)

	def createMenus(self):
		self.fileMenu = self.menuBar().addMenu("&File")
		self.fileMenu.addAction(self.newAct)
		self.fileMenu.addAction(self.openAct)
		self.fileMenu.addAction(self.manageFileStoreAct)
		self.fileMenu.addAction(self.newFromFileStoreAct)
		self.fileMenu.addAction(self.openFromFileStoreAct)
		self.fileMenu.addAction(self.reanalyzeAct)

		self.analysisMenu = self.menuBar().addMenu("&Analysis")

		self.analysisMenu.addAction(self.synchornizeIDAAct)
		self.analysisMenu.addAction(self.openOriginalFilesLocationAct)
		self.analysisMenu.addAction(self.openPatchedFilesLocationAct)
		
		self.analysisMenu.addAction(self.captureWindowAct)
		self.analysisMenu.addAction(self.saveOrigGraphAct)
		self.analysisMenu.addAction(self.savePatchedGraphAct)
		self.analysisMenu.addAction(self.showLogsAct)

		self.optionsMenu = self.menuBar().addMenu("&Options")
		self.optionsMenu.addAction(self.showGraphsAct)
		self.optionsMenu.addAction(self.syncrhonizeIDAUponOpeningAct)
		self.optionsMenu.addAction(self.staysOnTopAct)
		self.optionsMenu.addAction(self.configurationAct)
		self.optionsMenu.addAction(self.serverInfoAct)
		self.optionsMenu.addAction(self.intallIDAPluginAct)

	def OpenDatabase(self,databasename):
		self.DatabaseName=databasename

		self.FunctionMatchTable=FunctionMatchTable(self,self.DatabaseName)
		self.FunctionMatchTableView.setModel(self.FunctionMatchTable)
		selection=self.FunctionMatchTableView.selectionModel()
		if selection!=None:
			selection.selectionChanged.connect(self.handleFunctionMatchTableChanged)

		if self.ShowBBMatchTableView:
			self.BBMatchTable=BBMatchTable(self,self.DatabaseName)
			self.BBMatchTableView.setModel(self.BBMatchTable)
			selection=self.BBMatchTableView.selectionModel()
			if selection!=None:
				selection.selectionChanged.connect(self.handleBBMatchTableChanged)

		database = DarunGrimDatabase.Database(self.DatabaseName)
		self.setWindowTitle("DarunGrim 4 - %s" % (database.GetDescription()))

		if self.SyncrhonizeIDAUponOpening:
			self.synchronizeIDA()

	def ColorController(self, type, disasms, match_info):
		for (address,[end_address,disasm]) in disasms.items():
			if address not in match_info:
				#Red block
				self.DarunGrimEngine.ColorAddress(type, address, end_address+1, 0x0000FF)
			elif match_info[address][1]!=100:
				#Yellow block
				self.DarunGrimEngine.ColorAddress(type, address, end_address+1, 0x00FFFF)
		
	def handleFunctionMatchTableChanged(self,selected,dselected):
		for item in selected:
			for index in item.indexes():
				[source_function_address, target_function_address] = self.FunctionMatchTable.GetFunctionAddresses(index.row())
				self.BlockTableModel=BlockTable(self,self.DatabaseName,source_function_address, target_function_address)
				self.BlockTableView.setModel(self.BlockTableModel)
				selection=self.BlockTableView.selectionModel()
				if selection!=None:
					selection.selectionChanged.connect(self.handleBlockTableChanged)

				database=DarunGrimDatabase.Database(self.DatabaseName)

				(source_disasms, source_links) = database.GetFunctionDisasmLines("Source", source_function_address)
				(target_disasms, target_links) = database.GetFunctionDisasmLines("Target", target_function_address)

				source_match_info=self.BlockTableModel.GetSourceMatchInfo()
				target_match_info=self.BlockTableModel.GetTargetMatchInfo()

				#IDA Sync
				self.ColorController(0, source_disasms, source_match_info )
				self.ColorController(1, target_disasms, target_match_info )
				self.DarunGrimEngine.JumpToAddresses(source_function_address, target_function_address)

				if self.ShowGraphs:
					# Draw graphs
					self.OrigFunctionGraph.SetDatabaseName(self.DatabaseName)
					self.OrigFunctionGraph.DrawFunctionGraph("Source", source_function_address, source_disasms, source_links, source_match_info)
					self.OrigFunctionGraph.SetSelectBlockCallback(self.SelectedBlock)
					self.OrigFunctionGraph.HilightAddress(source_function_address)

					self.PatchedFunctionGraph.SetDatabaseName(self.DatabaseName)
					self.PatchedFunctionGraph.DrawFunctionGraph("Target", target_function_address, target_disasms, target_links, target_match_info)
					self.PatchedFunctionGraph.SetSelectBlockCallback(self.SelectedBlock)
					self.PatchedFunctionGraph.HilightAddress(target_function_address)

				break

	def handleBBMatchTableChanged(self,selected,dselected):
		pass

	def handleBlockTableChanged(self,selected,dselected):
		for item in selected:
			for index in item.indexes():
				[orig_address,patched_address]=self.BlockTableModel.GetBlockAddresses(index.row())

				if self.ShowGraphs:
					if orig_address!=0:
						self.OrigFunctionGraph.HilightAddress(orig_address)

					if patched_address!=0:
						self.PatchedFunctionGraph.HilightAddress(patched_address)

				self.DarunGrimEngine.JumpToAddresses(orig_address, patched_address)
				break

	def SelectedBlock(self,graph,address):
		if graph==self.OrigFunctionGraph:
			matched_address=self.BlockTableModel.GetMatchAddresses(0,address)
			if matched_address!=None:
				self.PatchedFunctionGraph.HilightAddress(matched_address)
				self.DarunGrimEngine.JumpToAddresses(0, matched_address)

		elif graph==self.PatchedFunctionGraph:
			matched_address=self.BlockTableModel.GetMatchAddresses(1,address)
			if matched_address!=None:
				self.OrigFunctionGraph.HilightAddress(matched_address)
				self.DarunGrimEngine.JumpToAddresses(matched_address, 0)

	def changeEvent(self,event):
		if event.type()==QEvent.WindowStateChange:
			if (self.windowState()&Qt.WindowMinimized)==0 and \
				 (self.windowState()&Qt.WindowMaximized)==0 and \
				 (self.windowState()&Qt.WindowFullScreen)==0 and \
				 (self.windowState()&Qt.WindowActive)==0:
					pass

	def resizeEvent(self,event):
		if not self.isMaximized():
			self.NonMaxGeometry=self.saveGeometry()

	def restoreUI(self):
		settings=QSettings("DarunGrim LLC", "DarunGrim")
		
		if settings.contains("geometry/non_max"):
			self.NonMaxGeometry=settings.value("geometry/non_max")
			self.restoreGeometry(self.NonMaxGeometry)
		else:
			self.resize(800,600)
			self.NonMaxGeometry=self.saveGeometry()
		
		if settings.contains("isMaximized"):
			if settings.value("isMaximized")=="true":
				self.setWindowState(self.windowState()|Qt.WindowMaximized)
		self.restoreState(settings.value("windowState"))


		self.FirstConfigured=False
		if not settings.contains("General/FirstConfigured"):
			self.showConfiguration()
			if self.intallIDAPlugin():
				self.FirstConfigured=True
		else:
			self.FirstConfigured=True

	def readSettings(self):
		settings=QSettings("DarunGrim LLC", "DarunGrim")

		self.ShowGraphs=True
		if settings.contains("General/ShowGraphs"):
			self.ShowGraphs = settings.value("General/ShowGraphs")=='true'

		self.SyncrhonizeIDAUponOpening=False
		if settings.contains("General/SyncrhonizeIDAUponOpening"):
			self.SyncrhonizeIDAUponOpening = settings.value("General/SyncrhonizeIDAUponOpening")=='true'

		self.StaysOnTop=False
		if settings.contains("General/StaysOnTop"):
			self.StaysOnTop = settings.value("General/StaysOnTop")=='true'

		if self.StaysOnTop:
			self.setWindowFlags(self.windowFlags()|Qt.WindowStaysOnTopHint)
		else:
			self.setWindowFlags(self.windowFlags()& ~Qt.WindowStaysOnTopHint)

		self.FileStoreDir = os.path.join(os.getcwd(), "DarunGrimStore")
		if settings.contains("General/FileStoreDir"):
			self.FileStoreDir=settings.value("General/FileStoreDir")
		
		if not os.path.isdir(self.FileStoreDir):
			try:
				os.makedirs(self.FileStoreDir)
			except:
				import traceback
				traceback.print_exc()

		self.FileStoreDatabase='index.db'
		if settings.contains("General/FileStoreDatabase"):
			self.FileStoreDatabase=settings.value("General/FileStoreDatabase")

		self.DataFilesDir=os.path.join(os.getcwd(), "DarunGrimData")
		if settings.contains("General/DataFilesDir"):
			self.DataFilesDir=settings.value("General/DataFilesDir")

		if not os.path.isdir(self.DataFilesDir):
			try:
				os.makedirs(self.DataFilesDir)
			except:
				import traceback
				traceback.print_exc()

		self.IDAPath=''
		if settings.contains("General/IDAPath"):
			self.IDAPath=settings.value("General/IDAPath")
		else:
			files=self.DarunGrimEngine.LocateIDAExecutables()
			if len(files)>0:
				self.IDAPath=files[0][0]
		
		self.DarunGrimEngine.SetIDAPath(self.IDAPath)

		if not self.DarunGrimEngine.CheckIDAPlugin():
			#print 'DarunGrim plugin is missing'
			pass

		self.IDA64Path=''
		if settings.contains("General/IDA64Path"):
			self.IDAPath=settings.value("General/IDA64Path")
		else:
			files=self.DarunGrimEngine.LocateIDAExecutables(is_64=True)
			if len(files)>0:
				self.IDA64Path=files[0][0]

		self.DarunGrimEngine.SetIDAPath(self.IDA64Path,is_64=True)

		self.LogLevel=10
		if settings.contains("General/LogLevel"):
			self.LogLevel=int(settings.value("General/LogLevel"))

	def saveSettings(self):
		settings = QSettings("DarunGrim LLC", "DarunGrim")
		settings.setValue("General/ShowGraphs", self.ShowGraphs)
		settings.setValue("General/SyncrhonizeIDAUponOpening", self.SyncrhonizeIDAUponOpening)
		settings.setValue("General/StaysOnTop", self.StaysOnTop)
		settings.setValue("General/FileStoreDir", self.FileStoreDir)
		settings.setValue("General/FileStoreDatabase", self.FileStoreDatabase)
		settings.setValue("General/DataFilesDir", self.DataFilesDir)
		settings.setValue("General/LogLevel", self.LogLevel)
		
		if self.FirstConfigured==True:
			settings.setValue("General/FirstConfigured", self.FirstConfigured)
		
		if self.NonMaxGeometry!=None:
			settings.setValue("geometry/non_max", self.NonMaxGeometry)
		settings.setValue("isMaximized", self.isMaximized())
		settings.setValue("windowState", self.saveState())

	def closeEvent(self, event):
		self.PerformDiffCancelled()
		self.saveSettings()
		QMainWindow.closeEvent(self, event)
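
A note on the readSettings pattern above: depending on the storage backend, QSettings can hand back booleans that were saved from Python as the strings 'true'/'false', which is why the code compares each value against 'true' instead of using it directly. A minimal sketch of a reusable helper for that round-trip (read_bool_setting is a name of our own, not part of DarunGrim):

def read_bool_setting(settings, key, default=False):
	# QSettings may return a real bool or the string 'true'/'false'
	if not settings.contains(key):
		return default
	value = settings.value(key)
	if isinstance(value, bool):
		return value
	return str(value).lower() == 'true'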
Beispiel #42
0
class AstroPrintPipeline(object):
    def __init__(self, device, size, rotation, source, encoding, onFatalError):
        self._logger = logging.getLogger(__name__)
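        # duplex Pipe(True): the parent keeps _parentConn, the child pipeline process gets _processConn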
        self._parentConn, self._processConn = Pipe(True)
        self._pendingReqs = {}
        self._preservativePendingReqs = {}
        self._lastReqId = 0
        self._device = device
        self._rotation = rotation
        self._source = source.lower()
        self._encoding = encoding.lower()
        self._size = tuple([int(x) for x in size.split('x')])
        self._sendCondition = Condition()
        self._onFatalError = onFatalError
        self._responseListener = ProcessResponseListener(
            self._parentConn, self._onProcessResponse)
        self._responseListener.start()
        self._process = None
        self._listening = False

    def __del__(self):
        self._logger.debug('Pipeline Process Controller removed')

    def _kill(self):
        if self._process:
            try:
                os.kill(self._process.pid, signal.SIGKILL)
                self._process.join()
            except OSError as e:
                # errno 3 (ESRCH): no such process, i.e. it is already dead
                if e.errno != 3:
                    raise e

            self._process = None

    def startProcess(self):
        if self._process:
            # This should almost never happen (but it does)
            # Make sure the previous process is killed before a new one is started
            self._logger.warn(
                "A previous process was still running, killing it")
            self._kill()

        onListeningEvent = Event()
        errorState = Value(
            'b',
            False)  #If True, it means the process had an error while starting
        self._listening = False

        self._process = Process(
            target=startPipelineProcess,
            args=(self._device, self._size, self._rotation, self._source,
                  self._encoding, onListeningEvent, errorState,
                  (self._parentConn, self._processConn),
                  settings().getInt(['camera', 'debug-level'])))
        self._process.daemon = True
        self._process.start()
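        # the child signals readiness through onListeningEvent; allow it up to 20 seconds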
        if onListeningEvent.wait(20.0):
            if errorState.value:
                self._logger.error('Pipeline Failed to start.')
                self._kill()
                self._logger.debug('Pipeline Process killed.')

            else:
                self._logger.debug('Pipeline Process Started.')
                self._listening = True

        else:
            self._logger.debug(
                'Timeout while waiting for pipeline process to start')
            self._kill()

    def stopProcess(self):
        if self._process:
            if self._listening:
                self._sendReqToProcess({'action': 'shutdown'})
                self._process.join(
                    2.0)  #Give it two seconds to exit and kill otherwise

            if self._process.exitcode is None:
                self._logger.warn(
                    'Process did not shutdown properly. Terminating...')
                self._process.terminate()
                self._process.join(
                    2.0
                )  # Give it another two seconds to terminate, otherwise kill
                if self._process and self._process.exitcode is None:
                    self._logger.warn(
                        'Process did not terminate properly. Sending KILL signal...'
                    )
                    self._kill()

            self._logger.debug('Process terminated')
            self._process = None

    @property
    def processRunning(self):
        return self._process and self._process.is_alive()

    def stop(self):
        self._responseListener.stop()
        self.stopProcess()

        #It's possible that stop is called as a result of a response which is
        #executed in the self._responseListener Thread. You can't join your own thread!
        if current_thread() != self._responseListener:
            self._responseListener.join()

        self._responseListener = None

    def startLocalVideo(self, onFrameTakenCallback):
        def postprocessingLocalVideoFrame(resp):
            if resp:
                if isinstance(resp, dict) and 'error' in resp:
                    self._logger.error(
                        'Error during local video frame capture: %s' %
                        resp['error'])
                    onFrameTakenCallback(None)
                else:
                    from base64 import b64decode
                    try:
                        onFrameTakenCallback(b64decode(resp))
                    except TypeError as e:
                        self._logger.error(
                            'Invalid local video frame received. Error: %s'
                            % e)
                        onFrameTakenCallback(None)
            else:
                onFrameTakenCallback(None)

        self._sendPreservativeReq({'action': 'startLocalVideo'},
                                  postprocessingLocalVideoFrame)

    def stopLocalVideo(self, doneCallback=None):
        self._sendReqToProcess({'action': 'stopLocalVideo'}, doneCallback)

    def isLocalVideoPlaying(self, doneCallback):
        self._sendReqToProcess({'action': 'isLocalVideoPlaying'}, doneCallback)

    def startVideo(self, doneCallback=None):
        self._sendReqToProcess({'action': 'startVideo'}, doneCallback)

    def stopVideo(self, doneCallback=None):
        self._sendReqToProcess({'action': 'stopVideo'}, doneCallback)

    def isVideoPlaying(self, doneCallback):
        self._sendReqToProcess({'action': 'isVideoPlaying'}, doneCallback)

    def isAnyVideoPlaying(self, doneCallback):
        self._sendReqToProcess({'action': 'isAnyVideoPlaying'}, doneCallback)

    def takePhoto(self, doneCallback, text=None):
        def postprocessing(resp):
            if resp:
                if isinstance(resp, dict) and 'error' in resp:
                    self._logger.error('Error during photo capture: %s' %
                                       resp['error'])
                    doneCallback(None)
                else:
                    from base64 import b64decode
                    try:
                        doneCallback(b64decode(resp))
                    except TypeError as e:
                        self._logger.error(
                            'Invalid photo received. Error: %s' % e)
                        doneCallback(None)

            else:
                doneCallback(None)

        if text is not None:
            self._sendReqToProcess(
                {
                    'action': 'takePhoto',
                    'data': {
                        'text': text
                    }
                }, postprocessing)
        else:
            self._sendReqToProcess({
                'action': 'takePhoto',
                'data': None
            }, postprocessing)

    def _onProcessResponse(self, id, data):
        if id == 0:  # this is a broadcast, likely an error. Inform all pending requests
            self._logger.warn(
                'Broadcasting error to ALL pending requests [ %s ]' %
                repr(data))

            if self._pendingReqs:
                for cb in self._pendingReqs.values():
                    if cb:
                        cb(data)

            if self._preservativePendingReqs:
                for cb in self._preservativePendingReqs.values():
                    if cb:
                        cb(data)

            if data and 'error' in data and data['error'] == 'fatal_error':
                message = 'Fatal error occurred in video streaming (%s)' % (
                    data['details'] if 'details' in data else 'unknown')

                #signaling for remote peers
                manage_fatal_error_webrtc = blinkerSignal(
                    'manage_fatal_error_webrtc')
                manage_fatal_error_webrtc.send(self, message=message)

                #event for local peers
                eventManager().fire(Events.GSTREAMER_EVENT,
                                    {'message': message})

                try:
                    self._logger.info(
                        "Trying to get list of formats supported by your camera..."
                    )
                    self._logger.info(
                        subprocess.Popen("v4l2-ctl --list-formats-ext -d %s" %
                                         str(self._device),
                                         shell=True,
                                         stdout=subprocess.PIPE).stdout.read())

                except:
                    self._logger.error("Unable to retrieve supported formats")

                #shutdown the process
                self._pendingReqs = {}
                self._preservativePendingReqs = {}
                self._onFatalError()

        elif id in self._pendingReqs:
            try:
                callback = self._pendingReqs[id]
                if callback:
                    callback(data)

                del self._pendingReqs[id]

                if self._logger.isEnabledFor(logging.DEBUG):
                    if sys.getsizeof(data) > 50:
                        dataStr = "%d bytes" % sys.getsizeof(data)
                    else:
                        dataStr = repr(data)

                    self._logger.debug('Response for %d handled [ %s ]' %
                                       (id, dataStr))

            except Exception:
                self._logger.error("Problem executing callback response",
                                   exc_info=True)

        elif id in self._preservativePendingReqs:
            try:
                callback = self._preservativePendingReqs[id]
                if callback:
                    callback(data)

                if self._logger.isEnabledFor(logging.DEBUG):
                    if sys.getsizeof(data) > 50:
                        dataStr = "%d bytes" % sys.getsizeof(data)
                    else:
                        dataStr = repr(data)

                    self._logger.debug(
                        'Preservative response for %d handled [ %s ]' %
                        (id, dataStr))

            except Exception:
                self._logger.error("Problem executing callback response",
                                   exc_info=True)

        else:
            self._logger.error(
                "There's no pending [normal/preservative] request for response %s"
                % id)

    def _sendPreservativeReq(self, data, callback=None):
        self.__addReq(data, callback, True)

    def _sendReqToProcess(self, data, callback=None):
        self.__addReq(data, callback, False)

    def __addReq(self, data, callback, preservative):
        if self.processRunning:
            with self._sendCondition:
                if self._listening:
                    self._lastReqId += 1
                    id = self._lastReqId

                    if preservative:
                        self._preservativePendingReqs[id] = callback
                    else:
                        self._pendingReqs[id] = callback

                    self._parentConn.send((id, data))
                    self._logger.debug('Sent request %s to process [ %s ]' %
                                       (id, repr(data)))

                else:
                    self._logger.debug(
                        'Process not listening. There was a problem while starting it.'
                    )
                    if callback:
                        callback({
                            'error':
                            'not_listening',
                            'details':
                            'The process is not currently listening to requests'
                        })

        else:
            self._logger.debug('Process not running. Trying to restart')
            self.startProcess()
            if self.processRunning:
                self._sendReqToProcess(data, callback)

            else:
                self._logger.error('Unable to re-start pipeline process.')
                if callback:
                    callback({
                        'error': 'no_process',
                        'details': 'Unable to re-start process'
                    })
Beispiel #43
0
def start_sampler(task_queue, response_queue):
    p = Process(target=sampler.Sampler.sampler,
                args=(task_queue, response_queue))
    p.start()
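    # is_alive() right after start() is normally True, so this assert cannot
    # detect a sampler child that crashes later during its own startup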
    assert p.is_alive()
    return p
Beispiel #44
0
import os
from multiprocessing import Process


def run_test(name1, name2, name3, **kwargs):
    print('Child process: %s' % os.getpid())
    print(locals())
    print('Child process exiting')


if __name__ == '__main__':
    print('Main process: %s' % os.getpid())
    # a tuple with a single element needs a trailing ","
    p = Process(target=run_test,
                args=(
                    1,
                    2,
                    'test',
                ),
                kwargs={
                    'name4': 4,
                    'name5': 5
                })

    p.start()

    print('Child pid: %s, child alive: %s' % (p.pid, p.is_alive()))
    # the main process waits for the child; timeout caps the wait
    p.join(timeout=100)

    print('Main process exiting')
Beispiel #45
0
class HumanTrackedEventWatcher(ALModule):
    """ A module to react to HumanTracked and PeopleLeft events """
    def __init__(self):
        ALModule.__init__(self, "humanEventWatcher")
        global memory
        memory = ALProxy("ALMemory", ip_robot, port_robot)
        memory.subscribeToEvent("ALBasicAwareness/HumanTracked",
                                "humanEventWatcher",
                                "onHumanTracked")
        memory.subscribeToEvent("ALBasicAwareness/PeopleLeft",
                                "humanEventWatcher",
                                "onPeopleLeft")
        #memory.subscribeToEvent('WordRecognized', ip_robot, 'wordRecognized')
        self.speech_reco = ALProxy("ALSpeechRecognition", ip_robot, port_robot)
        self.text_to_speech=ALProxy("ALTextToSpeech", ip_robot, port_robot)
        self.is_speech_reco_started = False
        self.photo_capture=ALProxy("ALPhotoCapture", ip_robot, port_robot)
        self.cameraMap = {
            'Top': 0,
            'Bottom': 1
        }
        self.camera_id=0
        self.recordFolder = "/home/nao/recordings/cameras/"
        self.p = Process(target=interat)

    def onHumanTracked(self, key, value, msg):
        """ callback for event HumanTracked """
        print "got HumanTracked: detected person with ID:", str(value)
        if value >= 0:  # found a new person
            self.start_speech_reco()
            position_human = self.get_people_perception_data(value)
            [x, y, z] = position_human
            print "The tracked person with ID", value, "is at the position:", \
                "x=", x, "/ y=",  y, "/ z=", z
            print "Aha, I saw a human!"
            if not self.p.is_alive():
                self.p = Process(target=interat)
                self.p.start()

    def onPeopleLeft(self, key, value, msg):
        """ callback for event PeopleLeft """
        print "got PeopleLeft: lost person", str(value)
        self.p.terminate()
        self.stop_speech_reco()

    def start_speech_reco(self):
        """ start asr when someone's detected in event handler class """
        if not self.is_speech_reco_started:
            try:
                data = memory.getData("WordRecognized")
                print(data)
            except RuntimeError:
                print "ASR already started"
            self.speech_reco.setVisualExpression(True)
            self.speech_reco.subscribe("BasicAwareness_Test")
            self.is_speech_reco_started = True
            print "start ASR"

    def stop_speech_reco(self):
        """ stop asr when someone's detected in event handler class """
        if self.is_speech_reco_started:
            self.speech_reco.unsubscribe("BasicAwareness_Test")
            self.is_speech_reco_started = False
            print "stop ASR"

    def get_people_perception_data(self, id_person_tracked):
        memory = ALProxy("ALMemory", ip_robot, port_robot)
        memory_key = "PeoplePerception/Person/" + str(id_person_tracked) + \
                     "/PositionInWorldFrame"
        return memory.getData(memory_key)
Beispiel #46
0
try:
    queue__key_exchanges = Queue()
    queue__key_exchanges_signaller = Queue()
    queue__audio_receiver = Queue()
    queue__audio_receiver_signaller = Queue()
    process__key_exchange_facilitator = Process(
        target=ke.handle_client_connections,
        args=(queue__key_exchanges, queue__key_exchanges_signaller))
    process__audio_receiver = Process(target=audio_server.handle_audio_receipt,
                                      args=(queue__audio_receiver,
                                            queue__audio_receiver_signaller))
    process__key_exchange_facilitator.start()
    process__audio_receiver.start()

    logging.debug("Status of process__key_exchange_facilitator: " +
                  str(process__key_exchange_facilitator.is_alive()))
    logging.debug("Status of process__audio_receiver: " +
                  str(process__audio_receiver.is_alive()))

    countdown = 5
    iterate_flag = True
    while iterate_flag:
        logging.debug("Waiting to receive data from <key_exchanges> queue")
        queue_data = queue__key_exchanges.get(block=True)
        assert queue_data == signals.SIG_CHECKPOINT
        participant_count = queue__key_exchanges.get(block=True)
        queue__audio_receiver.put(participant_count)
        countdown -= 1
        if countdown <= 0:
            iterate_flag = False
Beispiel #47
0
class MidiOverNetSender(object):
    def __init__(self):
        self._midiInputList = None
        self._midiOverNetProcess = None
        self._midiOverNetQueue = Queue(32)
        self._statusQueue = Queue(1024)
        self._debugPrintQueue = Queue(1024)

    def scanInputs(self):
        pygame.midi.init()
        self._midiInputList = []
        midiDevices = pygame.midi.get_count()
        for i in range(midiDevices):
            midiSys, midiName, midiInput, midiOutput, midiOpen = pygame.midi.get_device_info(i) #@UnusedVariable
            if(midiInput == 1):
                if(midiOpen == 1):
                    midiState = "Already in use."
                else:
                    midiState = "Available."
                print("input id: %d \"%s\" %s" % (i, midiName, midiState))
                self._midiInputList.append((midiName, midiOpen, i))
            if(midiOutput == 1):
                if(midiOpen == 1):
                    midiState = "Already in use."
                else:
                    midiState = "Available."
                print("output id: %d \"%s\" %s" % (i, midiName, midiState))
        return self._midiInputList

#        self._host = "10.242.10.145"
#        self._port = 2020
    def startMidiOverNetProcess(self, inputId, host, port, guiHost, guiPort, useBroadcast, filterClock):
        if((inputId < len(self._midiInputList)) and (inputId >= 0)):
            midiName, midiOpen, pygameMidiId = self._midiInputList[inputId] #@UnusedVariable
            if useBroadcast:
                host = '<broadcast>'
            clockInfo = "Sending MIDI clock."
            if filterClock:
                clockInfo = "Filtering MIDI clock!"
            print "Starting MidiOverNetProcess. From MIDI input: \"%s\" to host: %s:%d %s" %(midiName, host, port, clockInfo)
            self._midiOverNetProcess = Process(target=midiOverNetProcess, args=(host, port, guiHost, guiPort, useBroadcast, filterClock, pygameMidiId, self._midiOverNetQueue, self._statusQueue, self._debugPrintQueue))
            self._midiOverNetProcess.name = "midiUdpSender"
            self._midiOverNetProcess.start()

    def getMidiName(self, inputId):
        if((inputId < len(self._midiInputList)) and (inputId >= 0)):
            midiName, midiOpen, pygameMidiId = self._midiInputList[inputId] #@UnusedVariable
            return midiName
        return ""

    def stopMidiOverNetProcess(self):
        if(self._midiOverNetProcess != None):
            print "Stopping midiUdpSender"
            self._midiOverNetQueue.put("QUIT")
            self._midiOverNetProcess.join(20.0)
            if(self._midiOverNetProcess.is_alive()):
                print "midiUdpSender did not respond to quit command. Terminating."
                self._midiOverNetProcess.terminate()
        self._midiOverNetProcess = None

    def getMidiStatus(self):
        run = True
        clocks = 0
        midis = 0
        daemon = 0
        if(self._midiOverNetProcess != None):
            while(run):
                try:
                    status = self._statusQueue.get_nowait()
                    if(status == 1):
                        clocks += 1
                    elif(status == 2):
                        midis += 1
                    daemon += 1
                except Empty:
                    run = False
            try:
                for i in range(512): #@UnusedVariable
                    message = self._debugPrintQueue.get_nowait()
                    print "midiOverNetProcess: " + message
            except Empty:
                pass
        return (daemon, clocks, midis)
Beispiel #48
0
 def test_distributed_with_partition_servers(self):
     sync_path = TemporaryDirectory()
     self.addCleanup(sync_path.cleanup)
     entity_name = "e"
     relation_config = RelationSchema(name="r",
                                      lhs=entity_name,
                                      rhs=entity_name)
     base_config = ConfigSchema(
         dimension=10,
         relations=[relation_config],
         entities={entity_name: EntitySchema(num_partitions=4)},
         entity_path=None,  # filled in later
         edge_paths=[],  # filled in later
         checkpoint_path=self.checkpoint_path.name,
         num_machines=2,
         num_partition_servers=1,
         distributed_init_method="file://%s" %
         os.path.join(sync_path.name, "sync"),
     )
     dataset = generate_dataset(base_config,
                                num_entities=100,
                                fractions=[0.4])
     self.addCleanup(dataset.cleanup)
     train_config = attr.evolve(
         base_config,
         entity_path=dataset.entity_path.name,
         edge_paths=[dataset.relation_paths[0].name],
     )
     # Just make sure no exceptions are raised and nothing crashes.
     trainer0 = Process(name="trainer#0",
                        target=train,
                        args=(train_config, ),
                        kwargs={"rank": 0})
     trainer1 = Process(name="trainer#1",
                        target=train,
                        args=(train_config, ),
                        kwargs={"rank": 1})
     partition_server = Process(name="partition server#0",
                                target=run_partition_server,
                                args=(train_config, ),
                                kwargs={"rank": 0})
     # FIXME In Python 3.7 use kill here.
     self.addCleanup(trainer0.terminate)
     self.addCleanup(trainer1.terminate)
     self.addCleanup(partition_server.terminate)
     trainer0.start()
     trainer1.start()
     partition_server.start()
     done = [False, False]
     while not all(done):
         time.sleep(1)
         if not trainer0.is_alive() and not done[0]:
             self.assertEqual(trainer0.exitcode, 0)
             done[0] = True
         if not trainer1.is_alive() and not done[1]:
             self.assertEqual(trainer1.exitcode, 0)
             done[1] = True
         if not partition_server.is_alive():
             self.fail("Partition server died with exit code %d" %
                       partition_server.exitcode)
     partition_server.terminate()  # Cannot be shut down gracefully.
     partition_server.join()
     logging.info("Partition server died with exit code %d",
                  partition_server.exitcode)
     self.assertCheckpointWritten(train_config, version=1)
Beispiel #49
0
 # The parent process creates the message queue and passes it to the child processes:
 q = Queue(3)
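 # capacity 3: q.put() blocks once three unread items are queued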
 pw = Process(target=write, args=(
     q,
     3,
 ))
 pr = Process(target=read, args=(q, ))
 # Start the writer child process pw:
 pw.start()
 # Start the reader child process pr:
 pr.start()
 is_running('pw')
 is_running('pr')
 # Wait for pw to finish:
 pw.join()
 print('Is writing process alive?', pw.is_alive())
 is_running('pw')
 is_running('pr')
 time.sleep(5)  # give pr time to read the last item
 # pr runs an infinite loop and blocks waiting for messages, so it can only be force-terminated:
 print('Is reading process alive?', pr.is_alive())
 is_running('pw')
 is_running('pr')
 pr.terminate()
 print('Main Process kills the reading process')
 print('Is reading process alive?', pr.is_alive())
 is_running('pw')
 is_running('pr')
 print('Is reading process alive?', pr.is_alive())
 print('All data have been written and read.')
 print('Is reading process alive?', pr.is_alive())
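
The reading process above never exits on its own, so it has to be stopped with terminate(). A common alternative, sketched below on the assumption that the reader may treat one queue value as special, is to send a sentinel (a "poison pill") so the child can exit voluntarily and be joined cleanly; this write/read pair is a hypothetical stand-in for the worker functions used above:

from multiprocessing import Process, Queue

SENTINEL = None  # any marker value the writer never sends as real data

def write(q, count):
    for i in range(count):
        q.put(i)
    q.put(SENTINEL)  # tell the reader there is nothing more to come

def read(q):
    while True:
        item = q.get()  # blocks until a message arrives
        if item is SENTINEL:
            break  # exit voluntarily instead of being terminate()d
        print('Get %s from queue.' % item)

if __name__ == '__main__':
    q = Queue(3)
    pr = Process(target=read, args=(q,))
    pr.start()
    write(q, 3)
    pr.join()  # clean join; no terminate() needed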
Beispiel #50
0
        frame = pygame.surfarray.make_surface(back)
        #frame = pygame.transform.scale(frame, tuple(sc_shape))
        rects.append(screen.blit(frame, (0, 0)))

        pgClock.tick(pgFps)
        pygame.display.update(rects)


if __name__ == "__main__":
    cam_q = Queue()  # the Queue for camera img
    is_Running = Value(c_bool, True)
    saving = Value(c_bool, False)

    man = Manager()
    form = man.list([None, None])
    camera = Process(args=(cam_q, is_Running, form, ), target=grabCam,
                     kwargs={'mode': "camera", "c_num": 0, "secs": 100, "saving": saving})
    windows = Process(target=showImg, args=(cam_q, is_Running, form,),
                      kwargs={'mode': "traj", 'calibrate': False, "full": False, "saving": saving})

    windows.start()
    camera.start()
    while True:
        time.sleep(1)

        if not (camera.is_alive() and windows.is_alive()):
            windows.terminate()
            camera.terminate()
            camera.join()
            windows.join()
            break
Beispiel #51
0
class BaseIndex(object):
    """Search Index Interface
    """
    config = {
        'async_reindex': 0,
        'ignore_errors': 0,
        'reindex_check': '1,1',
        'number_of_shards': 4,
        'index_parallel': 1,
        'index_speed': 100,
        'error_wait': 10,
    }
    index_settings_keys = ['number_of_shards']
    allow_async_reindex = False
    force_next_reindex = False
    magic_exit_code = 84
    check_all_field = True
    skip_check_count = False
    reindex_process = None
    next_index_name = None
    last_current_index = None

    def __init__(self, engine, source, config={}):
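        # note: self.config starts as the class-level dict, so update() below mutates state shared by all instances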
        assert(self.__index_name__)
        if config:
            self.config.update(config)
            self.config['index_speed'] = float(self.config['index_speed'])
        rename_key = 'rename_' + self.__index_name__
        if rename_key in self.config:
            self.__index_name__ = self.config[rename_key]
        if self.allow_async_reindex:
            self.allow_async_reindex = self.config['async_reindex']
        self.set_reindex_options(self.config.get('reindex', ''),
            self.config.get('reindex_check', ''))
        self.source = source
        self.engine = engine
        self.engine.add_index(self)
        self.after_init()

    def __del__(self):
        self.stop_childs()

    def __str__(self):
        return self.__index_name__

    def __repr__(self):
        return self.__index_name__

    @classmethod
    def name(klass):
        return klass.__index_name__

    @staticmethod
    def index_created_time(name):
        prefix, suffix = name.rsplit('_', 1)
        try:
            s_time = time.strptime(suffix, SUFFIX_FORMAT)
            suffix = time.mktime(s_time)
        except:
            suffix = 0
        return suffix

    @property
    def current_index(self):
        key = self.__index_name__
        return self.engine.get_index(key)

    def index_age(self, name=None):
        if not name:
            name = self.current_index
        if not name:
            return time.time()
        suffix = BaseIndex.index_created_time(name)
        return int(time.time() - int(suffix))

    def set_reindex_options(self, reindex_period, reindex_check):
        # reindex_period - two digits, first is age in days, second is weekday
        if reindex_period:
            self.max_age, self.reindex_day = map(int, reindex_period.split(','))
            self.max_age *= 86400
        # reindex_check - two digits, first is min docs count, second is max age of last doc
        if reindex_check:
            self.rc_mindocs, self.rc_max_age = map(int, reindex_check.split(','))
            self.rc_max_age *= 86400

    def after_init(self):
        pass

    def need_reindex(self):
        return not self.current_index

    def create_index(self, name):
        return

    def new_index(self, is_async=False):
        index_key = self.__index_name__
        index_key_next = "{}.next".format(index_key)
        # try restore last index (in case of crash)
        name = self.engine.get_index(index_key_next)
        current_index = self.current_index
        if current_index and name <= current_index:
            name = None
        if name and not name.startswith(index_key):
            name = None
        if name and not self.engine.index_exists(name):
            name = None
        if name and self.index_age(name) > 5 * 24 * 3600:
            name = None
        if name:
            logger.info("Use already created index %s", name)
        else:
            suffix = time.strftime(SUFFIX_FORMAT)
            name = "{}_{}".format(index_key, suffix)
            self.create_index(name)
            self.engine.set_index(index_key_next, name)
        # check current not same to new
        assert name != current_index, "same index name"
        return name

    def delete_index(self, name):
        index_key = self.__index_name__
        index_key_prev = "{}.prev".format(index_key)
        self.engine.set_index(index_key_prev, name)

    def set_current(self, name):
        if self.engine.should_exit:
            return
        index_key = self.__index_name__
        old_index = self.current_index
        if name != old_index:
            logger.info("Change current %s index %s -> %s",
                        index_key, old_index, name)
            if self.check_index(name):
                self.engine.set_index(index_key, name)
            self.last_current_index = name
            # assert(self.current_index == name)
            if old_index:
                self.delete_index(old_index)
        # remove index.next key
        index_key_next = "{}.next".format(index_key)
        if self.engine.get_index(index_key_next) == name:
            self.engine.set_index(index_key_next, '')
        return name

    def test_exists(self, index_name, info):
        return self.engine.test_exists(index_name, info)

    def test_noindex(self, item):
        return False

    def before_index_item(self, item):
        return True

    def handle_error(self, error, exc_info):
        if self.config['ignore_errors']:
            logger.error("%s %s (ignored)", type(error).__name__, str(error))
        else:
            raise exc_info[0], exc_info[1], exc_info[2]

    def indexing_stat(self, index_name, fetched, indexed, iter_count, last_date):
        if not last_date and fetched < 10 and indexed < 1:
            return
        logger.info("[%s] Fetched %d indexed %d last %s",
            index_name, fetched, indexed, last_date or '-')
        pause = float(iter_count) / float(self.config['index_speed'] or 1)
        if pause > 1:
            logger.info("Wait %1.1f sec", pause)
        if pause > 0.01:
            self.engine.sleep(pause)

    def index_item(self, index_name, item):
        if not item.get('meta') or not item.get('data'):
            logger.error("[%s] No data %s", index_name, str(item))
            return None
        if item['meta']['dateModified'] != item['data']['dateModified']:
            logger.error("[%s] dateModified mismatch %s", index_name, str(item))
            return None
        if self.test_noindex(item):
            if self.engine.debug:
                logger.debug("[%s] Noindex %s %s", index_name,
                             item['data'].get('id', ''),
                             item['data'].get('tenderID', ''))
            return None

        self.before_index_item(item)

        return self.engine.index_item(index_name, item)

    def index_source(self, index_name=None, reset=False, reindex=False):
        if self.engine.slave_mode:
            if not self.engine.heartbeat(self.source):
                self.engine.sleep(1)
                return

        if not index_name:
            # also check maybe current index was changed
            if self.last_current_index != self.current_index:
                self.last_current_index = self.current_index
                reset = True
            index_name = self.current_index

        if not index_name:
            if not self.reindex_process:
                logger.warning("No current index for %s", repr(self))
            return

        if reset or self.source.need_reset():
            self.source.reset()

        index_count = 0
        total_count = 0
        # heartbeat always True in master mode
        # heartbeat return True in slave mode only if master fail
        while self.engine.heartbeat(self.source):
            info = {}
            iter_count = 0
            items_list = self.source.items()
            if not items_list:
                break
            for info in items_list:
                if self.engine.should_exit:
                    return
                if not self.test_exists(index_name, info):
                    try:
                        item = self.source.get(info)
                        if self.index_item(index_name, item):
                            index_count += 1
                    except Exception as e:
                        self.handle_error(e, sys.exc_info())
                # update statistics
                iter_count += 1
                total_count += 1
                # update heartbeat for long indexing
                if iter_count >= 500:
                    self.indexing_stat(
                        index_name, total_count, index_count,
                        iter_count, info.get('dateModified', '-'))
                    if not self.engine.heartbeat(self.source):
                        break
                    iter_count = 0

            if self.engine.should_exit:
                return
            # break if nothing iterated
            if iter_count > 0:
                self.indexing_stat(index_name, total_count, index_count,
                    iter_count, info.get('dateModified', '-'))
            elif getattr(self.source, 'last_skipped', None):
                last_skipped = self.source.last_skipped or ""
                logger.info("[%s] Fetched %d, last_skipped %s",
                    index_name, total_count, last_skipped or '-')
            elif not info:
                break
            # break on each iteration if not in full reindex mode
            if not reindex and self.config['index_parallel'] and index_count < total_count:
                logger.debug("[%s] Swith loop", index_name)
                return

        return index_count

    def stop_childs(self):
        if self.source:
            self.source.should_exit = True
        if not self.reindex_process or not self.reindex_process.pid:
            return
        if self.reindex_process.pid == os.getpid():
            return
        logger.info("Terminate subprocess %s pid %s",
            self.reindex_process.name, str(self.reindex_process.pid))
        try:
            self.reindex_process.terminate()
        except (AttributeError, OSError):
            pass

    def check_subprocess(self):
        if self.reindex_process:
            self.reindex_process.join(1)
        if not self.reindex_process or self.reindex_process.is_alive():
            return
        if self.reindex_process.exitcode == self.magic_exit_code:
            logger.info("Reindex-%s subprocess success, reset source",
                self.__index_name__)
            if self.next_index_name:
                self.set_current(self.next_index_name)
                self.next_index_name = None
            self.source.reset()
        else:
            logger.error("Reindex-%s subprocess fail, exitcode = %d",
                self.__index_name__, self.reindex_process.exitcode)
        # close process
        self.reindex_process = None

    def check_index(self, index_name):
        if not index_name or self.engine.should_exit:
            return False

        # check index mappings by check _all field
        if self.check_all_field:
            try:
                info = self.engine.index_info(index_name)
                stat = self.engine.index_stats(index_name)
            except Exception as e:
                logger.error("[%s] Check index failed: %s", index_name, str(e))
                self.force_next_reindex = True
                return False
            doc_type = self.source.__doc_type__
            if '_all' not in info['mappings'][doc_type]:
                logger.error("[%s] Check index failed: _all field not found, please reindex!",
                    index_name)
                self.force_next_reindex = True
                return False

        if self.skip_check_count:
            if self.check_all_field:
                logger.info("[%s] Total docs %d, last indexed not tested",
                    index_name, stat['docs']['count'])
            return True

        # check index docs count after reindex
        body = {
            "query": {"match_all": {}},
            "sort": {"dateModified": {"order": "desc"}}
        }
        try:
            res = self.engine.search(body, start=0, limit=1, index=index_name)
        except:
            res = None
        if not res or not res.get('items'):
            logger.error("[%s] Check failed: empty or corrupted index", index_name)
            return False

        logger.info("[%s] Total docs %d, last indexed %s",
            index_name, res['total'], res['items'][0]['dateModified'])

        if self.rc_mindocs and res['total'] < self.rc_mindocs:
            logger.error("[%s] Check index failed: not enought docs %d, required %d",
                index_name, res['total'], self.rc_mindocs)
            return False

        if self.rc_max_age:
            min_date = datetime.now() - timedelta(seconds=self.rc_max_age)
            iso_min_date = min_date.isoformat()
            last_indexed = res['items'][0]['dateModified']
            if last_indexed < iso_min_date:
                logger.error("[%s] Check index failed: last indexed is too old %s, "+
                    "required %s", index_name, last_indexed, iso_min_date)
                return False

        return True

    def check_on_start(self):
        if not self.current_index:
            return True
        if self.check_index(self.current_index):
            return True
        if not self.engine.index_exists(self.current_index):
            self.set_current('')

    def async_reindex(self):
        logger.info("*** Start Reindex-%s in subprocess",
            self.__index_name__)
        # reconnect elatic and prevent future stop_childs
        self.engine.start_in_subprocess()

        self.index_source(self.next_index_name, reset=True, reindex=True)

        if self.check_index(self.next_index_name):
            exit_code = self.magic_exit_code
        else:
            exit_code = 1

        # exit with specific code to signal master process reset source
        logger.info("*** Exit subprocess")
        sys.exit(exit_code)

    def reindex(self):
        # check reindex process is alive
        if self.reindex_process and self.reindex_process.is_alive():
            return

        # clear reindex flag
        if self.force_next_reindex:
            self.force_next_reindex = False

        logger.info("Need reindex %s", self.__index_name__)
        # create new index and save name
        self.next_index_name = self.new_index()

        # reindex in old-way sync mode
        if not self.allow_async_reindex:
            self.index_source(self.next_index_name, reset=True, reindex=True)
            if self.check_index(self.next_index_name):
                self.set_current(self.next_index_name)
                return True
            return False

        # reindex in async mode, start new reindex process
        proc_name = "Reindex-%s" % self.__index_name__
        self.reindex_process = Process(
            target=self.async_reindex,
            name=proc_name)
        self.reindex_process.daemon = True
        self.reindex_process.start()
        # wait for child
        retry_count = 0
        while not self.reindex_process.is_alive() and retry_count < 30:
            self.engine.sleep(1)
            retry_count += 1
        # check child is alive
        if self.reindex_process.is_alive():
            logger.info("Subprocess started %s pid %d",
                self.reindex_process.name, self.reindex_process.pid)
        else:
            logger.error("Can't start subprocess")

    def process(self, allow_reindex=True):
        if self.engine.should_exit:
            return

        if self.reindex_process:
            self.check_subprocess()

        if self.need_reindex() and allow_reindex:
            self.reindex()

        return self.index_source()
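
The async reindex above reports success to the parent through a process exit code: async_reindex calls sys.exit() with magic_exit_code (84) when the new index checks out, and check_subprocess inspects reindex_process.exitcode after joining. A minimal self-contained sketch of that handshake, with names of our own choosing:

import sys
from multiprocessing import Process

MAGIC_EXIT_CODE = 84  # same convention as BaseIndex.magic_exit_code

def reindex_worker(ok):
    # ... the reindexing work would happen here ...
    sys.exit(MAGIC_EXIT_CODE if ok else 1)

if __name__ == '__main__':
    p = Process(target=reindex_worker, args=(True,))
    p.daemon = True
    p.start()
    p.join()
    if p.exitcode == MAGIC_EXIT_CODE:
        print("subprocess signalled success, reset the source")
    else:
        print("subprocess failed, exitcode = %s" % p.exitcode)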
Beispiel #52
0
    t = threading.Thread(target=io, args=(run, q))
    t.start()

    # Start display process
    p = Process(target=display, args=('bob', q))
    p.start()

    #custom code here ..

    for i in range(0, 10):
        #time.sleep(1)
        print("wait")

    print("Close Graph to stop and save data")

    while p.is_alive():
        time.sleep(0.33)

    pmtoff()
    laseroff()

    run.clear()
    print("Waiting for scheduler thread to join...")
    t.join()
    print("Waiting for graph window process to join...")

    err(mydll.rcDisconnect())
    print("Disconnecting laser!")

    p.join()
    print("Finished!")
Beispiel #53
0
class RTLSDRTCP(Device):
    BYTES_PER_SAMPLE = 2  # RTLSDR device produces 8 bit unsigned IQ data
    MAXDATASIZE = 65536
    ENDIAN = "big"
    RTL_TCP_CONSTS = [
        "NULL", "centerFreq", "sampleRate", "tunerGainMode", "tunerGain",
        "freqCorrection", "tunerIFGain", "testMode", "agcMode",
        "directSampling", "offsetTuning", "rtlXtalFreq", "tunerXtalFreq",
        "gainByIndex", "bandwidth", "biasTee"
    ]

    def __init__(self, freq, gain, srate, device_number, is_ringbuffer=False):
        super().__init__(0, freq, gain, srate, is_ringbuffer)

        self.open()  #open("127.0.0.1", 1234)

        self.success = 0

        self.is_receiving_p = Value('i', 0)
        """
        Shared Value to communicate with the receiving process.

        """

        #self.bandwidth_is_adjustable = hasattr(rtlsdr, "set_tuner_bandwidth")   # e.g. not in Manjaro Linux / Ubuntu 14.04
        self._max_frequency = 6e9
        self._max_sample_rate = 3200000
        self._max_bandwidth = 3200000
        self._max_gain = 500  # Todo: Consider get_tuner_gains for allowed gains here

        self.device_number = device_number

    def open(self, hostname="127.0.0.1", port=1234):
        try:
            # Create socket and connect
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                                      socket.IPPROTO_TCP)
            self.sock.settimeout(1.0)  # Timeout 1s
            self.sock.connect((hostname, port))

            # Receive rtl_tcp initial data
            init_data = self.sock.recv(self.MAXDATASIZE)

            if len(init_data) != 12:
                return False
            if init_data[0:4] != b'RTL0':
                return False

            # Extract tuner name
            tuner_number = int.from_bytes(init_data[4:8], self.ENDIAN)
            self.tuner = {
                1: "E4000", 2: "FC0012", 3: "FC0013",
                4: "FC2580", 5: "R820T", 6: "R828D"
            }.get(tuner_number, "Unknown")

            # Extract IF and RF gain
            self.if_gain = int.from_bytes(init_data[8:10], self.ENDIAN)
            self.rf_gain = int.from_bytes(init_data[10:12], self.ENDIAN)

        except OSError as e:
            logger.info("Could not connect to rtl_tcp %s:%d (%s)", hostname,
                        port, e)

    def close(self):
        self.sock.close()

    def set_parameter(self, param, value):  # returns error (True/False)
        msg = self.RTL_TCP_CONSTS.index(param).to_bytes(
            1, self.ENDIAN)  # Set param at bits 0-7
        msg += value.to_bytes(4, self.ENDIAN)  # Set value at bits 8-39

        try:
            self.sock.sendall(msg)  # Send data to rtl_tcp
        except OSError as e:
            logger.info("Could not set parameter %s to %s (msg=%s): %s",
                        param, value, msg, e)
            return True

        return False

    def start_rx_mode(self):
        self.init_recv_buffer()

        self.is_open = True
        self.is_receiving = True
        self.receive_process = Process(
            target=receive_sync,
            args=(self.child_conn, self.device_number, self.frequency,
                  self.sample_rate, self.gain))
        self.receive_process.daemon = True
        self._start_read_rcv_buffer_thread()
        self.receive_process.start()

    def stop_rx_mode(self, msg):
        self.is_receiving = False
        self.parent_conn.send("stop")

        logger.info("RTLSDRTCP: Stopping RX Mode: " + msg)

        if hasattr(self, "receive_process"):
            self.receive_process.join(0.3)
            if self.receive_process.is_alive():
                logger.warning(
                    "RTLSDRTCP: Receive process is still alive, terminating it"
                )
                self.receive_process.terminate()
                self.receive_process.join()
                self.parent_conn, self.child_conn = Pipe()

        if hasattr(self, "read_queue_thread"
                   ) and self.read_recv_buffer_thread.is_alive():
            try:
                self.read_recv_buffer_thread.join(0.001)
                logger.info("RTLSDRTCP: Joined read_queue_thread")
            except RuntimeError:
                logger.error("RTLSDRTCP: Could not join read_queue_thread")

    def read_sync(self):
        return self.sock.recv(self.MAXDATASIZE)

    def set_device_frequency(self, frequency):
        error = self.set_parameter("centerFreq", int(frequency))
        self.log_retcode(error, "Set center frequency")
        return error

    def set_device_sample_rate(self, sample_rate):
        error = self.set_parameter("sampleRate", int(sample_rate))
        self.log_retcode(error, "Set sample rate")
        return error

    def set_freq_correction(self, ppm):
        error = self.set_parameter("freqCorrection", int(ppm))
        self.log_retcode(error, "Set frequency correction")
        return error

    def set_offset_tuning(self, on: bool):
        error = self.set_parameter("offsetTuning", on)
        self.log_retcode(error, "Set offset tuning")
        return error

    def set_gain_mode(self, manual: bool):
        error = self.set_parameter("tunerGainMode", manual)
        self.log_retcode(error, "Set gain mode manual")
        return error

    def set_if_gain(self, gain):
        error = self.set_parameter("tunerIFGain", int(gain))
        self.log_retcode(error, "Set IF gain")
        return error

    def set_gain(self, gain):
        error = self.set_parameter("tunerGain", int(gain))
        self.log_retcode(error, "Set tuner gain")
        return error

    def set_bandwidth(self, bandwidth):
        error = self.set_parameter("bandwidth", int(bandwidth))
        self.log_retcode(error, "Set tuner bandwidth")
        return error

    @staticmethod
    def unpack_complex(buffer, nvalues: int):
        """
        The raw, captured IQ data is 8 bit unsigned data.

        :return:
        """
        result = np.empty(nvalues, dtype=np.complex64)
        unpacked = np.frombuffer(buffer,
                                 dtype=[('r', np.uint8), ('i', np.uint8)])
        result.real = (unpacked['r'] / 127.5) - 1.0
        result.imag = (unpacked['i'] / 127.5) - 1.0
        return result

    @staticmethod
    def pack_complex(complex_samples: np.ndarray):
        return (127.5 * (complex_samples.view(np.float32) + 1.0)).astype(
            np.uint8).tobytes()
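As a hedged illustration of the scaling these two helpers perform (the expected values follow directly from the formulas above):

# two IQ pairs of raw 8-bit unsigned data: (0, 255) and (127, 128)
raw = bytes([0, 255, 127, 128])
iq = RTLSDRTCP.unpack_complex(raw, 2)
# iq[0] == -1.0 + 1.0j; iq[1] is roughly -0.0039 + 0.0039j
packed = RTLSDRTCP.pack_complex(iq)
# astype(np.uint8) truncates, so the roundtrip can be off by one LSB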
Beispiel #54
0
class AppTask(WatchTask):
    template_files = '.html', '.jinja', '.jinja2'

    def __init__(self, config: Config, loop: asyncio.AbstractEventLoop):
        self._config = config
        self._reloads = 0
        self._session = ClientSession(loop=loop)
        self._runner = None
        super().__init__(self._config.code_directory, loop)

    async def _run(self):
        try:
            self._start_dev_server()
            running = True
            while running:
                try:
                    service_next = asyncio.ensure_future(self._awatch.__anext__())
                    common_next = asyncio.ensure_future(self._commonql_awatch.__anext__())
                    done, pending = await asyncio.wait((service_next, common_next), return_when=asyncio.FIRST_COMPLETED)
                    for pending_task in pending:
                        pending_task.cancel()
                    changes = done.pop().result()
                except StopAsyncIteration:
                    running = False
                else:
                    self._reloads += 1
                    if any(f.endswith('.py') for _, f in changes):
                        logger.debug('%d changes, restarting server', len(changes))
                        self._stop_dev_server()
                        self._start_dev_server()
                        await self._src_reload_when_live()
                    elif len(changes) > 1 or any(f.endswith(self.template_files) for _, f in changes):
                        # reload all pages
                        await src_reload(self._app)
                    else:
                        # a single (non template) file has changed, reload a single file.
                        await src_reload(self._app, changes.pop()[1])
        except Exception as exc:
            logger.exception(exc)
            await self._session.close()
            raise AiohttpDevException('error running dev server')

    async def _src_reload_when_live(self, checks=20):
        if self._app[WS]:
            url = 'http://localhost:{.main_port}/?_checking_alive=1'.format(self._config)
            logger.debug('checking app at "%s" is running before prompting reload...', url)
            for i in range(checks):
                await asyncio.sleep(0.1)
                try:
                    async with self._session.get(url):
                        pass
                except OSError as e:
                    logger.debug('try %d | OSError %d app not running', i, e.errno)
                else:
                    logger.debug('try %d | app running, reloading...', i)
                    await src_reload(self._app)
                    return

    def _start_dev_server(self):
        act = 'Start' if self._reloads == 0 else 'Restart'
        logger.info('%sing dev server at http://%s:%s ●', act, self._config.host, self._config.main_port)

        try:
            tty_path = os.ttyname(sys.stdin.fileno())
        except OSError:  # pragma: no branch
            # fileno() always fails with pytest
            tty_path = '/dev/tty'
        except AttributeError:
            # on Windows stdin has no tty; fall back to None
            tty_path = None

        self._process = Process(target=serve_main_app, args=(self._config, tty_path))
        self._process.start()

    def _stop_dev_server(self):
        if self._process.is_alive():
            logger.debug('stopping server process...')
            os.kill(self._process.pid, signal.SIGINT)
            self._process.join(5)
            if self._process.exitcode is None:
                logger.warning('process has not terminated, sending SIGKILL')
                os.kill(self._process.pid, signal.SIGKILL)
                self._process.join(1)
            else:
                logger.debug('process stopped')
        else:
            logger.warning('server process already dead, exit code: %s', self._process.exitcode)

    async def close(self, *args):
        self.stopper.set()
        self._stop_dev_server()
        await asyncio.gather(super().close(), self._session.close())
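_stop_dev_server() above escalates from SIGINT to SIGKILL. Extracted as a standalone helper, the pattern looks roughly like this (a sketch, not part of the project's API):

import os
import signal
from multiprocessing import Process

def stop_gracefully(process: Process, grace: float = 5.0):
    # ask politely first so the server can run its cleanup handlers
    os.kill(process.pid, signal.SIGINT)
    process.join(grace)
    if process.exitcode is None:
        # still alive after the grace period: force it down
        os.kill(process.pid, signal.SIGKILL)
        process.join(1)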
Beispiel #55
0
def run_task(task, internal_storage):
    """
    Runs a single job within a separate process
    """
    start_tstamp = time.time()
    setup_lithops_logger(task.log_level)

    backend = os.environ.get('__LITHOPS_BACKEND', '')
    logger.info("Lithops v{} - Starting {} execution".format(__version__, backend))
    logger.info("Execution ID: {}/{}".format(task.job_key, task.id))

    if task.runtime_memory:
        logger.debug('Runtime: {} - Memory: {}MB - Timeout: {} seconds'
                     .format(task.runtime_name, task.runtime_memory, task.execution_timeout))
    else:
        logger.debug('Runtime: {} - Timeout: {} seconds'.format(task.runtime_name, task.execution_timeout))

    env = task.extra_env
    env['LITHOPS_WORKER'] = 'True'
    env['PYTHONUNBUFFERED'] = 'True'
    env['LITHOPS_CONFIG'] = json.dumps(task.config)
    env['__LITHOPS_SESSION_ID'] = '-'.join([task.job_key, task.id])
    os.environ.update(env)

    call_status = CallStatus(task.config, internal_storage)
    call_status.response['worker_start_tstamp'] = start_tstamp
    call_status.response['host_submit_tstamp'] = task.host_submit_tstamp
    call_status.response['call_id'] = task.id
    call_status.response['job_id'] = task.job_id
    call_status.response['executor_id'] = task.executor_id

    show_memory_peak = strtobool(os.environ.get('SHOW_MEMORY_PEAK', 'False'))

    try:
        # send init status event
        call_status.send('__init__')

        if show_memory_peak:
            mm_handler_conn, mm_conn = Pipe()
            memory_monitor = Thread(target=memory_monitor_worker, args=(mm_conn, ))
            memory_monitor.start()

        task.stats_file = os.path.join(task.task_dir, 'task_stats.txt')
        handler_conn, jobrunner_conn = Pipe()
        taskrunner = TaskRunner(task, jobrunner_conn, internal_storage)
        logger.debug('Starting TaskRunner process')
        jrp = Process(target=taskrunner.run) if is_unix_system() else Thread(target=taskrunner.run)
        jrp.start()

        jrp.join(task.execution_timeout)
        logger.debug('TaskRunner process finished')

        if jrp.is_alive():
            # If the process is still alive after jrp.join(task.execution_timeout), kill it
            try:
                jrp.terminate()
            except Exception:
                # thread does not have terminate method
                pass
            msg = ('Function exceeded maximum time of {} seconds and was '
                   'killed'.format(task.execution_timeout))
            raise TimeoutError('HANDLER', msg)

        if show_memory_peak:
            mm_handler_conn.send('STOP')
            memory_monitor.join()
            peak_memory_usage = int(mm_handler_conn.recv())
            logger.info("Peak memory usage: {}".format(sizeof_fmt(peak_memory_usage)))
            call_status.response['peak_memory_usage'] = peak_memory_usage

        if not handler_conn.poll():
            logger.error('No completion message received from JobRunner process')
            logger.debug('Assuming memory overflow...')
            # Only 1 message is returned by jobrunner when it finishes.
            # If no message, this means that the jobrunner process was killed.
            # 99% of the time the jobrunner is killed due to an OOM, so we assume an OOM here.
            msg = 'Function exceeded maximum memory and was killed'
            raise MemoryError('HANDLER', msg)

        if os.path.exists(task.stats_file):
            with open(task.stats_file, 'r') as fid:
                for line in fid:
                    key, value = line.strip().split(" ", 1)
                    try:
                        call_status.response[key] = float(value)
                    except Exception:
                        call_status.response[key] = value
                    if key in ['exception', 'exc_pickle_fail', 'result', 'new_futures']:
                        call_status.response[key] = eval(value)

    except Exception:
        # internal runtime exceptions
        print('----------------------- EXCEPTION !-----------------------')
        traceback.print_exc(file=sys.stdout)
        print('----------------------------------------------------------')
        call_status.response['exception'] = True

        pickled_exc = pickle.dumps(sys.exc_info())
        pickle.loads(pickled_exc)  # this is just to make sure they can be unpickled
        call_status.response['exc_info'] = str(pickled_exc)

    finally:
        call_status.response['worker_end_tstamp'] = time.time()

        # Flush log stream and save it to the call status
        task.log_stream.flush()
        with open(task.log_file, 'rb') as lf:
            log_str = base64.b64encode(zlib.compress(lf.read())).decode()
            call_status.response['logs'] = log_str

        call_status.send('__end__')

        # Unset specific env vars
        for key in task.extra_env:
            os.environ.pop(key, None)
        os.environ.pop('__LITHOPS_TOTAL_EXECUTORS', None)

        logger.info("Finished")
Beispiel #56
0
    def crawl(self, cp_engines=None):
        def _browser_launch(queue, cp_engines, opts, urls, users, browser_id,
                            logfile, workdir, pt_suite):
            cpbr = CPBrowserRunner(cp_engines, opts, urls, users, browser_id,
                                   logfile, workdir, pt_suite)
            if browser_id:
                try:
                    cpbr.run()
                except RuntimeError:
                    logging.error(traceback.format_exc())
            else:
                cpbr.run()
            if queue:
                queue.put(cpbr)
            return cpbr

        if not cp_engines:
            cp_engines = [CPEngineBase]

        if self.opts is None:
            raise CPCrawlerException(
                "the init_opts() method must be called before run()")

        users = self._gen_users(self.opts.user)

        if users and len(users) > 1 and len(users) > self.opts.real_browsers:
            raise CPCrawlerException(
                "ERROR: number of users (%d) can't be higher than number of browsers (%d)"
                % (len(users), self.opts.real_browsers))

        if self.opts.real_browsers > 0:
            real_browsers = []
            cpbr_objs = {}

            for i in range(1, self.opts.real_browsers + 1):
                cpbr_objs[i] = Queue()
                b = Process(target=_browser_launch,
                            args=(cpbr_objs[i], cp_engines, self.opts,
                                  self.urls, users, i, self.logfile,
                                  self.workdir, self.pt_suite))
                b.start()
                real_browsers.append(b)
                time.sleep(self.opts.instances_delay)

            delay = 3
            telemetry_fname = os.path.join(self.workdir, "telemetry.log")
            report_fname = os.path.join(self.workdir, "report.html")

            print("")
            print("Notes:")
            print("   * Tests executed by %d %s browsers%s%s" %
                  (self.opts.real_browsers, self.opts.browser,
                   " and %d python browsers" %
                   (self.opts.real_browsers * self.opts.python_browsers)
                   if self.opts.python_browsers else "",
                   " on %d users:\n       %s" %
                   (len(users), "\n       ".join(users)) if users else ""))
            print("   * All data is stored in %s" % self.workdir)
            print("   * The %s file is updated every %d sec" %
                  (report_fname, delay))
            print("")

            while True:
                time.sleep(delay)
                if not any(b.is_alive() for b in real_browsers):
                    break

            page_stats_summary = PageStatsSummary()

            for i in range(1, self.opts.real_browsers + 1):
                cpbr = cpbr_objs[i].get()
                if os.path.exists(cpbr.stderr_fname):
                    with open(cpbr.stderr_fname, 'r') as f:
                        data = f.read()
                    if data:
                        print("browser.%d stderr:\n%s" % (i, data))

                for ps in cpbr.page_stats_summary.page_stats:
                    page_stats_summary.add_page_stats(ps)

        else:
            cpbr = _browser_launch(None, cp_engines, self.opts, self.urls,
                                   users, 0, self.logfile, self.workdir,
                                   self.pt_suite)
            page_stats_summary = cpbr.page_stats_summary

        return page_stats_summary.page_stats
Beispiel #57
0
class Transformer:
  '''The transformer'''
  TICK = 0.2
  WORLD_TICK = 0.1

  def __init__(self,
               initial_state: (float, float),
               goal_state: (float, float),
               agent_ctor,
               action_velocities=((-0.5, 0.5), (-0.5, 0.5)),
               action_time_max=0.5):
    self.transformers = {}
    self.state = TransformerIState.WaitAction
    self.reverse_actions = []
    self.queue = Queue()
    self.thread = None
    self.agent = agent_ctor(self, initial_state, goal_state, action_velocities, action_time_max)
    self.other_starts_goals = []
    self.log = logging.getLogger(f'tf-{self.agent.id}')
    self.need_acknowledgement = set()
    self.conflicts_with = set()
    self.current_action = None
    self.is_leader = self.get_id() == 0
    self.tick_count = 0
    self.reverse_schedule = None
    self.awaited_reversions = None
    self.pose = self.agent.pose

  def compute_reverse_schedule(self, action_log):
    '''Figure out how far back to go and what actions can run in parallel'''
    # TODO: still a stub; walk backwards through the log (bounded, so the
    # loop always terminates) and build the parallel reverse layers
    conflicted_agents = self.conflicts_with.copy()
    back_idx = -1
    while conflicted_agents and -back_idx <= len(action_log):
      layer = action_log[back_idx]  # placeholder: layers are not used yet
      back_idx -= 1

    return []

  def update_ticks(self, _, count):
    '''Update the internal tick counter'''
    self.tick_count = max(count, self.tick_count)

  def register_transformer(self, transformer):
    '''Register a new transformer'''
    self.transformers[transformer.get_id()] = TransformerState(transformer.get_start(), [],
                                                               None, False, None,
                                                               transformer.get_queue(), False)
    self.other_starts_goals.append(transformer.get_start())
    self.other_starts_goals.append(transformer.get_goal())

  def get_start(self):
    '''Return the agent's start'''
    return self.agent.pose

  def get_goal(self):
    '''Return the agent's goal'''
    return self.agent.goal

  def get_id(self) -> int:
    '''Getter for this transformer's ID (same as its agent's ID)'''
    return self.agent.id

  def get_queue(self):
    '''Getter for this transformer's queue'''
    return self.queue

  def get_agent(self):
    '''Getter for this transformer's agent'''
    return self.agent

  def start(self):
    '''Start the process'''
    if not self.thread:
      other_starts_goals = np.vstack(self.other_starts_goals)
      self.thread = Process(target=self.run, args=(self.queue, other_starts_goals))
      self.thread.start()

    return self.thread

  def stop(self):
    '''Stop the process'''
    if self.thread and self.thread.is_alive():
      self.state = TransformerIState.AtGoal
      self.thread.join()

  def agent_request(self, other_starts_goals, _, action_req):
    '''Handle an action request from an agent'''
    if self.state is not TransformerIState.WaitAction:
      self.log.warning('Ignoring action request; mode is %s', self.state)
      return
    self.log.info('Received agent request')

    self.conflicts_with.clear()
    action, start_pose = action_req
    self.current_action = action_req
    for step in np.arange(Transformer.WORLD_TICK, action.time, Transformer.WORLD_TICK):
      pose = start_pose + step * action.vel_vec
      pose_array = np.full_like(other_starts_goals, pose)
      # Check that the action doesn't conflict with starts/goals
      if np.linalg.norm(pose_array - other_starts_goals,
                        -np.inf) <= Agent.GOAL_THRESHOLD + Agent.RADIUS:
        # We come too close to some start or goal; reject
        self.log.debug('Agent request conflicts with start or goal; rejecting')
        self.agent.queue.put(
            Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id(), action_req))
        return

      # Check that the action doesn't conflict with known current/potential actions
      for (tf_id, transformer) in self.transformers.items():
        if test_conflict((action, pose), (ZERO_ACTION, transformer.pose)):
          # We'd run into where an agent currently is
          self.agent.queue.put(
              Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id(), action_req))
          return

        if transformer.curr_action:
          tf_action, tf_pose = transformer.curr_action
          # if transformer.running:
          #   ticks_elapsed = self.tick_count - transformer.start_tick
          #   tf_pose += tf_action.vel_vec * ticks_elapsed * Transformer.WORLD_TICK

          if test_conflict((action, pose), (tf_action, tf_pose)):
            # The action conflicts with a currently executing or approved action
            self.conflicts_with.add(tf_id)
            self.log.debug('Agent request conflicts with approved action; rejecting')
            self.agent.queue.put(
                Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id(), action_req))
            return

    # We can skip the reverse schedulability check because this is the special case where actions
    # are exactly reversible
    # Check with the other transformers
    self.need_acknowledgement = set(
        tf_id for tf_id in self.transformers if not self.transformers[tf_id].at_goal)
    if self.need_acknowledgement:
      self.log.debug('Sending out for approval')
      self.send_to_all(
          Msg(SenderTypes.Transformer, MessageTypes.ActionRequest, self.get_id(), action_req))
      self.log.debug('Need approval from %s', ic.format(self.need_acknowledgement))
      self.state = TransformerIState.WaitAcknowledge
    else:
      self.start_action()

  def send_to_all(self, msg):
    '''Send the same message to all transformers'''
    for transformer in self.transformers.values():
      if not transformer.at_goal:
        transformer.queue.put(msg)

  def agent_blocked(self, _id, _data):
    '''Handle the agent signalling they have no more actions to suggest'''
    self.state = TransformerIState.Conflicted
    self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.Conflicted, self.get_id()))

  def agent_action_done(self, _id, data):
    '''Handle the agent completing an action'''
    if self.state not in (TransformerIState.RevertingFollow, TransformerIState.RevertingLeader):
      self.state = TransformerIState.WaitAction
      self.current_action = None
      self.pose = data
    self.send_to_all(
        Msg(SenderTypes.Transformer, MessageTypes.ActionDone, self.get_id(), self.pose))

  def agent_at_goal(self, _id, pose):
    '''Handle the agent reaching its goal'''
    self.log.success('Reached goal!')
    self.state = TransformerIState.AtGoal
    self.pose = pose
    self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.AtGoal, self.get_id(), pose))

  def tf_request(self, tf_id, req):
    '''Handle a transformer requesting an action'''
    if self.state in (TransformerIState.RevertingFollow, TransformerIState.RevertingLeader):
      self.log.warning('Received action request from %d while reverting. Ignoring it...', tf_id)
      return

    self.log.debug('Received request from %d', tf_id)
    transformer = self.transformers[tf_id]
    # Check for conflict
    if self.current_action and test_conflict(self.current_action, req):
      # The actions are incompatible!
      self.log.notice('Action from %d conflicts with my action', tf_id)
      if self.get_id() > tf_id:
        self.log.notice('Approving action from %d and entering Conflicted', tf_id)
        transformer.queue.put(
            Msg(SenderTypes.Transformer, MessageTypes.ActionAccept, self.get_id()))
        self.transformers[tf_id].curr_action = req
        self.transformers[tf_id].running = False
        self.conflicts_with.add(tf_id)
        self.state = TransformerIState.Conflicted
        self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.Conflicted, self.get_id()))
      else:
        self.log.debug('Rejecting action from %d', tf_id)
        transformer.queue.put(
            Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id()))

      return

    if test_conflict((ZERO_ACTION, self.pose), req):
      self.log.debug('Rejecting action from %d', tf_id)
      transformer.queue.put(Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id()))
      return

    transformer.queue.put(Msg(SenderTypes.Transformer, MessageTypes.ActionAccept, self.get_id()))
    self.transformers[tf_id].curr_action = req
    self.transformers[tf_id].running = False

  def tf_reject(self, tf_id, _data):
    '''Handle an action being rejected'''
    self.log.debug('Action rejected by %d', tf_id)
    self.agent.queue.put(
        Msg(SenderTypes.Transformer, MessageTypes.ActionReject, self.get_id(), self.current_action))
    self.current_action = None
    self.state = TransformerIState.WaitAction

  def start_action(self):
    '''Utility method to trigger an approved action'''
    self.log.debug('Starting action')
    self.agent.queue.put(
        Msg(SenderTypes.Transformer, MessageTypes.ActionAccept, self.get_id(),
            self.current_action[0]))
    self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.ActionStart, self.get_id()))
    self.state = TransformerIState.WaitAction

  def tf_accept(self, tf_id, _):
    '''Handle an action being accepted'''
    self.log.debug('Action accepted by %d', tf_id)
    self.need_acknowledgement.discard(tf_id)
    if not self.need_acknowledgement and self.state is TransformerIState.WaitAcknowledge:
      self.reverse_actions.append((self.current_action, self.tick_count))
      self.start_action()

  def tf_cancel(self, tf_id, _):
    '''Handle another transformer cancelling an action request'''
    self.transformers[tf_id].curr_action = None
    self.transformers[tf_id].running = False
    if self.state is TransformerIState.Conflicted and tf_id in self.conflicts_with:
      self.conflicts_with.remove(tf_id)
      if not self.conflicts_with:
        self.state = TransformerIState.WaitAcknowledge
        self.start_action()

  def tf_at_goal(self, tf_id, pose):
    '''Handle another transformer reaching its goal'''
    self.is_leader = tf_id == self.get_id() - 1
    if tf_id in self.conflicts_with:
      self.conflicts_with.remove(tf_id)
      if not self.conflicts_with:
        self.start_action()

    self.transformers[tf_id].at_goal = True
    self.transformers[tf_id].pose = pose
    # A transformer reaching its goal may cause an action with pending accepts to be ready
    self.tf_accept(tf_id, None)

  def tf_action_start(self, tf_id, _):
    '''Handle another transformer starting an action'''
    self.transformers[tf_id].running = True
    self.transformers[tf_id].start_tick = self.tick_count
    self.transformers[tf_id].rev_path.append(
        (self.transformers[tf_id].curr_action, self.tick_count))

  def tf_action_done(self, tf_id, pose):
    '''Handle another transformer finishing an action'''
    self.transformers[tf_id].running = False
    self.transformers[tf_id].start_tick = None
    self.transformers[tf_id].curr_action = None
    self.transformers[tf_id].pose = pose
    if self.awaited_reversions:
      self.awaited_reversions.discard(tf_id)

  def tf_conflicted(self, tf_id, _):
    '''Handle another transformer announcing it is conflicted'''
    self.transformers[tf_id].conflicted = True
    # Note: This is only to make the formatting happier
    self_conflicted = self.state is TransformerIState.Conflicted
    if self_conflicted and all(
        transformer.conflicted for transformer in self.transformers.values()):
      if self.is_leader:
        self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.StartReversion, self.get_id()))
        self.state = TransformerIState.RevertingLeader
        self.reverse_schedule = self.compute_reverse_schedule(
            [self.reverse_actions] +
            [self.transformers[tf_id].rev_path for tf_id in self.transformers])

  def tf_reversion_start(self, _id, _data):
    '''Handle the leader signalling the start of reversion'''
    self.state = TransformerIState.RevertingFollow

  def tf_reversion_end(self, _id, _data):
    '''Handle the leader signalling the end of reversion'''
    self.state = TransformerIState.WaitAction

  def tf_reversion_step(self, _id, reverse_action):
    '''Handle the leader requiring a reverse action step'''
    self.agent.queue.put(
        Msg(SenderTypes.Transformer, MessageTypes.StepReversion, self.get_id(), reverse_action))

  def run(self, queue, other_starts_goals):
    '''The main loop'''
    handlers = {
        (SenderTypes.Simulator, MessageTypes.TickUpdate):
            self.update_ticks,
        (SenderTypes.Agent, MessageTypes.ActionRequest):
            partial(self.agent_request, other_starts_goals),
        (SenderTypes.Agent, MessageTypes.NoValidActions):
            self.agent_blocked,
        (SenderTypes.Agent, MessageTypes.ActionDone):
            self.agent_action_done,
        (SenderTypes.Agent, MessageTypes.AtGoal):
            self.agent_at_goal,
        (SenderTypes.Transformer, MessageTypes.ActionRequest):
            self.tf_request,
        (SenderTypes.Transformer, MessageTypes.ActionReject):
            self.tf_reject,
        (SenderTypes.Transformer, MessageTypes.ActionAccept):
            self.tf_accept,
        (SenderTypes.Transformer, MessageTypes.ActionCancelled):
            self.tf_cancel,
        (SenderTypes.Transformer, MessageTypes.AtGoal):
            self.tf_at_goal,
        (SenderTypes.Transformer, MessageTypes.ActionStart):
            self.tf_action_start,
        (SenderTypes.Transformer, MessageTypes.ActionDone):
            self.tf_action_done,
        (SenderTypes.Transformer, MessageTypes.Conflicted):
            self.tf_conflicted,
        (SenderTypes.Transformer, MessageTypes.StartReversion):
            self.tf_reversion_start,
        (SenderTypes.Transformer, MessageTypes.EndReversion):
            self.tf_reversion_end,
        (SenderTypes.Transformer, MessageTypes.StepReversion):
            self.tf_reversion_step
    }

    while self.state is not TransformerIState.AtGoal:
      while not queue.empty():
        msg = queue.get()
        msg.dispatch(handlers)

      if self.state is TransformerIState.RevertingLeader:
        if not self.reverse_schedule:
          # TODO: Need to handle the case where the leader had to revert N steps
          self.start_action()
          self.send_to_all(Msg(SenderTypes.Transformer, MessageTypes.EndReversion, self.get_id()))
        elif not self.awaited_reversions:
          reversion_layer = self.reverse_schedule.pop()
          self.awaited_reversions = {action.agent_id for action in reversion_layer}
          for action in reversion_layer:
            self.transformers[action.agent_id].queue.put(
                Msg(SenderTypes.Transformer, MessageTypes.StepReversion, self.get_id(),
                    Action(action.agent_id, action.reverse, [], action.time)))

      sleep(Transformer.TICK)
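A design note on the loop above: empty() on a multiprocessing queue is only advisory, and the fixed TICK sleep adds up to one tick of latency per message. A hedged alternative shape that blocks on the queue instead (an assumption, not the original design):

from queue import Empty

def drain(msg_queue, handlers, tick):
    # block up to one tick waiting for a message instead of polling empty()
    try:
        while True:
            msg_queue.get(timeout=tick).dispatch(handlers)
    except Empty:
        pass  # no message this tick; let the caller run its state checks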
Beispiel #58
0
class Cluster(object):
    def __init__(self, broker=None):
        self.broker = broker or get_broker()
        self.sentinel = None
        self.stop_event = None
        self.start_event = None
        self.pid = current_process().pid
        self.host = socket.gethostname()
        self.timeout = Conf.TIMEOUT
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)

    def start(self):
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        self.sentinel = Process(target=Sentinel,
                                args=(self.stop_event, self.start_event,
                                      self.broker, self.timeout))
        self.sentinel.start()
        logger.info(_('Q Cluster-{} starting.').format(self.pid))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid

    def stop(self):
        if not self.sentinel.is_alive():
            return False
        logger.info(_('Q Cluster-{} stopping.').format(self.pid))
        self.stop_event.set()
        self.sentinel.join()
        logger.info(_('Q Cluster-{} has stopped.').format(self.pid))
        self.start_event = None
        self.stop_event = None
        return True

    def sig_handler(self, signum, frame):
        logger.debug(
            _('{} got signal {}').format(
                current_process().name,
                Conf.SIGNAL_NAMES.get(signum, 'UNKNOWN')))
        self.stop()

    @property
    def stat(self):
        if self.sentinel:
            return Stat.get(self.pid)
        return Status(self.pid)

    @property
    def is_starting(self):
        return self.stop_event and self.start_event and not self.start_event.is_set()

    @property
    def is_running(self):
        return self.stop_event and self.start_event and self.start_event.is_set()

    @property
    def is_stopping(self):
        return (self.stop_event and self.start_event
                and self.start_event.is_set() and self.stop_event.is_set())

    @property
    def has_stopped(self):
        return self.start_event is None and self.stop_event is None and self.sentinel
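A hypothetical lifecycle run of the Cluster above, assuming a configured broker is available:

cluster = Cluster()
cluster.start()  # blocks until the Sentinel sets start_event
assert cluster.is_running
cluster.stop()   # sets stop_event and joins the Sentinel
assert cluster.has_stopped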
Beispiel #59
0
class EzhilFileExecuter(EzhilRedirectOutput):
    """ run on construction - build a Ezhil lexer/parser/runtime and execute the file pointed to by @files;
        When constructed with a @TIMEOUT value, the process may terminate without and output, otherwise it dumps the output
        to a file named, 
    """
    def get_output(self):
        return [self.tmpf_name, self.fProcName, self.data]

    def __delete__(self):
        if self.tmpf and hasattr(self.tmpf, 'name'):
            os.unlink(self.tmpf.name)
            self.tmpf = None
        if self.fProcName:
            os.unlink(self.fProcName)
            self.fProcName = None
        if hasattr(self.p, 'terminate'):
            self.p.terminate()
        pass

    def __init__(self,
                 file_input,
                 debug=False,
                 redirectop=False,
                 TIMEOUT=None):
        EzhilRedirectOutput.__init__(self, redirectop, debug)
        self.dbg_msg(u"ezil file executer\n")
        self.fProcName = ""
        self.data = ""
        self.tmpf_name = ""
        self.p = None
        self.TIMEOUT = TIMEOUT
        if (not redirectop):  #run serially and exit.
            try:
                ezhil_file_parse_eval(file_input, self.redirectop, self.debug)
                self.exitcode = 0
            except Exception as e:
                self.exitcode = -1
                traceback.print_tb(sys.exc_info()[2])
                raise e
        else:
            self.dbg_msg("EzhilFileExecuter - entering the redirect mode\n")
            self.p = Process(target=ezhil_file_parse_eval,
                             kwargs={
                                 'file_input': file_input,
                                 'redirectop': redirectop,
                                 'debug': debug
                             })

    def run(self):
        if self.p:
            try:
                self.dbg_msg("begin redirect mode\n")
                self.p.start()
                if (self.TIMEOUT is not None):
                    start = time()
                    self.dbg_msg("timeout non-zero\n")
                    raise_timeout = False
                    while self.p.is_alive():
                        self.dbg_msg("in busy loop : %d , %d \n" %
                                     (time() - start, self.TIMEOUT))
                        self.dbg_msg("SLEEP\n")
                        sleep(5)  #poll every 5 seconds
                        if ((time() - start) > self.TIMEOUT):
                            self.dbg_msg("Reached timeout = %d\n" %
                                         self.TIMEOUT)
                            raise_timeout = True
                            break
                        # now try to read all the data from the file and unlink it
                    self.fProcName = EzhilRedirectOutput.pidFileName(
                        self.p.pid)
                    self.tmpf_name = self.tmpf.name

                    # dump stuff from fProcName into the stdout
                    fp = open(self.fProcName, 'r')
                    print(
                        u"######### ------- dump output ------- ##############"
                    )
                    self.data = fp.read()
                    print(self.data)
                    fp.close()

                    if raise_timeout:
                        raise TimeoutException(self.TIMEOUT)
                    #os.unlink( fProcName)
            except Exception as e:
                print("exception ", unicode(e))
                traceback.print_tb(sys.exc_info()[2])
                raise e
            finally:
                # reset the buffers
                if (self.redirectop):
                    #self.tmpf.close()
                    sys.stdout = self.old_stdout
                    sys.stderr = self.old_stderr
                    sys.stdout.flush()
                    sys.stderr.flush()

                # cleanup the cruft files
                #if self.tmpf and hasattr(self.tmpf,'name'):
                #    os.unlink( self.tmpf.name )
                #self.tmpf = None
                #if self.fProcName:
                #    os.unlink( self.fProcName )
                #self.fProcName = None

                # nuke the process
                if hasattr(self.p, 'terminate'):
                    self.p.terminate()
                self.exitcode = self.p.exitcode
        else:
            pass  #nothing to run
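A hypothetical use of the executer above in redirect mode (the file name is a stand-in):

executer = EzhilFileExecuter("hello.n", redirectop=True, TIMEOUT=60)
executer.run()  # raises TimeoutException if the script runs past 60 seconds
tmpf_name, proc_file, output = executer.get_output()
print(output)   # whatever the Ezhil script printed while it ran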
Beispiel #60
0
class MultiProcessScheduler:
    def __init__(self):
        self.cond = Condition()  # default to RLock

        # If duplex is False then the pipe is unidirectional
        # conn1 for receiving messages and conn2 for sending messages.
        conn1, conn2 = Pipe(duplex=False)
        self.connREAD = conn1
        self.connWRITE = conn2

        # a holder to the closest task to execute
        # it is not safe to access this variable directly as
        # there might be data on the pipe, use self.__getClosestTask()
        self._closestTask = None

        # multiprocessing.Queue is used here to exchange task between the add
        # call and the service running __run() method
        self.queue = SimpleQueue()

        # dummy Process, the correct one will be created when the first
        # task is added
        self.service = Process()

    # TODO create destructor to avoid leaving with items on queue

    def __getClosestTask(self):
        '''
        return the closest task to execute (i.e., top on pq)
        '''
        if self.connREAD.poll():
            ret = None
            while self.connREAD.poll():
                ret = self.connREAD.recv()
            self._closestTask = ret
            print("[conn] closestTaskUpdate: ", self._closestTask)
        return self._closestTask

    def add(self, task):
        if type(task) is not Task:
            raise TypeError
        self.queue.put(task)
        if not self.service.is_alive():
            # a Process object can only be started once, so the only way
            # to re-run the service is to create another one
            self.service = Process(target=MultiProcessScheduler.__run,
                                   args=(self.cond, self.queue,
                                         self.connWRITE),
                                   daemon=False)
            self.service.start()
        else:
            # notify the condition variable if the new task has the
            # closest execution time
            closestTask = self.__getClosestTask()
            if closestTask and task.time < closestTask.time:
                self.cond.acquire()
                self.cond.notify()
                self.cond.release()

    @staticmethod
    def __run(cond, queue, conn):
        tasksQueue = []
        print("[run] starting", queue.empty())
        while True:
            # remove tasks from queue and add to
            # internal priorityQueue (tasksQueue)
            while not queue.empty():
                task = queue.get()
                heappush(tasksQueue, task)
                print("[run] adding task to pq: ", task)

            # if there are tasks on the priority queue,
            # check when the closest one should be run
            if tasksQueue:
                etime, _, _ = task = tasksQueue[0]
                now = time()
                if etime < now:
                    # only pop right before running: if a task is not run
                    # at its scheduled time, by the next loop iteration it
                    # might no longer be the closest one
                    _, fn, args = heappop(tasksQueue)
                    print("[run] running:", task)
                    p = Process(target=fn, args=args, daemon=False)
                    p.start()
                else:
                    delay = etime - now

                    print("[run] sleeping for ", delay, task)

                    # send the closest task to the pipe
                    conn.send(task)

                    cond.acquire()
                    cond.wait(timeout=delay)
                    cond.release()

            if not tasksQueue and queue.empty():
                # only stop the service if there are no tasks anywhere
                break
        print("[run] done")