Example #1
    def main(channel_name: str, amqp_target: str, amqp_key: str, peer_target: str,
             tx_versioner: TransactionVersioner, broadcast_queue: mp.Queue, properties: ModuleProcessProperties=None):
        if properties is not None:
            ModuleProcess.load_properties(properties, "txcreator")

        logging.info(f"Channel TX Creator start")

        broadcast_queue.cancel_join_thread()

        queue_name = conf.CHANNEL_TX_CREATOR_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=amqp_key)
        service = ChannelTxCreatorInnerService(broadcast_queue,
                                               amqp_target,
                                               queue_name,
                                               conf.AMQP_USERNAME,
                                               conf.AMQP_PASSWORD,
                                               channel_name=channel_name,
                                               peer_target=peer_target,
                                               tx_versioner=tx_versioner)

        def _on_signal(signal_num):
            logging.error(f"Channel TX Creator has been received signal({repr(signal_num)})")
            service.stop()

        service.loop.add_signal_handler(signal.SIGTERM, _on_signal, signal.SIGTERM)
        service.loop.add_signal_handler(signal.SIGINT, _on_signal, signal.SIGINT)

        service.serve(connection_attempts=conf.AMQP_CONNECTION_ATTEMPTS,
                      retry_delay=conf.AMQP_RETRY_DELAY, exclusive=True)
        logging.info("ChannelTxCreatorInnerService: started")
        service.serve_all()

        service.cleanup()
        service.loop.close()
        logging.info("ChannelTxCreatorInnerService: stopped")
Example #2
class PipeFrame(object):
    def __init__(self, cpu_count=_cpu_count() - 1, buffer_size=10000):
        self.input_queue = Queue(buffer_size)
        self.output_queue = Queue()
        self.cpu_count = cpu_count

    def run(self, _pipeline, load=None):
        start_time = time.time()
        load_function = load if load else _pipeline.feed

        if _pipeline.source == 'batch':
            load_function(self.input_queue)

        worker_list = []
        for i in range(self.cpu_count):
            worker = _pipeline(name="worker-{0}".format(i),
                               input_queue=self.input_queue,
                               output_queue=self.output_queue)
            worker_list.append(worker)

        for worker in worker_list:
            worker.start()

        if _pipeline.source == 'stream':
            load_function(self.input_queue)

        for worker in worker_list:
            worker.join()

        print("Done!")
        self.input_queue.cancel_join_thread()
        end_time = time.time()
        print("Elapsed time: {0}s".format(round(end_time - start_time, 2)))
Example #3
File: pronew.py Project: naiaden/VacCor
    def run(self):
        program_start = time.time()

        wQ = Queue()
        wQ.cancel_join_thread()
        wP = []

        for i in range(tp.threads):
            p = Process(target=tp.process_file_write, args=(i, wQ,))
            wP.append(p)
            p.start()

        # 10000 files enter here
        sys.stdout.write(Fore.GREEN + "Reading from stdin...")
        input_files = sys.stdin.readlines()
        for input_file in input_files:
            wQ.put(input_file.rstrip())
        print(Fore.GREEN + "\rDone reading from stdin. I found %d files." % (wQ.qsize()))

        for _ in wP:
            wQ.put(None)

        for p in wP:
            p.join()

        program_stop = time.time()
        print(Fore.GREEN + "Processed %d files in %f seconds (%fs avg)" % (len(input_files), program_stop - program_start, (program_stop - program_start) / len(input_files)))
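tp.process_file_write is not shown in this snippet. Given the shutdown convention in run() above (one None sentinel per worker), a plausible shape for the worker loop is the hypothetical sketch below (the names and the per-file work are placeholders, not the project's code):

def process_file_write(worker_id, work_queue):
    # Hypothetical worker: consume file paths until the None sentinel arrives.
    while True:
        path = work_queue.get()
        if path is None:
            break
        print("worker %d processing %s" % (worker_id, path))  # placeholder for the real work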
Example #4
File: base.py Project: 1stvamp/apiary
    def start(self):
        """Run the load test."""

        start_time = time.time()

        job_queue = Queue()
        stats_queue = Queue()

        workers = []

        delay = self.options.stagger_workers / 1000.0
        for i in xrange(self.options.workers):
            worker = WorkerBeeProcess(self.options, self.protocol, job_queue, stats_queue)
            worker.start()
            workers.append(worker)
            time.sleep(delay)

        # TODO: consider waiting until workers are ready
        #if self.options.startup_wait:
        #    print "workers started; waiting %d seconds..." % self.options.startup_wait
        #    time.sleep(self.options.startup_wait)

        stats_gatherer = StatsGatherer(self.options, stats_queue)
        stats_gatherer.start()

        queen = QueenBee(self.options, self.arguments, job_queue, stats_queue)
        queen.start()

        # Now wait while the queen does its thing.
        try:
            queen.join()
        except KeyboardInterrupt:
            print "Interrupted, shutting down..."
            queen.terminate()

        print "Waiting for workers to complete jobs and terminate (may take up to %d seconds)..." % self.options.max_ahead

        try:
            stop = Message(Message.STOP)
            for worker in xrange(self.options.workers * self.options.threads):
                job_queue.put(stop)

            # Now wait for the workers to get the message.  This may take a few
            # minutes as the QueenBee likes to stay ahead by a bit.

            for worker in workers:
                worker.join()

            # Tell the Stats Gatherer that it's done.
            stats_queue.put(Message(Message.STOP))

            # Wait for it to finish.
            stats_gatherer.join()

            print "Completed %d jobs in %0.2f seconds." % (queen.jobs_sent.value, time.time() - start_time)
        except KeyboardInterrupt:
            print "Interrupted before shutdown process completed."

            job_queue.cancel_join_thread()
            stats_queue.cancel_join_thread()
Example #5
 def __init__(self, rigol_backend="usbtmc", checkqueuedelay=.09, addqueuetime=.2):
     """
     checkqueuedelay -> How quickly python will check the queues for new voltage
         data to plot.  This value must be less than addqueuetime to ensure that
         every waveform is being plotted.
     addqueuetime -> python waits <addqueuetime> seconds between each query about
         the voltages from the oscilloscope.
     q1 and q2 are the queues which hold the voltage data for channel 1 and 2
         on the oscilloscope.
     """
     self.lock = RLock()
     self.checkqueuedelay = int(checkqueuedelay * 1000)  # in ms
     self.addqueuetime = addqueuetime  # in sec
     self.dev = rigol.Rigol(rigol_backend)
     self.dev.waveformPointsMode("NORM")
     self.vpp = self.dev.askChannelScale(1) * 4
     self.vpp2 = self.dev.askChannelScale(2) * 4
     self.x = self.dev.getTimebase()
     q1 = Queue()
     q1.cancel_join_thread()
     q2 = Queue()
     q2.cancel_join_thread()
     self.ch1 = False
     self.ch2 = False
     self.start(q1, q2)
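The docstring's constraint that checkqueuedelay stay below addqueuetime simply means the GUI must poll the queues at least as often as the acquisition loop fills them, or waveforms pile up. A minimal sketch of that non-blocking polling pattern (a hypothetical helper, not the class's actual timer callback):

import queue
import time
from multiprocessing import Queue

def poll_waveforms(q: Queue, check_delay: float = 0.09) -> None:
    # Drain at most one waveform per tick; this keeps up only while the
    # producer enqueues no faster than once per check_delay seconds.
    while True:
        try:
            waveform = q.get_nowait()   # non-blocking check, like a GUI timer callback
        except queue.Empty:
            pass
        else:
            print("received %d samples" % len(waveform))
        time.sleep(check_delay)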
Example #6
class DataBuffer(object):
#========================

  def __init__(self, binary=False):
  #--------------------------------
    self._binary = binary
    self._queue = Queue()
    self._data = [ ]
    self._datalen = 0
    self._pos = 0

  def close(self):
  #---------------
    self._queue.cancel_join_thread()

  def put(self, data):
  #-------------------
    self._queue.put(data)

  def __iter__(self):
  #------------------
    while True:
      while self._pos >= self._datalen:
        data = self._queue.get()
        if data is None: return  # PEP 479: raising StopIteration inside a generator becomes RuntimeError
        self._data = format_binary(data) if self._binary else format_text(data)
        self._datalen = len(self._data)
        self._pos = 0
      self._pos += 1
      d = self._data[self._pos - 1]
      ## Data could be a 2-D (or higher?) array.
      if self._binary:
        yield d if not isinstance(d, np.ndarray) else ''.join(d.flatten().tolist())
      else:
        yield d if isinstance(d, str) else ' '.join(d.flatten().tolist())
Example #7
    def main(channel_name: str, amqp_target: str, amqp_key: str,
             tx_versioner: TransactionVersioner, tx_queue: mp.Queue, properties: ModuleProcessProperties=None):
        if properties is not None:
            ModuleProcess.load_properties(properties, "txreceiver")

        logging.info(f"Channel TX Receiver start")

        tx_queue.cancel_join_thread()

        queue_name = conf.CHANNEL_TX_RECEIVER_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=amqp_key)
        service = ChannelTxReceiverInnerService(amqp_target, queue_name,
                                                conf.AMQP_USERNAME, conf.AMQP_PASSWORD,
                                                tx_versioner=tx_versioner, tx_queue=tx_queue)

        async def _stop_loop():
            service.loop.stop()

        def _on_signal(signal_num):
            logging.error(f"Channel TX Receiver has been received signal({repr(signal_num)})")
            asyncio.run_coroutine_threadsafe(_stop_loop(), service.loop)

        service.loop.add_signal_handler(signal.SIGTERM, _on_signal, signal.SIGTERM)
        service.loop.add_signal_handler(signal.SIGINT, _on_signal, signal.SIGINT)

        service.serve(connection_attempts=conf.AMQP_CONNECTION_ATTEMPTS,
                      retry_delay=conf.AMQP_RETRY_DELAY, exclusive=True)
        logging.info("ChannelTxReceiverInnerService: started")
        service.serve_all()

        service.loop.close()

        logging.info("ChannelTxReceiverInnerService: stopped")
Example #8
 def __init__(self,
              rigol_backend="usbtmc",
              checkqueuedelay=.09,
              addqueuetime=.2):
     """
     checkqueuedelay -> How quickly python will check the queues for new voltage
         data to plot.  This value must be less than addqueuetime to ensure that
         every waveform is being plotted.
     addqueuetime -> python waits <addqueuetime> seconds between each query about
         the voltages from the oscilloscope.
     q1 and q2 are the queues which hold the voltage data for channel 1 and 2
         on the oscilloscope.
     """
     self.lock = RLock()
     self.checkqueuedelay = int(checkqueuedelay * 1000)  # in ms
     self.addqueuetime = addqueuetime  # in sec
     self.dev = rigol.Rigol(rigol_backend)
     self.dev.waveformPointsMode("NORM")
     self.vpp = self.dev.askChannelScale(1) * 4
     self.vpp2 = self.dev.askChannelScale(2) * 4
     self.x = self.dev.getTimebase()
     q1 = Queue()
     q1.cancel_join_thread()
     q2 = Queue()
     q2.cancel_join_thread()
     self.ch1 = False
     self.ch2 = False
     self.start(q1, q2)
Example #9
def worker(
    parent_conn: Connection, step_queue: Queue, pickled_env_factory: str, worker_id: int
) -> None:
    env_factory: Callable[[int], UnityEnvironment] = cloudpickle.loads(
        pickled_env_factory
    )
    env = env_factory(worker_id)

    def _send_response(cmd_name, payload):
        parent_conn.send(EnvironmentResponse(cmd_name, worker_id, payload))

    try:
        while True:
            cmd: EnvironmentCommand = parent_conn.recv()
            if cmd.name == "step":
                all_action_info = cmd.payload
                actions = {}
                memories = {}
                texts = {}
                values = {}
                for brain_name, action_info in all_action_info.items():
                    actions[brain_name] = action_info.action
                    memories[brain_name] = action_info.memory
                    texts[brain_name] = action_info.text
                    values[brain_name] = action_info.value
                all_brain_info = env.step(actions, memories, texts, values)
                # The timers in this process are independent from all the processes and the "main" process
                # So after we send back the root timer, we can safely clear them.
                # Note that we could randomly return timers a fraction of the time if we wanted to reduce
                # the data transferred.
                # TODO get gauges from the workers and merge them in the main process too.
                step_response = StepResponse(all_brain_info, get_timer_root())
                step_queue.put(EnvironmentResponse("step", worker_id, step_response))
                reset_timers()
            elif cmd.name == "external_brains":
                _send_response("external_brains", env.external_brains)
            elif cmd.name == "reset_parameters":
                _send_response("reset_parameters", env.reset_parameters)
            elif cmd.name == "reset":
                all_brain_info = env.reset(
                    cmd.payload[0], cmd.payload[1], cmd.payload[2]
                )
                _send_response("reset", all_brain_info)
            elif cmd.name == "close":
                break
    except (KeyboardInterrupt, UnityCommunicationException):
        logger.info(f"UnityEnvironment worker {worker_id}: environment stopping.")
        step_queue.put(EnvironmentResponse("env_close", worker_id, None))
    finally:
        # If this worker has put an item in the step queue that hasn't been processed by the EnvManager, the process
        # will hang until the item is processed. We avoid this behavior by using Queue.cancel_join_thread()
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread for
        # more info.
        logger.debug(f"UnityEnvironment worker {worker_id} closing.")
        step_queue.cancel_join_thread()
        step_queue.close()
        env.close()
        logger.debug(f"UnityEnvironment worker {worker_id} done.")
Example #10
    def get_news(self,
                 start_time=None,
                 until_time=None,
                 news_count=None,
                 topic_filter=None):
        """ The main process of a parser """
        t_start = time.time()
        threads = self.threads
        self._check_args(start_time, until_time, news_count, topic_filter,
                         threads)
        # Some parsers do not have start time, so need to check
        if not start_time:
            start_time = datetime.datetime.now()
        if not until_time:
            until_time = self.until_time_default

        Q_urls = Queue(0)  # Queue, contains news urls and for deeper parsing
        Q_out = Queue(0)  # Queue, contains news outputs
        sync_flag = Value('i', 1)  # Flag for stopping processes

        workers = []
        # Processes for getting news by URL
        for _ in range(threads):
            workers.append(
                Process(target=self._process_news,
                        args=(Q_urls, Q_out, sync_flag, topic_filter)))
            workers[-1].start()

        out = []

        try:
            # Parsing pages with urls ("Лента новостей", i.e. the news feed)
            self.parse_pages(Q_urls, Q_out, sync_flag, start_time, until_time,
                             news_count, topic_filter)
            # Drain the output queue while the processes are still working
            # (probably slows the other workers down significantly; needs fixing)
            self._listen_queue(workers, Q_out, out)
        except KeyboardInterrupt:
            sync_flag.value = 2  # Immediate stop, without clearing Q_urls
            logging.warning('Stopping parsing...')
            # But we still need to clear the out queue
            try:
                self._listen_queue(workers, Q_out, out)
            except KeyboardInterrupt:
                # If something goes wrong -- just kill everybody
                logging.warning('Hard stopping parsing on join...')
                Q_out.cancel_join_thread()
                for worker in workers:
                    worker.terminate()

        # Waiting for processes to stop
        for i, worker in enumerate(workers):
            worker.join()
            logging.debug("END OF THREAD " + str(i))

        logging.debug('END OF PARSING, TIME ' + str(time.time() - t_start))
        return sorted(out, key=lambda x: x['date'], reverse=True)
Example #11
def dlt_filter_ack_msg_handler():
    queue = Queue()

    handler = DLTFilterAckMessageHandler(queue)
    try:
        handler.start()
        queue.cancel_join_thread()

        yield (handler, queue)
    finally:
        handler.stop()
        queue.close()
Example #12
class CaptureVideoThreaded(Process):
    # class CaptureVideoThreaded():
    def __init__(self, queue_size=20):
        self.stopped = Value(ctypes.c_bool, False)
        self.Q = Queue(maxsize=queue_size)
        super(CaptureVideoThreaded, self).__init__()

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def stop(self):
        self.stopped.value = True

    def run(self):
        cap = cv2.VideoCapture(
            sys.argv[-1]
        )  # have to open stream only here so multithreading works, otherwise it would halt in read

        i = 0
        while not self.stopped.value:
            i += 1

            # if self.Q.full():
            #     print("CAP_THREAD {}: full {}".format(i, self.Q.qsize()), flush=True)
            # else:
            #     print("CAP_THREAD {}: not_full {}".format(i, self.Q.qsize()), flush=True)

            if not self.Q.full():
                # read the next frame from the file
                (grabbed, im_input) = cap.read()

                # fgmask_mog2 = fgbg_mog2.apply(im_input)  # should this be im_input_gray
                # im_moved = cv2.bitwise_and(im_input, im_input, mask=fgmask_mog2)

                # if the `grabbed` boolean is `False`, then we have reached the end of the video file
                if not grabbed:
                    print("Stopping CaptureVideoThread because not grabbed")
                    self.stop()
                else:
                    self.Q.put(im_input)
            else:
                time.sleep(0.001)

        cap.release()
        self.Q.close()
        self.Q.cancel_join_thread()
Example #13
def queued_generator(data_src,
                     callback_fx,
                     num_epochs,
                     batch_size,
                     num_workers,
                     extra_data_src_list=[],
                     queue_prewait_seconds=0):
    # sanity check data_src shapes
    if len(extra_data_src_list) > 0:
        if not all([len(d) == len(data_src) for d in extra_data_src_list]):
            raise ValueError('All data sources must have the same length')

    num_inputs = len(data_src)
    batches_per_epoch = ceil(num_inputs / batch_size)
    total_batches = batches_per_epoch * num_epochs

    print(
        'queued_generator --> %s: num_workers: %s batch_size: %s prewait_seconds: %s'
        %
        (callback_fx.__name__, num_workers, batch_size, queue_prewait_seconds))
    out_queue = Queue()
    p = Process(target=queued_generator_helper,
                args=(out_queue, data_src, callback_fx, num_epochs, batch_size,
                      num_workers, extra_data_src_list))
    try:
        p.start()
        time.sleep(queue_prewait_seconds)
        for i in range(total_batches):
            print('\t\tfetching data %s/%s' % (i, total_batches))
            out = out_queue.get()
            if out is not None:
                yield out
        # p.join()  # pool is created/destroyed for each epoch of data
        out_queue.close()
        out_queue.join_thread()
    except StopIteration:
        print("Internal generator is done")
        p.terminate()
        out_queue.close()
        out_queue.join_thread()
    except Exception as e:
        p.terminate()
        out_queue.close()
        out_queue.join_thread()
        out_queue.cancel_join_thread()  # don't wait for the feeder thread to flush; we just want to exit quickly
        raise e
Example #14
def beak_main(stop: threading.Event, config: Config.Interface, log_q: Queue,
              rx_q: Queue, tx_q: Queue, rr_q: Queue) -> None:
    try:
        configure_log(log_q)
        beak = create_beak(config, rx_q, tx_q, rr_q)

        # Run the interface until stop signal received. The beak interfaces
        # run their own thread, so this thread is free to idle or possibly
        # handle something else in the future.
        with beak:
            stop.wait()

    except (KeyboardInterrupt, SystemExit):
        pass  # Prevent stack trace caused by keyboard interrupt
    finally:
        # Drain queues so the parent process doesn't block while trying to
        # join this process
        drain_queue(rx_q)

        # Let parent thread handle the joining of these queues
        tx_q.cancel_join_thread()
        rr_q.cancel_join_thread()
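drain_queue is not defined in this snippet. Given the comment about keeping the parent from blocking on join, a plausible implementation (an assumption, not the project's actual helper) just empties whatever the queue still holds:

import queue
from multiprocessing import Queue

def drain_queue(q: Queue) -> None:
    # Assumed helper: pull everything still buffered so the queue's feeder
    # thread has nothing left to write when this process shuts down.
    while True:
        try:
            q.get_nowait()
        except queue.Empty:
            break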
Example #15
    def _main(broadcast_queue: mp.Queue,
              channel: str,
              self_target: str,
              properties: ModuleProcessProperties = None):
        if properties is not None:
            ModuleProcess.load_properties(properties, f"{channel}_broadcast")

        logging.info(f"BroadcastScheduler process({channel}) start")

        broadcast_queue.cancel_join_thread()

        broadcaster = _Broadcaster(channel, self_target)
        broadcaster.start()

        original_sigterm_handler = signal.getsignal(signal.SIGTERM)
        original_sigint_handler = signal.getsignal(signal.SIGINT)

        def _signal_handler(signal_num, frame):
            signal.signal(signal.SIGTERM, original_sigterm_handler)
            signal.signal(signal.SIGINT, original_sigint_handler)
            logging.error(
                f"BroadcastScheduler process({channel}) has been received signal({signal_num})"
            )
            broadcast_queue.put((None, None))
            broadcaster.stop()

        signal.signal(signal.SIGTERM, _signal_handler)
        signal.signal(signal.SIGINT, _signal_handler)

        while True:
            command, params = broadcast_queue.get()
            if not broadcaster.is_running or command is None:
                break
            broadcaster.handle_command(command, params)

        while not broadcast_queue.empty():
            broadcast_queue.get()

        logging.info(f"BroadcastScheduler process({channel}) end")
Example #16
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.batch_size = loader.batch_size
        self.drop_last = loader.drop_last
        self.shuffle = loader.shuffle
        self.worker_num = loader.worker_num
        self.sample_iter = iter(loader.sampler)
        self.file_num = len(self.dataset)
        self.batches_outstanding = 0
        self.send_idx = 0
        self.worker_queue_idx = 0
        self.shutdown = False
        if self.dataset.package:
            self.buffer_list = []
        self.workers_done_event = Event()
        # signal.signal(signal.SIGINT, self._shutdown_workers)
        # signal.signal(signal.SIGTERM, self._shutdown_workers)

        self.index_queues = []
        self.workers = []
        if self.worker_num > 0:
            self.dataqueue = Queue()
            self.reorder_dict = {}
            self.rcvd_idx = 0
            index_queue = Queue()
            index_queue.cancel_join_thread()
            w = Process(
                target=self._worker_loop,
                args=(self._collate_fn,
                      self.dataset,
                      self.dataqueue,
                      index_queue,
                      self.workers_done_event))
            w.daemon = True
            w.start()
            self.index_queues.append(index_queue)
            self.workers.append(w)
            for _ in range(2 * self.worker_num):
                self._put_indices()
Example #17
class EventHistory():
    
    def __init__(self):
        self.queue = Queue()
        self.events = []
        self.running_events = {}
        self.last_aggregation = datetime.datetime.now()
        self.agg_template = {
            'commit': 0,
            'rollback': 0,
            'max_latency': 0.0,
            'running': 0,
            'running_latency': 0.0
        }

    def register_start(self, name):
        event_id = uuid.uuid4()
        self.queue.put({
            'name': name,
            'event_id': event_id,
            'time': datetime.datetime.now()
        })
        return event_id

    def register_finish(self, event_id, status):
        self.queue.put({
            'event_id': event_id,
            'status': status,
            'time': datetime.datetime.now()
        })

    def load_queue(self):
        while not self.queue.empty():
            event = self.queue.get()
            if 'name' in event:
                # start mark
                self.running_events[event['event_id']] = event
            else:
                # finish mark
                if event['event_id'] in self.running_events:
                    start_ev = self.running_events[event['event_id']]
                    self.events.append({
                        'name': start_ev['name'],
                        'started_at': start_ev['time'],
                        'finished_at': event['time'],
                        'status': event['status']
                    })
                    self.running_events.pop(event['event_id'], None)
                else:
                    # found a finish event without a corresponding start
                    raise RuntimeError("finish event without corresponding start: %s" % event['event_id'])
        return

    def aggregate(self):
        self.load_queue()

        agg = {}
        for ev in self.events:
            if ev['finished_at'] < self.last_aggregation:
                continue

            if ev['name'] not in agg:
                agg[ev['name']] = self.agg_template.copy()

            named_agg = agg[ev['name']]
            latency = (ev['finished_at'] - ev['started_at']).total_seconds()
            named_agg[ev['status']] += 1
            if named_agg['max_latency'] < latency:
                named_agg['max_latency'] = latency

        for value in self.running_events.values():
            if value['name'] not in agg:
                agg[value['name']] = self.agg_template.copy()

            named_agg = agg[value['name']]
            latency = (datetime.datetime.now() - value['time']).total_seconds()
            named_agg['running'] += 1
            if latency > named_agg['running_latency']:
                named_agg['running_latency'] = latency

        self.last_aggregation = datetime.datetime.now()
        return agg

    def aggregate_by(self, period):
        return

    def close(self):
        print('closing queue')
        self.queue.close()
        print('clearing queue')
        self.load_queue()
        print('joining queue')
        self.queue.cancel_join_thread()
Example #18
            self._log("sub: `proc_name` missing for mtype(%s)" % mtype)
            return
        
        if mtype is None:
            self._log("sub: `mtype` missing for proc_name(%s)" % pname)
            return
        
        subs=self._map.get(mtype, [])
        subs.append(pname)
        self._map[mtype]=subs
            

## ================================================================

_centralInputQueue=Queue()
_centralInputQueue.cancel_join_thread()
_mswitch=MessageSwitch(_centralInputQueue)
        
Bus.subscribe("_sub",           _mswitch._hsub)
Bus.subscribe("proc",           _mswitch._hproc)
Bus.subscribe("start",          _mswitch._hstart)

Bus.subscribe("mqueue?",        _mswitch._qmqueue)
Bus.subscribe("mswitch_pump",   _mswitch._hpump)
Bus.subscribe("mswitch_params", _mswitch._hparams)

Bus.subscribe("proc_starting",  _mswitch._hproc_starting)
Bus.subscribe("xsub",           _mswitch._hxsub)
Bus.subscribe("xbridge",        _mswitch._hxbridge)

Example #19
def worker(
    parent_conn: Connection,
    step_queue: Queue,
    pickled_env_factory: str,
    worker_id: int,
    engine_configuration: EngineConfig,
    log_level: int = logging_util.INFO,
) -> None:
    env_factory: Callable[
        [int, List[SideChannel]], UnityEnvironment
    ] = cloudpickle.loads(pickled_env_factory)
    env_parameters = EnvironmentParametersChannel()
    engine_configuration_channel = EngineConfigurationChannel()
    engine_configuration_channel.set_configuration(engine_configuration)
    stats_channel = StatsSideChannel()
    env: BaseEnv = None
    # Set log level. On some platforms, the logger isn't common with the
    # main process, so we need to set it again.
    logging_util.set_log_level(log_level)

    def _send_response(cmd_name: EnvironmentCommand, payload: Any) -> None:
        parent_conn.send(EnvironmentResponse(cmd_name, worker_id, payload))

    def _generate_all_results() -> AllStepResult:
        all_step_result: AllStepResult = {}
        for brain_name in env.behavior_specs:
            all_step_result[brain_name] = env.get_steps(brain_name)
        return all_step_result

    try:
        env = env_factory(
            worker_id, [env_parameters, engine_configuration_channel, stats_channel]
        )
        while True:
            req: EnvironmentRequest = parent_conn.recv()
            if req.cmd == EnvironmentCommand.STEP:
                all_action_info = req.payload
                for brain_name, action_info in all_action_info.items():
                    if len(action_info.action) != 0:
                        env.set_actions(brain_name, action_info.action)
                env.step()
                all_step_result = _generate_all_results()
                # The timers in this process are independent from all the processes and the "main" process
                # So after we send back the root timer, we can safely clear them.
                # Note that we could randomly return timers a fraction of the time if we wanted to reduce
                # the data transferred.
                # TODO get gauges from the workers and merge them in the main process too.
                env_stats = stats_channel.get_and_reset_stats()
                step_response = StepResponse(
                    all_step_result, get_timer_root(), env_stats
                )
                step_queue.put(
                    EnvironmentResponse(
                        EnvironmentCommand.STEP, worker_id, step_response
                    )
                )
                reset_timers()
            elif req.cmd == EnvironmentCommand.BEHAVIOR_SPECS:
                _send_response(EnvironmentCommand.BEHAVIOR_SPECS, env.behavior_specs)
            elif req.cmd == EnvironmentCommand.ENVIRONMENT_PARAMETERS:
                for k, v in req.payload.items():
                    if isinstance(v, ParameterRandomizationSettings):
                        v.apply(k, env_parameters)
            elif req.cmd == EnvironmentCommand.RESET:
                env.reset()
                all_step_result = _generate_all_results()
                _send_response(EnvironmentCommand.RESET, all_step_result)
            elif req.cmd == EnvironmentCommand.CLOSE:
                break
    except (
        KeyboardInterrupt,
        UnityCommunicationException,
        UnityTimeOutException,
        UnityEnvironmentException,
        UnityCommunicatorStoppedException,
    ) as ex:
        logger.info(f"UnityEnvironment worker {worker_id}: environment stopping.")
        step_queue.put(
            EnvironmentResponse(EnvironmentCommand.ENV_EXITED, worker_id, ex)
        )
        _send_response(EnvironmentCommand.ENV_EXITED, ex)
    finally:
        # If this worker has put an item in the step queue that hasn't been processed by the EnvManager, the process
        # will hang until the item is processed. We avoid this behavior by using Queue.cancel_join_thread()
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread for
        # more info.
        logger.debug(f"UnityEnvironment worker {worker_id} closing.")
        step_queue.cancel_join_thread()
        step_queue.close()
        if env is not None:
            env.close()
        logger.debug(f"UnityEnvironment worker {worker_id} done.")
Example #20
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
                 drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter  = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter  = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        self.process = Process(name=self.name, target=self.run)
        self.process.start()

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level = SLOW_ONLY

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        rospy.init_node(WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname),
                        anonymous=False)

        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty(): sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()

 


    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            #self.queue.put((topic, data, current_time or datetime.now()))
            self.queue.put((topic, data, rospy.get_time()))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
            t = None
            try:
                t = self.queue.get(True)
            except IOError:
                # Anticipate Ctrl-C
                #print("Quit W1: %s" % self.name)
                self.quit.value = 1
                break
            if isinstance(t, tuple):
                self.out_counter.increment()
                self.worker_out_counter.increment()
                topic = t[0]
                msg   = t[1]
                ctime = t[2]

                if isinstance(msg, rospy.Message):
                    doc = ros_datacentre.util.msg_to_document(msg)
                    doc["__recorded"] = ctime or datetime.now()
                    doc["__topic"]    = topic
                    try:
                        #print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
                        #pprint.pprint(doc)
                        self.collection.insert(doc)
                    except InvalidDocument as e:
                        print("InvalidDocument " + current_process().name + "@" + topic + ": \n")
                        print(e)
                    except InvalidStringData as e:
                        print("InvalidStringData " + current_process().name + "@" + topic + ": \n")
                        print(e)

            else:
Example #21
class Lantern(object):
    # variables to hold lantern state
    _powered = DEFAULT_POWERED
    _color = DEFAULT_COLOR

    def __init__(self, powered=DEFAULT_POWERED, color=DEFAULT_COLOR):
        # queue for lantern state changes
        # will be used for communication between Lantern object and UI
        self.changes_q = Queue()
        self.changes_q.cancel_join_thread()

        # setting initial state
        self._powered = powered
        self._color = color

    @property
    def state(self):
        """
        Return lantern state as a tuple
        """
        return (self.powered, self.color)

    @redraw_on_change
    def set_power(self, value):
        """
        Setter for the `powered` attribute. Manages lantern power state.
        True - lantern is On, False - lantern is Off.

        :type value: bool
        :param value: flag should lantern be powered or not
        :rtype: NoneType
        """
        if not isinstance(value, bool):
            raise ValueError('`powered` value must be a boolean')

        else:
            self._powered = value

    @redraw_on_change
    def set_color(self, rgb_color):
        """
        Setter for the `color` attribute.
        Manages lantern color. Color value must be a string in format '#FFFFFF'
        representing the RGB colors.

        :type rgb_color: string
        :param rgb_color: hex string of RGB color code
        :rtype: NoneType
        """
        try:
            val = int(rgb_color.lstrip('#'), 16)
            if val < 0x000000 or val > 0xFFFFFF:
                raise ValueError('invalid color value:', rgb_color)
        except ValueError:
            logger.error('invalid color value: %s', rgb_color)

        else:
            self._color = rgb_color

    def _send_redraw(self):
        """
        Send lantern current state to the message queue to cause UI redraw
        """
        logger.debug('sending message with new state: %s', repr(self.state))
        self.changes_q.put(self.state)

    def destroy(self):
        self.changes_q.put(None)
        logger.debug('lantern destroyed')

    powered = property(lambda self: self._powered, set_power)
    color = property(lambda self: self._color, set_color)

    power_off = lambda self: self.set_power(False)
    power_on = lambda self: self.set_power(True)
Example #22
def worker(
    parent_conn: Connection,
    step_queue: Queue,
    pickled_env_factory: str,
    worker_id: int,
    # engine_configuration: EngineConfig,
) -> None:
    # env_factory: Callable[[int, List[SideChannel]], UnityEnvironment] = cloudpickle.loads(pickled_env_factory)
    env_factory = cloudpickle.loads(pickled_env_factory)
    # shared_float_properties = FloatPropertiesChannel()
    shared_float_properties = None
    # engine_configuration_channel = EngineConfigurationChannel()
    # engine_configuration_channel.set_configuration(engine_configuration)
    # env: BaseEnv = env_factory(worker_id, [shared_float_properties, engine_configuration_channel])
    env: BaseEnv = env_factory(worker_id)

    def _send_response(cmd_name, payload):
        parent_conn.send(EnvironmentResponse(cmd_name, worker_id, payload))

    def _generate_all_results(action) -> AllStepResult:
        all_step_result: AllStepResult = {}
        brain_name = env.spec.id
        observation, reward, done, info = env.step(action=action)
        # print(observation)
        # print('=====================================+')
        save_info = info["exchange"].performance
        save_info.to_csv("results.csv")
        # env.render(mode="chart")
        max_step = False
        obs = [np.asarray([observation], dtype=np.float32)]
        reward = [np.asarray([reward], dtype=np.float32)]
        done = [np.asarray([done], dtype=np.float32)]
        max_step = [np.asarray([max_step], dtype=np.float32)]
        all_step_result[brain_name] = BatchedStepResult(obs=obs,
                                                        reward=reward,
                                                        done=done,
                                                        max_step=max_step,
                                                        agent_id=[0],
                                                        action_mask=None)
        return all_step_result

    def _reset_results() -> AllStepResult:
        all_step_result: AllStepResult = {}
        brain_name = env.spec.id
        observation = env.reset()
        obs = [np.asarray([observation], dtype=np.float32)]
        reward = [np.asarray([-10], dtype=np.float32)]
        done = [np.asarray([False], dtype=np.float32)]
        max_step = [np.asarray([False], dtype=np.float32)]
        all_step_result[brain_name] = BatchedStepResult(obs=obs,
                                                        reward=reward,
                                                        done=done,
                                                        max_step=max_step,
                                                        agent_id=[0],
                                                        action_mask=None)
        return all_step_result

    def external_brains():
        result = {}
        brain_name = env.spec.id
        # for brain_name in env.get_agent_groups():

        result[brain_name] = group_spec_to_brain_parameters(
            brain_name, {
                "observation_shapes": [(env.observation_space, )],
                "action_shape": (env.action_scheme.action_space.n, ),
                "action_type": 'DISCRETE'
            })
        return result

    try:
        while True:

            cmd: EnvironmentCommand = parent_conn.recv()

            if cmd.name == "step":
                all_action_info = cmd.payload
                action = 1
                for brain_name, action_info in all_action_info.items():
                    # if len(action_info.action) != 0:
                    action = action_info.action[0][0]
                    # observation, reward, done, info = env.step(action_info.action[0][0])

                # env.step()
                all_step_result = _generate_all_results(action)
                # The timers in this process are independent from all the processes and the "main" process
                # So after we send back the root timer, we can safely clear them.
                # Note that we could randomly return timers a fraction of the time if we wanted to reduce
                # the data transferred.
                # TODO get gauges from the workers and merge them in the main process too.
                step_response = StepResponse(all_step_result)
                step_queue.put(
                    EnvironmentResponse("step", worker_id, step_response))
                # reset_timers()
            elif cmd.name == "external_brains":
                exr_brains = external_brains()
                _send_response("external_brains", exr_brains)
            # elif cmd.name == "get_properties":
            #     reset_params = shared_float_properties.get_property_dict_copy()
            #     _send_response("get_properties", reset_params)
            elif cmd.name == "reset":
                # for k, v in cmd.payload.items():
                #     shared_float_properties.set_property(k, v)
                env.reset()
                all_step_result = _reset_results()
                _send_response("reset", all_step_result)
            elif cmd.name == "close":
                break
    except (KeyboardInterrupt):
        logger.info(
            f"UnityEnvironment worker {worker_id}: environment stopping.")
        step_queue.put(EnvironmentResponse("env_close", worker_id, None))
    finally:
        # If this worker has put an item in the step queue that hasn't been processed by the EnvManager, the process
        # will hang until the item is processed. We avoid this behavior by using Queue.cancel_join_thread()
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread for
        # more info.
        logger.debug(f"UnityEnvironment worker {worker_id} closing.")
        step_queue.cancel_join_thread()
        step_queue.close()
        env.close()
        logger.debug(f"UnityEnvironment worker {worker_id} done.")
Example #23
def worker(
    parent_conn: Connection,
    step_queue: Queue,
    pickled_env_factory: str,
    worker_id: int,
    engine_configuration: EngineConfig,
) -> None:
    env_factory: Callable[[int, List[SideChannel]],
                          UnityEnvironment] = cloudpickle.loads(
                              pickled_env_factory)
    shared_float_properties = FloatPropertiesChannel()
    engine_configuration_channel = EngineConfigurationChannel()
    engine_configuration_channel.set_configuration(engine_configuration)
    stats_channel = StatsSideChannel()
    env: BaseEnv = env_factory(
        worker_id,
        [shared_float_properties, engine_configuration_channel, stats_channel],
    )

    def _send_response(cmd_name, payload):
        parent_conn.send(EnvironmentResponse(cmd_name, worker_id, payload))

    def _generate_all_results() -> AllStepResult:
        all_step_result: AllStepResult = {}
        for brain_name in env.get_agent_groups():
            all_step_result[brain_name] = env.get_step_result(brain_name)
        return all_step_result

    def external_brains():
        result = {}
        for brain_name in env.get_agent_groups():
            result[brain_name] = group_spec_to_brain_parameters(
                brain_name, env.get_agent_group_spec(brain_name))
        return result

    try:
        while True:
            cmd: EnvironmentCommand = parent_conn.recv()
            if cmd.name == "step":
                all_action_info = cmd.payload
                for brain_name, action_info in all_action_info.items():
                    if len(action_info.action) != 0:
                        env.set_actions(brain_name, action_info.action)
                env.step()
                all_step_result = _generate_all_results()
                # The timers in this process are independent from all the processes and the "main" process
                # So after we send back the root timer, we can safely clear them.
                # Note that we could randomly return timers a fraction of the time if we wanted to reduce
                # the data transferred.
                # TODO get gauges from the workers and merge them in the main process too.
                env_stats = stats_channel.get_and_reset_stats()
                step_response = StepResponse(all_step_result, get_timer_root(),
                                             env_stats)
                step_queue.put(
                    EnvironmentResponse("step", worker_id, step_response))
                reset_timers()
            elif cmd.name == "external_brains":
                _send_response("external_brains", external_brains())
            elif cmd.name == "get_properties":
                reset_params = shared_float_properties.get_property_dict_copy()
                _send_response("get_properties", reset_params)
            elif cmd.name == "reset":
                for k, v in cmd.payload.items():
                    shared_float_properties.set_property(k, v)
                env.reset()
                all_step_result = _generate_all_results()
                _send_response("reset", all_step_result)
            elif cmd.name == "close":
                break
    except (KeyboardInterrupt, UnityCommunicationException,
            UnityTimeOutException):
        logger.info(
            f"UnityEnvironment worker {worker_id}: environment stopping.")
        step_queue.put(EnvironmentResponse("env_close", worker_id, None))
    finally:
        # If this worker has put an item in the step queue that hasn't been processed by the EnvManager, the process
        # will hang until the item is processed. We avoid this behavior by using Queue.cancel_join_thread()
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread for
        # more info.
        logger.debug(f"UnityEnvironment worker {worker_id} closing.")
        step_queue.cancel_join_thread()
        step_queue.close()
        env.close()
        logger.debug(f"UnityEnvironment worker {worker_id} done.")
Example #24
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value,
                 out_counter_value, drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        self.process = Process(name=self.name, target=self.run)
        self.process.start()

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level = SLOW_ONLY

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        rospy.init_node(WORKER_NODE_NAME %
                        (self.nodename_prefix, self.id, self.collname),
                        anonymous=False)

        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(
                    self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class,
                                                   self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty():
                sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()

    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            #self.queue.put((topic, data, current_time or datetime.now()))
            self.queue.put((topic, data, rospy.get_time()))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
            t = None
            try:
                t = self.queue.get(True)
            except IOError:
                # Anticipate Ctrl-C
                #print("Quit W1: %s" % self.name)
                self.quit.value = 1
                break
            if isinstance(t, tuple):
                self.out_counter.increment()
                self.worker_out_counter.increment()
                topic = t[0]
                msg = t[1]
                ctime = t[2]

                if isinstance(msg, rospy.Message):
                    doc = ros_datacentre.util.msg_to_document(msg)
                    doc["__recorded"] = ctime or datetime.now()
                    doc["__topic"] = topic
                    try:
                        #print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
                        #pprint.pprint(doc)
                        self.collection.insert(doc)
                    except InvalidDocument as e:
                        print("InvalidDocument " + current_process().name +
                              "@" + topic + ": \n")
                        print(e)
                    except InvalidStringData as e:
                        print("InvalidStringData " + current_process().name +
                              "@" + topic + ": \n")
                        print(e)

            else:
Example #25
def main():
    import time
    from multiprocessing import Process, Queue

    from wikidump.iterator import iter_read

    prepare_db(DB_NAME)

    data_queue = Queue(100)
    write_queue = Queue(10)

    processors = []

    for idx in range(3):
        processor = Process(target=process,
                            args=(
                                data_queue,
                                write_queue,
                            ),
                            name='processor-%d' % idx)
        processor.start()
        processors.append(processor)

    writer = Process(target=write,
                     args=(
                         DB_NAME,
                         write_queue,
                     ),
                     name='writer')
    writer.start()

    i = iter_read()
    try:
        for idx, elm_str in enumerate(i):
            data_queue.put(elm_str)

            if idx >= 1_000_000:
                break
    except KeyboardInterrupt:
        print('\nUser terminated. Will clean up...\n')
    finally:
        del i

        print('Waiting for all processor processes to join...', end='')
        live_processes = [p for p in processors if p.is_alive()]
        while live_processes:
            try:
                data_queue.put(None, False)
            except queue.Full:
                pass
            finally:
                time.sleep(0.5)
                live_processes = [p for p in processors if p.is_alive()]

        for p in processors:
            p.join()
            time.sleep(0.1)
        print('    done.')

        print('Waiting for writer process to join...', end='')
        while writer.is_alive():
            try:
                write_queue.put(None, False)
            except queue.Full:
                time.sleep(0.5)
                pass
        print('    done.')

    # Clean up any writes still pending on the queues and release the
    # locks held by their feeder thread(s).
    try:
        flush_queue(data_queue)
        data_queue.cancel_join_thread()
        data_queue.close()
        data_queue.join_thread()
    except BrokenPipeError:
        print('broken pipe error while trying to clean up data queue.')

    try:
        flush_queue(write_queue)
        write_queue.cancel_join_thread()
        write_queue.close()
        write_queue.join_thread()
    except BrokenPipeError:
        print('broken pipe error while trying to clean up write queue.')
Example #26
    while True:
        if not save_screenshots_queue.empty():
            create_date, filename = save_screenshots_queue.get()
            with open(filename, 'rb') as file:
                resp = requests.post(
                    url=url,
                    data={
                        'description': description or str(),
                        'create_date': create_date
                    },
                    files={
                        'image': (filename, file)
                    }
                )
            if resp.json()['status'] == 'success':
                try:
                    os.remove(filename)
                except FileNotFoundError:
                    pass
            

if __name__ == '__main__':
    command_queue = Queue()
    save_screenshots_queue = Queue()
    
    command_queue.cancel_join_thread()
    save_screenshots_queue.cancel_join_thread()

    gui = GUI(command_queue=command_queue, save_screenshots_queue=save_screenshots_queue)
    gui.master.mainloop()
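
The uploader above spins on save_screenshots_queue.empty() without blocking or sleeping. A sketch of the same loop built on a blocking get() and a None sentinel instead of the busy-wait; url and description are the variables the original closes over, and the sentinel convention is an assumption:

# Same upload loop, but blocking on the queue and stopping on a None sentinel.
while True:
    item = save_screenshots_queue.get()      # blocks until a screenshot is queued
    if item is None:                          # producer pushes None to request shutdown
        break
    create_date, filename = item
    with open(filename, 'rb') as file:
        resp = requests.post(
            url=url,
            data={'description': description or '', 'create_date': create_date},
            files={'image': (filename, file)}
        )
    if resp.json().get('status') == 'success':
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass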
예제 #27
0
class Translator(object):
    def __init__(self, settings):
        """
        Loads translation models.
        """
        self._models = settings.models
        self._num_processes = settings.num_processes
        self._verbose = settings.verbose
        self._retrieved_translations = defaultdict(dict)
        self._batch_size = settings.minibatch_size

        # load model options
        self._load_model_options()
        # set up queues
        self._init_queues()
        # init worker processes
        self._init_processes()

    def _load_model_options(self):
        """
        Loads config options for each model.
        """

        self._options = []
        for model in self._models:
            config = load_config_from_json_file(model)
            setattr(config, 'reload', model)
            self._options.append(config)

        _, _, _, self._num_to_target = util.load_dictionaries(self._options[0])

    def _init_queues(self):
        """
        Sets up shared queues for inter-process communication.
        """
        self._input_queue = Queue()
        self._output_queue = Queue()

    def shutdown(self):
        """
        Executed from parent process to terminate workers,
        method: "poison pill".
        """
        for process in self._processes:
            self._input_queue.put(None)

    def _init_processes(self):
        """
        Starts child (worker) processes.
        """
        processes = [None] * self._num_processes
        for process_id in range(self._num_processes):
            processes[process_id] = Process(target=self._start_worker,
                                            args=(process_id, ))
            processes[process_id].start()

        self._processes = processes

    ### MODEL LOADING AND TRANSLATION IN CHILD PROCESS ###

    def _load_models(self, process_id, sess):
        """
        Loads models and returns them
        """
        logging.debug("Process '%s' - Loading models\n" % (process_id))

        import tensorflow as tf
        models = []
        for i, options in enumerate(self._options):
            with tf.variable_scope("model%d" % i) as scope:
                model = rnn_model.RNNModel(options)
                saver = model_loader.init_or_restore_variables(
                    options, sess, ensemble_scope=scope)
                models.append(model)

        logging.info("NOTE: Length of translations is capped to {}".format(
            self._options[0].translation_maxlen))
        return models

    def _start_worker(self, process_id):
        """
        Function executed by each worker once started. Do not execute in
        the parent process.
        """

        # load TF functionality
        import tensorflow as tf
        tf_config = tf.ConfigProto()
        tf_config.allow_soft_placement = True
        sess = tf.Session(config=tf_config)
        models = self._load_models(process_id, sess)
        ensemble = inference.InferenceModelSet(models, self._options)

        # listen to queue in while loop, translate items
        while True:
            input_item = self._input_queue.get()

            if input_item is None:
                break
            idx = input_item.idx
            request_id = input_item.request_id

            output_item = self._translate(process_id, input_item, ensemble,
                                          sess)
            self._output_queue.put((request_id, idx, output_item))

        return

    def _translate(self, process_id, input_item, ensemble, sess):
        """
        Actual translation (model sampling).
        """

        # unpack input item attributes
        k = input_item.k
        x = input_item.batch
        #max_ratio = input_item.max_ratio

        y_dummy = numpy.zeros(shape=(len(x), 1))
        x, x_mask, _, _ = util.prepare_data(x,
                                            y_dummy,
                                            self._options[0].factors,
                                            maxlen=None)

        sample = ensemble.beam_search(sess, x, x_mask, k)

        return sample

    ### WRITING TO AND READING FROM QUEUES ###

    def _send_jobs(self, input_, translation_settings):
        """
        Splits the input into minibatches and puts one QueueItem per batch on the input queue.
        """
        source_batches = []

        try:
            batches, idxs = util.read_all_lines(self._options[0], input_,
                                                self._batch_size)
        except exception.Error as x:
            logging.error(x.msg)
            for process in self._processes:
                process.terminate()
            sys.exit(1)

        for idx, batch in enumerate(batches):

            input_item = QueueItem(
                verbose=self._verbose,
                k=translation_settings.beam_size,
                normalization_alpha=translation_settings.normalization_alpha,
                nbest=translation_settings.n_best,
                batch=batch,
                idx=idx,
                request_id=translation_settings.request_id)

            self._input_queue.put(input_item)
            source_batches.append(batch)
        return idx + 1, source_batches, idxs

    def _retrieve_jobs(self, num_samples, request_id, timeout=5):
        """
        Collects translated batches for a request from the output queue, aborting if a worker has died.
        """
        while len(self._retrieved_translations[request_id]) < num_samples:
            resp = None
            while resp is None:
                try:
                    resp = self._output_queue.get(True, timeout)
                # if queue is empty after 5s, check if processes are still alive
                except Empty:
                    for midx in range(self._num_processes):
                        if not self._processes[midx].is_alive(
                        ) and self._processes[midx].exitcode != 0:
                            # kill all other processes and raise exception if one dies
                            self._input_queue.cancel_join_thread()
                            self._output_queue.cancel_join_thread()
                            for idx in range(self._num_processes):
                                self._processes[idx].terminate()
                            logging.error(
                                "Translate worker process {0} crashed with exitcode {1}"
                                .format(self._processes[midx].pid,
                                        self._processes[midx].exitcode))
                            sys.exit(1)
            request_id, idx, output_item = resp
            self._retrieved_translations[request_id][idx] = output_item
            #print self._retrieved_translations

        for idx in range(num_samples):
            yield self._retrieved_translations[request_id][idx]

        # then remove all entries with this request ID from the dictionary
        del self._retrieved_translations[request_id]

    ### EXPOSED TRANSLATION FUNCTIONS ###

    def translate(self, source_segments, translation_settings):
        """
        Returns the translation of @param source_segments.
        """

        logging.info('Translating {0} segments...\n'.format(
            len(source_segments)))
        start_time = time.time()
        n_batches, source_batches, idxs = self._send_jobs(
            source_segments, translation_settings)

        n_sent = 0
        outputs = [None] * n_batches
        for i, samples in enumerate(
                self._retrieve_jobs(n_batches,
                                    translation_settings.request_id)):
            outputs[i] = list(samples)
            n_sent += len(samples)
            logging.info('Translated {} sents'.format(n_sent))

        outputs = [beam for batch in outputs for beam in batch]
        outputs = numpy.array(outputs, dtype=numpy.object)
        outputs = outputs[idxs.argsort()]

        translations = []
        for i, beam in enumerate(outputs):
            if translation_settings.normalization_alpha:
                beam = [(sent_cost[0], sent_cost[1] / len(sent_cost[0])**
                         translation_settings.normalization_alpha)
                        for sent_cost in beam]
            beam = sorted(beam, key=lambda sent_cost1: sent_cost1[1])

            if translation_settings.n_best is True:
                n_best_list = []
                for j, (sent, cost) in enumerate(beam):
                    target_words = util.seq2words(sent,
                                                  self._num_to_target,
                                                  join=False)
                    translation = Translation(sentence_id=i,
                                              source_words=source_segments[i],
                                              target_words=target_words,
                                              score=cost,
                                              hypothesis_id=j)
                    n_best_list.append(translation)
                translations.append(n_best_list)
            else:
                best_hypo, cost = beam[0]
                target_words = util.seq2words(best_hypo,
                                              self._num_to_target,
                                              join=False)
                translation = Translation(sentence_id=i,
                                          source_words=source_segments[i],
                                          target_words=target_words,
                                          score=cost)
                translations.append(translation)

        duration = time.time() - start_time
        logging.info(
            'Translated {} sents in {} sec. Speed {} sents/sec'.format(
                n_sent, duration, n_sent / duration))

        return translations

    def translate_file(self, input_object, translation_settings):
        """
        Translates the contents of a file-like object.
        """
        source_segments = input_object.readlines()
        return self.translate(source_segments, translation_settings)

    def translate_string(self, segment, translation_settings):
        """
        Translates a single segment
        """
        if not segment.endswith('\n'):
            segment += '\n'
        source_segments = [segment]
        return self.translate(source_segments, translation_settings)

    def translate_list(self, segments, translation_settings):
        """
        Translates a list of segments
        """
        source_segments = [
            s + '\n' if not s.endswith('\n') else s for s in segments
        ]
        return self.translate(source_segments, translation_settings)

    ### FUNCTIONS FOR WRITING THE RESULTS ###

    def write_translation(self, output_file, translation,
                          translation_settings):
        """
        Writes a single translation to a file or STDOUT.
        """
        output_items = []
        # sentence ID only for nbest
        if translation_settings.n_best is True:
            output_items.append(str(translation.sentence_id))

        # translations themselves
        output_items.append(" ".join(translation.target_words))

        # write scores for nbest?
        if translation_settings.n_best is True:
            output_items.append(str(translation.score))

        if translation_settings.n_best is True:
            output_file.write(" ||| ".join(output_items) + "\n")
        else:
            output_file.write("\n".join(output_items) + "\n")

    def write_translations(self, output_file, translations,
                           translation_settings):
        """
        Writes translations to a file or STDOUT.
        """
        if translation_settings.n_best is True:
            for nbest_list in translations:
                for translation in nbest_list:
                    self.write_translation(output_file, translation,
                                           translation_settings)
        else:
            for translation in translations:
                self.write_translation(output_file, translation,
                                       translation_settings)
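
QueueItem is constructed above with keyword arguments that the workers later read back as attributes (idx, request_id, k, batch), but its definition is not part of this excerpt. A minimal stand-in consistent with that usage, offered only as a sketch:

class QueueItem(object):
    # Plain attribute container for per-batch translation jobs: every keyword
    # argument becomes an attribute. The real class may carry more logic.
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)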
예제 #28
0
class Output:
    @classmethod
    def setup(cls):
        cls.output_queue = Queue()

        cls.output_thread = Thread(target=cls.output_worker,
                                   args=(cls.output_queue, ))
        cls.output_thread.daemon = True
        cls.output_thread.start()

    @classmethod
    def stop(cls):
        cls.output_queue.put(None)
        cls.output_thread.join()
        cls.output_queue.close()
        cls.output_queue.cancel_join_thread()

    @classmethod
    def write(cls, message):
        cls.output_queue.put(message)

    @classmethod
    def output_worker(cls, output_queue):
        while True:
            message = output_queue.get()
            if message is None:
                break

            if isinstance(message, str):
                message = {'message': message}

            if 'time' not in message:
                now = datetime.now()
                message['time'] = now.strftime(time_format)

            format_by_type = {
                'http': http_output_format,
                'dns': dns_output_format,
                'port_service': port_service_output_format,
                'smb': smb_output_format,
                'mssql': mssql_output_format,
                'mysql': mysql_output_format,
                'postgresql': postgresql_output_format,
            }
            message_type = message.get('message_type')
            if message_type in format_by_type:
                output_format = format_by_type[message_type]
            elif 'target' in message:
                output_format = target_output_format
            else:
                output_format = simple_output_format

            # Remove control characters which breaks terminal
            message = output_format.format(**message)
            message = ''.join([
                c if ord(c) not in [0x9d, 0x9e, 0x9f] else '\\x%x' % ord(c)
                for c in message
            ])

            tqdm.write(message)
            sys.stdout.flush()
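
Typical use of the Output helper above, assuming the module-level format strings (simple_output_format, target_output_format, time_format, ...) are defined and only reference the keys supplied here:

Output.setup()                                   # start the background writer thread
Output.write("scan started")                     # plain strings become {'message': ...}
Output.write({'target': '10.0.0.5',
              'message': 'port 80 open'})        # routed to target_output_format
Output.stop()                                    # sends the None sentinel and joins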
예제 #29
0
class ClientResourceHelper(ResourceHelper):

    RUN_DURATION = 60
    QUEUE_WAIT_TIME = 5
    SYNC_PORT = 1
    ASYNC_PORT = 2

    def __init__(self, setup_helper):
        super(ClientResourceHelper, self).__init__(setup_helper)
        self.vnfd_helper = setup_helper.vnfd_helper
        self.scenario_helper = setup_helper.scenario_helper

        self.client = None
        self.client_started = Value('i', 0)
        self.all_ports = None
        self._queue = Queue()
        self._result = {}
        self._terminated = Value('i', 0)

    def _build_ports(self):
        self.networks = self.vnfd_helper.port_pairs.networks
        self.uplink_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.uplink_ports)
        self.downlink_ports = \
            self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.downlink_ports)
        self.all_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.all_ports)

    def port_num(self, intf):
        # by default return port num
        return self.vnfd_helper.port_num(intf)

    def get_stats(self, *args, **kwargs):
        try:
            return self.client.get_stats(*args, **kwargs)
        except STLError:
            LOG.error('TRex client not connected')
            return {}

    def generate_samples(self, ports, key=None, default=None):
        # ports must be the ports that are actually in use
        last_result = self.get_stats(ports)
        key_value = last_result.get(key, default)

        if not isinstance(last_result, Mapping):  # added for mock unit test
            self._terminated.value = 1
            return {}

        samples = {}
        # recalculate port for interface and see if it matches ports provided
        for intf in self.vnfd_helper.interfaces:
            name = intf["name"]
            port = self.vnfd_helper.port_num(name)
            if port in ports:
                xe_value = last_result.get(port, {})
                samples[name] = {
                    "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
                    "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
                    "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
                    "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
                    "in_packets": int(xe_value.get("ipackets", 0)),
                    "out_packets": int(xe_value.get("opackets", 0)),
                }
                if key:
                    samples[name][key] = key_value
        return samples

    def _run_traffic_once(self, traffic_profile):
        traffic_profile.execute_traffic(self)
        self.client_started.value = 1
        time.sleep(self.RUN_DURATION)
        samples = self.generate_samples(traffic_profile.ports)
        time.sleep(self.QUEUE_WAIT_TIME)
        self._queue.put(samples)

    def run_traffic(self, traffic_profile):
        # if we don't do this we can hang waiting for the queue to drain
        # have to do this in the subprocess
        self._queue.cancel_join_thread()
        # fixme: fix passing correct trex config file,
        # instead of searching the default path
        try:
            self._build_ports()
            self.client = self._connect()
            self.client.reset(ports=self.all_ports)
            self.client.remove_all_streams(self.all_ports)  # remove all streams
            traffic_profile.register_generator(self)

            while self._terminated.value == 0:
                self._run_traffic_once(traffic_profile)

            self.client.stop(self.all_ports)
            self.client.disconnect()
            self._terminated.value = 0
        except STLError:
            if self._terminated.value:
                LOG.debug("traffic generator is stopped")
                return  # return if trex/tg server is stopped.
            raise

    def terminate(self):
        self._terminated.value = 1  # stop client

    def clear_stats(self, ports=None):
        if ports is None:
            ports = self.all_ports
        self.client.clear_stats(ports=ports)

    def start(self, ports=None, *args, **kwargs):
        # pylint: disable=keyword-arg-before-vararg
        # NOTE(ralonsoh): defining keyworded arguments before variable
        # positional arguments is a bug. This function definition doesn't work
        # in Python 2, although it works in Python 3. Reference:
        # https://www.python.org/dev/peps/pep-3102/
        if ports is None:
            ports = self.all_ports
        self.client.start(ports=ports, *args, **kwargs)

    def collect_kpi(self):
        if not self._queue.empty():
            kpi = self._queue.get()
            self._result.update(kpi)
            LOG.debug('Got KPIs from _queue for %s %s',
                      self.scenario_helper.name, self.RESOURCE_WORD)
        return self._result

    def _connect(self, client=None):
        if client is None:
            client = STLClient(username=self.vnfd_helper.mgmt_interface["user"],
                               server=self.vnfd_helper.mgmt_interface["ip"],
                               verbose_level=LoggerApi.VERBOSE_QUIET)

        # try to connect with 5s intervals, 30s max
        for idx in range(6):
            try:
                client.connect()
                break
            except STLError:
                LOG.info("Unable to connect to Trex Server.. Attempt %s", idx)
                time.sleep(5)
        return client
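
The comment at the top of run_traffic points at a general multiprocessing pitfall: a child process that has put items on a Queue will block on exit until the queue's feeder thread has flushed them, unless cancel_join_thread() is called in that child. A standalone sketch of the pattern, independent of TRex:

from multiprocessing import Process, Queue

def produce_samples(q):
    # Called in the subprocess: cancel_join_thread() keeps its exit from
    # blocking on the queue's feeder thread (items not yet flushed may be lost).
    q.cancel_join_thread()
    q.put({'rx_pps': 1000.0})
    q.put({'rx_pps': 2000.0})

if __name__ == '__main__':
    q = Queue()
    p = Process(target=produce_samples, args=(q,))
    p.start()
    p.join()
    while not q.empty():
        print(q.get())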
예제 #30
0
class Translator(object):
    def __init__(self, decoder_settings):
        """
        Loads translation models.
        """
        self._models = decoder_settings.models
        self._num_processes = decoder_settings.num_processes
        self._device_list = decoder_settings.device_list
        self._verbose = decoder_settings.verbose
        self._retrieved_translations = defaultdict(dict)

        # load model options
        self._load_model_options()
        # load and invert dictionaries
        self._build_dictionaries()
        # set up queues
        self._init_queues()
        # init worker processes
        self._init_processes()

    def _load_model_options(self):
        """
        Loads config options for each model.
        """
        options = []
        for model in self._models:
            options.append(load_config(model))
            # backward compatibility
            fill_options(options[-1])

        self._options = options

    def _build_dictionaries(self):
        """
        Builds and inverts source and target dictionaries, taken
        from the first model since all of them must have the same
        vocabulary.
        """
        dictionaries = self._options[0]['dictionaries']
        dictionaries_source = dictionaries[:-1]
        dictionary_target = dictionaries[-1]

        # load and invert source dictionaries
        word_dicts = []
        word_idicts = []
        for dictionary in dictionaries_source:
            word_dict = load_dict(dictionary)
            if self._options[0]['n_words_src']:
                for key, idx in word_dict.items():
                    if idx >= self._options[0]['n_words_src']:
                        del word_dict[key]
            word_idict = dict()
            for kk, vv in word_dict.iteritems():
                word_idict[vv] = kk
            word_idict[0] = '<eos>'
            word_idict[1] = 'UNK'
            word_dicts.append(word_dict)
            word_idicts.append(word_idict)

        self._word_dicts = word_dicts
        self._word_idicts = word_idicts

        # load and invert target dictionary
        word_dict_trg = load_dict(dictionary_target)
        word_idict_trg = dict()
        for kk, vv in word_dict_trg.iteritems():
            word_idict_trg[vv] = kk
        word_idict_trg[0] = '<eos>'
        word_idict_trg[1] = 'UNK'

        self._word_idict_trg = word_idict_trg

    def _init_queues(self):
        """
        Sets up shared queues for inter-process communication.
        """
        self._input_queue = Queue()
        self._output_queue = Queue()

    def shutdown(self):
        """
        Executed from parent process to terminate workers,
        method: "poison pill".
        """
        for process in self._processes:
            self._input_queue.put(None)

    def _init_processes(self):
        """
        Starts child (worker) processes.
        """
        processes = [None] * self._num_processes
        for process_id in xrange(self._num_processes):
            deviceid = ''
            if self._device_list is not None and len(self._device_list) != 0:
                deviceid = self._device_list[process_id %
                                             len(self._device_list)].strip()
            processes[process_id] = Process(target=self._start_worker,
                                            args=(process_id, deviceid))
            processes[process_id].start()

        self._processes = processes

    ### MODEL LOADING AND TRANSLATION IN CHILD PROCESS ###

    def _load_theano(self):
        """
        Loads models, sets theano shared variables and builds samplers.
        This entails irrevocable binding to a specific GPU.
        """

        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        from theano import shared

        from nmt import (build_sampler, gen_sample)
        from theano_util import (numpy_floatX, load_params, init_theano_params)

        trng = RandomStreams(1234)
        use_noise = shared(numpy_floatX(0.))

        fs_init = []
        fs_next = []

        for model, option in zip(self._models, self._options):
            param_list = numpy.load(model).files
            param_list = dict.fromkeys(
                [key for key in param_list if not key.startswith('adam_')], 0)
            params = load_params(model, param_list)
            tparams = init_theano_params(params)

            # always return alignment at this point
            f_init, f_next = build_sampler(tparams,
                                           option,
                                           use_noise,
                                           trng,
                                           return_alignment=True)

            fs_init.append(f_init)
            fs_next.append(f_next)

        return trng, fs_init, fs_next, gen_sample

    def _set_device(self, device_id):
        """
        Modifies environment variable to change the THEANO device.
        """
        if device_id != '':
            try:
                theano_flags = os.environ['THEANO_FLAGS'].split(',')
                exist = False
                for i in xrange(len(theano_flags)):
                    if theano_flags[i].strip().startswith('device'):
                        exist = True
                        theano_flags[i] = '%s=%s' % ('device', device_id)
                        break
                if exist is False:
                    theano_flags.append('%s=%s' % ('device', device_id))
                os.environ['THEANO_FLAGS'] = ','.join(theano_flags)
            except KeyError:
                # environment variable does not exist at all
                os.environ['THEANO_FLAGS'] = 'device=%s' % device_id

    def _load_models(self, process_id, device_id):
        """
        Modifies environment variable to change the THEANO device, then loads
        models and returns them.
        """
        logging.debug("Process '%s' - Loading models on device %s\n" %
                      (process_id, device_id))

        # modify environment flag 'device'
        self._set_device(device_id)

        # build and return models
        return self._load_theano()

    def _start_worker(self, process_id, device_id):
        """
        Function executed by each worker once started. Do not execute in
        the parent process.
        """
        # load theano functionality
        trng, fs_init, fs_next, gen_sample = self._load_models(
            process_id, device_id)

        # listen to queue in while loop, translate items
        while True:
            input_item = self._input_queue.get()

            if input_item is None:
                break
            idx = input_item.idx
            request_id = input_item.request_id

            output_item = self._translate(process_id, input_item, trng,
                                          fs_init, fs_next, gen_sample)
            self._output_queue.put((request_id, idx, output_item))

        return

    def _translate(self, process_id, input_item, trng, fs_init, fs_next,
                   gen_sample):
        """
        Actual translation (model sampling).
        """

        # unpack input item attributes
        normalization_alpha = input_item.normalization_alpha
        nbest = input_item.nbest
        idx = input_item.idx

        # logging
        logging.debug('{0} - {1}\n'.format(process_id, idx))

        # sample given an input sequence and obtain scores
        sample, score, word_probs, alignment, hyp_graph = self._sample(
            input_item, trng, fs_init, fs_next, gen_sample)

        # normalize scores according to sequence lengths
        if normalization_alpha:
            adjusted_lengths = numpy.array(
                [len(s)**normalization_alpha for s in sample])
            score = score / adjusted_lengths
        if nbest is True:
            output_item = sample, score, word_probs, alignment, hyp_graph
        else:
            # return translation with lowest score only
            sidx = numpy.argmin(score)
            output_item = sample[sidx], score[sidx], word_probs[
                sidx], alignment[sidx], hyp_graph

        return output_item

    def _sample(self, input_item, trng, fs_init, fs_next, gen_sample):
        """
        Sample from model.
        """

        # unpack input item attributes
        return_hyp_graph = input_item.return_hyp_graph
        return_alignment = input_item.return_alignment
        suppress_unk = input_item.suppress_unk
        k = input_item.k
        seq = input_item.seq

        return gen_sample(fs_init,
                          fs_next,
                          numpy.array(seq).T.reshape(
                              [len(seq[0]), len(seq), 1]),
                          trng=trng,
                          k=k,
                          maxlen=200,
                          stochastic=False,
                          argmax=False,
                          return_alignment=return_alignment,
                          suppress_unk=suppress_unk,
                          return_hyp_graph=return_hyp_graph)

    ### WRITING TO AND READING FROM QUEUES ###

    def _send_jobs(self, input_, translation_settings):
        """
        Tokenizes the input lines and puts one QueueItem per sentence on the input queue.
        """
        source_sentences = []
        for idx, line in enumerate(input_):
            if translation_settings.char_level:
                words = list(line.decode('utf-8').strip())
            else:
                words = line.strip().split()

            x = []
            for w in words:
                w = [
                    self._word_dicts[i][f] if f in self._word_dicts[i] else 1
                    for (i, f) in enumerate(w.split('|'))
                ]
                if len(w) != self._options[0]['factors']:
                    logging.warning(
                        'Expected {0} factors, but input word has {1}\n'.
                        format(self._options[0]['factors'], len(w)))
                    for midx in xrange(self._num_processes):
                        self._processes[midx].terminate()
                    sys.exit(1)
                x.append(w)

            x += [[0] * self._options[0]['factors']]

            input_item = QueueItem(
                verbose=self._verbose,
                return_hyp_graph=translation_settings.get_search_graph,
                return_alignment=translation_settings.get_alignment,
                k=translation_settings.beam_width,
                suppress_unk=translation_settings.suppress_unk,
                normalization_alpha=translation_settings.normalization_alpha,
                nbest=translation_settings.n_best,
                seq=x,
                idx=idx,
                request_id=translation_settings.request_id)

            self._input_queue.put(input_item)
            source_sentences.append(words)
        return idx + 1, source_sentences

    def _retrieve_jobs(self, num_samples, request_id, timeout=5):
        """
        Collects translated items for a request from the output queue, aborting if a worker has died.
        """
        while len(self._retrieved_translations[request_id]) < num_samples:
            resp = None
            while resp is None:
                try:
                    resp = self._output_queue.get(True, timeout)
                # if queue is empty after 5s, check if processes are still alive
                except Empty:
                    for midx in xrange(self._num_processes):
                        if not self._processes[midx].is_alive(
                        ) and self._processes[midx].exitcode != 0:
                            # kill all other processes and raise exception if one dies
                            self._input_queue.cancel_join_thread()
                            self._output_queue.cancel_join_thread()
                            for idx in xrange(self._num_processes):
                                self._processes[idx].terminate()
                            logging.error(
                                "Translate worker process {0} crashed with exitcode {1}"
                                .format(self._processes[midx].pid,
                                        self._processes[midx].exitcode))
                            sys.exit(1)
            request_id, idx, output_item = resp
            self._retrieved_translations[request_id][idx] = output_item
            #print self._retrieved_translations

        for idx in xrange(num_samples):
            yield self._retrieved_translations[request_id][idx]

        # then remove all entries with this request ID from the dictionary
        del self._retrieved_translations[request_id]

    ### EXPOSED TRANSLATION FUNCTIONS ###

    def translate(self, source_segments, translation_settings):
        """
        Returns the translation of @param source_segments.
        """
        logging.info('Translating {0} segments...\n'.format(
            len(source_segments)))
        n_samples, source_sentences = self._send_jobs(source_segments,
                                                      translation_settings)

        translations = []
        for i, trans in enumerate(
                self._retrieve_jobs(n_samples,
                                    translation_settings.request_id)):

            samples, scores, word_probs, alignment, hyp_graph = trans
            # n-best list
            if translation_settings.n_best is True:
                order = numpy.argsort(scores)
                n_best_list = []
                for j in order:
                    current_alignment = None if not translation_settings.get_alignment else alignment[
                        j]
                    translation = Translation(sentence_id=i,
                                              source_words=source_sentences[i],
                                              target_words=seqs2words(
                                                  samples[j],
                                                  self._word_idict_trg,
                                                  join=False),
                                              score=scores[j],
                                              alignment=current_alignment,
                                              target_probs=word_probs[j],
                                              hyp_graph=hyp_graph,
                                              hypothesis_id=j)
                    n_best_list.append(translation)
                translations.append(n_best_list)
            # single-best translation
            else:
                current_alignment = None if not translation_settings.get_alignment else alignment
                translation = Translation(sentence_id=i,
                                          source_words=source_sentences[i],
                                          target_words=seqs2words(
                                              samples,
                                              self._word_idict_trg,
                                              join=False),
                                          score=scores,
                                          alignment=current_alignment,
                                          target_probs=word_probs,
                                          hyp_graph=hyp_graph)
                translations.append(translation)
        return translations

    def translate_file(self, input_object, translation_settings):
        """
        Translates the contents of a file-like object.
        """
        source_segments = input_object.readlines()
        return self.translate(source_segments, translation_settings)

    def translate_string(self, segment, translation_settings):
        """
        Translates a single segment
        """
        if not segment.endswith('\n'):
            segment += '\n'
        source_segments = [segment]
        return self.translate(source_segments, translation_settings)

    def translate_list(self, segments, translation_settings):
        """
        Translates a list of segments
        """
        source_segments = [
            s + '\n' if not s.endswith('\n') else s for s in segments
        ]
        return self.translate(source_segments, translation_settings)

    ### FUNCTIONS FOR WRITING THE RESULTS ###

    def write_alignment(self, translation, translation_settings):
        """
        Writes alignments to a file.
        """
        output_file = translation_settings.alignment_filename
        # TODO: 1 = TEXT, 2 = JSON
        if translation_settings.alignment_type == 1:
            output_file.write(translation.get_alignment_text() + "\n\n")
        else:
            output_file.write(translation.get_alignment_json() + "\n")

    def write_translation(self, output_file, translation,
                          translation_settings):
        """
        Writes a single translation to a file or STDOUT.
        """
        output_items = []
        # sentence ID only for nbest
        if translation_settings.n_best is True:
            output_items.append(str(translation.sentence_id))

        # translations themselves
        output_items.append(" ".join(translation.target_words))

        # write scores for nbest?
        if translation_settings.n_best is True:
            output_items.append(str(translation.score))

        # write probabilities?
        if translation_settings.get_word_probs:
            output_items.append(translation.get_target_probs())

        if translation_settings.n_best is True:
            output_file.write(" ||| ".join(output_items) + "\n")
        else:
            output_file.write("\n".join(output_items) + "\n")

        # write alignments to file?
        if translation_settings.get_alignment:
            self.write_alignment(translation, translation_settings)

        # construct hypgraph?
        if translation_settings.get_search_graph:
            translation.save_hyp_graph(
                translation_settings.search_graph_filename,
                self._word_idict_trg,
                detailed=True,
                highlight_best=True)

    def write_translations(self, output_file, translations,
                           translation_settings):
        """
        Writes translations to a file or STDOUT.
        """
        if translation_settings.n_best is True:
            for nbest_list in translations:
                for translation in nbest_list:
                    self.write_translation(output_file, translation,
                                           translation_settings)
        else:
            for translation in translations:
                self.write_translation(output_file, translation,
                                       translation_settings)
예제 #31
0
class SelectionStickService(Service):

    def __init__(self, service_name=None, camera_service=None):
        super().__init__(service_name)
        self._stop_tracking_event = Event()
        self._stop_event_detection_event = threading.Event()

        self.camera_img = None
        self._annotations_model = None
        self._physical_objects_model = None
        self._current_scene = None

        self.trigger_interval = SelectionEvent.trigger_interval
        self.repeat_interval = SelectionEvent.repeat_interval

        self.drawing_color = (255, 0, 255)
        self.object_name = None
        self.annotation_name = None
        self.annotation_position = ()

        self.event_timers_phys_obj = {}
        self.event_timers_annotation = {}

        self._camera_service = camera_service
        self._rect_queue = Queue(1)
        self._cam_frame_queue = Queue(1)

        self.__current_rect = None

    def set_current_scene(self, current_scene):
        self._current_scene = current_scene

    def start(self):
        self._camera_service.start_capture()

        tracking_process = SelectionStickTrackingProcess(self._rect_queue,
                                                         self._cam_frame_queue,
                                                         self._stop_tracking_event)
        tracking_process.name = "SelectionStickTrackingProcess"
        tracking_process.daemon = True
        tracking_process.start()

        tracking_thread = threading.Thread(target=self._start_tracking, args=(self._cam_frame_queue,))
        tracking_thread.daemon = True
        tracking_thread.start()

        event_detection_thread = threading.Thread(name="SelectionStickEventDetectionThread",
                                                  target=self._start_event_detection)
        event_detection_thread.daemon = True
        event_detection_thread.start()

    def _start_tracking(self, cam_frame_queue):
        while True:
            time.sleep(isar.SELECTION_STICK_TRACKING_INTERVAL)
            cam_frame = self._camera_service.get_frame()
            cam_frame_queue.put(cam_frame)

    def _start_event_detection(self):
        # get the center of the marker rect
        # check whether the center collides with any annotation or physical object
        # put colliding objects/annotations with a timestamp into a dict
        # check the colliding entries; if one has been colliding longer than the trigger interval, fire the event

        while not self._stop_event_detection_event.is_set():
            current_rect = self.get_current_rect()
            if current_rect is None:
                continue

            if self._current_scene is None:
                continue

            if self._annotations_model is None:
                continue

            if self._physical_objects_model is None:
                continue

            # center_point = self.get_center_point(in_image_coordinates=True)
            center_point = self.get_center_point(in_image_coordinates=False)
            if center_point is None:
                continue

            # center_in_scene = sceneutil.camera_coord_to_scene_coord(center_point)
            center_in_scene = center_point

            collides_with_object = False
            scene_phys_objs = self._current_scene.get_physical_objects()
            for phys_obj in scene_phys_objs:
                if phys_obj.collides_with_point(center_in_scene, sceneutil.scene_scale_factor_c):
                    phys_obj_name = phys_obj.name
                    collides_with_object = True
                    self.drawing_color = (0, 0, 255)
                    self.object_name = phys_obj_name

                    if phys_obj_name not in self.event_timers_phys_obj:
                        self.event_timers_phys_obj[phys_obj_name] = [time.time(), False]
                    else:
                        last = self.event_timers_phys_obj[phys_obj_name][0]
                        triggered = self.event_timers_phys_obj[phys_obj_name][1]
                        time_diff = time.time() - last

                        if time_diff > self.trigger_interval:
                            if not triggered:
                                self.fire_event(phys_obj)
                                self.event_timers_phys_obj[phys_obj_name][1] = True

                        if time_diff > self.repeat_interval:
                            self.fire_event(phys_obj)
                            self.event_timers_phys_obj[phys_obj_name][0] = time.time()

                    break

            if not collides_with_object:
                self.event_timers_phys_obj.clear()

            collides_with_annotation = False
            all_annotations = self._annotations_model.get_all_annotations()
            for annotation in all_annotations:
                if annotation.intersects_with_point(center_in_scene):
                    collides_with_annotation = True
                    self.drawing_color = (0, 0, 255)
                    annotation_name = annotation.name
                    self.annotation_name = annotation_name
                    self.annotation_position = annotation.position.get_value()
                    if annotation.name not in self.event_timers_annotation:
                        self.event_timers_annotation[annotation_name] = [time.time(), False]
                    else:
                        last = self.event_timers_annotation[annotation_name][0]
                        triggered = self.event_timers_annotation[annotation_name][1]
                        time_diff = time.time() - last
                        if time_diff > self.trigger_interval:
                            if not triggered:
                                self.fire_event(annotation)
                                self.event_timers_annotation[annotation_name][1] = True

                        if time_diff > self.repeat_interval:
                            self.fire_event(annotation)
                            self.event_timers_annotation[annotation_name][0] = time.time()
                            # del self.event_timers_annotation[annotation_name]

            if not collides_with_annotation:
                self.event_timers_annotation.clear()

            if not collides_with_object and not collides_with_annotation:
                self.drawing_color = (255, 0, 255)
                self.object_name = None
                self.annotation_name = None
                self.event_timers_phys_obj.clear()
                self.event_timers_annotation.clear()

    def stop(self):
        self._stop_tracking_event.set()
        self._stop_event_detection_event.set()
        self._cam_frame_queue.cancel_join_thread()
        self._rect_queue.cancel_join_thread()

    def fire_event(self, target):
        logger.info("Fire SelectionEvent on: " + str(target))
        scene_id = self._annotations_model.get_current_scene().name
        eventmanager.fire_selection_event(target, scene_id)

    def draw_current_rect(self, img, camera_projector_homography=None, debug=False):
        current_rect = self.get_current_rect()
        if current_rect is not None:
            if camera_projector_homography is not None:
                projected_points = cv2.perspectiveTransform(np.array([[current_rect[0], current_rect[2]]]), camera_projector_homography).squeeze()
                v1 = projected_points[0]
                v2 = projected_points[1]
            else:
                rect_in_scene = sceneutil.camera_coords_to_scene_coords(current_rect, for_projector=True)
                v1 = (rect_in_scene[0][0], rect_in_scene[0][1])
                v2 = (rect_in_scene[2][0], rect_in_scene[2][1])

            v1 = (int(v1[0]), int(v1[1]))
            v2 = (int(v2[0]), int(v2[1]))

            cv2.rectangle(img, v1, v2, self.drawing_color, thickness=2)

            if debug:
                camera_coord = current_rect[1]
                scene_c_coord = sceneutil.camera_coord_to_scene_coord_c(camera_coord)
                projector_coord = v1
                scene_p_coord = sceneutil.projector_coord_to_scene_coord_p(projector_coord)
                persisted_coord = ()
                if self.annotation_name is not None:
                    persisted_coord = self.annotation_position

                cv2.putText(img, "C: " + str(camera_coord), (v2[0], v2[1]), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)
                cv2.putText(img, "per: " + str(persisted_coord), (v2[0], v2[1] + 15), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)
                cv2.putText(img, "SC_C: " + str(scene_c_coord), (v2[0], v2[1] + 30), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)
                cv2.putText(img, "P: " + str(projector_coord), (v2[0], v2[1] + 45), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)
                cv2.putText(img, "SC_P: " + str(scene_p_coord), (v2[0], v2[1] + 60), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)

                if self.object_name is not None:
                    cv2.putText(img, self.object_name, (v1[0], v1[1] - 10), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)

                if self.annotation_name is not None:
                    cv2.putText(img, self.annotation_name, (v1[0], v1[1] - 10), cv2.FONT_HERSHEY_COMPLEX, .5, self.drawing_color, 1)

    def get_current_rect(self):
        try:
            self.__current_rect = self._rect_queue.get(block=True, timeout=isar.SELECTION_STICK_TRACKING_INTERVAL)
        except Exception:
            # no new rect within the tracking interval; keep the previous one
            pass

        return self.__current_rect

    def get_center_point(self, in_image_coordinates=True):
        rect = self.get_current_rect()
        if rect is None:
            return None

        v1 = rect[0]
        v2 = rect[2]

        center = (int((v1[0] + v2[0]) / 2), int((v1[1] + v2[1]) / 2))
        if in_image_coordinates:
            return center
        else:
            return sceneutil.camera_coord_to_scene_coord(center)

    def set_physical_objects_model(self, phm):
        self._physical_objects_model = phm

    def set_annotations_model(self, annot_model):
        self._annotations_model = annot_model
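
The event-detection loop above is a dwell-time trigger: an event fires once after the stick has stayed on the same target for trigger_interval, then again every repeat_interval while it stays there. Stripped of the tracking and collision code, the timer bookkeeping amounts to the following sketch (not the original implementation):

import time

def update_dwell_timer(timers, name, trigger_interval, repeat_interval, fire):
    # timers maps target name -> [first_seen_timestamp, already_triggered]
    if name not in timers:
        timers[name] = [time.time(), False]
        return
    first_seen, triggered = timers[name]
    elapsed = time.time() - first_seen
    if elapsed > trigger_interval and not triggered:
        fire(name)                    # first event after the dwell time
        timers[name][1] = True
    if elapsed > repeat_interval:
        fire(name)                    # repeated event; restart the clock
        timers[name][0] = time.time()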
예제 #32
0
class DLTBroker(object):
    """DLT Broker class manages receiving and filtering of DLT Messages
    """
    def __init__(self,
                 ip_address,
                 port=DLT_DAEMON_TCP_PORT,
                 use_proxy=False,
                 enable_dlt_time=False,
                 **kwargs):
        """Initialize the DLT Broker

        :param str ip_address: IP address of the DLT Daemon. Defaults to a TCP connection, unless a multicast address
        is used, in which case a UDP multicast connection will be used
        :param str port: Port of the DLT Daemon
        :param bool use_proxy: Ignored - compatibility option
        :param bool enable_dlt_time: Record the latest dlt message timestamp if enabled.
        :param **kwargs: All other args passed to DLTMessageHandler
        """

        # - dlt-time share memory init
        self._dlt_time_value = DLTTimeValue() if enable_dlt_time else None

        # - handlers init
        self.mp_stop_flag = Event()
        self.filter_queue = Queue()
        self.message_queue = Queue()
        kwargs["ip_address"] = ip_address
        kwargs["port"] = port
        kwargs["timeout"] = kwargs.get("timeout", DLT_CLIENT_TIMEOUT)
        self.msg_handler = DLTMessageHandler(
            self.filter_queue,
            self.message_queue,
            self.mp_stop_flag,
            kwargs,
            dlt_time_value=self._dlt_time_value,
        )
        self.context_handler = DLTContextHandler(self.filter_queue,
                                                 self.message_queue)

        self._ip_address = ip_address
        self._port = port
        self._filename = kwargs.get("filename")

    def start(self):
        """DLTBroker main worker method"""
        logger.debug(
            "Starting DLTBroker with parameters: use_proxy=%s, ip_address=%s, port=%s, filename=%s, multicast=%s",
            False, self._ip_address, self._port, self._filename,
            ip.ip_address(self._ip_address).is_multicast)

        if self._dlt_time_value:
            logger.debug("Enable dlt time for DLTBroker.")

        self.msg_handler.start()
        self.context_handler.start()

        # - ensure we don't block on join_thread() in stop()
        # https://docs.python.org/2.7/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread
        self.filter_queue.cancel_join_thread()
        self.message_queue.cancel_join_thread()

    def add_context(self, context_queue, filters=None):
        """Register context

        :param Queue context_queue: The queue to which new messages will
                                    be added
        :param tuple filters: A list of tuples (eg: [(apid, ctid)])
                              used to filter messages that go into this
                              queue.
        """
        if filters is None:
            filters = [(None, None)]

        if not isinstance(filters, (tuple, list)):
            raise RuntimeError("Context queue filters must be a list or tuple."
                               " Ex. (('SYS', 'JOUR'), ('AUDI', 'CAPI'))")

        self.context_handler.register(context_queue, filters)

    def remove_context(self, context_queue):
        """Unregister context

        :param Queue context_queue: The queue to unregister.
        """
        self.context_handler.unregister(context_queue)

    def stop(self):
        """Stop the broker"""
        logger.info("Stopping DLTContextHandler and DLTMessageHandler")

        logger.debug("Stop DLTMessageHandler")
        self.mp_stop_flag.set()

        logger.debug("Stop DLTContextHandler")
        self.context_handler.stop()

        logger.debug("Waiting on DLTContextHandler ending")
        self.context_handler.join()

        logger.debug("Waiting on DLTMessageHandler ending")
        if self.msg_handler.is_alive():
            try:
                self.msg_handler.terminate()
            except OSError:
                pass
            else:
                self.msg_handler.join()

        logger.debug("DLTBroker execution done")

    # pylint: disable=invalid-name
    def isAlive(self):
        """Backwards compatibility method

        Called from mtee.testing.connectors.tools.broker_assert. Will
        need to be replaced in MTEE eventually.
        """
        return any(
            (self.msg_handler.is_alive(), self.context_handler.is_alive()))

    def dlt_time(self):
        """Get time for the last dlt message

        The value is seconds from 1970/1/1 0:00:00

        :rtype: float
        """
        if self._dlt_time_value:
            return self._dlt_time_value.timestamp

        raise RuntimeError("Getting dlt time function is not enabled")
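
A typical session with the broker above, assuming a DLT daemon is reachable at the given address; the multiprocessing Queue used for the context and the (apid, ctid) filter are illustrative:

from multiprocessing import Queue

broker = DLTBroker(ip_address="127.0.0.1", enable_dlt_time=True)
broker.start()

messages = Queue()
broker.add_context(messages, filters=[("SYS", "JOUR")])   # only SYS/JOUR messages
# ... consume messages from the queue while the test runs ...
broker.remove_context(messages)
broker.stop()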
예제 #33
0
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
                 drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter  = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter  = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        # print "Creating process %s" % self.name
        self.process = Process(name=self.name, target=self.run)
        # self.process = Thread(name=self.name, target=self.run)
        # print "created %s" % self.process
        self.process.start()
        # print "started %s" % self.process

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = MongoClient(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        # clear signal handlers in this child process, rospy will handle signals for us
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        worker_node_name = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
        # print "Calling init_node with %s from process %s" % (worker_node_name, mp.current_process())
        rospy.init_node(worker_node_name, anonymous=False)

        self.subscriber = None
        while not self.subscriber and not self.is_quit():
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty():
                time.sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()

    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):

        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            #self.queue.put((topic, data, current_time or datetime.now()))
            self.queue.put((topic, data, rospy.get_time(), data._connection_header))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
            t = None
            try:
                t = self.queue.get(True)
            except IOError:
                # Anticipate Ctrl-C
                #print("Quit W1: %s" % self.name)
                self.quit.value = 1
                break
            if isinstance(t, tuple):
                self.out_counter.increment()
                self.worker_out_counter.increment()
                topic = t[0]
                msg   = t[1]
                ctime = t[2]
                connection_header = t[3]

                if isinstance(msg, rospy.Message):

                    try:
                        #print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
                        #pprint.pprint(doc)
                        meta = {}
                        # switched to use inserted_at to match message_store
                        # meta["recorded"] = ctime or datetime.now()
                        meta["topic"]    = topic

                        if connection_header['latching'] == '1':
                            meta['latch'] = True
                        else:
                            meta['latch'] = False

                        if ctime is not None:
                            meta['inserted_at'] = datetime.utcfromtimestamp(ctime)
                        else:
                            meta['inserted_at'] = datetime.utcfromtimestamp(rospy.get_rostime().to_sec())


                        mongodb_store.util.store_message(self.collection, msg, meta)

                    except InvalidDocument as e:
                        print("InvalidDocument " + current_process().name + "@" + topic +": \n")
                        print(e)
                    except InvalidStringData as e:
                        print("InvalidStringData " + current_process().name + "@" + topic +": \n")
                        print(e)

            else:
                #print("Quit W2: %s" % self.name)
                self.quit.value = 1

        # we must make sure to clear the queue before exiting,
        # or the parent thread might deadlock otherwise
        #print("Quit W3: %s" % self.name)
        self.subscriber.unregister()
        self.subscriber = None
        while not self.queue.empty():
            t = self.queue.get_nowait()
        print("STOPPED: %s" % self.name)
Example #34
0
    print('epoch {:d}, iter {:d}/{:d}, training loss: {:.3}, training acc: {:.3}, take {:.2}s'
          .format(imdb.epoch, (i + 1) % imdb.total_batch,
                  imdb.total_batch, loss_value, acc_value, _time))

    if (i + 1) % 25 == 0:
        T.tic()
        val_images, val_labels = queue_out.get()
        val_loss_value, val_acc_value, val_summary = sess.run(
            [loss, accuracy, merged], {input_data: val_images, label_data: val_labels, is_training: 0})
        _val_time = T.toc(average=False)
        print('###validation loss: {:.3}, validation acc: {:.3}, take {:.2}s'
              .format(val_loss_value, val_acc_value, _val_time))
        queue_in.put(True)

        global_step = imdb.epoch * imdb.total_batch + (i % imdb.total_batch)
        train_writer.add_summary(train_summary, global_step)
        val_writer.add_summary(val_summary, global_step)

    if (i % (imdb.total_batch * 2) == 0):
        save_path = cur_saver.save(sess, os.path.join(
            CKPTS_DIR,
            cfg.TRAIN_SNAPSHOT_PREFIX + '_epoch_' + str(imdb.epoch - 1) + '.ckpt'))
        print("Model saved in file: %s" % save_path)

# terminate child processes
if cfg.MULTITHREAD:
    imdb.close_all_processes()
queue_in.cancel_join_thread()
queue_out.cancel_join_thread()
val_data_process.terminate()
Example #35
0
class PlasmaShmQueue:
    def __init__(self, maxsize: int = 0):
        r"""
        Use pyarrow in-memory plasma store to implement shared memory queue.

        Compared to the native `multiprocessing.Queue`, `PlasmaShmQueue` avoids
        pickle/unpickle and communication overhead, leading to better performance
        in multi-process applications.

        :type maxsize: int
        :param maxsize: maximum size of the queue; ``0`` means no limit. (default: ``0``)
        """

        # Lazy start the plasma store manager
        global MGE_PLASMA_STORE_MANAGER
        if MGE_PLASMA_STORE_MANAGER is None:
            try:
                MGE_PLASMA_STORE_MANAGER = _PlasmaStoreManager()
            except Exception as e:
                err_info = (
                    "Please make sure pyarrow is installed correctly!\n"
                    "You can try reinstalling pyarrow and see if you can run "
                    "`plasma_store -s /tmp/mge_plasma_xxx -m 1000` normally.")
                raise RuntimeError(
                    "Exception happened while starting plasma_store: {}\n"
                    "Tips: {}".format(str(e), err_info))
        else:
            MGE_PLASMA_STORE_MANAGER.refcount += 1

        self.socket_name = MGE_PLASMA_STORE_MANAGER.socket_name

        # TODO: how to catch exceptions raised in `plasma.connect`?
        self.client = None

        # Used to store the headers (ObjectIDs) for the data.
        self.queue = Queue(maxsize)  # type: Queue

    def put(self, data, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        try:
            object_id = self.client.put(data)
        except plasma.PlasmaStoreFull:
            raise RuntimeError("plasma store out of memory!")
        try:
            self.queue.put(object_id, block, timeout)
        except queue.Full:
            self.client.delete([object_id])
            raise queue.Full

    def get(self, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        object_id = self.queue.get(block, timeout)
        if not self.client.contains(object_id):
            raise RuntimeError(
                "ObjectID: {} not found in plasma store".format(object_id))
        data = self.client.get(object_id)
        self.client.delete([object_id])
        return data

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def join(self):
        self.queue.join()

    def disconnect_client(self):
        if self.client is not None:
            self.client.disconnect()

    def close(self):
        self.queue.close()
        self.disconnect_client()
        global MGE_PLASMA_STORE_MANAGER
        MGE_PLASMA_STORE_MANAGER.refcount -= 1
        _clear_plasma_store()

    def cancel_join_thread(self):
        self.queue.cancel_join_thread()
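A minimal usage sketch for the class above, assuming it is importable together with a working pyarrow plasma installation; the array shape and maxsize are illustrative only:

import multiprocessing as mp
import numpy as np

def consumer(q):
    data = q.get()                 # blocks until the parent has put the array
    print(data.shape, data.dtype)

if __name__ == "__main__":
    q = PlasmaShmQueue(maxsize=8)  # the class defined above
    p = mp.Process(target=consumer, args=(q,))
    p.start()
    # The array payload goes into the plasma shared-memory store;
    # only a small ObjectID travels through the wrapped Queue.
    q.put(np.ones((1024, 1024), dtype="float32"))
    p.join()
    q.close()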
Example #36
0
class DLTBroker(object):
    """DLT Broker class manages receiving and filtering of DLT Messages
    """

    def __init__(self, ip_address, port=3490, use_proxy=False, **kwargs):
        """Initialize the DLT Broker

        :param str ip_address: IP address of the DLT Daemon
        :param int port: Port of the DLT Daemon
        :param bool use_proxy: Ignored - compatibility option
        :param **kwargs: All other args passed to DLTMessageHandler
        """
        # - handlers init
        self.mp_stop_flag = Event()

        self.filter_queue = Queue()
        self.message_queue = Queue()
        kwargs["ip_address"] = ip_address
        kwargs["timeout"] = kwargs.get("timeout", DLT_CLIENT_TIMEOUT)
        self.msg_handler = DLTMessageHandler(self.filter_queue, self.message_queue, self.mp_stop_flag, kwargs)
        self.context_handler = DLTContextHandler(self.filter_queue, self.message_queue)

        self._ip_address = ip_address
        self._port = port
        self._filename = kwargs.get("filename")

    def start(self):
        """DLTBroker main worker method"""
        logger.debug("Starting DLTBroker with parameters: use_proxy=%s, ip_address=%s, port=%s, filename=%s",
                     False, self._ip_address, self._port, self._filename)

        self.msg_handler.start()
        self.context_handler.start()

        # - ensure we don't block on join_thread() in stop()
        # https://docs.python.org/2.7/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread
        self.filter_queue.cancel_join_thread()
        self.message_queue.cancel_join_thread()

    def add_context(self, context_queue, filters=None):
        """Register context

        :param Queue context_queue: The queue to which new messages will
                                    be added
        :param tuple filters: A list of tuples (eg: [(apid, ctid)])
                              used to filter messages that go into this
                              queue.
        """
        if filters is None:
            filters = [(None, None)]

        if not isinstance(filters, (tuple, list)):
            raise RuntimeError("Context queue filters must be a tuple."
                               " Ex. (('SYS', 'JOUR'), ('AUDI', 'CAPI'))")

        self.context_handler.register(context_queue, filters)

    def remove_context(self, context_queue):
        """Unregister context

        :param Queue context_queue: The queue to unregister.
        """
        self.context_handler.unregister(context_queue)

    def stop(self):
        """Stop the broker"""
        logger.info("Stopping DLTContextHandler and DLTMessageHandler")

        # - stop the DLTMessageHandler process and DLTContextHandler thread
        self.mp_stop_flag.set()
        self.context_handler.stop()

        logger.debug("Waiting on DLTContextHandler and DLTMessageHandler")
        self.context_handler.join()
        if self.msg_handler.is_alive():
            try:
                self.msg_handler.terminate()
            except OSError:
                pass
            else:
                self.msg_handler.join()
        logger.debug("DLTBroker execution done")

    # pylint: disable=invalid-name
    def isAlive(self):
        """Backwards compatibility method

        Called from mtee.testing.connectors.tools.broker_assert. Will
        need to be replaced in MTEE eventually.
        """
        return any((self.msg_handler.is_alive(), self.context_handler.is_alive()))
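The start()/stop() pair above calls cancel_join_thread() on both internal queues so that stopping the broker never blocks on their feeder threads. A hedged usage sketch, assuming a DLT daemon is reachable at the given address; the (apid, ctid) filter pair is the illustrative one from the error message above:

from multiprocessing import Queue
from queue import Empty

broker = DLTBroker(ip_address="127.0.0.1", port=3490)
broker.start()

messages = Queue()
broker.add_context(messages, filters=[("SYS", "JOUR")])
try:
    msg = messages.get(timeout=5)  # first matching DLT message, if any
except Empty:
    msg = None
finally:
    broker.remove_context(messages)
    broker.stop()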
Example #37
0
class ClientResourceHelper(ResourceHelper):

    RUN_DURATION = 60
    QUEUE_WAIT_TIME = 5
    SYNC_PORT = 1
    ASYNC_PORT = 2

    def __init__(self, setup_helper):
        super(ClientResourceHelper, self).__init__(setup_helper)
        self.vnfd_helper = setup_helper.vnfd_helper
        self.scenario_helper = setup_helper.scenario_helper

        self.client = None
        self.client_started = Value('i', 0)
        self.all_ports = None
        self._queue = Queue()
        self._result = {}
        self._terminated = Value('i', 0)

    def _build_ports(self):
        self.networks = self.vnfd_helper.port_pairs.networks
        self.uplink_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.uplink_ports)
        self.downlink_ports = \
            self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.downlink_ports)
        self.all_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.all_ports)

    def port_num(self, intf):
        # by default return port num
        return self.vnfd_helper.port_num(intf)

    def get_stats(self, *args, **kwargs):
        try:
            return self.client.get_stats(*args, **kwargs)
        except STLError:
            LOG.error('TRex client not connected')
            return {}

    def _get_samples(self, ports, port_pg_id=False):
        raise NotImplementedError()

    def _run_traffic_once(self, traffic_profile):
        traffic_profile.execute_traffic(self)
        self.client_started.value = 1
        time.sleep(self.RUN_DURATION)
        samples = self._get_samples(traffic_profile.ports)
        time.sleep(self.QUEUE_WAIT_TIME)
        self._queue.put(samples)

    def run_traffic(self, traffic_profile, mq_producer):
        # if we don't do this we can hang waiting for the queue to drain
        # have to do this in the subprocess
        self._queue.cancel_join_thread()
        # fixme: fix passing correct trex config file,
        # instead of searching the default path
        mq_producer.tg_method_started()
        try:
            self._build_ports()
            self.client = self._connect()
            self.client.reset(ports=self.all_ports)
            self.client.remove_all_streams(self.all_ports)  # remove all streams
            traffic_profile.register_generator(self)

            iteration_index = 0
            while self._terminated.value == 0:
                iteration_index += 1
                if self._run_traffic_once(traffic_profile):
                    self._terminated.value = 1
                mq_producer.tg_method_iteration(iteration_index)

            self.client.stop(self.all_ports)
            self.client.disconnect()
            self._terminated.value = 0
        except STLError:
            if self._terminated.value:
                LOG.debug("traffic generator is stopped")
                return  # return if trex/tg server is stopped.
            raise

        mq_producer.tg_method_finished()

    def terminate(self):
        self._terminated.value = 1  # stop client

    def clear_stats(self, ports=None):
        if ports is None:
            ports = self.all_ports
        self.client.clear_stats(ports=ports)

    def start(self, ports=None, *args, **kwargs):
        # pylint: disable=keyword-arg-before-vararg
        # NOTE(ralonsoh): defining keyworded arguments before variable
        # positional arguments is a bug. This function definition doesn't work
        # in Python 2, although it works in Python 3. Reference:
        # https://www.python.org/dev/peps/pep-3102/
        if ports is None:
            ports = self.all_ports
        self.client.start(ports=ports, *args, **kwargs)

    def collect_kpi(self):
        if not self._queue.empty():
            kpi = self._queue.get()
            self._result.update(kpi)
            LOG.debug('Got KPIs from _queue for %s %s',
                      self.scenario_helper.name, self.RESOURCE_WORD)
        return self._result

    def _connect(self, client=None):
        if client is None:
            client = STLClient(username=self.vnfd_helper.mgmt_interface["user"],
                               server=self.vnfd_helper.mgmt_interface["ip"],
                               verbose_level=LoggerApi.VERBOSE_QUIET)

        # try to connect with 5s intervals, 30s max
        for idx in range(6):
            try:
                client.connect()
                break
            except STLError:
                LOG.info("Unable to connect to Trex Server.. Attempt %s", idx)
                time.sleep(5)
        return client
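The comment at the top of run_traffic() above is the key detail: cancel_join_thread() only affects the copy of the queue held by the calling process, so it has to be invoked inside the subprocess that performs the put(), not in the parent. A minimal sketch of that placement, with illustrative names only:

import multiprocessing as mp

def traffic_worker(sample_queue):
    # Called in the child: this process's feeder thread will no longer
    # block process exit while samples are still buffered.
    sample_queue.cancel_join_thread()
    sample_queue.put({"throughput_mpps": 1.0})  # illustrative KPI sample

if __name__ == "__main__":
    q = mp.Queue()
    worker = mp.Process(target=traffic_worker, args=(q,))
    worker.start()
    print(q.get())  # consume the sample so nothing is left unflushed
    worker.join()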
Example #38
0
class TopicLogger(MongoDBLogger):
    """
    This class implements a generic topic logger.
    It simply dumps all messages received from the topic into the MongoDB.
    """

    def __init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name, max_queuesize=QUEUE_MAXSIZE):
        MongoDBLogger.__init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name)
        self.worker_out_counter = Counter()
        self.worker_in_counter = Counter()
        self.worker_drop_counter = Counter()
        self.queue = Queue(max_queuesize)

    def _init(self):
        """
        This method initializes this process.
        It initializes the connection to the MongoDB and subscribes to the topic.
        """
        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()
        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self._enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                rospy.logwarn("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
            except rospy.ROSInitException:
                rospy.logwarn("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
                self.subscriber = None

    def run(self):
        """
        This method does the actual logging.
        """
        self._init()
        rospy.logdebug("ACTIVE: %s" % self.name)
        # Process the messages
        while not self.is_quit():
            self._dequeue()

        # we must make sure to clear the queue before exiting,
        # or the parent thread might deadlock otherwise
        self.subscriber.unregister()
        self.subscriber = None
        while not self.queue.empty():
            self.queue.get_nowait()
        rospy.logdebug("STOPPED: %s" % self.name)

    def shutdown(self):
        self.queue.put("shutdown")
        super(TopicLogger, self).shutdown()

    def _sanitize_value(self, v):
        if isinstance(v, rospy.Message):
            return self._message_to_dict(v)
        elif isinstance(v, genpy.rostime.Time):
            t = datetime.utcfromtimestamp(v.secs)
            return t + timedelta(microseconds=v.nsecs / 1000.)
        elif isinstance(v, genpy.rostime.Duration):
            return v.secs + v.nsecs / 1000000000.
        elif isinstance(v, list):
            return [self._sanitize_value(t) for t in v]
        else:
            return v

    def _message_to_dict(self, val):
        d = {}
        for f in val.__slots__:
            d[f] = self._sanitize_value(getattr(val, f))
        return d

    def qsize(self):
        return self.queue.qsize()

    def _enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            self.queue.put((topic, data, rospy.get_time()))
            self.worker_in_counter.increment()

    def _dequeue(self):
        try:
            t = self.queue.get(True)
        except IOError:
            self.quit = True
            return
        if isinstance(t, tuple):
            self.worker_out_counter.increment()
            topic = t[0]
            msg = t[1]
            ctime = t[2]

            if isinstance(msg, rospy.Message):
                doc = self._message_to_dict(msg)
                doc["__recorded"] = ctime or datetime.now()
                doc["__topic"] = topic
                try:
                    self.collection.insert(doc)
                except (InvalidStringData, InvalidDocument) as e:
                    rospy.logerr("%s %s@%s:\n%s" % (e.__class__.__name__, current_process().name, topic, e))
        else: