Example #1
    def _runjob(pipe: Connection, procs: TaskCacheList, keys: List[int]):
        "runs a job"
        if pipe.poll():
            return

        frame = next(iter(procs.run()), None)
        if frame is None:
            pipe.send((None, None))
            return

        if callable(getattr(frame, 'bead', None)):
            raise NotImplementedError()

        for i in keys:
            if pipe.poll():
                return

            try:
                out = (i, frame[i])
            except ProcessorException as exc:
                out = (i, exc)

            if pipe.poll():
                return

            pipe.send(out)

        if not pipe.poll():
            pipe.send((None, None))
Example #2
 def _multiprocess_generator_pool_child(pipe: Connection) -> None:
     while True:
         # wait for a message
         pipe.poll(None)
         # read the message
         msg = pipe.recv()
         if msg == SENTINEL:
             # received a sentinel message to terminate self
             break
         else:
             # start of a new function invocation
             func, batchsize, kwargs = msg
             # start a fresh batch of results
             batch = []
             try:
                 for result in func(**kwargs):
                     batch.append(result)
                     # batch is full, send it and start a new one
                     if len(batch) >= batchsize:
                         pipe.send([kwargs, batch, None])
                         batch = []
             except Exception as e:
                 # if an error happened send it up
                 pipe.send([kwargs, batch, e])
                 # continue to the next arg
             else:
                 # no error was thrown
                 # send any leftover lines smaller than a batch
                 pipe.send([kwargs, batch, None])
             # send a sentinel to say we've finished an arg
             pipe.send(SENTINEL)
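A plausible parent-side driver for this worker is sketched below; it assumes SENTINEL is the same module-level constant the child compares against, and that func is a picklable top-level function:

import multiprocessing as mp

def stream_results(func, kwargs_list, batchsize=10):
    # hypothetical driver: feed (func, batchsize, kwargs) messages to the child
    # and yield results batch by batch
    parent_conn, child_conn = mp.Pipe()
    worker = mp.Process(target=_multiprocess_generator_pool_child, args=(child_conn,))
    worker.start()
    try:
        for kwargs in kwargs_list:
            parent_conn.send((func, batchsize, kwargs))
            while True:
                msg = parent_conn.recv()
                if msg == SENTINEL:   # child finished this invocation
                    break
                _kwargs, batch, err = msg
                yield from batch      # a partial batch may accompany an error
                if err is not None:
                    raise err
    finally:
        parent_conn.send(SENTINEL)    # ask the child to terminate
        worker.join()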
Example #3
def run_daemon(qa_pipe: Connection):
    wiki_daemon = WikiDaemon(WIKI_PAGE)
    wiki_daemon.update_wiki_cache()
    wiki_daemon.reload_spacy_docs()
    print("wiki_daemon: Child process started")
    next_wiki_update = time.time() + UPDATE_PERIOD_SECS
    while True:
        now = time.time()
        if next_wiki_update < now:
            print("wiki_daemon: Checking Wikipedia for updates")
            updated = wiki_daemon.update_wiki_cache()

            if updated:
                print("wiki_daemon: Got new revision, updating")

            next_wiki_update = now + UPDATE_PERIOD_SECS

        if qa_pipe.poll(timeout=next_wiki_update - now):
            try:
                question = qa_pipe.recv()
                qa_pipe.send(wiki_daemon.inquiry(question))
            except EOFError:
                # Pipe was closed on other end, we're done here
                qa_pipe.close()
                return
            except ValueError:
                print("Answer was too large to send!")

                # Make sure the caller isn't still waiting for an object
                try:
                    qa_pipe.send("")
                except EOFError:
                    qa_pipe.close()
                    return
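A minimal parent-side sketch for this daemon, assuming WIKI_PAGE and UPDATE_PERIOD_SECS are configured in the child's module:

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    parent_end, child_end = Pipe()
    daemon = Process(target=run_daemon, args=(child_end,))
    daemon.start()
    child_end.close()         # drop the parent's copy so EOFError can reach the child
    parent_end.send('Who wrote the article?')
    print(parent_end.recv())  # blocks until the daemon answers
    parent_end.close()        # the child sees EOFError on its next recv() and exits
    daemon.join()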
Example #4
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=True, parameters=dev_parameters):
            return False

        if cls.ASYNCHRONOUS:
            cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            cls.prepare_sync_send(ctrl_connection)

        exit_requested = False
        buffer_size = cls.CONTINUOUS_SEND_BUFFER_SIZE if send_config.continuous else cls.SEND_BUFFER_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                time.sleep(0.5)
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(cls.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(cls.__name__))

        cls.shutdown_device(ctrl_connection)
        ctrl_connection.close()
Example #5
    def update(self, conn: Connection, rtsp_url: str, buffer: bool):
        """
        Runs the rtspcam thread to grab data and keep the buffer empty.

        :param conn:  the Pipe to transmit data
        :param rtsp_url: the url of the rtspcam.
        :param buffer:  should the client read frame by frame from the buffer or just grab the latest frame?
        :return:
        """
        self.log.info(f"Starting video capture client for {rtsp_url}")
        cap = VideoCapture(rtsp_url, CAP_FFMPEG)
        self.log.info("Started...")
        run = True
        while run:
            if not conn.poll() and not buffer:
                self.log.debug("clearing buffer frame")
                cap.grab()
                continue
            rec_dat = conn.recv()
            if rec_dat == self.SEND_FRAME:
                self.log.debug("Sending next frame to parent process")
                return_value, frame = cap.read()
                conn.send(frame)
            elif rec_dat == self.END_PROCESS:
                self.log.debug("Closing connection")
                cap.release()
                run = False

        self.log.info("Camera Connection Closed")
        conn.close()
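A hedged sketch of the consuming side, where cam_client stands in for an instance of the (unnamed, assumed picklable) camera class and the constants match the ones the method checks:

from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
grabber = Process(target=cam_client.update,
                  args=(child_conn, 'rtsp://example/stream', False))
grabber.start()
parent_conn.send(cam_client.SEND_FRAME)   # request a single frame
frame = parent_conn.recv()
parent_conn.send(cam_client.END_PROCESS)  # release the camera and end the loop
grabber.join()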
Example #6
    def device_receive(cls, data_connection: Connection, ctrl_connection: Connection, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=False, parameters=dev_parameters):
            return False

        try:
            cls.adapt_num_read_samples_to_sample_rate(dev_parameters[cls.Command.SET_SAMPLE_RATE.name])
        except NotImplementedError:
            # Many SDRs like HackRF or AirSpy do not need to calculate READ_SAMPLES
            # as default values are either fine or given by the hardware
            pass

        if cls.ASYNCHRONOUS:
            cls.enter_async_receive_mode(data_connection)
        else:
            cls.prepare_sync_receive(ctrl_connection)

        exit_requested = False

        while not exit_requested:
            if cls.ASYNCHRONOUS:
                time.sleep(0.5)
            else:
                cls.receive_sync(data_connection)
            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=False)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        cls.shutdown_device(ctrl_connection)
        data_connection.close()
        ctrl_connection.close()
Example #7
def lulapy_broker(conn: Connection) -> None:
    connections.append(conn)

    funcs: Dict[Text, Callable] = {
        'message': message,
        'subscribe': subscribe,
        'unsubscribe': unsubscribe,
        'close': close,
        'add_connection': add_connection
    }

    while True:

        if len(connections) == 0:
            break

        # iterate over a copy: removing items from the list being iterated would skip entries
        for conn in list(connections):
            if conn.closed:
                connections.remove(conn)
                for topic in topics:
                    if conn in topics[topic]:
                        topics[topic].remove(conn)

            elif conn.poll():
                data = conn.recv()
                _type = data['type']

                funcs[_type](data, conn)
Example #8
 def relay(
     sub_id: str,
     shutdown: threading.Event,
     channel: connection.Connection,
     callback: Callable[[Dict[str, Any]], None],
 ) -> None:
     while not shutdown.is_set():
         try:
             if channel.poll(timeout=1):
                 ev = channel.recv()
                 if ev['event_name'] == eventNames.SUBSCRIBED:
                     logger.info(
                         'Subscriber#{0} subscribe ack received'.format(
                             sub_id, ), )
                 elif ev['event_name'] == eventNames.UNSUBSCRIBED:
                     logger.info(
                         'Subscriber#{0} unsubscribe ack received'.format(
                             sub_id, ), )
                     break
                 elif ev['event_name'] == eventNames.DISPATCHER_SHUTDOWN:
                     logger.info(
                         'Subscriber#{0} received dispatcher shutdown event'
                         .format(sub_id, ), )
                     break
                 else:
                     callback(ev)
         except queue.Empty:
             pass
         except EOFError:
             break
         except KeyboardInterrupt:
             break
     logger.debug('bbye!!!')
Example #9
def get_last_message(connection: Connection) -> dict:
    message = {}
    try:
        while connection.poll():
            message = connection.recv()
    except EOFError:
        pass
    return message
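For illustration, draining the pipe this way keeps only the freshest message; a quick self-contained check might look like:

from multiprocessing import Pipe

parent_conn, child_conn = Pipe()
for status in ({'step': 1}, {'step': 2}, {'step': 3}):
    child_conn.send(status)             # stand-in for a chatty worker
print(get_last_message(parent_conn))    # {'step': 3}; {} if nothing had arrived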
Example #10
def Pconsumer_fcn(Pconn: Connection, loopCondition: Value):
    counter = 0
    while loopCondition.value == 1:
        if Pconn.poll(0.1):
            _ = Pconn.recv()
            counter += 1
    Pconn.close()
    print(f"Consumer msg: Received: {counter} messages\n")
Example #11
def show_feed(index: int, pipe: Connection):
    name = "Cam " + str(index)
    size = big
    print("running size:", size)
    cap = cv2.VideoCapture(index, backend)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1])
    save_dir = "C:/users/phill/companion app save folder/"
    vid_ext = ".avi"
    codec = "DIVX"
    frame_size = big
    fps = 30

    # timestamped output file for this camera
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    writer = cv2.VideoWriter(
        save_dir + timestamp + name + '_output' + vid_ext,
        cv2.VideoWriter_fourcc(*codec), fps, frame_size)
    start = time.time()
    num_frames = 0
    resize = 0
    while True:
        if pipe.poll():
            msg = pipe.recv()
            if msg == "small":
                resize = 1
            elif msg == "big":
                resize = 2
            elif msg == "reset":
                resize = 0
            elif msg == "close":
                cv2.destroyWindow(name)
                cap.release()
                break
        ret, frame = cap.read()
        if ret and frame is not None:
            if resize == 1:
                frame = imutils.resize(frame, width=small[0])
            elif resize == 2:
                frame = imutils.resize(frame, width=big[0])
            cv2.imshow(name, frame)
            num_frames += 1
        else:
            print("Cam failed")
            cv2.destroyWindow(name)
            cap.release()
            break
        keypress = cv2.waitKey(1) & 0xFF
        if keypress == ord('q'):
            break
    end = time.time()
    seconds = end - start
    if seconds > 0:
        fps = num_frames / seconds
    else:
        fps = 0
    print("time taken:", seconds, "fps:", fps, "camera:", index,
          "frames handled:", num_frames)
    cap.release()
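A sketch of the controlling process, assuming the module-level big, small, and backend used by show_feed are defined:

from multiprocessing import Pipe, Process
import time

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    feed = Process(target=show_feed, args=(0, child_conn))
    feed.start()
    time.sleep(10)
    parent_conn.send('small')  # shrink the preview
    time.sleep(10)
    parent_conn.send('close')  # release the camera and exit the loop
    feed.join()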
Example #12
def _run_job(pipe_to_parent: Connection, job: MountainJob) -> None:
    result0 = job._execute(print_console_out=False)
    pipe_to_parent.send(result0.getObject())
    # wait for message to return
    while True:
        if pipe_to_parent.poll():
            pipe_to_parent.recv()
            return
        time.sleep(0.1)
Example #13
def _pjh_run_job(pipe_to_parent: Connection, job: Dict[str, Any], kachery_config: dict) -> None:
    import kachery as ka
    ka.set_config(**kachery_config)
    hither._run_job(job)
    pipe_to_parent.send(job['result'].serialize())
    # wait for message to return
    while True:
        if pipe_to_parent.poll():
            pipe_to_parent.recv()
            return
        time.sleep(0.1)
Example #14
def traffic_gen(mac: str, pipe_rcv: connection.Connection) -> None:
    with configure_eth_if() as so:
        payload = bytes.fromhex('ff')  # DUMMY_TRAFFIC code
        payload += bytes(1485)
        eth_frame = Ether(dst=mac, src=so.getsockname()[4],
                          type=0x2222) / raw(payload)
        # send frames until the parent signals stop by sending anything on the pipe
        while not pipe_rcv.poll():
            so.send(raw(eth_frame))
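The pipe here is used purely as a stop flag: poll() stays False until the parent sends anything. A minimal driver sketch, assuming the child's module dependencies (configure_eth_if, Ether, raw) are importable:

from multiprocessing import Pipe, Process
import time

if __name__ == '__main__':
    pipe_send, pipe_rcv = Pipe()
    gen = Process(target=traffic_gen, args=('ff:ff:ff:ff:ff:ff', pipe_rcv))
    gen.start()
    time.sleep(5)         # generate traffic for five seconds
    pipe_send.send(None)  # any object makes poll() return True and ends the loop
    gen.join()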
Example #15
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig,
                    dev_parameters: OrderedDict):
        if not cls.init_device(
                ctrl_connection, is_tx=True, parameters=dev_parameters):
            ctrl_connection.send("failed to start tx mode")
            return False

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            ret = cls.prepare_sync_send(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start tx mode")
            return False

        exit_requested = False
        buffer_size = cls.CONTINUOUS_TX_CHUNK_SIZE if send_config.continuous else cls.SYNC_TX_CHUNK_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        ctrl_connection.send("successfully started tx mode")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    pass
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(),
                                             ctrl_connection,
                                             is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if not cls.ASYNCHRONOUS:
            # Some Sync send calls (e.g. USRP) are not blocking, so we wait a bit here to ensure
            # that the send buffer on the SDR is cleared
            time.sleep(0.75)

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(
                cls.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(
                cls.__name__))

        cls.shutdown_device(ctrl_connection, is_tx=True)
        ctrl_connection.close()
Example #16
def receive(conn: Connection, _pipeline: Callable[[Iterator[dict]], Iterator]):
    """
    Процедура получения строки через сетевое соединение и обратной передачи данных нормализации
    """
    sentence = ''
    stemming.init_cache()
    tokenization.init_cache()
    config.init_cache()

    try:
        with stemming.jstem_ctx() as stemmer:
            while True:
                if conn.poll(timeout=_RTN_CONNECTION_LIFE_TIME):
                    sentence = conn.recv()

                    logger.debug(f'Received: "{sentence}"')

                    if _PROFILER:
                        profiler.enable()

                    start = process_time()
                    try:
                        analysis = normalization.analyze(sentence, stemmer)
                        result = list(_pipeline(analysis))
                    except Exception as e:
                        logger.error(f'Normalization failed with error: {e} \n {sentence}')
                        result = []
                    end = process_time()

                    if _PROFILER:
                        profiler.disable()

                    conn.send(result)
                    logger.debug(f'Sent: {result}')
                    perflog.debug(f'RTN time: {round((end-start) * 1000, 2)} ms')

                    if _PROFILER:
                        profiler.dump_stats(os.path.join(settings.ROOT_PATH, f'rtn_{time()}.cprof'))
                else:
                    raise TimeoutError
    except EOFError:
        logger.info('Incoming connection closed')
    except TimeoutError:
        logger.warning('Incoming connection timeout')
    except Exception:
        logger.exception(sentence if sentence else 'Normalization Failed')
    finally:
        stemming.cache_clear()
        tokenization.cache_clear()
        config.cache_clear()
        logger.debug('Closing connection...')
        conn.close()
        logger.debug('Connection closed')
Example #17
def func_wrapper(create_callback: Callable, conn: Connection,
                 *args: Any) -> None:
    """Wrap a function with connection to receive and send data."""
    running = True

    # pylint: disable=unused-argument
    def handle_signal(signum: int, frame: Any) -> None:
        """Handle signal."""
        nonlocal running
        running = False
        conn.close()

    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

    try:
        callback = create_callback(*args)
    except Exception as exc:  # pylint: disable=broad-except
        LOGGER.error("Failed to create callback: %s", exc)
        return

    while running:

        while running:
            if conn.poll():
                break
            sleep(0.5)

        try:
            data = conn.recv()
        except EOFError:
            LOGGER.debug("Nothing more to receive")
            break
        except OSError:
            LOGGER.debug("Connection is closed")
            break
        try:
            result = callback(data)
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.error("Failed to run callback: %s", exc)
            break

        if not running:
            break
        try:
            conn.send(result)
        except ValueError:
            LOGGER.error("Failed to send result %s", result)
        except OSError:
            LOGGER.debug("Connection is closed")
            break

    LOGGER.debug("Exiting process")
Example #18
def my_first_proc(conn: Connection, some_list: list):
    some_list[0] = 999
    print("Proc 1 id:", id(some_list), some_list, "pid:", os.getpid())
    for _ in range(10):
        print("🐉")
        time.sleep(1)

    while not conn.poll():
        time.sleep(1)

    received_list = conn.recv()
    print("Proc 1 received from proc 2, id:", id(received_list))
Example #19
def _DecompileFileProcess (inputPipe: connection.Connection, outputPipe: connection.Connection) -> None:
	"""
	Intended to run in a separate process; don't call this directly.
	"""

	while True:
		inputPipe.poll(None)

		request = inputPipe.recv()  # type: dict

		targetFilePath = request["targetFilePath"]  # type: str
		destinationFilePath = request["destinationFilePath"]  # type: str
		printFileName = request["printFileName"]  # type: str

		if printFileName is None:
			printFileName = targetFilePath

		try:
			with open(destinationFilePath, "w+", encoding = "utf-8") as destinationFile:
				destinationFile.write(str(unpyc3.decompile(targetFilePath)))
		except Exception as e:
			# noinspection SpellCheckingInspection
			print("Failed to decompile '", printFileName + "' with 'unpyc3' trying alternative 'decompyle3'. \n" + str(e), file = sys.stderr)

			try:
				with open(destinationFilePath, "w+", encoding = "utf-8") as destinationFile:
					Decompyle3Main.decompile_file(targetFilePath, outstream = destinationFile)
			except Exception as e:
				# noinspection SpellCheckingInspection
				print("decompyle3 failed to decompile '", printFileName + "'. \n" + str(e), file = sys.stderr)

				outputPipe.send(False)
				continue

		outputPipe.send(True)
		continue
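A driver sketch for the request/response protocol above; note the loop never exits on its own, so the parent terminates the worker when done:

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    in_parent, in_child = Pipe()
    out_parent, out_child = Pipe()
    worker = Process(target=_DecompileFileProcess, args=(in_child, out_child))
    worker.start()
    in_parent.send({
        'targetFilePath': 'example.pyc',
        'destinationFilePath': 'example.py',
        'printFileName': None,
    })
    print('decompiled OK' if out_parent.recv() else 'decompile failed')
    worker.terminate()  # no shutdown message is defined, so stop it directly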
Example #20
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=True, parameters=dev_parameters):
            ctrl_connection.send("failed to start tx mode")
            return False

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            ret = cls.prepare_sync_send(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start tx mode")
            return False

        exit_requested = False
        buffer_size = cls.CONTINUOUS_TX_CHUNK_SIZE if send_config.continuous else cls.SYNC_TX_CHUNK_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        ctrl_connection.send("successfully started tx mode")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    pass
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if not cls.ASYNCHRONOUS:
            # Some Sync send calls (e.g. USRP) are not blocking, so we wait a bit here to ensure
            # that the send buffer on the SDR is cleared
            time.sleep(0.75)

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(cls.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(cls.__name__))

        cls.shutdown_device(ctrl_connection, is_tx=True)
        ctrl_connection.close()
Example #21
        def log_timer(receiver: Connection, sender: Connection):
            print('downloading ', end='', flush=True)
            sender.close()

            # start ticker
            self.ticker.start()

            while True:
                if receiver.poll():
                    done = receiver.recv()
                    if done:
                        print(flush=True)
                        print('download done', flush=True)

                        # stop ticker when download process done
                        self.ticker.cancel()
                        break
Example #22
def _model_process(model_function: Callable, conn: Connection, batch_size: int = -1, *,
                   poll_period: float = 0.5):
    model: Callable[[Collection[str], Collection[Hashable]], Collection[str]] = model_function()
    if batch_size <= 0:
        batch_size = float('inf')

    while True:
        batch: List[Tuple[str, Hashable]] = []
        while conn.poll() and len(batch) < batch_size:
            batch.append(conn.recv())

        if not batch:
            time.sleep(poll_period)
            continue

        messages, dialog_ids = zip(*batch)
        responses = model(messages, dialog_ids)
        for response, dialog_id in zip(responses, dialog_ids):
            conn.send((response, dialog_id))
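A client-side sketch, where load_model is a hypothetical picklable factory returning the model callable:

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    worker = Process(target=_model_process, args=(load_model, child_conn, 8),
                     kwargs={'poll_period': 0.1})
    worker.start()
    parent_conn.send(('hello there', 'dialog-1'))  # (message, dialog_id)
    response, dialog_id = parent_conn.recv()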
Example #23
    def _loop(self, task_reader: Connection, result_writer: Connection):
        if self.config.on_after_fork is not None:
            self.config.on_after_fork()

        executor = self._executor.run
        task_deserializer = AsyncTask.deserialize
        while True:
            if task_reader.poll(DURATION_MICROSECOND):
                message = task_reader.recv()
                task = task_deserializer(message)
                try:
                    result = executor(task)
                    if result is _SHUTDOWN_OBJECT:
                        break
                except Exception as e:
                    result = TaskError(e)
                result_writer.send((task.identifier, result))

        if self.config.on_shutdown is not None:
            self.config.on_shutdown()
Example #24
		def recv_instruction_from_pipe(target_pipe: Connection):
			if target_pipe.poll():
				instruction = target_pipe.recv()

				# Pass the exception to the main process
				if isinstance(instruction, ExceptionMessage):
					self._main_pipe.send((instruction, None))

				# Invalid instruction object
				if not isinstance(instruction, GameInstruction):
					return GameInstruction(-1, comm.PlatformAction.NONE)

				# Invalid PlatformAction instruction
				if instruction.command != comm.PlatformAction.MOVE_LEFT and \
				   instruction.command != comm.PlatformAction.MOVE_RIGHT:
					instruction.command = comm.PlatformAction.NONE

				return instruction
			else:
				return GameInstruction(-1, comm.PlatformAction.NONE)
Example #25
    def device_receive(cls, data_connection: Connection, ctrl_connection: Connection, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=False, parameters=dev_parameters):
            ctrl_connection.send("failed to start rx mode")
            return False

        try:
            cls.adapt_num_read_samples_to_sample_rate(dev_parameters[cls.Command.SET_SAMPLE_RATE.name])
        except NotImplementedError:
            # Many SDRs like HackRF or AirSpy do not need to calculate SYNC_RX_CHUNK_SIZE
            # as default values are either fine or given by the hardware
            pass

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_receive_mode(data_connection, ctrl_connection)
        else:
            ret = cls.prepare_sync_receive(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start rx mode")
            return False

        exit_requested = False
        ctrl_connection.send("successfully started rx mode")

        while not exit_requested:
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.25)
                except KeyboardInterrupt:
                    pass
            else:
                cls.receive_sync(data_connection)
            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=False)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        cls.shutdown_device(ctrl_connection, is_tx=False)
        data_connection.close()
        ctrl_connection.close()
Example #26
 def watcher_loop(
     signal_rx: PipeConnection,
     interval_seconds: float,
     first_run_immediate: bool,
     do_work: Callable[[Value], None],
     error_callback: Callable[[Exception], None],
     shared_state: Value,
 ):
     signal(SIGINT, MPWorker.empty_signal_handler)
     signal(SIGTERM, MPWorker.empty_signal_handler)
     configured_interval = interval_seconds
     if first_run_immediate:
         interval_seconds = 0
     while not signal_rx.poll(timeout=interval_seconds):
         interval_seconds = configured_interval
         try:
             do_work(shared_state)
         except Exception as captured_exception:
             if error_callback is not None:
                 error_callback(captured_exception)
             else:
                 raise captured_exception
Example #27
 def _safe_queue_put(self, worker_id: int, item: Any,
                     queue: mp.JoinableQueue, rx: Connection) -> bool:
     while True:
         # First we have to check to make sure the parent process is still alive
         # and consuming from the queue because there are circumstances where the
         # parent process can exit or stop consuming without automatically cleaning up
         # its children (the workers).
         # For example, when the parent process is killed with `kill -9`.
         # So the first thing we do is check to see if the parent has notified
         # us (the worker) to stop through the rx (receiver) connection.
         # Of course this only works if the parent was able to send out a notification,
         # which may not always be the case. So we have a backup check below.
         if rx.poll():
             logger.warning(
                 "worker %d received stop message from parent, exiting now",
                 worker_id)
             queue.cancel_join_thread()
             return False
         # This is the backup check.
         # The file descriptor associated with the rx (receiver) connection will
         # be readable if and only if the parent process has exited.
         # NOTE (epwalsh): this doesn't work on Mac OS X with `start_method == "fork"`
         # for some reason, i.e. the file descriptor doesn't show as readable
         # after the parent process has died.
         fds, _, _ = select.select([rx.fileno()], [], [], 0)
         if fds:
             logger.warning(
                 "worker %d parent process has died, exiting now",
                 worker_id)
             queue.cancel_join_thread()
             return False
         # If we're down here the parent process is still alive to the best of our
         # knowledge, so we can continue putting things on the queue.
         try:
             queue.put(item, True, 0.1)
             return True
         except Full:
             continue
Example #28
    def poll_process(
        self,
        rx: Connection,
        analyzer_name: str,
    ) -> Iterator[ExecutionHit]:
        """
        Keep polling the spawned Process, and yield any ExecutionHits.
        (This will probably disappear if Analyzers move to Docker images.)
        """
        t = 0

        while True:
            p_res = rx.poll(timeout=5)
            if not p_res:
                t += 1
                LOGGER.info(
                    f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
                )
                continue

            result: Optional[Any] = rx.recv()
            if isinstance(result, ExecutionComplete):
                self.logger.info(f"Analyzer {analyzer_name} execution complete")
                return

            # emit any hits to an S3 bucket
            if isinstance(result, ExecutionHit):
                self.logger.info(
                    f"Analyzer {analyzer_name} emitting event for:"
                    f"{result.analyzer_name} {result.root_node_key}"
                )
                yield result

            assert not isinstance(
                result, ExecutionFailed
            ), f"Analyzer {analyzer_name} failed."
Example #29
def state_farmer(q: Queue, exit_receiver: Connection, interval: int = 5):
    while not exit_receiver.poll():
        q.put(get_sys_state())
        time.sleep(interval)
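A possible driver, assuming get_sys_state is importable in the child; any message on the pipe flips poll() to True and stops the sampler:

from multiprocessing import Pipe, Process, Queue
import time

if __name__ == '__main__':
    q = Queue()
    exit_sender, exit_receiver = Pipe()
    sampler = Process(target=state_farmer, args=(q, exit_receiver, 5))
    sampler.start()
    time.sleep(30)          # collect samples for half a minute
    exit_sender.send(True)  # stop signal
    sampler.join()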
Example #30
def processor_process(
    connection: Connection,
    blueprint_path: str,
    init_params: dict,
    processor_dir: str,
    topic: str,
    source_topic: str,
    source_format: str,
    kafka_server: str,
    min_input_spacing: float,
    min_step_spacing: float,
    min_output_spacing: float,
):
    """
    Runs the given blueprint as a processor

    Is meant to be run in a separate process

    :param connection: a connection object to communicate with the main process
    :param blueprint_path: the path to the blueprint folder
    :param init_params: the initialization parameters to the processor as a dictionary
    :param processor_dir: the directory the created process will run in
    :param topic: the topic the process will send results to
    :param source_topic: the topic the process will receive data from
    :param source_format: the byte format of the data the process will receive
    :param kafka_server: the address of the kafka bootstrap server the process will use
    :param min_input_spacing: the minimum time between each input to the processor
    :param min_step_spacing: the minimum time between each step function call on the processor
    :param min_output_spacing: the minimum time between each results retrieval from the processor
    :return:
    """
    blueprint_path = os.path.realpath(blueprint_path)

    # Creates and sets, deleting if it already exists, the directory the process will use
    try:
        if os.path.exists(processor_dir):
            shutil.rmtree(processor_dir)
        os.makedirs(processor_dir)
        os.chdir(processor_dir)
    except PermissionError as e:
        connection.send({'type': 'error', 'value': e})
        return

    start_time = -1
    next_input_time = 0
    next_step_time = 0
    next_output_time = 0

    # Imports the blueprints and initializes the processor from it
    try:
        processor_instance = SourceFileLoader(
            os.path.basename(blueprint_path),
            os.path.join(blueprint_path,
                         '__init__.py')).load_module().P(**init_params)
    except TypeError as e:
        connection.send({'type': 'error', 'value': e})
        return
    except ValueError as e:
        connection.send({'type': 'error', 'value': e})
        return

    # Retrieves the inputs and outputs from the initialized processor
    if hasattr(processor_instance, 'outputs'):
        # Uses the outputs attribute from the processor if it exists
        outputs = [
            Variable(v.valueReference, v.name)
            for v in processor_instance.outputs
        ]
    else:
        # Otherwise it has to create outputs from output_names
        outputs = [
            Variable(i, name)
            for i, name in enumerate(processor_instance.output_names)
        ]
    if hasattr(processor_instance, 'inputs'):
        # Uses the inputs attribute from the processor if it exists
        inputs = [
            Variable(v.valueReference, v.name)
            for v in processor_instance.inputs
        ]
    else:
        # Otherwise it has to create inputs from input_names
        inputs = [
            Variable(i, name)
            for i, name in enumerate(processor_instance.input_names)
        ]

    # Add a helper attribute that lists outputs that are part of a matrix
    matrix_outputs = (processor_instance.matrix_outputs if hasattr(
        processor_instance, 'matrix_outputs') else [])

    initialized = True
    started = False
    byte_format = '<d'
    output_refs = []
    input_refs = []
    measurement_refs = []
    measurement_proportions = []

    topic_partition = TopicPartition(topic=source_topic, partition=0)
    output_buffer = bytearray()

    # Waits for the start signal from the main process
    start_params = None
    while start_params is None:
        msg = connection.recv()
        if msg['type'] == 'start':
            value = msg['value']
            start_params = value['start_params']
            output_refs = value['output_refs']
            input_refs = value['input_refs']
            measurement_refs = value['measurement_refs']
            measurement_proportions = value['measurement_proportions']
            byte_format = '<' + 'd' * (len(output_refs) + 1)
        elif msg['type'] == 'status':
            connection.send({
                'type': 'status',
                'value': {
                    'outputs': outputs,
                    'inputs': inputs,
                    # Add a helper attribute that lists outputs that are part of a matrix
                    'matrix_outputs': matrix_outputs,
                    'initialized': initialized,
                    'started': started,
                }
            })
        elif msg['type'] == 'stop':
            try:
                processor_instance.stop()
            finally:
                return

    try:
        # Calls the processors start function if present
        if hasattr(processor_instance, 'start'):
            processor_instance.start(start_time=next_output_time,
                                     **start_params)
    except TypeError as e:
        connection.send({'type': 'error', 'value': e})
        return
    except ValueError as e:
        connection.send({'type': 'error', 'value': e})
        return

    # Use a custom time for results if the processor defines it
    if hasattr(processor_instance, "get_time"):
        processor_custom_time = True
    else:
        processor_custom_time = False

    try:
        # Initializes kafka
        consumer = kafka.KafkaConsumer(source_topic,
                                       bootstrap_servers=kafka_server)
        producer = kafka.KafkaProducer(value_serializer=bytes,
                                       bootstrap_servers=kafka_server)
    except Exception:
        connection.send({'type': 'error', 'value': 'Kafka error'})
        return

    started = True

    while True:
        try:
            while connection.poll():
                # Handles new data from main process
                conn_msg = connection.recv()
                if conn_msg['type'] == 'outputs':
                    output_refs = conn_msg['value']
                    byte_format = '<' + 'd' * (len(output_refs) + 1)
                elif conn_msg['type'] == 'inputs':
                    input_refs, measurement_refs, measurement_proportions = conn_msg[
                        'value']
                elif conn_msg['type'] == 'stop':
                    try:
                        processor_instance.stop()
                    finally:
                        return
                elif conn_msg['type'] == 'status':
                    connection.send({
                        'type': 'status',
                        'value': {
                            'outputs': outputs,
                            'inputs': inputs,
                            # Add a helper attribute that lists outputs that are part of a matrix
                            'matrix_outputs': matrix_outputs,
                            'initialized': initialized,
                            'started': started,
                        }
                    })

            messages = consumer.poll(10)
            for msg in messages.get(topic_partition, []):
                # Handles new data from kafka
                for data in struct.iter_unpack(source_format, msg.value):
                    current_time = data[0]
                    if start_time < 0:
                        start_time = current_time
                    if next_input_time <= current_time or math.isclose(
                            next_input_time, current_time, rel_tol=1e-15):
                        # Retrieve measurements from data where ref is positive and use a constant where ref is negative
                        measurements = [
                            (data[ref + 1] if ref >= 0 else 1) *
                            measurement_proportions[i]
                            for i, ref in enumerate(measurement_refs)
                        ]
                        processor_instance.set_inputs(input_refs, measurements)
                        next_input_time = current_time + min_input_spacing
                    if next_step_time <= current_time or math.isclose(
                            next_step_time, current_time, rel_tol=1e-15):
                        processor_instance.step(current_time - start_time)
                        next_step_time = current_time + min_step_spacing
                    if next_output_time <= current_time or math.isclose(
                            next_output_time, current_time, rel_tol=1e-15):
                        outputs = processor_instance.get_outputs(output_refs)
                        if outputs is None:
                            continue
                        if processor_custom_time:
                            current_time = processor_instance.get_time(
                            ) + start_time
                        output_buffer += struct.pack(byte_format, current_time,
                                                     *outputs)
                        if len(output_buffer) > 100 * len(output_refs):
                            producer.send(topic=topic, value=output_buffer)
                            output_buffer = bytearray()
                        next_output_time = current_time + min_output_spacing
        except Exception as e:
            logger.exception(f'Exception in processor {processor_dir}')
            connection.send({'type': 'error', 'value': e})
            return
Example #31
def evolve(in_con: MConnection, out_con: MConnection, returnq: MQueue, settings: EnhanceModelSettings,
           optimal_primary_list: List[Gear], optimal_cost, cum_cost, secondaries=None, ev_set: EvolveSettings = None):
    if ev_set is None:
        ev_set = EvolveSettings()

    if secondaries is None:
        secondaries = settings[settings.P_FAIL_STACKER_SECONDARY]
    num_fs = ev_set.num_fs
    population_size = ev_set.pop_size
    ultra_elitism = ev_set.ultra_elite
    num_elites = ev_set.num_elites
    brood_size = ev_set.brood_size
    seent = set()
    this_seent = []
    trait_dominance = ev_set.trait_dom

    mutation_rate = ev_set.mutation_rate
    extinction_epoch = ev_set.extinction_epoch
    max_mutation = ev_set.max_mutation
    f_fitness = fitness_funcs[ev_set.fitness_function]
    oppressive_mode = ev_set.oppressive_mode
    oppress_suprem = ev_set.penalize_supremacy
    best = cum_cost
    best_fsl = None
    lb = 0

    def reg_prune(x: FailStackList):
        # sig = (x.starting_pos, x.secondary_gear, *x.secondary_map[:-1])
        sig = (secondaries.index(x.secondary_gear), *x.get_gnome())
        if sig in seent:
            return False
        else:
            seent.add(sig)
            this_seent.append(sig)  # Send this signature to the other processes
            return True

    def check_pruned(x: FailStackList):
        sig = (secondaries.index(x.secondary_gear), *x.get_gnome())
        return sig not in seent

    def accept_prune(x):
        return True

    check_2 = accept_prune
    check = accept_prune
    if oppress_suprem:
        check_2 = reg_prune
        check = check_pruned
    if oppressive_mode:
        check = reg_prune
        check_2 = reg_prune

    def get_randoms(size_):
        retlist = [
            FailStackList(settings, choice(secondaries), optimal_primary_list, optimal_cost, cum_cost, num_fs=num_fs) for _ in range(0, size_)]
        [p.generate_secondary_map(randint(10, 60)) for p in retlist]
        return retlist

    def mutate(new_indiv):
        this_max_mutation = int(ceil(min(lb / 4, max_mutation)))
        for i, v in enumerate(new_indiv.secondary_map[:-1]):
            if random() < mutation_rate:
                new_v = v + randint(-this_max_mutation, this_max_mutation)
                new_indiv.secondary_map[i] = max(1, new_v)
        if random() < mutation_rate:
            new_s = new_indiv.starting_pos + randint(-this_max_mutation, this_max_mutation)
            new_indiv.starting_pos = min(max(5, new_s), 60)
        if random() < mutation_rate:
            new_indiv.secondary_gear = choice(secondaries)
        new_indiv.secondary_map[-1] = 300

    best_fitness = fitness_func(optimal_cost, optimal_cost, cum_cost, cum_cost)
    this_brood_size = 0
    population = get_randoms(population_size)
    epoch_mode = False
    while True:
        if lb > extinction_epoch:
            seent = set()
            population = get_randoms(population_size)
            lb = 0
        [p.evaluate_map() for p in population]
        #pop_costs = best / numpy.array([f.fs_cum_cost for f in population])  # Bigger is better
        #fitness = numpy.sum(pop_costs, axis=1)
        pop_costs = numpy.array([f_fitness(f.fs_cost, optimal_cost, f.fs_cum_cost, cum_cost) for f in population])  # Bigger is better
        fitness = pop_costs
        sort_order = numpy.argsort(fitness, kind='mergesort')
        #for i in range(0, min(lb-15, brood_size)):
        #    bad_fsl = population[sort_order[i]]
        #    check_2(bad_fsl)
        if oppress_suprem:
            epoch_mode = lb > 5
            if epoch_mode:
                for i in population[:this_brood_size]:
                    check_2(i)
        brood_size = max(20, brood_size-lb)
        this_best_fitness = fitness[sort_order[-1]]
        if this_best_fitness > best_fitness:
            best_fitness = this_best_fitness
            best_fsl = population[sort_order[-1]]
            returnq.put((this_best_fitness, lb, (secondaries.index(best_fsl.secondary_gear), *best_fsl.get_gnome())), block=True)
            lb = 0
            #best = numpy.min([best, best_fsl.fs_cum_cost], axis=0)

        new_pop = []
        others_seent = []

        try:
            while in_con.poll():
                others_seent.extend(in_con.recv())
        except EOFError:  # Pipe broken: terminate loop
            out_con.close()
            return
        except BrokenPipeError:
            out_con.close()
            return
        for i in others_seent:
            this_len = len(seent)
            seent.add(tuple(i))
            if len(seent) > this_len:
                this_seent.append(i)

        for i in range(0, brood_size):
            breeder1 = choice(sort_order[-num_elites:])
            breeder1 = population[breeder1]
            if not epoch_mode and (best_fsl is not None) and (random() < ultra_elitism):
                breeder2 = best_fsl
            else:
                if random() > (lb * 0.1):
                    breeder2 = choice(sort_order[-num_elites:])
                    breeder2 = population[breeder2]
                else:
                    breeder2 = choice(sort_order)
                    breeder2 = population[breeder2]
            offspring = FailStackList(settings, choice([breeder1.secondary_gear, breeder2.secondary_gear]), optimal_primary_list,
                                      optimal_cost, cum_cost, num_fs=num_fs)
            offspring.secondary_map = breeder1.secondary_map.copy()  # this gets overwritten anyway
            for i in range(min(len(breeder1.secondary_map), len(breeder2.secondary_map))):
                if random() < trait_dominance:
                    if random() < 0.5:
                        offspring.secondary_map[i] = breeder1.secondary_map[i]
                    else:
                        offspring.secondary_map[i] = breeder2.secondary_map[i]
                else:
                    offspring.secondary_map[i] = int(round((breeder1.secondary_map[i] + breeder2.secondary_map[i]) / 2.0))

            if random() < trait_dominance:
                if random() < 0.5:
                    offspring.starting_pos = breeder1.starting_pos
                else:
                    offspring.starting_pos = breeder2.starting_pos
            else:
                offspring.starting_pos = int(round((breeder1.starting_pos + breeder2.starting_pos) / 2.0))

            offspring.secondary_map[-1] = 300
            mutate(offspring)
            if check(offspring):
                new_pop.append(offspring)
        this_brood_size = len(new_pop)
        new_pop.extend(get_randoms(population_size-len(new_pop)))
        population = new_pop

        if len(this_seent) > 0:
            out_con.send(this_seent)
            this_seent = []
        lb += 1
Example #32
    def _slew_rate_process(
        self,
        axis: str,
        rate_target_pipe: Connection,
        axis_safe_event: Event,
        div_last_commanded_shared: Value,
    ):
        """Process for sending slew rate commands continuously until a target rate is achieved.

        This process helps the mount to accelerate smoothly, since this requires sending commands
        to the mount computer in rapid succession. Acceleration and slew rate step limits are
        enforced here. Commands are sent to the mount until the desired target slew rate is
        achieved, and then it will wait for a new rate target before sending further commands.

        Args:
            axis: The mount axis to be controlled by this process (one process per axis).
            rate_target_pipe: The receiving end connection to a multiprocessing pipe over which
                slew rate target values are sent. Rates are in degrees per second.
            axis_safe_event: When the axis is safed, meaning that motion is stopped, this event
                will be set. Otherwise, it will be cleared.
            div_last_commanded_shared: Shared memory storing the divisor value most recently
                commanded for this mount axis.
        """

        # Ignore SIGINT in this process (will be handled in main process)
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Last commanded rate is cached along with the time of last command to enforce acceleration
        # limit. Keep local copy of last commanded divisor to avoid accessing shared memory more
        # than necessary.
        axis_safe_event.set()
        div_target = 0
        div_last_commanded = div_last_commanded_shared.value
        time_last_commanded = time.perf_counter() - 1e-3
        shutdown = False

        while True:
            if shutdown:
                if div_last_commanded == 0:
                    return
            # only try to receive from the pipe if a new rate target is waiting or if the last-
            # received rate target has been achieved, in which case we want to block
            elif rate_target_pipe.poll() or div_last_commanded == div_target:
                rate_target = rate_target_pipe.recv()
                # None is a special value indicating that it is time to shut down this process
                if rate_target is None:
                    div_target = 0
                    shutdown = True
                else:
                    div_target = self.slew_rate_to_div(rate_target)
                    if div_target == div_last_commanded:
                        continue

            time_current = time.perf_counter()

            # may not be able to achieve div_target if it exceeds rate accel or step limits
            rate_target = self.div_to_slew_rate(div_target)
            rate_last_commanded = self.div_to_slew_rate(div_last_commanded)

            rate_to_command = self._apply_rate_accel_limit(
                rate_target, time_current, rate_last_commanded,
                time_last_commanded)
            rate_to_command = self._apply_rate_step_limit(
                rate_to_command, rate_last_commanded)

            div = self.slew_rate_to_div(rate_to_command)

            # Clear this event before sending the actual commands since the state of the mount
            # is about to change and because if the commands fail for some reason the state of
            # the mount will be unknown and cannot be assumed to be safe.
            if div != 0:
                axis_safe_event.clear()

            try:
                self._set_divisor(axis, div, div_last_commanded)
            except Gemini2Exception as e:
                # dangerous to give up because this thread is critical for stopping mount motion
                # safely; better to keep trying to send commands to the bitter end
                print(
                    f'Ignoring exception in {axis} slew rate command process: {str(e)}'
                )
                continue

            div_last_commanded_shared.value = div
            div_last_commanded = div
            time_last_commanded = time_current

            if div_last_commanded == 0:
                axis_safe_event.set()