Example #1
class RunChannel:
    def __init__(self, global_config, channel):
        ch_config = channel_config(channel)
        src_workers, workers_to_start = source_workers(channel,
                                                       ch_config["sources"])
        self._source_workers = src_workers
        self._workers_to_start = workers_to_start
        self._tm = task_manager_for(global_config, channel, ch_config,
                                    self._workers_to_start)
        self._channel_worker = ChannelWorker(self._tm, global_config["logger"])

    def __enter__(self):
        self._channel_worker.start()
        self._tm.state.wait_while(State.BOOT)

        # Start any sources that are not yet alive.
        for key, src_worker in self._workers_to_start.items():
            if src_worker.is_alive():
                continue

            src_worker.start()

        self._tm.state.wait_while(State.ACTIVE)
        return self._tm

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type:
            # Propagate any exception raised inside the with block.
            return False
        # Wait for the channel worker to finish, then clean up the source workers.
        self._channel_worker.join()
        self._source_workers.remove_all(None)
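RunChannel wraps a channel's lifetime in a context manager: entering starts the channel worker, waits for it to leave the BOOT state, starts any source workers that are not yet alive, waits out the ACTIVE phase, and returns the task manager; exiting joins the channel worker and cleans up the source workers unless an exception is propagating. Below is a minimal, self-contained sketch of the same start/wait/join pattern using only the standard library; the threading.Event stands in for the State machinery and all names are illustrative assumptions, not code from the example above.

import threading


class RunWorker:
    """Simplified stand-in for RunChannel: start a worker, wait until it is ready, join on exit."""

    def __init__(self, target):
        self._ready = threading.Event()  # plays the role of the BOOT -> ACTIVE transition
        self._worker = threading.Thread(target=target, args=(self._ready,))

    def __enter__(self):
        self._worker.start()
        self._ready.wait()  # analogous to state.wait_while(State.BOOT)
        return self._worker

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type:
            return False  # propagate exceptions, as RunChannel does
        self._worker.join()


def worker_main(ready):
    ready.set()  # signal that the "boot" phase is done
    # ... real work would go here ...


with RunWorker(worker_main) as w:
    pass  # leaving the block joins the worker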
Example #2
def __init__(self, global_config, channel):
    ch_config = channel_config(channel)
    src_workers, workers_to_start = source_workers(channel,
                                                   ch_config["sources"])
    self._source_workers = src_workers
    self._workers_to_start = workers_to_start
    self._tm = task_manager_for(global_config, channel, ch_config,
                                self._workers_to_start)
    self._channel_worker = ChannelWorker(self._tm, global_config["logger"])
Example #3
    def start_channel(self, channel_name, channel_config):
        channel_config = copy.deepcopy(channel_config)
        with START_CHANNEL_HISTOGRAM.labels(channel_name).time():
            # NB: Possibly override channel name
            channel_name = channel_config.get("channel_name", channel_name)
            source_configs = channel_config.pop("sources")
            src_workers = self.source_workers.update(channel_name,
                                                     source_configs)
            module_workers = validated_workflow(channel_name, src_workers,
                                                channel_config, self.logger)

            queue_info = [(worker.queue.name, worker.key)
                          for worker in src_workers.values()]
            self.logger.debug(f"Building TaskManger for {channel_name}")
            task_manager = TaskManager.TaskManager(
                channel_name,
                module_workers,
                dataspace.DataSpace(self.global_config),
                source_products(src_workers),
                self.exchange,
                self.broker_url,
                queue_info,
            )
            self.logger.debug(f"Building Worker for {channel_name}")
            worker = ChannelWorker(task_manager, self.global_config["logger"])
            WORKERS_COUNT.inc()
            with self.channel_workers.access() as workers:
                workers[channel_name] = worker

            # The channel must be started first so it can listen for the messages from the sources.
            self.logger.debug(f"Trying to start {channel_name}")
            worker.start()
            self.logger.info(f"Channel {channel_name} started")

            worker.wait_while(ProcessingState.State.BOOT)

            # Start any sources that are not yet alive.
            for key, src_worker in src_workers.items():
                if src_worker.is_alive():
                    continue
                if src_worker.exitcode == 0:  # pragma: no cover
                    # This can happen if the source's acquire method runs only once (e.g. when testing)
                    # and the first process completes before the next channel can use it.
                    raise RuntimeError(
                        f"The {key} source has already completed and cannot be used by channel {channel_name}."
                    )

                src_worker.start()
                self.logger.debug(
                    f"Started process {src_worker.pid} for source {key}")

            worker.wait_while(ProcessingState.State.ACTIVE)
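The source checks in start_channel rely on standard multiprocessing.Process semantics: a process that has never been started reports is_alive() as False and has exitcode None, so it is safe to start, while a process that has already finished also reports is_alive() as False but carries an integer exitcode and cannot be started again. A small standalone illustration of that distinction (plain standard library, unrelated to the classes above):

import multiprocessing


def quick_job():
    pass  # returns immediately, so the process exits with code 0


if __name__ == "__main__":
    fresh = multiprocessing.Process(target=quick_job)
    print(fresh.is_alive(), fresh.exitcode)  # False None -> never started, safe to call start()

    done = multiprocessing.Process(target=quick_job)
    done.start()
    done.join()
    print(done.is_alive(), done.exitcode)  # False 0 -> already completed; calling start() again would raise

The remaining snippets are unit tests that exercise ChannelWorker's logger setup and process naming.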
def test_worker_logger_timed_rotation(global_config):
    global_config["logger"]["file_rotate_by"] = "time"
    worker = ChannelWorker(_TASK_MANAGER, global_config["logger"])
    worker.setup_logger()

    assert "TimedRotatingFileHandler" in str(worker.logger.handlers)
def test_worker_logger_sized_rotation(global_config):
    worker = ChannelWorker(_TASK_MANAGER, global_config["logger"])
    worker.setup_logger()

    assert "RotatingFileHandler" in str(worker.logger.handlers)
def test_worker_name(global_config):
    worker = ChannelWorker(_TASK_MANAGER, global_config["logger"])

    assert worker.name == f"DEChannelWorker-{_TASK_MANAGER.name}"
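
The last test pins down the worker's process name, which combines a fixed prefix with the task manager's name. A hedged sketch of how a multiprocessing.Process subclass can set such a name via the name keyword argument; NamedWorker is an illustrative stand-in, not the actual ChannelWorker implementation.

import multiprocessing


class NamedWorker(multiprocessing.Process):
    def __init__(self, task_name):
        # Mirror the "DEChannelWorker-<task manager name>" convention checked above.
        super().__init__(name=f"DEChannelWorker-{task_name}")


worker = NamedWorker("my_channel")
print(worker.name)  # DEChannelWorker-my_channel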