Example No. 1
    def get_server(self):
        'Better than monkeypatching for now; merge into Server ultimately'
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started SharedMemoryServer")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("SharedMemoryManager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self._Server(self._registry, self._address,
                            self._authkey, self._serializer)
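
A minimal usage sketch for the method above, assuming Python 3.8+, where SharedMemoryManager lives in multiprocessing.managers; get_server() only succeeds before start() has been called, otherwise it raises ProcessError as shown:

from multiprocessing.managers import SharedMemoryManager

manager = SharedMemoryManager(address=('127.0.0.1', 50000), authkey=b'secret')
server = manager.get_server()   # only valid in the INITIAL state
server.serve_forever()          # run the manager server in the current process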
Example No. 2
import logging as log
from multiprocessing import Process, ProcessError, Queue
from queue import Empty as QueueEmpty
from typing import Callable, Union

# _mp_wrapped_func and multiprocessing_exitcode_to_signal are project
# helpers that are not part of this excerpt.


def multiprocessing_run(func: Callable,
                        func_args: list,
                        func_log_name: str,
                        timeout: Union[int, None] = None):
    """
    Runs a callable in a separate process using the multiprocessing module.

    :param func: callable object
    :param func_args: list of arguments for the callable
    :param func_log_name: name of the callable, used for logging
    :param timeout: positive int to limit execution time
    :return: return value (or values) from the callable object
    """
    queue = Queue()
    logger_queue = Queue(-1)
    process = Process(target=_mp_wrapped_func,
                      args=(func, func_args, queue, logger_queue))
    process.start()
    try:
        error_message, *ret_args = queue.get(timeout=timeout)
    except QueueEmpty:
        raise TimeoutError(
            "{func} running timed out!".format(func=func_log_name))
    finally:
        queue.close()

        # Extract logs from Queue and pass to root logger
        while not logger_queue.empty():
            rec = logger_queue.get()
            log.getLogger().handle(rec)
        logger_queue.close()

        if process.is_alive():
            process.terminate()
            process.join()
        else:
            exit_signal = multiprocessing_exitcode_to_signal(process.exitcode)
            if exit_signal:
                raise ProcessError(
                    "{func} was killed with a signal {signal}".format(
                        func=func_log_name, signal=exit_signal))

    if error_message:
        raise ProcessError("\n{func} running failed: \n{msg}".format(
            func=func_log_name, msg=error_message))

    # unwrap from the list if only one item is returned
    ret_args = ret_args[0] if len(ret_args) == 1 else ret_args
    return ret_args
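
A usage sketch, assuming _mp_wrapped_func runs the callable in the child and puts (error_message, *return_values) on the queue; slow_square is a hypothetical picklable function:

def slow_square(x):
    return x * x

result = multiprocessing_run(slow_square, [3], "slow_square", timeout=10)
assert result == 9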
Example No. 3
from multiprocessing import Process, ProcessError, Queue
from queue import Empty
from typing import List


def get_if_worker_healthy(workers: List[Process], queue: Queue):
    """
    Get an item from the queue, failing fast if any worker has died.

    Parameters
    ----------
    workers: List[Process]
        List of worker processes which should be in a healthy state,
        i.e. either terminated with exit code 0 (success) or still
        running (exitcode is None in this case).

    queue: Queue
        A multiprocessing queue which is fed by the workers.

    Returns
    -------
    item: An item from the queue.
    """
    while True:
        try:
            item = queue.get(True, 5)
            return item
        except Empty:
            if not healthy(workers):
                raise ProcessError("At least one worker is dead.")
    raise Exception("The code should never reach here")
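
The healthy() predicate is not part of the excerpt; a minimal sketch that matches the docstring's definition of a healthy worker:

def healthy(workers: List[Process]) -> bool:
    # A worker is healthy while it is still running (exitcode is None)
    # or after it has terminated successfully (exitcode == 0).
    return all(worker.exitcode in (None, 0) for worker in workers)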
Example No. 4
    def execute_et_steps(self):
        """
        executes et steps in parallel using a pool of size
        input_args.pool_size which is defined on the command line
        """
        self._construct_etl_generator('et')

        # use chunksize=1 for the pool because we have a small number of
        # lengthy jobs rather than a large number of short jobs
        pool = Pool(processes=self.pool_size)
        pool_results = []
        try:
            pool_results = pool.map(_executor, self.et_generator, chunksize=1)
            pool.close()
            failures = [
                step_result for step_result in pool_results
                if step_result['status'] != "success"
            ]
            if failures:
                raise ProcessError({
                    'results': pool_results,
                    'failures': failures
                })
        except KeyboardInterrupt:
            print("...keyboard interrupt in map, terminating")
            raise
        finally:
            pool.terminate()
            pool.join()
        return pool_results
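
The module-level _executor handed to pool.map is not shown; a sketch inferred from the serial variant in Example No. 9 (Pool targets must be picklable, hence a module-level function):

def _executor(step):
    step_result = _init_step_result(step)
    try:
        step.execute()
        step_result['status'] = 'success'
    except Exception:
        step_result['status'] = 'error'
        step_result['error_info'] = _capture_error_info()
    finally:
        step_result['end_time'] = time.time()
    return step_result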
Example No. 5
    def execute_load_steps(self):
        """
        Iterates over the load steps in the generator, using the method
        chosen in the constructor based on the polling interval defined
        (or not) on the command line.
        """
        self._construct_etl_generator('load')
        results = []
        for step in self.load_generator:
            step_result = _init_step_result(step)
            try:
                self._try_load_step(step)
                step_result['status'] = 'success'
            except (KeyboardInterrupt, KeyboardInterruptError):
                raise
            except Exception:
                step_result['status'] = 'error'
                step_result['error_info'] = _capture_error_info()
                raise ProcessError({
                    'results': results,
                    'failures': [step_result]
                })
            finally:
                step_result['end_time'] = time.time()
                results.append(step_result)
        return results
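
The _init_step_result helper is shared by several of these examples but never shown; a hypothetical shape inferred from the keys the surrounding code reads and writes:

import time

def _init_step_result(step):
    return {
        'repr': repr(step),
        'status': 'unknown',
        'start_time': time.time(),
        'end_time': None,
        'error_info': None,
    }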
Example No. 6
    def run(self) -> None:
        while True:
            state = self.queue.get()
            if not isinstance(state, int):
                raise ProcessError(
                    "Illegal IPC message. Restart the Lightstrip process...")
            else:
                self.loading(state)
Example No. 7
    def run(self) -> None:
        self.init_player()
        while True:
            cmd = self.queue.get()
            if isinstance(cmd, MusicCmd):
                if cmd is MusicCmd.PLAY:
                    self.unpause()
                else:
                    self.pause()
            else:
                raise ProcessError(
                    "Illegal IPC message. Restart the MusicPlayer process...")
Example No. 8
import re
from multiprocessing import Process, ProcessError


def safe_search(pattern, text, timeout):
    compiled = re.compile(pattern)

    p = Process(target=search, args=(compiled, text))
    p.start()
    p.join(timeout)

    if p.exitcode is None:
        # still running after the timeout expired
        p.terminate()
        raise TimeoutError('search process timeout')

    if p.exitcode < 0:
        # a negative exitcode means the process was killed by a signal
        raise ProcessError('search process terminated')

    return bool(p.exitcode)
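
The search worker target is not shown; a sketch that fits the exit-code protocol above, where the child exits with code 1 on a match and 0 otherwise so the parent can recover the boolean from p.exitcode:

import sys

def search(compiled, text):
    sys.exit(1 if compiled.search(text) else 0)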
Example No. 9
    def execute_et_steps(self):
        """
        Executes the ET steps serially.
        """
        self._construct_etl_generator('et')
        results = []
        for step in self.et_generator:
            step_result = _init_step_result(step)
            try:
                step.execute()
                step_result['status'] = 'success'
            except (KeyboardInterrupt, KeyboardInterruptError):
                raise
            except Exception:
                step_result['status'] = 'error'
                step_result['error_info'] = _capture_error_info()
                raise ProcessError({
                    'results': results,
                    'failures': [step_result]
                })
            finally:
                step_result['end_time'] = time.time()
                results.append(step_result)
        return results
Example No. 10
    async def __handle_client(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter,
    ) -> None:
        proto = AsyncProtocol(reader, writer)
        packet_type, worker_id, digest, pid = await proto.receive()

        async with self.__closing_lock:
            if self.__closing:
                proto.close()

        if packet_type == PacketTypes.BAD_INITIALIZER:
            packet_type, exc = await proto.receive()
            if packet_type != PacketTypes.EXCEPTION:
                await proto.send(PacketTypes.BAD_PACKET)
            else:
                set_exception(self.__futures, exc)
            await self.close()
            return

        if packet_type != PacketTypes.AUTH:
            await proto.send(PacketTypes.BAD_PACKET)
            if writer.can_write_eof():
                writer.write_eof()
            return

        if worker_id not in self.worker_ids:
            log.error("Unknown worker with id %r", worker_id)
            return

        expected_digest = hmac.HMAC(
            self.__cookie,
            worker_id,
            digestmod=hashlib.sha256,
        ).digest()

        if expected_digest != digest:
            await proto.send(PacketTypes.AUTH_FAIL)
            if writer.can_write_eof():
                writer.write_eof()
            log.debug("Bad digest %r expected %r", digest, expected_digest)
            return

        await proto.send(PacketTypes.AUTH_OK)
        self._statistic.processes += 1
        self._statistic.spawning += 1
        self.pids.add(pid)

        try:
            while not reader.at_eof():
                func: Callable
                args: Tuple[Any, ...]
                kwargs: Dict[str, Any]
                result_future: asyncio.Future
                process_future: asyncio.Future

                (
                    func, args, kwargs, result_future, process_future,
                ) = await self.tasks.get()

                if process_future.done() or result_future.done():
                    continue

                try:
                    process_future.set_result(pid)
                    await proto.send((PacketTypes.REQUEST, func, args, kwargs))
                    packet_type, payload = await proto.receive()

                    if result_future.done():
                        log.debug(
                            "Result future %r already done, skipping",
                            result_future,
                        )
                        continue

                    if packet_type == PacketTypes.RESULT:
                        result_future.set_result(payload)
                    elif packet_type in (
                        PacketTypes.EXCEPTION, PacketTypes.CANCELLED,
                    ):
                        result_future.set_exception(payload)
                    del packet_type, payload
                except (asyncio.IncompleteReadError, ConnectionError):
                    if not result_future.done():
                        result_future.set_exception(
                            ProcessError(f"Process {pid!r} unexpected exited"),
                        )
                    break
                except Exception as e:
                    if not result_future.done():
                        result_future.set_exception(e)

                    if not writer.is_closing():
                        if writer.can_write_eof():
                            writer.write_eof()
                        writer.close()
                    raise
        finally:
            self._statistic.processes -= 1
            self.pids.remove(pid)
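
The worker side of this handshake is not part of the excerpt; a hypothetical counterpart that produces the digest the server verifies (proto, PacketTypes and the shared cookie are assumed from the surrounding code):

import hashlib
import hmac
import os

async def authenticate(proto, cookie: bytes, worker_id: bytes) -> None:
    # Compute the same keyed digest the server checks above, then send it
    # along with this worker's id and pid.
    digest = hmac.HMAC(cookie, worker_id, digestmod=hashlib.sha256).digest()
    await proto.send((PacketTypes.AUTH, worker_id, digest, os.getpid()))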
Example No. 11
        # (fragment of an exception handler: `e`, `start_time` and `results`
        # are defined earlier in the enclosing function)
        step_result = _init_step_result(
            ETLStep(args_namespace,
                    args_namespace.start_date.strftime("%Y-%m-%d"), None))
        if type(e) in [KeyboardInterrupt, KeyboardInterruptError]:
            step_result['status'] = 'cancelled'
        else:
            step_result['error_info'] = _capture_error_info()
        step_result['start_time'] = start_time
        step_result['end_time'] = time.time()
        results.append(step_result)

    return results


if __name__ == '__main__':
    args_namespace = parse_command_line(sys.argv)
    load_package_config(args_namespace.config)
    results = ingest_multiple_dates_main(args_namespace)
    failures = [
        step_result for step_result in results
        if step_result['status'] != "success"
    ]
    if not failures:
        print("all et steps succeeded")
    else:
        print("job FAILED, quitting")
        print("the following steps failed:")
        for f in failures:
            print(f['repr'], f['error_info'])
        raise ProcessError(failures)
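
_capture_error_info is referenced throughout these ETL examples but never defined; a plausible minimal version, assuming it is always called from inside an except block:

import traceback

def _capture_error_info():
    # Capture the active exception as a formatted traceback string.
    return traceback.format_exc()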
Example No. 12
        async def handler(start_event: asyncio.Event) -> None:
            log.debug("Starting to handle client")

            packet_type, salt = await receive()
            assert packet_type == PacketTypes.AUTH_SALT

            packet_type, digest = await receive()
            assert packet_type == PacketTypes.AUTH_DIGEST

            hasher = HASHER()
            hasher.update(salt)
            hasher.update(self.__cookie)

            if digest != hasher.digest():
                exc = AuthenticationError("Invalid cookie")
                await send(PacketTypes.EXCEPTION, exc)
                raise exc

            await send(PacketTypes.AUTH_OK, True)

            log.debug("Client authorized")

            packet_type, identity = await receive()
            assert packet_type == PacketTypes.IDENTITY
            process = self.__spawning.pop(identity)
            starting: asyncio.Future = self.__starting.pop(identity)

            if self.initializer is not None:
                initializer_done = self.__create_future()

                await step(self.initializer, self.initializer_args,
                           dict(self.initializer_kwargs), initializer_done)

                try:
                    await initializer_done
                except Exception as e:
                    starting.set_exception(e)
                    raise
                else:
                    starting.set_result(None)
                finally:
                    start_event.set()
            else:
                starting.set_result(None)
                start_event.set()

            while True:
                func: Callable
                args: Tuple[Any, ...]
                kwargs: Dict[str, Any]
                result_future: asyncio.Future
                process_future: asyncio.Future

                (
                    func,
                    args,
                    kwargs,
                    result_future,
                    process_future,
                ) = await self.tasks.get()

                try:
                    if process_future.done():
                        continue

                    process_future.set_result(process)

                    if result_future.done():
                        continue

                    await step(func, args, kwargs, result_future)
                except asyncio.IncompleteReadError:
                    await self.__wait_process(process)
                    self.__on_exit(process)

                    result_future.set_exception(
                        ProcessError(
                            "Process {!r} exited with code {!r}".format(
                                process, process.returncode,
                            ),
                        ),
                    )
                    break
                except Exception as e:
                    if not result_future.done():
                        self.loop.call_soon(result_future.set_exception, e)

                    if not writer.is_closing():
                        self.loop.call_soon(writer.close)

                    await self.__wait_process(process)
                    self.__on_exit(process)

                    raise
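
The child side of this salted handshake is not shown; a sketch assuming HASHER, PacketTypes and send are shared with the parent: the child picks a random salt, sends it, then sends the digest of the salt plus the cookie for the parent to verify:

import os

async def client_auth(send, cookie: bytes) -> None:
    salt = os.urandom(32)
    await send(PacketTypes.AUTH_SALT, salt)
    hasher = HASHER()
    hasher.update(salt)
    hasher.update(cookie)
    await send(PacketTypes.AUTH_DIGEST, hasher.digest())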