def __init__(self, wake_time, wake_callback, *args, **kwargs):
    super(WakeTimerThread, self).__init__(*args, **kwargs)
    self.__wake_callback = wake_callback
    self.__abort_event = ThreadEvent()
    self.__wake_time = WakeTimerThread.place_waketime_ahead(wake_time)
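
The abort event supports the classic interruptible-sleep pattern: Event.wait(timeout=...) returns early, and truthy, if another thread sets the event. A plausible run body under that assumption (a sketch, not part of the original source; it also assumes __wake_time is an epoch timestamp):

    def run(self):
        # sleep until the wake time, but wake immediately on abort
        delay = max(0, self.__wake_time - time.time())
        if not self.__abort_event.wait(timeout=delay):
            # wait() returned False: the timeout elapsed with no abort
            self.__wake_callback()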
Example #2
    def __init__(self, amqp_url, routing_key, publish_tempo_sec,
                 exchange_name):
        '''Set up the example publisher object, passing in the URL we will use
        to connect to RabbitMQ.

        :param str amqp_url: The URL for connecting to RabbitMQ
        '''
        self._channel = None
        self._connection = None

        self._acked = 0
        self._nacked = 0
        self._deliveries = []
        self._message_number = 0

        self._closing = False
        self._stopping = False
        self.connect_error = False

        self._amqp_url = amqp_url
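        # ThreadEvent used as a start/stop flag for the background publishing task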
        self._task_run_event = ThreadEvent()
        self._publish_tempo_sec = publish_tempo_sec
        self._thread_queue = ThreadQueue(maxsize=500)

        self._tempo_controller = QueueToSampleTimeControl(
            i_max=1 / self.PUBLISH_FAST_INTERVAL_SEC, dt=publish_tempo_sec)

        # set the exchange, queue and routing key names for the RabbitMQ
        # server running at amqp_url
        self._rabbit_exchange_name = exchange_name
        self._rabbit_routing_key = routing_key
Example #3
    def __init__(self, target_log_file: str):
        super().__init__(target_log_file)
        self._thread = Thread(target=self._thread_body, daemon=True)
        self._termination_event = ThreadEvent()
        # On the first run, pick up logs from the preceding hour, to cover
        # cases where the logger is started long after the node is started
        self._last_time_completed = time.time() - 60 * 60
Example #4
    def __init__(self,
                 configs,
                 cli_options=None,
                 cfg_patches=None,
                 cli_args=None,
                 no_local=False,
                 log_handlers=None,
                 wait_lock=False,
                 files=None,
                 ammo_file=None,
                 api_start=False,
                 manager=None,
                 debug=False):
        self.api_start = api_start
        self.wait_lock = wait_lock
        self.log_handlers = log_handlers if log_handlers is not None else []
        self.files = [] if files is None else files
        self.ammo_file = ammo_file
        self.config_paths = configs
        self.interrupted = ProcessEvent() if api_start else ThreadEvent()
        self.info = TankInfo(manager.dict()) if api_start else TankInfo(dict())
        self.config_list = self._combine_configs(configs, cli_options,
                                                 cfg_patches, cli_args,
                                                 no_local)
        self.core = TankCore(self.config_list, self.interrupted, self.info)
        self.folder = self.init_folder()
        self.init_logging(debug
                          or self.core.get_option(self.core.SECTION, 'debug'))

        is_locked = Lock.is_locked(self.core.lock_dir)
        if is_locked and not self.core.config.get_option(
                self.SECTION, 'ignore_lock'):
            raise LockError(is_locked)
Example #5
    def __init__(self,
                 amqp_url,
                 routing_key,
                 exchange_name,
                 message_ttl_in_ms=None):
        """Create a new instance of the consumer class, passing in the AMQP
    URL used to connect to RabbitMQ.

    :param str amqp_url: The AMQP url to connect with

    """
        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._url = amqp_url
        self._task_run_event = ThreadEvent()
        self._exchange_name = exchange_name
        self._routing_key = routing_key
        self._queue_name = None

        if not message_ttl_in_ms:
            self._message_ttl_in_ms = self.QUEUE_TTL_IN_MS
        else:
            self._message_ttl_in_ms = message_ttl_in_ms
Example #6
def thread_safe_attribute_test2(time_in_seconds, number_of_threads, alog_file):
    # a class to test against
    class A4(metaclass=MetaThreadSafeAttributes):
        _attributes = ['hammered_attribute']

        def __init__(self):
            self.hammered_attribute = 0

    # confirm that normal attributes are working
    a3 = A4()
    # confirm that the thread-safe attribute works as expected from the main thread
    a3.hammered_attribute = 0
    assert a3.hammered_attribute == 0
    a3.hammered_attribute += 1
    assert a3.hammered_attribute == 1
    a3.hammered_attribute -= 1
    assert a3.hammered_attribute == 0

    # begin the multithreaded tests
    # make an event that can turn off all threads
    event = ThreadEvent()
    event.set()
    # create and start the thread
    for i in range(number_of_threads):
        thread = make_test_thread2("thrd_" + "{0:02}:".format(i), a3, event)
        thread.start()

    # let the test run for the desired time
    time.sleep(time_in_seconds)
    event.clear()
    time.sleep(0.5)
    assert a3.hammered_attribute == number_of_threads * 100
Example #7
    def __run_workers_supervisor(self):
        # Internal object to synchronize on the supervisor exit
        self.__supervisor_exited = ThreadEvent()

        self.__stopped_workers = {}
        workers_pool = cycle(self.__workers_pool)

        for worker in workers_pool:
            try:
                # Thread exit if all the workers have been stopped
                if len(self.__stopped_workers.keys()) == len(self.__workers):
                    break

                action_response = worker._value_bus.next(
                    blocking=True,
                    timeout=self.__DEFAULT_WORKER_SUPERVISOR_POLL_PERIOD)

                if isinstance(action_response, ActionResponse):
                    self.__process_action_response(action_response)
                elif isinstance(action_response, StopEvent):
                    self.__on_worker_stop(worker)
            except EmptyBus:
                continue

        self.__supervisor_exited.set()
Example #8
    def __init__(self, chart):
        super().__init__()
        self.chart = chart
        self.thread_event = ThreadEvent()
        self.signal_names = 'ABCDEFGHIMT'
        self.post_action = self._post_action

        self.thread_event.set()
Example #9
    def __init__(self,
                 name: str,
                 run: Callable[[], None],
                 publish_event: Optional[Callable[[Event], None]] = None):
        super().__init__(name, publish_event)
        self.__thread = Thread(target=run)
        self._stopped = ThreadEvent()
        self.__thread.start()
Example #10
    def __init__(self, appliance):
        super(RestEventListener, self).__init__()
        self._appliance = appliance
        self._events_to_listen = []
        self._last_processed_id = 0  # this is used to filter out old or processed events
        self._stop_event = ThreadEvent()

        self.event_streams = appliance.rest_api.collections.event_streams
Example #11
    def __init__(self):
        # Super call
        super().__init__("queues.json")

        # Some setup
        self.__queues = {}
        self.__interrupt_times = {}

        # Init keep alive thread
        self.__keep_alive_stop = ThreadEvent()
        self.__keep_alive_t = None
Example #12
    def __init__(self, appliance):
        super(EventListener, self).__init__()
        self._appliance = appliance
        self._tool = EventTool(self._appliance)

        self._events_to_listen = []
        # last_id is used to ignore messages that have already arrived in the
        # database. When the database is "cleared", the id of the last event is
        # stored here; it is then used in queries to filter out events with
        # this id or earlier.
        self._last_processed_id = None
        self._stop_event = ThreadEvent()
Example #13
    def start(self):
        self.shutdown_flag = ThreadEvent()

        self.thread = Thread(target=run_test_nanovault_ws_node,
                             kwargs={
                                 "shutdown_flag": self.shutdown_flag,
                                 "ws_mock_node": self
                             })
        self.thread.start()

        # Wait for a bit to let the thread start up
        time.sleep(0.05)
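        # Note: a fixed sleep is only a guess at startup time. A more robust
        # handshake would hand the thread a second ThreadEvent (a hypothetical
        # ready_flag), have run_test_nanovault_ws_node set it once the mock
        # node is listening, and block here on ready_flag.wait(timeout=...).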
Example #14
    def __init__(self, chart):
        super().__init__()
        self.thread_event = ThreadEvent()
        self.chart = chart

        def make_post_function(signal_name):
            def post_event():
                chart.post_fifo(Event(signal=signal_name))

            def quit():
                self.thread_event.clear()

            return quit if signal_name == 'T' else post_event

        self.thread_event.set()
        self.post_functions = {
            character: make_post_function(character) for character in 'ABCDEFGHIT'}
Example #15
    def __init__(self, name=None, instrumented=None):

        if instrumented is None:
            instrumented = True

        super().__init__(instrumented)
        self.locking_deque = LockingDeque()
        self.activeobject_task_event = ThreadEvent()
        # Overwrite the deque in the HsmWithQueues with the one managed by the
        # LockingDeque object. This is the 'magic' in this object: any time a
        # post_fifo or post_lifo method within the Hsm is called, it unknowingly
        # uses the LockingDeque object, which also provides the 'get' method.
        # This 'get' method allows our active object task to sleep until there
        # is something to do. To understand this object, you need to grok this.
        self.queue = self.locking_deque

        # The active fabric is a singleton that dispatches messages between all
        # active objects. It provides a publish/subscribe infrastructure, and it
        # will post directly into our locking_deque object; as a result, the
        # 'get' method of this object will unlock our task.
        self.fabric = ActiveFabric()
        # for writing live instrumentation
        self.writer = InstrumentionWriter()

        self.thread = None
        self.name = name

        # the QUEUE_SIZE is defined in HsmWithQueues
        self.posted_events_queue = deque(maxlen=self.__class__.QUEUE_SIZE)
        self.PostedEventThreadSpec = namedtuple('PostedEventThreadSpec', [
            'event',
            'queue_type',
            'total_times',
            'deferred',
            'period',
            'task_run_event',
        ])
        self.PostedEvent = namedtuple('PostedEvents', [
            'signal_name',
            'task_run_event',
            'uuid',
        ])

        self.register_live_spy_callback(
            self.__class__.live_spy_callback_default)

        self.register_live_trace_callback(
            self.__class__.live_trace_callback_default)

        self.last_live_trace_datetime = len(self.full.trace)
Example #16
    def __init__(self,
                 client: RpcClient,
                 names: List[str],
                 client_id: int = None,
                 logger: Logger = None):

        # Init some stuff
        self.client = client
        self.names = names
        self.logger = logger if logger is not None else getLogger(
            EventsListener.__name__)
        self.client_id = client_id
        self.ready = ThreadEvent()
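        # readiness flag, presumably set by __listen_to_events once the
        # subscription is live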

        # Prepare listening thread
        self.listening_t = Thread(target=self.__listen_to_events, daemon=True)
        self.listening_t.start()
Example #17
    def __init__(self, amqp_url, routing_key, exchange_name, queue_name):
        """Create a new instance of the consumer class, passing in the AMQP
    URL used to connect to RabbitMQ.

    :param str amqp_url: The AMQP url to connect with

    """
        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._url = amqp_url
        self._task_run_event = ThreadEvent()
        self._thread_tempo_sec = self.THREAD_TEMPO_SEC
        self._exchange_name = exchange_name
        self._routing_key = routing_key
        self._queue_name = queue_name
Example #18
    def __init__(self, config, on_exit=None):
        """
        Constructor

        config  -- evesp.config.Config object
        on_exit -- Optional method to invoke when the engine has stopped
        """

        self.__state = EngineState.Initializing
        self.config = config
        self.db = Db.get_db(db_path=self.get_db_path())

        # Event object used to synchronize the threads when the engine enters
        # the "Running" state
        self.__engine_running = ThreadEvent()

        self.components = {}
        self.__on_exit = on_exit
        self.__classes = {}
        self.__parsed_engine_config = False

        for comp_name, component in self.config.components.items():
            if comp_name == self.__ENGINE_COMP_NAME:
                self.__parse_engine_comp_config(component)
            else:
                if 'module' not in component:
                    raise AttributeError(
                        'No module name specified for the component name ' +
                        comp_name + ' - e.g. evesp.component.mock_component')
                self.__classes[comp_name] = \
                    component_class_by_module_name(component['module'])

                # Now that it's been used, remove the module key from
                # the component configuration
                del self.config.components[comp_name]['module']

        if not self.__parsed_engine_config:
            raise AttributeError(
                'The configuration file has no %s module configuration' %
                self.__ENGINE_COMP_NAME)

        self.__actions = {}
        self.__create_worker_pool()
        self.__state = EngineState.Ready
Example #19
    def _start(self):
        process_count = self.config.get("work.local.threads")

        if process_count == -1:
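            # auto-detect: use half of the available CPU cores, but always
            # keep at least one worker process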
            process_count = max(int(os.cpu_count() / 2), 1)

        self.shutdown_flag = ThreadEvent()
        self.thread = Thread(target=run_work_thread,
                             kwargs={
                                 "process_count": process_count,
                                 "work_units": self.work_units,
                                 "work_lock": self.work_lock,
                                 "shutdown_flag": self.shutdown_flag
                             })
        self.thread.start()

        logger.info("Started work thread")

        self.started = True
Example #20
    def synchronous_future(self, func, *args, after_idle=False, **kwargs):
        # TODO: This breaks if called from the main thread, fix that?
        """ Executes a function in the GUI event loop, waiting either for its return, or for a Tk exception. """
        e = ThreadEvent()
        q = Queue(maxsize=1)
        self.event_signals.append(e)

        def func_with_event_set():
            q.put(func(*args, **kwargs))
            e.set()

        self.future(func_with_event_set, after_idle=after_idle)
        e.wait()
        try:  # Try and remove the event signal
            self.event_signals.remove(e)
        except ValueError:  # The signal was already removed because of an exception
            pass
        if q.empty():  # The function didn't successfully return
            return EXCEPTION
        else:
            return q.get()
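
A minimal usage sketch (assumptions: gui is a running instance of this class, and EXCEPTION is the module-level sentinel this method returns on failure):

    label_text = gui.synchronous_future(some_label.cget, 'text')
    if label_text is EXCEPTION:
        print('Tk raised before the call completed')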
Example #21
    def __init__(self,
                 bus: Optional[Bus] = None,
                 poll_seconds: Optional[float] = None,
                 **kwargs):
        """
        :param bus: Reference to the bus object to be used in the backend
        :param poll_seconds: If the backend implements a ``loop`` method, this parameter expresses how often the
            loop should run in seconds.
        :param kwargs: Key-value configuration for the backend
        """

        self._thread_name = self.__class__.__name__
        EventGenerator.__init__(self)
        ExtensionWithManifest.__init__(self)
        Thread.__init__(self, name=self._thread_name, daemon=True)

        # If no bus is specified, create an internal queue where
        # the received messages will be pushed
        self.bus = bus or Bus()
        self.poll_seconds = float(poll_seconds) if poll_seconds else None
        self.device_id = Config.get('device_id')
        self.thread_id = None
        self._stop_event = ThreadEvent()
        self._kwargs = kwargs
        self.logger = logging.getLogger(
            'platypush:backend:' + get_backend_name_by_class(self.__class__))
        self.zeroconf = None
        self.zeroconf_info = None

        # Internal-only, we set the request context on a backend if that
        # backend is intended to react for a response to a specific request
        self._request_context = kwargs['_req_ctx'] if '_req_ctx' in kwargs \
            else None

        if 'logging' in kwargs:
            self.logger.setLevel(
                getattr(logging,
                        kwargs.get('logging').upper()))
Example #22
    def __init__(self):
        """Initialize the event engine."""
        # event queue
        self.__queue = Queue()
        self.__count = 0  # counter, used to help implement the priority queue
        self.__file_opened = []
        # event engine on/off switch
        self.__active = False
        self.__finished = False
        self.__is_empty = ThreadEvent()
        self.__thread = None
        self.__exc_type = None
        self.__exc_value = None
        self.__exc_traceback = None
        # timer, used to fire timer events
        # self.__timer = QTimer()
        # self.__timer.timeout.connect(self.__onTimer)

        # __handlers is a dict that maps each event type to its handlers.
        # The value for each key is a list holding the handler functions
        # that listen for that event.
        self.__handlers = {}
        # register the async event handler
        self.register(EVENT_ASYNC, lambda event: event.content['func']())
Example #23
    def start(self):
        self.stop_event = ThreadEvent()
        self.timer = Thread(target=self._run,
                            args=(self.task, self.delay, self.stop_event))
        self.timer.daemon = True
        self.timer.start()
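
A plausible _run body for this pattern (a sketch, not the original implementation): Event.wait(timeout=delay) doubles as an interruptible sleep, returning True as soon as stop_event is set and False when the delay elapses.

    def _run(self, task, delay, stop_event):
        # repeat the task every `delay` seconds until stop_event is set
        while not stop_event.wait(timeout=delay):
            task()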
Example #24
from itertools import repeat, cycle
from multiprocessing import Process as MultiProcess, Queue, Event as MultiEvent
from multiprocessing.pool import ThreadPool
from threading import Thread, Lock, Event as ThreadEvent
from collections import deque
from pathlib import Path
from time import sleep
from setproctitle import setproctitle
from psutil import Process as PsProcess
from os import getpid
import pandas as pd
import os

# Thread-shared state for the loaded variables
loaded_list = deque()
data_lock = Lock()
thread_stop_event = ThreadEvent()

class Generator:
    """ Class representing an object that analyses an image database and
    generate data from it """

    def __init__(self, input_shape, generator_conf, queue_mem=0.1):
        if not Path(generator_conf['path']).exists():
            raise FileNotFoundError(generator_conf['path'] + ' doesn\'t exist!')

        self.gen_conf = generator_conf.copy()
        self._input_shape = input_shape
        self._queue_mem = queue_mem
        self._gen_stop_event = None
        self._queue = None
        self._proc = None
Example #25
            time.sleep(0.020)


class ThreadKiller:
    def __init__(self, evt, count_down):
        '''running within main thread'''
        self.evt = evt
        self.kill_time = count_down

    def thread_stopper(self):
        '''running within killer thread'''
        time.sleep(self.kill_time)
        self.evt.clear()


evt = ThreadEvent()
evt.set()

gl1 = GetLock1(evt)
gl2 = GetLock2(evt, gl1=gl1)
killer = ThreadKiller(evt, count_down=0.1)

threads = []
threads.append(Thread(target=gl1.thread_method_1, name='th1', args=()))
threads.append(Thread(target=gl2.thread_method_2, name='th2', args=()))

for thread in threads:
    thread.start()

thread_stopper = Thread(target=killer.thread_stopper, name='stopper', args=())
thread_stopper.start()
Example #26
    def __init__(self, m3u8Cfg: dict = None):
        self.setting = m3u8Cfg if m3u8Cfg else cfg  # fall back to the default config if none is passed in
        self._state = True  # running state
        self._http = m_http()
        # number of reconnect attempts after a requests connection failure
        self._reconnect = self.setting['cfg']['reconnect']
        # requests timeout
        self._timeout = self.setting['cfg']['timeout']
        # folder where downloaded files are saved
        self._save_folder = self.setting['save-folder']
        self._m3u8_file_hash = None
        self._create_m3u8_index_file = False  # whether the M3U8 playback index has been created
        self._del_state = False  # whether expired files are being deleted; needed in LIVE mode
        # decryption key
        self._m3u8_key = None
        # download thread pool configuration
        self._lock__ = Lock()  # thread lock
        self._download_event__ = ThreadEvent()  # download thread synchronization
        self._complete_event__ = ThreadEvent()  # download-complete notification event
        self._event_create_m3u8_file__ = ThreadEvent()  # event for generating the m3u8 index file
        self._download_complate_total__ = 0  # running count of completed downloads
        self._download_thread_active_ = 0  # number of active download threads, used to tell whether the download has finished
        self._downlaod_buffer_length = 0  # download buffer length, used to compute the block offset
        self._download_pool_size = self.setting['download-poolsize']
        self._download_pool = pools(self._download_pool_size)

        # initialize the download thread pool
        for i in range(0, self._download_pool_size):
            self._download_pool.push(m_http())
        self._save_folder = self.__checkfolder__(self._save_folder)

        # download buffer index: the playback index file is only generated
        # once the download index reaches this value
        self._ts_save_folder = None
        self._m3u8_buffer_index = self.setting['buffer-index']
        self._current_m3u8_file = None
        # live: live stream (the M3U8 file must be downloaded repeatedly);
        # whole: full playback (VOD; no repeated M3U8 downloads needed)
        self._play_mode__ = 'whole'
        # M3U8 TAGS
        self._prefix_url = None
        self._EXT_X_TARGETDURATION__ = None
        self._EXTINF__ = []
        self._EXT_X_VERSION__ = None
        self._EXT_X_BYTERANGE__ = None
        self._EXT_X_KEY__ = None
        self._EXT_X_MAP__ = None
        self._EXT_X_PROGRAM_DATE_TIME__ = None
        self._EXT_X_DATERANGE__ = None
        self._EXT_X_MEDIA_SEQUENCE__ = None
        self._EXT_X_DISCONTINUITY_SEQUENCE__ = None
        self._EXT_X_PLAYLIST_TYPE__ = None
        self._EXT_X_MEDIA__ = None
        self._EXT_X_STREAM_INF__ = None
        self._EXT_X_I_FRAME_STREAM_INF__ = None
        self._EXT_X_SESSION_DATA__ = None
        self._EXT_X_SESSION_KEY__ = None
        self._EXT_X_START__ = None
        self._EXT_X_DISCONTINUITY__ = None
        self._EXT_X_ENDLIST__ = None
        self._EXT_X_I_FRAMES_ONLY__ = None
        self._EXT_X_INDEPENDENT_SEGMENTS__ = None

        # custom M3U8 processing attributes
        self._EXT_X_M3U8_FILE_LIST__ = []  # a live m3u8 can reference multiple m3u8 file URLs
        self._EXT_X_M3U8_PLAY_LIST__ = []  # list of TS files in the m3u8 playlist
        self._EXT_X_M3U8_PLAY_LIST_INDEX__ = 0  # download index into the playlist's TS file list
        self._LIVE_DOWNLOAD_FILE = []  # files downloaded in live mode
Example #27
def _dispatch_in_thread(
    websocket: WebSocket,
    path: str,
    component: ComponentType,
    send: Callable[[Any], None],
    recv: Callable[[], Optional[LayoutEvent]],
) -> NoReturn:
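    # Event that holds this thread until the dispatcher thread has published
    # its event loop and queues via dispatch_thread_info_ref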
    dispatch_thread_info_created = ThreadEvent()
    dispatch_thread_info_ref: idom.Ref[
        Optional[_DispatcherThreadInfo]] = idom.Ref(None)

    @copy_current_request_context
    def run_dispatcher() -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        thread_send_queue: "ThreadQueue[LayoutUpdate]" = ThreadQueue()
        async_recv_queue: "AsyncQueue[LayoutEvent]" = AsyncQueue()

        async def send_coro(value: Any) -> None:
            thread_send_queue.put(value)

        async def recv_coro() -> Any:
            return await async_recv_queue.get()

        async def main() -> None:
            await serve_json_patch(
                idom.Layout(
                    ConnectionContext(component,
                                      value=Connection(request, websocket,
                                                       path))),
                send_coro,
                recv_coro,
            )

        main_future = asyncio.ensure_future(main())

        dispatch_thread_info_ref.current = _DispatcherThreadInfo(
            dispatch_loop=loop,
            dispatch_future=main_future,
            thread_send_queue=thread_send_queue,
            async_recv_queue=async_recv_queue,
        )
        dispatch_thread_info_created.set()

        loop.run_until_complete(main_future)

    Thread(target=run_dispatcher, daemon=True).start()

    dispatch_thread_info_created.wait()
    dispatch_thread_info = cast(_DispatcherThreadInfo,
                                dispatch_thread_info_ref.current)
    assert dispatch_thread_info is not None

    stop = ThreadEvent()
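    # checked by the sender loop below; it is never set in this function, so
    # the daemon sender thread simply dies with the process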

    def run_send() -> None:
        while not stop.is_set():
            send(dispatch_thread_info.thread_send_queue.get())

    Thread(target=run_send, daemon=True).start()

    try:
        while True:
            value = recv()
            dispatch_thread_info.dispatch_loop.call_soon_threadsafe(
                dispatch_thread_info.async_recv_queue.put_nowait, value)
    finally:  # pragma: no cover
        dispatch_thread_info.dispatch_loop.call_soon_threadsafe(
            dispatch_thread_info.dispatch_future.cancel)
Example #28
def init_dirq_consume(workers, daemonized, sockstat):
    """
       Initialize local cache/directory queue consumers. For each Queue defined
       in configuration, one worker process will be spawned and Publisher will
       be associated. Additional one process will be spawned to listen for
       queries on the socket. Register also local SIGTERM and SIGUSR events
       that will be triggered upon receiving same signals from daemon control
       process and that will be used to control the behaviour of spawned
       subprocesses and threads.
    """
    evsleep = 2
    consumers = list()
    localevents = dict()
    manager = Manager()

    for worker in workers:
        shared = Shared(worker=worker)

        # Create dictionaries that hold number of (published, consumed) messages
        # in seconds from epoch. Second from epoch is a key and number of
        # (published, consumed) messages in given second is associated value:
        #
        # { int(time.time()): num_of_bulk_msgs, ... }
        #
        # Counter is read on queries from socket.
        # collections.Counter cannot be shared between processes so
        # manager.dict() is used.
        shared.statint[worker]['consumed'] = manager.dict()
        shared.statint[worker]['published'] = manager.dict()
        shared.reload_confopts = manager.dict()

        # Create integer counters that will be shared across spawned processes
        # and that will keep track of number of published and consumed messages.
        # Counter is read on periodic status reports and signal SIGUSR1.
        shared.statint[worker]['consumed_periodic'] = Value('i', 1)
        shared.statint[worker]['published_periodic'] = Value('i', 1)

        if not getattr(shared, 'runtime', False):
            shared.runtime = dict()
            shared.runtime['started'] = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            shared.runtime['started_epoch'] = str(int(time.time()))

        if shared.general['publishmsgfile']:
            shared.runtime.update(publisher=FilePublisher)

        if shared.general['publishargomessaging']:
            try:
                if shared.topic['avro']:
                    avsc = open(shared.topic['avroschema'])
                    shared.topic.update(schema=avro.schema.parse(avsc.read()))
            except Exception as e:
                shared.log.error(e)
                raise SystemExit(1)

            shared.runtime.update(publisher=MessagingPublisher)

        localevents.update({'lck-' + worker: Lock()})
        localevents.update({'usr1-' + worker: Event()})
        localevents.update({'period-' + worker: Event()})
        localevents.update({'term-' + worker: Event()})
        localevents.update({'termth-' + worker: ThreadEvent()})
        localevents.update({'giveup-' + worker: Event()})
        shared.runtime.update(evsleep=evsleep)
        shared.runtime.update(daemonized=daemonized)

        consumers.append(ConsumerQueue(events=localevents, worker=worker))
        if not daemonized:
            consumers[-1].daemon = False
        consumers[-1].start()

    shared = Shared()
    workers_name = ', '.join(consumer.name for consumer in consumers)
    shared.log.info('Started {} workers'.format(workers_name))

    if worker:
        localevents.update({'lck-stats': Lock()})
        localevents.update({'usr1-stats': Event()})
        localevents.update({'term-stats': Event()})
        localevents.update({'termth-stats': ThreadEvent()})
        localevents.update({'giveup-stats': Event()})
        statsp = StatSock(events=localevents, sock=sockstat)
        statsp.daemon = False
        statsp.start()

    prevstattime = int(time.time())
    while True:
        if int(time.time()) - prevstattime >= shared.general['statseveryhour'] * 3600:
            shared.log.info('Periodic report (every %sh)' %
                            shared.general['statseveryhour'])
            for consumer in consumers:
                localevents['period-' + consumer.name].set()
                prevstattime = int(time.time())

        for consumer in consumers:
            if localevents['giveup-' + consumer.name].is_set():
                consumer.terminate()
                consumer.join(1)
                localevents['giveup-' + consumer.name].clear()

        if shared.event('term').is_set():
            for consumer in consumers:
                localevents['term-' + consumer.name].set()
                localevents['termth-' + consumer.name].set()
                consumer.join(1)
            shared.log.info('Stopped {} workers'.format(workers_name))
            localevents['term-stats'].set()
            localevents['termth-stats'].set()
            statsp.join(1)
            raise SystemExit(0)

        if shared.event('usr1').is_set():
            shared.log.info('Started %s' % shared.runtime['started'])
            for consumer in consumers:
                localevents['usr1-' + consumer.name].set()
            localevents['usr1-stats'].set()
            shared.event('usr1').clear()

        try:
            time.sleep(evsleep)
        except KeyboardInterrupt:
            for consumer in consumers:
                consumer.join(1)
            statsp.join(1)
            raise SystemExit(0)
Example #29
    def __init__(self):
        super(DistributorThread, self).__init__(ThreadQueue(), ThreadEvent())
Example #30
    def __post_event(self,
                     e,
                     times=None,
                     period=None,
                     deferred=None,
                     queue_type=None):
        '''
    The post_event method is used to post one-shots or periodic events to the
    active object.

    It constructs a fabric_task_event and a task, then starts the task.  The task will
    run periodically posting events into either the fifo or the lifo of the
    active object.

    Examples:
      # Post an 'A' signal event into the lifo every 1.0 seconds, 5 times.

      # On the first time, wait one second prior to posting.  This should take
      # about 6 seconds to complete
      ao.post_event(Event(signal=signals.A),
                      times=5,
                      period=1.0,
                      deferred=True,
                      queue_type='lifo')

      # Now, to cancel it, you can cancel all events with the same
      # signal_name
      time.sleep(2.0)
      ao.cancel_events(Event(signal=signals.A))

    Example:
      # Post an event, without a time or periodic constraint
      ao.post_event(Event(signal=signals.B))  # same as ao.post_fifo(Event(signal=signals.B))

    Example of posting an event with the same signal name several times:

      # construct a thread which will post signal A, 15 times every 1 second to
      # the lifo of the active object
      post_id_1 = ao.post_event(Event(signal=signals.A),
                      times=15,
                      period=1.0,
                      deferred=True,
                      queue_type='lifo')

      # construct a thread which will post signal A, 15 times every 10 seconds to
      # the fifo of the active object
      post_id_2 = ao.post_event(Event(signal=signals.A),
                      times=15,
                      period=10.0,
                      queue_type='fifo')

      # To cancel the first event-posting thread and leave the second
      # event-posting thread running:
      ao.cancel_event(uuid=post_id_1)

    Example of linking a posted event to a state function handler:

        @spy_on
        def some_state_function(chart, e):
          status = return_status.UNHANDLED

          if(e.signal == signals.ENTRY_SIGNAL):
            # This will cause us to transition into the other_state_function
            # once every three seconds, starting at the next rtc event
            one_shot_uuid = chart.post_event(Event(signal=signals.TIME_OUT),
                                             period=3.0,
                                             queue_type='lifo')

            # Now we graffiti this chart with the 'one_shot_uuid' attribute so
            # that we can cancel it upon exiting the state
            chart.augment(other=one_shot_uuid, name='one_shot_uuid')
            status = return_status.HANDLED

          elif(e.signal == signals.EXIT_SIGNAL):
            chart.cancel_event(uuid=chart.one_shot_uuid)
            del(chart.one_shot_uuid)
            status = return_status.HANDLED

          elif(e.signal == signals.TIME_OUT):
            status = chart.trans(other_state_function)

          else:
            status, chart.temp.fun = return_status.SUPER, chart.top

          return status

    '''
        if deferred is None:
            deferred = True
        if queue_type is None:
            queue_type = 'fifo'
        if times is None:
            times = 1
        # if our times are set to 1 and there is no period then just post our event
        # to the fifo/lifo
        if times == 1 and period is None:
            if queue_type == 'fifo':
                self.post_fifo(e, period=None)
            else:
                self.post_lifo(e, period=None)
            # nothing was scheduled, so there is no thread name to return
            return None
        else:
            # create an exit event for the task, it will be shared with the
            # cancel_event/cancel_events methods, so that the task can be stopped by
            # someone using the ActiveObject api
            task_run_event = ThreadEvent()
            task_run_event.set()

            # set up the specification for this task
            posted_event_thread_spec = \
              self.PostedEventThreadSpec(
                event=e,
                queue_type=queue_type,
                deferred=deferred,
                period=period,
                total_times=times,
                task_run_event=task_run_event,
              )

            def post_event_thread_runner(spec, deferred, times_activated):
                # We have an Event object here that can be controlled by
                # something outside of our task.  If it is cleared, then this
                # thread will just exit and disappear from the system.
                while spec.task_run_event.is_set():
                    if deferred:
                        time.sleep(spec.period)
                    else:
                        # Pretend that we waited the first time we entered this
                        # function; this way we hit the time.sleep on every pass
                        # through from now on.
                        deferred = True
                    # we might have been cancelled while we were sleeping
                    if spec.task_run_event.is_set() is not True:
                        break

                    times_activated += 1
                    if spec.queue_type == 'fifo':
                        self.post_fifo(spec.event)
                    else:
                        self.post_lifo(spec.event)

                    # If we don't want to run forever we can clear our own Event
                    if spec.total_times != 0:
                        if (times_activated >= spec.total_times):
                            spec.task_run_event.clear()

            thread = Thread(target=post_event_thread_runner,
                            args=(posted_event_thread_spec,
                                  posted_event_thread_spec.deferred, 0),
                            daemon=True)
            thread.name = uuid.uuid4()
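            # the UUID thread name is stored in posted_events_queue below so
            # cancel_event(uuid=...) can later find and stop this thread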
            thread.start()

            # If we have run out of spots in our queue we should issue an
            # ActiveObjectOutOfPostedEventResources since it indicates a MAJOR design
            # problem
            if (len(self.posted_events_queue) < self.__class__.QUEUE_SIZE):
                # track this thread in our posted_events deque
                self.posted_events_queue.append(
                    self.PostedEvent(
                        e.signal_name,
                        task_run_event,
                        thread.name,
                    ))
            else:
                # Have the timer thread that we just constructed shut down (we
                # can't manage it in our posted_events deque)

                # This could easily happen if the user creates posted_event items on
                # entry and doesn't cancel them upon exiting the same state (see
                # comment in this function's docstring)
                pp(self.posted_events_queue)
                task_run_event.clear()
                raise ActiveObjectOutOfPostedEventResources(
                    "posted_events_queue size is too small for what you have asked for")

        return thread.name