def finisher(outq, output_queue_handler, kill_boolean):
    """
    execute output_func one time per item in outq
    output_func will be provided an item from outq and only that

    this function is executed in it's own thread but only single threaded
    :param outq: output queue that we will pull from
    :param output_queue_handler: function to run on every item in outq
    :param kill_boolean: set by worker thread indicates we should all die

    :type outq: Queue
    :type output_queue_handler: callable
    :type kill_boolean: threading.Event
    """
    assert isinstance(outq, Queue)
    assert callable(output_queue_handler)
    assert isinstance(kill_boolean, threading.Event)
    
#     Disabled: I have not been able to make this arity check work reliably
#     across class methods, regular functions, and callable classes.
#     if get_num_input_vars(output_queue_handler) != 1:
#         kill_boolean.set()
#         raise RuntimeError("output_queue_handler must take at least one argument!")
    
    while True:
        if kill_boolean.is_set():
            LOGGER.warning("Got a death threat from kill_boolean, quitting")
            return

        # Block for at most a second so the kill_boolean check above still
        # runs even if outq stays empty (Empty comes from the queue module)
        try:
            output_var = outq.get(timeout=1)
        except Empty:
            continue

        # This is our death signal; the identity check works because the
        # sentinels package returns the same cached instance for a given name
        if output_var is Sentinel("DIE"):
            LOGGER.warning("Finisher queue received death threat, quitting - if this didn't happen at the end of the program there's a problem")
            # Need to mark execution as complete!
            return
        try:
            output_queue_handler(output_var)
        except KillExecution:
            LOGGER.warning("we got a KillExecution exception inside of finisher, killing off our execution and returning")
            kill_boolean.set()
            return
    
    # Unreachable: the loop above only exits via return; this is purely defensive
    raise DMTEEerror("We should never get here, somehow we exited our while loop")
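The identity check output_var is Sentinel("DIE") only works because the sentinels package caches instances by name, so repeated constructions with the same name return the same object. A minimal sketch of the pattern, with a hypothetical consumer loop standing in for finisher:

from queue import Queue
from sentinels import Sentinel

# Repeated construction with the same name yields the same cached instance,
# which is what makes "is" comparisons work across call sites
assert Sentinel("DIE") is Sentinel("DIE")

q = Queue()
q.put("some result")
q.put(Sentinel("DIE"))

while True:
    item = q.get()
    if item is Sentinel("DIE"):
        break
    print("handled:", item)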
Example #2
from contextlib import contextmanager

import logbook
from sentinels import Sentinel
from vintage import warn_deprecation

from .. import hooks
from ..ctx import context
from ..exception_handling import handling_exceptions
from ..exceptions import CannotAddCleanup, IncorrectScope

_logger = logbook.Logger(__name__)



_LAST_SCOPE = Sentinel('LAST_SCOPE')
_DEDUCE = Sentinel('DEDUCE')



class CleanupManager(object):

    def __init__(self):
        super(CleanupManager, self).__init__()
        self._scope_stack = []
        self._scopes_by_name = {}
        self._pending = []
        self._allow_implicit_scopes = True
        self._default_scope = None

    @contextmanager
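Sentinels like _DEDUCE above are typically used as default-argument markers, letting a function distinguish "caller did not pass this argument" from an explicit None. A hedged sketch of the idiom; add_cleanup here is a hypothetical function, not CleanupManager's real API:

from sentinels import Sentinel

_DEDUCE = Sentinel('DEDUCE')

def add_cleanup(func, scope=_DEDUCE):
    # A sentinel default distinguishes an omitted scope from an explicit
    # scope=None, which a plain None default could not do
    if scope is _DEDUCE:
        scope = "deduced-from-context"  # placeholder for the real deduction logic
    return func, scope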
Example #3
from .query_utils import add_comma_separated_query_param
from .python import end_reraise_context
from .replication import handle_possible_replication_snapshot
from sentinels import Sentinel

DONT_CARE = Sentinel("DONT_CARE")
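A sentinel named DONT_CARE usually serves as a wildcard in comparisons. A minimal sketch, where fields_match is a hypothetical helper and not part of the source library:

from sentinels import Sentinel

DONT_CARE = Sentinel("DONT_CARE")

def fields_match(expected, actual):
    # Compare dicts field by field, skipping anything marked DONT_CARE
    return all(value is DONT_CARE or actual.get(key) == value
               for key, value in expected.items())

assert fields_match({"state": "ok", "id": DONT_CARE}, {"state": "ok", "id": 42})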
Example #4
from types import GeneratorType

import logbook
from sentinels import Sentinel

from . import groups, registry
from ._compat import itervalues, string_types
from .exceptions import (CannotResolveDependencies, HookNotFound,
                         NameAlreadyUsed, NotNowException, UndefinedHook,
                         CannotMuteHooks, UnsupportedHookTags)
from .registration import Registration
from .utils import topological_sort_registrations

from vintage import warn_deprecation

_logger = logbook.Logger(__name__)

_REGISTER_NO_OP = Sentinel('REGISTER_NO_OP')


class Hook(object):
    def __init__(self,
                 group,
                 name,
                 arg_names=None,
                 doc=None,
                 deprecated=False,
                 can_be_muted=None):
        super(Hook, self).__init__()
        self.group = group
        self.name = name
        self.tags = None
        if self.group.is_global():
Example #5
    is_fragment,
)


__all__ = [
    "AUTO",
    "vc2_default_values_with_auto",
    "autofill_picture_number",
    "autofill_major_version",
    "autofill_parse_offsets",
    "autofill_parse_offsets_finalize",
    "autofill_and_serialise_stream",
]


AUTO = Sentinel("AUTO")
"""
A constant which may be placed in a
:py:mod:`~vc2_conformance.bitstream.vc2_fixeddicts` fixed dictionary field to
indicate that the various ``autofill_*`` functions in this module should
automatically compute a value for that field.
"""

vc2_default_values_with_auto = deepcopy(vc2_default_values)
"""
Like :py:data:`vc2_conformance.bitstream.vc2_default_values` but with
:py:data:`AUTO` set as the default value for all fields which support it.
"""

vc2_default_values_with_auto[ParseInfo]["next_parse_offset"] = AUTO
vc2_default_values_with_auto[ParseInfo]["previous_parse_offset"] = AUTO
Example #6
def execute_dynamic_multithreaded_task(iterable, thread_checker_func,
                                       poll_period, worker_function,
                                       output_queue_handler):
    """
    Execute a function for every item in iterable with a dynamic number of threads as defined by the return of thread_checker_func

    :type iterable: any iterable
    :type thread_checker_func: function with zero parameters and returns int of # of threads should be running
    :type poll_period: int
    :type worker_function: function with at least 1 parameter
    :type output_queue_handler: function with at least 1 parameter

    :param iterable: Iterable to pass into worker_function
    :param thread_checker_func: function that accepts no args and will return int for # of threads we should run
    :param poll_period: how often (in sec) we will run thread_checker_func
    :param worker_function: function that will be run multi-threaded and once per item in file_list
    :param output_queue_handler: consume things that worker_function returns. this will run single threaded, once per execution

    :rtype : None - output_queue_handler should handle all output functionality
    """
    LOGGER.info("starting dynamic multithreaded execution")

    # Type checking on all inputs
    assert isinstance(iterable, collections.abc.Iterable)  # collections.Iterable was removed in Python 3.10
    assert callable(thread_checker_func)
    assert isinstance(poll_period, six.integer_types)
    assert callable(worker_function)
    assert callable(output_queue_handler)

    LOGGER.info("all assertions passed")

    # Validate that the callables have usable signatures (each should accept the right number of arguments).
    # Disabled: I haven't been able to get this to work reliably across callable classes, functions, and class methods.
    #     if get_num_input_vars(worker_function) != 1:
    #         raise RuntimeError("worker_function must accept one and only one input")
    #
    #     if get_num_input_vars(output_queue_handler) != 1:
    #         raise RuntimeError("output_queue_handler must accept one and only one input")
    #
    #     if get_num_input_vars(thread_checker_func) != 0:
    #         raise RuntimeError("thread_checker_func must accept no inputs")
    #
    #     LOGGER.info("callables appear to have ok inputs")

    # prep the thread-wide variables
    inq = Queue()  # queue of input items for the workers
    outq = Queue()  # queue of results consumed by the finisher thread
    deathq = Queue()  # queue used to tell the next idle worker thread to die
    kill_boolean = threading.Event()

    LOGGER.info("loading up inq")
    # Load up inq
    inq.queue.extend(iterable)

    thread_list = []

    # spin up our finisher thread
    LOGGER.info("starting up finisher thread")
    fin_thread = threading.Thread(target=finisher,
                                  kwargs={
                                      "outq": outq,
                                      "output_queue_handler":
                                      output_queue_handler,
                                      "kill_boolean": kill_boolean
                                  })
    fin_thread.start()

    # do all the executions, scaling up/down as needed
    LOGGER.info("entering infinite loop (until job is done)")

    # initialize the target so the first thread_checker_func call has a value to receive
    target_threads = 0

    while True:
        last_run = datetime.datetime.now()
        if kill_boolean.is_set():
            # everything should spin down and die
            LOGGER.debug("kill_boolean is true, we are going to stop now!")
            return

        if not inq.empty():
            # get new target for our threads
            target_threads = thread_checker_func(target_threads)

            # This could be done better: right now we block until all deathq items are taken.
            # We could instead track the deathq's depth and scale threads based on it, which
            # would make the deathq more accurate and reduce up/down churn. The concern is
            # that this "control" algorithm gets out of whack and vacillates too much,
            # especially since we affect BDB load.

            # probably not needed, but pruning dead threads here just in case
            thread_list = [t for t in thread_list if t.is_alive()]

            # spin up threads if need be
            while len(thread_list) < target_threads:
                LOGGER.debug("spinning up a new worker thread")
                base_kwargs = {
                    "inq": inq,
                    "outq": outq,
                    "deathq": deathq,
                    "worker_function": worker_function,
                    "kill_boolean": kill_boolean
                }
                t = threading.Thread(target=worker, kwargs=base_kwargs)
                t.start()
                thread_list.append(t)

            # kill any extra threads
            thread_overage = len(thread_list) - target_threads
            for i in range(thread_overage):
                # kill em
                LOGGER.debug("sending death signal to deathq")
                deathq.put(Sentinel("DIE"))

            # wait for the deathq to drain; each sentinel is consumed by a thread as it dies
            # TODO: add a timeout here and forcibly kill threads that never exit
            while not deathq.empty():
                time.sleep(1)

            # deathq is empty, which means we should have killed off however many threads we needed to
            # keeping this out of the if statement above in case we get exceptions in our child threads, we can spin up new workers
            thread_list = [t for t in thread_list if t.is_alive()]

            LOGGER.debug("Currently have %s threads running", len(thread_list))

        # only check for load every [poll_period] seconds
        while (datetime.datetime.now() - last_run).total_seconds() < poll_period:
            # Need to check if we're actually done
            if inq.empty():
                # inq is empty, we need to see if we have any threads
                thread_list = [t for t in thread_list if t.is_alive()]
                if not thread_list:
                    LOGGER.info(
                        "All worker threads are done, killing finisher thread")
                    outq.put(Sentinel("DIE"))

                    # wait for finisher thread to die
                    while fin_thread.is_alive():
                        LOGGER.info(
                            "finisher thread is still running, sleeping")
                        time.sleep(1)

                    LOGGER.info("All threads have spun down, returning!")
                    return
                else:
                    LOGGER.info(
                        "inq is empty, but looks like we still have %s threads running, we will wait until all threads complete",
                        len(thread_list))

            time.sleep(1)
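A hedged end-to-end usage sketch follows, assuming (as the docstring describes) that the unshown worker wrapper puts each worker_function return value onto outq; the callables here are toy stand-ins:

import multiprocessing

def double_it(item):
    # runs once per item, potentially across many threads
    return item * 2

def print_result(result):
    # runs single-threaded in the finisher thread, once per result
    print("got result:", result)

def pin_to_cpu_count(current_target):
    # called every poll_period seconds with the current thread target;
    # here we simply pin the pool to the machine's CPU count
    return multiprocessing.cpu_count()

execute_dynamic_multithreaded_task(
    iterable=range(100),
    thread_checker_func=pin_to_cpu_count,
    poll_period=5,
    worker_function=double_it,
    output_queue_handler=print_result,
)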