    def __init__(self, _registry: EventsProcessesRegistry):
        self._registry = _registry
        self._events_counter = multiprocessing.Value(ctypes.c_uint32, 0)

        self._running = multiprocessing.Event()
        self._sub_port = multiprocessing.Value(ctypes.c_uint16, 0)
        self._queue = multiprocessing.Queue()
        self._raw_events_lock = multiprocessing.RLock()

        self.events_log_base_dir.mkdir(parents=True, exist_ok=True)

        super().__init__(daemon=True)
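
A minimal sketch of how a counter like the one above is typically bumped: the explicit RLock guards the read-modify-write on the shared Value, and being re-entrant it tolerates nested calls from the same process. The class and method names here are assumptions for illustration, not part of the original code.

import ctypes
import multiprocessing

class EventCounter:
    def __init__(self):
        # Process-shared counter plus a re-entrant lock, mirroring the fields above.
        self._events_counter = multiprocessing.Value(ctypes.c_uint32, 0)
        self._raw_events_lock = multiprocessing.RLock()

    def record_event(self):
        # `value += 1` is not atomic, so take the lock around the update;
        # a caller that already holds the RLock may call this safely.
        with self._raw_events_lock:
            self._events_counter.value += 1
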
Example n. 2
def test():
    manager = multiprocessing.Manager()

    gc.disable()

    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing multiprocessing.Queue\n')
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing multiprocessing.Pipe\n')
    test_pipespeed()

    print()

    print('\n\t######## testing list\n')
    test_seqspeed(list(range(10)))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(list(range(10))))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=True))

    print()

    print('\n\t######## testing threading.Lock\n')
    test_lockspeed(threading.Lock())
    print('\n\t######## testing threading.RLock\n')
    test_lockspeed(threading.RLock())
    print('\n\t######## testing multiprocessing.Lock\n')
    test_lockspeed(multiprocessing.Lock())
    print('\n\t######## testing multiprocessing.RLock\n')
    test_lockspeed(multiprocessing.RLock())
    print('\n\t######## testing lock managed by server process\n')
    test_lockspeed(manager.Lock())
    print('\n\t######## testing rlock managed by server process\n')
    test_lockspeed(manager.RLock())

    print()

    print('\n\t######## testing threading.Condition\n')
    test_conditionspeed(threading.Thread, threading.Condition())
    print('\n\t######## testing multiprocessing.Condition\n')
    test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
    print('\n\t######## testing condition managed by a server process\n')
    test_conditionspeed(multiprocessing.Process, manager.Condition())

    gc.enable()
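
The test_queuespeed/test_lockspeed/test_conditionspeed helpers are not reproduced on this page. A minimal sketch of what a lock-timing helper in this spirit could look like (the name, iteration count, and output format are assumptions):

import time

def test_lockspeed(lock, iterations=100000):
    # Time repeated acquire/release pairs on whatever lock object is passed in,
    # which is why the caller above can hand it threading, multiprocessing, or
    # manager-backed locks interchangeably.
    start = time.monotonic()
    for _ in range(iterations):
        lock.acquire()
        lock.release()
    elapsed = time.monotonic() - start
    print('%d acquire/release pairs took %.3f s' % (iterations, elapsed))
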
Example n. 3
    def __init__(self):
        self._lock = multiprocessing.RLock()

        # Mapping of a key to an object
        #: :type: dict of (str, object)
        self._obj_cache = {}

        # Mapping of a key to the last access time for that object (UNIX time)
        #: :type: dict of (str, float)
        self._obj_last_access = {}

        # Mapping of timeout values for each entry
        self._obj_timeouts = {}
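
A hedged sketch of how this cache might be used: every read and write goes through the shared RLock, so nested helper calls in the same process do not deadlock. The class and method names below are illustrative, not the original implementation.

import multiprocessing
import time

class TimedObjectCache:
    def __init__(self):
        self._lock = multiprocessing.RLock()
        self._obj_cache = {}
        self._obj_last_access = {}
        self._obj_timeouts = {}

    def put(self, key, obj, timeout=None):
        # Update the object, its last-access time, and its timeout as one step.
        with self._lock:
            self._obj_cache[key] = obj
            self._obj_last_access[key] = time.time()
            self._obj_timeouts[key] = timeout

    def get(self, key):
        # Record the access time while returning the cached object.
        with self._lock:
            self._obj_last_access[key] = time.time()
            return self._obj_cache[key]
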
Example n. 4
    def __init__(self, pid):
        self.__watchermsg = multiprocessing.Manager().dict()
        self.__WatchPid = pid
        self.__watchermsg['MaxRam'] = 0
        self.__watchermsg['Stop'] = False
        self.__watchermsg['Begintime'] = 0
        self.__watchermsg['Endtime'] = 0
        self.__watchermsg['infertime'] = 0
        self.__Lock = multiprocessing.RLock()

        self.__WatchPeocess = multiprocessing.Process(name="Watcher",
                                                      target=self.WatchRam,
                                                      args=())
Example n. 5
 def __init__(self, connection, path):
     #zk connection
     self.connection = connection
     self.path = path
     self.deletion_handlers = {}
     # Ensures path exists
     self.connection.create_recursive(self.path + PREFIX,
                                      "",
                                      acl=zc.zk.OPEN_ACL_UNSAFE)
     self.lock = multiprocessing.RLock()
     # Stores leader queues
     self.candidate_by_predecessor = {}
     self.counter_by_candidate = {}
Example n. 6
class AsyncFunctionWrapper(object):
    _thread_pools = {}
    _pool_lock = multiprocessing.RLock()
    _async_pool_size = 10
    def __init__(self):
        self.max_tries = 1

    #TODO: evaluate if this could be a classmethod;
    def get_async(self, func, *args, **kwargs):
        return self._get_wrapped_async_func(func, *args, **kwargs)

    @classmethod
    def _get_thread_pool(cls):
        '''Get the thread pool for this process.'''
        with cls._pool_lock:
            try:
                return cls._thread_pools[os.getpid()]
            except KeyError:
                pool = concurrent.futures.ThreadPoolExecutor(
                    cls._async_pool_size)
                cls._thread_pools[os.getpid()] = pool
                return pool

    def _get_wrapped_async_func(self, func, *args, **kwargs):
        '''Returns an asynchronous function wrapped around the given func.
        The asynchronous call has a callback keyword added to it
        '''
        def AsyncWrapper(*args, **kwargs):
            # Find the callback argument
            try:
                callback = kwargs['callback']
                del kwargs['callback']
            except KeyError:
                if len(args) > 0 and hasattr(args[-1], '__call__'):
                    callback = args[-1]
                    args = args[:-1]
                else:
                    raise AttributeError('A callback is necessary')

            io_loop = tornado.ioloop.IOLoop.current()

            def _cb(future, cur_try=0):
                if future.exception() is None:
                    callback(future.result())
                else:
                    _log.error('Error executing the function: %s' % future.exception())
                    raise future.exception()
            future = AsyncFunctionWrapper._get_thread_pool().submit(
                func, *args, **kwargs)
            io_loop.add_future(future, _cb)
        return AsyncWrapper
Example n. 7
    def __init__(self, data_file, out_file):
        # Process the arguments to __init__.
        self.data_file = data_file
        self.out_file = out_file
        tmp_home = os.path.expanduser('~/.sim_tmp')
        if not os.path.exists(tmp_home):
            os.mkdir(tmp_home)
        self.tmp_dir = tempfile.mkdtemp(dir=tmp_home)

        # Load and process the parameter on-disk.
        mat = scipy.io.loadmat(data_file)

        self.tr_file = str(mat['tr_file'][0])
        self.te_file = str(mat['te_file'][0])
        self.n_train = int(np.ravel(mat['nTrain']))
        self.classes = np.ravel(mat['classes'])
        self.eval_dir = str(mat['eval_dir'][0])

        del mat, data_file, out_file

        # Load the data matrices, the actual feature values and labels.
        self.X_te = utils.make_mmap(self.tmp_dir, 'X_te.mmap', self.te_file, 'x')
        gc.collect()
        self.y_te = utils.make_mmap(self.tmp_dir, 'y_te.mmap', self.te_file, 'y')
        gc.collect()
        self.X_tr = utils.make_mmap(self.tmp_dir, 'X_tr.mmap', self.tr_file, 'x')
        gc.collect()
        self.y_tr = utils.make_mmap(self.tmp_dir, 'y_tr.mmap', self.tr_file, 'y')
        gc.collect()

        # Parallelize over up to 32 CPUs.
        self.nCPUs = min(32, mp.cpu_count())

        # Create queues to hold intermediate outputs.
        self.q0 = mp.Queue()
        self.q1 = mp.Queue()
        self.q2 = mp.Queue()

        # Create a lock to prevent races/deadlocks when printing to screen.
        self.print_lock = mp.RLock()

        gc.collect()

        # create a sub-process for each part of the evaluation.
        self.ps = [mp.Process(target=self.add_specs, args=())] + \
                  [mp.Process(target=self.specs_to_inputs, args=()) for _ in range(self.nCPUs)] + \
                  [mp.Process(target=self.run_wrapper, args=()) for _ in range(self.nCPUs)] + \
                  [mp.Process(target=self.save_data, args=())]

        # tell the log that you've finished
        print('initialized at', time.time())
Example n. 8
 def __init__(self, route_store):
     super(DriverManager, self).__init__()
     self._navigator = Navigator(route_store)
     self._navigation_queue = deque(maxlen=10)
     self._principal_steer_scale = 0
     # Static conversion factor of m/s to km/h.
     self._speed_scale = 3.6
     self._cruise_speed_step = 0
     self._steering_stabilizer = None
     self._pilot_state = PilotState()
     self._driver_cache = {}
     self._lock = multiprocessing.RLock()
     self._driver = None
     self._driver_ctl = None
Example n. 9
    def __init__(self, accounts=None):
        """
        Constructor.

        :type  accounts: Account|list[Account]
        :param accounts: Passed to add_account()
        """
        self.accounts = set()
        self.unlocked_accounts = deque()
        self.owner2account = defaultdict(list)
        self.account2owner = dict()
        self.unlock_cond = multiprocessing.Condition(multiprocessing.RLock())
        if accounts:
            self.add_account(accounts)
Example n. 10
    def __init__(self, port=9200, host="localhost", version=None) -> None:
        super().__init__()
        self._port = port
        self._host = host
        self._version = version or constants.ELASTICSEARCH_DEFAULT_VERSION

        self._cluster = None
        self._backend_port = None
        self._proxy_thread = None

        self._lifecycle_lock = multiprocessing.RLock()
        self._started = False
        self._stopped = multiprocessing.Event()
        self._starting = multiprocessing.Event()
Example n. 11
    def setUp(self):
        tmpFile = tempfile.NamedTemporaryFile(delete=False)
        tmpFile.close()

        self.dummyFileName = tmpFile.name

        self.processList = []

        try:
            self.multiprocessing_lock = multiprocessing.RLock()
        except ImportError:
            self.multiprocessing_lock = None  # *bsd platforms without proper synchronization primitives

        self.multithreading_lock = threading.RLock()
Example n. 12
    def __init__(self, manager, elem_entries, size=5000):
        from trainer_tool.buffers.buffer_utils import make_buffer
        self.buffers, self.types, self.ctypes, self.shapes = make_buffer(
            manager, elem_entries, size=size)

        self.ptr = manager.mp_manager.Value('i', 0)
        self.curr_size = manager.mp_manager.Value('i', 0)
        self.max_size = size

        # Used to coordinate reading and writing between processes; ensures at most one process reads at any time
        self.write_lock = mp.RLock()
        self.views = None

        self.elem_entries = elem_entries
Example n. 13
def install_s3mi(installed={}, mutex=TraceLock("install_s3mi", multiprocessing.RLock())):  # pylint: disable=dangerous-default-value
    with mutex:
        if installed:  # Mutable default value persists
            return
        try:
            # This is typically a no-op.
            command.execute(
                "which s3mi || pip install git+git://github.com/chanzuckerberg/s3mi.git"
            )
            command.execute(
                "s3mi tweak-vm || echo s3mi tweak-vm sometimes fails under docker. Continuing..."
            )
        finally:
            installed['time'] = time.time()
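
The default arguments act as a one-time cache: installed and mutex are created when the function is defined, so every call in the process sees the same dict and the same re-entrant lock. A stripped-down sketch of the pattern (the function name and body are placeholders, not the original setup logic):

import multiprocessing
import time

def install_once(installed={}, mutex=multiprocessing.RLock()):  # pylint: disable=dangerous-default-value
    # The mutable default persists across calls, so it doubles as a
    # "has this already run?" flag; the RLock serialises the first run.
    with mutex:
        if installed:
            return
        # ... perform the expensive one-time setup here ...
        installed['time'] = time.time()
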
Example n. 14
def publish():
    """
    The main render script.
    """
    num_w = 56

    black_list = Blacklist()
    valid_data = construct_data_dirs(black_list)
    print(valid_data)

    print("Publishing segments: ")
    num_segments = []
    if E('errors.txt'):
        os.remove('errors.txt')
    try:
        multiprocessing.freeze_support()
        with multiprocessing.Pool(
                num_w,
                initializer=tqdm.tqdm.set_lock,
                initargs=(multiprocessing.RLock(), )) as pool:
            manager = ThreadManager(multiprocessing.Manager(), num_w, 1, 1)
            func = functools.partial(_render_data, DATA_DIR, manager)
            num_segments = list(
                tqdm.tqdm(pool.imap_unordered(func, valid_data),
                          total=len(valid_data),
                          desc='Files',
                          miniters=1,
                          position=0,
                          maxinterval=1))

            # for recording_name, render_path in tqdm.tqdm(valid_renders, desc='Files'):
            #     num_segments_rendered += gen_sarsa_pairs(render_path, recording_name, DATA_DIR)
    except Exception as e:
        if isinstance(e, KeyboardInterrupt):
            pool.terminate()
            pool.join()
            raise e
        print('\n' * num_w)
        print("Exception in pool: ", type(e), e)
        print('Vectorized {} files in total!'.format(sum(num_segments)))
        raise e

    num_segments_rendered = sum(num_segments)

    print('\n' * num_w)
    print('Vectorized {} files in total!'.format(num_segments_rendered))
    if E('errors.txt'):
        print('Errors:')
        print(open('errors.txt', 'r').read())
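
The pool above is created with initializer=tqdm.tqdm.set_lock and a fresh multiprocessing.RLock(), so every worker process shares one lock for progress-bar output instead of interleaving writes. A minimal sketch of just that wiring, assuming only that tqdm is installed (the worker function and item list are placeholders):

import multiprocessing

import tqdm

def _work(item):
    # Placeholder worker; real code would do the rendering here.
    return item * item

def run_pool(items, num_workers=4):
    with multiprocessing.Pool(num_workers,
                              initializer=tqdm.tqdm.set_lock,
                              initargs=(multiprocessing.RLock(),)) as pool:
        # The shared RLock keeps the workers' tqdm updates from clobbering
        # each other on the terminal.
        return list(tqdm.tqdm(pool.imap_unordered(_work, items),
                              total=len(items)))
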
Example n. 15
    def __init__(self):
        if '_ready' not in dir(self):
            # The constructor runs on every instantiation.
            # One-time work, such as initializing class variables, therefore belongs in this block.
            self._ready = True
            self.lock = multiprocessing.RLock()
            self.apps = {}

            self.waitingCondition = multiprocessing.Condition()
            self.manager = multiprocessing.Manager()
            self.namespace = self.manager.Namespace()
            self.appDeleteListener = AppDeleteListener(self,
                                                       self.waitingCondition,
                                                       self.namespace)
            self.appDeleteListener.start()
Example n. 16
def init_logger(logger, level=LOGLEVEL):
    logger.setLevel(LOGLEVELS[level])
    if not len(logger.handlers):
        handler = logging.handlers.RotatingFileHandler(LOGFILE,
                                                       maxBytes=LOGFILESIZE,
                                                       backupCount=LOGFILES)
        # This formatter removes the fractions of a second in the time.
        #       formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
        #                                     '%Y-%m-%d %H:%M:%S %Z')
        # The default formatter contains fractions of a second in the time.
        formatter = logging.Formatter(
            '%(asctime)-15s %(levelname)-8s %(message)s')
        handler.setFormatter(formatter)
        handler.lock = multiprocessing.RLock()
        logger.addHandler(handler)
Example n. 17
def refreshed_credentials(credentials_mutex=multiprocessing.RLock(),
                          credentials_cache={}):  # pylint: disable=dangerous-default-value
    with credentials_mutex:
        if credentials_cache.get("expiration_time", 0) < time.time() + 5 * 60:
            try:
                credentials_cache["vars"] = _get_credentials()
            except:
                log.write(
                    "ERROR:  Failed to refresh credentials with boto, even after retries.  Subcommands will have to do it themselves."
                )
                log.write(traceback.format_exc())
                return {}
            credentials_cache["expiration_time"] = time.time(
            ) + 15 * 60  # this is the default for most accounts
    return credentials_cache["vars"]
Example n. 18
    def __init__(self):
        self._lock = multiprocessing.RLock()
        self._vars = {}

        # Find the root directory of the source tree
        cur_dir = os.path.abspath(os.path.dirname(__file__))
        while cur_dir != '/':
            if os.path.exists(os.path.join(cur_dir, 'NEON_ROOT')):
                self._NEON_ROOT = cur_dir
                break
            cur_dir = os.path.abspath(os.path.join(cur_dir, '..'))

        if cur_dir == '/':
            raise Error(
                'Could not find the NEON_ROOT file in the source tree.')
Example n. 19
 def _initialize_context_lock(self, safety):
     """initialize context lock used for thread or process safety"""
     if safety == 'thread':
         self.lock = threading.RLock()
     elif safety == 'process':
         self.lock = multiprocessing.RLock()
     elif safety is None:
         # nullcontext in python3.7 and above
         # see https://stackoverflow.com/a/45187287
         if hasattr(contextlib, 'nullcontext'):
             self.lock = contextlib.nullcontext()
         else:
             self.lock = contextlib.suppress()
     else:
         raise Exception('safety must be \'thread\', \'process\', or None')
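
Whichever branch runs, the chosen object is used the same way afterwards, since multiprocessing.RLock, threading.RLock, and nullcontext all work as context managers. A hedged sketch of that uniform usage; the container class is an assumption, and Python 3.7+ is assumed so contextlib.nullcontext is available:

import contextlib
import multiprocessing
import threading

class SafeList:
    def __init__(self, safety=None):
        # Lock selection mirroring the method above (nullcontext fallback omitted).
        if safety == 'thread':
            self.lock = threading.RLock()
        elif safety == 'process':
            self.lock = multiprocessing.RLock()
        elif safety is None:
            self.lock = contextlib.nullcontext()
        else:
            raise ValueError("safety must be 'thread', 'process', or None")
        self._items = []

    def append(self, item):
        # A no-op guard when safety is None, a re-entrant lock otherwise.
        with self.lock:
            self._items.append(item)
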
Example n. 20
def test_semaphore():
    sema = multiprocessing.Semaphore(3)
    mutex = multiprocessing.RLock()
    running = multiprocessing.Value('i', 0)

    processes = [
        multiprocessing.Process(target=semaphore_func,
                                args=(sema, mutex, running)) for i in range(10)
    ]

    for p in processes:
        p.start()

    for p in processes:
        p.join()
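
semaphore_func is not shown on this page; a guess at what such a worker could look like, based only on the arguments being passed (entirely illustrative):

import random
import time

def semaphore_func(sema, mutex, running):
    # The Semaphore(3) above admits at most three workers at a time; the
    # RLock serialises updates to the shared `running` counter.
    with sema:
        with mutex:
            running.value += 1
            print(running.value, 'tasks are running')
        time.sleep(random.random() * 2)
        with mutex:
            running.value -= 1
            print(running.value, 'tasks are running')
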
Example n. 21
def main():
    """
    The main render script.
    """
    # 1. Load the blacklist.
    blacklist = set(np.loadtxt(BLACKLIST_PATH, dtype=np.str).tolist())

    print("Constructing render directories.")
    renders = construct_render_dirs(blacklist)

    print("Validating metadata from files:")
    valid_renders, invalid_renders = render_metadata(renders)
    print(len(valid_renders))
    # print("Rendering actions: ")
    # valid_renders, invalid_renders = render_actions(valid_renders)
    print("... found {} valid recordings and {} invalid recordings"
          " out of {} total files".format(len(valid_renders),
                                          len(invalid_renders),
                                          len(os.listdir(MERGED_DIR))))

    unfinished_renders = [
        v for v in valid_renders if not len(glob.glob(J(v[1], '*.mp4')))
    ]

    print("... found {} unfinished renders out of {} valid renders".format(
        len(unfinished_renders), len(valid_renders)))

    print("Rendering videos: ")
    clean_render_dirs()

    # Render videos in multiprocessing queue
    multiprocessing.freeze_support()
    with multiprocessing.Pool(NUM_MINECRAFT_DIR,
                              initializer=tqdm.tqdm.set_lock,
                              initargs=(multiprocessing.RLock(), )) as pool:
        manager = ThreadManager(multiprocessing.Manager(), NUM_MINECRAFT_DIR,
                                0, 1)
        func = functools.partial(_render_videos, manager)
        num_rendered = list(
            tqdm.tqdm(pool.imap_unordered(func, unfinished_renders),
                      total=len(unfinished_renders),
                      desc='Files',
                      miniters=1,
                      position=0,
                      maxinterval=1,
                      smoothing=0))

    print('Rendered {} new files!'.format(sum(num_rendered)))
Example n. 22
    def compute_parallel(self, workers=4):
        self._check_mesh_arrays()

        mp.freeze_support()  # for Windows Support
        with mp.Pool(
                processes=workers,
                initializer=tqdm.set_lock,  # WinSup
                initargs=(mp.RLock(), )) as pool:
            fullstorages = self.meshstor.flatten()
            res_cycles = list(
                tqdm(pool.imap(self.calculate_single_point, fullstorages),
                     total=len(fullstorages)))
        for flatind, (res, cycles) in enumerate(res_cycles):
            ind2d = np.unravel_index(flatind, self.meshstor.shape)
            self.meshres[ind2d] = res
            self.meshcycle[ind2d] = cycles
Example n. 23
    def __init__(self, port=9200, host="localhost", version=None) -> None:
        super().__init__()
        self._port = port
        self._host = host
        self._version = version or constants.ELASTICSEARCH_DEFAULT_VERSION

        self.command_settings = {}

        self.directories = self._resolve_directories()

        self._elasticsearch_thread = None

        self._lifecycle_lock = multiprocessing.RLock()
        self._started = False
        self._stopped = multiprocessing.Event()
        self._starting = multiprocessing.Event()
Example n. 24
 def __init__(self, N, shape, dtype):
     print("SharedCircularBuffer created!")
     self._n = N  # The number of buffers in the ring
     self._dim = np.array(shape)  # The shape of each buffer
     self._dtype = dtype  # The type of the buffer or int bytes
     self._index = mp.Value('i',
                            0)  # The index of the next buffer to be put
     self._acquired = mp.Value(
         'i', -1)  # The index of the currently acquired buffer
     self._locks = []  # The locks for each buffer in the ring
     self._ring = []  # Handles for the shared memory for each buffer
     for i in range(N):
         arr = mp.sharedctypes.RawArray(dtype, 2 * int(np.prod(self._dim)))
         rlock = mp.RLock()
         self._ring.append(arr)
         self._locks.append(rlock)
Example n. 25
    def _create_mutable_fields(self, type, max_size):
        self._lock = multiprocessing.RLock()
        if type == int:
            self._value = multiprocessing.Value('i', 0, lock=self._lock)
        elif type == long:
            self._value = multiprocessing.Value('l', 0L, lock=self._lock)
        elif type == float:
            self._value = multiprocessing.Value('d', 0.0, lock=self._lock)
        elif issubclass(type, basestring):
            self._value = multiprocessing.Array('c', max_size, lock=self._lock)
        else:
            raise ValueError('Type %s is not supported' % type)

        self._data_status = multiprocessing.Value('b',
                                                  _Option._DATA_DEFAULT,
                                                  lock=self._lock)
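
Passing an existing lock via lock= makes each Value (or Array) wrap that same object, so several fields can be updated as one consistent step. A minimal Python 3 sketch of the idea (the field names are illustrative and unrelated to the Python 2 snippet above):

import multiprocessing

shared_lock = multiprocessing.RLock()
counter = multiprocessing.Value('i', 0, lock=shared_lock)
status = multiprocessing.Value('b', 0, lock=shared_lock)

def mark_ready():
    # counter.get_lock() and status.get_lock() return the same RLock, so
    # holding it once covers both updates.
    with shared_lock:
        counter.value += 1
        status.value = 1
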
Example n. 26
    def __init__(self, maxsize=0):
        if not isinstance(maxsize, int):
            raise TypeError('maxsize must be type of int')
        if maxsize < 0:
            raise ValueError('maxsize must be greater than equal to 0')

        self._maxsize = maxsize
        self._rlock = multiprocessing.RLock()

        self._size = multiprocessing.Value(ctypes.c_ulong, 0, lock=self._rlock)

        self._pipe_r, self._pipe_w = \
                itertools.starmap(
                        _fdopen,
                        zip(os.pipe(), ('rb', 'wb'), (0, 0)),
                        )
Example n. 27
    def __init__(
        self,
        observation_space: Space,
        action_space: Space,
        capacity=int(1e6),
        n_step=1,
        gamma=0.99,
        history_len=1,
        mode="numpy",
        logdir=None
    ):
        """
        Experience replay buffer for off-policy RL algorithms.

        Args:
            observation_space: space of environment observations,
                e.g. shape (8, ) for a vector of floats or (84, 84, 3) for an RGB image
            action_space: space of actions the agent can take,
                e.g. shape (3, ) for 3-dimensional continuous control
            capacity: replay buffer capacity
            history_len: number of subsequent observations considered a state
            n_step: number of time steps between the current state and the next
                state in TD backup
            gamma: discount factor
        """
        # @TODO: Refactor !!!
        self.observation_space = observation_space
        self.action_space = action_space
        self.history_len = history_len

        self.capacity = capacity
        self.n_step = n_step
        self.gamma = gamma

        self.len = 0
        self.pointer = 0
        self._store_lock = mp.RLock()

        self.observations, self.actions, self.rewards, self.dones = \
            _get_buffers(
                capacity=capacity,
                observation_space=observation_space,
                action_space=action_space,
                mode=mode,
                logdir=logdir
            )
Example n. 28
    def __init__(
        self,
        *,
        maxsize: int = 1,
        use_multiprocessing: bool = False,
        time_func: Callable[[], float] = perf_counter,
        monitor_thread: bool = False,
    ):
        """
        Initialize the actor system.
        :param maxsize: of the message queues
        :param use_multiprocessing: set True if you intend to add processes
        :param time_func: to measure performance
        :param monitor_thread: set True if you want the actor system to request finish when the caller thread finishes
        """

        self._maxsize = maxsize

        self._mailboxes = OrderedDict()
        self._actor_names = set()
        self._actors = list()
        self._actor_instances = set()

        self._multiprocessing = use_multiprocessing
        self._started = False

        self._enqueued_stop = defaultdict(lambda: 0)
        self._enqueued = defaultdict(lambda: 0)
        self._dequeued = defaultdict(lambda: 0)
        self._time_func = time_func

        _shared_lock = multiprocessing.RLock()
        self._shared_state = _SharedState(
            lock=_shared_lock,
            empty=multiprocessing.Condition(lock=_shared_lock),
            messages=multiprocessing.Value('l', lock=False),
        )

        self._collectors = dict()

        if monitor_thread:
            self._monitor_th = _ActorSystemMonitorThread(
                threading.current_thread(), self)
            self._monitor_th.start()
        else:
            self._monitor_th = None
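
Building the Condition on an explicit RLock, and sharing that lock with other state, is the standard way to coordinate processes around shared values. A minimal, self-contained sketch of such a producer/consumer pair; the function names are illustrative, not the actor system's API:

import multiprocessing

def consumer(cond, messages):
    # Wait until a producer has published at least one message.
    with cond:
        while messages.value == 0:
            cond.wait()
        print('consumer saw', messages.value, 'message(s)')

def producer(cond, messages):
    with cond:
        messages.value += 1
        cond.notify_all()

if __name__ == '__main__':
    lock = multiprocessing.RLock()
    cond = multiprocessing.Condition(lock=lock)
    messages = multiprocessing.Value('l', 0, lock=False)
    c = multiprocessing.Process(target=consumer, args=(cond, messages))
    p = multiprocessing.Process(target=producer, args=(cond, messages))
    c.start()
    p.start()
    p.join()
    c.join()
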
Example n. 29
    def __init__(self, ser_port, ser_baudrate, last_pos_count):
        if last_pos_count < 1:
            raise ValueError("last_pos_count shouldn't be less than 1")

        self._position_is_fresh = False
        self._last_pos_count = last_pos_count
        self._last_pos_container = []
        self._sync_locker = multiprocessing.RLock()

        self._serial = serial.Serial(port=ser_port, baudrate=ser_baudrate)
        #self._hot_reset()
        self._USBNMEA_OUT()

        self._keep_thread_alive = True
        self._reader_thread = threading.Thread(target=self._reader_thread_tf,
                                               daemon=True)
        self._reader_thread.start()
Example n. 30
def rave_pgf_logfile_client(name="RAVE-LOGFILE",
                            level=LOGLEVEL,
                            logfile=LOGFILE,
                            logfilesize=LOGFILESIZE,
                            nrlogfiles=LOGFILES):
    myLogger = logging.getLogger(name)
    if not len(myLogger.handlers):
        myLogger.setLevel(LOGLEVELS[level])
        handler = logging.handlers.RotatingFileHandler(logfile,
                                                       maxBytes=logfilesize,
                                                       backupCount=nrlogfiles)
        # The default formatter contains fractions of a second in the time.
        formatter = logging.Formatter(LOGFILE_FORMAT)
        handler.setFormatter(formatter)
        handler.lock = multiprocessing.RLock()
        myLogger.addHandler(handler)
    return myLogger
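
logging.Handler.acquire() and release() use the handler's lock attribute, so swapping in a multiprocessing.RLock (as above) only coordinates processes that actually share that lock, for example workers forked after the handler was configured. A hedged sketch of that wiring; the file path and logger name are placeholders:

import logging
import logging.handlers
import multiprocessing

def get_shared_logger(name='RAVE-LOGFILE', logfile='/tmp/rave.log'):
    logger = logging.getLogger(name)
    if not logger.handlers:
        handler = logging.handlers.RotatingFileHandler(logfile,
                                                       maxBytes=1 << 20,
                                                       backupCount=3)
        # Replace the handler's per-thread lock with a process-shared one.
        handler.lock = multiprocessing.RLock()
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger

def worker(i):
    # Under the fork start method the child inherits the configured handler
    # and its RLock, so emits from several processes serialise on one lock.
    get_shared_logger().info('hello from worker %d', i)

if __name__ == '__main__':
    get_shared_logger().info('parent configured logging')
    workers = [multiprocessing.Process(target=worker, args=(i,)) for i in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()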