Example #1
    def test_multi_processing_handler(self):
        from multiprocessing import Process, Queue
        from logbook.queues import (MultiProcessingHandler,
                                    MultiProcessingSubscriber)
        queue = Queue(-1)
        test_handler = logbook.TestHandler()
        subscriber = MultiProcessingSubscriber(queue)

        def send_back():
            # Runs in the child process: while the handler is pushed, the
            # warning below is serialized onto the shared queue instead of
            # being handled locally.
            handler = MultiProcessingHandler(queue)
            handler.push_thread()
            try:
                logbook.warn('Hello World')
            finally:
                handler.pop_thread()

        p = Process(target=send_back)
        p.start()
        p.join()

        # Back in the parent: pull one record off the queue and dispatch
        # it to the thread-bound TestHandler.
        test_handler.push_thread()
        try:
            subscriber.dispatch_once()
            self.assertTrue(test_handler.has_warning('Hello World'))
        finally:
            test_handler.pop_thread()
Example #2
import multiprocessing

from logbook.queues import MultiProcessingHandler, MultiProcessingSubscriber


def enable_multiprocess_log_handling(args):
    """
    Set up logging when using multiprocessing.
    """
    # log_handler() is an application helper defined elsewhere; it builds
    # the handler that records are ultimately written to.
    normal_handler = log_handler(args, thread_wrapping=False)
    manager = multiprocessing.Manager()
    queue = manager.Queue(-1)
    # While this handler is pushed, every record logged in this process is
    # serialized onto the managed queue instead of being handled directly.
    mp_handler = MultiProcessingHandler(queue)
    mp_handler.push_application()
    # A background thread drains the queue into normal_handler. Note that
    # the controller returned by dispatch_in_background() is discarded, so
    # the dispatcher cannot be stopped cleanly later.
    mp_sub = MultiProcessingSubscriber(queue)
    mp_sub.dispatch_in_background(normal_handler)
    return queue
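
The returned queue is what each worker process needs: a worker binds its own MultiProcessingHandler to it so that its records travel back to the subscriber. A minimal worker-side sketch (the worker function and its message are illustrative, not part of the original):

import logbook
from logbook.queues import MultiProcessingHandler

def worker(queue):
    # Hypothetical worker entry point: while the handler is bound, every
    # record this process logs is serialized onto the shared queue.
    with MultiProcessingHandler(queue):
        logbook.info('hello from a worker process')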
Example #3
def test_multi_processing_handler():
    from multiprocessing import Process, Queue
    from logbook.queues import MultiProcessingSubscriber
    queue = Queue(-1)
    test_handler = logbook.TestHandler()
    subscriber = MultiProcessingSubscriber(queue)

    # The target is a module-level callable rather than a closure so that
    # multiprocessing can pickle it when spawning the child process.
    p = Process(target=MultiProcessingHandlerSendBack(queue))
    p.start()
    p.join()

    with test_handler:
        subscriber.dispatch_once()
        assert test_handler.has_warning('Hello World')
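
MultiProcessingHandlerSendBack is defined elsewhere in logbook's test suite; it replaces the send_back closure of Example #1 with a module-level callable, since the process target must be picklable under the Windows spawn start method. A plausible reconstruction, not the verbatim original:

import logbook
from logbook.queues import MultiProcessingHandler

class MultiProcessingHandlerSendBack:
    def __init__(self, queue):
        self.queue = queue

    def __call__(self):
        # Runs in the child process: forward one warning over the queue.
        with MultiProcessingHandler(self.queue):
            logbook.warn('Hello World')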
Example #4
def test_multi_processing_handler():
    if os.getenv('APPVEYOR') == 'True':
        pytest.skip('Test hangs on AppVeyor CI')
    from multiprocessing import Process, Queue
    from logbook.queues import MultiProcessingSubscriber
    queue = Queue(-1)
    test_handler = logbook.TestHandler()
    subscriber = MultiProcessingSubscriber(queue)

    p = Process(target=MultiProcessingHandlerSendBack(queue))
    p.start()
    p.join()

    with test_handler:
        subscriber.dispatch_once()
        assert test_handler.has_warning('Hello World')
Example #5
def test_subscriber_group():
    from multiprocessing import Process, Queue
    from logbook.queues import MultiProcessingSubscriber, SubscriberGroup
    a_queue = Queue(-1)
    b_queue = Queue(-1)
    # SubscriberGroup multiplexes several subscribers: recv() returns the
    # next record available from any of them.
    subscriber = SubscriberGroup([
        MultiProcessingSubscriber(a_queue),
        MultiProcessingSubscriber(b_queue)
    ])

    for _ in range(10):
        p1 = Process(target=SubscriberGroupSendBack('foo', a_queue))
        p2 = Process(target=SubscriberGroupSendBack('bar', b_queue))
        p1.start()
        p2.start()
        p1.join()
        p2.join()
        messages = [subscriber.recv().message for _ in (1, 2)]
        assert sorted(messages) == ['bar', 'foo']
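
SubscriberGroupSendBack is likewise defined elsewhere in the test suite; by analogy with the helper above, a plausible reconstruction (not the verbatim original):

import logbook
from logbook.queues import MultiProcessingHandler

class SubscriberGroupSendBack:
    def __init__(self, message, queue):
        self.message = message
        self.queue = queue

    def __call__(self):
        # Runs in the child process: log one message onto the given queue.
        with MultiProcessingHandler(self.queue):
            logbook.warn(self.message)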
Example #6
def test_subscriber_group():
    if os.getenv('APPVEYOR') == 'True':
        pytest.skip('Test hangs on AppVeyor CI')
    from multiprocessing import Process, Queue
    from logbook.queues import MultiProcessingSubscriber, SubscriberGroup
    a_queue = Queue(-1)
    b_queue = Queue(-1)
    subscriber = SubscriberGroup([
        MultiProcessingSubscriber(a_queue),
        MultiProcessingSubscriber(b_queue)
    ])

    for _ in range(10):
        p1 = Process(target=SubscriberGroupSendBack('foo', a_queue))
        p2 = Process(target=SubscriberGroupSendBack('bar', b_queue))
        p1.start()
        p2.start()
        p1.join()
        p2.join()
        messages = [subscriber.recv().message for _ in (1, 2)]
        assert sorted(messages) == ['bar', 'foo']
Example #7
def main():
    start = time.time()

    args = parse_args()
    wwise_dir = Path(args.wwise_dir)
    out_dir = Path(args.out_dir)
    out_dir.mkdir(exist_ok=True)
    id_to_filename_path = out_dir / ID_TO_FILENAME

    manager = mp.Manager()
    queue = manager.Queue()
    id_queue = manager.Queue()
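    # Manager queues are picklable proxies, so they can be passed as
    # arguments to the ProcessPoolExecutor workers below (a bare
    # multiprocessing.Queue cannot be).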

    setup_logging(queue)

    target_handlers = logbook.NestedSetup([
        logbook.NullHandler(),
        logbook.StreamHandler(sys.stdout, level=logbook.INFO, bubble=True),
        logbook.FileHandler("extract.log",
                            mode="w",
                            level=logbook.INFO,
                            bubble=True),
    ])

    sub = MultiProcessingSubscriber(queue)
    controller = sub.dispatch_in_background(target_handlers)
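    # From here on, a background thread drains `queue` and runs each record
    # through target_handlers; controller.stop() at the end of main() shuts
    # it down.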

    quickbms_log_lock = manager.Lock()
    quickbms_log = out_dir / "quickbms.log"

    try:
        id_to_filename_path.unlink()
        logbook.info("removed old {id_file}",
                     id_file=id_to_filename_path.absolute())
    except FileNotFoundError:
        pass

    logbook.info("QuickBMS log: '{qlog}'", qlog=quickbms_log.absolute())
    try:
        quickbms_log.unlink()
        logbook.info("removed old {f}", f=quickbms_log.absolute())
    except FileNotFoundError:
        pass

    id_to_filename_path.touch()
    logbook.info("writing old ID -> new filename info in '{id_file}'",
                 id_file=id_to_filename_path.absolute())

    id_to_filename_p = mp.Process(target=id_to_filename_worker,
                                  args=(id_queue, id_to_filename_path, queue))
    id_to_filename_p.start()
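    # id_to_filename_worker (defined elsewhere) consumes id_queue and keeps
    # writing the old ID -> new filename mapping until it sees SENTINEL.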

    logbook.info("processing audio files in '{wd}'", wd=wwise_dir.absolute())

    fut2func = {}
    # Parse .bnk files and metadata concurrently; fut2func maps each future
    # back to the function that produced it so the results can be told apart
    # when they complete.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        fut2func[executor.submit(parse_banks_metadata, wwise_dir,
                                 queue)] = parse_banks_metadata
        fut2func[executor.submit(decode_banks, wwise_dir, out_dir,
                                 quickbms_log, quickbms_log_lock,
                                 queue)] = decode_banks

    memory_bnk_meta_file2metadata = {}
    streamed_bnk_meta_file2metadata = {}
    orig_bnk2decode_info = {}
    for completed_fut in futures.as_completed(fut2func):
        if fut2func[completed_fut] == parse_banks_metadata:
            result = completed_fut.result()
            memory_bnk_meta_file2metadata = result[0]
            streamed_bnk_meta_file2metadata = result[1]
        elif fut2func[completed_fut] == decode_banks:
            orig_bnk2decode_info = completed_fut.result()

    if len(memory_bnk_meta_file2metadata) != len(orig_bnk2decode_info):
        logbook.warning(
            "Bank file and metadata file counts do not match "
            "({first} != {second})",
            first=len(orig_bnk2decode_info),
            second=len(memory_bnk_meta_file2metadata))

        s1 = memory_bnk_meta_file2metadata.keys()
        s2 = {key.stem for key in orig_bnk2decode_info}

        to_del = []
        diff = s2.difference(s1)
        for d in diff:
            # TODO: expensive!
            for key in orig_bnk2decode_info:
                if key.stem == d:
                    logbook.warn("ignoring {f}", f=str(key))
                    to_del.append(key)

        for td in to_del:
            del orig_bnk2decode_info[td]

    wem_src2wem_dst = {}
    # Move .wem files to out_dir in correct places.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for bnk_meta_file, meta in streamed_bnk_meta_file2metadata.items():
            for m in meta:
                src_dir = bnk_meta_file.parent
                src = src_dir / Path(m.generated_audio_file)
                if src.exists():
                    wwise_path = Path(m.wwise_object_path)
                    dst = out_dir / wwise_path.relative_to(
                        wwise_path.anchor).with_suffix(".wem")
                    executor.submit(copy, src, dst, queue, id_queue)
                    wem_src2wem_dst[src] = dst
                else:
                    logbook.warning(
                        "found references to {src} in metadata, but "
                        "the file cannot be found in wwise_dir",
                        src=src)

    decoded_file2metas = {}

    for orig_bnk_file, decode_info in orig_bnk2decode_info.items():
        orig_bnk_file = orig_bnk_file.stem
        meta = memory_bnk_meta_file2metadata[orig_bnk_file]

        if len(decode_info) != len(meta):
            logbook.error(
                "decode_info and meta length mismatch: "
                "{len1} != {len2} for bnk: '{bnk}'",
                len1=len(decode_info),
                len2=len(meta),
                bnk=orig_bnk_file)
            continue

        for m, (decoded_stem, decoded_size) in zip(meta, decode_info.items()):
            if m.data_size != decoded_size:
                logbook.error(
                    "metadata size and decoded data size length mismatch: "
                    "{len1} != {len2}",
                    len1=m.data_size,
                    len2=decoded_size)
                continue
            decoded_file2metas[decoded_stem] = m

    fs = []
    # Move output from decoding .bnk files to correct places in out_dir.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for decoded_file, meta in decoded_file2metas.items():
            src = out_dir / f"{decoded_file}.bin"
            wwise_path = Path(meta.wwise_object_path)
            dst = out_dir / wwise_path.relative_to(
                wwise_path.anchor).with_suffix(".bin")
            fs.append(executor.submit(move, src, dst, queue, id_queue))

    futures.wait(fs, return_when=futures.ALL_COMPLETED)

    fs = []
    # Convert all .wem and .bin files to .ogg.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for bin_file in out_dir.rglob("*.bin"):
            fs.append(executor.submit(ww2ogg, bin_file, queue))
        for wem_file in out_dir.rglob("*.wem"):
            fs.append(executor.submit(ww2ogg, wem_file, queue))

    futures.wait(fs, return_when=futures.ALL_COMPLETED)

    done_wems_stems = {ws.stem for ws in wem_src2wem_dst}
    source_wems = list(wwise_dir.rglob("*.wem"))
    source_wems_stems = {w.stem for w in source_wems}
    wem_diff = source_wems_stems.difference(done_wems_stems)

    if wem_diff:
        logbook.warn("failed to determine filename for "
                     "{num} files",
                     num=len(wem_diff))

    for ws in source_wems:
        if ws.stem in wem_diff:
            logbook.info("couldn't determine filename for: {ws}", ws=ws)
            copy_seq(ws, out_dir, id_to_filename_queue=None)

    # Convert leftovers.
    leftovers_fs = []
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for wem_file in out_dir.rglob("*.wem"):
            leftovers_fs.append(executor.submit(ww2ogg, wem_file, queue))

    futures.wait(leftovers_fs, return_when=futures.ALL_COMPLETED)

    id_queue.put(SENTINEL)
    id_to_filename_p.join()

    secs = time.time() - start
    logbook.info("finished successfully in {secs:.2f} seconds", secs=secs)

    controller.stop()
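
setup_logging, called near the top of main(), is defined elsewhere; given that the script logs through plain logbook.info()/logbook.warning() calls and relies on the background subscriber, it plausibly pushes a MultiProcessingHandler application-wide, along these lines (a sketch, not the original):

import logbook
from logbook.queues import MultiProcessingHandler

def setup_logging(queue):
    # Hypothetical reconstruction: route every record the current process
    # logs onto the shared queue, where dispatch_in_background() picks it up.
    handler = MultiProcessingHandler(queue)
    handler.push_application()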
Example #8
import logbook
from logbook import NestedSetup, TimedRotatingFileHandler
from logbook.queues import MultiProcessingSubscriber
from test import queue

# The queue object comes from test.py; the equivalent local setup would be
# a multiprocessing Queue(-1).
subscriber = MultiProcessingSubscriber(queue.queue)

# The original called dispatch_in_background() twice and also called recv()
# in a loop; all three consumers compete for the same queue, so each record
# reaches only one of them. Use a single consumer and re-dispatch every
# record to the bound file handlers instead.
handlers = NestedSetup([
    TimedRotatingFileHandler('multiprocessfoo.log', bubble=True),
    TimedRotatingFileHandler('foo.log', bubble=True),
])

with handlers.threadbound():
    while True:
        record = subscriber.recv()
        print("**** Log Record ****\n", record)
        logbook.dispatch_record(record)