Example no. 1
0
def test_launch_redis_with_config_password():
    """Launch Redis with an empty configured password, confirm it is up, then stop it."""
    stop_redis = cr.launch_redis(PORT, password='', loglevel='verbose')
    assert stop_redis is not None

    # Give the server a moment to bind before probing it.
    sleep(REDIS_WAIT)
    assert cr.check_redis(port=PORT, password='') is True

    # The launcher's return value is the shutdown callable.
    stop_redis()
    sleep(REDIS_WAIT)
    assert cr.check_redis(port=PORT, password='') is False
def test_launch_redis_with_config_password():
    """Full Redis lifecycle: not running, launch, verify up, shut down, verify gone."""
    assert cr.check_redis(port=PORT) is False, \
        "Redis should not be running at the start of the test"

    stop_redis = cr.launch_redis(PORT, password='', loglevel='verbose')
    assert stop_redis is not None

    # Allow the server time to come up before checking.
    sleep(REDIS_WAIT)
    assert cr.check_redis(port=PORT, password='') is True

    stop_redis()
    sleep(REDIS_WAIT)
    assert cr.check_redis(port=PORT, password='') is False
def launch_celery_worker_environment(task_desc: TaskDescription,
                                     redis_params: dict,
                                     workers_per_node: int = None):
    """Launch a Redis server, a Celery log-forwarding process and PBS workers.

    :param task_desc: task description handed to the logging subprocess
    :param redis_params: keyword arguments for ``cr.launch_redis``; must contain 'port'
    :param workers_per_node: workers to spawn per PBS node (None -> downstream default)
    :return: ``(executor, shutdown)`` -- a ``cr.CeleryExecutor`` plus a zero-argument
             callable that tears down workers, logger and Redis, in that order
    :raises RuntimeError: if the Redis server fails to launch
    """
    redis_port = redis_params['port']
    redis_host = pbs.hostname()
    redis_password = cr.get_redis_password(generate_if_missing=True)

    redis_shutdown = cr.launch_redis(password=redis_password, **redis_params)
    if not redis_shutdown:
        raise RuntimeError('Failed to launch Redis')

    _LOG.info('Launched Redis at %s:%d', redis_host, redis_port)

    # Poll until Redis answers, up to 5 attempts.  The original looped a fixed
    # five times regardless of the result; break out as soon as the server is up.
    for _ in range(5):
        if cr.check_redis(redis_host, redis_port, redis_password):
            break
        sleep(0.5)
    else:
        # Still unreachable after all retries.  Proceed anyway (matching the
        # previous behaviour) but make the problem visible in the log.
        _LOG.warning('Redis at %s:%d did not respond after 5 checks',
                     redis_host, redis_port)

    executor = cr.CeleryExecutor(
        redis_host,
        redis_port,
        password=redis_password,
    )

    # Shared boolean telling the logging process to stop; lock-free is fine for
    # a single writer / single reader flag.
    logger_shutdown = multiprocessing.Value('b', False, lock=False)
    log_proc = multiprocessing.Process(
        target=_run_celery_task_logging,
        args=(logger_shutdown, cr.app, task_desc)
    )
    log_proc.start()

    worker_procs = list(_spawn_pbs_workers(redis_host,
                                           redis_port,
                                           workers_per_node))

    _LOG.info('%d workers launched.', len(worker_procs))

    def start_shutdown():
        # Broadcast a shutdown request to all Celery workers.
        cr.app.control.shutdown()

    def shutdown():
        """Stop workers, then the log process, then the Redis server."""
        start_shutdown()
        _LOG.info('Waiting for workers to quit')

        # TODO: time limit followed by kill
        for i, proc in enumerate(worker_procs):
            _LOG.debug("Waiting on node %s", i)
            proc.wait()

        # We deliberately don't want to stop the logger until all workers have
        # stopped completely, to capture all of their logs.
        _LOG.info('Stopping log process')
        logger_shutdown.value = True
        log_proc.join()

        _LOG.info('Shutting down redis-server')
        redis_shutdown()

    return executor, shutdown
def test_celery_with_worker():
    """End-to-end CeleryExecutor check: tasks queue with no workers, then a real
    worker produces results, failures propagate, and Redis is gone afterwards."""
    DATA = [1, 2, 3, 4]

    def launch_worker():
        # Detach a single-process worker with nohup so the test can continue.
        cmd = ('nohup {} -m datacube.execution.worker --executor celery localhost:{} --nprocs 1 &'
               .format(sys.executable, PORT))
        try:
            subprocess.check_call(['bash', '-c', cmd])
        except subprocess.CalledProcessError:
            return False
        return True

    assert cr.check_redis(
        port=PORT, password=''
    ) is False, "Redis should not be running at the start of the test"

    runner = cr.CeleryExecutor(host='localhost', port=PORT, password='')
    sleep(REDIS_WAIT)

    assert cr.check_redis(port=PORT, password='')

    # With no workers attached, a submitted task can never complete.
    lone_future = runner.submit(_echo, 0)
    assert lone_future.ready() is False
    runner.release(lone_future)

    queued = runner.map(_echo, DATA)
    assert len(queued) == len(DATA)

    done, broken, waiting = runner.get_ready(queued)
    assert len(done) == 0
    assert len(broken) == 0
    assert len(waiting) == len(DATA)
    # no-worker phase finished

    assert launch_worker()

    # Now a worker exists, so mapped tasks should all resolve.
    queued = runner.map(_echo, DATA)
    results = runner.results(queued)
    assert len(results) == len(DATA)
    assert set(results) == set(DATA)

    # A task that raises must surface its exception through result().
    failing = runner.submit(_echo, "", please_fail=True)
    for ff in runner.as_completed([failing]):
        assert ff.ready() is True
        with pytest.raises(IOError):
            runner.result(ff)

    del runner

    # Dropping the executor should take Redis down with it.
    assert cr.check_redis(port=PORT) is False
Example no. 5
0
def test_celery_with_worker():
    """Exercise the CeleryExecutor against a real datacube-worker process.

    Skipped (by early return) on Windows, where bash/nohup are unavailable.
    """
    DATA = [1, 2, 3, 4]

    def _echo(x, please_fail=False):
        # Trivial task body: echo the argument, or raise on request.
        if please_fail:
            raise IOError('Fake I/O error, cause you asked')
        return x

    def launch_worker():
        # Detach a single-process worker with nohup so the test can continue.
        cmd = 'nohup datacube-worker --executor celery localhost:{} --nprocs 1 &'.format(PORT)
        try:
            subprocess.check_call(['bash', '-c', cmd])
        except subprocess.CalledProcessError:
            return False
        return True

    if os.name == 'nt':
        return

    assert cr.check_redis('localhost', port=PORT, password='') is False

    runner = cr.CeleryExecutor(host='localhost', port=PORT, password='')
    sleep(REDIS_WAIT)

    assert cr.check_redis('localhost', port=PORT, password='')

    # With no workers attached, a submitted task can never complete.
    lone_future = runner.submit(_echo, 0)
    assert lone_future.ready() is False
    runner.release(lone_future)

    queued = runner.map(_echo, DATA)
    assert len(queued) == len(DATA)

    done, broken, waiting = runner.get_ready(queued)
    assert len(done) == 0
    assert len(broken) == 0
    assert len(waiting) == len(DATA)
    # no-worker phase finished

    assert launch_worker()

    # Now a worker exists, so mapped tasks should all resolve.
    queued = runner.map(_echo, DATA)
    results = runner.results(queued)
    assert len(results) == len(DATA)
    assert set(results) == set(DATA)

    # A task that raises must surface its exception through result().
    failing = runner.submit(_echo, "", please_fail=True)
    for ff in runner.as_completed([failing]):
        assert ff.ready() is True
        with pytest.raises(IOError):
            runner.result(ff)

    del runner