def test_launch_mesos_container_with_docker_image():
    """Launches a Mesos (UCR) container with a Docker image and verifies the container type."""

    app_def = apps.ucr_docker_http_server(app_id='/launch-mesos-container-with-docker-image-app')
    app_id = app_def["id"]

    client = marathon.create_client()
    client.add_app(app_def)
    common.deployment_wait(service_id=app_id)

    assert_that(lambda: client.get_tasks(app_id),
                eventually(has_len(equal_to(1)), max_attempts=30))

    app = client.get_app(app_id)
    assert app['container']['type'] == 'MESOS', "The container type is not MESOS"
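# For reference, the `apps.ucr_docker_http_server` fixture is assumed to return a
# Marathon app definition roughly like the sketch below: a MESOS (UCR) container
# that pulls a Docker image. The image name, command, and resource sizes are
# illustrative, not the fixture's actual values.
def ucr_docker_http_server_sketch(app_id):
    return {
        "id": app_id,
        "cmd": "python -m http.server $PORT0",   # hypothetical command serving HTTP
        "cpus": 0.1,
        "mem": 32,
        "instances": 1,
        "container": {
            "type": "MESOS",                     # UCR, not the Docker containerizer
            "docker": {"image": "python:3"}      # assumed image; the fixture may differ
        }
    }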
def deployment_wait(service_id=None, deployment_id=None, wait_fixed=2000, max_attempts=60):
    """Wait for a specific app/pod deployment to finish successfully.

    Pass either `service_id` (an app/pod ID) or `deployment_id`, but not both.
    If neither is passed, wait for all current deployments to succeed. The inner
    matcher retries fetching deployments every `wait_fixed` milliseconds and gives
    up after `max_attempts` tries.
    """
    assert not all([service_id, deployment_id]), "Use either deployment_id or service_id, but not both."

    if deployment_id:
        logger.info("Waiting for the deployment_id {} to finish".format(deployment_id))
    elif service_id:
        logger.info('Waiting for {} to deploy successfully'.format(service_id))
    else:
        logger.info('Waiting for all current deployments to finish')

    assert_that(lambda: deployments_for(service_id, deployment_id),
                eventually(has_len(0), wait_fixed=wait_fixed, max_attempts=max_attempts))
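# Minimal usage sketch for deployment_wait. `app_def` is assumed to be a Marathon
# app definition already known to the caller; the deployment ID below is made up.
def deployment_wait_usage_sketch(app_def):
    client = marathon.create_client()
    client.add_app(app_def)

    # Wait until this particular app/pod has no outstanding deployments.
    deployment_wait(service_id=app_def["id"], max_attempts=30)

    # A specific deployment can also be awaited by the ID Marathon assigned to it
    # (the UUID here is illustrative).
    deployment_wait(deployment_id="97c136bf-5a28-4821-9d94-480d9fbb01c8")

    # With no arguments, block until the cluster has no in-flight deployments at all.
    deployment_wait()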
def test_restart_container_with_persistent_volume():
    """A task with a persistent volume, which writes to a file in the persistent
    volume, is launched. The app is killed and restarted, and we can still read
    from the persistent volume what was written to it.
    """

    app_def = apps.persistent_volume_app()
    app_id = app_def['id']

    client = marathon.create_client()
    client.add_app(app_def)
    common.deployment_wait(service_id=app_id)

    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))

    host = tasks[0]['host']
    port = tasks[0]['ports'][0]
    cmd = "curl {}:{}/data/foo".format(host, port)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def check_task(cmd, target_data):
        run, data = shakedown.run_command_on_master(cmd)

        assert run, "{} did not succeed".format(cmd)
        assert data == target_data, "'{}' was not equal to {}".format(data, target_data)

    check_task(cmd, target_data='hello\n')

    client.restart_app(app_id)
    common.deployment_wait(service_id=app_id)

    assert_that(lambda: client.get_tasks(app_id),
                eventually(has_len(equal_to(1)), max_attempts=30))

    # Re-fetch task info after the restart rather than relying on the stale pre-restart list.
    tasks = client.get_tasks(app_id)
    host = tasks[0]['host']
    port = tasks[0]['ports'][0]
    cmd = "curl {}:{}/data/foo".format(host, port)

    check_task(cmd, target_data='hello\nhello\n')
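# For context, `apps.persistent_volume_app` is assumed to return an app definition
# along these lines: the command appends 'hello' to a file on the persistent volume
# and then serves it over HTTP, which is why the test expects 'hello\n' before the
# restart and 'hello\nhello\n' after it. The exact command, port name, and resource
# sizes are illustrative, not the fixture's actual values.
def persistent_volume_app_sketch():
    return {
        "id": "/persistent-volume-app",
        # Hypothetical command: append to the file, then serve the sandbox over HTTP.
        "cmd": "echo hello >> $MESOS_SANDBOX/data/foo && python -m http.server $PORT_API",
        "cpus": 0.1,
        "mem": 32,
        "instances": 1,
        "residency": {"taskLostBehavior": "WAIT_FOREVER"},   # keeps the reservation across restarts
        "container": {
            "type": "MESOS",
            "volumes": [{
                "containerPath": "data",
                "mode": "RW",
                "persistent": {"size": 10}                    # size in MiB
            }]
        },
        "portDefinitions": [{"port": 0, "name": "api"}]
    }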
def deployment_wait(service_id=None, wait_fixed=2000, max_attempts=60):
    """Wait for a specific app/pod to deploy successfully.

    If no app/pod ID is passed, wait for all current deployments to succeed.
    The inner matcher retries fetching deployments every `wait_fixed` milliseconds
    and gives up after `max_attempts` tries.
    """
    if service_id is None:
        print('Waiting for all current deployments to finish')
    else:
        print('Waiting for {} to deploy successfully'.format(service_id))

    assert_that(lambda: deployments_for(service_id),
                eventually(has_len(0), wait_fixed=wait_fixed, max_attempts=max_attempts))
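# The matcher-based wait above is equivalent in spirit to the retrying-based polling
# used in the tests: poll until the list of in-flight deployments for the service is
# empty. This sketch assumes `deployments_for(service_id)` returns that list; the
# helper name is illustrative.
@retrying.retry(wait_fixed=2000, stop_max_attempt_number=60)
def wait_until_deployed_sketch(service_id=None):
    # An AssertionError triggers another retry until the attempts are exhausted.
    assert len(deployments_for(service_id)) == 0, \
        "{} still has deployments in flight".format(service_id or "the cluster")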