def test_ui_available():
    """Verify that the managed Marathon ('marathon-user') UI endpoint responds.

    Only meaningful once MoM (Marathon on Marathon) has been launched.
    """
    base_url = shakedown.dcos_service_url('marathon-user')
    response = http.get("{}/ui/".format(base_url))
    assert response.status_code == 200
Example #2
0
def test_ui_available(marathon_service_name):
    """Verify that a GET against the Marathon UI endpoint succeeds once Marathon is up."""
    ui_url = "{}/ui/".format(shakedown.dcos_service_url(marathon_service_name))
    response = http.get(ui_url)
    assert response.status_code == 200, "HTTP status code is {}, but 200 was expected".format(
        response.status_code)
Example #3
0
def test_metric_endpoint(marathon_service_name):
    """Check that Marathon's metrics endpoint responds and exposes the
    app-count gauge.

    Note: "{}metrics" has no separating '/', so this relies on
    dcos_service_url() returning a trailing slash — consistent with the
    sibling tests in this suite.
    """
    response = http.get("{}metrics".format(
        shakedown.dcos_service_url(marathon_service_name)))
    assert response.status_code == 200, "HTTP status code {} is NOT 200".format(
        response.status_code)

    # Parse the body once instead of calling response.json() twice.
    response_json = response.json()
    print(response_json['gauges'])
    assert response_json['gauges']['service.mesosphere.marathon.app.count'] is not None, \
        "service.mesosphere.marathon.app.count is absent"
def test_ui_available(marathon_service_name):
    """ This simply confirms that a URL call the service endpoint is successful if
    marathon is launched.
    """

    response = http.get("{}/ui/".format(
        shakedown.dcos_service_url(marathon_service_name)))
    # Include the actual status code in the failure message for diagnosis,
    # consistent with the other UI tests in this suite.
    assert response.status_code == 200, "HTTP code: {} is NOT 200".format(response.status_code)
def test_ui_available(marathon_service_name):
    """Confirm the Marathon UI endpoint answers with HTTP 200 once the
    service is running.
    """
    service_url = shakedown.dcos_service_url(marathon_service_name)
    response = http.get("{}/ui/".format(service_url))
    assert response.status_code == 200, "HTTP code: {} is NOT 200".format(response.status_code)
Example #6
0
def test_metric_endpoint(marathon_service_name):
    """Check that the metrics endpoint responds and reports the app-count gauge."""
    metrics_url = "{}metrics".format(shakedown.dcos_service_url(marathon_service_name))
    response = http.get(metrics_url)
    assert response.status_code == 200, "HTTP status code {} is NOT 200".format(response.status_code)

    gauges = response.json()['gauges']
    print(gauges)
    assert gauges['service.mesosphere.marathon.app.count'] is not None, \
        "service.mesosphere.marathon.app.count is absent"
Example #7
0
def test_task_dns_prefix_points_to_all_tasks():
    """Verify DiscoveryInfo naming on the hello-0 pod's tasks, then wait for
    the deployment plan to complete.
    """
    info_url = shakedown.dcos_service_url(config.PACKAGE_NAME) + "/v1/pod/{}/info".format("hello-0")
    pod_info = dcos.http.get(info_url).json()

    # Every task in the pod should advertise the pod name in its DiscoveryInfo.
    assert all(p["info"]["discovery"]["name"] == "hello-0" for p in pod_info)
    sdk_plan.wait_for_completed_deployment(config.PACKAGE_NAME)
def test_task_dns_prefix_points_to_all_tasks():
    """Check DiscoveryInfo naming on hello-0's tasks, then block until the
    deployment plan finishes.
    """
    pod_info = dcos.http.get(
        shakedown.dcos_service_url(config.SERVICE_NAME)
        + "/v1/pod/{}/info".format("hello-0")).json()

    # Each task should carry the pod name in its DiscoveryInfo.
    assert all(p["info"]["discovery"]["name"] == "hello-0" for p in pod_info)
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
def test_metric_endpoint(marathon_service_name):
    """The metrics endpoint must return 200 and expose the Marathon app-count gauge."""
    base = shakedown.dcos_service_url(marathon_service_name)
    response = http.get("{}metrics".format(base))
    assert response.status_code == 200, "HTTP status code {} is NOT 200".format(response.status_code)

    payload = response.json()
    print(payload['gauges'])
    assert payload['gauges']['service.mesosphere.marathon.app.count'] is not None, \
        "service.mesosphere.marathon.app.count is absent"
def test_task_dns_prefix_points_to_all_tasks():
    """Check DiscoveryInfo on hello-0's tasks, then spin until the deployment
    plan reports COMPLETE.
    """
    pod_info = dcos.http.get(
        shakedown.dcos_service_url(PACKAGE_NAME)
        + "/v1/pods/{}/info".format("hello-0")).json()

    # Each task should carry the pod name in its DiscoveryInfo.
    assert all(p["info"]["discovery"]["name"] == "hello-0" for p in pod_info)
    spin.time_wait_noisy(
        lambda: plan.get_deployment_plan(PACKAGE_NAME).json()['status'] == 'COMPLETE')
Example #11
0
def test_cassandra_migration():
    """Migrate data between two Cassandra clusters via an S3 backup/restore.

    Writes and verifies test data on the backup cluster, backs it up to S3,
    then runs the 'restore-s3' plan on the restore cluster and verifies the
    data (and its deletion) via cleanup jobs.
    """
    backup_service_name = os.getenv('CASSANDRA_BACKUP_CLUSTER_NAME')
    restore_service_name = os.getenv('CASSANDRA_RESTORE_CLUSTER_NAME')

    # Point the test jobs at the backup cluster's node.
    env = EnvironmentContext(
        CASSANDRA_NODE_ADDRESS=os.getenv('BACKUP_NODE_ADDRESS',
                                         'node-0.cassandra.mesos'),
        CASSANDRA_NODE_PORT=os.getenv('BACKUP_NODE_PORT', '9042'))
    # Parameters shared by the backup request and the restore plan.
    plan_parameters = {
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME',
                                    'infinity-framework-test'),
        'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),  # unique name per test run
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }

    data_context = DataContext(
        init_jobs=[WRITE_DATA_JOB, VERIFY_DATA_JOB],
        cleanup_jobs=[DELETE_DATA_JOB, VERIFY_DELETION_JOB])
    # Install and run the write/delete data jobs against backup cluster,
    # running dcos-cassandra-service
    with env, JobContext(TEST_JOBS), data_context:
        # Back this cluster up to S3
        backup_parameters = {
            'backup_name':
            plan_parameters['SNAPSHOT_NAME'],
            's3_access_key':
            plan_parameters['AWS_ACCESS_KEY_ID'],
            's3_secret_key':
            plan_parameters['AWS_SECRET_ACCESS_KEY'],
            'external_location':
            's3://{}'.format(plan_parameters['S3_BUCKET_NAME']),
        }
        dcos.http.put('{}v1/backup/start'.format(
            shakedown.dcos_service_url(backup_service_name)),
                      json=backup_parameters)
        # Poll until the backup plan reports COMPLETE.
        spin.time_wait_noisy(lambda: get_dcos_cassandra_plan(
            backup_service_name).json()['status'] == 'COMPLETE')

    # Re-point the jobs at the restore cluster's node.
    env = EnvironmentContext(
        CASSANDRA_NODE_ADDRESS=os.getenv('RESTORE_NODE_ADDRESS',
                                         'node-0-server.sdk-cassandra.mesos'),
        CASSANDRA_NODE_PORT=os.getenv('RESTORE_NODE_PORT', '9052'))

    # No init jobs here: the data is expected to arrive via the restore plan.
    data_context = DataContext(
        cleanup_jobs=[VERIFY_DATA_JOB, DELETE_DATA_JOB, VERIFY_DELETION_JOB])
    with env, JobContext(TEST_JOBS), data_context:
        plan.start_plan(restore_service_name,
                        'restore-s3',
                        parameters=plan_parameters)
        spin.time_wait_noisy(
            lambda: (plan.get_plan(restore_service_name, 'restore-s3').json()[
                'status'] == 'COMPLETE'))
Example #12
0
def get(service_name, endpoint):
    '''Perform a GET against a scheduler API endpoint.

    :param endpoint: endpoint of the form /v1/...
    :type endpoint: str
    :returns: JSON response from the provided scheduler API endpoint
    :rtype: Response
    '''
    url = "{}{}".format(shakedown.dcos_service_url(service_name), endpoint)
    response = dcos.http.get(url)
    # Surface HTTP errors to the caller instead of returning a bad response.
    response.raise_for_status()
    return response
def test_ping():
    """Exercise Marathon's /ping endpoint directly — it is not exposed by the
    client object, so the URL is assembled by hand.
    """
    marathon_service_name = get_marathon_service_name()
    marathon_url = shakedown.dcos_service_url(marathon_service_name)
    # NOTE(review): urljoin drops the last path segment of a base URL that
    # lacks a trailing slash — this relies on dcos_service_url providing one.
    url = urljoin(marathon_url, 'ping')
    response = http.get(url)
    assert response.status_code == 200
    assert response.text == 'pong'
Example #14
0
def get(service_name, endpoint):
    '''GET a scheduler API endpoint, raising on HTTP error statuses.

    :param endpoint: endpoint of the form /v1/...
    :type endpoint: str
    :returns: JSON response from the provided scheduler API endpoint
    :rtype: Response
    '''
    full_url = "{}{}".format(shakedown.dcos_service_url(service_name),
                             endpoint)
    response = dcos.http.get(full_url)
    response.raise_for_status()
    return response
def test_job():
    """End-to-end Chronos smoke test.

    Installs the package, confirms there are no jobs or completed tasks,
    submits a default job, waits briefly, and then checks that the job's
    output (today's date) appears in the completed task's log.
    """
    shakedown.install_package_and_wait('chronos')

    # Freshly installed: no completed tasks yet.
    tasks = shakedown.get_service('chronos')['completed_tasks']
    assert len(tasks) == 0

    # The scheduler API moved under /v1 in Chronos 3.0.
    if is_before_version("3.0"):
        url = shakedown.dcos_service_url('chronos/scheduler/jobs')
    else:
        url = shakedown.dcos_service_url('chronos/v1/scheduler/jobs')

    jobs = http.get(url).json()
    assert len(jobs) == 0

    # add a job
    if is_before_version("3.0"):
        url = shakedown.dcos_service_url('chronos/scheduler/iso8601')
    else:
        url = shakedown.dcos_service_url('chronos/v1/scheduler/iso8601')

    data = default_job()
    headers = {'Content-Type': 'application/json'}
    http.post(url, data=data, headers=headers)

    # give it a couple of seconds
    time.sleep(5)

    tasks = shakedown.get_service('chronos')['completed_tasks']
    assert len(tasks) > 0

    # 'task_id' avoids shadowing the builtin id().
    task_id = tasks[0]['id']
    status, out = shakedown.run_command_on_master('date')
    sdate = out[:10]
    stdout, stderr, return_code = shakedown.run_dcos_command(
        'task log --completed {}'.format(task_id))
    assert sdate in stdout
Example #16
0
def test_metrics_endpoint(marathon_service_name):
    """Verify the metrics endpoint and the presence of the app-count gauge.

    The gauge was renamed in Marathon 1.7, so the expected key depends on
    the running version.
    """
    response = http.get("{}metrics".format(
        shakedown.dcos_service_url(marathon_service_name)))
    assert response.status_code == 200, "HTTP status code {} is NOT 200".format(
        response.status_code)

    # NOTE(review): 'marthon_version_less_than' looks misspelled, but the name
    # must match the helper defined elsewhere in this suite — confirm there.
    if marthon_version_less_than('1.7'):
        metric_name = 'service.mesosphere.marathon.app.count'
    else:
        metric_name = 'marathon.apps.active.gauge'

    gauges = response.json()['gauges']
    print(gauges)
    assert gauges[metric_name] is not None, \
        "{} is absent".format(metric_name)
Example #17
0
def get(service_name, endpoint):
    '''GET a scheduler API endpoint, tolerating the "plan error" status 417.

    :param endpoint: endpoint of the form /v1/...
    :type endpoint: str
    :returns: JSON response from the provided scheduler API endpoint
    :rtype: Response
    '''

    # Some of our APIs (cough looking at you plans)
    # will return a meaningful 417.
    def accepts(status_code):
        return 200 <= status_code < 300 or status_code == 417

    response = dcos.http.get(
        "{}{}".format(shakedown.dcos_service_url(service_name), endpoint),
        is_success=accepts)

    # 417 == plan error; any other non-2xx status should raise.
    if response.status_code != 417:
        response.raise_for_status()

    return response
def test_metric_endpoint(marathon_service_name):
    """Verify the metrics endpoint responds and reports the JVM max-heap gauge."""
    response = http.get("{}/metrics/".format(
        shakedown.dcos_service_url(marathon_service_name)))
    assert response.status_code == 200, "HTTP status code {} is NOT 200".format(
        response.status_code)
    # Parse the body once rather than re-parsing it inside the assertion.
    gauges = response.json()['gauges']
    assert gauges['jvm.memory.heap.max']['value'] is not None, \
        "jvm.memory.heap.max gauge is absent"
Example #19
0
def kafka_api_url(basename):
    """Return the Kafka scheduler's v1 API URL for *basename*."""
    base_url = shakedown.dcos_service_url(PACKAGE_NAME)
    return '{}/v1/{}'.format(base_url, basename)
def marathon_api_url(basename):
    """Return the Marathon v2 API URL for *basename*."""
    marathon_url = shakedown.dcos_service_url('marathon')
    return '{}/v2/{}'.format(marathon_url, basename)
def cassandra_api_url(basename, app_id='cassandra'):
    """Return the Cassandra scheduler's v1 API URL for *basename* under *app_id*."""
    service_url = shakedown.dcos_service_url(app_id)
    return '{}/v1/{}'.format(service_url, basename)
Example #22
0
def test_cassandra_migration():
    """Migrate data from a backup Cassandra cluster to a restore cluster via S3.

    Writes and verifies data on the backup cluster, backs it up to S3, then
    runs the 'restore-s3' plan on the restore cluster and verifies the data
    (and its deletion) through after-jobs.
    """
    backup_service_name = os.getenv('CASSANDRA_BACKUP_CLUSTER_NAME')
    restore_service_name = os.getenv('CASSANDRA_RESTORE_CLUSTER_NAME')

    backup_node_address = os.getenv('BACKUP_NODE_ADDRESS',
                                    DEFAULT_NODE_ADDRESS)
    backup_node_port = os.getenv('BACKUP_NODE_PORT', DEFAULT_NODE_PORT)

    # Jobs that exercise the backup cluster before/after the S3 backup.
    backup_write_data_job = get_write_data_job(backup_node_address,
                                               backup_node_port)
    backup_verify_data_job = get_verify_data_job(backup_node_address,
                                                 backup_node_port)
    backup_delete_data_job = get_delete_data_job(backup_node_address,
                                                 backup_node_port)
    backup_verify_deletion_job = get_verify_deletion_job(
        backup_node_address, backup_node_port)

    # Shared by the backup request and the restore plan.
    plan_parameters = {
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME',
                                    'infinity-framework-test'),
        'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),  # unique per test run
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }

    backup_install_job_context = jobs.InstallJobContext([
        backup_write_data_job, backup_verify_data_job, backup_delete_data_job,
        backup_verify_deletion_job
    ])
    backup_run_job_context = jobs.RunJobContext(
        before_jobs=[backup_write_data_job, backup_verify_data_job],
        after_jobs=[backup_delete_data_job, backup_verify_deletion_job])
    # Install and run the write/delete data jobs against backup cluster,
    # running dcos-cassandra-service
    with backup_install_job_context, backup_run_job_context:
        # Back this cluster up to S3
        backup_parameters = {
            'backup_name':
            plan_parameters['SNAPSHOT_NAME'],
            's3_access_key':
            plan_parameters['AWS_ACCESS_KEY_ID'],
            's3_secret_key':
            plan_parameters['AWS_SECRET_ACCESS_KEY'],
            'external_location':
            's3://{}'.format(plan_parameters['S3_BUCKET_NAME']),
        }
        dcos.http.put('{}v1/backup/start'.format(
            shakedown.dcos_service_url(backup_service_name)),
                      json=backup_parameters)
        plan.wait_for_completed_deployment(backup_service_name)

    # Restore data to second instance:
    restore_node_address = os.getenv(
        'RESTORE_NODE_ADDRESS',
        hosts.autoip_host('sdk-cassandra', 'node-0-server'))
    restore_node_port = os.getenv('RESTORE_NODE_PORT', '9052')

    # Jobs that verify (and then clean up) the restored data.
    restore_write_data_job = get_write_data_job(restore_node_address,
                                                restore_node_port)
    restore_verify_data_job = get_verify_data_job(restore_node_address,
                                                  restore_node_port)
    restore_delete_data_job = get_delete_data_job(restore_node_address,
                                                  restore_node_port)
    restore_verify_deletion_job = get_verify_deletion_job(
        restore_node_address, restore_node_port)

    restore_install_job_context = jobs.InstallJobContext([
        restore_write_data_job, restore_verify_data_job,
        restore_delete_data_job, restore_verify_deletion_job
    ])
    # No before_jobs: the data is expected to arrive via the restore plan.
    restore_run_job_context = jobs.RunJobContext(after_jobs=[
        restore_verify_data_job, restore_delete_data_job,
        restore_verify_deletion_job
    ])
    with restore_install_job_context, restore_run_job_context:
        plan.start_plan(restore_service_name,
                        'restore-s3',
                        parameters=plan_parameters)
        plan.wait_for_completed_plan(restore_service_name, 'restore-s3')
Example #23
0
 def fn():
     """Fetch the named plan from the scheduler.

     Returns [] when the request fails with DCOSHTTPException, so callers
     can keep polling. Note: 'plan' and PACKAGE_NAME come from the
     enclosing scope (not visible here).
     """
     try:
         return dcos.http.get("{}/v1/plans/{}".format(
             shakedown.dcos_service_url(PACKAGE_NAME), plan))
     except dcos.errors.DCOSHTTPException:
         return []
def test_ui_available(marathon_service_name):
    """Simply verifies that a request to the UI endpoint is successful if Marathon is launched."""
    ui_url = "{}/ui/".format(shakedown.dcos_service_url(marathon_service_name))
    response = http.get(ui_url)
    assert response.status_code == 200, "HTTP status code is {}, but 200 was expected".format(response.status_code)
Example #25
0
def test_cassandra_migration():
    """Migrate data from a backup Cassandra cluster to a restore cluster via S3.

    Writes and verifies data on the backup cluster, backs it up to S3, then
    runs the 'restore-s3' plan on the restore cluster and verifies the data
    (and its deletion) through after-jobs.
    """
    backup_service_name = os.getenv('CASSANDRA_BACKUP_CLUSTER_NAME')
    restore_service_name = os.getenv('CASSANDRA_RESTORE_CLUSTER_NAME')

    backup_node_address = os.getenv('BACKUP_NODE_ADDRESS', config.DEFAULT_NODE_ADDRESS)
    backup_node_port = os.getenv('BACKUP_NODE_PORT', config.DEFAULT_NODE_PORT)

    # Jobs that exercise the backup cluster before/after the S3 backup.
    backup_write_data_job = config.get_write_data_job(backup_node_address, backup_node_port)
    backup_verify_data_job = config.get_verify_data_job(backup_node_address, backup_node_port)
    backup_delete_data_job = config.get_delete_data_job(backup_node_address, backup_node_port)
    backup_verify_deletion_job = config.get_verify_deletion_job(backup_node_address, backup_node_port)

    # Shared by the backup request and the restore plan.
    plan_parameters = {
        'S3_BUCKET_NAME': os.getenv(
            'AWS_BUCKET_NAME', 'infinity-framework-test'
        ),
        'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),  # unique per test run
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }

    backup_install_job_context = sdk_jobs.InstallJobContext(
        [backup_write_data_job, backup_verify_data_job,
         backup_delete_data_job, backup_verify_deletion_job])
    backup_run_job_context = sdk_jobs.RunJobContext(
        before_jobs=[backup_write_data_job, backup_verify_data_job],
        after_jobs=[backup_delete_data_job, backup_verify_deletion_job])
    # Install and run the write/delete data jobs against backup cluster,
    # running dcos-cassandra-service
    with backup_install_job_context, backup_run_job_context:
        # Back this cluster up to S3
        backup_parameters = {
            'backup_name': plan_parameters['SNAPSHOT_NAME'],
            's3_access_key': plan_parameters['AWS_ACCESS_KEY_ID'],
            's3_secret_key': plan_parameters['AWS_SECRET_ACCESS_KEY'],
            'external_location': 's3://{}'.format(plan_parameters['S3_BUCKET_NAME']),
        }
        dcos.http.put(
            '{}v1/backup/start'.format(
                shakedown.dcos_service_url(backup_service_name)
            ),
            json=backup_parameters
        )
        sdk_plan.wait_for_completed_deployment(backup_service_name)

    # Restore data to second instance:
    restore_node_address = os.getenv(
        'RESTORE_NODE_ADDRESS', sdk_hosts.autoip_host('sdk-cassandra', 'node-0-server'))
    restore_node_port = os.getenv('RESTORE_NODE_PORT', '9052')

    # Jobs that verify (and then clean up) the restored data.
    restore_write_data_job = config.get_write_data_job(restore_node_address, restore_node_port)
    restore_verify_data_job = config.get_verify_data_job(restore_node_address, restore_node_port)
    restore_delete_data_job = config.get_delete_data_job(restore_node_address, restore_node_port)
    restore_verify_deletion_job = config.get_verify_deletion_job(restore_node_address, restore_node_port)

    restore_install_job_context = sdk_jobs.InstallJobContext(
        [restore_write_data_job, restore_verify_data_job,
         restore_delete_data_job, restore_verify_deletion_job]
    )
    # No before_jobs: the data is expected to arrive via the restore plan.
    restore_run_job_context = sdk_jobs.RunJobContext(
        after_jobs=[restore_verify_data_job, restore_delete_data_job, restore_verify_deletion_job]
    )
    with restore_install_job_context, restore_run_job_context:
        sdk_plan.start_plan(
            restore_service_name, 'restore-s3', parameters=plan_parameters
        )
        sdk_plan.wait_for_completed_plan(restore_service_name, 'restore-s3')
def test_httpd():
    """Smoke-test the proxylite /pyhttpsd route."""
    proxylite_url = shakedown.dcos_service_url('proxylite')
    cmd.request('get', '{}/pyhttpsd'.format(proxylite_url))
Example #27
0
def get_kibana_status():
    """Curl Kibana's login page (HEAD) from the master with a fresh DC/OS
    token and return the raw command output.
    """
    token = shakedown.authenticate('bootstrapuser', 'deleteme')
    curl_cmd = "curl -I -k -H \"Authorization: token={}\" -s {}/kibana/login".format(
        token, shakedown.dcos_service_url(PACKAGE_NAME))
    # The exit status is ignored; callers inspect the textual output instead.
    _, output = shakedown.run_command_on_master(curl_cmd)
    return output
def kafka_api_url(basename):
    """Build the Kafka v1 API URL for the given endpoint basename."""
    service_root = shakedown.dcos_service_url(PACKAGE_NAME)
    return '{}/v1/{}'.format(service_root, basename)
Example #29
0
 def fn():
     """Fetch the named plan and raise on any HTTP error status.

     Note: 'service_name' and 'plan' come from the enclosing scope
     (not visible here).
     """
     response = dcos.http.get("{}/v1/plans/{}".format(shakedown.dcos_service_url(service_name), plan))
     response.raise_for_status()
     return response
def start_plan(service_name, plan, parameters=None):
    """Kick off *plan* on the scheduler, POSTing *parameters* (or {}) as JSON."""
    start_url = "{}/v1/plans/{}/start".format(shakedown.dcos_service_url(service_name), plan)
    payload = {} if parameters is None else parameters
    return dcos.http.post(start_url, json=payload)
def test_google():
    """Smoke-test the proxylite /google route."""
    base = shakedown.dcos_service_url('proxylite')
    cmd.request('get', '{}/google'.format(base))
def test_metric_endpoint(marathon_service_name):
    """The metrics endpoint should respond and include the JVM max-heap gauge."""
    metrics_url = "{}metrics".format(shakedown.dcos_service_url(marathon_service_name))
    response = http.get(metrics_url)
    assert response.status_code == 200
    assert response.json()['gauges']['jvm.memory.heap.max']['value'] is not None
def test_httpd():
    """Verify that the proxylite service proxies the /pyhttpsd route."""
    pyhttpsd_url = '{}/pyhttpsd'.format(shakedown.dcos_service_url('proxylite'))
    cmd.request('get', pyhttpsd_url)
"""Marathon acceptance tests for DC/OS."""

import pytest
import shakedown
import time

from datetime import timedelta
from dcos import (packagemanager, subcommand)
from dcos.cosmos import get_cosmos_url
from shakedown import required_private_agents

from common import cluster_info

PACKAGE_NAME = 'marathon'  # Cosmos package to install
SERVICE_NAME = 'marathon-user'  # name of the MoM instance under test
DCOS_SERVICE_URL = shakedown.dcos_service_url(PACKAGE_NAME)  # NOTE: resolved at import time
WAIT_TIME_IN_SECS = 300  # max seconds to wait for the service to appear


def test_install_marathon():
    """Install the Marathon package for DC/OS.
    """

    # Install
    shakedown.install_package_and_wait(PACKAGE_NAME)
    assert shakedown.package_installed(PACKAGE_NAME), 'Package failed to install'

    end_time = time.time() + WAIT_TIME_IN_SECS
    found = False
    while time.time() < end_time:
        found = shakedown.get_service(PACKAGE_NAME) is not None
Example #35
0
def start_sidecar_plan():
    """POST to the scheduler to launch the 'sidecar' plan."""
    start_url = shakedown.dcos_service_url(PACKAGE_NAME) + "/v1/plans/sidecar/start"
    return dcos.http.post(start_url)
def cassandra_api_url(basename, app_id='cassandra'):
    """Build the v1 API URL for *basename* on the given Cassandra app."""
    base = shakedown.dcos_service_url(app_id)
    return '{}/v1/{}'.format(base, basename)
Example #37
0
 def fn():
     """Fetch the deploy plan; returns [] when the request fails with
     DCOSHTTPException, so callers can keep polling.
     """
     try:
         return dcos.http.get(shakedown.dcos_service_url(PACKAGE_NAME) + "/v1/plans/deploy")
     except dcos.errors.DCOSHTTPException:
         return []
Example #38
0
def get_kibana_status():
    """Return the output of an authenticated curl HEAD request against
    Kibana's login page, run on the DC/OS master.
    """
    token = shakedown.authenticate('bootstrapuser', 'deleteme')
    service_url = shakedown.dcos_service_url(PACKAGE_NAME)
    curl_cmd = "curl -I -k -H \"Authorization: token={}\" -s {}/kibana/login".format(
        token, service_url)
    exit_status, output = shakedown.run_command_on_master(curl_cmd)
    return output
def cassandra_api_url(basename, app_id="cassandra"):
    """Return the Cassandra v1 API URL for *basename* under *app_id*."""
    service_url = shakedown.dcos_service_url(app_id)
    return "{}/v1/{}".format(service_url, basename)
def marathon_api_url(basename):
    """Return the Marathon v2 API URL for *basename*."""
    root = shakedown.dcos_service_url("marathon")
    return "{}/v2/{}".format(root, basename)
def test_google():
    """Check that proxylite forwards the /google route."""
    url = '{}/google'.format(shakedown.dcos_service_url('proxylite'))
    cmd.request('get', url)
Example #42
0
def marathon_api_url(basename):
    """Build a Marathon v2 API URL for the given endpoint name."""
    service_root = shakedown.dcos_service_url('marathon')
    return '{}/v2/{}'.format(service_root, basename)
Example #43
0
def start_plan(service_name, plan, parameters=None):
    """Start *plan* on the scheduler; *parameters* (or {}) is sent as JSON."""
    url = "{}/v1/plans/{}/start".format(
        shakedown.dcos_service_url(service_name), plan)
    return dcos.http.post(url, json=parameters if parameters is not None else {})
"""Marathon acceptance tests for DC/OS."""

import common
import pytest
import shakedown
import time

from datetime import timedelta
from dcos import packagemanager, marathon, cosmos

PACKAGE_NAME = 'marathon'  # Cosmos package under test
SERVICE_NAME = 'marathon-user'  # name of the MoM instance these tests manage
DCOS_SERVICE_URL = shakedown.dcos_service_url(PACKAGE_NAME)  # NOTE: resolved at import time
WAIT_TIME_IN_SECS = 300  # polling timeout in seconds


def teardown_function(function):
    """Pytest per-test teardown: remove the 'test-marathon' instance."""
    uninstall('test-marathon')


def setup_module(module):
    """Pytest module setup: start from a clean slate and log cluster info."""
    uninstall(SERVICE_NAME)
    common.cluster_info()


def teardown_module(module):
    """Pytest module teardown: remove the service installed by these tests."""
    uninstall(SERVICE_NAME)


@pytest.mark.skipif("shakedown.ee_version() == 'strict'",
                    reason="MoM doesn't work on a strict cluster")
Example #45
0
def start_sidecar_plan(service_name):
    """Trigger the 'sidecar' plan on the given service's scheduler."""
    plan_url = shakedown.dcos_service_url(service_name) + "/v1/plans/sidecar/start"
    return dcos.http.post(plan_url)