Example 1
def update_context_marathon_config(context):
    whitelist_keys = set(['id', 'backoff_factor', 'backoff_seconds', 'max_instances', 'mem', 'cpus', 'instances'])
    with contextlib.nested(
        mock.patch.object(MarathonServiceConfig, 'get_min_instances', autospec=True, return_value=1),
        mock.patch.object(MarathonServiceConfig, 'get_max_instances', autospec=True),
    ) as (
        _,
        mock_get_max_instances,
    ):
        mock_get_max_instances.return_value = context.max_instances if 'max_instances' in context else None
        context.marathon_complete_config = {key: value for key, value in marathon_tools.create_complete_config(
            context.service,
            context.instance,
            soa_dir=context.soa_dir,
        ).items() if key in whitelist_keys}
    context.marathon_complete_config.update({
        'cmd': '/bin/sleep 1m',
        'constraints': None,
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
    })
    if 'max_instances' not in context:
        context.marathon_complete_config['instances'] = context.instances
Example 2
def perform_command(command, service, instance, cluster, verbose, soa_dir, app_id=None, delta=None):
    """Performs a start/stop/restart/status/scale on an instance
    :param command: String of start, stop, restart, status or scale
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: bool if the output should be verbose or not
    :returns: A unix-style return code
    """
    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service, instance, cluster, soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = marathon_tools.create_complete_config(service, instance, marathon_config, soa_dir=soa_dir)['id']
        except NoDockerImageError:
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'start':
        start_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
    elif command == 'stop':
        stop_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'restart':
        restart_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
    elif command == 'status':
        # Setting up transparent cache for http API calls
        requests_cache.install_cache('paasta_serviceinit', backend='memory')

        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id, normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose:
            print status_mesos_tasks_verbose(app_id, get_short_task_id)
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose,
            )
    elif command == 'scale':
        scale_marathon_job(service, instance, app_id, delta, client, cluster)
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
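For orientation, a minimal, hypothetical invocation of this dispatcher might look like the sketch below; the service, instance, cluster, and soa_dir values are illustrative placeholders, not values taken from the examples on this page.

import sys

# Hypothetical call site: dispatch a 'status' and exit with its unix-style return code.
return_code = perform_command(
    command='status',
    service='example_service',
    instance='main',
    cluster='example_cluster',
    verbose=False,
    soa_dir='/nail/etc/services',
)
sys.exit(return_code)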
def marathon_app_task_count(context, job_id, task_count):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    client = context.marathon_client

    tasks = client.list_tasks(app_id=app_id)
    assert len(tasks) == task_count
def create_app_with_instances(context, job_id, number):
    context.job_id = job_id
    if 'app_id' not in context:
        (service, instance, _, __) = decompose_job_id(job_id)
        context.app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    set_number_instances(context, number)
    create_complete_app(context)
Example 5
def update_context_marathon_config(context):
    whitelist_keys = set(['id', 'backoff_factor', 'backoff_seconds', 'max_instances', 'mem', 'cpus', 'instances'])
    with contextlib.nested(
        # This seems to be necessary because mesos reads the config file at
        # import which is sometimes before the tests get a chance to write the
        # config file
        patch_mesos_cli_master_config(),
        mock.patch.object(SystemPaastaConfig, 'get_zk_hosts', autospec=True, return_value=context.zk_hosts),
        mock.patch.object(MarathonServiceConfig, 'get_min_instances', autospec=True, return_value=1),
        mock.patch.object(MarathonServiceConfig, 'get_max_instances', autospec=True),
    ) as (
        _,
        _,
        _,
        mock_get_max_instances,
    ):
        mock_get_max_instances.return_value = context.max_instances if 'max_instances' in context else None
        context.marathon_complete_config = {key: value for key, value in marathon_tools.create_complete_config(
            context.service,
            context.instance,
            soa_dir=context.soa_dir,
        ).items() if key in whitelist_keys}
    context.marathon_complete_config.update({
        'cmd': '/bin/sleep 1m',
        'constraints': None,
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
    })
    if 'max_instances' not in context:
        context.marathon_complete_config['instances'] = context.instances
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)["id"]

    output = marathon_serviceinit.status_marathon_job(
        service, instance, app_id, normal_instance_count, context.marathon_client
    )
    assert status in output
def run_marathon_app(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    app_config = {
        'id': app_id,
        'cmd': '/bin/sleep 1m',
    }
    with mock.patch('paasta_tools.bounce_lib.create_app_lock'):
        paasta_tools.bounce_lib.create_marathon_app(app_id, app_config, context.marathon_client)
Example 8
def paasta_serviceinit_command_appid(context, command, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']
    cmd = '../paasta_tools/paasta_serviceinit.py --soa-dir %s --appid %s %s %s' \
          % (context.soa_dir, app_id, job_id, command)
    print 'Running cmd %s' % cmd
    exit_code, output = _run(cmd)
    print 'Got exitcode %s with output:\n%s' % (exit_code, output)
    print  # sacrificial line for behave to eat instead of our output

    assert exit_code == 0
def paasta_serviceinit_command_appid(context, command, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    cmd = '../paasta_tools/paasta_serviceinit.py --soa-dir %s --appid %s %s %s' \
          % (context.soa_dir, app_id, job_id, command)
    print 'Running cmd %s' % cmd
    (exit_code, output) = _run(cmd)
    print 'Got exitcode %s with output:\n%s' % (exit_code, output)
    print  # sacrificial line for behave to eat instead of our output

    assert exit_code == 0
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(
        service, instance, None, soa_dir=context.soa_dir)['id']

    output = marathon_serviceinit.status_marathon_job(service, instance,
                                                      app_id,
                                                      normal_instance_count,
                                                      context.marathon_client)
    assert status in output
def run_marathon_app(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(
        service, instance, None, soa_dir=context.soa_dir)['id']
    app_config = {
        'id': app_id,
        'cmd': '/bin/sleep 1m',
    }
    with mock.patch('paasta_tools.bounce_lib.create_app_lock'):
        paasta_tools.bounce_lib.create_marathon_app(app_id, app_config,
                                                    context.marathon_client)
Example 12
def paasta_serviceinit_command_appid(context, command, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(
        service, instance, soa_dir=context.soa_dir)["id"]
    cmd = "python -m paasta_tools.paasta_serviceinit --soa-dir {} --appid {} {} {}".format(
        context.soa_dir, app_id, job_id, command)
    paasta_print("Running cmd %s" % cmd)
    exit_code, output = _run(cmd)
    paasta_print(f"Got exitcode {exit_code} with output:\n{output}")
    paasta_print()  # sacrificial line for behave to eat instead of our output

    assert exit_code == 0
Example 13
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']

    with requests_cache.disabled():
        output = marathon_serviceinit.status_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_client
        )
    assert status in output
Example 14
def update_context_marathon_config(context):
    whitelist_keys = set([
        'id', 'backoff_factor', 'backoff_seconds', 'max_instances', 'mem',
        'cpus', 'instances'
    ])
    with contextlib.nested(
            # This seems to be necessary because mesos reads the config file at
            # import which is sometimes before the tests get a chance to write the
            # config file
            patch_mesos_cli_master_config(),
            mock.patch.object(SystemPaastaConfig,
                              'get_zk_hosts',
                              autospec=True,
                              return_value=context.zk_hosts),
            mock.patch.object(MarathonServiceConfig,
                              'get_min_instances',
                              autospec=True,
                              return_value=1),
            mock.patch.object(MarathonServiceConfig,
                              'get_max_instances',
                              autospec=True),
    ) as (
            _,
            _,
            _,
            mock_get_max_instances,
    ):
        mock_get_max_instances.return_value = context.max_instances if 'max_instances' in context else None
        context.marathon_complete_config = {
            key: value
            for key, value in marathon_tools.create_complete_config(
                context.service,
                context.instance,
                soa_dir=context.soa_dir,
            ).items() if key in whitelist_keys
        }
    context.marathon_complete_config.update({
        'cmd': '/bin/sleep 1m',
        'constraints': None,
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
    })
    if 'max_instances' not in context:
        context.marathon_complete_config['instances'] = context.instances
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(
        service, instance, soa_dir=context.soa_dir)['id']

    with requests_cache.disabled():
        output = marathon_serviceinit.status_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_clients.current[0],
        )
    assert status in output
Example 16
def setup_service(service, instance, client, marathon_config,
                  service_marathon_config, soa_dir):
    """Setup the service instance given and attempt to deploy it, if possible.
    Doesn't do anything if the service is already in Marathon and hasn't changed.
    If it's not, attempt to find old instances of the service and bounce them.

    :param service: The service name to setup
    :param instance: The instance of the service to setup
    :param client: A MarathonClient object
    :param marathon_config: The marathon configuration dict
    :param service_marathon_config: The service instance's configuration dict
    :returns: A tuple of (status, output) to be used with send_sensu_event"""

    log.info("Setting up instance %s for service %s", instance, service)
    try:
        complete_config = marathon_tools.create_complete_config(
            service, instance, marathon_config)
    except NoDockerImageError:
        error_msg = (
            "Docker image for {0}.{1} not in deployments.json. Exiting. Has Jenkins deployed it?\n"
        ).format(
            service,
            instance,
        )
        log.error(error_msg)
        return (1, error_msg)

    full_id = complete_config['id']
    service_namespace_config = marathon_tools.load_service_namespace_config(
        service, instance)

    log.info("Desired Marathon instance id: %s", full_id)
    return deploy_service(
        service=service,
        instance=instance,
        marathon_jobid=full_id,
        config=complete_config,
        client=client,
        bounce_method=service_marathon_config.get_bounce_method(),
        drain_method_name=service_marathon_config.get_drain_method(
            service_namespace_config),
        drain_method_params=service_marathon_config.get_drain_method_params(
            service_namespace_config),
        nerve_ns=service_marathon_config.get_nerve_namespace(),
        bounce_health_params=service_marathon_config.get_bounce_health_params(
            service_namespace_config),
        soa_dir=soa_dir,
    )
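As a hedged sketch (not a call site taken from the source), setup_service might be wired together with the same loaders used in the other examples on this page; the service, instance, and cluster names here are placeholders.

# Assumed wiring, mirroring the loaders seen in the perform_command examples above.
marathon_config = marathon_tools.load_marathon_config()
client = marathon_tools.get_marathon_client(
    marathon_config.get_url(),
    marathon_config.get_username(),
    marathon_config.get_password(),
)
# Placeholder service/instance/cluster; soa_dir mirrors the soa_dir arguments used elsewhere.
service_marathon_config = marathon_tools.load_marathon_service_config(
    'example_service', 'main', 'example_cluster', soa_dir='/nail/etc/services',
)
status, output = setup_service(
    service='example_service',
    instance='main',
    client=client,
    marathon_config=marathon_config,
    service_marathon_config=service_marathon_config,
    soa_dir='/nail/etc/services',
)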
Example 17
def run_marathon_app(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']
    app_config = {
        'id': app_id,
        'cmd': '/bin/sleep 1m',
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
    }
    with mock.patch('paasta_tools.bounce_lib.create_app_lock'):
        paasta_tools.bounce_lib.create_marathon_app(app_id, app_config, context.marathon_client)
def run_marathon_app(context, job_id, instances):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']
    app_config = {
        'id': app_id,
        'cmd': '/bin/sleep 1m',
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
        'instances': instances,
        'constraints': [["hostname", "UNIQUE"]],
    }
    paasta_tools.bounce_lib.create_marathon_app(app_id, app_config, context.marathon_client)
Example 19
def marathon_restart_gets_new_task_ids(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)["id"]
    normal_instance_count = 1
    cluster = context.system_paasta_config["cluster"]

    old_tasks = context.marathon_client.get_app(app_id).tasks
    marathon_serviceinit.restart_marathon_job(
        service, instance, app_id, normal_instance_count, context.marathon_client, cluster
    )
    print "Sleeping 5 seconds to wait for %s to be restarted." % service
    time.sleep(5)
    new_tasks = context.marathon_client.get_app(app_id).tasks
    print "Tasks before the restart: %s" % old_tasks
    print "Tasks after  the restart: %s" % new_tasks
    print  # sacrificial line for behave to eat instead of our output
    assert old_tasks != new_tasks
Example 20
def marathon_restart_gets_new_task_ids(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(
        service, instance, None, soa_dir=context.soa_dir)['id']
    normal_instance_count = 1
    cluster = context.system_paasta_config['cluster']

    old_tasks = context.marathon_client.get_app(app_id).tasks
    marathon_serviceinit.restart_marathon_job(service, instance, app_id,
                                              normal_instance_count,
                                              context.marathon_client, cluster)
    print "Sleeping 5 seconds to wait for %s to be restarted." % service
    time.sleep(5)
    new_tasks = context.marathon_client.get_app(app_id).tasks
    print "Tasks before the restart: %s" % old_tasks
    print "Tasks after  the restart: %s" % new_tasks
    print  # sacrificial line for behave to eat instead of our output
    assert old_tasks != new_tasks
Example 21
def setup_service(service, instance, client, marathon_config,
                  service_marathon_config, soa_dir):
    """Setup the service instance given and attempt to deploy it, if possible.
    Doesn't do anything if the service is already in Marathon and hasn't changed.
    If it's not, attempt to find old instances of the service and bounce them.

    :param service: The service name to setup
    :param instance: The instance of the service to setup
    :param client: A MarathonClient object
    :param marathon_config: The marathon configuration dict
    :param service_marathon_config: The service instance's configuration dict
    :returns: A tuple of (status, output) to be used with send_sensu_event"""

    log.info("Setting up instance %s for service %s", instance, service)
    try:
        complete_config = marathon_tools.create_complete_config(service, instance, marathon_config)
    except NoDockerImageError:
        error_msg = (
            "Docker image for {0}.{1} not in deployments.json. Exiting. Has Jenkins deployed it?\n"
        ).format(
            service,
            instance,
        )
        log.error(error_msg)
        return (1, error_msg)

    full_id = complete_config['id']
    service_namespace_config = marathon_tools.load_service_namespace_config(service, instance)

    log.info("Desired Marathon instance id: %s", full_id)
    return deploy_service(
        service=service,
        instance=instance,
        marathon_jobid=full_id,
        config=complete_config,
        client=client,
        bounce_method=service_marathon_config.get_bounce_method(),
        drain_method_name=service_marathon_config.get_drain_method(service_namespace_config),
        drain_method_params=service_marathon_config.get_drain_method_params(service_namespace_config),
        nerve_ns=service_marathon_config.get_nerve_namespace(),
        bounce_health_params=service_marathon_config.get_bounce_health_params(service_namespace_config),
        soa_dir=soa_dir,
    )
Example 22
def update_context_marathon_config(context):
    whitelist_keys = set(['id', 'backoff_factor', 'backoff_seconds', 'max_instances', 'mem', 'cpus', 'instances'])
    with contextlib.nested(
        mock.patch.object(SystemPaastaConfig, 'get_zk_hosts', autospec=True, return_value=context.zk_hosts),
        mock.patch.object(MarathonServiceConfig, 'get_min_instances', autospec=True, return_value=context.instances),
        mock.patch.object(MarathonServiceConfig, 'get_max_instances', autospec=True),
    ) as (
        _,
        _,
        mock_get_max_instances,
    ):
        mock_get_max_instances.return_value = context.max_instances if 'max_instances' in context else None
        context.marathon_complete_config = {key: value for key, value in marathon_tools.create_complete_config(
            context.service,
            context.instance,
            soa_dir=context.soa_dir,
        ).items() if key in whitelist_keys}
    context.marathon_complete_config.update({
        'cmd': '/bin/sleep 1m',
        'constraints': None,
    })
Example 23
def update_context_marathon_config(context):
    whitelist_keys = set([
        'id', 'backoff_factor', 'backoff_seconds', 'max_instances', 'mem',
        'cpus', 'instances'
    ])
    with contextlib.nested(
            mock.patch.object(MarathonServiceConfig,
                              'get_min_instances',
                              autospec=True,
                              return_value=1),
            mock.patch.object(MarathonServiceConfig,
                              'get_max_instances',
                              autospec=True),
    ) as (
            _,
            mock_get_max_instances,
    ):
        mock_get_max_instances.return_value = context.max_instances if 'max_instances' in context else None
        context.marathon_complete_config = {
            key: value
            for key, value in marathon_tools.create_complete_config(
                context.service,
                context.instance,
                soa_dir=context.soa_dir,
            ).items() if key in whitelist_keys
        }
    context.marathon_complete_config.update({
        'cmd': '/bin/sleep 1m',
        'constraints': None,
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'BRIDGE',
                'image': 'busybox',
            },
        },
    })
    if 'max_instances' not in context:
        context.marathon_complete_config['instances'] = context.instances
Example 24
def marathon_restart_gets_new_task_ids(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']
    normal_instance_count = 1
    cluster = context.system_paasta_config['cluster']

    old_tasks = context.marathon_client.get_app(app_id).tasks
    with mock.patch('paasta_tools.marathon_serviceinit._log', autospec=True):
        marathon_serviceinit.restart_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_client,
            cluster
        )
    paasta_print("Sleeping 5 seconds to wait for %s to be restarted." % service)
    time.sleep(5)
    new_tasks = context.marathon_client.get_app(app_id).tasks
    paasta_print("Tasks before the restart: %s" % old_tasks)
    paasta_print("Tasks after  the restart: %s" % new_tasks)
    paasta_print()  # sacrificial line for behave to eat instead of our output
    assert old_tasks != new_tasks
def marathon_restart_gets_new_task_ids(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']
    normal_instance_count = 1
    cluster = context.system_paasta_config.get_cluster()

    old_tasks = context.marathon_client.get_app(app_id).tasks
    with mock.patch('paasta_tools.marathon_serviceinit._log', autospec=True):
        marathon_serviceinit.restart_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_client,
            cluster,
        )
    paasta_print("Sleeping 5 seconds to wait for %s to be restarted." % service)
    time.sleep(5)
    new_tasks = context.marathon_client.get_app(app_id).tasks
    paasta_print("Tasks before the restart: %s" % old_tasks)
    paasta_print("Tasks after  the restart: %s" % new_tasks)
    paasta_print()  # sacrificial line for behave to eat instead of our output
    assert old_tasks != new_tasks
Example 26
def wait_launch_tasks(context, job_id, task_count):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    client = context.marathon_client
    marathon_tools.wait_for_app_to_launch_tasks(client, app_id, task_count, exact_matches_only=True)
Example 27
def main():
    args = parse_args()
    full_appid = args.appname.lstrip('/')
    soa_dir = args.soa_dir
    marathon_config = marathon_tools.load_marathon_config()
    client = marathon_tools.get_marathon_client(
        url=marathon_config.get_url(),
        user=marathon_config.get_username(),
        passwd=marathon_config.get_password(),
    )

    if not marathon_tools.is_app_id_running(app_id=full_appid, client=client):
        print("Couldn't find an app named {0}".format(full_appid))
        sys.exit(1)

    service, instance, _, __ = (s.replace('--', '_')
                                for s in decompose_job_id(full_appid))
    complete_config = marathon_tools.create_complete_config(
        service, instance, marathon_config)
    cluster = load_system_paasta_config().get_cluster()
    service_instance_config = marathon_tools.load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    nerve_ns = service_instance_config.get_nerve_namespace()
    service_namespace_config = marathon_tools.load_service_namespace_config(
        service=service, namespace=nerve_ns)
    drain_method = drain_lib.get_drain_method(
        service_instance_config.get_drain_method(service_namespace_config),
        service=service,
        instance=instance,
        nerve_ns=nerve_ns,
        drain_method_params=service_instance_config.get_drain_method_params(
            service_namespace_config),
    )

    bounce_func = bounce_lib.get_bounce_method_func('down')

    while marathon_tools.is_app_id_running(app_id=full_appid, client=client):
        app_to_kill = client.get_app(full_appid)
        old_app_live_tasks, old_app_draining_tasks = get_old_live_draining_tasks(
            [app_to_kill], drain_method)
        do_bounce(
            bounce_func=bounce_func,
            drain_method=drain_method,
            config=complete_config,
            new_app_running='',
            happy_new_tasks=[],
            old_app_live_tasks=old_app_live_tasks,
            old_app_draining_tasks=old_app_draining_tasks,
            serviceinstance="{0}.{1}".format(service, instance),
            bounce_method='down',
            service=service,
            cluster=cluster,
            instance=instance,
            marathon_jobid=full_appid,
            client=client,
            soa_dir=soa_dir,
        )

        print "Sleeping for 10 seconds to give the tasks time to drain"
        time.sleep(10)

    print("Sucessfully killed {0}".format(full_appid))
Example 28
def marathon_app_task_count(context, job_id, task_count):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']

    tasks = context.marathon_client.get_app(app_id).tasks
    assert len(tasks) == task_count
Example 29
def wait_launch_tasks(context, job_id, task_count):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)['id']
    client = context.marathon_client
    marathon_tools.wait_for_app_to_launch_tasks(client, app_id, task_count, exact_matches_only=True)
Example 30
def marathon_app_task_count(context, job_id, task_count):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)["id"]

    tasks = context.marathon_client.get_app(app_id).tasks
    assert len(tasks) == task_count
Example 31
def run_marathon_app(context, job_id):
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, None, soa_dir=context.soa_dir)["id"]
    app_config = {"id": app_id, "cmd": "/bin/sleep 1m"}
    with mock.patch("paasta_tools.bounce_lib.create_app_lock"):
        paasta_tools.bounce_lib.create_marathon_app(app_id, app_config, context.marathon_client)
Example 32
def perform_command(command,
                    service,
                    instance,
                    cluster,
                    verbose,
                    soa_dir,
                    app_id=None,
                    delta=None):
    """Performs a start/stop/restart/status/scale on an instance
    :param command: String of start, stop, restart, status or scale
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: bool if the output should be verbose or not
    :returns: A unix-style return code
    """
    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service,
                                                             instance,
                                                             cluster,
                                                             soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = marathon_tools.create_complete_config(
                service, instance, marathon_config, soa_dir=soa_dir)['id']
        except NoDockerImageError:
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service,
                                                            instance,
                                                            soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(),
                                                marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'start':
        start_marathon_job(service, instance, app_id, normal_instance_count,
                           client, cluster)
    elif command == 'stop':
        stop_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'restart':
        restart_marathon_job(service, instance, app_id, normal_instance_count,
                             client, cluster)
    elif command == 'status':
        # Setting up transparent cache for http API calls
        requests_cache.install_cache('paasta_serviceinit', backend='memory')

        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id,
                                  normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose:
            print status_mesos_tasks_verbose(app_id, get_short_task_id)
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose,
            )
    elif command == 'scale':
        scale_marathon_job(service, instance, app_id, delta, client, cluster)
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
Example 33
def main():
    args = parse_args()
    full_appid = args.appname.lstrip('/')
    soa_dir = args.soa_dir
    marathon_config = marathon_tools.load_marathon_config()
    client = marathon_tools.get_marathon_client(
        url=marathon_config.get_url(),
        user=marathon_config.get_username(),
        passwd=marathon_config.get_password(),
    )

    if not marathon_tools.is_app_id_running(app_id=full_appid, client=client):
        print("Couldn't find an app named {0}".format(full_appid))
        sys.exit(1)

    service, instance, _, __ = (s.replace('--', '_') for s in decompose_job_id(full_appid))
    complete_config = marathon_tools.create_complete_config(service, instance, marathon_config)
    cluster = load_system_paasta_config().get_cluster()
    service_instance_config = marathon_tools.load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    nerve_ns = service_instance_config.get_nerve_namespace()
    service_namespace_config = marathon_tools.load_service_namespace_config(service=service, namespace=nerve_ns)
    drain_method = drain_lib.get_drain_method(
        service_instance_config.get_drain_method(service_namespace_config),
        service=service,
        instance=instance,
        nerve_ns=nerve_ns,
        drain_method_params=service_instance_config.get_drain_method_params(service_namespace_config),
    )

    bounce_func = bounce_lib.get_bounce_method_func('down')

    while marathon_tools.is_app_id_running(app_id=full_appid, client=client):
        app_to_kill = client.get_app(full_appid)
        old_app_live_tasks, old_app_draining_tasks = get_old_live_draining_tasks([app_to_kill], drain_method)
        do_bounce(
            bounce_func=bounce_func,
            drain_method=drain_method,
            config=complete_config,
            new_app_running='',
            happy_new_tasks=[],
            old_app_live_tasks=old_app_live_tasks,
            old_app_draining_tasks=old_app_draining_tasks,
            serviceinstance="{0}.{1}".format(service, instance),
            bounce_method='down',
            service=service,
            cluster=cluster,
            instance=instance,
            marathon_jobid=full_appid,
            client=client,
            soa_dir=soa_dir,
        )

        print "Sleeping for 10 seconds to give the tasks time to drain"
        time.sleep(10)

    print("Sucessfully killed {0}".format(full_appid))