Пример #1
0
    def get_registrations(self):
        """Return the smartstack registrations configured for this instance.

        Every entry in the ``registrations`` config list is validated with
        ``decompose_job_id``; invalid entries are logged but still kept.
        If no registrations are configured, fall back to the deprecated
        ``nerve_ns`` key, and finally to this instance's own job id.
        """
        registrations = self.config_dict.get("registrations", [])
        for registration in registrations:
            try:
                decompose_job_id(registration)
            except InvalidJobNameError:
                log.error(
                    "Provided registration {0} for service {1} is invalid".format(
                        registration, self.service
                    )
                )

        # Backwards compatibility with nerve_ns
        # FIXME(jlynch|2016-08-02, PAASTA-4964): DEPRECATE nerve_ns and remove it
        if not registrations and "nerve_ns" in self.config_dict:
            nerve_registration = compose_job_id(self.service, self.config_dict["nerve_ns"])
            registrations.append(nerve_registration)

        if registrations:
            return registrations
        return [compose_job_id(self.service, self.instance)]
def send_event(service, instance, soa_dir, status, output):
    """Send an event to sensu via pysensu_yelp with the given information.

    :param service: The service name the event is about
    :param instance: The instance of the service the event is about
    :param soa_dir: The service directory to read monitoring information from
    :param status: The status to emit for this event
    :param output: The output to emit for this event
    """
    cluster = load_system_paasta_config().get_cluster()
    overrides = chronos_tools.load_chronos_job_config(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
    ).get_monitoring()
    # Our cron job fires every 10s; tell sensu to expect that cadence so it
    # knows when the check has gone missing.
    overrides['check_every'] = '10s'
    # Most deploy_chronos_jobs failures are transient and usually resolve on
    # their own, so delay alerting to suppress extra noise.
    overrides['alert_after'] = '10m'
    monitoring_tools.send_event(
        service=service,
        check_name='setup_chronos_job.%s' % compose_job_id(service, instance),
        overrides=overrides,
        status=status,
        output=output,
        soa_dir=soa_dir,
    )
def send_event(service, namespace, cluster, soa_dir, status, output):
    """Send an event to sensu via pysensu_yelp with the given information.

    :param service: The service name the event is about
    :param namespace: The namespace of the service the event is about
    :param cluster: The cluster the service runs in
    :param soa_dir: The service directory to read monitoring information from
    :param status: The status to emit for this event
    :param output: The output to emit for this event
    """
    # This function assumes the input is a string like "mumble.main"
    overrides = marathon_tools.load_marathon_service_config(service, namespace, cluster).get_monitoring()
    # Honor a caller-configured alert_after; only fill in the default.
    overrides.setdefault("alert_after", "2m")
    overrides["check_every"] = "1m"
    overrides["runbook"] = monitoring_tools.get_runbook(overrides, service, soa_dir=soa_dir)

    check_name = "check_marathon_services_replication.%s" % compose_job_id(service, namespace)
    monitoring_tools.send_event(service, check_name, overrides, status, output, soa_dir)
    _log(
        service=service,
        line="Replication: %s" % output,
        component="monitoring",
        level="debug",
        cluster=cluster,
        instance=namespace,
    )
Пример #4
0
def paasta_emergency_stop(args):
    """Performs an emergency stop on a given service instance on a given cluster

    Warning: This command does not permanently stop the service. The next time the service is updated
    (config change, deploy, bounce, etc.), those settings will override the emergency stop.

    If you want this stop to be permanant, adjust the relevant config file to reflect that.
    For example, this can be done for Marathon apps by setting 'instances: 0', or
    for Chronos jobs by setting 'disabled: True'. Alternatively, remove the config yaml entirely.
    """
    system_paasta_config = load_system_paasta_config()
    service = figure_out_service_name(args, soa_dir=args.soa_dir)
    print "Performing an emergency stop on %s..." % compose_job_id(service, args.instance)
    # The stop itself is executed remotely on the cluster's master via ssh.
    output = execute_paasta_serviceinit_on_remote_master(
        subcommand="stop",
        cluster=args.cluster,
        service=service,
        instances=args.instance,
        system_paasta_config=system_paasta_config,
        app_id=args.appid,
    )
    print "Output: %s" % output
    # Re-print the last 7 lines of this function's own docstring so the
    # operator sees the "not permanent" warning at runtime.  NOTE: editing
    # the docstring above changes this printed output.
    print "%s" % "\n".join(paasta_emergency_stop.__doc__.splitlines()[-7:])
    print "To start this service again asap, run:"
    print "paasta emergency-start --service %s --instance %s --cluster %s" % (service, args.instance, args.cluster)
def send_event(service, namespace, cluster, soa_dir, status, output):
    """Send an event to sensu via pysensu_yelp with the given information.

    :param service: The service name the event is about
    :param namespace: The namespace of the service the event is about
    :param cluster: The cluster the service runs in
    :param soa_dir: The service directory to read monitoring information from
    :param status: The status to emit for this event
    :param output: The output to emit for this event
    """
    # This function assumes the input is a string like "mumble.main"
    job_config = marathon_tools.load_marathon_service_config(
        service=service,
        instance=namespace,
        cluster=cluster,
        soa_dir=soa_dir,
        load_deployments=False,
    )
    overrides = job_config.get_monitoring()
    # Honor a caller-configured alert_after; only fill in the default.
    if 'alert_after' not in overrides:
        overrides['alert_after'] = '2m'
    overrides['check_every'] = '1m'
    overrides['runbook'] = monitoring_tools.get_runbook(overrides, service, soa_dir=soa_dir)

    check_name = 'check_marathon_services_replication.%s' % compose_job_id(service, namespace)
    monitoring_tools.send_event(service, check_name, overrides, status, output, soa_dir)
    _log(
        service=service,
        line='Replication: %s' % output,
        component='monitoring',
        level='debug',
        cluster=cluster,
        instance=namespace,
    )
def get_smartstack_replication_for_attribute(attribute, service, namespace, blacklist):
    """Loads smartstack replication from a host with the specified attribute

    :param attribute: a Mesos attribute
    :param service: A service name, like 'example_service'
    :param namespace: A particular smartstack namespace to inspect, like 'main'
    :param blacklist: A list of blacklisted location tuples in the form of (location, value)
    :returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
              (the dictionary will contain keys for all unique attribute values)
    """
    replication_info = {}
    unique_values = mesos_tools.get_mesos_slaves_grouped_by_attribute(attribute=attribute, blacklist=blacklist)
    full_name = compose_job_id(service, namespace)

    # NOTE: .iteritems() is Python 2 only; this module predates a py3 port.
    for value, hosts in unique_values.iteritems():
        # arbitrarily choose the first host with a given attribute to query for replication stats
        synapse_host = hosts[0]
        repl_info = replication_utils.get_replication_for_services(
            synapse_host=synapse_host,
            synapse_port=smartstack_tools.DEFAULT_SYNAPSE_PORT,
            services=[full_name],
        )
        replication_info[value] = repl_info

    return replication_info
 def test_send_bounce_keepalive(self):
     """send_sensu_bounce_keepalive must emit a single OK event carrying a
     1h ttl and load the marathon config without deployments."""
     fake_service = "fake_service"
     fake_instance = "fake_instance"
     fake_cluster = "fake_cluster"
     fake_soa_dir = ""
     expected_check_name = "paasta_bounce_progress.%s" % compose_job_id(fake_service, fake_instance)
     with contextlib.nested(
         mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True),
         mock.patch("paasta_tools.marathon_tools.load_marathon_service_config", autospec=True),
     ) as (send_event_patch, load_marathon_service_config_patch):
         # No monitoring overrides configured for this service.
         load_marathon_service_config_patch.return_value.get_monitoring.return_value = {}
         setup_marathon_job.send_sensu_bounce_keepalive(
             service=fake_service, instance=fake_instance, cluster=fake_cluster, soa_dir=fake_soa_dir
         )
         # The keepalive is an OK-status event with a ttl attached.
         send_event_patch.assert_called_once_with(
             service=fake_service,
             check_name=expected_check_name,
             overrides=mock.ANY,
             status=0,
             output=mock.ANY,
             soa_dir=fake_soa_dir,
             ttl="1h",
         )
         load_marathon_service_config_patch.assert_called_once_with(
             service=fake_service, instance=fake_instance, cluster=fake_cluster, load_deployments=False
         )
    def test_send_event(self):
        """setup_marathon_job.send_event must forward to
        monitoring_tools.send_event with the setup_marathon_job check name
        and the 10s/10m monitoring override cadence."""
        fake_service = "fake_service"
        fake_instance = "fake_instance"
        fake_status = "42"
        fake_output = "The http port is not open"
        fake_soa_dir = ""
        expected_check_name = "setup_marathon_job.%s" % compose_job_id(fake_service, fake_instance)
        with contextlib.nested(
            mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True),
            mock.patch("paasta_tools.marathon_tools.load_marathon_service_config", autospec=True),
            mock.patch("paasta_tools.setup_marathon_job.load_system_paasta_config", autospec=True),
        ) as (send_event_patch, load_marathon_service_config_patch, load_system_paasta_config_patch):
            load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value="fake_cluster")
            # No monitoring overrides configured for this service.
            load_marathon_service_config_patch.return_value.get_monitoring.return_value = {}

            setup_marathon_job.send_event(fake_service, fake_instance, fake_soa_dir, fake_status, fake_output)

            send_event_patch.assert_called_once_with(
                fake_service,
                expected_check_name,
                {"alert_after": "10m", "check_every": "10s"},
                fake_status,
                fake_output,
                fake_soa_dir,
            )
            # The cluster must come from the system paasta config, not a literal.
            load_marathon_service_config_patch.assert_called_once_with(
                fake_service,
                fake_instance,
                load_system_paasta_config_patch.return_value.get_cluster.return_value,
                load_deployments=False,
            )
def test_check_smartstack_replication_for_instance_crit_when_absent():
    """A CRITICAL event must be sent when the instance's registration is
    entirely absent from the smartstack replication info."""
    service = 'test'
    instance = 'some_absent_instance'
    cluster = 'fake_cluster'
    # Replication info contains other instances of this service, but not ours.
    available = {'fake_region': {'test.two': 1, 'test.three': 4, 'test.four': 8}}
    expected_replication_count = 8
    soa_dir = 'test_dir'
    fake_system_paasta_config = SystemPaastaConfig({}, '/fake/config')
    crit = 90
    with contextlib.nested(
        mock.patch('paasta_tools.check_marathon_services_replication.send_event', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_registration_for_service_instance',
                   autospec=True, return_value=compose_job_id(service, instance)),
        mock.patch('paasta_tools.check_marathon_services_replication.load_smartstack_info_for_service', autospec=True),
        mock.patch('paasta_tools.marathon_tools.load_marathon_service_config', autospec=True)
    ) as (
        mock_send_event,
        mock_read_registration_for_service_instance,
        mock_load_smartstack_info_for_service,
        mock_load_marathon_service_config,
    ):
        mock_load_smartstack_info_for_service.return_value = available
        mock_service_job_config = mock.MagicMock(spec_set=MarathonServiceConfig)
        mock_service_job_config.get_replication_crit_percentage.return_value = crit
        mock_load_marathon_service_config.return_value = mock_service_job_config
        check_marathon_services_replication.check_smartstack_replication_for_instance(
            service, instance, cluster, soa_dir, expected_replication_count, fake_system_paasta_config,
        )
        # Absent registration => CRITICAL, regardless of the exact output text.
        mock_send_event.assert_called_once_with(
            service=service,
            namespace=instance,
            cluster=cluster,
            soa_dir=soa_dir,
            status=pysensu_yelp.Status.CRITICAL,
            output=mock.ANY)
def test_status_smartstack_backends_no_smartstack_replication_info():
    """status_smartstack_backends must report the instance as NOT in
    smartstack when no mesos slaves match the discovery attribute."""
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    tasks = mock.Mock()
    normal_count = 10
    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_namespace_for_service_instance'),
        mock.patch('paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'),
    ) as (
        mock_load_service_namespace_config,
        mock_read_ns,
        mock_get_mesos_slaves_grouped_by_attribute,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        # No slaves grouped by the discover attribute => no replication info.
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {}
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=normal_count,
            soa_dir=None,
            verbose=False,
        )
        assert "%s is NOT in smartstack" % service_instance in actual
Пример #11
0
def perform_command(command, service, instance, cluster, verbose, soa_dir, app_id=None, delta=None):
    """Performs a start/stop/restart/status/scale on an instance
    :param command: String of start, stop, restart, status or scale
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: bool if the output should be verbose or not
    :param soa_dir: SOA configuration directory to read from
    :param app_id: Marathon app id; derived from the complete config when omitted
    :param delta: instance-count delta, used only by the 'scale' command
    :returns: A unix-style return code
    """
    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service, instance, cluster, soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = marathon_tools.create_complete_config(service, instance, marathon_config, soa_dir=soa_dir)['id']
        except NoDockerImageError:
            # Without a docker image in deployments.json we can't compute an
            # app id, so bail with a non-zero return code.
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'start':
        start_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
    elif command == 'stop':
        stop_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'restart':
        restart_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
    elif command == 'status':
        # Setting up transparent cache for http API calls
        requests_cache.install_cache('paasta_serviceinit', backend='memory')

        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id, normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose:
            print status_mesos_tasks_verbose(app_id, get_short_task_id)
        # Only services registered in smartstack have a proxy port.
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose,
            )
    elif command == 'scale':
        scale_marathon_job(service, instance, app_id, delta, client, cluster)
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
Пример #12
0
def perform_command(command, service, instance, cluster, verbose, soa_dir, app_id=None, delta=None):
    """Performs a start/stop/restart/status on an instance
    :param command: String of start, stop, restart, status
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: int verbosity level
    :param soa_dir: SOA configuration directory to read from
    :param app_id: Marathon app id; derived from the job config when omitted
    :param delta: accepted for signature compatibility; not used by this variant
    :returns: A unix-style return code
    """
    system_config = load_system_paasta_config()

    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service, instance, cluster, soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = job_config.format_marathon_app_dict()['id']
        except NoDockerImageError:
            # Without a docker image in deployments.json we can't compute an
            # app id, so bail with a non-zero return code.
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance, cluster)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, cluster, soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'restart':
        restart_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'status':
        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id, normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose > 0:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose > 0:
            # Higher verbosity tails more log lines from the mesos tasks.
            tail_lines = calculate_tail_lines(verbose_level=verbose)
            print status_mesos_tasks_verbose(
                job_id=app_id,
                get_short_task_id=get_short_task_id,
                tail_lines=tail_lines,
            )
        # Only services registered in smartstack have a proxy port.
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose > 0,
                synapse_port=system_config.get_synapse_port(),
                synapse_haproxy_url_format=system_config.get_synapse_haproxy_url_format(),
            )
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
Пример #13
0
def send_sensu_bounce_keepalive(service, instance, cluster, soa_dir):
    """Emit an OK Sensu event carrying a ttl, as a heartbeat that the bounce
    is healthy.  This event is deliberately **not** fired while a bounce is
    still in progress.

    If a bounce stalls for too long the heartbeat stops, the ttl expires, and
    Sensu raises its own event saying this check failed to report in within
    the expected time-to-live."""
    overrides = marathon_tools.load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
        load_deployments=False,
    ).get_monitoring()
    overrides.update({
        # Sensu currently emits events for expired ttl checks every 30s
        'check_every': '30s',
        'alert_after': '2m',
        'runbook': 'http://y/paasta-troubleshooting',
        'tip': ("Check out `paasta logs`. If the bounce hasn't made progress, "
                "it may mean that the new version isn't healthy."),
        # Dogfooding this alert till I'm comfortable it doesn't spam people
        'team': 'noop',
        'notification_email': '*****@*****.**',
    })

    monitoring_tools.send_event(
        service=service,
        check_name='paasta_bounce_progress.%s' % compose_job_id(service, instance),
        overrides=overrides,
        status=pysensu_yelp.Status.OK,
        output="The bounce is in a steady state",
        soa_dir=soa_dir,
        ttl='1h',
    )
def send_event_if_under_replication(
    service,
    instance,
    cluster,
    expected_count,
    num_available,
    soa_dir,
):
    """Emit a replication sensu event for the instance: CRITICAL when the
    available count falls below the service's configured crit percentage of
    the expected count, OK otherwise.  The message is also logged locally.
    """
    full_name = compose_job_id(service, instance)
    job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
    crit_threshold = job_config.get_replication_crit_percentage()
    output = (
        'Service %s has %d out of %d expected instances available!\n'
        '(threshold: %d%%)'
    ) % (full_name, num_available, expected_count, crit_threshold)
    under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
    if under_replicated:
        log.error(output)
    else:
        log.info(output)
    status = pysensu_yelp.Status.CRITICAL if under_replicated else pysensu_yelp.Status.OK
    send_event(
        service=service,
        namespace=instance,
        cluster=cluster,
        soa_dir=soa_dir,
        status=status,
        output=output)
Пример #15
0
    def test_send_event(self):
        """setup_chronos_job.send_event must forward to
        monitoring_tools.send_event with the setup_chronos_job check name
        and the 10s/10m monitoring overrides, loading the job config for the
        cluster from the system paasta config."""
        fake_status = "42"
        fake_output = "something went wrong"
        fake_soa_dir = ""
        expected_check_name = "setup_chronos_job.%s" % compose_job_id(self.fake_service, self.fake_instance)
        with contextlib.nested(
            mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True),
            mock.patch("paasta_tools.chronos_tools.load_chronos_job_config", autospec=True),
            mock.patch("setup_chronos_job.load_system_paasta_config", autospec=True),
        ) as (mock_send_event, mock_load_chronos_job_config, mock_load_system_paasta_config):
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value="fake_cluster")
            # No monitoring overrides configured for this job.
            mock_load_chronos_job_config.return_value.get_monitoring.return_value = {}

            setup_chronos_job.send_event(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=fake_soa_dir,
                status=fake_status,
                output=fake_output,
            )
            mock_send_event.assert_called_once_with(
                service=self.fake_service,
                check_name=expected_check_name,
                overrides={"alert_after": "10m", "check_every": "10s"},
                status=fake_status,
                output=fake_output,
                soa_dir=fake_soa_dir,
            )
            # The cluster must come from the system paasta config, not a literal.
            mock_load_chronos_job_config.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=mock_load_system_paasta_config.return_value.get_cluster.return_value,
                soa_dir=fake_soa_dir,
            )
Пример #16
0
def get_marathon_services_running_here_for_nerve(cluster, soa_dir):
    """Build the (nerve_name, nerve_config) pairs for every marathon service
    on this host that is registered in smartstack.

    Falls back to the system paasta config for the cluster name; if neither a
    cluster nor a paasta configuration exists at all, no marathon services can
    be running here, so an empty list is returned.
    """
    if not cluster:
        try:
            cluster = load_system_paasta_config().get_cluster()
        except (PaastaNotConfiguredError):
            # No cluster configured (or no paasta config file at all) means
            # there are no marathon services here.
            return []
    nerve_list = []
    # When a cluster is defined in mesos, iterate through marathon services.
    for name, instance, port in marathon_services_running_here():
        try:
            namespace = read_namespace_for_service_instance(name, instance, cluster, soa_dir)
            nerve_dict = load_service_namespace_config(name, namespace, soa_dir)
            if not nerve_dict.is_in_smartstack():
                continue
            nerve_dict['port'] = port
            nerve_list.append((compose_job_id(name, namespace), nerve_dict))
        except KeyError:
            continue  # SOA configs got deleted for this app, it'll get cleaned up
    return nerve_list
Пример #17
0
def test_status_smartstack_backends_no_smartstack_replication_info():
    """status_smartstack_backends must report the instance as NOT in
    smartstack when no slaves match the blacklist/whitelist filters."""
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    tasks = mock.Mock()
    normal_count = 10
    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_registration_for_service_instance', autospec=True),
        mock.patch('paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist', autospec=True),
    ) as (
        mock_load_service_namespace_config,
        mock_read_reg,
        mock_get_all_slaves_for_blacklist_whitelist,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_reg.return_value = service_instance
        # No slaves pass the blacklist/whitelist => no replication info.
        mock_get_all_slaves_for_blacklist_whitelist.return_value = {}
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=normal_count,
            soa_dir=None,
            verbose=False,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        assert "%s is NOT in smartstack" % service_instance in actual
Пример #18
0
def list_service_instances():
    """Return a list of service<SPACER>instance job ids for every instance of
    every service.

    NOTE: no sorting is applied here (the original docstring claimed a sorted
    list, but the code never sorted); ordering follows whatever
    list_services() and list_all_instances_for_service() yield.
    """
    # Flatten (service, instance) pairs into composed job ids.
    return [
        compose_job_id(service, instance)
        for service in list_services()
        for instance in list_all_instances_for_service(service)
    ]
Пример #19
0
def run_paasta_serviceinit(subcommand, master, service, instancename, cluster, **kwargs):
    """Run 'paasta_serviceinit <subcommand>'. Return the output from running it."""
    # Verbose runs can take much longer, so they get a bigger timeout.
    if kwargs.get('verbose'):
        verbose_flag, timeout = "-v ", 240
    else:
        verbose_flag, timeout = '', 60
    app_id_flag = "--appid %s " % kwargs['app_id'] if kwargs.get('app_id') else ''
    delta = "--delta %s" % kwargs['delta'] if kwargs.get('delta') else ''
    command = 'ssh -A -n %s sudo paasta_serviceinit %s%s%s %s %s' % (
        master,
        verbose_flag,
        app_id_flag,
        compose_job_id(service, instancename),
        subcommand,
        delta
    )
    log.debug("Running Command: %s" % command)
    _, output = _run(command, timeout=timeout)
    return output
def test_send_event_users_monitoring_tools_send_event_respects_alert_after():
    """send_event must pass a caller-configured alert_after through to
    monitoring_tools.send_event instead of clobbering it with the 2m default."""
    fake_service_name = "superfast"
    fake_namespace = "jellyfish"
    fake_status = "999999"
    fake_output = "YOU DID IT"
    fake_monitoring_overrides = {"alert_after": "666m"}
    fake_soa_dir = "/hi/hello/hey"
    fake_cluster = "fake_cluster"
    expected_check_name = "check_marathon_services_replication.%s" % compose_job_id(fake_service_name, fake_namespace)
    with contextlib.nested(
        mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True),
        mock.patch("paasta_tools.check_marathon_services_replication.load_system_paasta_config", autospec=True),
        mock.patch("paasta_tools.check_marathon_services_replication._log", autospec=True),
        mock.patch("paasta_tools.marathon_tools.load_marathon_service_config", autospec=True),
    ) as (send_event_patch, load_system_paasta_config_patch, log_patch, load_marathon_service_config_patch):
        load_marathon_service_config_patch.return_value.get_monitoring.return_value = fake_monitoring_overrides
        check_marathon_services_replication.send_event(
            fake_service_name, fake_namespace, fake_cluster, fake_soa_dir, fake_status, fake_output
        )
        # BUG FIX: this was a bare `send_event_patch.call_count == 1`
        # comparison with no assert, so it verified nothing.
        assert send_event_patch.call_count == 1
        send_event_patch.assert_called_once_with(
            fake_service_name, expected_check_name, mock.ANY, fake_status, fake_output, fake_soa_dir
        )
        # The overrides dictionary is mutated in the function under test, so
        # we expect the send_event_patch to be called with something that is a
        # superset of what we originally put in (fake_monitoring_overrides)
        actual_overrides_used = send_event_patch.call_args[0][2]
        assert set({"alert_after": "666m"}.items()).issubset(set(actual_overrides_used.items()))
        assert not set({"alert_after": "2m"}.items()).issubset(set(actual_overrides_used.items()))
Пример #21
0
def test_status_smartstack_backends_multiple_locations():
    """status_smartstack_backends must query backends once per discovery
    location and report each location's health on its own line."""
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    good_task = mock.Mock()
    other_task = mock.Mock()
    # A single healthy haproxy backend, matched to good_task below.
    fake_backend = {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                    'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
                    'check_status': 'L7OK', 'check_duration': 1}
    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_namespace_for_service_instance'),
        mock.patch('paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'),
        mock.patch('paasta_tools.marathon_serviceinit.get_backends', autospec=True),
        mock.patch('paasta_tools.marathon_serviceinit.match_backends_and_tasks', autospec=True),
    ) as (
        mock_load_service_namespace_config,
        mock_read_ns,
        mock_get_mesos_slaves_grouped_by_attribute,
        mock_get_backends,
        mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        mock_get_backends.return_value = [fake_backend]
        mock_match_backends_and_tasks.return_value = [
            (fake_backend, good_task),
        ]
        tasks = [good_task, other_task]
        # Two discovery locations, one synapse host each.
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {
            'fake_location1': ['fakehost1'],
            'fake_location2': ['fakehost2'],
        }
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(mock_get_backends.return_value),
            soa_dir=None,
            verbose=False,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        # One backends query per location's chosen synapse host.
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost1',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost2',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        assert "fake_location1 - %s" % PaastaColors.green('Healthy') in actual
        assert "fake_location2 - %s" % PaastaColors.green('Healthy') in actual
Пример #22
0
def get_configs_of_services_to_scale(cluster, soa_dir=DEFAULT_SOA_DIR):
    """Return the Marathon service configs in ``cluster`` that should be autoscaled.

    A config qualifies when it declares ``max_instances``, its desired state is
    ``'start'``, and its autoscaling decision policy is not ``'bespoke'``.
    Instances with no deployment data yet are skipped (with a debug log).

    :param cluster: name of the cluster to scan
    :param soa_dir: the SOA configuration directory to read from
    :returns: a list of MarathonServiceConfig objects eligible for autoscaling
    """
    eligible = []
    for svc, inst in get_services_for_cluster(
        cluster=cluster,
        instance_type='marathon',
        soa_dir=soa_dir,
    ):
        try:
            cfg = load_marathon_service_config(
                service=svc,
                instance=inst,
                cluster=cluster,
                soa_dir=soa_dir,
            )
        except NoDeploymentsAvailable:
            # Nothing deployed yet -> nothing sensible to autoscale.
            log.debug("%s is not deployed yet, refusing to do autoscaling calculations for it" %
                      compose_job_id(svc, inst))
            continue

        wants_autoscaling = (
            cfg.get_max_instances() and
            cfg.get_desired_state() == 'start' and
            cfg.get_autoscaling_params()['decision_policy'] != 'bespoke'
        )
        if wants_autoscaling:
            eligible.append(cfg)

    return eligible
Пример #23
0
def synapse_replication_is_low(service, instance, system_paasta_config, local_backends):
    """Return True when the local synapse's replication for this instance's
    registration is below the critical threshold (80%) of the expected
    per-location instance count.
    """
    crit_threshold = 80

    # Replication is judged against where the instance actually registers,
    # not against the raw service/instance pair.
    registration = read_registration_for_service_instance(
        service=service, instance=instance
    )
    reg_svc, reg_namespace, _, __ = utils.decompose_job_id(registration)
    # We only actually care about the replication of where we're registering
    service, namespace = reg_svc, reg_namespace

    replication_info = load_smartstack_info_for_service(
        service=service,
        namespace=namespace,
        blacklist=[],
        system_paasta_config=system_paasta_config,
    )
    expected_total = get_expected_instance_count_for_namespace(service=service, namespace=namespace)
    # Assume instances are spread evenly across discovery locations.
    expected_per_location = int(expected_total / len(replication_info))

    synapse_name = utils.compose_job_id(service, namespace)
    local_replication = get_replication_for_services(
        synapse_host=system_paasta_config.get_default_synapse_host(),
        synapse_port=system_paasta_config.get_synapse_port(),
        synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
        services=[synapse_name],
    )
    available = local_replication.get(synapse_name, 0)
    under_replicated, _ratio = utils.is_under_replicated(
        available, expected_per_location, crit_threshold)
    log.info('Service %s.%s has %d out of %d expected instances' % (
        service, instance, available, expected_per_location))
    return under_replicated
def check_service_replication(client, service, instance, cluster, soa_dir):
    """Checks a service's replication levels based on how the service's replication
    should be monitored: via smartstack when the instance has a proxy port,
    otherwise via healthy Marathon task counts in mesos.

    :param client: a Marathon client
    :param service: Service name, like "example_service"
    :param instance: Instance name, like "main" or "canary"
    :param cluster: name of the cluster
    :param soa_dir: The SOA configuration directory to read from
    """
    job_id = compose_job_id(service, instance)
    try:
        expected_count = marathon_tools.get_expected_instance_count_for_namespace(
            service, instance, soa_dir=soa_dir)
    except NoDeploymentsAvailable:
        # Without deployments.json we cannot know what to expect; skip quietly.
        log.info("deployments.json missing for %s. Skipping replication monitoring." % job_id)
        return
    if expected_count is None:
        return

    log.info("Expecting %d total tasks for %s" % (expected_count, job_id))
    proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)
    if proxy_port is None:
        # Not registered in smartstack -> fall back to counting healthy tasks.
        check_healthy_marathon_tasks_for_service_instance(
            client=client,
            service=service,
            instance=instance,
            cluster=cluster,
            soa_dir=soa_dir,
            expected_count=expected_count,
        )
    else:
        check_smartstack_replication_for_instance(
            service=service, instance=instance, cluster=cluster, soa_dir=soa_dir, expected_count=expected_count
        )
Пример #25
0
def paasta_emergency_start(args):
    """Performs an emergency start on a given service instance on a given cluster

    Warning: This command is not magic and cannot actually get a service to start if it couldn't
    run before. This includes configurations that prevent the service from running,
    such as 'instances: 0' (for Marathon apps).

    All it does for Marathon apps is ask Marathon to resume normal operation by scaling up to
    the instance count defined in the service's config.
    All it does for Chronos jobs is send the latest version of the job config to Chronos and run it immediately.
    """
    # NOTE: the last 8 lines of this docstring are printed verbatim to the user
    # below via __doc__.splitlines()[-8:] -- editing the docstring changes the
    # command's output.
    system_paasta_config = load_system_paasta_config()
    service = figure_out_service_name(args, soa_dir=args.soa_dir)
    print "Performing an emergency start on %s..." % compose_job_id(service, args.instance)
    # The actual "start" is delegated to paasta_serviceinit on the remote master.
    output = execute_paasta_serviceinit_on_remote_master(
        subcommand="start",
        cluster=args.cluster,
        service=service,
        instances=args.instance,
        system_paasta_config=system_paasta_config,
    )
    print "%s" % "\n".join(paasta_emergency_start.__doc__.splitlines()[-8:])
    print "Output: %s" % PaastaColors.grey(output)
    print "Run this command to see the status:"
    print "paasta status --service %s --clusters %s" % (service, args.cluster)
def check_smartstack_replication_for_instance(
    service,
    instance,
    cluster,
    soa_dir,
    expected_count,
):
    """Check a set of namespaces to see if their number of available backends is too low,
    emitting events to Sensu based on the fraction available and the thresholds defined in
    the corresponding yelpsoa config.

    Only instances announced under their own nerve namespace are checked; an
    instance registered under a different namespace is skipped here (it is
    covered by the check for the instance owning that namespace).

    :param service: A string like example_service
    :param instance: The instance name, which doubles as the nerve namespace, like "main"
    :param cluster: name of the cluster
    :param soa_dir: The SOA configuration directory to read from
    :param expected_count: total expected instance count across all locations
    """
    namespace = marathon_tools.read_namespace_for_service_instance(service, instance, soa_dir=soa_dir)
    if namespace != instance:
        log.debug("Instance %s is announced under namespace: %s. "
                  "Not checking replication for it" % (instance, namespace))
        return
    full_name = compose_job_id(service, instance)
    job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
    crit_threshold = job_config.get_replication_crit_percentage()
    monitoring_blacklist = job_config.get_monitoring_blacklist()
    log.info('Checking instance %s in smartstack', full_name)
    smartstack_replication_info = load_smartstack_info_for_service(
        service=service, namespace=namespace, soa_dir=soa_dir, blacklist=monitoring_blacklist)
    log.debug('Got smartstack replication info for %s: %s' % (full_name, smartstack_replication_info))

    if len(smartstack_replication_info) == 0:
        # No discovery info at all: most likely a bad "discover" key -> page.
        status = pysensu_yelp.Status.CRITICAL
        output = ('Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
                  'is valid!\n') % full_name
        log.error(output)
    else:
        # Instances are assumed to be spread evenly across discovery locations.
        expected_count_per_location = int(expected_count / len(smartstack_replication_info))
        output = ''
        under_replication_per_location = []

        for location, available_backends in sorted(smartstack_replication_info.iteritems()):
            num_available_in_location = available_backends.get(full_name, 0)
            under_replicated, ratio = is_under_replicated(
                num_available_in_location, expected_count_per_location, crit_threshold)
            if under_replicated:
                output += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
                    full_name, num_available_in_location, expected_count_per_location, location, ratio)
            else:
                output += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
                    full_name, num_available_in_location, expected_count_per_location, location, ratio)
            under_replication_per_location.append(under_replicated)

        # One under-replicated location is enough to alert.
        if any(under_replication_per_location):
            status = pysensu_yelp.Status.CRITICAL
            log.error(output)
        else:
            status = pysensu_yelp.Status.OK
            log.info(output)
    send_event(service=service, namespace=instance, cluster=cluster, soa_dir=soa_dir, status=status, output=output)
def test_status_smartstack_backends_verbose_multiple_apps():
    """Verbose smartstack status should show matched backends normally and
    grey out backends whose task is gone (bad_task has no live task here).
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'

    good_task = mock.Mock()
    bad_task = mock.Mock()
    other_task = mock.Mock()
    # haproxy backend rows keyed by the task they would belong to.
    haproxy_backends_by_task = {
        good_task: {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                    'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
                    'check_status': 'L7OK', 'check_duration': 1},
        bad_task: {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                   'check_code': '200', 'svname': 'ipaddress2:1002_hostname2',
                   'check_status': 'L7OK', 'check_duration': 1},
    }

    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_namespace_for_service_instance'),
        mock.patch('paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'),
        mock.patch('paasta_tools.marathon_serviceinit.get_backends', autospec=True),
        mock.patch('paasta_tools.marathon_serviceinit.match_backends_and_tasks', autospec=True),
    ) as (
        mock_load_service_namespace_config,
        mock_read_ns,
        mock_get_mesos_slaves_grouped_by_attribute,
        mock_get_backends,
        mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        mock_get_backends.return_value = haproxy_backends_by_task.values()
        # (backend, task) pairs: one healthy match, one backend without a task,
        # one task without a backend.
        mock_match_backends_and_tasks.return_value = [
            (haproxy_backends_by_task[good_task], good_task),
            (haproxy_backends_by_task[bad_task], None),
            (None, other_task),
        ]
        tasks = [good_task, other_task]
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {'fake_location1': ['fakehost1']}
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(haproxy_backends_by_task),
            soa_dir=None,
            verbose=True,
        )
        mock_get_backends.assert_called_once_with(
            service_instance,
            synapse_host='fakehost1',
            synapse_port=3212,
        )
        assert "fake_location1" in actual
        assert "hostname1:1001" in actual
        # The taskless backend (hostname2) must be rendered in grey.
        assert re.search(r"%s[^\n]*hostname2:1002" % re.escape(PaastaColors.GREY), actual)
def main():
    args = parse_args()
    instances = get_services_for_cluster(cluster=args.cluster, instance_type='marathon', soa_dir=args.soa_dir)
    composed = []
    for name, instance in instances:
        composed.append(compose_job_id(name, instance))
    print '\n'.join(composed)
    sys.exit(0)
Пример #29
0
def test_compose_job_id_with_hashes():
    """compose_job_id joins service, instance, git hash and config hash with dots."""
    svc = "my_cool_service"
    inst = "main"
    git_sha = "git123abc"
    conf_sha = "config456def"
    assert utils.compose_job_id(svc, inst, git_sha, conf_sha) == \
        "my_cool_service.main.git123abc.config456def"
Пример #30
0
def get_happy_tasks(app, service, nerve_ns, system_paasta_config, min_task_uptime=None, check_haproxy=False):
    """Given a MarathonApp object, return the subset of tasks which are considered healthy.
    With the default options, this returns tasks where at least one of the defined Marathon healthchecks passes.
    For it to do anything interesting, set min_task_uptime or check_haproxy.

    :param app: A MarathonApp object.
    :param service: The name of the service.
    :param nerve_ns: The nerve namespace
    :param min_task_uptime: Minimum number of seconds that a task must be running before we consider it healthy. Useful
                            if tasks take a while to start up.
    :param check_haproxy: Whether to check the local haproxy to make sure this task has been registered and discovered.
    """
    candidates = app.tasks
    now = datetime.datetime.utcnow()

    if check_haproxy:
        # Narrow the candidate set to tasks that haproxy has actually
        # registered, querying one synapse host per discovery location.
        registered = []
        service_namespace = compose_job_id(service, nerve_ns)

        namespace_config = marathon_tools.load_service_namespace_config(
            service=service, namespace=nerve_ns)
        discover_type = namespace_config.get_discover()
        slaves_by_location = mesos_tools.get_mesos_slaves_grouped_by_attribute(
            slaves=mesos_tools.get_slaves(),
            attribute=discover_type
        )

        for _location, hosts in slaves_by_location.iteritems():
            synapse_hostname = hosts[0]['hostname']
            registered.extend(get_registered_marathon_tasks(
                synapse_hostname,
                system_paasta_config.get_synapse_port(),
                system_paasta_config.get_synapse_haproxy_url_format(),
                service_namespace,
                candidates,
            ))
        candidates = registered

    def _is_happy(task):
        # A task that never started cannot be healthy.
        if task.started_at is None:
            return False
        # Enforce the minimum uptime, if requested.
        if min_task_uptime is not None:
            if (now - task.started_at).total_seconds() < min_task_uptime:
                return False
        # Healthchecks are defined but none have executed yet -> unhappy.
        if len(app.health_checks) > 0 and len(task.health_check_results) == 0:
            return False
        # Otherwise happy iff at least one healthcheck passes (or none exist).
        return marathon_tools.is_task_healthy(task, require_all=False, default_healthy=True)

    return [task for task in candidates if _is_happy(task)]
def check_smartstack_replication_for_instance(
    service,
    instance,
    cluster,
    soa_dir,
    expected_count,
    system_paasta_config,
):
    """Check a set of namespaces to see if their number of available backends is too low,
    emitting events to Sensu based on the fraction available and the thresholds defined in
    the corresponding yelpsoa config.

    Only instances announced under their own nerve namespace are checked; an
    instance registered under a different namespace is skipped here (it is
    covered by the check for the instance owning that namespace).

    :param service: A string like example_service
    :param instance: The instance name, which doubles as the nerve namespace, like "main"
    :param cluster: name of the cluster
    :param soa_dir: The SOA configuration directory to read from
    :param expected_count: total expected instance count across all locations
    :param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
    """
    namespace = marathon_tools.read_namespace_for_service_instance(
        service, instance, soa_dir=soa_dir)
    if namespace != instance:
        log.debug("Instance %s is announced under namespace: %s. "
                  "Not checking replication for it" % (instance, namespace))
        return
    full_name = compose_job_id(service, instance)
    job_config = marathon_tools.load_marathon_service_config(
        service, instance, cluster)
    crit_threshold = job_config.get_replication_crit_percentage()
    monitoring_blacklist = job_config.get_monitoring_blacklist()
    log.info('Checking instance %s in smartstack', full_name)
    smartstack_replication_info = load_smartstack_info_for_service(
        service=service,
        namespace=namespace,
        soa_dir=soa_dir,
        blacklist=monitoring_blacklist,
        system_paasta_config=system_paasta_config,
    )
    log.debug('Got smartstack replication info for %s: %s' %
              (full_name, smartstack_replication_info))

    if len(smartstack_replication_info) == 0:
        # No discovery info at all: most likely a bad "discover" key -> page.
        status = pysensu_yelp.Status.CRITICAL
        output = (
            'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
            'is valid!\n') % full_name
        log.error(output)
    else:
        # Instances are assumed to be spread evenly across discovery locations.
        expected_count_per_location = int(expected_count /
                                          len(smartstack_replication_info))
        output = ''
        under_replication_per_location = []

        for location, available_backends in sorted(
                smartstack_replication_info.iteritems()):
            num_available_in_location = available_backends.get(full_name, 0)
            under_replicated, ratio = is_under_replicated(
                num_available_in_location, expected_count_per_location,
                crit_threshold)
            if under_replicated:
                output += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
                    full_name, num_available_in_location,
                    expected_count_per_location, location, ratio)
            else:
                output += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
                    full_name, num_available_in_location,
                    expected_count_per_location, location, ratio)
            under_replication_per_location.append(under_replicated)

        # One under-replicated location is enough to alert; append remediation
        # guidance to the event output so the page is actionable.
        if any(under_replication_per_location):
            status = pysensu_yelp.Status.CRITICAL
            output += (
                "\n\n"
                "What this alert means:\n"
                "\n"
                "  This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
                "  doesn't have enough healthy backends. Not having enough healthy backends\n"
                "  means that clients of that service will get 503s (http) or connection refused\n"
                "  (tcp) when trying to connect to it.\n"
                "\n"
                "Reasons this might be happening:\n"
                "\n"
                "  The service may simply not have enough copies or it could simply be\n"
                "  unhealthy in that location. There also may not be enough resources\n"
                "  in the cluster to support the requested instance count.\n"
                "\n"
                "Things you can do:\n"
                "\n"
                "  * Fix the cause of the unhealthy service. Try running:\n"
                "\n"
                "      paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
                "\n"
                "  * Widen SmartStack discovery settings\n"
                "  * Increase the instance count\n"
                "\n") % {
                    'service': service,
                    'instance': instance,
                    'cluster': cluster,
                }
            log.error(output)
        else:
            status = pysensu_yelp.Status.OK
            log.info(output)
    send_event(service=service,
               namespace=instance,
               cluster=cluster,
               soa_dir=soa_dir,
               status=status,
               output=output)
Пример #32
0
def test_status_smartstack_backends_different_nerve_ns():
    """When an instance registers under a different nerve namespace,
    status_smartstack_backends must query haproxy with the registered job id
    (service.different_ns), not the instance name.

    Fix: the original test assigned ``mock_read_reg.return_value`` twice with
    the same value; the redundant second assignment is removed.
    """
    service = 'my_service'
    instance = 'my_instance'
    different_ns = 'different_ns'
    service_instance = compose_job_id(service, different_ns)

    cluster = 'fake_cluster'
    good_task = mock.Mock()
    bad_task = mock.Mock()
    other_task = mock.Mock()
    haproxy_backends_by_task = {
        good_task: {
            'status': 'UP',
            'lastchg': '1',
            'last_chk': 'OK',
            'check_code': '200',
            'svname': 'ipaddress1:1001_hostname1',
            'check_status': 'L7OK',
            'check_duration': 1
        },
        bad_task: {
            'status': 'UP',
            'lastchg': '1',
            'last_chk': 'OK',
            'check_code': '200',
            'svname': 'ipaddress2:1002_hostname2',
            'check_status': 'L7OK',
            'check_duration': 1
        },
    }

    with contextlib.nested(
            mock.patch(
                'paasta_tools.marathon_tools.load_service_namespace_config',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_tools.read_registration_for_service_instance',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist',
                autospec=True),
            mock.patch('paasta_tools.marathon_serviceinit.get_backends',
                       autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.match_backends_and_tasks',
                autospec=True),
    ) as (
            mock_load_service_namespace_config,
            mock_read_reg,
            mock_get_all_slaves_for_blacklist_whitelist,
            mock_get_backends,
            mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [{
            'hostname':
            'fakehost',
            'attributes': {
                'fake_discover': 'fakelocation'
            }
        }]

        # Registration lookup resolves to the foreign namespace's job id.
        mock_read_reg.return_value = service_instance
        mock_get_backends.return_value = haproxy_backends_by_task.values()
        mock_match_backends_and_tasks.return_value = [
            (haproxy_backends_by_task[good_task], good_task),
            (haproxy_backends_by_task[bad_task], None),
            (None, other_task),
        ]
        tasks = [good_task, other_task]
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(haproxy_backends_by_task),
            soa_dir=None,
            verbose=False,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        # haproxy must have been queried under the registered namespace.
        mock_get_backends.assert_called_once_with(
            service_instance,
            synapse_host='fakehost',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        assert "fakelocation" in actual
        assert "Healthy" in actual
Пример #33
0
def test_status_smartstack_backends_verbose_emphasizes_maint_instances():
    """Verbose smartstack status must render backends in MAINT state in red."""
    service = 'my_service'
    instance = 'my_instance'
    cluster = 'fake_cluster'
    normal_count = 10
    good_task = mock.Mock()
    other_task = mock.Mock()
    # A single haproxy backend row in maintenance mode.
    fake_backend = {
        'status': 'MAINT',
        'lastchg': '1',
        'last_chk': 'OK',
        'check_code': '200',
        'svname': 'ipaddress1:1001_hostname1',
        'check_status': 'L7OK',
        'check_duration': 1
    }
    with contextlib.nested(
            mock.patch(
                'paasta_tools.marathon_tools.load_service_namespace_config',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_tools.read_registration_for_service_instance',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist',
                autospec=True),
            mock.patch('paasta_tools.marathon_serviceinit.get_backends',
                       autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.match_backends_and_tasks',
                autospec=True),
    ) as (
            mock_load_service_namespace_config,
            mock_read_reg,
            mock_get_mesos_slaves_for_blacklist_whitelist,
            mock_get_backends,
            mock_match_backends_and_tasks,
    ):
        mock_get_mesos_slaves_for_blacklist_whitelist.return_value = [{
            'hostname':
            'fake',
            'attributes': {
                'fake_discover': 'fake_location_1'
            }
        }]
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_reg.return_value = compose_job_id(service, instance)
        mock_get_backends.return_value = [fake_backend]
        mock_match_backends_and_tasks.return_value = [
            (fake_backend, good_task),
        ]
        tasks = [good_task, other_task]
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=normal_count,
            soa_dir=None,
            verbose=True,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        # MAINT status should be emphasized in red in verbose output.
        assert PaastaColors.red('MAINT') in actual
def test_compose_job_id_with_config_hash():
    """Supplying a config hash without a git hash must raise InvalidJobNameError."""
    with raises(utils.InvalidJobNameError):
        utils.compose_job_id("my_cool_service", "main", config_hash="config456def")
Пример #35
0
class TestSetupChronosJob:
    @pytest.fixture(autouse=True)
    def mock_read_monitoring_config(self):
        # Autouse fixture: every test in this class runs with the pipeline
        # deploy groups mocked out, so no yelpsoa config needs to exist on disk.
        with mock.patch(
                "paasta_tools.utils.get_pipeline_deploy_groups",
                mock.Mock(return_value=["fake_deploy_group"]),
                autospec=None,
        ) as f:
            yield f

    # Shared fixtures for every test in this class: a fake docker image, a
    # mocked Chronos client, and a fully-populated ChronosJobConfig for
    # test_service.test in fake_test_cluster.
    fake_docker_image = "test_docker:1.0"
    fake_client = mock.MagicMock()

    fake_service = "test_service"
    fake_instance = "test"
    fake_cluster = "fake_test_cluster"
    # Raw chronos job config dict, mirroring a yelpsoa chronos-*.yaml entry.
    fake_config_dict = {
        "name": "test_service test gitsha config",
        "description": "This is a test Chronos job.",
        "command": "/bin/sleep 40",
        "bounce_method": "graceful",
        "epsilon": "PT30M",
        "retries": 5,
        "owner": "*****@*****.**",
        "async": False,
        "cpus": 5.5,
        "mem": 1024.4,
        "disk": 2048.5,
        "disabled": "true",
        "schedule": "R/2015-03-25T19:36:35Z/PT5M",
        "schedule_time_zone": "Zulu",
        "deploy_group": "fake_deploy_group",
    }
    # Deployment branch metadata normally sourced from deployments.json.
    fake_branch_dict = {
        "docker_image": f"paasta-{fake_service}-{fake_cluster}",
        "git_sha": "fake_sha",
        "force_bounce": None,
        "desired_state": "start",
    }
    fake_chronos_job_config = chronos_tools.ChronosJobConfig(
        service=fake_service,
        cluster=fake_cluster,
        instance=fake_instance,
        config_dict=fake_config_dict,
        branch_dict=fake_branch_dict,
    )

    fake_docker_registry = "remote_registry.com"
    # Stand-in for the parsed command-line args consumed by main().
    fake_args = mock.MagicMock(
        service_instance=compose_job_id(fake_service, fake_instance),
        soa_dir="no_more",
        verbose=False,
    )

    def test_config_with_historical_stats(self):
        """Historical run stats from an existing Chronos job are merged into
        the new job config.
        """
        with mock.patch(
                "paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs",
                autospec=True,
        ) as mock_lookup_chronos_jobs:
            # Pretend a previous incarnation of the job exists with stats.
            ret = [{
                "lastSuccess": "2017-04-01T00:00:00Z",
                "lastError": "2017-04-02T00:00:00Z",
                "successCount": 1,
                "errorCount": 1,
            }]
            mock_lookup_chronos_jobs.return_value = ret
            init_config = {"name": "foo bar"}
            expected_merge = {
                "name": "foo bar",
                "lastSuccess": "2017-04-01T00:00:00Z",
                "lastError": "2017-04-02T00:00:00Z",
                "successCount": 1,
                "errorCount": 1,
            }
            actual = setup_chronos_job.config_with_historical_stats(
                chronos_client=mock.Mock(),
                service="foo",
                instance="bar",
                job_config=init_config,
            )
            assert actual == expected_merge

    def test_config_with_historical_stats_no_existing(self):
        """With no pre-existing Chronos job, the config passes through unchanged."""
        with mock.patch(
                "paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs",
                autospec=True,
        ) as mock_lookup_chronos_jobs:
            ret = []
            mock_lookup_chronos_jobs.return_value = ret
            init_config = {"name": "foo bar"}
            expected_merge = {"name": "foo bar"}
            actual = setup_chronos_job.config_with_historical_stats(
                chronos_client=mock.Mock(),
                service="foo",
                instance="bar",
                job_config=init_config,
            )
            assert actual == expected_merge

    def test_main_success(self):
        """Happy path: main() wires args -> client -> setup_job -> send_event
        and exits 0.
        """
        expected_status = 0
        expected_output = "it_is_finished"
        fake_complete_job_config = {"foo": "bar"}
        with mock.patch(
                "paasta_tools.setup_chronos_job.parse_args",
                return_value=self.fake_args,
                autospec=True,
        ) as parse_args_patch, mock.patch(
                "paasta_tools.chronos_tools.load_chronos_config", autospec=True
        ) as load_chronos_config_patch, mock.patch(
                "paasta_tools.chronos_tools.get_chronos_client",
                return_value=self.fake_client,
                autospec=True,
        ) as get_client_patch, mock.patch(
                "paasta_tools.chronos_tools.create_complete_config",
                return_value=fake_complete_job_config,
                autospec=True,
        ), mock.patch(
                "paasta_tools.setup_chronos_job.setup_job",
                return_value=(expected_status, expected_output),
                autospec=True,
        ) as setup_job_patch, mock.patch(
                "paasta_tools.setup_chronos_job.send_event", autospec=True
        ) as send_event_patch, mock.patch(
                "paasta_tools.setup_chronos_job.load_system_paasta_config",
                autospec=True) as load_system_paasta_config_patch, mock.patch(
                    "sys.exit", autospec=True) as sys_exit_patch:
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            setup_chronos_job.main()

            # Verify each collaborator was called exactly once with the
            # expected plumbing.
            parse_args_patch.assert_called_once_with()
            get_client_patch.assert_called_once_with(
                load_chronos_config_patch.return_value)
            setup_job_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=fake_complete_job_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            send_event_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=expected_status,
                output=expected_output,
            )
            sys_exit_patch.assert_called_once_with(0)

    def test_main_no_deployments(self):
        """If create_complete_config raises NoDeploymentsAvailable, main()
        exits cleanly with status 0.
        """
        with mock.patch(
                "paasta_tools.setup_chronos_job.parse_args",
                return_value=self.fake_args,
                autospec=True,
        ), mock.patch(
                "paasta_tools.chronos_tools.load_chronos_config", autospec=True
        ), mock.patch(
                "paasta_tools.chronos_tools.get_chronos_client",
                return_value=self.fake_client,
                autospec=True,
        ), mock.patch(
                "paasta_tools.chronos_tools.create_complete_config",
                return_value={},
                autospec=True,
                side_effect=NoDeploymentsAvailable,
        ), mock.patch(
                "paasta_tools.setup_chronos_job.setup_job",
                return_value=(0, "it_is_finished"),
                autospec=True,
        ), mock.patch(
                "paasta_tools.setup_chronos_job.load_system_paasta_config",
                autospec=True) as load_system_paasta_config_patch, mock.patch(
                    "paasta_tools.setup_chronos_job.send_event",
                    autospec=True):
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            with raises(SystemExit) as excinfo:
                setup_chronos_job.main()
            assert excinfo.value.code == 0

    def test_main_bad_chronos_job_config_notifies_user(self):
        """A broken chronos config must page the owner via send_event.

        ``NoConfigurationForServiceError`` should produce a CRITICAL event
        whose output explains the configuration problem; main() exits 0.
        """
        with mock.patch(
            "paasta_tools.setup_chronos_job.parse_args",
            autospec=True,
            return_value=self.fake_args,
        ), mock.patch(
            "paasta_tools.chronos_tools.load_chronos_config",
            autospec=True,
        ), mock.patch(
            "paasta_tools.chronos_tools.get_chronos_client",
            autospec=True,
            return_value=self.fake_client,
        ), mock.patch(
            "paasta_tools.chronos_tools.create_complete_config",
            autospec=True,
            side_effect=NoConfigurationForServiceError("test bad configuration"),
        ), mock.patch(
            "paasta_tools.setup_chronos_job.setup_job",
            autospec=True,
            return_value=(0, "it_is_finished"),
        ), mock.patch(
            "paasta_tools.setup_chronos_job.load_system_paasta_config",
            autospec=True,
        ) as mock_load_system_paasta_config, mock.patch(
            "paasta_tools.setup_chronos_job.send_event",
            autospec=True,
        ) as mock_send_event:
            mock_load_system_paasta_config.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            with raises(SystemExit) as exc_info:
                setup_chronos_job.main()
            assert exc_info.value.code == 0
            expected_error_msg = (
                "Could not read chronos configuration file for %s in cluster %s"
                "\nError was: test bad configuration" % (
                    compose_job_id(self.fake_service, self.fake_instance),
                    self.fake_cluster,
                )
            )
            mock_send_event.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=Status.CRITICAL,
                output=expected_error_msg,
            )

    def test_setup_job_new_app_with_no_previous_jobs(self):
        """setup_job should deploy the new job when Chronos has no jobs yet.

        With no existing jobs found, the freshly built complete config must
        be the ``job_to_update`` handed to bounce_chronos_job.
        """
        fake_existing_jobs = []
        with mock.patch(
                "paasta_tools.setup_chronos_job.bounce_chronos_job",
                autospec=True,
                return_value=(0, "ok"),
        ) as mock_bounce_chronos_job, mock.patch(
                "paasta_tools.chronos_tools.lookup_chronos_jobs",
                autospec=True), mock.patch(
                    "paasta_tools.chronos_tools.sort_jobs",
                    autospec=True,
                    return_value=fake_existing_jobs,
                ), mock.patch(
                    "paasta_tools.utils.load_system_paasta_config",
                    autospec=True), mock.patch(
                        "paasta_tools.chronos_tools.load_system_paasta_config",
                        autospec=True
                    ) as load_system_paasta_config_patch, mock.patch(
                        "paasta_tools.chronos_tools.load_chronos_job_config",
                        autospec=True,
                        return_value=self.fake_chronos_job_config,
                    ):
            # Stub out everything create_complete_config reads from the
            # system paasta config.
            load_system_paasta_config_patch.return_value.get_cluster.return_value = (
                self.fake_cluster)
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = (
                None)
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = (
                "file:///root/.dockercfg")
            # Build a real complete config (not a mock) so setup_job works on
            # a realistic job dict.
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            # The brand-new config is what gets bounced in.
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_with_previously_enabled_job(self):
        """setup_job should replace a previously-enabled job.

        An existing enabled job is present, so the new complete config (which
        differs from it) must be passed to bounce_chronos_job as the job to
        update, and the existing jobs must actually have been looked up.
        """
        fake_existing_job = {"name": "fake_job", "disabled": False}
        with mock.patch(
                "paasta_tools.setup_chronos_job.bounce_chronos_job",
                autospec=True,
                return_value=(0, "ok"),
        ) as mock_bounce_chronos_job, mock.patch(
                "paasta_tools.chronos_tools.lookup_chronos_jobs",
                autospec=True) as mock_lookup_chronos_jobs, mock.patch(
                    "paasta_tools.chronos_tools.sort_jobs",
                    autospec=True,
                    return_value=[fake_existing_job],
                ), mock.patch(
                    "paasta_tools.utils.load_system_paasta_config",
                    autospec=True), mock.patch(
                        "paasta_tools.chronos_tools.load_system_paasta_config",
                        autospec=True
                    ) as load_system_paasta_config_patch, mock.patch(
                        "paasta_tools.chronos_tools.load_chronos_job_config",
                        autospec=True,
                        return_value=self.fake_chronos_job_config,
                    ):
            # Stub out everything create_complete_config reads from the
            # system paasta config.
            load_system_paasta_config_patch.return_value.get_cluster.return_value = (
                self.fake_cluster)
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = (
                None)
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = (
                "file:///root/.dockercfg")
            # Build a real complete config so it differs from the existing job.
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert mock_lookup_chronos_jobs.called
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_does_nothing_with_only_existing_app(self):
        """If Chronos already runs an identical job, nothing is updated.

        When the desired config matches an existing job by name,
        bounce_chronos_job must be called with job_to_update=None.
        """
        fake_existing_job = copy.deepcopy(self.fake_config_dict)
        # The desired config is a copy of the existing job; forcing the names
        # to match simulates "nothing has changed since the last deploy".
        complete_config = copy.deepcopy(self.fake_config_dict)
        complete_config["name"] = fake_existing_job["name"]
        with mock.patch(
            "paasta_tools.setup_chronos_job.bounce_chronos_job",
            autospec=True,
            return_value=(0, "ok"),
        ) as mock_bounce, mock.patch(
            "paasta_tools.chronos_tools.lookup_chronos_jobs",
            autospec=True,
            return_value=[fake_existing_job],
        ) as mock_lookup, mock.patch(
            "paasta_tools.chronos_tools.load_system_paasta_config",
            autospec=True,
        ) as mock_load_system_paasta_config, mock.patch(
            "paasta_tools.chronos_tools.load_chronos_job_config",
            autospec=True,
            return_value=self.fake_chronos_job_config,
        ):
            mock_load_system_paasta_config.return_value.get_cluster.return_value = (
                self.fake_cluster)
            result = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            mock_bounce.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert mock_lookup.called
            assert result == mock_bounce.return_value

    def test_send_event(self):
        """send_event forwards to monitoring_tools with chronos overrides.

        The check name is namespaced under ``setup_chronos_job.`` and the
        overrides pin check_every=10s / alert_after=10m.
        """
        fake_status = "42"
        fake_output = "something went wrong"
        fake_soa_dir = ""
        expected_check_name = "setup_chronos_job.%s" % compose_job_id(
            self.fake_service, self.fake_instance)
        with mock.patch(
            "paasta_tools.monitoring_tools.send_event",
            autospec=True,
        ) as mock_send_event, mock.patch(
            "paasta_tools.chronos_tools.load_chronos_job_config",
            autospec=True,
        ) as mock_load_chronos_job_config, mock.patch(
            "paasta_tools.setup_chronos_job.load_system_paasta_config",
            autospec=True,
        ) as mock_load_system_paasta_config:
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(
                return_value="fake_cluster")
            mock_load_chronos_job_config.return_value.get_monitoring.return_value = {}

            setup_chronos_job.send_event(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=fake_soa_dir,
                status=fake_status,
                output=fake_output,
            )
            mock_send_event.assert_called_once_with(
                service=self.fake_service,
                check_name=expected_check_name,
                overrides={"check_every": "10s", "alert_after": "10m"},
                status=fake_status,
                output=fake_output,
                soa_dir=fake_soa_dir,
            )
            mock_load_chronos_job_config.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=mock_load_system_paasta_config.return_value.get_cluster.return_value,
                soa_dir=fake_soa_dir,
                load_deployments=False,
            )

    def test_bounce_chronos_job_takes_actions(self):
        """Bouncing with a job_to_update must log and call update_job."""
        fake_job_to_update = {"name": "job_to_update"}
        with mock.patch(
            "paasta_tools.setup_chronos_job._log",
            autospec=True,
        ) as mock_log, mock.patch(
            "paasta_tools.chronos_tools.update_job",
            autospec=True,
        ) as mock_update_job:
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=fake_job_to_update,
                client=self.fake_client,
            )
            # Common log fields shared by both expected log calls.
            common_fields = dict(
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                component="deploy",
                service=self.fake_service,
            )
            # A debug line is emitted, then an event-level line naming the job.
            mock_log.assert_any_call(line=mock.ANY, level="debug", **common_fields)
            mock_log.assert_any_call(
                line="Updated Chronos job: job_to_update",
                level="event",
                **common_fields
            )
            mock_update_job.assert_called_once_with(
                job=fake_job_to_update, client=self.fake_client)

    def test_bounce_chronos_job_doesnt_log_when_nothing_to_do(self):
        """With job_to_update=None the bounce is a no-op: no log, no update."""
        with mock.patch(
            "paasta_tools.setup_chronos_job._log",
            autospec=True,
        ) as mock_log, mock.patch(
            "paasta_tools.chronos_tools.update_job",
            autospec=True,
        ) as mock_update_job:
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert not mock_log.called
            assert not mock_update_job.called
# Example #36 (score: 0) -- extraction artifact separating pasted code samples
def main(argv):
    """Run a service instance as a one-off ("remote-run") task on Mesos.

    Parses arguments, resolves the service/instance/cluster to run, builds
    an AdhocScheduler with any command/constraint overrides, and drives the
    Mesos framework until the task completes.

    :param argv: command-line arguments, forwarded to parse_args
    :returns: 1 on a usage error (no cluster configured); otherwise None
        after the driver finishes
    """
    args = parse_args(argv)
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                # BUG FIX: the adjacent literals are concatenated, so each
                # line needs a trailing space to avoid "indicatesPaaSTA".
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates "
                "PaaSTA is not configured locally on this host, and remote-run may not behave "
                "the same way it would behave on a server configured for PaaSTA."
            ),
            sep='\n',
        )
        # Fall back to a minimal config so the run can still proceed.
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    cluster = args.cluster or system_paasta_config.get_local_run_config().get(
        'default_cluster', None)

    if not cluster:
        paasta_print(
            PaastaColors.red(
                # BUG FIX: trailing space needed between the two literals.
                "PaaSTA on this machine has not been configured with a default cluster. "
                "Please pass one using '-c'."),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    dry_run = args.dry_run
    instance = args.instance
    command = args.cmd

    # With no instance given, run as an ad-hoc "remote" instance.
    if instance is None:
        instance_type = 'adhoc'
        instance = 'remote'
    else:
        instance_type = validate_service_instance(service, instance, cluster,
                                                  soa_dir)

    overrides_dict = {}

    constraints_json = args.constraints_json
    if constraints_json:
        # BUG FIX: `constraints` was previously referenced without being
        # bound when json.loads raised, producing a NameError instead of the
        # warning; initialize it first. Also interpolate the exception into
        # the message instead of passing it as a stray positional argument.
        constraints = None
        try:
            constraints = json.loads(constraints_json)
        except Exception as e:
            paasta_print("Error while parsing constraints: %s" % e)

        if constraints:
            overrides_dict['constraints'] = constraints

    if command:
        overrides_dict['cmd'] = command

    paasta_print('Scheduling a task on Mesos')
    scheduler = AdhocScheduler(
        service_name=service,
        instance_name=instance,
        instance_type=instance_type,
        cluster=cluster,
        system_paasta_config=system_paasta_config,
        soa_dir=soa_dir,
        reconcile_backoff=0,
        dry_run=dry_run,
        staging_timeout=args.staging_timeout,
        service_config_overrides=overrides_dict,
    )
    # Framework names are made unique with a microsecond-resolution timestamp.
    driver = create_driver(
        framework_name="paasta-remote %s %s" % (
            compose_job_id(service, instance),
            datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
        ),
        scheduler=scheduler,
        system_paasta_config=system_paasta_config,
    )
    driver.run()
# Example #37 (score: 0) -- extraction artifact separating pasted code samples
def build_executor_stack(
    # TODO: rename to registry?
    processor,
    service,
    instance,
    cluster,
    # TODO: move run_id into task identifier?
    run_id,
    system_paasta_config,
    framework_staging_timeout,
):
    """Build the task-processing executor stack for a remote-run task.

    Creates a Mesos executor registered against the current Mesos leader,
    then wraps it in a stateful executor that persists task events to a
    DynamoDB table named after the cluster.

    :param processor: taskproc registry used to look up executor classes
    :param service: service name (used in the framework name)
    :param instance: instance name (used in the framework name)
    :param cluster: cluster name (used for the DynamoDB table name)
    :param run_id: unique id for this run, appended to the framework name
    :param system_paasta_config: config object with a 'taskproc' section
    :param framework_staging_timeout: seconds to wait for the task to stage
    :returns: the stateful executor wrapping the Mesos executor
    :raises ValueError: if no boto credential file is configured
    """
    mesos_address = '{}:{}'.format(
        mesos_tools.get_mesos_leader(), mesos_tools.MESOS_MASTER_PORT,
    )

    # TODO: implement DryRunExecutor?
    taskproc_config = system_paasta_config.get('taskproc')

    MesosExecutor = processor.executor_cls('mesos')
    mesos_executor = MesosExecutor(
        # Default the Mesos role to the principal when no role is configured.
        role=taskproc_config.get('role', taskproc_config['principal']),
        principal=taskproc_config['principal'],
        secret=taskproc_config['secret'],
        mesos_address=mesos_address,
        framework_name="paasta-remote %s %s %s" % (
            compose_job_id(service, instance),
            datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
            run_id,
        ),
        framework_staging_timeout=framework_staging_timeout,
        initial_decline_delay=0.5,
    )

    # AWS credentials are required to persist task events; fail early and
    # loudly if they are missing.
    credentials_file = taskproc_config.get('boto_credential_file')
    if not credentials_file:
        raise ValueError("Required aws credentials")
    with open(credentials_file) as f:
        # json.load reads straight from the file object (was loads(f.read())).
        credentials = json.load(f)

    region = taskproc_config.get('aws_region')

    endpoint = taskproc_config.get('dynamodb_endpoint')
    session = Session(
        region_name=region,
        aws_access_key_id=credentials['accessKeyId'],
        aws_secret_access_key=credentials['secretAccessKey'],
    )

    StatefulExecutor = processor.executor_cls(provider='stateful')
    stateful_executor = StatefulExecutor(
        downstream_executor=mesos_executor,
        persister=DynamoDBPersister(
            table_name="taskproc_events_%s" % cluster,
            session=session,
            endpoint_url=endpoint,
        ),
    )

    return stateful_executor
def test_status_smartstack_backends_verbose_multiple_locations():
    """Verbose smartstack status should query haproxy in every location.

    With slaves discovered in two locations, get_backends must be called once
    per location's synapse host, and the verbose output must report each
    location's health and its backend host:port.
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    good_task = mock.Mock()
    other_task = mock.Mock()
    # One backend per location; svname encodes "ip:port_hostname".
    fake_backend = {
        'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
        'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
        'check_status': 'L7OK', 'check_duration': 1,
    }
    fake_other_backend = {
        'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
        'check_code': '200', 'svname': 'ipaddress1:1002_hostname2',
        'check_status': 'L7OK', 'check_duration': 1,
    }
    with mock.patch(
        'paasta_tools.marathon_tools.load_service_namespace_config', autospec=True,
    ) as mock_load_service_namespace_config, mock.patch(
        'paasta_tools.marathon_tools.read_registration_for_service_instance', autospec=True,
    ) as mock_read_reg, mock.patch(
        'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist', autospec=True,
    ) as mock_get_all_slaves_for_blacklist_whitelist, mock.patch(
        # side_effect order matters: first call serves location 1, second
        # serves location 2.
        'paasta_tools.marathon_serviceinit.get_backends', autospec=True,
        side_effect=[[fake_backend], [fake_other_backend]],
    ) as mock_get_backends, mock.patch(
        'paasta_tools.marathon_serviceinit.match_backends_and_tasks',
        autospec=True, side_effect=[[(fake_backend, good_task)], [(fake_other_backend, good_task)]],
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_reg.return_value = service_instance
        # Two slaves, each in a distinct 'fake_discover' location.
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [
            {
                'hostname': 'hostname1',
                'attributes': {
                    'fake_discover': 'fakelocation',
                },
            },
            {
                'hostname': 'hostname2',
                'attributes': {
                    'fake_discover': 'fakeotherlocation',
                },
            },
        ]
        tasks = [good_task, other_task]
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=1,
            soa_dir=None,
            verbose=True,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
            system_deploy_blacklist=[],
            system_deploy_whitelist=[],
        )
        # Each location's synapse host must have been queried.
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='hostname1',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='hostname2',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_all_slaves_for_blacklist_whitelist.assert_called_once_with(
            blacklist=[],
            whitelist=[],
        )
        # Both locations appear healthy with their respective backends.
        assert "fakelocation - %s" % PaastaColors.green('Healthy') in actual
        assert "hostname1:1001" in actual
        assert "fakeotherlocation - %s" % PaastaColors.green('Healthy') in actual
        assert "hostname2:1002" in actual
# Example #39 (score: 0) -- extraction artifact separating pasted code samples
class TestSetupChronosJob:

    # ---- Shared fixtures used by every test in this class ----
    # Docker image tag for the job being set up.
    fake_docker_image = 'test_docker:1.0'
    # Shared mock Chronos client; tests only assert on how it is passed along.
    fake_client = mock.MagicMock()

    fake_service = 'test_service'
    fake_instance = 'test'
    fake_cluster = 'fake_test_cluster'
    # A representative chronos job config dict covering scheduling, resource,
    # and bounce-related fields.
    fake_config_dict = {
        'name': 'test_service test gitsha config',
        'description': 'This is a test Chronos job.',
        'command': '/bin/sleep 40',
        'bounce_method': 'graceful',
        'epsilon': 'PT30M',
        'retries': 5,
        'owner': '*****@*****.**',
        'async': False,
        'cpus': 5.5,
        'mem': 1024.4,
        'disk': 2048.5,
        'disabled': 'true',
        'schedule': 'R/2015-03-25T19:36:35Z/PT5M',
        'schedule_time_zone': 'Zulu',
    }
    # Deployment branch info: the docker image the deployed branch points at.
    fake_branch_dict = {
        'docker_image': 'paasta-%s-%s' % (fake_service, fake_cluster),
    }
    # Full job config object combining the dicts above.
    fake_chronos_job_config = chronos_tools.ChronosJobConfig(
        service=fake_service,
        cluster=fake_cluster,
        instance=fake_instance,
        config_dict=fake_config_dict,
        branch_dict=fake_branch_dict,
    )

    fake_docker_registry = 'remote_registry.com'
    # Parsed-args stand-in as produced by setup_chronos_job.parse_args.
    fake_args = mock.MagicMock(
        service_instance=compose_job_id(fake_service, fake_instance),
        soa_dir='no_more',
        verbose=False,
    )

    def test_config_with_historical_stats(self):
        """Stats from the most recent matching job are merged into the config."""
        historical_stats = {
            'lastSuccess': '2017-04-01T00:00:00Z',
            'lastError': '2017-04-02T00:00:00Z',
            'successCount': 1,
            'errorCount': 1,
        }
        with mock.patch(
                'paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs',
                autospec=True,
                return_value=[dict(historical_stats)],
        ):
            actual = setup_chronos_job.config_with_historical_stats(
                chronos_client=mock.Mock(),
                service='foo',
                instance='bar',
                job_config={'name': 'foo bar'},
            )
            # The merged config keeps its name and gains all four stat fields.
            expected = dict(historical_stats, name='foo bar')
            assert actual == expected

    def test_config_with_historical_stats_no_existing(self):
        """With no previous runs the job config passes through unchanged."""
        with mock.patch(
                'paasta_tools.setup_chronos_job.chronos_tools.lookup_chronos_jobs',
                autospec=True,
                return_value=[],
        ):
            actual = setup_chronos_job.config_with_historical_stats(
                chronos_client=mock.Mock(),
                service='foo',
                instance='bar',
                job_config={'name': 'foo bar'},
            )
            assert actual == {'name': 'foo bar'}

    def test_main_success(self):
        """Happy path: main() wires args -> setup_job -> send_event -> exit 0.

        Verifies that setup_job receives the complete job config and client,
        that its (status, output) result is forwarded to send_event, and that
        main exits with status 0.
        """
        expected_status = 0
        expected_output = 'it_is_finished'
        fake_complete_job_config = {'foo': 'bar'}
        with mock.patch(
                'paasta_tools.setup_chronos_job.parse_args',
                return_value=self.fake_args,
                autospec=True,
        ) as parse_args_patch, mock.patch(
                'paasta_tools.chronos_tools.load_chronos_config',
                autospec=True,
        ) as load_chronos_config_patch, mock.patch(
                'paasta_tools.chronos_tools.get_chronos_client',
                return_value=self.fake_client,
                autospec=True,
        ) as get_client_patch, mock.patch(
                'paasta_tools.chronos_tools.create_complete_config',
                return_value=fake_complete_job_config,
                autospec=True,
        ), mock.patch(
                'paasta_tools.setup_chronos_job.setup_job',
                return_value=(expected_status, expected_output),
                autospec=True,
        ) as setup_job_patch, mock.patch(
                'paasta_tools.setup_chronos_job.send_event',
                autospec=True,
        ) as send_event_patch, mock.patch(
                'paasta_tools.setup_chronos_job.load_system_paasta_config',
                autospec=True,
        ) as load_system_paasta_config_patch, mock.patch(
                'sys.exit',
                autospec=True,
        ) as sys_exit_patch:
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            setup_chronos_job.main()

            parse_args_patch.assert_called_once_with()
            # The client is built from the loaded chronos config.
            get_client_patch.assert_called_once_with(
                load_chronos_config_patch.return_value)
            setup_job_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=fake_complete_job_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            # setup_job's (status, output) is reported to sensu.
            send_event_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=expected_status,
                output=expected_output,
            )
            sys_exit_patch.assert_called_once_with(0)

    def test_main_no_deployments(self):
        """NoDeploymentsAvailable must make main() exit 0, not crash."""
        with mock.patch(
                'paasta_tools.setup_chronos_job.parse_args',
                autospec=True,
                return_value=self.fake_args,
        ), mock.patch(
                'paasta_tools.chronos_tools.load_chronos_config',
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.get_chronos_client',
                autospec=True,
                return_value=self.fake_client,
        ), mock.patch(
                'paasta_tools.chronos_tools.create_complete_config',
                autospec=True,
                return_value={},
                side_effect=NoDeploymentsAvailable,
        ), mock.patch(
                'paasta_tools.setup_chronos_job.setup_job',
                autospec=True,
                return_value=(0, 'it_is_finished'),
        ), mock.patch(
                'paasta_tools.setup_chronos_job.send_event',
                autospec=True,
        ), mock.patch(
                'paasta_tools.setup_chronos_job.load_system_paasta_config',
                autospec=True,
        ) as mock_load_system_paasta_config:
            mock_load_system_paasta_config.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            with raises(SystemExit) as exc_info:
                setup_chronos_job.main()
            assert exc_info.value.code == 0

    def test_main_bad_chronos_job_config_notifies_user(self):
        """A broken chronos config should page via a CRITICAL sensu event.

        NoConfigurationForServiceError must not crash main() (it exits 0),
        but send_event must be called with a human-readable explanation.
        """
        with mock.patch(
                'paasta_tools.setup_chronos_job.parse_args',
                return_value=self.fake_args,
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.load_chronos_config',
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.get_chronos_client',
                return_value=self.fake_client,
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.create_complete_config',
                autospec=True,
                side_effect=NoConfigurationForServiceError(
                    'test bad configuration'),
        ), mock.patch(
                'paasta_tools.setup_chronos_job.setup_job',
                return_value=(0, 'it_is_finished'),
                autospec=True,
        ), mock.patch(
                'paasta_tools.setup_chronos_job.load_system_paasta_config',
                autospec=True,
        ) as load_system_paasta_config_patch, mock.patch(
                'paasta_tools.setup_chronos_job.send_event',
                autospec=True,
        ) as send_event_patch:
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(
                return_value=self.fake_cluster)
            with raises(SystemExit) as excinfo:
                setup_chronos_job.main()
            assert excinfo.value.code == 0
            # The event output must name the job and carry the original error.
            expected_error_msg = (
                "Could not read chronos configuration file for %s in cluster %s\nError was: test bad configuration"
                % (compose_job_id(self.fake_service,
                                  self.fake_instance), self.fake_cluster))
            send_event_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=Status.CRITICAL,
                output=expected_error_msg,
            )

    def test_setup_job_new_app_with_no_previous_jobs(self):
        """setup_job should deploy the new job when Chronos has no jobs yet.

        With no existing jobs found, the freshly built complete config must
        be the ``job_to_update`` handed to bounce_chronos_job.
        """
        fake_existing_jobs = []
        with mock.patch(
                'paasta_tools.setup_chronos_job.bounce_chronos_job',
                autospec=True,
                return_value=(0, 'ok'),
        ) as mock_bounce_chronos_job, mock.patch(
                'paasta_tools.chronos_tools.lookup_chronos_jobs',
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.sort_jobs',
                autospec=True,
                return_value=fake_existing_jobs,
        ), mock.patch(
                'paasta_tools.utils.load_system_paasta_config',
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.load_system_paasta_config',
                autospec=True,
        ) as load_system_paasta_config_patch, mock.patch(
                'paasta_tools.chronos_tools.load_chronos_job_config',
                autospec=True,
                return_value=self.fake_chronos_job_config,
        ):
            # Stub out everything create_complete_config reads from the
            # system paasta config.
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = None
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
                'file:///root/.dockercfg'
            # Build a real complete config (not a mock) so setup_job works on
            # a realistic job dict.
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            # The brand-new config is what gets bounced in.
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_with_previously_enabled_job(self):
        """If Chronos already has an enabled job for this instance, setup_job
        should still bounce the new complete config (replacing the old job)
        and must consult lookup_chronos_jobs to find the existing jobs."""
        fake_existing_job = {
            'name': 'fake_job',
            'disabled': False,
        }
        with mock.patch(
                'paasta_tools.setup_chronos_job.bounce_chronos_job',
                autospec=True,
                return_value=(0, 'ok'),
        ) as mock_bounce_chronos_job, mock.patch(
                'paasta_tools.chronos_tools.lookup_chronos_jobs',
                autospec=True,
        ) as mock_lookup_chronos_jobs, mock.patch(
                'paasta_tools.chronos_tools.sort_jobs',
                autospec=True,
                return_value=[fake_existing_job],
        ), mock.patch(
                'paasta_tools.utils.load_system_paasta_config',
                autospec=True,
        ), mock.patch(
                'paasta_tools.chronos_tools.load_system_paasta_config',
                autospec=True,
        ) as load_system_paasta_config_patch, mock.patch(
                'paasta_tools.chronos_tools.load_chronos_job_config',
                autospec=True,
                return_value=self.fake_chronos_job_config,
        ):
            # Stub out every system-config lookup create_complete_config performs.
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_deploy_whitelist.return_value = None
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
                "file:///root/.dockercfg"
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            # The new config, not the previously-enabled job, is what gets bounced.
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert mock_lookup_chronos_jobs.called
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_does_nothing_with_only_existing_app(self):
        """If the existing Chronos job matches the desired config (same name),
        setup_job should call bounce_chronos_job with job_to_update=None,
        i.e. take no action against Chronos."""
        fake_existing_job = copy.deepcopy(self.fake_config_dict)
        with mock.patch(
                'paasta_tools.setup_chronos_job.bounce_chronos_job',
                autospec=True,
                return_value=(0, 'ok'),
        ) as mock_bounce_chronos_job, mock.patch(
                'paasta_tools.chronos_tools.lookup_chronos_jobs',
                autospec=True,
                return_value=[fake_existing_job],
        ) as mock_lookup_chronos_jobs, mock.patch(
                'paasta_tools.chronos_tools.load_system_paasta_config',
                autospec=True,
        ) as load_system_paasta_config_patch, mock.patch(
                'paasta_tools.chronos_tools.load_chronos_job_config',
                autospec=True,
                return_value=self.fake_chronos_job_config,
        ):
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            complete_config = copy.deepcopy(self.fake_config_dict)
            # Force the complete_config's name to match the return value of
            # lookup_chronos_jobs to simulate that they have the same name
            complete_config["name"] = fake_existing_job["name"]
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            # job_to_update=None signals "nothing to change".
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert mock_lookup_chronos_jobs.called
            assert actual == mock_bounce_chronos_job.return_value

    def test_send_event(self):
        """send_event should forward status/output to monitoring_tools.send_event
        with the setup_chronos_job check name and the fixed check_every /
        alert_after overrides, loading the job config for the current cluster."""
        fake_status = '42'
        fake_output = 'something went wrong'
        fake_soa_dir = ''
        expected_check_name = 'setup_chronos_job.%s' % compose_job_id(
            self.fake_service, self.fake_instance)
        with mock.patch(
                "paasta_tools.monitoring_tools.send_event",
                autospec=True,
        ) as mock_send_event, mock.patch(
                "paasta_tools.chronos_tools.load_chronos_job_config",
                autospec=True,
        ) as mock_load_chronos_job_config, mock.patch(
                "paasta_tools.setup_chronos_job.load_system_paasta_config",
                autospec=True,
        ) as mock_load_system_paasta_config:
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(
                return_value='fake_cluster')
            # No service-level monitoring overrides configured.
            mock_load_chronos_job_config.return_value.get_monitoring.return_value = {}

            setup_chronos_job.send_event(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=fake_soa_dir,
                status=fake_status,
                output=fake_output,
            )
            # The hard-coded overrides come from setup_chronos_job.send_event.
            mock_send_event.assert_called_once_with(
                service=self.fake_service,
                check_name=expected_check_name,
                overrides={
                    'alert_after': '10m',
                    'check_every': '10s'
                },
                status=fake_status,
                output=fake_output,
                soa_dir=fake_soa_dir,
            )
            mock_load_chronos_job_config.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=mock_load_system_paasta_config.return_value.
                get_cluster.return_value,
                soa_dir=fake_soa_dir,
                load_deployments=False,
            )

    def test_bounce_chronos_job_takes_actions(self):
        """Bouncing with a job to update must log and call update_job."""
        job_payload = {'name': 'job_to_update'}
        with mock.patch(
                "paasta_tools.setup_chronos_job._log",
                autospec=True,
        ) as patched_log, mock.patch(
                "paasta_tools.chronos_tools.update_job",
                autospec=True,
        ) as patched_update_job:
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=job_payload,
                client=self.fake_client,
            )
            # A debug-level line is emitted during the bounce.
            patched_log.assert_any_call(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                component='deploy',
                level='debug',
                line=mock.ANY,
            )
            # An event-level line announces the actual update.
            patched_log.assert_any_call(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                component='deploy',
                level='event',
                line="Updated Chronos job: job_to_update",
            )
            patched_update_job.assert_called_once_with(
                job=job_payload,
                client=self.fake_client,
            )

    def test_bounce_chronos_job_doesnt_log_when_nothing_to_do(self):
        """A bounce with job_to_update=None must be a complete no-op:
        nothing logged and no call made to Chronos."""
        with mock.patch(
                "paasta_tools.setup_chronos_job._log",
                autospec=True,
        ) as patched_log, mock.patch(
                "paasta_tools.chronos_tools.update_job",
                autospec=True,
        ) as patched_update_job:
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert patched_log.called is False
            assert patched_update_job.called is False
Пример #40
0
def main() -> None:
    """Entry point: run a serviceinit command against one or more instances.

    Resolves the target service/instances from the CLI args, classifies each
    instance by type (marathon, chronos, paasta_native, adhoc), dispatches the
    requested command to the matching serviceinit module, and exits with the
    worst (max) return code observed.
    """
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if args.service_instance:
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif args.service and args.instances:
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error(
            "The name of service or the name of instance to inspect is missing. Exiting."
        )
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)
    clients = PaastaClients(cached=(command == 'status'))

    instance_types = ['marathon', 'chronos', 'paasta_native', 'adhoc']
    instance_types_map: Dict[str,
                             List[str]] = {it: []
                                           for it in instance_types}
    for instance in instances:
        try:
            instance_type = validate_service_instance(
                service,
                instance,
                cluster,
                args.soa_dir,
            )
        except Exception:
            # BUG FIX: the original used '%s' placeholders with str.format(),
            # which left the message uninterpolated. Use {} placeholders.
            log.error(
                'Exception raised while looking at service {} instance {}:'.format(
                    service, instance), )
            log.error(traceback.format_exc())
            return_codes.append(1)
            continue

        if instance_type not in instance_types:
            log.error(
                ("I calculated an instance_type of {} for {} which I don't "
                 "know how to handle.").format(
                     instance_type,
                     compose_job_id(service, instance),
                 ), )
            return_codes.append(1)
        else:
            instance_types_map[instance_type].append(instance)

    remote_run_frameworks = None
    if len(instance_types_map['adhoc']) > 0:
        remote_run_frameworks = paasta_remote_run.remote_run_frameworks()

    service_config_loader = PaastaServiceConfigLoader(service)

    for instance_type in instance_types:

        if instance_type == 'marathon':
            # Marathon needs the per-instance job configs for perform_command.
            job_configs = {
                jc.instance: jc
                for jc in service_config_loader.instance_configs(
                    cluster=cluster,
                    instance_type_class=marathon_tools.MarathonServiceConfig,
                )
            }

        for instance in instance_types_map[instance_type]:
            try:
                version = get_deployment_version(
                    actual_deployments,
                    cluster,
                    instance,
                )
                paasta_print('instance: %s' % PaastaColors.blue(instance))
                paasta_print('Git sha:    %s (desired)' % version)

                if instance_type == 'marathon':
                    return_code = marathon_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                        app_id=args.app_id,
                        clients=clients.marathon(),
                        job_config=job_configs[instance],
                    )
                elif instance_type == 'chronos':
                    return_code = chronos_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                        client=clients.chronos(),
                    )
                elif instance_type == 'paasta_native':
                    return_code = paasta_native_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                    )
                elif instance_type == 'adhoc':
                    # 'interactive' is a special instance that cannot be inspected.
                    if instance == 'interactive':
                        continue
                    if command != 'status':
                        raise NotImplementedError
                    paasta_remote_run.remote_run_list_report(
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        frameworks=remote_run_frameworks,
                    )
                    return_code = 0
            except Exception:
                log.error(('Exception raised while looking at service {} '
                           'instance {}:').format(service, instance), )
                log.error(traceback.format_exc())
                return_code = 1

            return_codes.append(return_code)

    # Exit with the worst return code seen across all instances.
    sys.exit(max(return_codes))
Пример #41
0
def test_compose_job_id_without_hashes():
    """Without hashes, compose_job_id is just service + '.' + instance."""
    result = utils.compose_job_id("my_cool_service", "main")
    assert result == "my_cool_service.main"
Пример #42
0
def perform_command(command,
                    service,
                    instance,
                    cluster,
                    verbose,
                    soa_dir,
                    app_id=None,
                    delta=None,
                    clients=None):
    """Performs a start/stop/restart/status on an instance
    :param command: String of start, stop, restart, status
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: int verbosity level
    :param soa_dir: SOA configuration directory to read from
    :param app_id: optional Marathon app id; derived from the job config when None
    :param delta: accepted for interface compatibility; not used in this function
    :param clients: MarathonClients collection; built from system config when None
    :returns: A unix-style return code
    """
    system_config = load_system_paasta_config()

    job_config = marathon_tools.load_marathon_service_config(service,
                                                             instance,
                                                             cluster,
                                                             soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = job_config.format_marathon_app_dict()['id']
        except NoDockerImageError:
            # No deployed docker image means there is nothing to act on.
            job_id = compose_job_id(service, instance)
            paasta_print(
                "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?"
                % job_id)
            return 1

    normal_instance_count = job_config.get_instances()
    proxy_port = marathon_tools.get_proxy_port_for_instance(service,
                                                            instance,
                                                            cluster,
                                                            soa_dir=soa_dir)

    if clients is None:
        clients = marathon_tools.get_marathon_clients(
            system_config.get_marathon_servers())

    # Pick the marathon shard currently responsible for this job.
    current_client = clients.get_current_client_for_service(job_config)

    if command == 'restart':
        restart_marathon_job(service, instance, app_id, current_client,
                             cluster)
    elif command == 'status':
        # Desired state, marathon state, then (optionally verbose) task detail.
        paasta_print(
            status_desired_state(service, instance, current_client,
                                 job_config))
        paasta_print(
            status_marathon_job(service, instance, app_id,
                                normal_instance_count, current_client))
        dashboards = get_marathon_dashboard_links(clients, system_config)
        tasks, out = status_marathon_job_verbose(service, instance, clients,
                                                 cluster, soa_dir, job_config,
                                                 dashboards)
        if verbose > 0:
            paasta_print(out)
        paasta_print(
            status_mesos_tasks(service, instance, normal_instance_count))
        if verbose > 0:
            tail_lines = calculate_tail_lines(verbose_level=verbose)
            paasta_print(
                status_mesos_tasks_verbose(
                    job_id=app_id,
                    get_short_task_id=get_short_task_id,
                    tail_lines=tail_lines,
                ))
        # Only services registered in smartstack have a proxy port; report
        # backend health for those.
        if proxy_port is not None:
            normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
                service,
                instance,
                cluster,
            )
            paasta_print(
                status_smartstack_backends(
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    job_config=job_config,
                    tasks=tasks,
                    expected_count=normal_smartstack_count,
                    soa_dir=soa_dir,
                    verbose=verbose > 0,
                    synapse_port=system_config.get_synapse_port(),
                    synapse_haproxy_url_format=system_config.
                    get_synapse_haproxy_url_format(),
                    system_deploy_blacklist=system_config.get_deploy_blacklist(
                    ),
                    system_deploy_whitelist=system_config.get_deploy_whitelist(
                    ),
                ))
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
Пример #43
0
def check_smartstack_replication_for_instance(
    service,
    instance,
    cluster,
    soa_dir,
    crit_threshold,
    expected_count,
):
    """Check a set of namespaces to see if their number of available backends is too low,
    emitting events to Sensu based on the fraction available and the thresholds given.

    :param service: A string like example_service
    :param instance: A nerve instance/namespace, like "main"
    :param cluster: name of the cluster
    :param soa_dir: The SOA configuration directory to read from
    :param crit_threshold: The fraction of instances that need to be up to avoid a CRITICAL event
    :param expected_count: Total number of instances expected across all locations
    """
    namespace = marathon_tools.read_namespace_for_service_instance(
        service, instance, soa_dir=soa_dir)
    if namespace != instance:
        # Instances announced under another namespace get checked when that
        # namespace's own instance is processed; skip to avoid double-counting.
        log.debug("Instance %s is announced under namespace: %s. "
                  "Not checking replication for it" % (instance, namespace))
        return
    full_name = compose_job_id(service, instance)
    job_config = marathon_tools.load_marathon_service_config(
        service, instance, cluster)
    monitoring_blacklist = job_config.get_monitoring_blacklist()
    log.info('Checking instance %s in smartstack', full_name)
    smartstack_replication_info = load_smartstack_info_for_service(
        service=service,
        namespace=namespace,
        soa_dir=soa_dir,
        blacklist=monitoring_blacklist)
    log.debug('Got smartstack replication info for %s: %s' %
              (full_name, smartstack_replication_info))

    if len(smartstack_replication_info) == 0:
        # No replication info at all usually means a bad discover key.
        status = pysensu_yelp.Status.CRITICAL
        output = (
            'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
            'is valid!\n') % full_name
        output = add_context_to_event(service, instance, output)
        log.error(output)
    else:
        # Expect the instances to be spread evenly across locations.
        expected_count_per_location = int(expected_count /
                                          len(smartstack_replication_info))
        output = ''
        under_replication_per_location = []

        # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
        # on Python 3; dict.items() is correct on both.
        for location, available_backends in sorted(
                smartstack_replication_info.items()):
            num_available_in_location = available_backends.get(full_name, 0)
            under_replicated, ratio = is_under_replicated(
                num_available_in_location, expected_count_per_location,
                crit_threshold)
            if under_replicated:
                output += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
                    full_name, num_available_in_location,
                    expected_count_per_location, location, ratio)
            else:
                output += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
                    full_name, num_available_in_location,
                    expected_count_per_location, location, ratio)
            under_replication_per_location.append(under_replicated)

        # CRITICAL if any single location is under-replicated.
        if any(under_replication_per_location):
            status = pysensu_yelp.Status.CRITICAL
            output = add_context_to_event(service, instance, output)
            log.error(output)
        else:
            status = pysensu_yelp.Status.OK
            log.info(output)
    send_event(service=service,
               namespace=instance,
               cluster=cluster,
               soa_dir=soa_dir,
               status=status,
               output=output)
def test_status_smartstack_backends_verbose_multiple_apps():
    """Verbose smartstack status should show matched backends normally and
    render backends with no live task (bad_task's backend) greyed out."""
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'

    good_task = mock.Mock()
    bad_task = mock.Mock()
    other_task = mock.Mock()
    # Fake haproxy rows keyed by the task they (nominally) belong to.
    haproxy_backends_by_task = {
        good_task: {
            'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
            'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
            'check_status': 'L7OK', 'check_duration': 1,
        },
        bad_task: {
            'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
            'check_code': '200', 'svname': 'ipaddress2:1002_hostname2',
            'check_status': 'L7OK', 'check_duration': 1,
        },
    }

    with mock.patch(
        'paasta_tools.marathon_tools.load_service_namespace_config', autospec=True,
    ) as mock_load_service_namespace_config, mock.patch(
        'paasta_tools.marathon_tools.read_registration_for_service_instance', autospec=True,
    ) as mock_read_reg, mock.patch(
        'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist', autospec=True,
    ) as mock_get_all_slaves_for_blacklist_whitelist, mock.patch(
        'paasta_tools.marathon_serviceinit.get_backends', autospec=True,
    ) as mock_get_backends, mock.patch(
        'paasta_tools.marathon_serviceinit.match_backends_and_tasks', autospec=True,
    ) as mock_match_backends_and_tasks:
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_reg.return_value = service_instance
        mock_get_backends.return_value = haproxy_backends_by_task.values()
        # One matched pair, one backend without a task, one task without a backend.
        mock_match_backends_and_tasks.return_value = [
            (haproxy_backends_by_task[good_task], good_task),
            (haproxy_backends_by_task[bad_task], None),
            (None, other_task),
        ]
        tasks = [good_task, other_task]
        # A single slave in one location; its hostname drives the synapse query.
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [
            {
                'hostname': 'hostname1',
                'attributes': {
                    'fake_discover': 'fakelocation',
                },
            },
        ]
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(haproxy_backends_by_task),
            soa_dir=None,
            verbose=True,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
            system_deploy_blacklist=[],
            system_deploy_whitelist=[],
        )
        mock_get_backends.assert_called_once_with(
            service_instance,
            synapse_host='hostname1',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        assert "fakelocation" in actual
        assert "hostname1:1001" in actual
        # The task-less backend (hostname2) should be rendered in grey.
        assert re.search(r"%s[^\n]*hostname2:1002" % re.escape(PaastaColors.GREY), actual)
def test_compose_job_id_with_git_hash():
    """Supplying git_hash without a config_hash must be rejected."""
    with raises(utils.InvalidJobNameError):
        utils.compose_job_id(
            "my_cool_service",
            "main",
            git_hash="git123abc",
        )
Пример #46
0
def paasta_to_task_config_kwargs(
        service,
        instance,
        cluster,
        system_paasta_config,
        instance_type,
        soa_dir=DEFAULT_SOA_DIR,
        config_overrides=None,
):
    """Translate a paasta native job config into task-config keyword arguments.

    Loads the native job config for the given service instance and returns a
    dict of kwargs (image, cmd, resources, volumes, docker parameters, name)
    suitable for building a task config. GPU jobs switch to the MESOS
    containerizer.
    """
    job_config = load_paasta_native_job_config(
        service,
        instance,
        cluster,
        soa_dir=soa_dir,
        instance_type=instance_type,
        config_overrides=config_overrides,
    )

    docker_url = job_config.get_docker_url()
    parameters = [
        {'key': param['key'], 'value': param['value']}
        for param in job_config.format_docker_parameters()
    ]

    # Translate paasta volume dicts into the task-config volume shape.
    volumes = [
        {
            'container_path': vol['containerPath'],
            'host_path': vol['hostPath'],
            'mode': vol['mode'].upper(),
        }
        for vol in job_config.get_volumes(
            system_volumes=system_paasta_config.get_volumes(),
        )
    ]

    kwargs = {
        'image': str(docker_url),
        'cmd': job_config.get_cmd(),
        'cpus': job_config.get_cpus(),
        'mem': float(job_config.get_mem()),
        'disk': float(job_config.get_disk(10)),
        'volumes': volumes,
        'uris': [system_paasta_config.get_dockercfg_location()],
        'docker_parameters': parameters,
        'containerizer': 'DOCKER',
        'environment': job_config.get_env_dictionary(),
    }

    # GPU tasks must run under the unified MESOS containerizer.
    gpus = job_config.get_gpus()
    if gpus > 0:
        kwargs['gpus'] = int(gpus)
        kwargs['containerizer'] = 'MESOS'

    # The config hash is computed over kwargs *before* 'name' is added, so
    # the name itself never feeds back into the hash.
    config_hash = get_config_hash(
        kwargs,
        force_bounce=job_config.get_force_bounce(),
    )
    kwargs['name'] = str(compose_job_id(
        service,
        instance,
        git_hash=get_code_sha_from_dockerurl(docker_url),
        config_hash=config_hash,
        spacer=MESOS_TASK_SPACER,
    ))

    return kwargs
def test_status_smartstack_backends_multiple_locations_expected_count():
    """With slaves in two locations, status_smartstack_backends should query
    synapse on a host from each location and split the expected instance count
    evenly between them when reporting backend health."""
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    normal_count = 10

    good_task = mock.Mock()
    other_task = mock.Mock()
    # A single healthy haproxy backend row.
    fake_backend = {
        'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
        'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
        'check_status': 'L7OK', 'check_duration': 1,
    }
    with mock.patch(
        'paasta_tools.marathon_tools.load_service_namespace_config', autospec=True,
    ) as mock_load_service_namespace_config, mock.patch(
        'paasta_tools.marathon_tools.read_registration_for_service_instance', autospec=True,
    ) as mock_read_reg, mock.patch(
        'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist', autospec=True,
    ) as mock_get_all_slaves_for_blacklist_whitelist, mock.patch(
        'paasta_tools.marathon_serviceinit.get_backends', autospec=True,
    ) as mock_get_backends, mock.patch(
        'paasta_tools.marathon_serviceinit.match_backends_and_tasks', autospec=True,
    ) as mock_match_backends_and_tasks, mock.patch(
        'paasta_tools.marathon_serviceinit.haproxy_backend_report', autospec=True,
    ) as mock_haproxy_backend_report:
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_reg.return_value = service_instance
        mock_get_backends.return_value = [fake_backend]
        mock_match_backends_and_tasks.return_value = [
            (fake_backend, good_task),
        ]
        tasks = [good_task, other_task]
        # Two slaves, each in a distinct discover location.
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [
            {
                'hostname': 'hostname1',
                'attributes': {
                    'fake_discover': 'fakelocation',
                },
            },
            {
                'hostname': 'hostname2',
                'attributes': {
                    'fake_discover': 'fakelocation2',
                },
            },
        ]
        marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=normal_count,
            soa_dir=None,
            verbose=False,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
            system_deploy_blacklist=[],
            system_deploy_whitelist=[],
        )
        # Synapse must be queried once per location (via one host from each).
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='hostname1',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='hostname2',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        # The expected count is divided evenly across the two locations.
        expected_count_per_location = int(normal_count / 2)
        mock_haproxy_backend_report.assert_any_call(expected_count_per_location, 1)
def test_status_smartstack_backends_verbose_multiple_apps():
    """Verbose smartstack status should print one detail row per haproxy
    backend, greying out backends that no longer map to a running task.
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'

    # Three task fixtures: good_task is backed by a live haproxy backend,
    # bad_task's backend has lost its task, other_task has no backend at all.
    good_task = mock.Mock()
    bad_task = mock.Mock()
    other_task = mock.Mock()
    # Minimal haproxy CSV rows, keyed by the task they nominally belong to;
    # svname encodes "ip:port_hostname".
    haproxy_backends_by_task = {
        good_task: {
            'status': 'UP',
            'lastchg': '1',
            'last_chk': 'OK',
            'check_code': '200',
            'svname': 'ipaddress1:1001_hostname1',
            'check_status': 'L7OK',
            'check_duration': 1
        },
        bad_task: {
            'status': 'UP',
            'lastchg': '1',
            'last_chk': 'OK',
            'check_code': '200',
            'svname': 'ipaddress2:1002_hostname2',
            'check_status': 'L7OK',
            'check_duration': 1
        },
    }

    # NOTE: contextlib.nested is Python-2 only; the patcher order here must
    # match the as-tuple below, since nested() yields values positionally.
    with contextlib.nested(
            mock.patch(
                'paasta_tools.marathon_tools.load_service_namespace_config',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_tools.read_namespace_for_service_instance'
            ),
            mock.patch(
                'paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'
            ),
            mock.patch('paasta_tools.marathon_serviceinit.get_backends',
                       autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.match_backends_and_tasks',
                autospec=True),
    ) as (
            mock_load_service_namespace_config,
            mock_read_ns,
            mock_get_mesos_slaves_grouped_by_attribute,
            mock_get_backends,
            mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        mock_get_backends.return_value = haproxy_backends_by_task.values()
        # (backend, task) pairs: matched, backend-without-task,
        # task-without-backend.
        mock_match_backends_and_tasks.return_value = [
            (haproxy_backends_by_task[good_task], good_task),
            (haproxy_backends_by_task[bad_task], None),
            (None, other_task),
        ]
        tasks = [good_task, other_task]
        # A single discovery location, so get_backends is queried exactly once.
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {
            'fake_location1': ['fakehost1']
        }
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(haproxy_backends_by_task),
            soa_dir=None,
            verbose=True,
        )
        mock_get_backends.assert_called_once_with(
            service_instance,
            synapse_host='fakehost1',
            synapse_port=3212,
        )
        assert "fake_location1" in actual
        assert "hostname1:1001" in actual
        # The backend whose task is gone must be rendered grey in verbose mode.
        assert re.search(
            r"%s[^\n]*hostname2:1002" % re.escape(PaastaColors.GREY), actual)
Пример #49
0
def perform_command(command,
                    service,
                    instance,
                    cluster,
                    verbose,
                    soa_dir,
                    app_id=None,
                    delta=None):
    """Performs a start/stop/restart/status/scale on an instance
    :param command: String of start, stop, restart, status or scale
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: bool if the output should be verbose or not
    :param soa_dir: The SOA configuration directory to read from
    :param app_id: Marathon app id; looked up from the complete config when
                   not given
    :param delta: Number of instances to scale by (only used by 'scale')
    :returns: A unix-style return code
    """
    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service,
                                                             instance,
                                                             cluster,
                                                             soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = marathon_tools.create_complete_config(
                service, instance, marathon_config, soa_dir=soa_dir)['id']
        except NoDockerImageError:
            # No deployed Docker image yet: nothing to act on, bail out early.
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service,
                                                            instance,
                                                            soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(),
                                                marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'start':
        start_marathon_job(service, instance, app_id, normal_instance_count,
                           client, cluster)
    elif command == 'stop':
        stop_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'restart':
        restart_marathon_job(service, instance, app_id, normal_instance_count,
                             client, cluster)
    elif command == 'status':
        # Setting up transparent cache for http API calls
        requests_cache.install_cache('paasta_serviceinit', backend='memory')

        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id,
                                  normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose:
            print status_mesos_tasks_verbose(app_id, get_short_task_id)
        # Only services registered in smartstack have a proxy port; skip the
        # haproxy backend report otherwise.
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose,
            )
    elif command == 'scale':
        scale_marathon_job(service, instance, app_id, delta, client, cluster)
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0
def test_status_smartstack_backends_different_nerve_ns():
    """Backends must be looked up under the job's registration (a nerve
    namespace that differs from the instance name), not under the instance.
    """
    service = "servicename"
    instance = "instancename"
    different_ns = "different_ns"
    service_instance = compose_job_id(service, different_ns)

    cluster = "fake_cluster"
    good_task = mock.Mock()
    bad_task = mock.Mock()
    other_task = mock.Mock()

    def make_backend(svname):
        # Minimal haproxy CSV row describing one healthy backend.
        return {
            "status": "UP",
            "lastchg": "1",
            "last_chk": "OK",
            "check_code": "200",
            "svname": svname,
            "check_status": "L7OK",
            "check_duration": 1,
        }

    haproxy_backends_by_task = {
        good_task: make_backend("ipaddress1:1001_hostname1"),
        bad_task: make_backend("ipaddress2:1002_hostname2"),
    }

    slaves_patcher = mock.patch(
        "paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist",
        autospec=True,
    )
    backends_patcher = mock.patch(
        "paasta_tools.marathon_serviceinit.get_backends",
        autospec=True,
    )
    match_patcher = mock.patch(
        "paasta_tools.marathon_serviceinit.match_backends_and_tasks",
        autospec=True,
    )
    with slaves_patcher as mock_get_all_slaves_for_blacklist_whitelist, \
            backends_patcher as mock_get_backends, \
            match_patcher as mock_match_backends_and_tasks:
        fake_service_namespace_config = mock.Mock()
        fake_service_namespace_config.get_discover.return_value = "fake_discover"
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [
            {
                "hostname": "fakehost",
                "attributes": {"fake_discover": "fakelocation"},
            },
        ]

        mock_get_backends.return_value = haproxy_backends_by_task.values()
        # One matched pair, one backend without a task, one task without
        # a backend.
        mock_match_backends_and_tasks.return_value = [
            (haproxy_backends_by_task[good_task], good_task),
            (haproxy_backends_by_task[bad_task], None),
            (None, other_task),
        ]

        tasks = [good_task, other_task]
        with mock.patch.object(
                fake_marathon_job_config,
                "get_registrations",
                return_value=[compose_job_id(service, different_ns)],
        ):
            actual = marathon_serviceinit.status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=fake_marathon_job_config,
                service_namespace_config=fake_service_namespace_config,
                tasks=tasks,
                expected_count=len(haproxy_backends_by_task),
                soa_dir=None,
                verbose=False,
                synapse_port=123456,
                synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
                system_deploy_blacklist=[],
                system_deploy_whitelist=[],
            )
        # The lookup key is "servicename.different_ns", not the instance name.
        mock_get_backends.assert_called_once_with(
            service_instance,
            synapse_host="fakehost",
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        assert "fakelocation" in actual
        assert "Healthy" in actual
Пример #51
0
def main():
    args = parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if (args.service_instance):
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif (args.service and args.instances):
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error(
            "The name of service or the name of instance to inspect is missing. Exiting."
        )
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)

    for instance in instances:
        # For an instance, there might be multiple versions running, e.g. in crossover bouncing.
        # In addition, mesos master does not have information of a chronos service's git hash.
        # The git sha in deployment.json is simply used here.
        version = get_deployment_version(actual_deployments, cluster, instance)
        print 'instance: %s' % PaastaColors.blue(instance)
        print 'Git sha:    %s (desired)' % version

        try:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, args.soa_dir)
            if instance_type == 'marathon':
                return_code = marathon_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                    app_id=args.app_id,
                    delta=args.delta,
                )
            elif instance_type == 'chronos':
                return_code = chronos_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                )
            else:
                log.error(
                    "I calculated an instance_type of %s for %s which I don't know how to handle."
                    % (instance_type, compose_job_id(service, instance)))
                return_code = 1
        except:
            log.error(
                'Exception raised while looking at service %s instance %s:' %
                (service, instance))
            log.error(traceback.format_exc())
            return_code = 1

        return_codes.append(return_code)

    sys.exit(max(return_codes))
Пример #52
0
def test_status_smartstack_backends_verbose_multiple_locations():
    """Verbose smartstack status should query one synapse host per discovery
    location and report each location's backends separately.
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    good_task = mock.Mock()
    other_task = mock.Mock()
    # One healthy backend per location; svname encodes "ip:port_hostname".
    fake_backend = {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                    'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
                    'check_status': 'L7OK', 'check_duration': 1}
    fake_other_backend = {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                          'check_code': '200', 'svname': 'ipaddress1:1002_hostname2',
                          'check_status': 'L7OK', 'check_duration': 1}
    # NOTE: contextlib.nested is Python-2 only; patcher order must match the
    # as-tuple below. The side_effect lists hand out one value per location
    # queried — NOTE(review): this appears to assume a stable iteration order
    # of the two-location dict, though the final assertions accept either
    # pairing; confirm if the pairing ever matters.
    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_namespace_for_service_instance'),
        mock.patch('paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'),
        mock.patch('paasta_tools.marathon_serviceinit.get_backends', autospec=True,
                   side_effect=[[fake_backend], [fake_other_backend]]),
        mock.patch('paasta_tools.marathon_serviceinit.match_backends_and_tasks',
                   autospec=True, side_effect=[[(fake_backend, good_task)], [(fake_other_backend, good_task)]]),
    ) as (
        mock_load_service_namespace_config,
        mock_read_ns,
        mock_get_mesos_slaves_grouped_by_attribute,
        mock_get_backends,
        mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        tasks = [good_task, other_task]
        # Two discovery locations, so get_backends must be called twice, once
        # against a host from each location.
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {
            'fake_location1': ['fakehost1'],
            'fake_location2': ['fakehost2'],
        }
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=1,
            soa_dir=None,
            verbose=True,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost1',
            synapse_port=3212,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost2',
            synapse_port=3212,
        )
        mock_get_mesos_slaves_grouped_by_attribute.assert_called_once_with(
            attribute='fake_discover',
            blacklist=[],
        )
        # Each location gets its own "Healthy" header plus its backend rows.
        assert "fake_location1 - %s" % PaastaColors.green('Healthy') in actual
        assert "hostname1:1001" in actual
        assert "fake_location2 - %s" % PaastaColors.green('Healthy') in actual
        assert "hostname2:1002" in actual
Пример #53
0
def test_status_smartstack_backends_multiple_locations():
    """Non-verbose smartstack status should report a per-location health
    summary, querying one synapse host per discovery location.
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    good_task = mock.Mock()
    other_task = mock.Mock()
    # A single healthy backend; svname encodes "ip:port_hostname".
    fake_backend = {
        'status': 'UP',
        'lastchg': '1',
        'last_chk': 'OK',
        'check_code': '200',
        'svname': 'ipaddress1:1001_hostname1',
        'check_status': 'L7OK',
        'check_duration': 1
    }
    # NOTE: contextlib.nested is Python-2 only; the patcher order here must
    # match the as-tuple below, since nested() yields values positionally.
    with contextlib.nested(
            mock.patch(
                'paasta_tools.marathon_tools.load_service_namespace_config',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_tools.read_namespace_for_service_instance',
                autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.get_all_slaves_for_blacklist_whitelist',
                autospec=True),
            mock.patch('paasta_tools.marathon_serviceinit.get_backends',
                       autospec=True),
            mock.patch(
                'paasta_tools.marathon_serviceinit.match_backends_and_tasks',
                autospec=True),
    ) as (
            mock_load_service_namespace_config,
            mock_read_ns,
            mock_get_all_slaves_for_blacklist_whitelist,
            mock_get_backends,
            mock_match_backends_and_tasks,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        mock_get_backends.return_value = [fake_backend]
        mock_match_backends_and_tasks.return_value = [
            (fake_backend, good_task),
        ]
        tasks = [good_task, other_task]
        # Two slaves in two distinct discovery locations, so get_backends is
        # expected to be called once against a host from each location.
        mock_get_all_slaves_for_blacklist_whitelist.return_value = [{
            'hostname':
            'fakehost',
            'attributes': {
                'fake_discover': 'fakelocation'
            }
        }, {
            'hostname':
            'fakeotherhost',
            'attributes': {
                'fake_discover': 'fakeotherlocation'
            }
        }]
        actual = marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=len(mock_get_backends.return_value),
            soa_dir=None,
            verbose=False,
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakeotherhost',
            synapse_port=123456,
            synapse_haproxy_url_format=DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT,
        )
        # Both locations should show up as healthy in the summary output.
        assert "fakelocation - %s" % PaastaColors.green('Healthy') in actual
        assert "fakeotherlocation - %s" % PaastaColors.green(
            'Healthy') in actual
Пример #54
0
def _namespaced_get_classic_service_information_for_nerve(name, namespace, soa_dir):
    """Build the (registration_name, nerve_config) pair for a classic service.

    The nerve config is the service's namespace configuration, augmented with
    the listening port read from the service's ``port`` file under soa_dir.
    """
    config = load_service_namespace_config(name, namespace, soa_dir)
    config['port'] = service_configuration_lib.read_port(
        os.path.join(soa_dir, name, 'port'))
    return compose_job_id(name, namespace), config
Пример #55
0
def get_happy_tasks(app,
                    service,
                    nerve_ns,
                    min_task_uptime=None,
                    check_haproxy=False):
    """Given a MarathonApp object, return the subset of tasks which are considered healthy.
    With the default options, this returns tasks where at least one of the defined Marathon healthchecks passes.
    For it to do anything interesting, set min_task_uptime or check_haproxy.

    :param app: A MarathonApp object.
    :param service: The name of the service.
    :param nerve_ns: The nerve namespace
    :param min_task_uptime: Minimum number of seconds that a task must be running before we consider it healthy. Useful
                            if tasks take a while to start up.
    :param check_haproxy: Whether to check the local haproxy to make sure this task has been registered and discovered.
    :returns: The happy tasks, in the same relative order they appear in app.tasks.
    """
    tasks = app.tasks
    happy = []
    now = datetime.datetime.utcnow()

    if check_haproxy:
        # Narrow the candidate set to tasks haproxy has actually registered
        # and discovered.
        tasks_in_smartstack = []
        service_namespace = compose_job_id(service, nerve_ns)

        service_namespace_config = marathon_tools.load_service_namespace_config(
            service, nerve_ns)
        discover_location_type = service_namespace_config.get_discover()
        unique_values = mesos_tools.get_mesos_slaves_grouped_by_attribute(
            discover_location_type)

        # Query one synapse host per discovery location.
        # (.items() instead of the Py2-only .iteritems() — behaviorally
        # identical here and works on Python 3 as well.)
        for value, hosts in unique_values.items():
            synapse_host = hosts[0]
            tasks_in_smartstack.extend(
                get_registered_marathon_tasks(
                    synapse_host,
                    DEFAULT_SYNAPSE_PORT,
                    service_namespace,
                    tasks,
                ))
        tasks = tasks_in_smartstack

    for task in tasks:
        # Too young to be trusted as healthy yet.
        if min_task_uptime is not None:
            if (now - task.started_at).total_seconds() < min_task_uptime:
                continue

        # If there are healthchecks defined for the app but none have executed yet,
        # then the task is unhappy.
        if app.health_checks and not task.health_check_results:
            continue

        # If there are health check results, at least one must be passing.
        # Generator form short-circuits on the first passing check (the
        # original built a throwaway list inside any()).
        if task.health_check_results:
            task_up = any(hc_result.alive is True
                          for hc_result in task.health_check_results)
            if not task_up:
                continue
        happy.append(task)

    return happy
Пример #56
0
def test_status_smartstack_backends_multiple_locations_expected_count():
    """The expected backend count should be split evenly across discovery
    locations when reporting per-location haproxy health.
    """
    service = 'my_service'
    instance = 'my_instance'
    service_instance = compose_job_id(service, instance)
    cluster = 'fake_cluster'
    normal_count = 10

    good_task = mock.Mock()
    other_task = mock.Mock()
    # A single healthy backend; svname encodes "ip:port_hostname".
    fake_backend = {'status': 'UP', 'lastchg': '1', 'last_chk': 'OK',
                    'check_code': '200', 'svname': 'ipaddress1:1001_hostname1',
                    'check_status': 'L7OK', 'check_duration': 1}
    # NOTE: contextlib.nested is Python-2 only; the patcher order here must
    # match the as-tuple below, since nested() yields values positionally.
    with contextlib.nested(
        mock.patch('paasta_tools.marathon_tools.load_service_namespace_config', autospec=True),
        mock.patch('paasta_tools.marathon_tools.read_namespace_for_service_instance'),
        mock.patch('paasta_tools.marathon_serviceinit.get_mesos_slaves_grouped_by_attribute'),
        mock.patch('paasta_tools.marathon_serviceinit.get_backends', autospec=True),
        mock.patch('paasta_tools.marathon_serviceinit.match_backends_and_tasks', autospec=True),
        mock.patch('paasta_tools.marathon_serviceinit.haproxy_backend_report', autospec=True),
    ) as (
        mock_load_service_namespace_config,
        mock_read_ns,
        mock_get_mesos_slaves_grouped_by_attribute,
        mock_get_backends,
        mock_match_backends_and_tasks,
        mock_haproxy_backend_report,
    ):
        mock_load_service_namespace_config.return_value.get_discover.return_value = 'fake_discover'
        mock_read_ns.return_value = instance
        mock_get_backends.return_value = [fake_backend]
        mock_match_backends_and_tasks.return_value = [
            (fake_backend, good_task),
        ]
        tasks = [good_task, other_task]
        # Two discovery locations, so the expected count per location is half
        # of normal_count.
        mock_get_mesos_slaves_grouped_by_attribute.return_value = {
            'fake_location1': ['fakehost1'],
            'fake_location2': ['fakehost2'],
        }
        marathon_serviceinit.status_smartstack_backends(
            service=service,
            instance=instance,
            cluster=cluster,
            job_config=fake_marathon_job_config,
            tasks=tasks,
            expected_count=normal_count,
            soa_dir=None,
            verbose=False,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost1',
            synapse_port=3212,
        )
        mock_get_backends.assert_any_call(
            service_instance,
            synapse_host='fakehost2',
            synapse_port=3212,
        )
        # Each location reports 1 matched backend against 10/2 == 5 expected.
        expected_count_per_location = int(
            normal_count / len(mock_get_mesos_slaves_grouped_by_attribute.return_value))
        mock_haproxy_backend_report.assert_any_call(expected_count_per_location, 1)
Пример #57
0
def perform_command(command,
                    service,
                    instance,
                    cluster,
                    verbose,
                    soa_dir,
                    app_id=None,
                    delta=None):
    """Performs a start/stop/restart/status/scale on an instance
    :param command: String of start, stop, restart, status or scale
    :param service: service name
    :param instance: instance name, like "main" or "canary"
    :param cluster: cluster name
    :param verbose: int verbosity level (>0 adds verbose task output; >1 also
                    tails the tasks' stdout/stderr streams)
    :param soa_dir: The SOA configuration directory to read from
    :param app_id: Marathon app id; derived from the job config when not given
    :param delta: Number of instances to scale by (only used by 'scale')
    :returns: A unix-style return code
    """
    system_config = load_system_paasta_config()

    marathon_config = marathon_tools.load_marathon_config()
    job_config = marathon_tools.load_marathon_service_config(service,
                                                             instance,
                                                             cluster,
                                                             soa_dir=soa_dir)
    if not app_id:
        try:
            app_id = job_config.format_marathon_app_dict()['id']
        except NoDockerImageError:
            # No deployed Docker image yet: nothing to act on, bail out early.
            job_id = compose_job_id(service, instance)
            print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
            return 1

    normal_instance_count = job_config.get_instances()
    normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, cluster)
    proxy_port = marathon_tools.get_proxy_port_for_instance(service,
                                                            instance,
                                                            cluster,
                                                            soa_dir=soa_dir)

    client = marathon_tools.get_marathon_client(marathon_config.get_url(),
                                                marathon_config.get_username(),
                                                marathon_config.get_password())
    if command == 'start':
        start_marathon_job(service, instance, app_id, normal_instance_count,
                           client, cluster)
    elif command == 'stop':
        stop_marathon_job(service, instance, app_id, client, cluster)
    elif command == 'restart':
        restart_marathon_job(service, instance, app_id, normal_instance_count,
                             client, cluster)
    elif command == 'status':
        print status_desired_state(service, instance, client, job_config)
        print status_marathon_job(service, instance, app_id,
                                  normal_instance_count, client)
        tasks, out = status_marathon_job_verbose(service, instance, client)
        if verbose > 0:
            print out
        print status_mesos_tasks(service, instance, normal_instance_count)
        if verbose > 0:
            # Verbosity above 1 additionally tails the tasks' std streams.
            tail_stdstreams = verbose > 1
            print status_mesos_tasks_verbose(app_id, get_short_task_id,
                                             tail_stdstreams)
        # Only services registered in smartstack have a proxy port; skip the
        # haproxy backend report otherwise.
        if proxy_port is not None:
            print status_smartstack_backends(
                service=service,
                instance=instance,
                cluster=cluster,
                job_config=job_config,
                tasks=tasks,
                expected_count=normal_smartstack_count,
                soa_dir=soa_dir,
                verbose=verbose > 0,
                synapse_port=system_config.get_synapse_port(),
                synapse_haproxy_url_format=system_config.
                get_synapse_haproxy_url_format(),
            )
    elif command == 'scale':
        scale_marathon_job(service, instance, app_id, delta, client, cluster)
    else:
        # The command parser shouldn't have let us get this far...
        raise NotImplementedError("Command %s is not implemented!" % command)
    return 0