Example #1
def main():
    system_paasta_config = load_system_paasta_config()
    args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())

    service, instance = args.service_instance.split(".", 1)
    try:
        validate_service_instance(
            service,
            instance,
            cluster=system_paasta_config.get_cluster(),
            soa_dir=DEFAULT_SOA_DIR,
        )
    except NoConfigurationForServiceError as e:
        paasta_print(PaastaColors.red(str(e)))
        sys.exit(1)

    service_instance = ServiceInstance(
        service=service,
        instance=instance,
        bounce_by=time.time() + args.bounce_by_delay_secs,
        wait_until=time.time(),
        watcher="manually_added",
        failures=0,
        enqueue_time=time.time(),
        bounce_start_time=time.time(),
    )

    zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
    zk_client.start()
    queue = ZKDelayDeadlineQueue(client=zk_client)

    queue.put(service_instance)
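
The examples on this page all call validate_service_instance the same way: validate_service_instance(service, instance, cluster, soa_dir) returns the instance type as a string (e.g. 'marathon', 'kubernetes', 'adhoc') and raises NoConfigurationForServiceError when the service.instance pair is not configured for that cluster. Below is a minimal sketch of that shared pattern, assuming the names used are importable from paasta_tools.utils as they are in these excerpts; it is an illustration, not code from the project.

import sys

from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import validate_service_instance


def resolve_instance_type(service, instance, cluster, soa_dir=DEFAULT_SOA_DIR):
    # Returns e.g. 'marathon' or 'kubernetes'; exits if the pair is unknown,
    # mirroring the error handling used in Example #1.
    try:
        return validate_service_instance(service, instance, cluster, soa_dir)
    except NoConfigurationForServiceError as e:
        print(str(e), file=sys.stderr)
        sys.exit(1)
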
Example #2
def test_validate_service_instance_valid_marathon():
    mock_marathon_services = [('service1', 'main'), ('service2', 'main')]
    mock_chronos_services = [('service1', 'worker'), ('service2', 'tailer')]
    my_service = 'service1'
    my_instance = 'main'
    fake_cluster = 'fake_cluster'
    fake_soa_dir = 'fake_soa_dir'
    with contextlib.nested(
        mock.patch('paasta_tools.utils.get_services_for_cluster',
                   autospec=True,
                   side_effect=[mock_marathon_services, mock_chronos_services]),
    ) as (
        get_services_for_cluster_patch,
    ):
        assert utils.validate_service_instance(
            my_service,
            my_instance,
            fake_cluster,
            fake_soa_dir,
        ) == 'marathon'
        assert mock.call(
            cluster=fake_cluster,
            instance_type='marathon',
            soa_dir=fake_soa_dir,
        ) in get_services_for_cluster_patch.call_args_list
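
Example #2 uses contextlib.nested, which exists only on Python 2. On Python 3 the same test can be written with a single mock.patch context manager; here is a sketch of that equivalent, assuming the same paasta_tools version the original test was written against.

import mock

from paasta_tools import utils


def test_validate_service_instance_valid_marathon_py3():
    # Same scenario as Example #2, without contextlib.nested.
    mock_marathon_services = [("service1", "main"), ("service2", "main")]
    mock_chronos_services = [("service1", "worker"), ("service2", "tailer")]
    with mock.patch(
        "paasta_tools.utils.get_services_for_cluster",
        autospec=True,
        side_effect=[mock_marathon_services, mock_chronos_services],
    ) as get_services_for_cluster_patch:
        assert utils.validate_service_instance(
            "service1", "main", "fake_cluster", "fake_soa_dir",
        ) == "marathon"
        assert mock.call(
            cluster="fake_cluster",
            instance_type="marathon",
            soa_dir="fake_soa_dir",
        ) in get_services_for_cluster_patch.call_args_list
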
Example #3
def get_instance_config(
    service: str,
    instance: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    load_deployments: bool = False,
    instance_type: Optional[str] = None,
) -> InstanceConfig:
    """ Returns the InstanceConfig object for whatever type of instance
    it is. (marathon) """
    if instance_type is None:
        instance_type = validate_service_instance(service=service,
                                                  instance=instance,
                                                  cluster=cluster,
                                                  soa_dir=soa_dir)

    instance_config_loader = INSTANCE_TYPE_HANDLERS[instance_type].loader
    if instance_config_loader is None:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by paasta" %
            (instance, instance_type))

    return instance_config_loader(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    )
Example #4
def instance_set_state(request, ) -> None:
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    desired_state = request.swagger_data.get("desired_state")

    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = "deployment key %s not found" % ".".join(
            [settings.cluster, instance])
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if pik.can_set_state(instance_type):
        try:
            pik.set_cr_desired_state(
                kube_client=settings.kubernetes_client,
                service=service,
                instance=instance,
                instance_type=instance_type,
                desired_state=desired_state,
            )
        except RuntimeError as e:
            raise ApiFailure(e, 500)
    else:
        error_message = (
            f"instance_type {instance_type} of {service}.{instance} doesn't "
            "support set_state")
        raise ApiFailure(error_message, 500)
Example #5
def instance_status(request):
    service = request.matchdict['service']
    instance = request.matchdict['instance']
    verbose = request.matchdict.get('verbose', False)

    instance_status = {}
    instance_status['service'] = service
    instance_status['instance'] = instance

    actual_deployments = get_actual_deployments(service, settings.soa_dir)
    version = get_deployment_version(actual_deployments, settings.cluster, instance)
    # exit if the deployment key is not found
    if not version:
        error_message = 'deployment key %s not found' % '.'.join([settings.cluster, instance])
        raise InstanceFailure(error_message, 404)

    instance_status['git_sha'] = version

    try:
        instance_type = validate_service_instance(service, instance, settings.cluster, settings.soa_dir)
        if instance_type == 'marathon':
            marathon_instance_status(instance_status, service, instance, verbose)
        elif instance_type == 'chronos':
            chronos_instance_status(instance_status, service, instance, verbose)
        else:
            error_message = 'Unknown instance_type %s of %s.%s' % (instance_type, service, instance)
            raise InstanceFailure(error_message, 404)
    except:
        error_message = traceback.format_exc()
        raise InstanceFailure(error_message, 500)

    return instance_status
Example #6
def get_instance_config(service,
                        instance,
                        cluster,
                        soa_dir,
                        load_deployments=False):
    """ Returns the InstanceConfig object for whatever type of instance
    it is. (chronos or marathon) """
    instance_type = validate_service_instance(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    if instance_type == 'marathon':
        instance_config_load_function = load_marathon_service_config
    elif instance_type == 'chronos':
        instance_config_load_function = load_chronos_job_config
    elif instance_type == 'adhoc':
        instance_config_load_function = load_adhoc_job_config
    else:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by paasta" %
            (instance, instance_type))
    return instance_config_load_function(service=service,
                                         instance=instance,
                                         cluster=cluster,
                                         load_deployments=load_deployments,
                                         soa_dir=soa_dir)
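
Example #6 picks the loader with an if/elif chain, while Example #3 does the same dispatch through an INSTANCE_TYPE_HANDLERS registry. The following is a minimal sketch of the dict-based variant, using placeholder loaders rather than the real load_*_config functions:

# Placeholder loaders standing in for load_marathon_service_config,
# load_chronos_job_config and load_adhoc_job_config.
def _load_marathon_stub(**kwargs):
    return ("marathon", kwargs)


def _load_chronos_stub(**kwargs):
    return ("chronos", kwargs)


def _load_adhoc_stub(**kwargs):
    return ("adhoc", kwargs)


INSTANCE_CONFIG_LOADERS = {
    "marathon": _load_marathon_stub,
    "chronos": _load_chronos_stub,
    "adhoc": _load_adhoc_stub,
}


def get_loader(instance, instance_type):
    # Dict lookup replaces the if/elif chain; unknown types raise the same
    # NotImplementedError message as Example #6.
    try:
        return INSTANCE_CONFIG_LOADERS[instance_type]
    except KeyError:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by paasta"
            % (instance, instance_type)
        )

A registry keeps the supported instance types in one place, which appears to be why later versions (Example #3) look them up in INSTANCE_TYPE_HANDLERS instead of growing the chain.
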
Example #7
def instance_set_state(request, ) -> None:
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    desired_state = request.swagger_data.get("desired_state")

    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = "deployment key %s not found" % ".".join(
            [settings.cluster, instance])
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type == "flink":
        try:
            kube_client = KubeClient()
            set_flink_desired_state(
                kube_client=kube_client,
                service=service,
                instance=instance,
                desired_state=desired_state,
            )
        except ApiException as e:
            error_message = f"Error while setting state {desired_state} of {service}.{instance}: {e}"
            raise ApiFailure(error_message, 500)
    else:
        error_message = f"Unknown instance_type {instance_type} of {service}.{instance}"
        raise ApiFailure(error_message, 404)
Example #8
def get_instance_config(service, instance, cluster, soa_dir, load_deployments=False):
    """ Returns the InstanceConfig object for whatever type of instance
    it is. (chronos or marathon) """
    instance_type = validate_service_instance(
        service=service,
        instance=instance,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    if instance_type == 'marathon':
        instance_config_load_function = load_marathon_service_config
    elif instance_type == 'chronos':
        instance_config_load_function = load_chronos_job_config
    else:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by local-run"
            % (instance, instance_type)
        )
    return instance_config_load_function(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir
    )
Example #9
def bounce_status(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = no_configuration_for_service_message(
            settings.cluster,
            service,
            instance,
        )
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type != "kubernetes":
        # We are using HTTP 204 to indicate that the instance exists but has
        # no bounce status to be returned.  The client should just mark the
        # instance as bounced.
        response = Response()
        response.status_int = 204
        return response

    try:
        return pik.bounce_status(service, instance, settings)
    except Exception as e:
        raise ApiFailure(e, 500)
Example #10
def test_validate_service_instance_invalid():
    mock_marathon_services = [("service1", "main"), ("service2", "main")]
    mock_chronos_services = [("service1", "worker"), ("service2", "tailer")]
    my_service = "bad_service"
    my_instance = "main"
    fake_cluster = "fake_cluster"
    fake_soa_dir = "fake_soa_dir"
    with contextlib.nested(
        mock.patch(
            "paasta_tools.utils.get_services_for_cluster",
            autospec=True,
            side_effect=[mock_marathon_services, mock_chronos_services],
        ),
        mock.patch("sys.exit"),
    ) as (get_services_for_cluster_patch, sys_exit_patch):
        utils.validate_service_instance(my_service, my_instance, fake_cluster, fake_soa_dir)
        sys_exit_patch.assert_called_once_with(3)
Example #11
def instance_status(request):
    service = request.swagger_data.get('service')
    instance = request.swagger_data.get('instance')
    verbose = request.swagger_data.get('verbose', False)

    instance_status: Dict[str, Any] = {}
    instance_status['service'] = service
    instance_status['instance'] = instance

    try:
        instance_type = validate_service_instance(service, instance, settings.cluster, settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = 'deployment key %s not found' % '.'.join([settings.cluster, instance])
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type != 'flinkcluster':
        try:
            actual_deployments = get_actual_deployments(service, settings.soa_dir)
        except Exception:
            error_message = traceback.format_exc()
            raise ApiFailure(error_message, 500)

        version = get_deployment_version(actual_deployments, settings.cluster, instance)
        # exit if the deployment key is not found
        if not version:
            error_message = 'deployment key %s not found' % '.'.join([settings.cluster, instance])
            raise ApiFailure(error_message, 404)

        instance_status['git_sha'] = version
    else:
        instance_status['git_sha'] = ''

    try:
        if instance_type == 'marathon':
            instance_status['marathon'] = marathon_instance_status(instance_status, service, instance, verbose)
        elif instance_type == 'chronos':
            instance_status['chronos'] = chronos_instance_status(instance_status, service, instance, verbose)
        elif instance_type == 'adhoc':
            instance_status['adhoc'] = adhoc_instance_status(instance_status, service, instance, verbose)
        elif instance_type == 'kubernetes':
            instance_status['kubernetes'] = kubernetes_instance_status(instance_status, service, instance, verbose)
        elif instance_type == 'flinkcluster':
            status = flinkcluster_instance_status(instance_status, service, instance, verbose)
            if status is not None:
                instance_status['flinkcluster'] = {'status': status}
            else:
                instance_status['flinkcluster'] = {}
        else:
            error_message = f'Unknown instance_type {instance_type} of {service}.{instance}'
            raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_status
Example #12
def extract_args(args):
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and remote-run may not behave"
                "the same way it would behave on a server configured for PaaSTA.",
            ),
            sep='\n',
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    cluster = args.cluster or system_paasta_config.get_local_run_config().get('default_cluster', None)

    if not cluster:
        paasta_print(
            PaastaColors.red(
                "PaaSTA on this machine has not been configured with a default cluster."
                "Please pass one using '-c'.",
            ),
            sep='\n',
            file=sys.stderr,
        )
        sys.exit(1)

    soa_dir = args.yelpsoa_config_root
    instance = args.instance
    if instance is None:
        instance_type = 'adhoc'
        instance = 'remote'
    else:
        instance_type = validate_service_instance(
            service, instance, cluster, soa_dir,
        )
        if instance_type != 'adhoc':
            paasta_print(
                PaastaColors.red(
                    (
                        "Please use instance declared in adhoc.yaml for use "
                        "with remote-run, {} is declared as {}"
                    ).format(instance, instance_type),
                ),
            )
            sys.exit(1)

    return (
        system_paasta_config,
        service,
        cluster,
        soa_dir,
        instance,
        instance_type,
    )
Example #13
def extract_args(args):
    system_paasta_config = get_system_paasta_config()
    soa_dir = args.yelpsoa_config_root
    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)

    cluster = args.cluster or \
        system_paasta_config.get_remote_run_config().get('default_cluster', None)
    if not cluster:
        paasta_print(
            PaastaColors.red(
                "PaaSTA on this machine has not been configured with a default cluster."
                "Please pass one using '-c'.", ))
        emit_counter_metric('paasta.remote_run.' + args.action + '.failed',
                            service, 'UNKNOWN')
        sys.exit(1)

    instance = args.instance
    if instance is None:
        instance_type = 'adhoc'
        instance = 'remote'
    else:
        try:
            instance_type = validate_service_instance(
                service,
                instance,
                cluster,
                soa_dir,
            )
        except NoConfigurationForServiceError as e:
            paasta_print(e)
            emit_counter_metric('paasta.remote_run.' + args.action + '.failed',
                                service, instance)
            sys.exit(1)

        if instance_type != 'adhoc':
            paasta_print(
                PaastaColors.red(
                    "Please use instance declared in adhoc.yaml for use "
                    f"with remote-run, {instance} is declared as {instance_type}",
                ))
            emit_counter_metric('paasta.remote_run.' + args.action + '.failed',
                                service, instance)
            sys.exit(1)

    return (
        system_paasta_config,
        service,
        cluster,
        soa_dir,
        instance,
        instance_type,
    )
Example #14
def get_instance_config(
    service: str,
    instance: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    load_deployments: bool = False,
    instance_type: Optional[str] = None,
) -> InstanceConfig:
    """ Returns the InstanceConfig object for whatever type of instance
    it is. (chronos or marathon) """
    if instance_type is None:
        instance_type = validate_service_instance(
            service=service,
            instance=instance,
            cluster=cluster,
            soa_dir=soa_dir,
        )

    instance_config_load_function: Callable[
        [
            NamedArg(str, 'service'),
            NamedArg(str, 'instance'),
            NamedArg(str, 'cluster'),
            NamedArg(bool, 'load_deployments'),
            NamedArg(str, 'soa_dir'),
        ],
        InstanceConfig,
    ]
    if instance_type == 'marathon':
        instance_config_load_function = load_marathon_service_config
    elif instance_type == 'chronos':
        instance_config_load_function = load_chronos_job_config
    elif instance_type == 'adhoc':
        instance_config_load_function = load_adhoc_job_config
    elif instance_type == 'kubernetes':
        instance_config_load_function = load_kubernetes_service_config
    elif instance_type == 'tron':
        instance_config_load_function = load_tron_instance_config
    elif instance_type == 'flinkcluster':
        instance_config_load_function = load_flinkcluster_instance_config
    else:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by paasta"
            % (instance, instance_type),
        )
    return instance_config_load_function(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    )
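
Example #14 types the loader variable with NamedArg from mypy_extensions so that mypy verifies every load_*_config function takes the same keyword arguments. Below is a trimmed, self-contained sketch of that annotation style; the InstanceConfig stand-in and the stub loader are assumptions for illustration only.

from typing import Callable

from mypy_extensions import NamedArg


class InstanceConfig:
    # Stand-in for paasta_tools' InstanceConfig, just to keep the sketch self-contained.
    pass


# Callable type requiring keyword arguments named service/instance/cluster/
# load_deployments/soa_dir, mirroring the annotation in Example #14.
ConfigLoader = Callable[
    [
        NamedArg(str, "service"),
        NamedArg(str, "instance"),
        NamedArg(str, "cluster"),
        NamedArg(bool, "load_deployments"),
        NamedArg(str, "soa_dir"),
    ],
    InstanceConfig,
]


def load_stub_config(
    *, service: str, instance: str, cluster: str, load_deployments: bool, soa_dir: str
) -> InstanceConfig:
    # Hypothetical loader with the required keyword-only signature.
    return InstanceConfig()


loader: ConfigLoader = load_stub_config
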
Example #15
def main():
    args = parse_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if (args.service_instance):
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif (args.service and args.instances):
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error("The name of service or the name of instance to inspect is missing. Exiting.")
        sys.exit(1)

    cluster = load_system_paasta_config().get_cluster()
    for instance in instances:
        instance_type = validate_service_instance(service, instance, cluster, args.soa_dir)
        if instance_type == 'marathon':
            return_code = marathon_serviceinit.perform_command(
                command=command,
                service=service,
                instance=instance,
                cluster=cluster,
                verbose=args.verbose,
                soa_dir=args.soa_dir,
                app_id=args.app_id,
                delta=args.delta,
            )
        elif instance_type == 'chronos':
            return_code = chronos_serviceinit.perform_command(
                command=command,
                service=service,
                instance=instance,
                cluster=cluster,
                verbose=args.verbose,
                soa_dir=args.soa_dir,
            )
        else:
            log.error("I calculated an instance_type of %s for %s which I don't know how to handle. Exiting."
                      % (instance_type, compose_job_id(service, instance)))
            return_code = 1

        return_codes.append(return_code)
    sys.exit(max(return_codes))
Example #16
def test_validate_service_instance_invalid():
    mock_marathon_services = [('service1', 'main'), ('service2', 'main')]
    mock_chronos_services = [('service1', 'worker'), ('service2', 'tailer')]
    my_service = 'bad_service'
    my_instance = 'main'
    fake_cluster = 'fake_cluster'
    fake_soa_dir = 'fake_soa_dir'
    with contextlib.nested(
        mock.patch('paasta_tools.utils.get_services_for_cluster',
                   autospec=True,
                   side_effect=[mock_marathon_services, mock_chronos_services]),
        mock.patch('sys.exit'),
    ) as (
        get_services_for_cluster_patch,
        sys_exit_patch,
    ):
        utils.validate_service_instance(
            my_service,
            my_instance,
            fake_cluster,
            fake_soa_dir,
        )
        sys_exit_patch.assert_called_once_with(3)
Example #17
def instance_status(request):
    service = request.swagger_data.get('service')
    instance = request.swagger_data.get('instance')
    verbose = request.matchdict.get('verbose', False)

    instance_status = {}
    instance_status['service'] = service
    instance_status['instance'] = instance

    try:
        actual_deployments = get_actual_deployments(service, settings.soa_dir)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    version = get_deployment_version(actual_deployments, settings.cluster,
                                     instance)
    # exit if the deployment key is not found
    if not version:
        error_message = 'deployment key %s not found' % '.'.join(
            [settings.cluster, instance])
        raise ApiFailure(error_message, 404)

    instance_status['git_sha'] = version

    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
        if instance_type == 'marathon':
            instance_status['marathon'] = marathon_instance_status(
                instance_status, service, instance, verbose)
        elif instance_type == 'chronos':
            instance_status['chronos'] = chronos_instance_status(
                instance_status, service, instance, verbose)
        elif instance_type == 'adhoc':
            instance_status['adhoc'] = adhoc_instance_status(
                instance_status, service, instance, verbose)
        else:
            error_message = 'Unknown instance_type %s of %s.%s' % (
                instance_type, service, instance)
            raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_status
Example #18
def test_validate_service_instance_valid_chronos():
    mock_marathon_services = [("service1", "main"), ("service2", "main")]
    mock_chronos_services = [("service1", "worker"), ("service2", "tailer")]
    my_service = "service1"
    my_instance = "worker"
    fake_cluster = "fake_cluster"
    fake_soa_dir = "fake_soa_dir"
    with contextlib.nested(
        mock.patch(
            "paasta_tools.utils.get_services_for_cluster",
            autospec=True,
            side_effect=[mock_marathon_services, mock_chronos_services],
        )
    ) as (get_services_for_cluster_patch,):
        assert utils.validate_service_instance(my_service, my_instance, fake_cluster, fake_soa_dir) == "chronos"
        assert (
            mock.call(cluster=fake_cluster, instance_type="chronos", soa_dir=fake_soa_dir)
            in get_services_for_cluster_patch.call_args_list
        )
Example #19
def main():
    args = parse_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.WARNING)

    command = args.command
    service_instance = args.service_instance
    service, instance, _, __ = decompose_job_id(service_instance)

    cluster = load_system_paasta_config().get_cluster()
    instance_type = validate_service_instance(service, instance, cluster,
                                              args.soa_dir)
    if instance_type == 'marathon':
        return_code = marathon_serviceinit.perform_command(
            command=command,
            service=service,
            instance=instance,
            cluster=cluster,
            verbose=args.verbose,
            soa_dir=args.soa_dir,
            app_id=args.app_id,
            delta=args.delta,
        )
        sys.exit(return_code)
    elif instance_type == 'chronos':
        return_code = chronos_serviceinit.perform_command(
            command=command,
            service=service,
            instance=instance,
            cluster=cluster,
            verbose=args.verbose,
            soa_dir=args.soa_dir,
        )
        sys.exit(return_code)
    else:
        log.error(
            "I calculated an instance_type of %s for %s which I don't know how to handle. Exiting."
            % (instance_type, compose_job_id(service, instance)))
        sys.exit(1)
Example #20
def instance_mesh_status(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    include_smartstack = request.swagger_data.get("include_smartstack")
    include_envoy = request.swagger_data.get("include_envoy")

    instance_mesh: Dict[str, Any] = {}
    instance_mesh["service"] = service
    instance_mesh["instance"] = instance

    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = (
            f"No instance named '{compose_job_id(service, instance)}' has been "
            f"configured to run in the {settings.cluster} cluster")
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    try:
        instance_mesh.update(
            pik.kubernetes_mesh_status(
                service=service,
                instance=instance,
                instance_type=instance_type,
                settings=settings,
                include_smartstack=include_smartstack,
                include_envoy=include_envoy,
            ))
    except RuntimeError as e:
        raise ApiFailure(str(e), 405)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_mesh
Example #21
def instance_set_state(request,) -> None:
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    desired_state = request.swagger_data.get("desired_state")

    try:
        instance_type = validate_service_instance(
            service, instance, settings.cluster, settings.soa_dir
        )
    except NoConfigurationForServiceError:
        error_message = "deployment key %s not found" % ".".join(
            [settings.cluster, instance]
        )
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type in INSTANCE_TYPES_WITH_SET_STATE:
        try:
            cr_id_fn = cr_id_fn_for_instance_type(instance_type)
            kube_client = KubeClient()
            kubernetes_tools.set_cr_desired_state(
                kube_client=kube_client,
                cr_id=cr_id_fn(service=service, instance=instance),
                desired_state=desired_state,
            )
        except ApiException as e:
            error_message = (
                f"Error while setting state {desired_state} of "
                f"{service}.{instance}: {e}"
            )
            raise ApiFailure(error_message, 500)
    else:
        error_message = (
            f"instance_type {instance_type} of {service}.{instance} doesn't "
            f"support set_state, must be in INSTANCE_TYPES_WITH_SET_STATE, "
            f"currently: {INSTANCE_TYPES_WITH_SET_STATE}"
        )
        raise ApiFailure(error_message, 404)
Example #22
def main():
    args = parse_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.WARNING)

    command = args.command
    service_instance = args.service_instance
    service, instance, _, __ = decompose_job_id(service_instance)

    cluster = load_system_paasta_config().get_cluster()
    instance_type = validate_service_instance(service, instance, cluster, args.soa_dir)
    if instance_type == 'marathon':
        return_code = marathon_serviceinit.perform_command(
            command=command,
            service=service,
            instance=instance,
            cluster=cluster,
            verbose=args.verbose,
            soa_dir=args.soa_dir,
            app_id=args.app_id,
            delta=args.delta,
        )
        sys.exit(return_code)
    elif instance_type == 'chronos':
        return_code = chronos_serviceinit.perform_command(
            command=command,
            service=service,
            instance=instance,
            cluster=cluster,
            verbose=args.verbose,
            soa_dir=args.soa_dir,
        )
        sys.exit(return_code)
    else:
        log.error("I calculated an instance_type of %s for %s which I don't know how to handle. Exiting."
                  % (instance_type, compose_job_id(service, instance)))
        sys.exit(1)
Example #23
def instance_status(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    verbose = request.swagger_data.get("verbose") or 0
    omit_smartstack = request.swagger_data.get("omit_smartstack") or False
    omit_mesos = request.swagger_data.get("omit_mesos") or False

    instance_status: Dict[str, Any] = {}
    instance_status["service"] = service
    instance_status["instance"] = instance
    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = "deployment key %s not found" % ".".join(
            [settings.cluster, instance])
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type != "flink" and instance_type != "tron":
        try:
            actual_deployments = get_actual_deployments(
                service, settings.soa_dir)
        except Exception:
            error_message = traceback.format_exc()
            raise ApiFailure(error_message, 500)

        version = get_deployment_version(actual_deployments, settings.cluster,
                                         instance)
        # exit if the deployment key is not found
        if not version:
            error_message = "deployment key %s not found" % ".".join(
                [settings.cluster, instance])
            raise ApiFailure(error_message, 404)

        instance_status["git_sha"] = version
    else:
        instance_status["git_sha"] = ""

    try:
        if instance_type == "marathon":
            instance_status["marathon"] = marathon_instance_status(
                instance_status,
                service,
                instance,
                verbose,
                omit_smartstack=omit_smartstack,
                omit_mesos=omit_mesos,
            )
        elif instance_type == "chronos":
            if verbose:
                instance_status["chronos"] = chronos_instance_status(
                    service, instance, 1)
            else:
                instance_status["chronos"] = chronos_instance_status(
                    service, instance, 0)
        elif instance_type == "adhoc":
            instance_status["adhoc"] = adhoc_instance_status(
                instance_status, service, instance, verbose)
        elif instance_type == "kubernetes":
            instance_status["kubernetes"] = kubernetes_instance_status(
                instance_status, service, instance, verbose)
        elif instance_type == "tron":
            instance_status["tron"] = tron_instance_status(
                instance_status, service, instance, verbose)
        elif instance_type == "flink":
            status = flink_instance_status(instance_status, service, instance,
                                           verbose)
            if status is not None:
                instance_status["flink"] = {"status": status}
            else:
                instance_status["flink"] = {}
        else:
            error_message = (
                f"Unknown instance_type {instance_type} of {service}.{instance}"
            )
            raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_status
Example #24
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        sys.stdout.write(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
                "PaaSTA is not configured locally on this host, and local-run may not behave\n"
                "the same way it would behave on a server configured for PaaSTA.\n"
            ))
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    instance_type = validate_service_instance(service, instance, cluster,
                                              soa_dir)

    load_deployments = docker_hash is None or pull_image

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=load_deployments,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Example #25
def main(argv):
    args = parse_args(argv)
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and remote-run may not behave"
                "the same way it would behave on a server configured for PaaSTA."
            ),
            sep='\n',
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    cluster = args.cluster or system_paasta_config.get_local_run_config().get(
        'default_cluster', None)

    if not cluster:
        paasta_print(
            PaastaColors.red(
                "PaaSTA on this machine has not been configured with a default cluster."
                "Please pass one using '-c'."),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    dry_run = args.dry_run
    instance = args.instance
    command = args.cmd

    if instance is None:
        instance_type = 'adhoc'
        instance = 'remote'
    else:
        instance_type = validate_service_instance(service, instance, cluster,
                                                  soa_dir)

    overrides_dict = {}

    constraints_json = args.constraints_json
    if constraints_json:
        try:
            constraints = json.loads(constraints_json)
        except Exception as e:
            paasta_print("Error while parsing constraints: %s" % e)

        if constraints:
            overrides_dict['constraints'] = constraints

    if command:
        overrides_dict['cmd'] = command

    paasta_print('Scheduling a task on Mesos')
    scheduler = AdhocScheduler(
        service_name=service,
        instance_name=instance,
        instance_type=instance_type,
        cluster=cluster,
        system_paasta_config=system_paasta_config,
        soa_dir=soa_dir,
        reconcile_backoff=0,
        dry_run=dry_run,
        staging_timeout=args.staging_timeout,
        service_config_overrides=overrides_dict,
    )
    driver = create_driver(
        framework_name="paasta-remote %s %s" % (compose_job_id(
            service, instance), datetime.utcnow().strftime('%Y%m%d%H%M%S%f')),
        scheduler=scheduler,
        system_paasta_config=system_paasta_config)
    driver.run()
Example #26
def instance_status(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    verbose = request.swagger_data.get("verbose") or 0
    include_smartstack = request.swagger_data.get("include_smartstack")
    if include_smartstack is None:
        include_smartstack = True
    include_mesos = request.swagger_data.get("include_mesos")
    if include_mesos is None:
        include_mesos = True

    instance_status: Dict[str, Any] = {}
    instance_status["service"] = service
    instance_status["instance"] = instance
    try:
        instance_type = validate_service_instance(
            service, instance, settings.cluster, settings.soa_dir
        )
    except NoConfigurationForServiceError:
        error_message = (
            "Deployment key %s not found.  Try to execute the corresponding pipeline if it's a fresh instance"
            % ".".join([settings.cluster, instance])
        )
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type != "tron":
        try:
            actual_deployments = get_actual_deployments(service, settings.soa_dir)
        except Exception:
            error_message = traceback.format_exc()
            raise ApiFailure(error_message, 500)

        version = get_deployment_version(actual_deployments, settings.cluster, instance)
        # exit if the deployment key is not found
        if not version:
            error_message = (
                "Deployment key %s not found.  Try to execute the corresponding pipeline if it's a fresh instance"
                % ".".join([settings.cluster, instance])
            )
            raise ApiFailure(error_message, 404)

        instance_status["git_sha"] = version
    else:
        instance_status["git_sha"] = ""

    try:
        if instance_type == "marathon":
            instance_status["marathon"] = marathon_instance_status(
                instance_status,
                service,
                instance,
                verbose,
                include_smartstack=include_smartstack,
                include_mesos=include_mesos,
            )
        elif instance_type == "adhoc":
            instance_status["adhoc"] = adhoc_instance_status(
                instance_status, service, instance, verbose
            )
        elif instance_type == "kubernetes":
            instance_status["kubernetes"] = kubernetes_instance_status(
                instance_status,
                service,
                instance,
                verbose,
                include_smartstack=include_smartstack,
                instance_type=instance_type,
            )
        elif instance_type == "tron":
            instance_status["tron"] = tron_instance_status(
                instance_status, service, instance, verbose
            )
        elif instance_type in INSTANCE_TYPES_K8S:
            cr_id_fn = cr_id_fn_for_instance_type(instance_type)
            cr_id = cr_id_fn(service, instance)
            status = kubernetes_cr_status(cr_id, verbose)
            metadata = kubernetes_cr_metadata(cr_id, verbose)
            instance_status[instance_type] = {}
            if status is not None:
                instance_status[instance_type]["status"] = status
            if metadata is not None:
                instance_status[instance_type]["metadata"] = metadata
        else:
            error_message = (
                f"Unknown instance_type {instance_type} of {service}.{instance}"
            )
            raise ApiFailure(error_message, 404)
        if instance_type == "cassandracluster":
            instance_status["kubernetes"] = kubernetes_instance_status(
                instance_status,
                service,
                instance,
                verbose,
                include_smartstack=include_smartstack,
                instance_type=instance_type,
            )
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_status
Example #27
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        sys.stdout.write(PaastaColors.yellow(
            "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
            "PaaSTA is not configured locally on this host, and local-run may not behave\n"
            "the same way it would behave on a server configured for PaaSTA.\n"
        ))
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    instance_type = validate_service_instance(service, instance, cluster, soa_dir)

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=pull_image,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if pull_image:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
    )
Example #28
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       system_paasta_config,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write(
                "With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Example #29
def main():
    args = parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if (args.service_instance):
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif (args.service and args.instances):
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error(
            "The name of service or the name of instance to inspect is missing. Exiting."
        )
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)
    clients = PaastaClients(cached=(command == 'status'))

    for instance in instances:
        try:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, args.soa_dir)
            if instance_type == 'adhoc':
                continue

            version = get_deployment_version(actual_deployments, cluster,
                                             instance)
            paasta_print('instance: %s' % PaastaColors.blue(instance))
            paasta_print('Git sha:    %s (desired)' % version)

            if instance_type == 'marathon':
                return_code = marathon_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                    app_id=args.app_id,
                    delta=args.delta,
                    client=clients.marathon(),
                )
            elif instance_type == 'chronos':
                return_code = chronos_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                    client=clients.chronos(),
                )
            elif instance_type == 'paasta_native':
                return_code = paasta_native_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                )
            else:
                log.error(
                    "I calculated an instance_type of %s for %s which I don't know how to handle."
                    % (instance_type, compose_job_id(service, instance)))
                return_code = 1
        except Exception:
            log.error(
                'Exception raised while looking at service %s instance %s:' %
                (service, instance))
            log.error(traceback.format_exc())
            return_code = 1

        return_codes.append(return_code)

    sys.exit(max(return_codes))
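The if/elif routing on instance_type in the example above could also be written as a dispatch table. The sketch below is a minimal, self-contained illustration of that pattern; the handler functions are placeholders, not the real marathon_serviceinit/chronos_serviceinit/paasta_native_serviceinit calls.

# Minimal sketch of the instance_type routing used in the example above.
# The handlers are placeholders standing in for the perform_command calls.
from typing import Callable, Dict

def _handle_marathon(**kwargs) -> int:
    return 0  # placeholder for marathon_serviceinit.perform_command(**kwargs)

def _handle_chronos(**kwargs) -> int:
    return 0  # placeholder for chronos_serviceinit.perform_command(**kwargs)

def _handle_paasta_native(**kwargs) -> int:
    return 0  # placeholder for paasta_native_serviceinit.perform_command(**kwargs)

HANDLERS: Dict[str, Callable[..., int]] = {
    'marathon': _handle_marathon,
    'chronos': _handle_chronos,
    'paasta_native': _handle_paasta_native,
}

def dispatch(instance_type: str, **kwargs) -> int:
    handler = HANDLERS.get(instance_type)
    if handler is None:
        return 1  # unknown instance_type: fail, mirroring the else branch above
    return handler(**kwargs)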
Example No. 30
0
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        system_paasta_config,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write("With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
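For reference, the volume strings assembled in the loop above follow Docker's host-path:container-path:mode bind format. A minimal, self-contained sketch of just that step; the example path in the trailing comment is purely illustrative.

# Minimal sketch of the "hostPath:containerPath:mode" bind strings built above.
from typing import Dict, List

def format_volume_bindings(volumes: List[Dict[str, str]]) -> List[str]:
    # Each dict uses the hostPath/containerPath/mode keys seen in the volume configs above.
    return [
        '%s:%s:%s' % (v['hostPath'], v['containerPath'], v['mode'].lower())
        for v in volumes
    ]

# format_volume_bindings([{'hostPath': '/tmp', 'containerPath': '/tmp', 'mode': 'RO'}])
# -> ['/tmp:/tmp:ro']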
Example No. 31
0
def main():
    args = parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if args.service_instance:
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif args.service and args.instances:
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error("The name of service or the name of instance to inspect is missing. Exiting.")
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)

    for instance in instances:
        # For an instance, there might be multiple versions running, e.g. in crossover bouncing.
        # In addition, mesos master does not have information of a chronos service's git hash.
        # The git sha in deployment.json is simply used here.
        version = actual_deployments['.'.join((cluster, instance))][:8]
        paasta_print('instance: %s' % PaastaColors.blue(instance))
        paasta_print('Git sha:    %s (desired)' % version)

        try:
            instance_type = validate_service_instance(service, instance, cluster, args.soa_dir)
            if instance_type == 'marathon':
                return_code = marathon_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                    app_id=args.app_id,
                    delta=args.delta,
                )
            elif instance_type == 'chronos':
                return_code = chronos_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                )
            else:
                log.error("I calculated an instance_type of %s for %s which I don't know how to handle."
                          % (instance_type, compose_job_id(service, instance)))
                return_code = 1
        except Exception:
            log.error('Exception raised while looking at service %s instance %s:' % (service, instance))
            log.error(traceback.format_exc())
            return_code = 1

        return_codes.append(return_code)

    sys.exit(max(return_codes))
Example No. 32
0
def main():
    args = parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if args.service_instance:
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif args.service and args.instances:
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error(
            "The name of service or the name of instance to inspect is missing. Exiting."
        )
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)

    for instance in instances:
        # For an instance, there might be multiple versions running, e.g. in crossover bouncing.
        # In addition, mesos master does not have information of a chronos service's git hash.
        # The git sha in deployment.json is simply used here.
        version = actual_deployments['.'.join((cluster, instance))][:8]
        paasta_print('instance: %s' % PaastaColors.blue(instance))
        paasta_print('Git sha:    %s (desired)' % version)

        try:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, args.soa_dir)
            if instance_type == 'marathon':
                return_code = marathon_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                    app_id=args.app_id,
                    delta=args.delta,
                )
            elif instance_type == 'chronos':
                return_code = chronos_serviceinit.perform_command(
                    command=command,
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    verbose=args.verbose,
                    soa_dir=args.soa_dir,
                )
            else:
                log.error(
                    "I calculated an instance_type of %s for %s which I don't know how to handle."
                    % (instance_type, compose_job_id(service, instance)))
                return_code = 1
        except Exception:
            log.error(
                'Exception raised while looking at service %s instance %s:' %
                (service, instance))
            log.error(traceback.format_exc())
            return_code = 1

        return_codes.append(return_code)

    sys.exit(max(return_codes))
Example No. 33
0
def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!", file=sys.stderr
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
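    # deployments.json only needs to be read when the image must be resolved from it:
    # skip loading it when an explicit sha was given, or when a docker_url was supplied
    # and no pull is requested.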
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive

    try:
        if instance is None:
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

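    # An explicitly supplied sha bypasses deployments.json: synthesize a branch_dict
    # pointing straight at the image built from that sha.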
    if docker_sha is not None:
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }

    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                paasta_print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                paasta_print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1

    if pull_image:
        docker_pull_image(docker_url)

    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )

    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )
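The command resolution in the example above follows a simple precedence: an interactive session with no explicit command defaults to bash, an explicit args.cmd wins otherwise, and only then does the instance config's cmd (or args) apply. A minimal sketch of just that ordering, with the per-instance-type formatting step omitted:

# Minimal sketch of the command precedence used above (instance-type formatting omitted).
def resolve_command(interactive, cli_cmd, config_cmd, config_args):
    if interactive and cli_cmd is None:
        return 'bash'
    if cli_cmd:
        return cli_cmd
    return config_cmd if config_cmd else config_args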
Example No. 34
0
def main() -> None:
    args = parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    instances = []
    return_codes = []
    command = args.command
    if args.service_instance:
        service_instance = args.service_instance
        service, instance, _, __ = decompose_job_id(service_instance)
        instances.append(instance)
    elif args.service and args.instances:
        service = args.service
        instances = args.instances.split(',')
    else:
        log.error(
            "The name of service or the name of instance to inspect is missing. Exiting."
        )
        sys.exit(1)

    # Setting up transparent cache for http API calls
    requests_cache.install_cache("paasta_serviceinit", backend="memory")

    cluster = load_system_paasta_config().get_cluster()
    actual_deployments = get_actual_deployments(service, args.soa_dir)
    clients = PaastaClients(cached=(command == 'status'))

    instance_types = ['marathon', 'chronos', 'paasta_native', 'adhoc']
    instance_types_map: Dict[str, List[str]] = {it: [] for it in instance_types}
    for instance in instances:
        try:
            instance_type = validate_service_instance(
                service,
                instance,
                cluster,
                args.soa_dir,
            )
        except Exception:
            log.error(
                'Exception raised while looking at service {} instance {}:'.format(
                    service, instance,
                ),
            )
            log.error(traceback.format_exc())
            return_codes.append(1)
            continue

        if instance_type not in instance_types:
            log.error(
                ("I calculated an instance_type of {} for {} which I don't "
                 "know how to handle.").format(
                     instance_type,
                     compose_job_id(service, instance),
                 ), )
            return_codes.append(1)
        else:
            instance_types_map[instance_type].append(instance)

    remote_run_frameworks = None
    if len(instance_types_map['adhoc']) > 0:
        remote_run_frameworks = paasta_remote_run.remote_run_frameworks()

    service_config_loader = PaastaServiceConfigLoader(service)

    for instance_type in instance_types:

        if instance_type == 'marathon':
            job_configs = {
                jc.instance: jc
                for jc in service_config_loader.instance_configs(
                    cluster=cluster,
                    instance_type_class=marathon_tools.MarathonServiceConfig,
                )
            }

        for instance in instance_types_map[instance_type]:
            try:
                version = get_deployment_version(
                    actual_deployments,
                    cluster,
                    instance,
                )
                paasta_print('instance: %s' % PaastaColors.blue(instance))
                paasta_print('Git sha:    %s (desired)' % version)

                if instance_type == 'marathon':
                    return_code = marathon_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                        app_id=args.app_id,
                        clients=clients.marathon(),
                        job_config=job_configs[instance],
                    )
                elif instance_type == 'chronos':
                    return_code = chronos_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                        client=clients.chronos(),
                    )
                elif instance_type == 'paasta_native':
                    return_code = paasta_native_serviceinit.perform_command(
                        command=command,
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        verbose=args.verbose,
                        soa_dir=args.soa_dir,
                    )
                elif instance_type == 'adhoc':
                    if command != 'status':
                        raise NotImplementedError
                    paasta_remote_run.remote_run_list_report(
                        service=service,
                        instance=instance,
                        cluster=cluster,
                        frameworks=remote_run_frameworks,
                    )
                    return_code = 0
            except Exception:
                log.error(('Exception raised while looking at service {} '
                           'instance {}:').format(service, instance), )
                log.error(traceback.format_exc())
                return_code = 1

            return_codes.append(return_code)

    sys.exit(max(return_codes))
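The instance_types_map built in the example above is a plain group-by on the validated instance type. A self-contained sketch of that step, where classify stands in for validate_service_instance:

# Minimal sketch of grouping instance names by their validated instance_type.
from collections import defaultdict
from typing import Callable, Dict, List

def group_instances_by_type(
    instances: List[str],
    classify: Callable[[str], str],  # stands in for validate_service_instance
) -> Dict[str, List[str]]:
    grouped: Dict[str, List[str]] = defaultdict(list)
    for instance in instances:
        grouped[classify(instance)].append(instance)
    return dict(grouped)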
Example No. 35
0
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        system_paasta_config,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!",
            file=sys.stderr,
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    load_deployments = docker_hash is None or pull_image
    interactive = args.interactive

    try:
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s" % {
                    'soa_dir': soa_dir,
                    'service': service,
                }
            ),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    if docker_hash is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            paasta_print(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service)),
                sep='',
                file=sys.stderr,
            )
            return 1
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = 'bash'
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = command_modifier(command_from_config)
        else:
            command = instance_config.get_args()

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
    )
Example No. 36
0
def instance_status(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    verbose = request.swagger_data.get("verbose") or 0
    use_new = request.swagger_data.get("new") or False
    include_smartstack = request.swagger_data.get("include_smartstack")
    if include_smartstack is None:
        include_smartstack = True
    include_envoy = request.swagger_data.get("include_envoy")
    if include_envoy is None:
        include_envoy = True
    include_mesos = request.swagger_data.get("include_mesos")
    if include_mesos is None:
        include_mesos = True

    instance_status: Dict[str, Any] = {}
    instance_status["service"] = service
    instance_status["instance"] = instance
    try:
        instance_type = validate_service_instance(service, instance,
                                                  settings.cluster,
                                                  settings.soa_dir)
    except NoConfigurationForServiceError:
        error_message = no_configuration_for_service_message(
            settings.cluster,
            service,
            instance,
        )
        raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    if instance_type != "tron":
        try:
            actual_deployments = get_actual_deployments(
                service, settings.soa_dir)
        except Exception:
            error_message = traceback.format_exc()
            raise ApiFailure(error_message, 500)

        version = get_deployment_version(actual_deployments, settings.cluster,
                                         instance)
        # exit if the deployment key is not found
        if not version:
            error_message = (
                "Deployment key %s not found.  Try to execute the corresponding pipeline if it's a fresh instance"
                % ".".join([settings.cluster, instance]))
            raise ApiFailure(error_message, 404)

        instance_status["git_sha"] = version
    else:
        instance_status["git_sha"] = ""

    try:
        if instance_type == "marathon":
            instance_status["marathon"] = marathon_instance_status(
                instance_status,
                service,
                instance,
                verbose,
                include_smartstack=include_smartstack,
                include_envoy=include_envoy,
                include_mesos=include_mesos,
            )
        elif instance_type == "adhoc":
            instance_status["adhoc"] = adhoc_instance_status(
                instance_status, service, instance, verbose)
        elif pik.can_handle(instance_type):
            instance_status.update(
                pik.instance_status(
                    service=service,
                    instance=instance,
                    verbose=verbose,
                    include_smartstack=include_smartstack,
                    include_envoy=include_envoy,
                    use_new=use_new,
                    instance_type=instance_type,
                    settings=settings,
                ))
        elif instance_type == "tron":
            instance_status["tron"] = tron_instance_status(
                instance_status, service, instance, verbose)
        else:
            error_message = (
                f"Unknown instance_type {instance_type} of {service}.{instance}"
            )
            raise ApiFailure(error_message, 404)
    except Exception:
        error_message = traceback.format_exc()
        raise ApiFailure(error_message, 500)

    return instance_status
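As the examples show, actual_deployments is keyed by "<cluster>.<instance>" and the value carries the git sha that gets shortened for display. A minimal sketch of that lookup; the helper name is hypothetical and paasta's own get_deployment_version may differ in detail.

# Minimal sketch of the "<cluster>.<instance>" deployment-key lookup used above.
from typing import Dict, Optional

def lookup_deployment_sha(
    actual_deployments: Dict[str, str], cluster: str, instance: str,
) -> Optional[str]:
    sha = actual_deployments.get('.'.join((cluster, instance)))
    return sha[:8] if sha else None  # a missing key is what the API above maps to a 404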