Example #1
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'status',
        help="Display the status of a PaaSTA service.",
        description=(
            "'paasta status' works by SSH'ing to remote PaaSTA masters and "
            "inspecting the local APIs, and reports on the overal health "
            "of a service."
        ),
        epilog=(
            "Note: This command requires SSH and sudo privileges on the remote PaaSTA "
            "masters."
        ),
    )
    status_parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest="verbose",
        default=False,
        help="Print out more output regarding the state of the service")
    status_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect'
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-c', '--clusters',
        help="A comma-separated list of clusters to view. Defaults to view all clusters.\n"
             "For example: --clusters norcal-prod,nova-prod"
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-i', '--instances',
        help="A comma-separated list of instances to view. Defaults to view all instances.\n"
             "For example: --instances canary,main"
    )  # No completer because we need to know service first and we can't until some other stuff has happened
    status_parser.set_defaults(command=paasta_status)
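
For context, the sketch below (not taken from the examples on this page) shows how an add_subparser function like the one in Example #1 is typically wired into a top-level argparse CLI with argcomplete, which is what consumes the .completer attributes and the command registered via set_defaults. The entry-point names build_parser and main are assumptions for illustration only.

import argparse

import argcomplete


def build_parser():
    parser = argparse.ArgumentParser(description="PaaSTA command line")
    subparsers = parser.add_subparsers(dest="subcommand")
    add_subparser(subparsers)  # registers the 'status' subcommand from Example #1
    return parser


def main():
    parser = build_parser()
    argcomplete.autocomplete(parser)  # enables tab completion for the registered completers
    args = parser.parse_args()
    return args.command(args)  # dispatch to the function set via set_defaults(command=...)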
Example #2
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-stop',
        help="Stop a PaaSTA service instance in an emergency",
        description=(
            "Chronos jobs: Stops and kills and inflight run.\n"
            "Marathon apps: Not implemented."
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to stop. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to stop. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service instance you want to stop. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_stop)
Example #3
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-restart',
        help="Restarts a PaaSTA service instance in an emergency",
        description=(
            "'paasta emergency-restart' is useful in situations where the operator "
            "needs to bypass the normal git-based control plan, and needs to interact "
            "with the underlying APIs directly. For example, in an emergency situation "
            "it may be necessary to restart a Marathon service without doing a 'full bounce'."
            "'emergency-restart' can do this, but at the cost of the safety of the normal "
            "bouncing procedures. In other words, and emergency-restart is fast, but not safe "
            "and will cause dropped traffic.\n\n"
            "'paasta emergency-restart' is the equivalent to a 'paasta emergency-stop' followed "
            "by a 'paasta emergency-start'."
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to restart. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to restart. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service you want to restart. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.set_defaults(command=paasta_emergency_restart)
Example #4
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        "emergency-start",
        help="Resumes normal operation of a PaaSTA service instance by scaling to the configured instance count",
        description=(
            "'emergency-start' scales a PaaSTA service instance up to the configured instance count for a "
            "Marathon service. It does nothing to an existing Marathon service that already has the desired "
            "instance count.\n\n"
            "On a Chronos job, 'emergency-start' has the effect of forcing a job to run outside of its normal "
            "schedule."
        ),
    )
    status_parser.add_argument(
        "-s", "--service", help="Service that you want to start. Like 'example_service'."
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        "-i",
        "--instance",
        help="Instance of the service that you want to start. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        "-c",
        "--cluster",
        help="The PaaSTA cluster that has the service instance you want to start. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.set_defaults(command=paasta_emergency_start)
Example #5
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-start',
        help="Kicks off a chronos job run. Not implemented for Marathon instances.",
        description=(
            "Chronos Jobs: Forces a job to run outside of its normal schedule.\n"
            "Marathon Apps: Not implemented.\n"
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to start. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to start. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service instance you want to start. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_start)
Example #6
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-start',
        help="Resumes normal operation of a PaaSTA service instance by scaling to the configured instance count",
        description=(
            "'emergency-start' scales a PaaSTA service instance up to the configured instance count for a "
            "Marathon service. It does nothing to an existing Marathon service that already has the desired "
            "instance count.\n\n"
            "On a Chronos job, 'emergency-start' has the effect of forcing a job to run outside of its normal "
            "schedule."
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to start. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to start. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service instance you want to start. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_start)
Example #7
def add_subparser(subparsers):
    for command, lower, upper, cmd_func in [
        ('start', 'start or restart', 'Start or restart', paasta_start),
        ('restart', 'start or restart', 'Start or restart', paasta_start),
        ('stop', 'stop', 'Stop', paasta_stop)
    ]:
        status_parser = subparsers.add_parser(
            command,
            help="%ss a PaaSTA service in a graceful way." % upper,
            description=(
                "%ss a PaaSTA service in a graceful way. This uses the Git control plane." % upper
            ),
            epilog=(
                "This command uses Git, and assumes access and authorization to the Git repo "
                "for the service is available."
            ),
        )
        status_parser.add_argument(
            '-s', '--service',
            help='Service that you want to %s. Like example_service.' % lower,
        ).completer = lazy_choices_completer(list_services)
        status_parser.add_argument(
            '-i', '--instance',
            help='Instance of the service that you want to %s. Like "main" or "canary".' % lower,
            required=True,
        ).completer = lazy_choices_completer(list_instances)
        status_parser.add_argument(
            '-c', '--cluster',
            help='The PaaSTA cluster that has the service you want to %s. Like norcal-prod' % lower,
            required=True,
        ).completer = lazy_choices_completer(utils.list_clusters)
        status_parser.set_defaults(command=cmd_func)
Example #8
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'rollback',
        help='Rollback a docker image to a previous deploy',
        description=(
            "'paasta rollback' is a human-friendly tool for marking a particular "
            "docker image for deployment, which invokes a bounce. While the command "
            "is called 'rollback', it can be used to roll forward or back, as long "
            "as there is a docker image available for the input git SHA."
        ),
        epilog=(
            "This rollback command uses the Git control plane, which requires network "
            "connectivity as well as authorization to the git repo."
        ),
    )
    list_parser.add_argument(
        '-k', '--commit',
        help="Git SHA to mark for rollback. "
        "A commit to rollback to is required for paasta rollback to run. However if one is not provided, "
        "paasta rollback will instead output a list of valid git shas to rollback to.",
        required=False,
    ).completer = lazy_choices_completer(list_previously_deployed_shas)
    list_parser.add_argument(
        '-d', '--deploy-groups',
        help='Mark one or more deploy groups to roll back (e.g. '
        '"all.main", "all.main,all.canary"). If no deploy groups specified,'
        ' all deploy groups for that service are rolled back',
        default='',
        required=False,
    )
    list_parser.add_argument(
        '-s', '--service',
        help='Name of the service to rollback (e.g. "service1")',
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_rollback)
Example #9
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'logs',
        help="Streams logs relevant to a service across the PaaSTA components",
        description=(
            "'paasta logs' works by streaming PaaSTA-related event messages "
            "in a human-readable way."
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    status_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect. Defaults to autodetect.'
    ).completer = lazy_choices_completer(list_services)
    components_help = 'A comma separated list of the components you want logs for.'
    status_parser.add_argument(
        '-C', '--components',
        help=components_help,
    ).completer = lazy_choices_completer(LOG_COMPONENTS.keys)
    cluster_help = 'The clusters to see relevant logs for. Defaults to all clusters to which this service is deployed.'
    status_parser.add_argument(
        '-c', '--clusters',
        help=cluster_help,
    ).completer = completer_clusters
    status_parser.add_argument(
        '-f', '-F', '--tail', dest='tail', action='store_true', default=True,
        help='Stream the logs and follow it for more data',
    )
    status_parser.add_argument(
        '-v', '--verbose', action='store_true', dest='verbose', default=False,
        help='Enable verbose logging',
    )
    status_parser.add_argument(
        '-r', '--raw-mode', action='store_true',
        dest='raw_mode', default=False,
        help="Don't pretty-print logs; emit them exactly as they are in scribe."
    )
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    default_component_string = ','.join(DEFAULT_COMPONENTS)
    component_descriptions = build_component_descriptions(LOG_COMPONENTS)
    epilog = 'COMPONENTS\n' \
             'There are many possible components of Paasta logs that you might be interested in:\n' \
             'Run --list-components to see all available log components.\n' \
             'If unset, the default components are:\n\t%s\n' \
             'So the default behavior of `paasta logs` will be to tail those logs.\n\n' \
             'Here is a list of all components and what they are:\n%s\n\n' \
             % (default_component_string, component_descriptions)
    status_parser.epilog = epilog
    status_parser.set_defaults(command=paasta_logs)
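
Every example on this page relies on lazy_choices_completer to attach tab completion. A hedged sketch of what such a factory could look like follows; the real PaaSTA helper may differ in details, and the argcomplete-style (prefix, **kwargs) signature is an assumption.

def lazy_choices_completer(list_func):
    # Defer calling list_func() until completion time, so the potentially
    # expensive lookup only happens when the shell actually asks for choices.
    def completer(prefix, **kwargs):
        return [choice for choice in list_func() if choice.startswith(prefix)]
    return completer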
Example #10
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        "emergency-stop",
        help="Stop a PaaSTA service instance in an emergency",
        description=(
            "'emergency-stop' stops a Marathon service instance by scaling it down to 0. If the "
            "provided 'instance' name refers to a Chronos job, 'emergency-stop' will cancel the "
            "chronos job if it is currently running."
        ),
        epilog=(
            "Warning: 'emergency-stop' does not interact with load balancers, so any in-flight "
            "traffic will be dropped after stopping. Additionally the 'desired state' of a service "
            "is not changed after an 'emergency-stop', therefore alerts will fire for the service "
            "after an emergency stop.\n\n"
            "'emergency-stop' is not a permanant declaration of state. If the operator wishes to "
            "stop a service permanently, they should run 'paasta stop', or configure the service to "
            "have '0' instances. Otherwise, subsequent changes or bounces to a service will start "
            "it right back up."
        ),
    )
    status_parser.add_argument(
        "-s", "--service", help="Service that you want to stop. Like 'example_service'."
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        "-i",
        "--instance",
        help="Instance of the service that you want to stop. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        "-c",
        "--cluster",
        help="The PaaSTA cluster that has the service instance you want to stop. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        "-a",
        "--appid",
        help="The complete marathon appid to stop. Like 'example-service.main.gitf0cfd3a0.config7a2a00b7",
        required=False,
    )
    status_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_stop)
Example #11
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'itest',
        help="Runs 'make itest' as part of the PaaSTA contract.",
        description=(
            "'paasta itest' runs 'make itest' in the root of a service directory. "
            "It is designed to be used in conjection with the 'Jenkins' workflow: "
            "http://paasta.readthedocs.org/en/latest/about/contract.html#jenkins-pipeline-recommended"
        )
    )
    list_parser.add_argument(
        '-s', '--service',
        help='Test and build docker image for this service. Leading '
             '"services-", as included in a Jenkins job name, '
             'will be stripped.',
        required=True,
    )
    list_parser.add_argument(
        '-c', '--commit',
        help='Git sha used to construct tag for built image',
        required=True,
    )
    list_parser.add_argument(
        '-d', '--soa-dir',
        dest='soa_dir',
        help='A directory from which soa-configs should be read',
        default=DEFAULT_SOA_DIR,
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_itest)
Example #12
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'metastatus',
        help="Display the status for an entire PaaSTA cluster",
        description=(
            "'paasta metastatus' is used to get the vital statistics about a PaaaSTA "
            "cluster as a whole. This tool is helpful when answering the question: 'Is "
            "it just my service or the whole cluster that is broken?'\n\n"
            "metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
            "querying the local APIs."
        ),
        epilog=(
            "The metastatus command may time out during heavy load. When that happens "
            "users may execute the ssh command directly, in order to bypass the timeout."
        ),
    )
    status_parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest="verbose",
        default=False,
        help="Print out more output regarding the state of the cluster",
    )
    clusters_help = (
        'A comma separated list of clusters to view. Defaults to view all clusters. '
        'Try: --clusters norcal-prod,nova-prod'
    )
    status_parser.add_argument(
        '-c', '--clusters',
        help=clusters_help,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.set_defaults(command=paasta_metastatus)
Example #13
def add_subparser(subparsers):
    rerun_parser = subparsers.add_parser(
        'rerun',
        help="Re-run a scheduled PaaSTA job",
        description=(
            "'paasta rerun' creates a copy of the specified PaaSTA scheduled job and executes it immediately. "
            "Parent-dependent relationships are ignored: 'pasta rerun' only executes individual jobs."
        ),
        epilog=(
            "Note: This command requires SSH and sudo privileges on the remote PaaSTA "
            "masters."
        ),
    )
    rerun_parser.add_argument(
        '-v', '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="Print out more output regarding the operation."
    )
    rerun_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to operate on.',
    ).completer = lazy_choices_completer(list_services)
    rerun_parser.add_argument(
        '-i', '--instance',
        help='Name of the scheduled job (instance) that you want to rerun.',
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    rerun_parser.add_argument(
        '-c', '--clusters',
        help="A comma-separated list of clusters to rerun the job on. Defaults to rerun on all clusters.\n"
             "For example: --clusters norcal-prod,nova-prod"
    ).completer = lazy_choices_completer(list_clusters)
    rerun_parser.add_argument(
        '-d', '--execution_date',
        help="The date the job should be rerun for. Expected in the format %%Y-%%m-%%dT%%H:%%M:%%S .",
        type=chronos_tools.parse_execution_date
    )
    rerun_parser.add_argument(
        '-y', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    rerun_parser.set_defaults(command=paasta_rerun)
Example #14
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-scale',
        help="Scale a PaaSTA service instance in Marathon without bouncing it",
        description=(
            "'emergency-scale' is used to scale a PaaSTA service instance by scaling it up or down "
            "in Marathon by N instances, where N is provided by the --delta argument.\n\n"
            "This command works by using SSH to execute commands directly on the Marathon servers, "
            "where API access and credentials are guaranteed to be available."
        ),
        epilog=(
            "Warning: Using emergency-scale to scale *down* a service will not interact with "
            "load balancers, and therefore will drop traffic."
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to scale. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to scale. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service instance you want to scale. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-a', '--appid',
        help="The complete marathon appid to scale. Like 'example-service.main.gitf0cfd3a0.config7a2a00b7",
        required=False,
    )
    status_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        default=DEFAULT_SOA_DIR,
        required=False,
        help="Path to root of yelpsoa-configs checkout",
    )
    status_parser.add_argument(
        '--delta',
        required=True,
        help="Number of instances you want to scale up (positive number) or down (negative number)",
    )
    status_parser.set_defaults(command=paasta_emergency_scale)
Example #15
def add_subparser(subparsers):
    fsm_parser = subparsers.add_parser(
        "fsm",
        help="Generate boilerplate configs for a new PaaSTA Service",
        description=(
            "'paasta fsm' is used to generate example soa-configs, which is useful during initial "
            "service creation. Currently 'fsm' generates 'yelp-specific' configuration, but can still "
            "be used as an example of a fully working PaaSTA service.\n\n"
            "After 'paasta fsm' is run, the operator should inspect the generated boilerplate configuration "
            "and adjust it to meet the particular needs of the new service."
        ),
    )
    fsm_parser.add_argument(
        "-y", "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        default=DEFAULT_SOA_DIR,
        required=True,
        help="Path to root of yelpsoa-configs checkout (required)")
    fsm_parser.add_argument(
        "-s", "--service-name",
        dest="srvname",
        default=None,
        help="Name of service being configured (--auto not available)")
    fsm_parser.add_argument(
        "--description",
        dest="description",
        default=None,
        help="One line description of the service. If AUTO will have placeholder text")
    fsm_parser.add_argument(
        "--external-link",
        dest="external_link",
        default=None,
        help="Link to a reference doc for the service. If AUTO will have placeholder text")
    fsm_parser.add_argument(
        "-a",
        "--auto",
        dest="auto",
        default=False,
        action="store_true",
        help="Automatically calculate and use sane defaults. Exit violently if "
             "any values cannot be automatically calculated.",
    )
    fsm_parser.add_argument(
        "-p", "--port",
        dest="port",
        default=None,
        help="Smartstack proxy port used by service.")
    fsm_parser.add_argument(
        "-t", "--team",
        dest="team",
        default=None,
        help="Team responsible for the service. Used by various notification "
             "systems. (--auto not available)",
    ).completer = lazy_choices_completer(list_teams)
    fsm_parser.set_defaults(command=paasta_fsm)
Example #16
def add_subparser(subparsers):
    for command, lower, upper, cmd_func in [
        ('start', 'start or restart', 'Start or restart', paasta_start),
        ('restart', 'start or restart', 'Start or restart', paasta_start),
        ('stop', 'stop', 'Stop', paasta_stop),
    ]:
        status_parser = subparsers.add_parser(
            command,
            help="%ss a PaaSTA service in a graceful way." % upper,
            description=(
                "%ss a PaaSTA service in a graceful way. This uses the Git control plane." % upper
            ),
            epilog=(
                "This command uses Git, and assumes access and authorization to the Git repo "
                "for the service is available."
            ),
        )
        status_parser.add_argument(
            '-s', '--service',
            help='Service that you want to %s. Like example_service.' % lower,
        ).completer = lazy_choices_completer(list_services)
        status_parser.add_argument(
            '-i', '--instances',
            help='A comma-separated list of instances of the service that you '
                 'want to %s. Like --instances main,canary' % lower
        ).completer = lazy_choices_completer(list_instances)
        status_parser.add_argument(
            '-c', '--clusters',
            help="A comma-separated list of clusters to view. "
            "For example: --clusters norcal-prod,nova-prod",
            required=True
        ).completer = lazy_choices_completer(list_clusters)

        status_parser.add_argument(
            '-d', '--soa-dir',
            dest="soa_dir",
            metavar="SOA_DIR",
            default=DEFAULT_SOA_DIR,
            help="define a different soa config directory",
        )
        status_parser.set_defaults(command=cmd_func)
Example #17
def add_subparser(subparsers):
    for command, lower, upper, cmd_func in [
        ("start", "start or restart", "Start or restart", paasta_start),
        ("restart", "start or restart", "Start or restart", paasta_start),
        ("stop", "stop", "Stop", paasta_stop),
    ]:
        status_parser = subparsers.add_parser(
            command,
            help="%ss a PaaSTA service in a graceful way." % upper,
            description=("%ss a PaaSTA service in a graceful way. This uses the Git control plane." % upper),
            epilog=(
                "This command uses Git, and assumes access and authorization to the Git repo "
                "for the service is available."
            ),
        )
        status_parser.add_argument(
            "-s", "--service", help="Service that you want to %s. Like example_service." % lower
        ).completer = lazy_choices_completer(list_services)
        status_parser.add_argument(
            "-i",
            "--instance",
            help='Instance of the service that you want to %s. Like "main" or "canary".' % lower,
            required=True,
        ).completer = lazy_choices_completer(list_instances)
        status_parser.add_argument(
            "-c",
            "--clusters",
            help="A comma-separated list of clusters to view. Defaults to view all clusters.\n"
            "For example: --clusters norcal-prod,nova-prod",
        ).completer = lazy_choices_completer(list_clusters)

        status_parser.add_argument(
            "-d",
            "--soa-dir",
            dest="soa_dir",
            metavar="SOA_DIR",
            default=DEFAULT_SOA_DIR,
            help="define a different soa config directory",
        )
        status_parser.set_defaults(command=cmd_func)
Example #18
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'metastatus',
        help="Display the status for an entire PaaSTA cluster",
        description=(
            "'paasta metastatus' is used to get the vital statistics about a PaaaSTA "
            "cluster as a whole. This tool is helpful when answering the question: 'Is "
            "it just my service or the whole cluster that is broken?'\n\n"
            "metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
            "querying the local APIs."
        ),
        epilog=(
            "The metastatus command may time out during heavy load. When that happens "
            "users may execute the ssh command directly, in order to bypass the timeout."
        ),
    )
    status_parser.add_argument(
        '-v', '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="""Print out more output regarding the state of the cluster.
        Multiple v options increase verbosity. Maximum is 3.""",
    )
    clusters_help = (
        'A comma separated list of clusters to view. Defaults to view all clusters. '
        'Try: --clusters norcal-prod,nova-prod'
    )
    status_parser.add_argument(
        '-c', '--clusters',
        help=clusters_help,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.add_argument(
        '-g',
        '--groupings',
        nargs='+',
        default=['region'],
        help=(
            'Group resource information of slaves grouped by attribute. '
            'Note: This is only effective with -vv'
        )
    )
    status_parser.add_argument('-H', '--humanize', action='store_true', dest="humanize", default=True,
                               help="Print human-readable sizes")
    status_parser.set_defaults(command=paasta_metastatus)
Example #19
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'get-latest-deployment',
        help='Gets the Git SHA for the latest deployment of a service',
    )
    list_parser.add_argument(
        '-s', '--service',
        help='Name of the service which you want to get the latest deployment for.',
        required=True,
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-i', '-l', '--deploy-group',
        help='Name of the deploy group which you want to get the latest deployment for.',
        required=True,
    ).completer = lazy_choices_completer(list_deploy_groups)
    list_parser.add_argument(
        '-d', '--soa-dir',
        help='A directory from which soa-configs should be read',
        default=DEFAULT_SOA_DIR,
    )

    list_parser.set_defaults(command=paasta_get_latest_deployment)
Example #20
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'rollback',
        help='Rollback a docker image to a previous deploy',
        description=(
            "'paasta rollback' is a human-friendly tool for marking a particular "
            "docker image for deployment, which invokes a bounce. While the command "
            "is called 'rollback', it can be used to roll forward or back, as long "
            "as there is a docker image available for the input git SHA."
        ),
        epilog=(
            "This rollback command uses the Git control plane, which requires network "
            "connectivity as well as authorization to the git repo."
        ),
    )
    list_parser.add_argument(
        '-k', '--commit',
        help='Git SHA to mark for rollback',
        required=True,
    )
    list_parser.add_argument(
        '-i', '--instances',
        help='Mark one or more instances to roll back (e.g. '
        '"canary", "canary,main"). If no instances specified,'
        ' all instances for that service are rolled back',
        default='',
        required=False,
    ).completer = lazy_choices_completer(list_instances)
    list_parser.add_argument(
        '-c', '--cluster',
        help='Mark the cluster to rollback (e.g. "cluster1")',
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        '-s', '--service',
        help='Name of the service to rollback (e.g. "service1")',
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_rollback)
Example #21
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'status',
        help="Display the status of a PaaSTA service.",
        description=(
            "'paasta status' works by SSH'ing to remote PaaSTA masters and "
            "inspecting the local APIs, and reports on the overal health "
            "of a service."),
        epilog=
        ("Note: This command requires SSH and sudo privileges on the remote PaaSTA "
         "masters."),
    )
    status_parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        dest="verbose",
        default=False,
        help="Print out more output regarding the state of the service")
    status_parser.add_argument(
        '-s', '--service', help='The name of the service you wish to inspect'
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-c', '--clusters',
        help="A comma-separated list of clusters to view. Defaults to view all clusters.\n"
             "For example: --clusters norcal-prod,nova-prod"
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-i', '--instances',
        help="A comma-separated list of instances to view. Defaults to view all instances.\n"
             "For example: --instances canary,main"
    )  # No completer because we need to know service first and we can't until some other stuff has happened
    status_parser.set_defaults(command=paasta_status)
Example #22
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-start',
        help="Kicks off a chronos job run. Not implemented for Marathon instances.",
        description=(
            "Chronos Jobs: Forces a job to run outside of its normal schedule.\n"
            "Marathon Apps: Not implemented.\n"
        ),
    )
    status_parser.add_argument(
        '-s',
        '--service',
        help="Service that you want to start. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i',
        '--instance',
        help="Instance of the service that you want to start. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c',
        '--cluster',
        help="The PaaSTA cluster that has the service instance you want to start. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_start)
Example #23
def add_instance_filter_arguments(status_parser,
                                  verb: str = "inspect") -> None:
    status_parser.add_argument(
        "-s", "--service", help=f"The name of the service you wish to {verb}"
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        "-c",
        "--clusters",
        help=f"A comma-separated list of clusters to {verb}. By default, will {verb} all clusters.\n"
             f"For example: --clusters norcal-prod,nova-prod",
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        "-i",
        "--instances",
        help=f"A comma-separated list of instances to {verb}. By default, will {verb} all instances.\n"
             f"For example: --instances canary,main",
    )  # No completer because we need to know service first and we can't until some other stuff has happened
    status_parser.add_argument(
        "-l",
        "--deploy-group",
        help=(
            f"Name of the deploy group which you want to {verb}. "
            f"If specified together with --instances and/or --clusters, will {verb} common instances only."
        ),
    ).completer = lazy_choices_completer(list_deploy_groups)
    status_parser.add_argument(
        "-o",
        "--owner",
        help=f"Only {verb} instances with this owner specified in soa-configs.",
    ).completer = lazy_choices_completer(list_teams)
    status_parser.add_argument(
        "-r",
        "--registration",
        help=f"Only {verb} instances with this registration.")
Example #24
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'emergency-restart',
        help="Restarts a PaaSTA service instance in an emergency",
        description=(
            "'paasta emergency-restart' is useful in situations where the operator "
            "needs to bypass the normal git-based control plan, and needs to interact "
            "with the underlying APIs directly. For example, in an emergency situation "
            "it may be necessary to restart a Marathon service without doing a 'full bounce'."
            "'emergency-restart' can do this, but at the cost of the safety of the normal "
            "bouncing procedures. In other words, and emergency-restart is fast, but not safe "
            "and will cause dropped traffic.\n\n"
        ),
    )
    status_parser.add_argument(
        '-s', '--service',
        help="Service that you want to restart. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i', '--instance',
        help="Instance of the service that you want to restart. Like 'main' or 'canary'.",
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    status_parser.add_argument(
        '-c', '--cluster',
        help="The PaaSTA cluster that has the service you want to restart. Like 'norcal-prod'.",
        required=True,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_emergency_restart)
Example #25
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'rollback',
        help='Rollback a docker image to a previous deploy',
        description=(
            "'paasta rollback' is a human-friendly tool for marking a particular "
            "docker image for deployment, which invokes a bounce. While the command "
            "is called 'rollback', it can be used to roll forward or back, as long "
            "as there is a docker image available for the input git SHA."
        ),
        epilog=(
            "This rollback command uses the Git control plane, which requires network "
            "connectivity as well as authorization to the git repo."
        ),
    )
    list_parser.add_argument(
        '-k',
        '--commit',
        help="Git SHA to mark for rollback. "
        "A commit to rollback to is required for paasta rollback to run. However if one is not provided, "
        "paasta rollback will instead output a list of valid git shas to rollback to.",
        required=False,
    ).completer = lazy_choices_completer(list_previously_deployed_shas)
    list_parser.add_argument(
        '-d',
        '--deploy-groups',
        help='Mark one or more deploy groups to roll back (e.g. '
        '"all.main", "all.main,all.canary"). If no deploy groups specified,'
        ' all deploy groups for that service are rolled back',
        default='',
        required=False,
    )
    list_parser.add_argument(
        '-s',
        '--service',
        help='Name of the service to rollback (e.g. "service1")',
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_rollback)
Example #26
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'info',
        help="Prints the general information about a service.",
        description=(
            "'paasta info' gathers information about a service from soa-configs "
            "and prints it in a human-friendly way. It does no API calls, it "
            "just analyzes the config files."
        ),
    )
    list_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect'
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_info)
Example #27
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "generate-pipeline",
        help="Configures a Yelp-specific Jenkins build pipeline to match the 'deploy.yaml'",
        description=(
            "'paasta generate-pipeline' is a Yelp-specific tool to interact with Jenkins "
            "to build a build pipeline that matches what is declared in the 'deploy.yaml' "
            "for a service."
        ),
        epilog="Warning: Due to the Yelpisms in this tool, it is not currently useful to other organizations.",
    )
    list_parser.add_argument(
        "-s", "--service", help="Name of service for which you wish to generate a Jenkins pipeline"
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_generate_pipeline)
Example #28
def add_subparser(subparsers):
    help_text = (
        "Determine whether service in pwd is 'paasta ready', checking for common "
        "mistakes in the soa-configs directory and the local service directory. This "
        "command is designed to be run from the 'root' of a service directory."
    )
    check_parser = subparsers.add_parser(
        'check',
        description=help_text,
        help=help_text,
    )
    check_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect. Defaults to autodetect.'
    ).completer = lazy_choices_completer(list_services)
    check_parser.set_defaults(command=paasta_check)
Example #29
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'generate-pipeline',
        help="Configures a Yelp-specific Jenkins build pipeline to match the 'deploy.yaml'",
        description=(
            "'paasta generate-pipeline' is a Yelp-specific tool to interact with Jenkins "
            "to build a build pipeline that matches what is declared in the 'deploy.yaml' "
            "for a service."
        ),
        epilog="Warning: Due to the Yelpisms in this tool, it is not currently useful to other organizations.",
    )
    list_parser.add_argument(
        '-s', '--service',
        help='Name of service for which you wish to generate a Jenkins pipeline',
    ).completer = lazy_choices_completer(list_services)
    list_parser.set_defaults(command=paasta_generate_pipeline)
Example #30
def add_subparser(subparsers):
    validate_parser = subparsers.add_parser(
        'validate',
        description="Execute 'paasta validate' from service repo root",
        help="Validate that all paasta config files in pwd are correct")
    validate_parser.add_argument(
        '-s', '--service',
        required=False,
        help="Service that you want to validate. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    validate_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        default=os.getcwd(),
        required=False,
        help="Path to root of yelpsoa-configs checkout",
    )
    validate_parser.set_defaults(command=paasta_validate)
Example #31
def add_subparser(subparsers):
    help_text = (
        "Determine whether service in pwd is 'paasta ready', checking for common "
        "mistakes in the soa-configs directory and the local service directory. This "
        "command is designed to be run from the 'root' of a service directory."
    )
    check_parser = subparsers.add_parser("check", description=help_text, help=help_text)
    check_parser.add_argument(
        "-s", "--service", help="The name of the service you wish to inspect. Defaults to autodetect."
    ).completer = lazy_choices_completer(list_services)
    check_parser.add_argument(
        "-y",
        "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        help="A directory from which yelpsoa-configs should be read from",
        default=DEFAULT_SOA_DIR,
    )
    check_parser.set_defaults(command=paasta_check)
Example #32
def add_subparser(subparsers):
    validate_parser = subparsers.add_parser(
        'validate',
        description="Execute 'paasta validate' from service repo root",
        help="Validate that all paasta config files in pwd are correct")
    validate_parser.add_argument(
        '-s', '--service',
        required=False,
        help="Service that you want to validate. Like 'example_service'.",
    ).completer = lazy_choices_completer(list_services)
    validate_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        default=os.getcwd(),
        required=False,
        help="Path to root of yelpsoa-configs checkout",
    )
    validate_parser.set_defaults(command=paasta_validate)
Example #33
def add_subparser(subparsers):
    help_text = (
        "Determine whether service in pwd is 'paasta ready', checking for common "
        "mistakes in the soa-configs directory and the local service directory. This "
        "command is designed to be run from the 'root' of a service directory."
    )
    check_parser = subparsers.add_parser("check", description=help_text, help=help_text)
    check_parser.add_argument(
        "-s",
        "--service",
        help="The name of the service you wish to inspect. Defaults to autodetect.",
    ).completer = lazy_choices_completer(list_services)
    check_parser.add_argument(
        "-y",
        "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        help="A directory from which yelpsoa-configs should be read from",
        default=DEFAULT_SOA_DIR,
    )
    check_parser.set_defaults(command=paasta_check)
Example #34
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'metastatus',
        help="Display the status for an entire PaaSTA cluster",
        description=(
            "'paasta metastatus' is used to get the vital statistics about a PaaSTA "
            "cluster as a whole. This tool is helpful when answering the question: 'Is "
            "it just my service or the whole cluster that is broken?'\n\n"
            "metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
            "querying the local APIs."
        ),
        epilog=(
            "The metastatus command may time out during heavy load. When that happens "
            "users may execute the ssh command directly, in order to bypass the timeout."
        ),
    )
    status_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="""Print out more output regarding the state of the cluster.
        Multiple v options increase verbosity. Maximum is 3.""",
    )
    clusters_help = (
        'A comma separated list of clusters to view. Defaults to view all clusters. '
        'Try: --clusters norcal-prod,nova-prod')
    status_parser.add_argument(
        '-c',
        '--clusters',
        help=clusters_help,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_metastatus)
Example #35
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "info",
        help="Prints the general information about a service.",
        description=(
            "'paasta info' gathers information about a service from soa-configs "
            "and prints it in a human-friendly way. It does no API calls, it "
            "just analyzes the config files."
        ),
    )
    list_parser.add_argument(
        "-s", "--service", help="The name of the service you wish to inspect"
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.set_defaults(command=paasta_info)
Example #36
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        "metastatus",
        help="Display the status for an entire PaaSTA cluster",
        description=(
            "'paasta metastatus' is used to get the vital statistics about a PaaaSTA "
            "cluster as a whole. This tool is helpful when answering the question: 'Is "
            "it just my service or the whole cluster that is broken?'\n\n"
            "metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
            "querying the local APIs."
        ),
        epilog=(
            "The metastatus command may time out during heavy load. When that happens "
            "users may execute the ssh command directly, in order to bypass the timeout."
        ),
    )
    status_parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        default=0,
        help="""Print out more output regarding the state of the cluster.
        Multiple v options increase verbosity. Maximum is 3.""",
    )
    clusters_help = (
        "A comma separated list of clusters to view. Defaults to view all clusters. "
        "Try: --clusters norcal-prod,nova-prod"
    )
    status_parser.add_argument("-c", "--clusters", help=clusters_help).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.set_defaults(command=paasta_metastatus)
Example #37
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "itest",
        help="Runs 'make itest' as part of the PaaSTA contract.",
        description=(
            "'paasta itest' runs 'make itest' in the root of a service directory. "
            "It is designed to be used in conjunction with the 'Jenkins' workflow: "
            "http://paasta.readthedocs.io/en/latest/about/contract.html#jenkins-pipeline-recommended"
        ),
    )
    list_parser.add_argument(
        "-s",
        "--service",
        help="Test and build docker image for this service. Leading "
        '"services-", as included in a Jenkins job name, '
        "will be stripped.",
        required=True,
    )
    list_parser.add_argument(
        "-c",
        "--commit",
        help="Git sha used to construct tag for built image",
        required=True,
    )
    list_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        help="A directory from which soa-configs should be read from",
        default=DEFAULT_SOA_DIR,
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        "--timeout",
        dest="timeout",
        help="How many seconds before this command times out",
        default=3600,
        type=float,
    )
    list_parser.set_defaults(command=paasta_itest)
Example #38
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'generate-pipeline',
        help="Configures a Yelp-specific Jenkins build pipeline to match the 'deploy.yaml'",
        description=(
            "'paasta generate-pipeline' is a Yelp-specific tool to interact with Jenkins "
            "to build a build pipeline that matches what is declared in the 'deploy.yaml' "
            "for a service."
        ),
        epilog="Warning: Due to the Yelpisms in this tool, it is not currently useful to other organizations."
    )
    list_parser.add_argument(
        '-s', '--service',
        help='Name of service for which you wish to generate a Jenkins pipeline',
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-d', '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.set_defaults(command=paasta_generate_pipeline)
Example #39
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'wait-for-deployment',
        help='Wait for a service to be deployed to a deploy_group',
        description=(
            "'paasta wait-for-deployment' waits for a service that was previously "
            "marked for deployment to be deployed to a deploy_group."),
        epilog=("Note: Access and credentials to the Git repo of a service "
                "are required for this command to work."),
    )
    list_parser.add_argument(
        '-u',
        '--git-url',
        help=('Git url for service. Defaults to the normal git URL for '
              'the service.'),
        default=None,
    )
    list_parser.add_argument(
        '-c',
        '-k',
        '--commit',
        help='Git sha to wait for deployment',
        required=True,
        type=validate_short_git_sha,
    )
    list_parser.add_argument(
        '-l',
        '--deploy-group',
        help='deploy group (e.g. cluster1.canary, cluster2.main).',
        required=True,
    ).completer = lazy_choices_completer(list_deploy_groups)
    list_parser.add_argument(
        '-s',
        '--service',
        help='Name of the service which you wish to wait for deployment. '
        'Leading "services-" will be stripped.',
        required=True,
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-t',
        '--timeout',
        dest="timeout",
        type=int,
        default=DEFAULT_DEPLOYMENT_TIMEOUT,
        help=("Time in seconds to wait for paasta to deploy the service. "
              "If the timeout is exceeded we return 1. "
              "Default is %(default)s seconds."),
    )
    list_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="Print out more output.",
    )

    list_parser.set_defaults(command=paasta_wait_for_deployment)
Example #40
def add_remote_run_args(parser):
    parser.add_argument(
        '-s',
        '--service',
        help='The name of the service you wish to inspect',
    ).completer = lazy_choices_completer(list_services)
    parser.add_argument(
        '-c',
        '--cluster',
        help=(
            "The name of the cluster you wish to run your task on. "
            "If omitted, uses the default cluster defined in the paasta remote-run configs"
        ),
    ).completer = lazy_choices_completer(list_clusters)
    parser.add_argument(
        '-y',
        '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read',
        default=DEFAULT_SOA_DIR,
    )
    parser.add_argument(
        '--json-dict',
        help='When running dry run, output the arguments as a json dict',
        action='store_true',
        dest='dry_run_json_dict',
    )
    parser.add_argument(
        '-C',
        '--cmd',
        help=
        ('Run Docker container with particular command, '
         'for example: "bash". By default will use the command or args specified by the '
         'soa-configs or what was specified in the Dockerfile'),
        required=False,
        default=None,
    )
    parser.add_argument(
        '-i',
        '--instance',
        help=
        ("Simulate a docker run for a particular instance of the service, like 'main' or 'canary'"
         ),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    parser.add_argument(
        '-v',
        '--verbose',
        help='Show Docker commands output',
        action='store_true',
        required=False,
        default=True,
    )
    parser.add_argument(
        '-d',
        '--dry-run',
        help='Don\'t launch the task',
        action='store_true',
        required=False,
        default=False,
    )
    parser.add_argument(
        '-t',
        '--staging-timeout',
        help='Time in seconds to allow the task to launch before it is killed',
        required=False,
        default=60,
        type=float,
    )
Example No. 41
def add_subparser(subparsers):
    rerun_parser = subparsers.add_parser(
        'rerun',
        help="Re-run a scheduled PaaSTA job",
        description=
        ("'paasta rerun' creates a copy of the specified PaaSTA scheduled job and executes it immediately. "
         "Parent-dependent relationships are ignored: 'pasta rerun' only executes individual jobs."
         ),
        epilog=
        ("Note: This command requires SSH and sudo privileges on the remote PaaSTA "
         "masters."),
        formatter_class=RawTextHelpFormatter,
    )
    rerun_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="Print out more output regarding the operation.",
    )
    rerun_parser.add_argument(
        '-s',
        '--service',
        help='The name of the service you wish to operate on.',
    ).completer = lazy_choices_completer(list_services)
    rerun_parser.add_argument(
        '-i',
        '--instance',
        help='Name of the scheduled job (instance) that you want to rerun.',
        required=True,
    ).completer = lazy_choices_completer(list_instances)
    rerun_parser.add_argument(
        '-c',
        '--clusters',
        help=
        "A comma-separated list of clusters to rerun the job on. Defaults to rerun on all clusters.\n"
        "For example: --clusters norcal-prod,nova-prod",
    ).completer = lazy_choices_completer(list_clusters)
    rerun_parser.add_argument(
        '-d',
        '--execution_date',
        help=
        "The date the job should be rerun for. Expected in the format %%Y-%%m-%%dT%%H:%%M:%%S .",
        type=chronos_tools.parse_execution_date,
    )
    rerun_parser.add_argument(
        '-y',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    rerun_parser.add_argument(
        '-t',
        '--rerun-type',
        dest="rerun_type",
        choices=['instance', 'graph'],
        help="Specify how to rerun jobs that have parent-dependencies.\n"
        "  - instance: rerun, as soon as possible, the required instance ONLY\n"
        "  - graph: will rerun, as soon as possible, ALL the instances related to the required instance\n"
        "    NOTE: the jobs rerun will respect the parents dependencies (topological order).\n"
        "    WARNING: it could be expensive in terms of resources and of time. Use it carefully.\n"
        "\n"
        "Example: Assume that we have 4 jobs (j1, j2, j3 and j4) with the following relations\n"
        "    j1 -> j2, j1 -> j3, j2 -> j3, j2 -> j4\n"
        "\n"
        "    Rerunning j2 wih --rerun-type=instance will rerun ONLY j2, j3 and j4 will not be re-ran\n"
        "    Rerunning j2 wih --rerun-type=graph will rerun j1, j2, j3 and j4 respecting the dependency order\n",
    )
    rerun_parser.add_argument(
        '-f',
        '--force-disabled',
        dest="force_disabled",
        action="store_true",
        default=False,
        help="Ignore the 'disabled' configuration of the service.\n"
        "If this is set, disabled services will still be run.\n"
        "If specified with '--rerun-type=graph', will also rerun disabled dependencies.\n",
    )
    rerun_parser.set_defaults(command=paasta_rerun)
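
The --execution_date help string above doubles every percent sign. That is necessary because argparse runs help text through %-formatting (which is also how %(default)s gets expanded), so a bare % would break help rendering. A quick standalone demonstration:

import argparse

parser = argparse.ArgumentParser(prog="demo")
parser.add_argument(
    "-d", "--execution_date",
    default="2021-01-01T00:00:00",
    help="Expected in the format %%Y-%%m-%%dT%%H:%%M:%%S. Default is %(default)s.",
)
# Renders "... format %Y-%m-%dT%H:%M:%S. Default is 2021-01-01T00:00:00."
parser.print_help()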
Example No. 42
def add_subparser(subparsers, ) -> None:
    status_parser = subparsers.add_parser(
        'metastatus',
        help="Display the status for an entire PaaSTA cluster",
        description=
        ("'paasta metastatus' is used to get the vital statistics about a PaaSTA "
         "cluster as a whole. This tool is helpful when answering the question: 'Is "
         "it just my service or the whole cluster that is broken?'\n\n"
         "metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
         "querying the local APIs."),
        epilog=
        ("The metastatus command may time out during heavy load. When that happens "
         "users may execute the ssh command directly, in order to bypass the timeout."
         ),
    )
    status_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="""Print out more output regarding the state of the cluster.
        Multiple v options increase verbosity. Maximum is 3.""",
    )
    clusters_help = (
        'A comma separated list of clusters to view. Defaults to view all clusters. '
        'Try: --clusters norcal-prod,nova-prod')
    status_parser.add_argument(
        '-c',
        '--clusters',
        help=clusters_help,
    ).completer = lazy_choices_completer(list_clusters)
    status_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    status_parser.add_argument(
        '-a',
        '--autoscaling-info',
        action='store_true',
        default=False,
        dest="autoscaling_info",
        help="Show cluster autoscaling info, implies -vv",
    )
    status_parser.add_argument(
        '--use-mesos-cache',
        action='store_true',
        default=False,
        dest="use_mesos_cache",
        help="Use Mesos cache for state.json and frameworks",
    )
    status_parser.add_argument(
        '-g',
        '--groupings',
        nargs='+',
        default=['region'],
        help=('Group resource information of slaves by attribute. '
              'Note: This is only effective with -vv'),
    )
    # The service and instance args default to None if not specified.
    status_parser.add_argument(
        '-s',
        '--service',
        help=
        ('Show how many of a given service instance can be run on a cluster slave. '
         'Note: This is only effective with -vvv and --instance must also be specified'
         ),
    ).completer = lazy_choices_completer(list_services)
    status_parser.add_argument(
        '-i',
        '--instance',
        help=
        ('Show how many of a given service instance can be run on a cluster slave. '
         'Note: This is only effective with -vvv and --service must also be specified'
         ),
    )
    status_parser.set_defaults(command=paasta_metastatus)
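
The -v flag above uses action='count', so verbosity is simply the number of times the flag is repeated, starting from the default of 0. A small demonstration:

import argparse

parser = argparse.ArgumentParser(prog="demo")
parser.add_argument("-v", "--verbose", action="count", dest="verbose", default=0)

print(parser.parse_args([]).verbose)       # 0
print(parser.parse_args(["-v"]).verbose)   # 1
print(parser.parse_args(["-vvv"]).verbose) # 3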
Example No. 43
def add_subparser(subparsers):
    boost_parser = subparsers.add_parser(
        'boost',
        help=
        "Set, print the status, or clear a capacity boost for a given region in a PaaSTA cluster",
        description=
        ("'paasta boost' is used to temporary provision more capacity in a given cluster "
         "It operates by ssh'ing to a Mesos master of a remote cluster, and "
         "interracting with the boost in the local zookeeper cluster. If you set or clear "
         "a boost, you may want to run the cluster autoscaler manually afterwards."
         ),
        epilog=
        ("The boost command may time out during heavy load. When that happens "
         "users may execute the ssh command directly, in order to bypass the timeout."
         ),
    )
    boost_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="""Print out more output regarding the state of the cluster.
        Multiple v options increase verbosity. Maximum is 3.""",
    )
    boost_parser.add_argument(
        '-c',
        '--cluster',
        type=str,
        required=True,
        help=
        """Paasta cluster(s) to boost. This option can take comma separated values.
        If auto-completion doesn't work, you can get a list of cluster with `paasta list-clusters'""",
    ).completer = lazy_choices_completer(list_clusters)
    boost_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    boost_parser.add_argument(
        '-p',
        '--pool',
        type=str,
        default='default',
        help=
        "Name of the pool you want to increase the capacity. Default is 'default' pool.",
    )
    boost_parser.add_argument(
        '-b',
        '--boost',
        type=float,
        default=cluster_boost.DEFAULT_BOOST_FACTOR,
        help=
        "Boost factor to apply. Default is 1.5. A big failover should be 2, 3 is the max.",
    )
    boost_parser.add_argument(
        '--duration',
        type=int,
        default=cluster_boost.DEFAULT_BOOST_DURATION,
        help="Duration of the capacity boost in minutes. Default is 40min.",
    )
    boost_parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        dest='override',
        help="Replace an existing boost. Default is false",
    )
    boost_parser.add_argument(
        'action',
        choices=[
            'set',
            'status',
            'clear',
        ],
        help="You can view the status, set or clear a boost.",
    )
    boost_parser.set_defaults(command=paasta_boost)
Example No. 44
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'local-run',
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect',
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-c', '--cluster',
        help='The name of the cluster you wish to simulate. If omitted, attempts to guess a cluster to simulate',
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read',
        default=DEFAULT_SOA_DIR,
    )
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        '-b', '--build',
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present. "
            "otherwise local-run will pull and run the Docker image that is marked for "
            "deployment in the Docker registry. Mutually exclusive with '--pull'."
        ),
        required=False,
        action='store_true',
        default=None,
    )
    build_pull_group.add_argument(
        '-p', '--pull',
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build. Defaults to "
            "autodetect a Makefile, if present will not pull, and instead assume that "
            "a local build is desired. Mutally exclusive with '--build'"
        ),
        required=False,
        action='store_true',
        default=None,
    )
    list_parser.add_argument(
        '-C', '--cmd',
        help=('Run Docker container with particular command, '
              'for example: "bash". By default will use the command or args specified by the '
              'soa-configs or what was specified in the Dockerfile'),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        '-i', '--instance',
        help='Simulate a docker run for a particular instance of the service, like "main" or "canary"',
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    list_parser.add_argument(
        '-v', '--verbose',
        help='Show Docker commands output',
        action='store_true',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-I', '--interactive',
        help=('Run container in interactive mode. If interactive is set the default command will be "bash" '
              'unless otherwise set by the "--cmd" flag'),
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-k', '--no-healthcheck',
        help='Disable simulated healthcheck',
        dest='healthcheck',
        action='store_false',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-t', '--healthcheck-only',
        help='Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)',
        dest='healthcheck_only',
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-d', '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_true',
    )
    list_parser.set_defaults(command=paasta_local_run)
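
The --build/--pull pair above sits in a mutually exclusive group and both default to None, so downstream code can tell "flag not given" apart from an explicit choice, and argparse rejects passing both. A minimal sketch of just that behavior:

import argparse

parser = argparse.ArgumentParser(prog="demo")
group = parser.add_mutually_exclusive_group()
group.add_argument("-b", "--build", action="store_true", default=None)
group.add_argument("-p", "--pull", action="store_true", default=None)

print(parser.parse_args([]))           # Namespace(build=None, pull=None)
print(parser.parse_args(["--build"]))  # Namespace(build=True, pull=None)
# parser.parse_args(["--build", "--pull"]) would exit with:
#   error: argument -p/--pull: not allowed with argument -b/--build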
Example No. 45
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "mark-for-deployment",
        help="Mark a docker image for deployment in git",
        description=(
            "'paasta mark-for-deployment' uses Git as the control-plane, to "
            "signal to other PaaSTA components that a particular docker image "
            "is ready to be deployed."
        ),
        epilog=(
            "Note: Access and credentials to the Git repo of a service are required "
            "for this command to work."
        ),
    )
    list_parser.add_argument(
        "-u",
        "--git-url",
        help=(
            "Git url for service -- where magic mark-for-deployment tags are pushed. "
            "Defaults to the normal git URL for the service."
        ),
        default=None,
    )
    list_parser.add_argument(
        "-c",
        "-k",
        "--commit",
        help="Git sha to mark for deployment",
        required=True,
        type=validate_short_git_sha,
    )
    list_parser.add_argument(
        "-l",
        "--deploy-group",
        "--clusterinstance",
        help="Mark the service ready for deployment in this deploy group (e.g. "
        "cluster1.canary, cluster2.main). --clusterinstance is deprecated and "
        "should be replaced with --deploy-group",
        required=True,
    ).completer = lazy_choices_completer(list_deploy_groups)
    list_parser.add_argument(
        "-s",
        "--service",
        help="Name of the service which you wish to mark for deployment. Leading "
        '"services-" will be stripped.',
        required=True,
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        "--verify-image-exists",
        help="Check the docker registry and verify the image has been pushed",
        dest="verify_image",
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "--wait-for-deployment",
        help="Set to poll paasta and wait for the deployment to finish, "
        "the default strategy is to mark for deployment and exit straightaway",
        dest="block",
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "-t",
        "--timeout",
        dest="timeout",
        type=int,
        default=DEFAULT_DEPLOYMENT_TIMEOUT,
        help=(
            "Time in seconds to wait for paasta to deploy the service. "
            "If the timeout is exceeded we return 1. "
            "Default is %(default)s seconds."
        ),
    )
    list_parser.add_argument(
        "--auto-rollback",
        help="Automatically roll back to the previously deployed sha if the deployment "
        "times out or is canceled (ctrl-c). Only applicable with --wait-for-deployment. "
        "Defaults to false.",
        dest="auto_rollback",
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        default=0,
        help="Print out more output.",
    )
    list_parser.add_argument(
        "--auto-certify-delay",
        dest="auto_certify_delay",
        type=int,
        default=None,  # the logic for this is complicated. See MarkForDeploymentProcess.get_auto_certify_delay.
        help="After a deploy finishes, wait this many seconds before automatically certifying."
        f"Default {DEFAULT_AUTO_CERTIFY_DELAY} when --auto-rollback is enabled",
    )
    list_parser.add_argument(
        "--auto-abandon-delay",
        dest="auto_abandon_delay",
        type=int,
        default=600,
        help="After a rollback finishes, wait this many seconds before automatically abandoning.",
    )
    list_parser.add_argument(
        "--auto-rollback-delay",
        dest="auto_rollback_delay",
        type=int,
        default=30,
        help="After noticing an SLO failure, wait this many seconds before automatically rolling back.",
    )

    list_parser.set_defaults(command=paasta_mark_for_deployment)
Example No. 46
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "spark-run",
        help="Run Spark on the PaaSTA cluster",
        description=(
            "'paasta spark-run' launches a Spark cluster on PaaSTA. "
            "It analyzes soa-configs and command line arguments to invoke "
            "a 'docker run'. By default, it will pull the Spark service "
            "image from the registry unless the --build option is used.\n\n"
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    group = list_parser.add_mutually_exclusive_group()
    group.add_argument(
        "-b",
        "--build",
        help="Build the docker image from scratch using the local Makefile's cook-image target.",
        action="store_true",
        default=False,
    )
    group.add_argument(
        "-I",
        "--image",
        help="Use the provided image to start the Spark driver and executors.",
    )

    list_parser.add_argument(
        "-e",
        "--enable-compact-bin-packing",
        help=(
            "Enabling compact bin packing will try to ensure executors are scheduled on the same nodes. Requires --cluster-manager to be kubernetes."
            " Always true by default, keep around for backward compability."
        ),
        action="store_true",
        default=True,
    )
    list_parser.add_argument(
        "--disable-compact-bin-packing",
        help=(
            "Disable compact bin packing. Requires --cluster-manager to be kubernetes. Note: this option is only for advanced Spark configurations,"
            " don't use it unless you've been instructed to do so."
        ),
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "--docker-memory-limit",
        help=(
            "Set docker memory limit. Should be greater than driver memory. Defaults to 2x spark.driver.memory. Example: 2g, 500m, Max: 64g"
            "Note: If memory limit provided is greater than associated with the batch instance, it will default to max memory of the box."
        ),
        default=None,
    )
    list_parser.add_argument(
        "--docker-cpu-limit",
        help=(
            "Set docker cpus limit. Should be greater than driver cores. Defaults to 1x spark.driver.cores."
            "Note: The job will fail if the limit provided is greater than number of cores present on batch box (8 for production batch boxes)."
        ),
        default=None,
    )
    list_parser.add_argument(
        "--docker-registry",
        help="Docker registry to push the Spark image built.",
        default=DEFAULT_SPARK_DOCKER_REGISTRY,
    )

    list_parser.add_argument(
        "-s",
        "--service",
        help="The name of the service from which the Spark image is built.",
        default=DEFAULT_SPARK_SERVICE,
    ).completer = lazy_choices_completer(list_services)

    list_parser.add_argument(
        "-i",
        "--instance",
        help=("Start a docker run for a particular instance of the service."),
        default="adhoc",
    ).completer = lazy_choices_completer(list_instances)

    try:
        system_paasta_config = load_system_paasta_config()
        default_spark_cluster = system_paasta_config.get_spark_run_config().get(
            "default_cluster"
        )
        default_spark_pool = system_paasta_config.get_spark_run_config().get(
            "default_pool"
        )
    except PaastaNotConfiguredError:
        default_spark_cluster = "pnw-devc"
        default_spark_pool = "batch"

    list_parser.add_argument(
        "-c",
        "--cluster",
        help=("The name of the cluster you wish to run Spark on."),
        default=default_spark_cluster,
    )

    list_parser.add_argument(
        "-p",
        "--pool",
        help="Name of the resource pool to run the Spark job.",
        default=default_spark_pool,
    )

    list_parser.add_argument(
        "-w",
        "--work-dir",
        default="{}:{}".format(os.getcwd(), DEFAULT_SPARK_WORK_DIR),
        help="The read-write volume to mount in format local_abs_dir:container_abs_dir",
    )

    list_parser.add_argument(
        "-y",
        "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        help="A directory from which yelpsoa-configs should be read from.",
        default=DEFAULT_SOA_DIR,
    )

    list_parser.add_argument(
        "-C",
        "--cmd",
        help="Run the spark-shell, pyspark, spark-submit, jupyter-lab, or history-server command.",
    )

    list_parser.add_argument(
        "--timeout-job-runtime",
        type=str,
        help="Timeout value which will be added before spark-submit. Job will exit if it doesn't "
        "finishes in given runtime. Recommended value: 2 * expected runtime. Example: 1h, 30m {DEFAULT_RUNTIME_TIMEOUT}",
        default=DEFAULT_RUNTIME_TIMEOUT,
    )

    list_parser.add_argument(
        "-d",
        "--dry-run",
        help="Shows the arguments supplied to docker as json.",
        action="store_true",
        default=False,
    )

    list_parser.add_argument(
        "--spark-args",
        help="Spark configurations documented in https://spark.apache.org/docs/latest/configuration.html. "
        r'For example, --spark-args "spark.mesos.constraints=pool:default\;instance_type:m4.10xlarge '
        'spark.executor.cores=4".',
    )

    list_parser.add_argument(
        "--nvidia",
        help="Use nvidia docker runtime for Spark driver process (requires GPU)",
        action="store_true",
        default=False,
    )

    list_parser.add_argument(
        "--mrjob",
        help="Pass Spark arguments to invoked command in the format expected by mrjobs",
        action="store_true",
        default=False,
    )

    list_parser.add_argument(
        "--cluster-manager",
        help="Specify which cluster manager to use. Support for certain cluster managers may be experimental",
        dest="cluster_manager",
        choices=CLUSTER_MANAGERS,
        default=CLUSTER_MANAGER_K8S,
    )

    if clusterman_metrics:
        list_parser.add_argument(
            "--suppress-clusterman-metrics-errors",
            help="Continue even if sending resource requirements to Clusterman fails. This may result in the job "
            "failing to acquire resources.",
            action="store_true",
        )

    list_parser.add_argument(
        "-j", "--jars", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--executor-memory", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--executor-cores", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--max-cores", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--driver-max-result-size", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--driver-memory", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    list_parser.add_argument(
        "--driver-cores", help=argparse.SUPPRESS, action=DeprecatedAction
    )

    aws_group = list_parser.add_argument_group(
        title="AWS credentials options",
        description="If --aws-credentials-yaml is specified, it overrides all "
        "other options. Otherwise, if -s/--service is specified, spark-run "
        "looks for service credentials in /etc/boto_cfg/[service].yaml. If "
        "it does not find the service credentials or no service is "
        "specified, spark-run falls back to the boto default behavior "
        "(checking ~/.aws/credentials, ~/.boto, etc).",
    )

    aws_group.add_argument(
        "--aws-credentials-yaml",
        help="Load aws keys from the provided yaml file. The yaml file must "
        "have keys for aws_access_key_id and aws_secret_access_key.",
    )

    aws_group.add_argument(
        "--aws-profile",
        help="Name of the AWS profile to load credentials from. Only used when "
        "--aws-credentials-yaml is not specified and --service is either "
        "not specified or the service does not have credentials in "
        "/etc/boto_cfg",
        default="default",
    )

    aws_group.add_argument(
        "--no-aws-credentials",
        help="Do not load any AWS credentials; allow the Spark job to use its "
        "own logic to load credentials",
        action="store_true",
        default=False,
    )

    aws_group.add_argument(
        "--disable-aws-credential-env-variables",
        help="Do not put AWS credentials into environment variables.  Credentials "
        "will still be read and set in the Spark configuration.  In Spark v3.2 "
        "you want to set this argument.",
        action="store_true",
        default=False,
    )

    aws_group.add_argument(
        "--disable-temporary-credentials-provider",
        help="Disable explicit setting of TemporaryCredentialsProvider if a session token "
        "is found in the AWS credentials.  In Spark v3.2 you want to set this argument ",
        action="store_true",
        default=False,
    )

    aws_group.add_argument(
        "--aws-region",
        help=f"Specify an aws region. If the region is not specified, we will"
        f"default to using {DEFAULT_AWS_REGION}.",
        default=DEFAULT_AWS_REGION,
    )

    jupyter_group = list_parser.add_argument_group(
        title="Jupyter kernel culling options",
        description="Idle kernels will be culled by default. Idle "
        "kernels with connections can be overridden not to be culled.",
    )

    jupyter_group.add_argument(
        "--cull-idle-timeout",
        type=int,
        default=7200,
        help="Timeout (in seconds) after which a kernel is considered idle and "
        "ready to be culled.",
    )

    jupyter_group.add_argument(
        "--not-cull-connected",
        action="store_true",
        default=False,
        help="By default, connected idle kernels are culled after timeout. "
        "They can be skipped if not-cull-connected is specified.",
    )

    list_parser.set_defaults(command=paasta_spark_run)
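
The --suppress-clusterman-metrics-errors flag above is only registered when clusterman_metrics is truthy, i.e. when the optional dependency imported successfully. A minimal sketch of that guard (the import style here is an assumption; the example only shows the truthiness check):

import argparse

try:
    import clusterman_metrics  # optional dependency; may not be installed
except ImportError:
    clusterman_metrics = None

parser = argparse.ArgumentParser(prog="demo")
if clusterman_metrics:
    parser.add_argument(
        "--suppress-clusterman-metrics-errors",
        action="store_true",
        help="Continue even if sending resource requirements to Clusterman fails.",
    )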
Example No. 47
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'mark-for-deployment',
        help='Mark a docker image for deployment in git',
        description=(
            "'paasta mark-for-deployment' uses Git as the control-plane, to "
            "signal to other PaaSTA components that a particular docker image "
            "is ready to be deployed."),
        epilog=
        ("Note: Access and credentials to the Git repo of a service are required "
         "for this command to work."),
    )
    list_parser.add_argument(
        '-u',
        '--git-url',
        help=
        ('Git url for service -- where magic mark-for-deployment tags are pushed. '
         'Defaults to the normal git URL for the service.'),
        default=None,
    )
    list_parser.add_argument(
        '-c',
        '-k',
        '--commit',
        help='Git sha to mark for deployment',
        required=True,
        type=validate_short_git_sha,
    )
    list_parser.add_argument(
        '-l',
        '--deploy-group',
        '--clusterinstance',
        help='Mark the service ready for deployment in this deploy group (e.g. '
        'cluster1.canary, cluster2.main). --clusterinstance is deprecated and '
        'should be replaced with --deploy-group',
        required=True,
    ).completer = lazy_choices_completer(list_deploy_groups)
    list_parser.add_argument(
        '-s',
        '--service',
        help=
        'Name of the service which you wish to mark for deployment. Leading '
        '"services-" will be stripped.',
        required=True,
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '--verify-image-exists',
        help='Check the docker registry and verify the image has been pushed',
        dest='verify_image',
        action='store_true',
        default=False,
    )
    list_parser.add_argument(
        '--wait-for-deployment',
        help='Set to poll paasta and wait for the deployment to finish; '
        'the default strategy is to mark for deployment and exit straightaway',
        dest='block',
        action='store_true',
        default=False,
    )
    list_parser.add_argument(
        '-t',
        '--timeout',
        dest="timeout",
        type=int,
        default=DEFAULT_DEPLOYMENT_TIMEOUT,
        help=("Time in seconds to wait for paasta to deploy the service. "
              "If the timeout is exceeded we return 1. "
              "Default is %(default)s seconds."),
    )
    list_parser.add_argument(
        '--auto-rollback',
        help=
        'Automatically roll back to the previously deployed sha if the deployment '
        'times out or is canceled (ctrl-c). Only applicable with --wait-for-deployment. '
        'Defaults to false.',
        dest='auto_rollback',
        action='store_true',
        default=False,
    )
    list_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        dest="verbose",
        default=0,
        help="Print out more output.",
    )

    list_parser.set_defaults(command=paasta_mark_for_deployment)
Example No. 48
File: logs.py Project: somic/paasta
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        "logs",
        help="Streams logs relevant to a service across the PaaSTA components",
        description=("'paasta logs' works by streaming PaaSTA-related event messages " "in a human-readable way."),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    status_parser.add_argument(
        "-s", "--service", help="The name of the service you wish to inspect. Defaults to autodetect."
    ).completer = lazy_choices_completer(list_services)
    components_help = "A comma separated list of the components you want logs for."
    status_parser.add_argument("-C", "--components", help=components_help).completer = lazy_choices_completer(
        LOG_COMPONENTS.keys
    )
    cluster_help = "The clusters to see relevant logs for. Defaults to all clusters to which this service is deployed."
    status_parser.add_argument("-c", "--clusters", help=cluster_help).completer = completer_clusters
    instance_help = "The instances to see relevant logs for. Defaults to all instances for this service."
    status_parser.add_argument("-i", "--instances", help=instance_help).completer = completer_clusters
    status_parser.add_argument(
        "-f",
        "-F",
        "--tail",
        dest="tail",
        action="store_true",
        default=False,
        help="Stream the logs and follow it for more data",
    )
    status_parser.add_argument(
        "-v", "--verbose", action="store_true", dest="verbose", default=False, help="Enable verbose logging"
    )
    status_parser.add_argument(
        "-r",
        "--raw-mode",
        action="store_true",
        dest="raw_mode",
        default=False,
        help="Don't pretty-print logs; emit them exactly as they are in scribe.",
    )
    status_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )

    status_parser.add_argument(
        "-a",
        "--from",
        "--after",
        dest="time_from",
        help="The time to start gettings logs from. This can be an ISO-8601 timestamp or a human readable duration "
        'parseable by pytimeparse such as "5m", "1d3h" etc. For example: --from "3m" would start retrieving logs '
        "from 3 minutes ago",
    )
    status_parser.add_argument(
        "-t",
        "--to",
        dest="time_to",
        help="The time to get logs up to. This can be an ISO-8601 timestamp or a human readable duration"
        'parseable by pytimeparse such as "5m", "1d3h" etc. Defaults to right now',
    )
    status_parser.add_argument(
        "-l",
        "-n",
        "--lines",
        dest="line_count",
        help='The number of lines to retrieve from the specified offset. May optionally be prefixed with a "+" or "-" '
        'to specify which direction from the offset, defaults to "-100"',
        type=int,
    )
    status_parser.add_argument(
        "-o",
        "--line-offset",
        dest="line_offset",
        help="The offset at which line to start grabbing logs from. For example 1 would be the first line. Paired with "
        "--lines +100 would give you the first 100 lines of logs. Defaults to the latest line's offset",
        type=int,
    )
    default_component_string = ",".join(DEFAULT_COMPONENTS)
    component_descriptions = build_component_descriptions(LOG_COMPONENTS)
    epilog = (
        "TIME/LINE PARAMETERS\n"
        "The args for time and line based offsetting are mutually exclusive, they cannot be used together. "
        "Additionally, some logging backends may not support offsetting by time or offsetting by lines."
        "\n"
        "\n"
        "COMPONENTS\n"
        "There are many possible components of Paasta logs that you might be interested in:\n"
        "Run --list-components to see all available log components.\n"
        "If unset, the default components are:\n\t%s\n"
        "So the default behavior of `paasta logs` will be to tail those logs.\n\n"
        "Here is a list of all components and what they are:\n%s\n\n"
        % (default_component_string, component_descriptions)
    )
    status_parser.epilog = epilog
    status_parser.set_defaults(command=paasta_logs)
Example No. 49
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'spark-run',
        help="Run Spark on the PaaSTA cluster",
        description=(
            "'paasta spark-run' launches a Spark cluster on PaaSTA. "
            "It analyzes soa-configs and command line arguments to invoke "
            "a 'docker run'. By default, it will pull the Spark service "
            "image from the registry unless the --build option is used.\n\n"),
    )
    list_parser.add_argument(
        '-b',
        '--build',
        help=
        "Build the docker image from scratch using the local Makefile's cook-image target.",
        action='store_true',
        default=False,
    )

    list_parser.add_argument(
        '-s',
        '--service',
        help="The name of the service from which the Spark image is built.",
        default='spark',
    ).completer = lazy_choices_completer(list_services)

    list_parser.add_argument(
        '-i',
        '--instance',
        help=("Start a docker run for a particular instance of the service."),
        default='client',
    ).completer = lazy_choices_completer(list_instances)

    # Restrict usage to norcal-devc and pnw-devc for now.
    list_parser.add_argument(
        '-c',
        '--cluster',
        help=("The name of the cluster you wish to run Spark on. "),
        choices=['norcal-devc', 'pnw-devc'],
        required=True,
    )

    list_parser.add_argument(
        '-y',
        '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read.',
        default=DEFAULT_SOA_DIR,
    )

    list_parser.add_argument(
        '-C',
        '--cmd',
        help=
        "Run Spark with the spark-shell, pyspark, spark-submit or jupyter command.",
        default='pyspark',
    )

    list_parser.add_argument(
        '-d',
        '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_true',
        default=False,
    )

    list_parser.add_argument(
        '--executor-memory',
        type=int,
        help='Size of Spark executor memory in GB',
        default=4,
    )

    list_parser.add_argument(
        '--executor-cores',
        type=int,
        help='Number of CPU cores for each Spark executor',
        default=2,
    )

    list_parser.add_argument(
        '--max-cores',
        type=int,
        help='The total number of CPU cores for all Spark executors',
        default=4,
    )

    list_parser.add_argument(
        '--driver-max-result-size',
        type=int,
        help=
        'Limit of total size in GB of serialized results of all partitions',
    )

    list_parser.add_argument(
        '--driver-memory',
        type=int,
        help='Size of Spark driver memory in GB',
    )

    list_parser.add_argument(
        '--driver-cores',
        type=int,
        help='Number of CPU cores for the Spark driver',
    )

    list_parser.set_defaults(command=paasta_spark_run)
Example No. 50
def test_lazy_choices_completer():
    completer = utils.lazy_choices_completer(lambda: ["1", "2", "3"])
    assert completer(prefix="") == ["1", "2", "3"]
Example No. 51
def add_subparser(subparsers):
    secret_parser = subparsers.add_parser(
        "secret",
        help="Add/update PaaSTA service secrets",
        description=(
            "This script allows you to add secrets to your services "
            "as environment variables. This script modifies your local "
            "checkout of yelpsoa-configs and you must then commit and "
            "push the changes back to git."),
    )
    secret_parser.add_argument("action",
                               help="should be add/update",
                               choices=["add", "update", "decrypt"])
    secret_parser.add_argument(
        "-n",
        "--secret-name",
        type=check_secret_name,
        required=True,
        help="The name of the secret to create/update, "
        "this is the name you will reference in your "
        "services yaml files and should "
        "be unique per service.",
    )

    # Must choose valid service or act on a shared secret
    service_group = secret_parser.add_mutually_exclusive_group(required=True)
    service_group.add_argument(
        "-s",
        "--service",
        help="The name of the service on which you wish to act"
    ).completer = lazy_choices_completer(list_services)
    service_group.add_argument(
        "--shared",
        help="Act on a secret that can be shared by all services",
        action="store_true",
    )

    secret_parser.add_argument(
        "-c",
        "--clusters",
        help="A comma-separated list of clusters to create secrets for. "
        "Note: this is translated to ecosystems because Vault is run "
        "at an ecosystem level. As a result you can only have different "
        "secrets per ecosystem. (it is not possible for example to encrypt "
        "a different value for pnw-prod vs nova-prod. "
        "Defaults to all clusters in which the service runs. "
        "For example: --clusters pnw-prod,nova-prod ",
    ).completer = lazy_choices_completer(list_clusters)
    secret_parser.add_argument(
        "-p",
        "--plain-text",
        required=False,
        type=str,
        help="Optionally specify the secret as a command line argument",
    )
    secret_parser.add_argument(
        "-i",
        "--stdin",
        required=False,
        action="store_true",
        default=False,
        help="Optionally pass the plaintext from stdin",
    )
    secret_parser.add_argument(
        "--cross-env-motivation",
        required=False,
        type=str,
        help=
        ("Provide motivation in case the same value is being duplicated "
         "across multiple runtime environments when adding or updating a secret"
         ),
        metavar="MOTIVATION",
    )
    secret_parser.set_defaults(command=paasta_secret)
Example No. 52
File: logs.py Project: ese/paasta
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'logs',
        help="Streams logs relevant to a service across the PaaSTA components",
        description=(
            "'paasta logs' works by streaming PaaSTA-related event messages "
            "in a human-readable way."),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    status_parser.add_argument(
        '-s',
        '--service',
        help=
        'The name of the service you wish to inspect. Defaults to autodetect.'
    ).completer = lazy_choices_completer(list_services)
    components_help = 'A comma separated list of the components you want logs for.'
    status_parser.add_argument(
        '-C',
        '--components',
        help=components_help,
    ).completer = lazy_choices_completer(LOG_COMPONENTS.keys)
    cluster_help = 'The clusters to see relevant logs for. Defaults to all clusters to which this service is deployed.'
    status_parser.add_argument(
        '-c',
        '--clusters',
        help=cluster_help,
    ).completer = completer_clusters
    status_parser.add_argument(
        '-f',
        '-F',
        '--tail',
        dest='tail',
        action='store_true',
        default=True,
        help='Stream the logs and follow it for more data',
    )
    status_parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        default=False,
        help='Enable verbose logging',
    )
    status_parser.add_argument(
        '-r',
        '--raw-mode',
        action='store_true',
        dest='raw_mode',
        default=False,
        help="Don't pretty-print logs; emit them exactly as they are in scribe."
    )
    default_component_string = ','.join(DEFAULT_COMPONENTS)
    component_descriptions = build_component_descriptions(LOG_COMPONENTS)
    epilog = 'COMPONENTS\n' \
             'There are many possible components of Paasta logs that you might be interested in:\n' \
             'Run --list-components to see all available log components.\n' \
             'If unset, the default components are:\n\t%s\n' \
             'So the default behavior of `paasta logs` will be to tail those logs.\n\n' \
             'Here is a list of all components and what they are:\n%s\n\n' \
             % (default_component_string, component_descriptions)
    status_parser.epilog = epilog
    status_parser.set_defaults(command=paasta_logs)
Example No. 53
def test_lazy_choices_completer():
    completer = utils.lazy_choices_completer(lambda: ['1', '2', '3'])
    assert completer(prefix='') == ['1', '2', '3']
Example No. 54
def add_subparser(subparsers):
    secret_parser = subparsers.add_parser(
        'secret',
        help="Add/update PaaSTA service secrets",
        description=(
            "This script allows you to add secrets to your services "
            "as environment variables. This script modifies your local "
            "checkout of yelpsoa-configs and you must then commit and "
            "push the changes back to git."),
    )
    secret_parser.add_argument(
        "action",
        help="should be add/update",
        choices=["add", "update", "decrypt"],
    )
    secret_parser.add_argument(
        "-n",
        "--secret-name",
        required=True,
        help="The name of the secret to create/update, "
        "this is the name you will reference in your "
        "services marathon/chronos yaml files and should "
        "be unique per service. Please prefix with "
        "PAASTA_SECRET_ if you are going to use the "
        "yelp_servlib client library.",
    )

    # Must choose valid service or act on a shared secret
    service_group = secret_parser.add_mutually_exclusive_group(required=True)
    service_group.add_argument(
        '-s',
        '--service',
        help='The name of the service on which you wish to act',
    ).completer = lazy_choices_completer(list_services)
    service_group.add_argument(
        '--shared',
        help='Act on a secret that can be shared by all services',
        action='store_true',
    )

    secret_parser.add_argument(
        '-c',
        '--clusters',
        help="A comma-separated list of clusters to create secrets for. "
        "Note: this is translated to ecosystems because Vault is run "
        "at an ecosystem level. As a result you can only have different "
        "secrets per ecosystem. (it is not possible for example to encrypt "
        "a different value for norcal-prod vs nova-prod. "
        "Defaults to all clusters in which the service runs. "
        "For example: --clusters norcal-prod,nova-prod ",
    ).completer = lazy_choices_completer(list_clusters)
    secret_parser.add_argument(
        "-p",
        "--plain-text",
        required=False,
        type=str,
        help="Optionally specify the secret as a command line argument",
    )
    secret_parser.add_argument(
        "-i",
        "--stdin",
        required=False,
        action="store_true",
        default=False,
        help="Optionally pass the plaintext from stdin",
    )
    secret_parser.set_defaults(command=paasta_secret)
Example No. 55
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'spark-run',
        help="Run Spark on the PaaSTA cluster",
        description=(
            "'paasta spark-run' launches a Spark cluster on PaaSTA. "
            "It analyzes soa-configs and command line arguments to invoke "
            "a 'docker run'. By default, it will pull the Spark service "
            "image from the registry unless the --build option is used.\n\n"),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    group = list_parser.add_mutually_exclusive_group()
    group.add_argument(
        '-b',
        '--build',
        help=
        "Build the docker image from scratch using the local Makefile's cook-image target.",
        action='store_true',
        default=False,
    )
    group.add_argument(
        '-I',
        '--image',
        help="Use the provided image to start the Spark driver and executors.",
    )

    list_parser.add_argument(
        '--docker-registry',
        help="Docker registry to push the Spark image built.",
        default=DEFAULT_SPARK_DOCKER_REGISTRY,
    )

    list_parser.add_argument(
        '-s',
        '--service',
        help="The name of the service from which the Spark image is built.",
        default=DEFAULT_SERVICE,
    ).completer = lazy_choices_completer(list_services)

    list_parser.add_argument(
        '-i',
        '--instance',
        help=("Start a docker run for a particular instance of the service."),
        default='client',
    ).completer = lazy_choices_completer(list_instances)

    try:
        system_paasta_config = load_system_paasta_config()
        default_spark_cluster = system_paasta_config.get_spark_run_config(
        ).get('default_cluster')
        default_spark_pool = system_paasta_config.get_spark_run_config().get(
            'default_pool')
    except PaastaNotConfiguredError:
        default_spark_cluster = 'pnw-devc'
        default_spark_pool = 'batch'

    list_parser.add_argument(
        '-c',
        '--cluster',
        help=("The name of the cluster you wish to run Spark on."),
        default=default_spark_cluster,
    )

    list_parser.add_argument(
        '-p',
        '--pool',
        help="Name of the resource pool to run the Spark job.",
        default=default_spark_pool,
    )

    list_parser.add_argument(
        '-w',
        '--work-dir',
        default='{}:{}'.format(os.getcwd(), DEFAULT_SPARK_WORK_DIR),
        help=
        "The read-write volume to mount in format local_abs_dir:container_abs_dir",
    )

    list_parser.add_argument(
        '-y',
        '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read.',
        default=DEFAULT_SOA_DIR,
    )

    list_parser.add_argument(
        '-C',
        '--cmd',
        help=
        "Run the spark-shell, pyspark, spark-submit, jupyter, or history-server command.",
    )

    list_parser.add_argument(
        '-d',
        '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_true',
        default=False,
    )

    list_parser.add_argument(
        '--spark-args',
        help=
        'Spark configurations documented in https://spark.apache.org/docs/latest/configuration.html. '
        r'For example, --spark-args "spark.mesos.constraints=pool:default\;instance_type:m4.10xlarge '
        'spark.executor.cores=4".',
    )

    if clusterman_metrics:
        list_parser.add_argument(
            '--suppress-clusterman-metrics-errors',
            help=
            'Continue even if sending resource requirements to Clusterman fails. This may result in the job '
            'failing to acquire resources.',
            action='store_true',
        )

    list_parser.add_argument(
        '-j',
        '--jars',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--executor-memory',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--executor-cores',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--max-cores',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--driver-max-result-size',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--driver-memory',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    list_parser.add_argument(
        '--driver-cores',
        help=argparse.SUPPRESS,
        action=DeprecatedAction,
    )

    aws_group = list_parser.add_argument_group(
        title='AWS credentials options',
        description='If --aws-credentials-yaml is specified, it overrides all '
        'other options. Otherwise, if -s/--service is specified, spark-run '
        'looks for service credentials in /etc/boto_cfg/[service].yaml. If '
        'it does not find the service credentials or no service is '
        'specified, spark-run falls back to the boto default behavior '
        '(checking ~/.aws/credentials, ~/.boto, etc).',
    )

    aws_group.add_argument(
        '--aws-credentials-yaml',
        help='Load aws keys from the provided yaml file. The yaml file must '
        'have keys for aws_access_key_id and aws_secret_access_key.',
    )

    aws_group.add_argument(
        '--aws-profile',
        help="Name of the AWS profile to load credentials from. Only used when "
        "--aws-credentials-yaml is not specified and --service is either "
        "not specified or the service does not have credentials in "
        "/etc/boto_cfg",
        default='default',
    )

    jupyter_group = list_parser.add_argument_group(
        title='Jupyter kernel culling options',
        description='Idle kernels will be culled by default. Idle '
        'kernels with connections can be overridden not to be culled.',
    )

    jupyter_group.add_argument(
        '--cull-idle-timeout',
        type=int,
        default=7200,
        help='Timeout (in seconds) after which a kernel is considered idle and '
        'ready to be culled.',
    )

    jupyter_group.add_argument(
        '--not-cull-connected',
        action='store_true',
        default=False,
        help='By default, connected idle kernels are culled after timeout. '
        'They can be skipped if not-cull-connected is specified.',
    )

    list_parser.set_defaults(command=paasta_spark_run)
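
Several flags above (e.g. --jars, --executor-memory) are registered with help=argparse.SUPPRESS and a custom DeprecatedAction, so old invocations keep parsing but the flag is hidden from --help. The real DeprecatedAction is not shown in these examples; the following is a hypothetical sketch of such an action.

import argparse
import sys

class DeprecatedAction(argparse.Action):
    # Hypothetical: accept an optional value, warn, and store nothing.
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super().__init__(option_strings, dest, nargs="?", **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        print(f"WARNING: {option_string} is deprecated and is ignored", file=sys.stderr)

parser = argparse.ArgumentParser(prog="demo")
parser.add_argument("--executor-memory", help=argparse.SUPPRESS, action=DeprecatedAction)
parser.parse_args(["--executor-memory", "4g"])  # prints the deprecation warning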
Example No. 56
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        "local-run",
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        "-s", "--service", help="The name of the service you wish to inspect"
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        "-c",
        "--cluster",
        help=(
            "The name of the cluster you wish to simulate. "
            "If omitted, uses the default cluster defined in the paasta local-run configs"
        ),
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        "-y",
        "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        help="A directory from which yelpsoa-configs should be read from",
        default=DEFAULT_SOA_DIR,
    )
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        "-b",
        "--build",
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present."
        ),
        action="store_const",
        const="build",
        dest="action",
    )
    build_pull_group.add_argument(
        "-p",
        "--pull",
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build."
        ),
        action="store_const",
        const="pull",
        dest="action",
    )
    build_pull_group.add_argument(
        "-d",
        "--dry-run",
        help="Shows the arguments supplied to docker as json.",
        action="store_const",
        const="dry_run",
        dest="action",
    )
    build_pull_group.set_defaults(action="build")
    list_parser.add_argument(
        "--json-dict",
        help="When running dry run, output the arguments as a json dict",
        action="store_true",
        dest="dry_run_json_dict",
    )
    list_parser.add_argument(
        "-C",
        "--cmd",
        help=(
            "Run Docker container with particular command, "
            'for example: "bash". By default will use the command or args specified by the '
            "soa-configs or what was specified in the Dockerfile"
        ),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        "-i",
        "--instance",
        help=(
            "Simulate a docker run for a particular instance of the service, like 'main' or 'canary'. "
            "NOTE: if you don't specify an instance, PaaSTA will run in interactive mode"
        ),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    list_parser.add_argument(
        "--date",
        default=datetime.datetime.today().strftime("%Y-%m-%d"),
        help="Date to use for interpolating date variables in a job. Defaults to use %(default)s.",
        type=parse_date,
    )
    list_parser.add_argument(
        "-v",
        "--verbose",
        help="Show Docker commands output",
        action="store_true",
        required=False,
        default=True,
    )
    list_parser.add_argument(
        "-I",
        "--interactive",
        help=(
            'Run container in interactive mode. If interactive is set the default command will be "bash" '
            'unless otherwise set by the "--cmd" flag'
        ),
        action="store_true",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "-k",
        "--no-healthcheck",
        help="Disable simulated healthcheck",
        dest="healthcheck",
        action="store_false",
        required=False,
        default=True,
    )
    list_parser.add_argument(
        "-t",
        "--healthcheck-only",
        help="Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)",
        dest="healthcheck_only",
        action="store_true",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "-o",
        "--port",
        help="Specify a port number to use. If not set, a random non-conflicting port will be found.",
        type=int,
        dest="user_port",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "--vault-auth-method",
        help="Override how we auth with vault, defaults to token if not present",
        type=str,
        dest="vault_auth_method",
        required=False,
        default="token",
        choices=["token", "ldap"],
    )
    list_parser.add_argument(
        "--vault-token-file",
        help="Override vault token file, defaults to %(default)s",
        type=str,
        dest="vault_token_file",
        required=False,
        default="/var/spool/.paasta_vault_token",
    )
    list_parser.add_argument(
        "--skip-secrets",
        help="Skip decrypting secrets, useful if running non-interactively",
        dest="skip_secrets",
        required=False,
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "--sha",
        help=(
            "SHA to run instead of the currently marked-for-deployment SHA. Ignored when used with --build."
            " Must be a version that exists in the registry, i.e. it has been built by Jenkins."
        ),
        type=str,
        dest="sha",
        required=False,
        default=None,
    )

    list_parser.set_defaults(command=paasta_local_run)
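
Each of these add_subparser functions registers its handler through set_defaults(command=...), so a top-level CLI only needs to collect the subparsers and dispatch on args.command. A minimal wiring sketch, assuming the helpers used above (lazy_choices_completer, list_services, list_clusters, list_instances, parse_date, DEFAULT_SOA_DIR, paasta_local_run) are importable; the real paasta entry point differs:

import argparse
import sys


def main(argv=None):
    parser = argparse.ArgumentParser(description="paasta-style CLI")
    subparsers = parser.add_subparsers(dest="subcommand")

    # Each command module contributes its own subparser, e.g. local-run above.
    add_subparser(subparsers)

    args = parser.parse_args(argv)
    if not hasattr(args, "command"):
        parser.print_help()
        return 1
    return args.command(args) or 0


if __name__ == "__main__":
    sys.exit(main())
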
Ejemplo n.º 57
def test_lazy_choices_completer():
    completer = utils.lazy_choices_completer(lambda: ['1', '2', '3'])
    assert completer(prefix='') == ['1', '2', '3']
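
A plausible implementation that satisfies this test (the real helper lives in the paasta utils module) defers calling the choices function until completion time and then filters by the typed prefix:

def lazy_choices_completer(list_func):
    # Return an argcomplete-style completer; list_func is only called when
    # completion is actually requested, keeping CLI startup fast.
    def completer(prefix, **kwargs):
        return [option for option in list_func() if option.startswith(prefix)]

    return completer
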
Ejemplo n.º 58
def add_subparser(subparsers):
    list_parser = subparsers.add_parser(
        'local-run',
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect',
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-c', '--cluster',
        help=("The name of the cluster you wish to simulate. "
              "If omitted, uses the default cluster defined in the paasta local-run configs"),
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='The directory from which yelpsoa-configs should be read',
        default=DEFAULT_SOA_DIR,
    )
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        '-b', '--build',
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present."
        ),
        action='store_const',
        const='build',
        dest='action',
    )
    build_pull_group.add_argument(
        '-p', '--pull',
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build."
        ),
        action='store_const',
        const='pull',
        dest='action',
    )
    build_pull_group.add_argument(
        '-d', '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_const',
        const='dry_run',
        dest='action',
    )
    build_pull_group.set_defaults(action='build')
    list_parser.add_argument(
        '--json-dict',
        help='When running dry run, output the arguments as a json dict',
        action='store_true',
        dest='dry_run_json_dict',
    )
    list_parser.add_argument(
        '-C', '--cmd',
        help=('Run the Docker container with a particular command, '
              'for example: "bash". By default, uses the command or args specified in '
              'soa-configs, or what is specified in the Dockerfile'),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        '-i', '--instance',
        help=("Simulate a docker run for a particular instance of the service, like 'main' or 'canary'"),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    list_parser.add_argument(
        '-v', '--verbose',
        help='Show Docker commands output',
        action='store_true',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-I', '--interactive',
        help=('Run container in interactive mode. If interactive is set the default command will be "bash" '
              'unless otherwise set by the "--cmd" flag'),
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-k', '--no-healthcheck',
        help='Disable simulated healthcheck',
        dest='healthcheck',
        action='store_false',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-t', '--healthcheck-only',
        help='Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)',
        dest='healthcheck_only',
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-o', '--port',
        help='Specify a port number to use. If not set, a random non-conflicting port will be found.',
        dest='user_port',
        required=False,
        default=False,
    )
    list_parser.set_defaults(command=paasta_local_run)
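
Because --build, --pull and --dry-run all use store_const with the shared dest 'action', plus a group-level set_defaults, exactly one mode string ends up on args.action. A quick illustration of how this resolves, assuming the local-run subparser above and its helpers are importable:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
add_subparser(subparsers)  # the local-run subparser defined above

assert parser.parse_args(['local-run']).action == 'build'           # group default
assert parser.parse_args(['local-run', '--pull']).action == 'pull'
assert parser.parse_args(['local-run', '--dry-run']).action == 'dry_run'
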
Ejemplo n.º 59
def add_subparser(subparsers):
    status_parser = subparsers.add_parser(
        'logs',
        help="Streams logs relevant to a service across the PaaSTA components",
        description=(
            "'paasta logs' works by streaming PaaSTA-related event messages "
            "in a human-readable way."),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    status_parser.add_argument(
        '-s',
        '--service',
        help=
        'The name of the service you wish to inspect. Defaults to autodetect.',
    ).completer = lazy_choices_completer(list_services)
    components_help = 'A comma-separated list of the components you want logs for.'
    status_parser.add_argument(
        '-C',
        '--components',
        help=components_help,
    ).completer = lazy_choices_completer(LOG_COMPONENTS.keys)
    cluster_help = 'The clusters to see relevant logs for. Defaults to all clusters to which this service is deployed.'
    status_parser.add_argument(
        '-c',
        '--clusters',
        help=cluster_help,
    ).completer = completer_clusters
    instance_help = 'The instances to see relevant logs for. Defaults to all instances for this service.'
    status_parser.add_argument(
        '-i',
        '--instances',
        help=instance_help,
    ).completer = completer_clusters
    status_parser.add_argument(
        '-f',
        '-F',
        '--tail',
        dest='tail',
        action='store_true',
        default=False,
        help='Stream the logs and follow them for new data',
    )
    status_parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        default=False,
        help='Enable verbose logging',
    )
    status_parser.add_argument(
        '-r',
        '--raw-mode',
        action='store_true',
        dest='raw_mode',
        default=False,
        help=
        "Don't pretty-print logs; emit them exactly as they are in scribe.",
    )
    status_parser.add_argument(
        '-d',
        '--soa-dir',
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )

    status_parser.add_argument(
        '-a',
        '--from',
        '--after',
        dest='time_from',
        help=
        'The time to start getting logs from. This can be an ISO-8601 timestamp or a human-readable duration '
        'parseable by pytimeparse such as "5m", "1d3h" etc. For example: --from "3m" would start retrieving logs '
        'from 3 minutes ago',
    )
    status_parser.add_argument(
        '-t',
        '--to',
        dest='time_to',
        help=
        'The time to get logs up to. This can be an ISO-8601 timestamp or a human-readable duration '
        'parseable by pytimeparse such as "5m", "1d3h" etc. Defaults to right now',
    )
    status_parser.add_argument(
        '-l',
        '-n',
        '--lines',
        dest='line_count',
        help=
        'The number of lines to retrieve from the specified offset. May optionally be prefixed with a "+" or "-" '
        'to specify which direction from the offset, defaults to "-100"',
        type=int,
    )
    status_parser.add_argument(
        '-o',
        '--line-offset',
        dest='line_offset',
        help=
        'The line offset to start grabbing logs from. For example, 1 would be the first line. Paired with '
        '--lines +100, this would give you the first 100 lines of logs. Defaults to the latest line\'s offset',
        type=int,
    )
    default_component_string = ','.join(DEFAULT_COMPONENTS)
    component_descriptions = build_component_descriptions(LOG_COMPONENTS)
    epilog = 'TIME/LINE PARAMETERS\n' \
             'The args for time-based and line-based offsetting are mutually exclusive; they cannot be used together. ' \
             'Additionally, some logging backends may not support offsetting by time or offsetting by lines.' \
             '\n' \
             '\n' \
             'COMPONENTS\n' \
             'There are many possible components of PaaSTA logs that you might be interested in:\n' \
             'Run --list-components to see all available log components.\n' \
             'If unset, the default components are:\n\t%s\n' \
             'So the default behavior of `paasta logs` will be to tail those logs.\n\n' \
             'Here is a list of all components and what they are:\n%s\n\n' \
             % (default_component_string, component_descriptions)
    status_parser.epilog = epilog
    status_parser.set_defaults(command=paasta_logs)
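
The --from/--to options above accept either an ISO-8601 timestamp or a pytimeparse-style duration. A small sketch of how such a value could be normalized into a datetime (the helper name is illustrative; the real parsing lives elsewhere in paasta):

import datetime

import pytimeparse


def parse_relative_or_absolute_time(value):
    # Hypothetical helper: durations like '5m' or '1d3h' are treated as
    # "that long ago", matching the --from help text; anything else is
    # parsed as an ISO-8601 timestamp.
    seconds = pytimeparse.parse(value)
    if seconds is not None:
        return datetime.datetime.now() - datetime.timedelta(seconds=seconds)
    return datetime.datetime.fromisoformat(value)
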