Example #1
def restart(args):
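    # Restart the selected FDS jobs: stop every chosen instance, then
    # wait for each to go down before starting it again. args.task_map
    # holds any host/instance filters from the command line (empty
    # means all); instance_id -1 marks a single-instance host.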
    if not args.skip_confirm:
        deploy_utils.confirm_restart(args)
    _get_fds_service_config(args)

    for job_name in args.job or ALL_JOBS:
        hosts = args.fds_config.jobs[job_name].hosts
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                stop_job(args, hosts[host_id].ip, job_name, instance_id)

    for job_name in args.job or ALL_JOBS:
        hosts = args.fds_config.jobs[job_name].hosts
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                deploy_utils.wait_for_job_stopping(
                    "fds", args.fds_config.cluster.name, job_name,
                    hosts[host_id].ip, instance_id)
                start_job(args, hosts[host_id].ip, job_name, host_id,
                          instance_id)
Example #2
def bootstrap(args):
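    # Bootstrap HDFS job by job. Namenodes wait until every
    # journalnode is up, and each standby namenode also waits until
    # the first (active) namenode answers on its base port.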
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts
        first = True
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                if job_name == "namenode" and not first:
                    while not deploy_utils.check_service(
                            hosts[0].ip,
                            args.hdfs_config.jobs["namenode"].base_port):
                        Log.print_warning("Wait for active namenode starting")
                        time.sleep(2)

                bootstrap_job(args, hosts[host_id].ip, job_name, host_id,
                              instance_id, first, cleanup_token)
                first = False
Example #3
def rolling_update(args):
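  # Roll the named job one instance at a time: confirm, stop, wait
  # for the stop, start, wait for the start. wait_time switches from
  # 0 to args.time_interval after the first instance.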
  if not args.job:
    Log.print_critical("You must specify the job name to do rolling update")

  get_hdfs_service_config(args)
  job_name = args.job[0]

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.hdfs_config.jobs[job_name].hosts
  wait_time = 0

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.iterkeys():
    for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)
      stop_job(args, hosts[host_id].ip, job_name, instance_id)
      deploy_utils.wait_for_job_stopping("hdfs",
        args.hdfs_config.cluster.name, job_name, hosts[host_id].ip, instance_id)
      start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
      deploy_utils.wait_for_job_starting("hdfs",
        args.hdfs_config.cluster.name, job_name, hosts[host_id].ip, instance_id)
      wait_time = args.time_interval
  Log.print_success("Rolling updating %s success" % job_name)
Example #4
def rolling_update(args):
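    # FDS version of the rolling update above: the same
    # confirm/stop/wait/start/wait cycle, one instance at a time.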
    if not args.job:
        Log.print_critical(
            "You must specify the job name to do rolling update")

    _get_fds_service_config(args)
    job_name = args.job[0]

    if not args.skip_confirm:
        deploy_utils.confirm_action(args, "rolling_update")

    Log.print_info("Rolling updating %s" % job_name)
    hosts = args.fds_config.jobs[job_name].hosts
    wait_time = 0

    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.iterkeys():
        for instance_id in args.task_map.get(host_id) or range(
                hosts[host_id].instance_num):
            instance_id = -1 if not deploy_utils.is_multiple_instances(
                host_id, hosts) else instance_id
            deploy_utils.confirm_rolling_update(host_id, instance_id,
                                                wait_time)
            stop_job(args, hosts[host_id].ip, job_name, instance_id)
            deploy_utils.wait_for_job_stopping("fds",
                                               args.fds_config.cluster.name,
                                               job_name, hosts[host_id].ip,
                                               instance_id)
            start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
            deploy_utils.wait_for_job_starting("fds",
                                               args.fds_config.cluster.name,
                                               job_name, hosts[host_id].ip,
                                               instance_id)
            wait_time = args.time_interval
    Log.print_success("Rolling updating %s success" % job_name)
Example #6
def restart(args):
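  # Restart ZooKeeper: stop every selected instance first, then wait
  # for each to go down before starting it again.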
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_zk_service_config(args)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.keys():
    for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      stop_job(args, hosts[host_id].ip, "zookeeper", instance_id)

  for host_id in args.task_map.keys() or hosts.keys():
    for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      deploy_utils.wait_for_job_stopping("zookeeper",
        args.zookeeper_config.cluster.name, "zookeeper", hosts[host_id].ip, instance_id)
      start_job(args, hosts[host_id].ip, "zookeeper", host_id, instance_id)
Example #7
def show(args):
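  # Print the status of every selected ZooKeeper instance.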
  get_zk_service_config(args)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.keys():
    for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      deploy_utils.show_job("zookeeper", args.zookeeper_config,
        hosts[host_id].ip, "zookeeper", instance_id)
Example #8
def show(args):
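    # Print the status of every instance of each selected HBase job.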
    get_hbase_service_config(args)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hbase_config.jobs[job_name].hosts
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
                deploy_utils.show_job("hbase", args.hbase_config, hosts[host_id].ip, job_name, instance_id)
Example #11
def show(args):
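  # Print the status of every instance of each selected YARN job.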
  get_yarn_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        deploy_utils.show_job("yarn", args.yarn_config,
          hosts[host_id].ip, job_name, instance_id)
Example #12
def bootstrap(args):
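  # Bootstrap every instance of each selected YARN job; the
  # confirmation returns the cleanup token bootstrap_job expects.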
  get_yarn_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token)
Example #13
def rolling_update(args):
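    # HBase rolling update. With --vacate_rs on a regionserver job,
    # the balancer is switched off, regions are moved off each server
    # before its restart and restored afterwards, and the balancer is
    # re-enabled at the end.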
    if not args.job:
        Log.print_critical(
            "You must specify the job name to do rolling update")

    if not args.skip_confirm:
        deploy_utils.confirm_action(args, "rolling_update")

    get_hbase_service_config(args)
    job_name = args.job[0]

    if job_name != 'regionserver':
        args.vacate_rs = False

    if args.vacate_rs:
        balance_switch(args, False)

    Log.print_info("Rolling updating %s" % job_name)
    hosts = args.hbase_config.jobs[job_name].hosts
    wait_time = 0

    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.iterkeys():
        for instance_id in args.task_map.get(host_id) or range(
                hosts[host_id].instance_num):
            instance_id = -1 if not deploy_utils.is_multiple_instances(
                host_id, hosts) else instance_id
            if not args.skip_confirm:
                deploy_utils.confirm_rolling_update(host_id, instance_id,
                                                    wait_time)

            port = deploy_utils.get_base_port(
                args.hbase_config.jobs[job_name].base_port, instance_id)
            if args.vacate_rs:
                vacate_region_server(args, hosts[host_id].ip, port)

            stop_job(args, hosts[host_id].ip, job_name, instance_id)
            deploy_utils.wait_for_job_stopping("hbase",
                                               args.hbase_config.cluster.name,
                                               job_name, hosts[host_id].ip,
                                               instance_id)
            start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
            deploy_utils.wait_for_job_starting("hbase",
                                               args.hbase_config.cluster.name,
                                               job_name, hosts[host_id].ip,
                                               instance_id)

            if args.vacate_rs:
                recover_region_server(args, hosts[host_id].ip, port)
            wait_time = args.time_interval

    if args.vacate_rs:
        balance_switch(args, True)
    Log.print_success("Rolling updating %s success" % job_name)
Example #14
def bootstrap(args):
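  # Bootstrap every instance of each selected Kafka job.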
  _get_kafka_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("kafka", args.kafka_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.kafka_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token)
Example #15
def bootstrap(args):
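  # Bootstrap every selected ZooKeeper instance.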
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("zookeeper", args.zookeeper_config)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.keys():
    for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      bootstrap_job(args, hosts[host_id].ip, "zookeeper", host_id, instance_id, cleanup_token)
Example #16
def cleanup(args):
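  # Wipe the data of every ZooKeeper instance. Unlike the commands
  # above, this walks all hosts directly rather than args.task_map.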
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_cleanup(args,
      "zookeeper", args.zookeeper_config)

  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  for host_id in hosts.keys():
    for instance_id in range(hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
      deploy_utils.cleanup_job("zookeeper", args.zookeeper_config,
        hosts[host_id].ip, "zookeeper", instance_id, cleanup_token)
Example #18
def stop(args):
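  # Stop every instance of each selected HDFS job after confirmation.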
  if not args.skip_confirm:
    deploy_utils.confirm_stop(args)
  get_hdfs_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        stop_job(args, hosts[host_id].ip, job_name, instance_id)
Example #19
def restart(args):
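  # Restart Kafka jobs: stop all selected instances first, then wait
  # for each to stop before starting it again.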
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  _get_kafka_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.kafka_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        stop_job(args, hosts[host_id].ip, job_name, instance_id)

  for job_name in args.job or ALL_JOBS:
    hosts = args.kafka_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        deploy_utils.wait_for_job_stopping("kafka",
          args.kafka_config.cluster.name, job_name, hosts[host_id].ip, instance_id)
        start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
Example #20
def cleanup(args):
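  # Wipe the data of every instance of each selected YARN job.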
  get_yarn_service_config(args)

  cleanup_token = deploy_utils.confirm_cleanup(args,
      "yarn", args.yarn_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        deploy_utils.cleanup_job("yarn", args.yarn_config,
          hosts[host_id].ip, job_name, instance_id, cleanup_token)
Example #21
def stop(args):
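    # Stop HBase jobs. With no job named, ALL_JOBS is walked in
    # reverse so jobs go down in the opposite of their start order.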
    if not args.skip_confirm:
        deploy_utils.confirm_stop(args)
    get_hbase_service_config(args)

    for job_name in args.job or reversed(ALL_JOBS):
        hosts = args.hbase_config.jobs[job_name].hosts
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                stop_job(args, hosts[host_id].ip, job_name, instance_id)
Example #22
def cleanup(args):
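    # Wipe the data of every instance of each selected FDS job.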
    _get_fds_service_config(args)

    cleanup_token = deploy_utils.confirm_cleanup(args, "fds", args.fds_config)
    for job_name in args.job or ALL_JOBS:
        hosts = args.fds_config.jobs[job_name].hosts
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                deploy_utils.cleanup_job("fds", args.fds_config,
                                         hosts[host_id].ip, job_name,
                                         instance_id, cleanup_token)
Example #23
def rolling_update(args):
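    # Same HBase rolling update as Example #13, from a source file
    # with different formatting.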
    if not args.job:
        Log.print_critical("You must specify the job name to do rolling update")

    if not args.skip_confirm:
        deploy_utils.confirm_action(args, "rolling_update")

    get_hbase_service_config(args)
    job_name = args.job[0]

    if job_name != "regionserver":
        args.vacate_rs = False

    if args.vacate_rs:
        balance_switch(args, False)

    Log.print_info("Rolling updating %s" % job_name)
    hosts = args.hbase_config.jobs[job_name].hosts
    wait_time = 0

    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.iterkeys():
        for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
            instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
            if not args.skip_confirm:
                deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)

            port = deploy_utils.get_base_port(args.hbase_config.jobs[job_name].base_port, instance_id)
            if args.vacate_rs:
                vacate_region_server(args, hosts[host_id].ip, port)

            stop_job(args, hosts[host_id].ip, job_name, instance_id)
            deploy_utils.wait_for_job_stopping(
                "hbase", args.hbase_config.cluster.name, job_name, hosts[host_id].ip, instance_id
            )
            start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
            deploy_utils.wait_for_job_starting(
                "hbase", args.hbase_config.cluster.name, job_name, hosts[host_id].ip, instance_id
            )

            if args.vacate_rs:
                recover_region_server(args, hosts[host_id].ip, port)
            wait_time = args.time_interval

    if args.vacate_rs:
        balance_switch(args, True)
    Log.print_success("Rolling updating %s success" % job_name)
Example #24
def cleanup(args):
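  # Clean up every HDFS job. `first` is passed to cleanup_job and is
  # cleared once a zkfc instance has been cleaned.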
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_cleanup(args,
      "hdfs", args.hdfs_config)

  first = True
  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts

    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        cleanup_job(args, hosts[host_id].ip, job_name, host_id, instance_id, first, cleanup_token)
        if job_name == "zkfc":
          first = False
Example #25
def cleanup(args):
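    # PEP 8-formatted variant of the HDFS cleanup in Example #24.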
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_cleanup(args, "hdfs",
                                                 args.hdfs_config)

    first = True
    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts

        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                cleanup_job(args, hosts[host_id].ip, job_name, host_id,
                            instance_id, first, cleanup_token)
                if job_name == "zkfc":
                    first = False
Example #26
def bootstrap(args):
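  # Two-space-indented variant of the HDFS bootstrap in Example #2.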
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    first = True
    if job_name == "namenode":
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        if job_name == "namenode" and not first:
          while not deploy_utils.check_service(hosts[0].ip,
              args.hdfs_config.jobs["namenode"].base_port):
            Log.print_warning("Wait for active namenode starting")
            time.sleep(2)

        bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, first, cleanup_token)
        first = False