Example #1
0
def bootstrap(args):
    """Bootstrap the selected hdfs jobs on the selected hosts/instances.

    Ordering constraints enforced here: all journalnodes must report
    started before any namenode bootstrap begins, and every namenode
    after the first waits until the service at hosts[0].ip answers on
    the namenode base port (presumably the active namenode — confirm
    against the deploy flow).
    """
    get_hdfs_service_config(args)

    # Interactive confirmation; bootstrap is destructive.
    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts
        first = True
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        # Fall back to all hosts / all instances when nothing was selected.
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                # -1 is the sentinel for a single-instance host.
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                if job_name == "namenode" and not first:
                    while not deploy_utils.check_service(
                            hosts[0].ip,
                            args.hdfs_config.jobs["namenode"].base_port):
                        Log.print_warning("Wait for active namenode starting")
                        time.sleep(2)

                bootstrap_job(args, hosts[host_id].ip, job_name, host_id,
                              instance_id, first, cleanup_token)
                first = False
Example #2
0
def bootstrap(args):
    """Bootstrap the selected yarn jobs on the selected tasks.

    Args:
      args: parsed command-line arguments; args.job limits the job names
        and args.task limits the task ids (falsy means "all").
    """
    get_yarn_service_config(args)
    cleanup_token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.yarn_config.jobs[job_name].hosts
        # Renamed loop variable from 'id' to avoid shadowing the builtin.
        for task_id in args.task or hosts.iterkeys():
            bootstrap_job(args, hosts[task_id], job_name, cleanup_token)
Example #3
0
def bootstrap(args):
  """Bootstrap the zookeeper job on every selected task."""
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("zookeeper", args.zookeeper_config)

  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  # Empty --task selection means bootstrap every configured task.
  task_ids = args.task if args.task else hosts.iterkeys()
  for task_id in task_ids:
    bootstrap_job(args, hosts[task_id], "zookeeper", task_id, cleanup_token)
Example #4
0
def bootstrap(args):
  """Run the bootstrap step for each selected yarn job and task."""
  get_yarn_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)

  job_names = args.job if args.job else ALL_JOBS
  for job_name in job_names:
    job_hosts = args.yarn_config.jobs[job_name].hosts
    for task_id in (args.task or job_hosts.iterkeys()):
      bootstrap_job(args, job_hosts[task_id], job_name, cleanup_token)
Example #5
0
def bootstrap(args):
  """Bootstrap zookeeper on the tasks selected via --task (default: all)."""
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("zookeeper", args.zookeeper_config)

  zk_hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  for task_id in (args.task or zk_hosts.iterkeys()):
    bootstrap_job(args, zk_hosts[task_id], "zookeeper", task_id, cleanup_token)
Example #6
0
def bootstrap(args):
  """Bootstrap yarn jobs, delegating per-task work to deploy threads."""
  get_yarn_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)

  for job_name in (args.job or ALL_JOBS):
    job_hosts = args.yarn_config.jobs[job_name].hosts
    # Schedule the per-task bootstrap work, then fan it out to threads.
    tasks = deploy_utils.schedule_task_for_threads(
        args, job_hosts, job_name, 'bootstrap', cleanup_token=cleanup_token)
    parallel_deploy.start_deploy_threads(bootstrap_job, tasks)
Example #7
0
def bootstrap(args):
  """Bootstrap the selected kafka jobs on the selected hosts/instances."""
  _get_kafka_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("kafka", args.kafka_config)

  for job_name in (args.job or ALL_JOBS):
    hosts = args.kafka_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    host_ids = args.task_map.keys() or hosts.keys()
    for host_id in host_ids:
      instance_ids = args.task_map.get(host_id) or range(hosts[host_id].instance_num)
      for instance_id in instance_ids:
        if not deploy_utils.is_multiple_instances(host_id, hosts):
          # Single-instance hosts are flagged with the sentinel -1.
          instance_id = -1
        bootstrap_job(args, hosts[host_id].ip, job_name, host_id,
                      instance_id, cleanup_token)
def bootstrap(args):
  """Bootstrap zookeeper on every selected host/instance."""
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("zookeeper", args.zookeeper_config)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in (args.task_map.keys() or hosts.keys()):
    instances = args.task_map.get(host_id) or range(hosts[host_id].instance_num)
    for instance_id in instances:
      if not deploy_utils.is_multiple_instances(host_id, hosts):
        # -1 marks a single-instance host.
        instance_id = -1
      bootstrap_job(args, hosts[host_id].ip, "zookeeper", host_id,
                    instance_id, cleanup_token)
Example #9
0
def bootstrap(args):
  """Bootstrap each selected yarn job on each selected host/instance."""
  get_yarn_service_config(args)
  cleanup_token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)

  for job_name in (args.job or ALL_JOBS):
    hosts = args.yarn_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in (args.task_map.keys() or hosts.keys()):
      selected = args.task_map.get(host_id)
      for instance_id in (selected or range(hosts[host_id].instance_num)):
        if not deploy_utils.is_multiple_instances(host_id, hosts):
          instance_id = -1  # sentinel for a single-instance host
        bootstrap_job(args, hosts[host_id].ip, job_name, host_id,
                      instance_id, cleanup_token)
Example #10
0
def bootstrap(args):
  """Bootstrap the zookeeper job across the selected hosts and instances."""
  get_zk_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("zookeeper", args.zookeeper_config)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  host_ids = args.task_map.keys() or hosts.keys()
  for host_id in host_ids:
    for instance_id in (args.task_map.get(host_id)
                        or range(hosts[host_id].instance_num)):
      if not deploy_utils.is_multiple_instances(host_id, hosts):
        instance_id = -1  # single-instance host sentinel
      bootstrap_job(args, hosts[host_id].ip, "zookeeper", host_id,
                    instance_id, cleanup_token)
Example #11
0
def bootstrap(args):
    """Bootstrap hdfs jobs with threaded per-task deployment.

    For the namenode job, blocks until every journalnode reports started
    before scheduling any namenode bootstrap work.
    """
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in (args.job or ALL_JOBS):
        job_hosts = args.hdfs_config.jobs[job_name].hosts
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        tasks = deploy_utils.schedule_task_for_threads(
            args, job_hosts, job_name, 'bootstrap', cleanup_token=cleanup_token)
        parallel_deploy.start_deploy_threads(bootstrap_job, tasks)
Example #12
0
def bootstrap(args):
  """Bootstrap hdfs jobs, delegating per-task work to deploy threads."""
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in (args.job or ALL_JOBS):
    job_hosts = args.hdfs_config.jobs[job_name].hosts
    if job_name == "namenode":
      # Journalnodes must all be running before namenode bootstrap.
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)
    tasks = deploy_utils.schedule_task_for_threads(
        args, job_hosts, job_name, 'bootstrap', cleanup_token=cleanup_token)
    parallel_deploy.start_deploy_threads(bootstrap_job, tasks)
Example #13
0
def bootstrap(args):
    """Bootstrap the selected hdfs jobs on the selected tasks.

    Namenode ordering enforced here: all journalnodes must report started
    before any namenode bootstrap, and every namenode after the first
    waits until the service at hosts[0] answers on the namenode base port
    (presumably the active namenode — confirm against the deploy flow).

    Args:
      args: parsed command-line arguments; args.job limits the job names
        and args.task limits the task ids (falsy means "all").
    """
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts
        first = True
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)

        # Renamed loop variable from 'id' to avoid shadowing the builtin.
        for task_id in args.task or hosts.iterkeys():
            if job_name == "namenode" and not first:
                while not deploy_utils.check_service(
                        hosts[0], args.hdfs_config.jobs["namenode"].base_port):
                    Log.print_warning("Wait for active namenode starting")
                    time.sleep(2)

            bootstrap_job(args, hosts[task_id], job_name, first, cleanup_token)
            first = False
Example #14
0
def bootstrap(args):
  """Bootstrap each selected hdfs job on each selected task."""
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in (args.job or ALL_JOBS):
    hosts = args.hdfs_config.jobs[job_name].hosts
    is_first = True
    if job_name == "namenode":
      # Wait until every journalnode reports started.
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)

    for id in (args.task or hosts.iterkeys()):
      if job_name == "namenode" and not is_first:
        # Later namenodes wait for the service on hosts[0] to answer.
        while not deploy_utils.check_service(hosts[0],
            args.hdfs_config.jobs["namenode"].base_port):
          Log.print_warning("Wait for active namenode starting")
          time.sleep(2)

      bootstrap_job(args, hosts[id], job_name, is_first, cleanup_token)
      is_first = False
Example #15
0
def bootstrap(args):
  """Bootstrap the selected hdfs jobs on the selected hosts/instances.

  Ordering constraints enforced here: all journalnodes must report
  started before any namenode bootstrap, and every namenode after the
  first waits until the service at hosts[0].ip answers on the namenode
  base port (presumably the active namenode — confirm against the
  deploy flow).
  """
  get_hdfs_service_config(args)

  # Interactive confirmation; bootstrap is destructive.
  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    first = True
    if job_name == "namenode":
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    # Fall back to all hosts / all instances when nothing was selected.
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        # -1 is the sentinel for a single-instance host.
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        if job_name == "namenode" and not first:
          while not deploy_utils.check_service(hosts[0].ip,
              args.hdfs_config.jobs["namenode"].base_port):
            Log.print_warning("Wait for active namenode starting")
            time.sleep(2)

        bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, first, cleanup_token)
        first = False