Example 1
def check_journalnode_all_started(args):
  job = args.hdfs_config.jobs["journalnode"]
  hosts = job.hosts
  for id in hosts.iterkeys():
    if not deploy_utils.check_service(hosts[id], job.base_port):
      return False
  return True
Example 2
def check_journalnode_all_started(args):
    job = args.hdfs_config.jobs["journalnode"]
    hosts = job.hosts
    for id in hosts.iterkeys():
        if not deploy_utils.check_service(hosts[id], job.base_port):
            return False
    return True
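
Examples 1 and 2 poll deploy_utils.check_service(host, port) for every journalnode host until each one responds. The real helper lives in the deployment tool's deploy_utils module; as a purely illustrative stand-in (an assumption, not the project's actual implementation), the check can be read as a plain TCP connect:

import socket

def check_service(host, port, timeout=2):
    # Illustrative stand-in for deploy_utils.check_service: returns True if
    # something is accepting TCP connections on host:port, False otherwise.
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        sock.close()
        return True
    except (socket.error, socket.timeout):
        return False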
Example 3
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token,
                  active):
    if job_name == "namenode" and not active:
        hosts = args.hdfs_config.jobs[job_name].hosts
        while not deploy_utils.check_service(
                hosts[0].ip, args.hdfs_config.jobs["namenode"].base_port):
            Log.print_warning("Wait for active namenode starting")
            time.sleep(2)

    # parse the service_config according to the instance_id
    args.hdfs_config.parse_generated_config_files(args, job_name, host_id,
                                                  instance_id)
    data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)
    config_files = generate_configs(args, host, job_name, instance_id)
    if job_name == "namenode" or job_name == "zkfc":
        bootstrap_script = generate_bootstrap_script(args, host, job_name,
                                                     host_id, instance_id,
                                                     active)
        deploy_utils.bootstrap_job(args, "hadoop", "hdfs", args.hdfs_config,
                                   host, job_name, instance_id, cleanup_token,
                                   data_dir_indexes, bootstrap_script,
                                   **config_files)
    else:
        deploy_utils.bootstrap_job(args, "hadoop", "hdfs", args.hdfs_config,
                                   host, job_name, instance_id, cleanup_token,
                                   data_dir_indexes, '', **config_files)
    # start job after bootstrapping
    args.skip_gen_config_files = True
    start_job(args, host, job_name, host_id, instance_id)
Example 4
def bootstrap(args):
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts
        first = True
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
        for host_id in args.task_map.keys() or hosts.keys():
            for instance_id in args.task_map.get(host_id) or range(
                    hosts[host_id].instance_num):
                instance_id = -1 if not deploy_utils.is_multiple_instances(
                    host_id, hosts) else instance_id
                if job_name == "namenode" and not first:
                    while not deploy_utils.check_service(
                            hosts[0].ip,
                            args.hdfs_config.jobs["namenode"].base_port):
                        Log.print_warning("Wait for active namenode starting")
                        time.sleep(2)

                bootstrap_job(args, hosts[host_id].ip, job_name, host_id,
                              instance_id, cleanup_token, first)
                first = False
Example 5
def check_journalnode_all_started(args):
  job = args.hdfs_config.jobs["journalnode"]
  hosts = job.hosts
  for host_id in hosts.iterkeys():
    for instance_id in range(hosts[host_id].instance_num):
      if not deploy_utils.check_service(hosts[host_id].ip,
        service_config.get_base_port(job.base_port, instance_id)):
        return False
  return True
Example 6
def check_journalnode_all_started(args):
    job = args.hdfs_config.jobs["journalnode"]
    hosts = job.hosts
    for host_id in hosts.iterkeys():
        for instance_id in range(hosts[host_id].instance_num):
            if not deploy_utils.check_service(
                    hosts[host_id].ip,
                    service_config.get_base_port(job.base_port, instance_id)):
                return False
    return True
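
Examples 5 and 6 extend the same check to hosts that run several journalnode instances, deriving a per-instance port through service_config.get_base_port(job.base_port, instance_id). The actual spacing between instance ports is project-specific; a minimal sketch, assuming instances are simply offset from the job's base port by a fixed (hypothetical) amount:

def get_base_port(base_port, instance_id, ports_per_instance=10):
    # Hypothetical sketch of service_config.get_base_port: each instance gets
    # its own block of ports, so instance 0 listens on base_port, instance 1 on
    # base_port + ports_per_instance, and so on. The real spacing may differ.
    if instance_id < 0:
        # single-instance hosts are marked with instance_id == -1 in these examples
        return base_port
    return base_port + instance_id * ports_per_instance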
Example 7
def bootstrap(args):
    get_hdfs_service_config(args)

    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

    for job_name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[job_name].hosts
        first = True
        if job_name == "namenode":
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)

        for id in args.task or hosts.iterkeys():
            if job_name == "namenode" and not first:
                while not deploy_utils.check_service(
                        hosts[0], args.hdfs_config.jobs["namenode"].base_port):
                    Log.print_warning("Wait for active namenode starting")
                    time.sleep(2)

            bootstrap_job(args, hosts[id], job_name, first, cleanup_token)
            first = False
Example 8
def bootstrap(args):
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    first = True
    if job_name == "namenode":
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)

    for id in args.task or hosts.iterkeys():
      if job_name == "namenode" and not first:
        while not deploy_utils.check_service(hosts[0],
            args.hdfs_config.jobs["namenode"].base_port):
          Log.print_warning("Wait for active namenode starting")
          time.sleep(2)

      bootstrap_job(args, hosts[id], job_name, first, cleanup_token)
      first = False
Example 9
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
  if job_name == "namenode" and not active:
    hosts = args.hdfs_config.jobs[job_name].hosts
    while not deploy_utils.check_service(hosts[0].ip,
      args.hdfs_config.jobs["namenode"].base_port):
      Log.print_warning("Wait for active namenode starting")
      time.sleep(2)

  # parse the service_config according to the instance_id
  args.hdfs_config.parse_generated_config_files(args, job_name, host_id, instance_id)
  data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)
  config_files = generate_configs(args, host, job_name, instance_id)
  if job_name == "namenode" or job_name == "zkfc":
    bootstrap_script = generate_bootstrap_script(args, host, job_name, host_id, instance_id, active)
    deploy_utils.bootstrap_job(args, "hadoop", "hdfs", args.hdfs_config,
        host, job_name, instance_id, cleanup_token, data_dir_indexes, bootstrap_script,
        **config_files)
  else:
    deploy_utils.bootstrap_job(args, "hadoop", "hdfs", args.hdfs_config,
        host, job_name, instance_id, cleanup_token, data_dir_indexes, '', **config_files)
  # start job after bootstrapping
  args.skip_gen_config_files = True
  start_job(args, host, job_name, host_id, instance_id)
Example 10
def bootstrap(args):
  get_hdfs_service_config(args)

  cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    first = True
    if job_name == "namenode":
      while not check_journalnode_all_started(args):
        Log.print_warning("Wait for journalnode starting")
        time.sleep(2)
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
        if job_name == "namenode" and not first:
          while not deploy_utils.check_service(hosts[0].ip,
              args.hdfs_config.jobs["namenode"].base_port):
            Log.print_warning("Wait for active namenode starting")
            time.sleep(2)

        bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token, first)
        first = False