def bootstrap_job(args, host, job_name, host_id, cleanup_token):
  """Bootstrap a zookeeper job on one host, then start it.

  Generates the host-specific bootstrap script, hands it to the shared
  deploy_utils bootstrap routine, and finally starts the job.
  """
  script = generate_bootstrap_script(args, host, job_name, host_id)
  deploy_utils.bootstrap_job(
      args, "zookeeper", "zookeeper", args.zookeeper_config,
      host, job_name, cleanup_token, '0', script)
  # Start the freshly bootstrapped job right away.
  start_job(args, host, job_name)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):
  """Bootstrap one hbase job instance on a host and start it."""
  # Re-parse the generated service config for this particular instance.
  args.hbase_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  deploy_utils.bootstrap_job(
      args, "hbase", "hbase", args.hbase_config,
      host, job_name, instance_id, cleanup_token, "0")
  # Start the job once bootstrapping is done.
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
  """Bootstrap one hdfs job instance and start it.

  A standby namenode (job_name == "namenode" and not active) blocks until
  the active namenode is reachable before bootstrapping itself.
  """
  if job_name == "namenode" and not active:
    # Poll the first (active) namenode host until its service port answers.
    namenode_hosts = args.hdfs_config.jobs[job_name].hosts
    while not deploy_utils.check_service(
        namenode_hosts[0].ip, args.hdfs_config.jobs["namenode"].base_port):
      Log.print_warning("Wait for active namenode starting")
      time.sleep(2)

  # Re-parse the generated service config for this particular instance.
  args.hdfs_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)
  config_files = generate_configs(args, host, job_name, instance_id)

  if job_name in ("namenode", "zkfc"):
    # namenode/zkfc need an extra host-specific bootstrap script.
    script = generate_bootstrap_script(
        args, host, job_name, host_id, instance_id, active)
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, script, **config_files)
  else:
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, '', **config_files)

  # Config files were just generated; tell start_job not to regenerate them.
  args.skip_gen_config_files = True
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):
  """Bootstrap one fds (galaxy) job instance on a host and start it."""
  # Re-parse the generated service config for this particular instance.
  args.fds_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  deploy_utils.bootstrap_job(
      args, "galaxy", "fds", args.fds_config,
      host, job_name, instance_id, cleanup_token, '0')
  # Start the job once bootstrapping is done.
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):
  """Bootstrap one zookeeper job instance on a host and start it."""
  # Re-parse the generated service config for this particular instance.
  args.zookeeper_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  script = generate_bootstrap_script(
      args, host, job_name, host_id, instance_id)
  deploy_utils.bootstrap_job(
      args, "zookeeper", "zookeeper", args.zookeeper_config,
      host, job_name, instance_id, cleanup_token, '0', script)
  # Start the freshly bootstrapped job right away.
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, active, cleanup_token):
  """Bootstrap an hdfs job on a host and start it.

  namenode/zkfc jobs additionally get a generated bootstrap script;
  other jobs are bootstrapped with an empty script argument.
  """
  data_dir_indexes = get_data_dir_indexes(args, job_name, host)
  config_files = generate_configs(args, host, job_name)

  if job_name in ("namenode", "zkfc"):
    script = generate_bootstrap_script(args, host, job_name, active)
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        cleanup_token, data_dir_indexes, script, **config_files)
  else:
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        cleanup_token, data_dir_indexes, '', **config_files)

  # Config files were just generated; tell start_job not to regenerate them.
  args.skip_gen_config_files = True
  start_job(args, host, job_name)
def bootstrap_job(args, host, job_name, host_id, instance_id, active, cleanup_token):
  """Bootstrap one hdfs job instance on a host and start it.

  namenode/zkfc jobs additionally get a generated bootstrap script;
  other jobs are bootstrapped with an empty script argument.
  """
  # Re-parse the generated service config for this particular instance.
  args.hdfs_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)
  config_files = generate_configs(args, host, job_name, instance_id)

  if job_name in ("namenode", "zkfc"):
    script = generate_bootstrap_script(
        args, host, job_name, host_id, instance_id, active)
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, script, **config_files)
  else:
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, '', **config_files)

  # Config files were just generated; tell start_job not to regenerate them.
  args.skip_gen_config_files = True
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
  """Bootstrap one hdfs job instance and start it.

  A standby namenode (job_name == "namenode" and not active) blocks until
  the active namenode is reachable before bootstrapping itself.
  """
  if job_name == "namenode" and not active:
    # Poll the first (active) namenode host until its service port answers.
    namenode_hosts = args.hdfs_config.jobs[job_name].hosts
    while not deploy_utils.check_service(
        namenode_hosts[0].ip, args.hdfs_config.jobs["namenode"].base_port):
      Log.print_warning("Wait for active namenode starting")
      time.sleep(2)

  # Re-parse the generated service config for this particular instance.
  args.hdfs_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)
  config_files = generate_configs(args, host, job_name, instance_id)

  if job_name in ("namenode", "zkfc"):
    # namenode/zkfc need an extra host-specific bootstrap script.
    script = generate_bootstrap_script(
        args, host, job_name, host_id, instance_id, active)
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, script, **config_files)
  else:
    deploy_utils.bootstrap_job(
        args, "hadoop", "hdfs", args.hdfs_config, host, job_name,
        instance_id, cleanup_token, data_dir_indexes, '', **config_files)

  # Config files were just generated; tell start_job not to regenerate them.
  args.skip_gen_config_files = True
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, cleanup_token):
  """Bootstrap an hbase job on a host, then start it."""
  deploy_utils.bootstrap_job(
      args, "hbase", "hbase", args.hbase_config,
      host, job_name, cleanup_token, '0')
  start_job(args, host, job_name)
def bootstrap_job(args, host, job_name, cleanup_token):
  """Bootstrap an impala job on a host, then start it."""
  deploy_utils.bootstrap_job(
      args, "impala", "impala", args.impala_config,
      host, job_name, cleanup_token, '0')
  start_job(args, host, job_name)
def bootstrap_job(args, host, job_name, cleanup_token):
  """Bootstrap a yarn job on a host, then start it."""
  deploy_utils.bootstrap_job(
      args, "hadoop", "yarn", args.yarn_config,
      host, job_name, cleanup_token, '0')
  start_job(args, host, job_name)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
  """Bootstrap one yarn job instance on a host and start it.

  NOTE(review): the `active` parameter is accepted but never used here —
  presumably kept for signature uniformity with the hdfs variant; confirm.
  """
  # Re-parse the generated service config for this particular instance.
  args.yarn_config.parse_generated_config_files(
      args, job_name, host_id, instance_id)
  deploy_utils.bootstrap_job(
      args, "hadoop", "yarn", args.yarn_config,
      host, job_name, instance_id, cleanup_token, '0')
  start_job(args, host, job_name, host_id, instance_id)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):
  """Bootstrap one mapreduce job instance on a host.

  NOTE(review): unlike the sibling services, this neither re-parses the
  per-instance config nor starts the job afterwards — verify whether that
  is intentional or an omission.
  """
  deploy_utils.bootstrap_job(
      args, "hadoop", "mapreduce", args.mapreduce_config,
      host, job_name, instance_id, cleanup_token, '0')