def run_peloton_hostmgr(config, enable_k8s=False):
    """Launch the configured number of Peloton hostmgr containers.

    When *enable_k8s* is set, mounts the kind kubeconfig into each
    container and points hostmgr at the local k8s cluster.
    """
    extra_env = {
        "SCARCE_RESOURCE_TYPES": ",".join(config["scarce_resource_types"]),
        "SLACK_RESOURCE_TYPES": ",".join(config["slack_resource_types"]),
    }
    mounts = []
    if enable_k8s:
        # Expose the kind cluster's kubeconfig at /.kube inside the container.
        k8s_cluster = kind.Kind(PELOTON_K8S_NAME)
        kubeconfig_dir = os.path.dirname(k8s_cluster.get_kubeconfig())
        mounts = [kubeconfig_dir + ":/.kube"]
        extra_env["ENABLE_K8S"] = True
        extra_env["KUBECONFIG"] = "/.kube/kind-config-peloton-k8s"
    for instance in range(config["peloton_hostmgr_instance_count"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + instance * 10 for p in config["peloton_hostmgr_ports"]]
        name = config["peloton_hostmgr_container"] + repr(instance)
        utils.remove_existing_container(name)
        start_and_wait(
            "hostmgr",
            name,
            ports,
            config,
            extra_env=extra_env,
            mounts=mounts,
        )
def run_peloton_placement(config, enable_k8s=False):
    """Launch one placement-engine container per configured task type."""
    for idx, task_type in enumerate(config["peloton_placement_instances"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_placement_ports"]]
        name = config["peloton_placement_container"] + repr(idx)
        utils.remove_existing_container(name)
        # The BATCH engine keeps the legacy app name; other task types get
        # a lowercase suffix (e.g. placement_stateless).
        app_type = (
            'placement' if task_type == 'BATCH'
            else 'placement_' + task_type.lower()
        )
        env = {
            "APP_TYPE": app_type,
            "TASK_TYPE": task_type,
        }
        if enable_k8s:
            env["HOSTMGR_API_VERSION"] = "v1alpha"
        start_and_wait(
            "placement",
            name,
            ports,
            config,
            extra_env=env,
        )
def run_cassandra(config):
    """Start a single Cassandra container and create the Peloton store.

    Removes any stale container with the same name, pulls the configured
    image, starts it with a custom entrypoint script, then calls
    create_cassandra_store(config) to set up the keyspace/store.
    """
    utils.remove_existing_container(config["cassandra_container"])
    cli.pull(config["cassandra_image"])
    container = cli.create_container(
        name=config["cassandra_container"],
        hostname=config["cassandra_container"],
        host_config=cli.create_host_config(
            # Publish CQL and thrift ports on the same host ports.
            port_bindings={
                config["cassandra_cql_port"]: config["cassandra_cql_port"],
                config["cassandra_thrift_port"]: config[
                    "cassandra_thrift_port"
                ],
            },
            # Expose helper scripts under <work_dir>/files at /files.
            binds=[work_dir + "/files:/files"],
        ),
        # Small JVM heap — this is a single-node dev/test cluster.
        environment=["MAX_HEAP_SIZE=1G", "HEAP_NEWSIZE=256M"],
        image=config["cassandra_image"],
        detach=True,
        entrypoint="bash /files/run_cassandra_with_stratio_index.sh",
    )
    cli.start(container=container.get("Id"))
    print_utils.okgreen("started container %s" % config["cassandra_container"])

    # Create cassandra store
    create_cassandra_store(config)
def run_peloton_aurorabridge(config):
    """Launch the configured number of aurorabridge containers."""
    instance_count = config["peloton_aurorabridge_instance_count"]
    for idx in range(instance_count):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_aurorabridge_ports"]]
        name = config["peloton_aurorabridge_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait("aurorabridge", name, ports, config)
def run_peloton_apiproxy(config, enable_k8s=False):
    """Launch the configured number of apiproxy containers.

    NOTE(review): *enable_k8s* is accepted for signature parity with the
    other run_* helpers but is currently unused here.
    """
    instance_count = config["peloton_apiproxy_instance_count"]
    for idx in range(instance_count):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_apiproxy_ports"]]
        name = config["peloton_apiproxy_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait("apiproxy", name, ports, config)
def run_peloton_resmgr(config):
    """Launch the configured number of resmgr containers."""
    # TODO: move docker run logic into a common function for all apps to share
    for idx in range(config["peloton_resmgr_instance_count"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_resmgr_ports"]]
        name = config["peloton_resmgr_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait("resmgr", name, ports, config)
def teardown(stop=False):
    """Tear down the whole minicluster.

    stop: when True, stop the containers instead of removing them.

    Relies on the module-level `config` dict for container names and
    instance counts.
    """
    # Stop containers when asked; otherwise remove them entirely.
    func = utils.stop_container if stop else utils.remove_existing_container

    # (container-name prefix, instance count) in teardown order:
    # jobmgr, placement, resmgr, hostmgr, archiver, aurorabridge, apiproxy.
    components = [
        (config["peloton_jobmgr_container"],
         config["peloton_jobmgr_instance_count"]),
        (config["peloton_placement_container"],
         len(config["peloton_placement_instances"])),
        (config["peloton_resmgr_container"],
         config["peloton_resmgr_instance_count"]),
        (config["peloton_hostmgr_container"],
         config["peloton_hostmgr_instance_count"]),
        (config["peloton_archiver_container"],
         config["peloton_archiver_instance_count"]),
        (config["peloton_aurorabridge_container"],
         config["peloton_aurorabridge_instance_count"]),
        (config["peloton_apiproxy_container"],
         config["peloton_apiproxy_instance_count"]),
    ]
    for prefix, count in components:
        for idx in range(count):
            func(prefix + repr(idx))

    minicluster.teardown_mesos(config)
    minicluster.teardown_k8s()

    utils.remove_existing_container(config["cassandra_container"])
    print_utils.okgreen("teardown complete!")
def run_peloton_placement(config):
    """Launch one placement-engine container per configured task type."""
    for idx, task_type in enumerate(config["peloton_placement_instances"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_placement_ports"]]
        name = config["peloton_placement_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait(
            "placement",
            name,
            ports,
            config,
            extra_env={"TASK_TYPE": task_type},
        )
def run_peloton_jobmgr(config):
    """Launch the configured number of jobmgr containers."""
    for idx in range(config["peloton_jobmgr_instance_count"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_jobmgr_ports"]]
        name = config["peloton_jobmgr_container"] + repr(idx)
        utils.remove_existing_container(name)
        env = {
            "MESOS_AGENT_WORK_DIR": config["work_dir"],
            # Job type can be overridden via the environment; default BATCH.
            "JOB_TYPE": os.getenv("JOB_TYPE", "BATCH"),
        }
        start_and_wait("jobmgr", name, ports, config, extra_env=env)
def run_peloton_hostmgr(config):
    """Launch the configured number of hostmgr containers.

    Each instance gets its ports offset by 10 * instance index to avoid
    port conflicts among apps.
    """
    # The resource-type env values are loop-invariant: hoist them out of
    # the per-instance loop (matches the enable_k8s variant of this
    # function, which already computes them once).
    scarce_resource = ",".join(config["scarce_resource_types"])
    slack_resource = ",".join(config["slack_resource_types"])
    for i in range(config["peloton_hostmgr_instance_count"]):
        # to not cause port conflicts among apps, increase port
        # by 10 for each instance
        ports = [port + i * 10 for port in config["peloton_hostmgr_ports"]]
        name = config["peloton_hostmgr_container"] + repr(i)
        utils.remove_existing_container(name)
        start_and_wait(
            "hostmgr",
            name,
            ports,
            config,
            extra_env={
                "SCARCE_RESOURCE_TYPES": scarce_resource,
                "SLACK_RESOURCE_TYPES": slack_resource,
            },
        )
def run_peloton_resmgr(config, enable_k8s=False):
    """Launch the configured number of resmgr containers.

    When *enable_k8s* is set, resmgr talks to hostmgr over the v1alpha API.
    """
    extra_env = {"HOSTMGR_API_VERSION": "v1alpha"} if enable_k8s else {}
    # TODO: move docker run logic into a common function for all apps to share
    for idx in range(config["peloton_resmgr_instance_count"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_resmgr_ports"]]
        name = config["peloton_resmgr_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait(
            "resmgr",
            name,
            ports,
            config,
            extra_env=extra_env,
        )
def run_peloton_jobmgr(config, enable_k8s=False):
    """Launch the configured number of jobmgr containers.

    When *enable_k8s* is set, jobmgr talks to hostmgr over the v1alpha API.
    """
    extra_env = {
        "MESOS_AGENT_WORK_DIR": config["work_dir"],
        # Job type can be overridden via the environment; default BATCH.
        "JOB_TYPE": os.getenv("JOB_TYPE", "BATCH"),
    }
    if enable_k8s:
        extra_env["HOSTMGR_API_VERSION"] = "v1alpha"
    for idx in range(config["peloton_jobmgr_instance_count"]):
        # Offset ports by 10 per instance to avoid conflicts among apps.
        ports = [p + idx * 10 for p in config["peloton_jobmgr_ports"]]
        name = config["peloton_jobmgr_container"] + repr(idx)
        utils.remove_existing_container(name)
        start_and_wait(
            "jobmgr",
            name,
            ports,
            config,
            extra_env=extra_env,
        )
def teardown_mesos(config):
    """Remove all Mesos-related containers: agents, master, orphans, ZK."""
    # Agents first: the regular pool, then the exclusive pool (optional).
    for idx in range(config["num_agents"]):
        teardown_mesos_agent(config, idx)
    for idx in range(config.get("num_exclusive_agents", 0)):
        teardown_mesos_agent(config, idx, is_exclusive=True)

    # Then the master itself.
    utils.remove_existing_container(config["mesos_master_container"])

    # Orphaned containers Mesos may have launched ("mesos-" name prefix).
    for orphan in cli.containers(filters={"name": "^/mesos-"}, all=True):
        utils.remove_existing_container(orphan.get("Id"))

    # Finally ZooKeeper.
    utils.remove_existing_container(config["zk_container"])
def teardown_mesos_agent(config, agent_index, is_exclusive=False):
    """Remove a single Mesos agent container.

    Exclusive agents use the "<prefix>-exclusive<index>" naming scheme.
    """
    suffix = "-exclusive" if is_exclusive else ""
    name = config["mesos_agent_container"] + suffix + repr(agent_index)
    utils.remove_existing_container(name)
def teardown():
    """Tear down the whole minicluster (remove every container).

    Relies on the module-level `config` dict for container names and
    instance counts.
    """
    # (container-name prefix, instance count) in teardown order:
    # jobmgr, placement, resmgr, hostmgr, archiver, aurorabridge.
    components = [
        (config["peloton_jobmgr_container"],
         config["peloton_jobmgr_instance_count"]),
        (config["peloton_placement_container"],
         len(config["peloton_placement_instances"])),
        (config["peloton_resmgr_container"],
         config["peloton_resmgr_instance_count"]),
        (config["peloton_hostmgr_container"],
         config["peloton_hostmgr_instance_count"]),
        (config["peloton_archiver_container"],
         config["peloton_archiver_instance_count"]),
        (config["peloton_aurorabridge_container"],
         config["peloton_aurorabridge_instance_count"]),
    ]
    for prefix, count in components:
        for idx in range(count):
            utils.remove_existing_container(prefix + repr(idx))

    minicluster.teardown_mesos(config)
    minicluster.teardown_k8s()

    utils.remove_existing_container(config["cassandra_container"])
    print_utils.okgreen("teardown complete!")