def deploy_10kservices():
    service_id_start = 0
    total_services = []
    for nr in range(9):
        for n in nodes:
            # one memcached pod per node per round; services are attached
            # first and deployed in bulk after all pods are running
            pod_name = "memcached-%d-%s" % (nr, "-".join(n.split(".")))
            pod = PodBuilder(
                pod_name,
                namespace,
            ).set_node(
                n
            ).add_container(
                pod_name,
                image=image,
                args=args,
                ports=[client_port],
                requests={'cpu': '0', 'memory': '0'},
                limits={'cpu': '0', 'memory': '0'}
            )
            services = [
                ServiceBuilder("service-memcached-%d" % (i + service_id_start),
                               namespace).add_port(client_port)
                for i in range(service_per_pod)
            ]
            for s in services:
                pod.attache_service(s)
            pod.deploy()
            wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=RUNNING)
            total_services += services
            service_id_start += service_per_pod
    print "pods are all running, deploying %d services now..." % len(total_services)
    for s in total_services:
        s.deploy()
def deploy_services(nr=0):
    service_id_start = service_per_pod * node_count * nr
    for n in nodes:
        pod_name = "memcached-%d-%s" % (nr, "-".join(n.split(".")))
        pod = PodBuilder(
            pod_name,
            namespace,
        ).set_node(
            n
        ).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={'cpu': '0', 'memory': '0'},
            limits={'cpu': '0', 'memory': '0'}
        )
        services = [
            ServiceBuilder("service-memcached-%d" % (i + service_id_start),
                           namespace).add_port(client_port)
            for i in range(service_per_pod)
        ]
        for s in services:
            pod.attache_service(s)
            s.deploy()
        pod.deploy()
        service_id_start += service_per_pod
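# Minimal driver sketch for deploy_services(); this is not from the original
# source. It assumes the module-level settings node_count and service_per_pod
# that the function references are defined roughly as below, and simply runs
# one round per call so service ids from different rounds do not collide.
node_count = len(nodes)        # assumed: one entry per ready node
service_per_pod = 25           # assumed value, for illustration only

def deploy_rounds(rounds=10):
    # Each round nr deploys one memcached pod per node plus its block of
    # services, offset by service_per_pod * node_count * nr.
    for nr in range(rounds):
        deploy_services(nr=nr)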
def deploy(node):
    node_mark = _get_node_mark(node)
    for i in range(2):
        pod_name = ("pod-%s-%d" % (node_mark, i)) + uid
        service_name = ("service-%s-%d" % (node_mark, i)) + uid
        service = ServiceBuilder(service_name, namespace).add_port(client_port)
        pod = PodBuilder(
            pod_name,
            namespace,
        ).set_node(node).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={
                'cpu': '0',
                'memory': '0'
            }).attache_service(service).attache_service(global_service)
        pod.deploy()
        service.deploy()
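# _get_node_mark is referenced above but not defined in this snippet.
# A plausible sketch, assuming it mirrors the inline expression used by the
# other deploy() variant below ("-".join(node.split("."))): it turns a node
# address such as "10.19.137.140" into a name-safe mark like "10-19-137-140".
def _get_node_mark(node):
    # Replace the dots in the node address so it can be embedded in
    # pod/service names, which must be DNS-label safe.
    return "-".join(node.split("."))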
def deploy(node):
    pod = PodBuilder(
        "memcached-1",
        namespace,
    ).set_node(node).add_container(
        "pod-memcached-1",
        image=image,
        args=args,
        ports=[client_port],
        requests={
            'cpu': '200m',
            'memory': '256Mi'
        },
        limits={
            'cpu': '1',
            'memory': '512Mi'
        })
    for i in range(3000):
        s = ServiceBuilder("service-memcached-%d" % i, namespace).add_port(client_port)
        s.deploy()
        pod.attache_service(s)
    pod.deploy()
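# Usage sketch, not from the original source: pick one ready node and deploy
# the single memcached pod with its 3000 attached services there.
# list_ready_nodes() is the same helper the other snippets use.
if __name__ == "__main__":
    target = sorted(list_ready_nodes())[0]
    deploy(target)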
def deploy(node, action=CREATE):
    node_mark = "-".join(node.split("."))
    for i in range(2):
        pod_name = ("pod-%s-%d" % (node_mark, i)) + uid
        service_name = ("service-%s-%d" % (node_mark, i)) + uid
        service = ServiceBuilder(service_name, namespace).add_port(client_port)
        pod = PodBuilder(
            pod_name,
            namespace,
        ).set_node(node).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={
                'cpu': '0',
                'memory': '0'
            }).attache_service(service).attache_service(global_service)
        if action == CREATE:
            pod.deploy()
            service.deploy()
        else:
            pod.un_deploy()
            service.un_deploy()
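# The CREATE constant used as the default action above is defined elsewhere;
# the values and the DELETE name below are assumptions for illustration only.
# The loop sketches how the same deploy() could be reused for tear-down:
# any non-CREATE action takes the un_deploy() branch.
CREATE = "create"   # assumed value
DELETE = "delete"   # assumed name and value

def undeploy_all():
    # Drive deploy() in tear-down mode for every node that was populated.
    for n in nodes:
        deploy(n, action=DELETE)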
)
from k8sclient.Components import (
    ServicePort,
    PodBuilder,
    ServiceBuilder,
)
import time

nodes = list_ready_nodes()
nodes = [n for n in nodes if n.startswith("10.19.137")]
global_service_name = "stress-pod"
namespace = "k8sft"
image = "ihub.helium.io:30100/library/memcached:check"
args = "memcached -m 32 -u root -v"
client_port = ServicePort("clientport", 11211, 11211)
global_service = ServiceBuilder(global_service_name, namespace).add_port(client_port)
counts = [0] * len(nodes)
readys = [True] * len(nodes)
dones = [False] * len(nodes)
node_marks = ["-".join(n.split(".")) for n in nodes]
POD_PER_NODE = 400


def stress_pod():
    total = 0
    start = time.time()
    while not all(dones):
        # deploy ready nodes
        for i, n in enumerate(nodes):
            pod_id = counts[i]
            if dones[i]:
# volume
volume_pack = HostPathVolume("zkpack", "/opt/zookeeper", "/data/zookeeper/pack", read_only=True)

# replica set
nodes = [
    "",
    "192.168.57.101",
    "192.168.57.102",
    "192.168.57.103",
]
for i in range(1, 4):
    name = "zk-%d" % i
    service = ServiceBuilder(name, namespace, service_type="NodePort")
    service.add_port(client_port)
    service.add_port(server_port_1)
    service.add_port(server_port_2)
    volume_data = HostPathVolume("zkdata", "/var/lib/zookeeper", "/data/zookeeper/zk-%d" % i, read_only=False)
    rs = ReplicaSetBuilder(name, namespace).add_container(
        name=name + "-container",
        image=image,
        args=args,
        ports=[client_port, server_port_1, server_port_2],
        volumes=[
            volume_pack,
            volume_data
        ]).attache_service(service).set_hostname(name).set_node(nodes[i])
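# client_port, server_port_1 and server_port_2 are referenced above but not
# defined in this snippet. A sketch of plausible definitions, assuming the
# stock ZooKeeper ports (2181 client, 2888 peer, 3888 leader election) and the
# ServicePort(name, port, target_port) pattern used by the other snippets;
# the names and numbers in the original may differ.
client_port = ServicePort("clientport", 2181, 2181)
server_port_1 = ServicePort("serverport1", 2888, 2888)
server_port_2 = ServicePort("serverport2", 3888, 3888)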
register_cluster("yancheng", "~/.kube/config-yancheng") register_cluster("shanghai", "~/.kube/config-shanghai") if len(sys.argv) > 1: switch_cluster(sys.argv[1]) image = "ihub.helium.io:30100/library/alpine-iperf" server_args = "iperf -f M -i 1 -m -s" client_args = r"iperf -f M -t 10 -i 1 -c %s" namespace = "k8sft" nodes = sorted(list_ready_nodes()) server_port = ServicePort("serverport", 5001, 5001) global_server_name = "iperf-server" glimit = {'cpu': '0', 'memory': '8Gi'} grequest = {'cpu': '0', 'memory': '0'} server_service = ServiceBuilder(global_server_name, namespace).add_port(server_port) reports = {} report_css = """<style> table, th, td { border: 1px solid black; border-collapse: collapse; } tr:nth-child(even) {background: #CCC} tr:nth-child(odd) {background: #FFF} </style> """ report_title = r"""<H1>Pod to Pod network throughput, single connection. (MBytes/sec)</H1> Cell format: <b><i>[bw via ip]|[bw via service]</i></b> <br> CPU limit: <b>no limit</b> <br>
from k8sclient.Components import ServicePort, ServiceBuilder, ReplicaSetBuilder, HostPathVolume

namespace = "monkey"
name = "theano"
image = "127.0.0.1:30100/library/theano:latest"

# ports
http_port = ServicePort("httpport", 8888, 8888)

# service
http_service = ServiceBuilder("jupyter", namespace, service_type="NodePort").add_port(http_port)

# volume
volume_nvidia = HostPathVolume(
    "nvidia",
    "/opt/nvidia",
    "/opt/lib/nvidia",
    read_only=True
)
volume_nvidia_tools = HostPathVolume(
    "nvidia-tools",
    "/opt/tools",
    "/opt/lib/tools",
    read_only=True
)
volume_cuda = HostPathVolume(
    "cuda",
    "/opt/cuda",
    "/opt/cuda-8.0/",
    read_only=True
)

# replica set
rs = ReplicaSetBuilder(
    name,
    namespace