def run_client(client_node, server_node, server_pod_ip): client_pod_name = "client-" + "-".join(client_node.split(".")) pod = PodBuilder(client_pod_name, namespace).add_container( name=client_pod_name + "-container", image=image, args=client_args % server_pod_ip, limits=glimit, requests=grequest, ).set_node(client_node) try: for i in range(2): pod.deploy() # wait for client complete wait_for_pod_state(namespace, client_pod_name, timeout=600, expect_status=SUCCEEDED) # parse client log to get tx speed. logs = tail_pod_logs(namespace, client_pod_name, lines=20).strip() print logs summary = logs.split("\n")[-1] m = re.match(r".*[^.\d]+([.\d]+) MBytes/sec", summary) if m: break remove_pod(namespace, client_pod_name) if m: print server_node, client_node, server_pod_ip, m.group(1) add_report(client_node, server_node, m.group(1)) else: add_report(client_node, server_node, summary) except: pass finally: remove_pod(namespace, client_pod_name)
def test_pv_fio(): pvc = PVCBuilder("myfiopvc", namespace) pv = HostPVBuilder("100Gi", "myfiopv") pv.attach_pvc(pvc) pv.deploy("/mnt/mypv/mydata/") pvc.deploy() time.sleep(5) print "PV/PVC created, creat pod now" hostpath_pvc = HostPathPVCVolume('pvcvolume', FIO_DIR, "myfiopvc") pod = PodBuilder("hostpathpvfiotest", namespace).set_node("10.19.137.159").add_container( "hostpathpvfiotest", image=fio_image, args=fio_args, requests={ 'cpu': '0', 'memory': '0' }, limits={ 'cpu': '0', 'memory': '0' }, volumes=[hostpath_pvc], FIO_DIR=FIO_DIR, IOENGINE="mmap") pod.deploy()
def deploy(node): node_mark = "-".join(node.split(".")) args = server_args + ' ' + node for i in nodes: if i == node: continue pod_name = ("%s--%s" % ("-".join(i.split(".")), node_mark)) pod = PodBuilder( pod_name, namespace, ).set_node(i).add_container( pod_name, image=image, args=args, requests=grequest, limits=glimit, ) try: for n in xrange(2): pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=SUCCEEDED) time.sleep(20) logs = tail_pod_logs(namespace, pod_name, lines=20).strip() print logs if logs: reports.append(logs.splitlines()) break remove_pod(namespace, pod_name) except: print 'create pod error!!' finally: remove_pod(namespace, pod_name)
def run_server(server_node): server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[ server_port ]).set_node(server_node).attache_service(server_service).deploy() try: # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(5) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) for node in nodes: run_client(node, server_node, server_pod_ip) run_client(node, server_node, global_server_name) except: pass finally: remove_pod(namespace, server_pod_name)
def test(server_node, client_node): print client_node, "->", server_node server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[ server_port ]).set_node(server_node).attache_service(server_service).deploy() # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(3) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) run_client(client_node, server_node, server_pod_ip) run_client(client_node, server_node, global_server_name) delete_pod(namespace, server_pod_name) wait_for_pod_state(namespace, server_pod_name, timeout=240, expect_status=NOT_FOUND)
def deploy_pod():
    """Deploy a memcached pod backed by the 'mypvc' hostpath PVC, with an
    Always restart policy and no resource quota."""
    pvc_volume = HostPathPVCVolume('pvcvolume', '/mnt/pvc', "mypvc")
    builder = PodBuilder("hostpathpvtest", namespace).add_container(
        "memcached-pv",
        image=image,
        args=args,
        requests={'cpu': '0', 'memory': '0'},
        limits={'cpu': '0', 'memory': '0'},
        volumes=[pvc_volume])
    builder.deploy(restart_policy="Always")
def stress_pv_fio(): pvc_name = "myfiopvc" for i in range(1000): print "round", i pvc = PVCBuilder(pvc_name, namespace) pv = HostPVBuilder("100Gi", "myfiopv") pv.attach_pvc(pvc) pv.deploy("/mnt/mypv/mydata/") pvc.deploy() time.sleep(5) print "PV/PVC created, creat pod now" hostpath_pvc = HostPathPVCVolume('pvcvolume', FIO_DIR, pvc_name) pod_name = "hostpathpvfiotest" pod = PodBuilder(pod_name, namespace).set_node("10.19.137.159").add_container( pod_name, image=fio_image, args=fio_args, requests={ 'cpu': '0', 'memory': '0' }, limits={ 'cpu': '0', 'memory': '0' }, volumes=[hostpath_pvc], FIO_DIR=FIO_DIR, IOENGINE="mmap", FILE_SIZE='64g') pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=RUNNING) time.sleep(120) # delete the pod delete_pod(namespace, pod_name) wait_for_pod_state(namespace, pod_name, timeout=240, expect_status=NOT_FOUND) pvc.un_deploy() pv.un_deploy()
def deploy(node):
    """Create two pod/service pairs on the given node; each pod is attached
    to its own service plus the shared global service."""
    node_mark = _get_node_mark(node)
    for idx in range(2):
        pod_name = ("pod-%s-%d" % (node_mark, idx)) + uid
        service_name = ("service-%s-%d" % (node_mark, idx)) + uid
        service = ServiceBuilder(service_name, namespace).add_port(client_port)
        pod = PodBuilder(pod_name, namespace).set_node(node).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={'cpu': '0', 'memory': '0'}
        ).attache_service(service).attache_service(global_service)
        pod.deploy()
        service.deploy()
def deploy_10kservices(): service_id_start = 0 total_services = [] for nr in range(9): for n in nodes: pod_name = "memcached-%d-%s" % (nr, "-".join(n.split(".")), ) pod = PodBuilder( pod_name, namespace, ).set_node( n ).add_container( pod_name, image=image, args=args, ports=[client_port], requests={'cpu': '0', 'memory': '0'}, limits={'cpu': '0', 'memory': '0'} ) services = [ ServiceBuilder("service-memcached-%d" % (i+service_id_start), namespace).add_port(client_port) for i in range(service_per_pod) ] for s in services: pod.attache_service(s) pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=RUNNING) total_services += services service_id_start += service_per_pod print "pod are all running, deploy %d services now..." % len(total_services) for s in total_services: s.deploy()
def deploy_services(nr=0):
    """Deploy one memcached pod per node for wave `nr`, attaching and
    deploying service_per_pod services for each pod before the pod itself.
    Service ids are offset so successive waves never collide."""
    service_id = service_per_pod * node_count * nr
    for node in nodes:
        pod_name = "memcached-%d-%s" % (nr, "-".join(node.split(".")), )
        pod = PodBuilder(
            pod_name,
            namespace,
        ).set_node(
            node
        ).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={'cpu': '0', 'memory': '0'},
            limits={'cpu': '0', 'memory': '0'}
        )
        for offset in range(service_per_pod):
            svc = ServiceBuilder("service-memcached-%d" % (offset + service_id),
                                 namespace).add_port(client_port)
            pod.attache_service(svc)
            svc.deploy()
        pod.deploy()
        service_id += service_per_pod
def test():
    """Deploy two pods on two fixed nodes, each mounting its own hostpath
    PVC at /mnt/pvc."""
    volume1 = HostPathPVCVolume('pvcvolume1', '/mnt/pvc', "mypvc1")
    volume2 = HostPathPVCVolume('pvcvolume2', '/mnt/pvc', "mypvc2")
    for pod_name, node, volume in (
            ("rbd1", "10.19.137.151", volume1),
            ("rbd2", "10.19.140.7", volume2)):
        PodBuilder(
            pod_name,
            namespace,
        ).set_node(node).add_container(
            pod_name + "-container",
            image=image,
            args=args,
            requests={'cpu': '0', 'memory': '0'},
            limits={'cpu': '0', 'memory': '0'},
            volumes=[volume],
        ).deploy()
def test(node): print node pod_name = "fio-" + "-".join(node.split(".")) reports = [] for n, v in volumes.items(): print n for e in io_engines: print e PodBuilder( pod_name, namespace, ).set_node(node).add_container(pod_name + "-container", image=image, args=args, limits={ 'cpu': '1', 'memory': '2Gi' }, requests={ 'cpu': '0', 'memory': '0' }, volumes=[v], FIO_DIR=FIO_DIR, IOENGINE=e, FILE_SIZE="64g").deploy() # wait to complete wait_for_pod_state(namespace, pod_name, timeout=3600, expect_status=SUCCEEDED) logs = tail_pod_logs(namespace, pod_name).strip() # delete the pod delete_pod(namespace, pod_name) wait_for_pod_state(namespace, pod_name, timeout=240, expect_status=NOT_FOUND) # report = json.loads(logs) report = eval(logs) for job in report["jobs"]: print "READ:", job['read']['bw'], "KB/s" print "WRITE:", job['write']['bw'], "KB/s" reports.append({ "vtype": n, "io_engine": e, "read(KB/s)": report["jobs"][0]['read']['bw'], "write(KB/S)": report["jobs"][0]['write']['bw'] }) return reports
def deploy(name, node, args):
    """Deploy a single pod `name` on `node` running the given args, with a
    20Gi memory cap and no cpu limit."""
    builder = PodBuilder(name, namespace).add_container(
        name=name + "-container",
        image=image,
        args=args,
        limits={'cpu': '0', 'memory': '20Gi'},
        requests={'cpu': '0', 'memory': '0'},
    )
    builder.set_node(node).deploy()
def run_server(server_node, client_nodes): server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[server_port]).set_node(server_node).deploy() # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(10) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) client_count = 0 for node in client_nodes: client_count += 1 PodBuilder("iperf-client-%d" % client_count, namespace).add_container( name="iperf-client-container-%d" % client_count, image=image, args=client_args % server_pod_ip, limits=glimit, requests=grequest, ).set_node(node).deploy() for i in range(1, client_count + 1): wait_for_pod_state(namespace, "iperf-client-%d" % i, timeout=600, expect_status=SUCCEEDED) logs = tail_pod_logs(namespace, "iperf-client-%d" % i, lines=20).strip() print logs.split("\n")[-1]
def stress_pod():
    """Polling loop that fills every node with pods, one in flight per node.

    NOTE(review): relies on module-level parallel lists dones/readys/counts/
    node_marks indexed like `nodes`, plus POD_PER_NODE and global_service —
    presumably initialised elsewhere in this file; confirm before reuse.
    For each node: when its slot is "ready", create the next pod; otherwise
    poll the last-created pod until it is Running, then advance the count.
    """
    total = 0
    start = time.time()
    while not all(dones):
        # deploy ready nodes
        for i, n in enumerate(nodes):
            pod_id = counts[i]  # index of the next/current pod on node n
            if dones[i]:
                continue
            pod_name = ("pod-%s-%d" % (node_marks[i], pod_id))
            if readys[i]:
                # create a new pod
                PodBuilder(
                    pod_name,
                    namespace,
                ).set_node(n).add_container(
                    pod_name,
                    image=image,
                    args=args,
                    ports=[client_port],
                    requests={
                        'cpu': '0',
                        "memory": '0'
                    },
                    limits={
                        'cpu': '1',
                        "memory": '32Mi'
                    }).attache_service(global_service).deploy()
                readys[i] = False
                # print "creating", pod_name
            else:
                # check for current pod running
                readys[i] = is_pod_running(namespace, pod_name)
                if readys[i]:
                    total += 1
                    counts[i] += 1
                    if counts[i] >= POD_PER_NODE:
                        print "It took %ds to deploy %d pod on %s" % (
                            int(time.time() - start), POD_PER_NODE, n)
                        # print n, "is done~!", "total", total
                        dones[i] = True
        # throttle the polling loop
        time.sleep(3)
    print "it took", time.time() - start, "s"
def deploy(node):
    """Deploy 3000 services and attach them all to a single memcached pod
    on the given node; the pod is deployed last."""
    pod = PodBuilder(
        "memcached-1",
        namespace,
    ).set_node(node).add_container(
        "pod-memcached-1",
        image=image,
        args=args,
        ports=[client_port],
        requests={'cpu': '200m', 'memory': '256Mi'},
        limits={'cpu': '1', 'memory': '512Mi'})
    for idx in range(3000):
        service = ServiceBuilder("service-memcached-%d" % idx,
                                 namespace).add_port(client_port)
        service.deploy()
        pod.attache_service(service)
    pod.deploy()
def deploy(node, action=CREATE):
    """Create — or, when action is not CREATE, tear down — two pod/service
    pairs on the given node; each pod also joins the global service."""
    node_mark = "-".join(node.split("."))
    for idx in range(2):
        pod_name = ("pod-%s-%d" % (node_mark, idx)) + uid
        service_name = ("service-%s-%d" % (node_mark, idx)) + uid
        service = ServiceBuilder(service_name, namespace).add_port(client_port)
        pod = PodBuilder(
            pod_name,
            namespace,
        ).set_node(node).add_container(
            pod_name,
            image=image,
            args=args,
            ports=[client_port],
            requests={'cpu': '0', 'memory': '0'}
        ).attache_service(service).attache_service(global_service)
        if action == CREATE:
            pod.deploy()
            service.deploy()
        else:
            pod.un_deploy()
            service.un_deploy()
from k8sclient.Components import (
    PodBuilder,
)

namespace = "k8sft"
image = "127.0.0.1:30100/library/memcached:check3"
args = "memcached -m 1028 -u root -v"

# memcached pod labelled for the internal-only network restriction.
pod = PodBuilder(
    "justtest",
    namespace,
    labels={"network-restrict-internal-only": "oops"}
).add_container(
    "justtest",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
    limits={'cpu': '0', 'memory': '0'},
    RESTRICTED_INTERNAL_ONLY="1")
pod.deploy()
# limits larger than node allocable: running
namespace = "k8sft"
image = "127.0.0.1:30100/library/memcached:check3"
args = "memcached -m 1028 -u root -v"

# Mount the host's docker containers directory read-only into the pod.
volume_nvidia = HostPathVolume("containers",
                               "/data/docker/containers",
                               "/data/docker/containers",
                               read_only=True)

pod = PodBuilder(
    "justtest",
    namespace,
).set_node("10.19.140.6").add_container(
    "justtest",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
    limits={'cpu': '0', 'memory': '0'},
    volumes=[volume_nvidia],
)
pod.deploy()
# limits larger than node allocable: running
from k8sclient.Components import (
    PodBuilder,
)

namespace = "k8sft"
image = "ihub.helium.io:30100/library/memcached:check3"
args = "memcached -m 1028 -u root -v"

# Internal-only restricted pod pinned to one node, using the host's DNS.
pod = PodBuilder(
    "justtest",
    namespace,
    labels={"network-restrict-internal-only": "oops"}
).set_node("10.19.137.159").add_container(
    "justtest",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
    limits={'cpu': '0', 'memory': '0'},
    RESTRICTED_INTERNAL_ONLY="1")
# .add_annotation("security.alpha.kubernetes.io/unsafe-sysctls", "net.core.somaxconn=512")
pod.deploy(dns_policy="Default")
# limits larger than node allocable: running
from k8sclient.Components import (
    PodBuilder,
)

namespace = "k8sft"
image = "ihub.helium.io:30100/library/hbasestandalone:latest"
args = "sleep 36000"

# HBase standalone image kept alive with a long sleep, pinned to one node.
builder = PodBuilder("hbasestandalone", namespace)
builder = builder.set_node("10.19.140.15")
pod = builder.add_container(
    "justtest",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
    limits={'cpu': '0', 'memory': '0'},
)
pod.deploy()
# limits larger than node allocable: running
from k8sclient.Components import (
    PodBuilder,
)

namespace = "health-check"
image = "127.0.0.1:30100/library/memcached:check2"
args = "memcached -m 1028 -u root -v"

# memcached pod with requests only (no limits), pinned to a single node.
pod = PodBuilder(
    "justtest",
    namespace,
).set_node("10.19.137.154").add_container(
    "justtest",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
)
pod.deploy()
from k8sclient.Components import (
    PodBuilder,
)

namespace = "k8sft"
image = "127.0.0.1:30100/library/alpine-iperf"
args = "iperf -f M -i 1 -m -s"
# format string filled in with the server address by client pods
client_args = r"iperf -f M -t 10 -i 1 -c %s"

# iperf server pod pinned to a single node.
pod = PodBuilder(
    "iperfserver",
    namespace,
).set_node("192.168.57.102").add_container(
    "server",
    image=image,
    args=args,
    requests={'cpu': '0', 'memory': '0'},
    limits={'cpu': '0', 'memory': '0'},
)
#).add_annotation("security.alpha.kubernetes.io/unsafe-sysctls", "net.core.somaxconn=512")
pod.deploy()
# limits larger than node allocable: running