def test(server_node, client_node): print client_node, "->", server_node server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[ server_port ]).set_node(server_node).attache_service(server_service).deploy() # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(3) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) run_client(client_node, server_node, server_pod_ip) run_client(client_node, server_node, global_server_name) delete_pod(namespace, server_pod_name) wait_for_pod_state(namespace, server_pod_name, timeout=240, expect_status=NOT_FOUND)
def run_server(server_node): server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[ server_port ]).set_node(server_node).attache_service(server_service).deploy() try: # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(5) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) for node in nodes: run_client(node, server_node, server_pod_ip) run_client(node, server_node, global_server_name) except: pass finally: remove_pod(namespace, server_pod_name)
def run_client(client_node, server_node, server_pod_ip): client_pod_name = "client-" + "-".join(client_node.split(".")) pod = PodBuilder(client_pod_name, namespace).add_container( name=client_pod_name + "-container", image=image, args=client_args % server_pod_ip, limits=glimit, requests=grequest, ).set_node(client_node) try: for i in range(2): pod.deploy() # wait for client complete wait_for_pod_state(namespace, client_pod_name, timeout=600, expect_status=SUCCEEDED) # parse client log to get tx speed. logs = tail_pod_logs(namespace, client_pod_name, lines=20).strip() print logs summary = logs.split("\n")[-1] m = re.match(r".*[^.\d]+([.\d]+) MBytes/sec", summary) if m: break remove_pod(namespace, client_pod_name) if m: print server_node, client_node, server_pod_ip, m.group(1) add_report(client_node, server_node, m.group(1)) else: add_report(client_node, server_node, summary) except: pass finally: remove_pod(namespace, client_pod_name)
def deploy_10kservices(): service_id_start = 0 total_services = [] for nr in range(9): for n in nodes: pod_name = "memcached-%d-%s" % (nr, "-".join(n.split(".")), ) pod = PodBuilder( pod_name, namespace, ).set_node( n ).add_container( pod_name, image=image, args=args, ports=[client_port], requests={'cpu': '0', 'memory': '0'}, limits={'cpu': '0', 'memory': '0'} ) services = [ ServiceBuilder("service-memcached-%d" % (i+service_id_start), namespace).add_port(client_port) for i in range(service_per_pod) ] for s in services: pod.attache_service(s) pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=RUNNING) total_services += services service_id_start += service_per_pod print "pod are all running, deploy %d services now..." % len(total_services) for s in total_services: s.deploy()
def deploy_1(scrapy_name, url, domain, node): # volume try: volume = CephFSVolume("cephfs", "/tmp", monitors=ceph_monitors, secret_name=ceph_secret, fs_path="scrapy", sub_path=scrapy_name) PodBuilder( scrapy_name, namespace, ).set_node(node).add_container( scrapy_name, image=image, args="scrapy crawl neteasy -a entry_url=%s -a domain=%s" % (url, domain), volumes=[volume], requests={ 'cpu': 2, 'memory': "8Gi" }, limits={ 'cpu': 4, 'memory': "16Gi" }, ).deploy() except: return wait_for_pod_state(namespace, scrapy_name, 60, RUNNING)
def deploy(node): node_mark = "-".join(node.split(".")) args = server_args + ' ' + node for i in nodes: if i == node: continue pod_name = ("%s--%s" % ("-".join(i.split(".")), node_mark)) pod = PodBuilder( pod_name, namespace, ).set_node(i).add_container( pod_name, image=image, args=args, requests=grequest, limits=glimit, ) try: for n in xrange(2): pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=SUCCEEDED) time.sleep(20) logs = tail_pod_logs(namespace, pod_name, lines=20).strip() print logs if logs: reports.append(logs.splitlines()) break remove_pod(namespace, pod_name) except: print 'create pod error!!' finally: remove_pod(namespace, pod_name)
def deploy():
    """Deploy the (module-configured) pod with docker's containers directory
    mounted in via a host-path volume, then wait until it is RUNNING.
    """
    # host-path volume exposing /data/docker/containers on the node
    containers_volume = HostPathVolume("containers", "/apt/containers",
                                       "/data/docker/containers")
    PodBuilder(name, namespace, node_name=node).add_container(
        name,
        image=image,
        args=args,
        volumes=[containers_volume]).deploy()
    wait_for_pod_state(namespace, name, 60, RUNNING)
def test(node): print node pod_name = "fio-" + "-".join(node.split(".")) reports = [] for n, v in volumes.items(): print n for e in io_engines: print e PodBuilder( pod_name, namespace, ).set_node(node).add_container(pod_name + "-container", image=image, args=args, limits={ 'cpu': '1', 'memory': '2Gi' }, requests={ 'cpu': '0', 'memory': '0' }, volumes=[v], FIO_DIR=FIO_DIR, IOENGINE=e, FILE_SIZE="64g").deploy() # wait to complete wait_for_pod_state(namespace, pod_name, timeout=3600, expect_status=SUCCEEDED) logs = tail_pod_logs(namespace, pod_name).strip() # delete the pod delete_pod(namespace, pod_name) wait_for_pod_state(namespace, pod_name, timeout=240, expect_status=NOT_FOUND) # report = json.loads(logs) report = eval(logs) for job in report["jobs"]: print "READ:", job['read']['bw'], "KB/s" print "WRITE:", job['write']['bw'], "KB/s" reports.append({ "vtype": n, "io_engine": e, "read(KB/s)": report["jobs"][0]['read']['bw'], "write(KB/S)": report["jobs"][0]['write']['bw'] }) return reports
def stress_pv_fio(): pvc_name = "myfiopvc" for i in range(1000): print "round", i pvc = PVCBuilder(pvc_name, namespace) pv = HostPVBuilder("100Gi", "myfiopv") pv.attach_pvc(pvc) pv.deploy("/mnt/mypv/mydata/") pvc.deploy() time.sleep(5) print "PV/PVC created, creat pod now" hostpath_pvc = HostPathPVCVolume('pvcvolume', FIO_DIR, pvc_name) pod_name = "hostpathpvfiotest" pod = PodBuilder(pod_name, namespace).set_node("10.19.137.159").add_container( pod_name, image=fio_image, args=fio_args, requests={ 'cpu': '0', 'memory': '0' }, limits={ 'cpu': '0', 'memory': '0' }, volumes=[hostpath_pvc], FIO_DIR=FIO_DIR, IOENGINE="mmap", FILE_SIZE='64g') pod.deploy() wait_for_pod_state(namespace, pod_name, timeout=600, expect_status=RUNNING) time.sleep(120) # delete the pod delete_pod(namespace, pod_name) wait_for_pod_state(namespace, pod_name, timeout=240, expect_status=NOT_FOUND) pvc.un_deploy() pv.un_deploy()
def run_server(server_node, client_nodes): server_pod_name = "server-" + "-".join(server_node.split(".")) PodBuilder(server_pod_name, namespace).add_container( name=server_pod_name + "-container", image=image, args=server_args, limits=glimit, requests=grequest, ports=[server_port]).set_node(server_node).deploy() # wait for server pod running wait_for_pod_state(namespace, server_pod_name, timeout=600, expect_status=RUNNING) time.sleep(10) # get server pod ip server_pod_ip = get_pod_ip(namespace, server_pod_name) client_count = 0 for node in client_nodes: client_count += 1 PodBuilder("iperf-client-%d" % client_count, namespace).add_container( name="iperf-client-container-%d" % client_count, image=image, args=client_args % server_pod_ip, limits=glimit, requests=grequest, ).set_node(node).deploy() for i in range(1, client_count + 1): wait_for_pod_state(namespace, "iperf-client-%d" % i, timeout=600, expect_status=SUCCEEDED) logs = tail_pod_logs(namespace, "iperf-client-%d" % i, lines=20).strip() print logs.split("\n")[-1]
def un_deploy():
    """Tear down the (module-configured) pod and block until the API reports
    it gone (up to 60s).
    """
    delete_pod(namespace, name)
    wait_for_pod_state(namespace, name, 60, NOT_FOUND)
'cpu': '0', 'memory': '0' }, ).set_node(node).deploy() def un_deploy(name): delete_pod(namespace, name) if __name__ == "__main__": # deploy cpu stress client_pod_name = "cpu-stress" # deploy(client_pod_name, "10.19.137.148", "--cpu 8 --timeout 300s") deploy(client_pod_name, "10.19.137.148", "-m 6 --vm-hang 60 --vm-bytes 2G --timeout 360s") wait_for_pod_state(namespace, client_pod_name, timeout=600, expect_status=SUCCEEDED) remove_pod(namespace, client_pod_name) # time.sleep(120) # un_deploy() # memory stress # for i in range(6): # deploy("memory-stress-%d" % i, "10.19.137.154", "-m 6 --vm-hang 60 --vm-bytes 2G --timeout 90s") # time.sleep(150) # for i in range(6): # un_deploy("memory-stress-%d" % i)