import threading
import time
import unittest

from src.api_server import APIServer
from src.dep_controller import DepController
from src.node_controller import NodeController
from src.req_handler import ReqHandler
from src.scheduler import Scheduler

# Loop intervals for each controller's reconciliation cycle.
_nodeCtlLoop = 2
_depCtlLoop = 2
_scheduleCtlLoop = 2

# Wire every controller up to a single shared API server.
apiServer = APIServer()
depController = DepController(apiServer, _depCtlLoop)
nodeController = NodeController(apiServer, _nodeCtlLoop)
reqHandler = ReqHandler(apiServer)
scheduler = Scheduler(apiServer, _scheduleCtlLoop)

# threading.Thread expects a callable target, so each controller object is used
# directly as the target of its own thread.
depControllerThread = threading.Thread(target=depController)
nodeControllerThread = threading.Thread(target=nodeController)
reqHandlerThread = threading.Thread(target=reqHandler)
schedulerThread = threading.Thread(target=scheduler)

print("Threads Starting")
reqHandlerThread.start()
nodeControllerThread.start()
depControllerThread.start()
schedulerThread.start()

print("ReadingFile")
instructions = open("tracefiles/delete_deployment.txt", "r")
commands = instructions.readlines()
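# Illustrative sketch only: the command-dispatch loop for this trace is not
# shown above. Assuming delete_deployment.txt uses the same "verb argument..."
# line format as the AddNode trace, the loop below mirrors that dispatch
# pattern. The verbs 'Deployment'/'DeleteDeployment' and the handler name
# apiServer.RemoveDeployment are assumptions, not confirmed project API.
for command in commands:
    cmdAttributes = command.split()
    with apiServer.etcdLock:
        if cmdAttributes[0] == 'Deployment':
            apiServer.CreateDeployment(cmdAttributes[1:])
        elif cmdAttributes[0] == 'DeleteDeployment':
            apiServer.RemoveDeployment(cmdAttributes[1:])  # hypothetical handler name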
import threading
import time

import pandas as pd

from src.api_server import APIServer
from src.dep_controller import DepController
from src.hpa import HPA
from src.load_balancer import LoadBalancer
from src.node_controller import NodeController
from src.req_handler import ReqHandler
from src.scheduler import Scheduler
from src.supervisor import Supervisor

# This is the simulation frontend that will interact with your APIServer to
# change cluster configurations and handle requests.
# All provided files are guidelines; you are welcome to change them as much as
# desired, so long as the required functionality is still implemented.

# Loop intervals for each controller's reconciliation cycle.
_nodeCtlLoop = 1
_depCtlLoop = 1
_scheduleCtlLoop = 1
_hpaCtlLoop = 2

kind = 'UA'

apiServer = APIServer()
depController = DepController(apiServer, _depCtlLoop)
nodeController = NodeController(apiServer, _nodeCtlLoop)
reqHandler = ReqHandler(apiServer)
scheduler = Scheduler(apiServer, _scheduleCtlLoop)

depControllerThread = threading.Thread(target=depController)
nodeControllerThread = threading.Thread(target=nodeController)
reqHandlerThread = threading.Thread(target=reqHandler)
schedulerThread = threading.Thread(target=scheduler)

print("Threads Starting")
reqHandlerThread.start()
nodeControllerThread.start()
depControllerThread.start()
schedulerThread.start()

print("ReadingFile")

# Graphing information
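# Illustrative sketch only: the graphing code itself is not shown above. One
# plausible use of the pandas import is to collect per-deployment latency
# samples while the trace runs and persist them for plotting. The
# latency_samples list and its fields are assumptions, not project API.
latency_samples = [{'deployment': 'Deployment_AA', 'time': 1.0, 'latency': 0.25}]
metrics = pd.DataFrame(latency_samples)
print(metrics.groupby('deployment')['latency'].describe())
metrics.to_csv('results_' + kind + '.csv', index=False)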
import unittest

from src.api_server import APIServer
from src.hpa import HPA
from src.load_balancer import LoadBalancer
from src.pod import Pod

# Deployment and HPA parameters used to build the test fixture.
DEPLOYMENT_INFO = ['Deployment_AA', 2, 2]
HPA_INFO = ['Deployment_AA', 75, 10, 5]
_hpaCtlLoop = 2

# Create a deployment on the API server, then hand-build a running pod for it.
apiServer = APIServer()
apiServer.CreateDeployment(DEPLOYMENT_INFO)
deployment = apiServer.etcd.deploymentList[0]
podName = deployment.deploymentLabel + "_" + str(apiServer.GeneratePodName())
pod = Pod(podName, deployment.cpuCost, deployment.deploymentLabel)
pod.status = "RUNNING"
pod.requests = ['Req 1']
pod.available_cpu -= 1  # one CPU unit in use, so the pod is half utilised
podList = [pod, pod]
hpa = HPA(apiServer, _hpaCtlLoop, HPA_INFO)


class TestUtilisation(unittest.TestCase):
    def test_average_utilisation(self):
        load = hpa.calculateAvgUtil(deployment, podList)
        self.assertEqual(load, 0.5)


class TestController(unittest.TestCase):
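# Worked check of the 0.5 expected above (sketch only; the real calculateAvgUtil
# lives in src/hpa.py and may be implemented differently). Assumed formula:
# per pod, utilisation = (cpuCost - available_cpu) / cpuCost, averaged over the
# pod list.
def average_utilisation_sketch(deployment, pods):
    used = [(deployment.cpuCost - p.available_cpu) / deployment.cpuCost for p in pods]
    return sum(used) / len(used)

# With cpuCost = 2 and one unit consumed (available_cpu == 1), each pod sits at
# 1/2 utilisation, so the average over [pod, pod] is 0.5, matching the test.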
import time
import unittest

from src.api_server import APIServer

apiServer = APIServer()

# Replay the AddNode trace against the API server before the tests run.
instructions = open("tracefiles/add_node.txt", "r")
commands = instructions.readlines()
for command in commands:
    cmdAttributes = command.split()
    # print(str(cmdAttributes))
    with apiServer.etcdLock:
        if cmdAttributes[0] == 'AddNode':
            apiServer.CreateWorker(cmdAttributes[1:])


class TestAddWorker(unittest.TestCase):
    def test_workers_length(self):
        self.assertEqual(len(apiServer.GetWorkers()), 1)

    def test_worker_data(self):
        worker = apiServer.etcd.nodeList[0]
        self.assertEqual(worker.assigned_cpu, 4)
        self.assertEqual(worker.available_cpu, 4)
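# Assumed trace format (illustration only; tracefiles/add_node.txt is not shown
# here). Each line is a verb followed by its arguments, split on whitespace and
# passed to the API server, e.g.:
#
#     AddNode Node_1 4
#
# The node name 'Node_1' is a placeholder. The assertions above imply a single
# AddNode line granting 4 CPUs, so GetWorkers() returns one worker whose
# assigned_cpu and available_cpu both start at 4.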