def StartPod(
        self,
        podStarted: "mpod.Pod",
        node1: "Node",
        scheduler1: "Scheduler",
        serviceTargetForPod: "mservice.Service",
        # globalVar1: "GlobalVar"
        ):
    """Move a queued pod onto its selected node and mark it Running.

    Preconditions (asserts): the pod is in the scheduler queue, already
    bound to node1 and serviceTargetForPod, has non-negative resource
    requests, and node1 has enough spare formal CPU/memory capacity.

    Side effects: node formal consumption grows by the pod's requests,
    the pod leaves the scheduler queue, the service's active-pod count
    grows, and pod/service statuses become Running/Started.

    Returns a ScenarioStep describing the transition.
    """
    assert podStarted in scheduler1.podQueue
    assert podStarted.toNode == node1
    assert podStarted.targetService == serviceTargetForPod
    # Requests must be set and non-negative (original spelled this `> -1`).
    assert podStarted.cpuRequest >= 0
    assert podStarted.memRequest >= 0
    # Node must have room: consumption + request must not exceed capacity.
    # Requests/capacities are integral, so `<= cap` == original `< cap + 1`.
    assert node1.currentFormalCpuConsumption + podStarted.cpuRequest <= node1.cpuCapacity
    assert node1.currentFormalMemConsumption + podStarted.memRequest <= node1.memCapacity
    node1.currentFormalCpuConsumption += podStarted.cpuRequest
    node1.currentFormalMemConsumption += podStarted.memRequest
    # globalVar1.currentFormalCpuConsumption += podStarted.cpuRequest
    # globalVar1.currentFormalMemConsumption += podStarted.memRequest
    podStarted.atNode = node1
    scheduler1.queueLength -= 1
    scheduler1.podQueue.remove(podStarted)
    serviceTargetForPod.amountOfActivePods += 1
    podStarted.status = STATUS_POD["Running"]
    serviceTargetForPod.status = STATUS_SERV["Started"]
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Starting pod",
        parameters={"podStarted": describe(podStarted)},
        probability=1.0,
        affected=[describe(podStarted), describe(node1)])
def MarkServiceOutageEvent(self,
        service1: Service,
        pod1: Pod,
        global_: "GlobalVar",
        scheduler1: "Scheduler",
        currentFormalCpuConsumptionLoc: int,
        currentFormalMemConsumptionLoc: int,
        cpuCapacityLoc: int,
        memCapacityLoc: int,
        cpuRequestLoc: int,
        memRequestLoc: int):
    """Record an outage: a started, searchable service has no active pods.

    The *Loc parameters mirror model attribute values; only cpuRequestLoc
    and memRequestLoc are checked here — the capacity/consumption ones are
    accepted but unused (kept for interface compatibility).

    Marks the service Interrupted, raises the global interruption flag,
    and returns a ScenarioStep describing the event.
    """
    # An outage is only meaningful once the scheduler has settled.
    assert scheduler1.status == STATUS_SCHED["Clean"]
    assert service1.amountOfActivePods == 0
    assert service1.status == STATUS_SERV["Started"]
    assert service1.searchable == True
    # The pod belongs to this service and carries the mirrored requests.
    assert pod1.targetService == service1
    assert pod1.cpuRequest == cpuRequestLoc
    assert pod1.memRequest == memRequestLoc

    service1.status = STATUS_SERV["Interrupted"]
    global_.is_service_interrupted = True

    outage_step = ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Detected service outage event",
        parameters={"service.amountOfActivePods": 0,
                    "service": describe(service1)},
        probability=1.0,
        affected=[describe(service1)])
    return outage_step
def KillPod(
        self,
        podBeingKilled: "Pod",
        nodeWithPod: "mnode.Node",
        serviceOfPod: "mservice.Service",
        # globalVar1: "GlobalVar",
        scheduler1: "mscheduler.Scheduler",
        amountOfActivePodsPrev: int):
    """Tear down a pod in Killing state and requeue it as Pending.

    Releases the pod's real and formal resources on its node, decrements
    the service's active-pod count, and hands the pod back to the
    scheduler, which is flagged Changed so it gets rescheduled.

    Returns a ScenarioStep describing the teardown.
    """
    assert podBeingKilled.atNode == nodeWithPod
    assert podBeingKilled.targetService == serviceOfPod
    assert podBeingKilled.status == STATUS_POD["Killing"]
    # assert podBeingKilled.amountOfActiveRequests == 0 #For Requests
    assert amountOfActivePodsPrev == serviceOfPod.amountOfActivePods

    # Give back both the real and the formal resources held on the node.
    nodeWithPod.currentRealMemConsumption -= podBeingKilled.realInitialMemConsumption
    nodeWithPod.currentRealCpuConsumption -= podBeingKilled.realInitialCpuConsumption
    nodeWithPod.currentFormalMemConsumption -= podBeingKilled.memRequest
    nodeWithPod.currentFormalCpuConsumption -= podBeingKilled.cpuRequest
    # globalVar1.currentFormalMemConsumption -= podBeingKilled.memRequest
    # globalVar1.currentFormalCpuConsumption -= podBeingKilled.cpuRequest

    serviceOfPod.amountOfActivePods -= 1

    # The pod returns to the queue and must be scheduled again.
    podBeingKilled.status = STATUS_POD["Pending"]
    scheduler1.podQueue.add(podBeingKilled)
    scheduler1.status = STATUS_SCHED["Changed"]

    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Killing pod",
        parameters={"podBeingKilled": describe(podBeingKilled)},
        probability=1.0,
        affected=[describe(podBeingKilled)])
def Evict_and_replace_less_prioritized_pod_when_target_node_is_defined(
        self,
        podPending: "Pod",
        podToBeReplaced: "Pod",
        nodeForPodPending: "mnode.Node",  # unused
        scheduler1: "mscheduler.Scheduler",
        priorityClassOfPendingPod: PriorityClass,
        priorityClassOfPodToBeReplaced: PriorityClass):
    """Mark a running lower-priority pod for eviction in favour of a
    pending pod that is already bound to the same node.

    Only changes the victim's status to Killing; the actual teardown is a
    separate step (KillPod). Returns a ScenarioStep describing the choice.
    """
    # Pending pod must be queued and bound to a real (non-null) node.
    assert podPending in scheduler1.podQueue
    assert podPending.toNode == nodeForPodPending
    assert nodeForPodPending.isNull == False
    assert podToBeReplaced.atNode == nodeForPodPending
    assert podPending.status == STATUS_POD["Pending"]
    # Priority-class arguments must mirror the pods' actual classes.
    assert priorityClassOfPendingPod == podPending.priorityClass
    assert priorityClassOfPodToBeReplaced == podToBeReplaced.priorityClass
    # assert preemptionPolicyOfPendingPod == priorityClassOfPendingPod.preemptionPolicy
    # assert preemptionPolicyOfPodToBeReplaced == priorityClassOfPodToBeReplaced.preemptionPolicy
    # assert priorityClassOfPendingPod.preemptionPolicy == self.constSymbol["PreemptLowerPriority"]
    # Eviction is only justified when the pending pod outranks the victim.
    assert priorityClassOfPendingPod.priority > priorityClassOfPodToBeReplaced.priority
    assert podToBeReplaced.status == STATUS_POD["Running"]

    podToBeReplaced.status = STATUS_POD["Killing"]

    step_parameters = {
        "podPending": describe(podPending),
        "podToBeReplaced": describe(podToBeReplaced),
    }
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Because pod has lower priority, it is getting evicted to make room for new pod",
        parameters=step_parameters,
        probability=1.0,
        affected=[describe(podPending), describe(podToBeReplaced)])
def SelectNode(self, pod1: "mpod.Pod", SelectedNode: "Node"):
    """Bind a not-yet-placed pod to SelectedNode.

    Precondition: the pod has no target node (toNode is the null node).
    Returns a ScenarioStep describing the placement decision.
    """
    assert pod1.toNode == Node.NODE_NULL
    pod1.toNode = SelectedNode
    placement = {
        "pod": describe(pod1),
        "node": describe(SelectedNode),
    }
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Selected node for pod placement",
        parameters=placement,
        probability=1.0,
        affected=[describe(pod1), describe(SelectedNode)])
def SetDefaultCpuLimitForPodBeforeNodeAssignment(self, pod1: "Pod", node1: "mnode.Node", cpuCapacity: int):
    """Bind the pod to node1 and default its unset CPU limit to the node's
    full CPU capacity.

    Preconditions: pod1.cpuLimit is unset (-1) and cpuCapacity mirrors
    node1.cpuCapacity. Mirrors SetDefaultMemLimitForPod; the placeholder
    step metadata ("no description provided", empty parameters) is
    replaced with real values consistent with the sibling methods.
    """
    assert pod1.cpuLimit == -1
    assert cpuCapacity == node1.cpuCapacity
    pod1.toNode = node1
    pod1.cpuLimit = cpuCapacity
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Setting default cpu limit for pod",
        parameters={
            "currentCpuLimit": -1,
            "newCpuLimit": str(pod1.cpuLimit)
        },
        probability=1.0,
        affected=[describe(pod1)])
def PodsConnectedToServices(self,
        service1: Service,
        scheduler1: "mscheduler.Scheduler"):
    """Mark a service Started once it has at least one active pod.

    scheduler1 is accepted for interface compatibility but not used.
    Returns a ScenarioStep describing the status change.
    """
    assert service1.amountOfActivePods > 0
    service1.status = STATUS_SERV["Started"]
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Mark service as started",
        parameters={},
        probability=1.0,
        affected=[describe(service1)])
def SetDefaultMemLimitForPod(self, pod1: "Pod", node1: "mnode.Node", memCapacity: int):
    """Default an unset pod memory limit to its node's full memory capacity.

    Preconditions: pod1.memLimit is unset (-1), pod1 already resides on
    node1, and memCapacity mirrors node1.memCapacity.
    Returns a ScenarioStep describing the change.
    """
    assert pod1.memLimit == -1
    assert node1 == pod1.atNode
    assert memCapacity == node1.memCapacity

    pod1.memLimit = memCapacity

    reported = {
        "currentMemoryLimit": -1,
        "newMemoryLimit": str(pod1.memLimit),
    }
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Setting default memory limit for pod",
        parameters=reported,
        probability=1.0,
        affected=[describe(pod1)])
def SetDefaultCpuRequestForPod(self, pod1: "Pod", cpuLimit: int):
    """Default an unset pod CPU request to the pod's own CPU limit.

    Preconditions: the pod has a CPU limit set, no CPU request yet, and
    cpuLimit mirrors pod1.cpuLimit. Returns a ScenarioStep for the change.
    """
    assert pod1.cpuLimit > -1
    assert pod1.cpuRequest == -1
    assert cpuLimit == pod1.cpuLimit

    pod1.cpuRequest = cpuLimit

    reported = {
        "currentCpuRequest": -1,
        # cpuRequest was just set equal to cpuLimit, so report either value.
        "newCpuRequest": str(pod1.cpuRequest),
    }
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Setting default cpu request for pod",
        parameters=reported,
        probability=1.0,
        affected=[describe(pod1)])
def connect_pod_service_labels(self, pod: "Pod", service: "mservice.Service", label: Label):
    """Link a running, unattached pod to a service whose selector matches
    one of the pod's labels, and mark the service Started.

    TODO: full selector support (currently one shared label suffices).
    TODO: only if pod is running, service is started.

    Returns a ScenarioStep; the placeholder metadata ("no description
    provided", empty parameters) is replaced with real values, and the
    mutated service is reported as affected (consistent with StartPod).
    """
    assert pod.targetService == pod.TARGET_SERVICE_NULL
    assert label in pod.metadata_labels
    assert label in service.spec_selector
    assert pod.status == STATUS_POD["Running"]
    pod.targetService = service
    service.amountOfActivePods += 1
    service.status = STATUS_SERV["Started"]
    return ScenarioStep(
        name=sys._getframe().f_code.co_name,
        subsystem=self.__class__.__name__,
        description="Connecting pod to service by matching label",
        parameters={
            "pod": describe(pod),
            "service": describe(service)
        },
        probability=1.0,
        # service is mutated too, so it belongs in the affected list
        affected=[describe(pod), describe(service)])