def testClusterScalingWithPreemptableJobs(self):
    """
    Test scaling simultaneously for a batch of preemptable and non-preemptable jobs.
    """
    config = Config()

    jobShape = Shape(20, 10, 10, 10, False)
    preemptableJobShape = Shape(20, 10, 10, 10, True)

    # Make defaults dummy values
    config.defaultMemory = 1
    config.defaultCores = 1
    config.defaultDisk = 1

    # Node parameters: one non-preemptable and one preemptable node type
    config.nodeTypes = [jobShape, preemptableJobShape]
    config.minNodes = [0, 0]
    config.maxNodes = [10, 10]

    # Algorithm parameters
    config.targetTime = defaultTargetTime
    config.betaInertia = 0.9
    config.scaleInterval = 3

    self._testClusterScaling(config, numJobs=100, numPreemptableJobs=100,
                             jobShape=jobShape)
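# NOTE: the positional Shape arguments above are assumed to follow Toil's
# Shape(wallTime, memory, cores, disk, preemptable) signature, so jobShape and
# preemptableJobShape describe identical resource requirements and differ only
# in the final preemptability flag.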
def testClusterScaling(self):
    """
    Test scaling for a batch of non-preemptable jobs and no preemptable jobs
    (makes debugging easier).
    """
    config = Config()

    # Make defaults dummy values
    config.defaultMemory = 1
    config.defaultCores = 1
    config.defaultDisk = 1

    # No preemptable nodes/jobs
    config.maxPreemptableNodes = []

    # Non-preemptable parameters
    config.nodeTypes = [Shape(20, 10, 10, 10, False)]
    config.minNodes = [0]
    config.maxNodes = [10]

    # Algorithm parameters
    config.targetTime = defaultTargetTime
    config.betaInertia = 0.1
    config.scaleInterval = 3

    self._testClusterScaling(config, numJobs=100, numPreemptableJobs=0,
                             jobShape=config.nodeTypes[0])
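# NOTE: betaInertia is assumed to act as an exponential smoothing factor on the
# scaler's node-count estimates, roughly
#   smoothed = betaInertia * previous + (1 - betaInertia) * current,
# so the 0.1 used here lets the estimate track demand closely, while the 0.9
# used in testClusterScalingWithPreemptableJobs damps oscillation much more
# heavily.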
def testClusterScalingMultipleNodeTypes(self):
    smallNode = Shape(20, 5, 10, 10, False)
    mediumNode = Shape(20, 10, 10, 10, False)
    largeNode = Shape(20, 20, 10, 10, False)

    numJobs = 100

    config = Config()

    # Make defaults dummy values
    config.defaultMemory = 1
    config.defaultCores = 1
    config.defaultDisk = 1

    # No preemptable nodes/jobs
    config.preemptableNodeTypes = []
    config.minPreemptableNodes = []
    config.maxPreemptableNodes = []

    # Make sure the node types don't have to be ordered
    config.nodeTypes = [largeNode, smallNode, mediumNode]
    config.minNodes = [0, 0, 0]
    config.maxNodes = [10, 10]  # test expansion of this list

    # Algorithm parameters
    config.targetTime = defaultTargetTime
    config.betaInertia = 0.1
    config.scaleInterval = 3

    mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
    clusterScaler = ScalerThread(mock, mock, config)
    clusterScaler.start()
    mock.start()

    try:
        # Add a batch of small jobs and a batch of medium jobs
        for _ in range(numJobs):
            mock.addJob(jobShape=smallNode)
        for _ in range(numJobs):
            mock.addJob(jobShape=mediumNode)

        # Add completed jobs whose observed memory usage falls between the
        # small and medium node sizes, so the scaler has runtime data to
        # estimate from
        for _ in range(1000):
            iJ = JobNode(jobStoreID=1,
                         requirements=dict(memory=random.choice(range(smallNode.memory,
                                                                      mediumNode.memory)),
                                           cores=mediumNode.cores,
                                           disk=largeNode.disk,
                                           preemptable=False),
                         command=None,
                         jobName='testClusterScaling',
                         unitName='')
            clusterScaler.addCompletedJob(iJ, random.choice(range(1, 10)))

        while mock.getNumberOfJobsIssued() > 0 or mock.getNumberOfNodes() > 0:
            logger.debug("%i nodes currently provisioned", mock.getNumberOfNodes())
            # Make sure there are no large nodes
            self.assertEqual(mock.getNumberOfNodes(nodeType=largeNode), 0)
            clusterScaler.check()
            time.sleep(0.5)
    finally:
        clusterScaler.shutdown()
        mock.shutDown()

    # Make sure jobs ran on both the small and medium node types
    self.assertTrue(mock.totalJobs > 0)
    self.assertTrue(mock.maxWorkers[smallNode] > 0)
    self.assertTrue(mock.maxWorkers[mediumNode] > 0)
    self.assertEqual(mock.maxWorkers[largeNode], 0)
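# NOTE: the large-node assertions above rely on the scaler's bin packing:
# every queued job fits on a small or medium node, so (assuming the packing
# prefers the smallest node shape that fits) no large nodes should ever be
# provisioned, even though largeNode is listed first in config.nodeTypes.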