コード例 #1（スコア: 0）
ファイル: clusterScalerTest.py プロジェクト: vallurumk/toil
 def testMaxNodes(self):
     """
     Configure a very aggressive scaler, hand it far more work than the
     cluster may hold, and verify the per-type node estimate is clamped
     at the configured maxNodes values.
     """
     self.config.targetTime = 1
     self.config.betaInertia = 0.0
     self.config.maxNodes = [2, 3]
     scaler = ClusterScaler(self.provisioner, self.leader, self.config)
     # 1000 preemptable jobs followed by 1000 non-preemptable ones,
     # all with identical resource requirements.
     jobShapes = []
     for preemptable in (True, False):
         jobShapes += [Shape(wallTime=3600,
                             cores=2,
                             memory=h2b('1G'),
                             disk=h2b('2G'),
                             preemptable=preemptable)] * 1000
     estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes,
                                                         defaultdict(int))
     # Both node types must be capped at their maxNodes entry.
     self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
     self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
コード例 #2（スコア: 0）
ファイル: clusterScalerTest.py プロジェクト: ratschlab/toil
 def testMinNodes(self):
     """
     With an empty job queue the scaler must still honour the configured
     minNodes floor for every node type.
     """
     self.config.betaInertia = 0.0
     self.config.minNodes = [2, 3]
     scaler = ClusterScaler(self.provisioner, self.leader, self.config)
     # No queued jobs at all; only the minNodes floor drives the estimate.
     counts = scaler.getEstimatedNodeCounts([], defaultdict(int))
     self.assertEqual(counts[r3_8xlarge], 2)
     self.assertEqual(counts[c4_8xlarge_preemptable], 3)
コード例 #3（スコア: 0）
ファイル: clusterScalerTest.py プロジェクト: mr-c/toil
    def testPreemptableDeficitResponse(self):
        """
        A preemptable-node deficit recorded by an earlier pass of the scaler
        loop must be compensated with non-preemptable nodes of the same
        instance type, in proportion to preemptableCompensation.
        """
        self.config.targetTime = 1
        self.config.betaInertia = 0.0
        self.config.maxNodes = [10, 10]
        # With compensation 0.5, every two "missing" preemptable nodes buy
        # one non-preemptable replacement.
        self.config.preemptableCompensation = 0.5
        # preemptableCompensation only applies when the same instance type
        # exists in both preemptable and non-preemptable flavours, so set
        # the node types up that way explicitly.
        self.config.nodeTypes = [c4_8xlarge_preemptable, c4_8xlarge]
        self.provisioner.setAutoscaledNodeTypes(
            [({nodeType}, None) for nodeType in self.config.nodeTypes])

        scaler = ClusterScaler(self.provisioner, self.leader, self.config)
        # Pretend an earlier run lost a spot bid, leaving the cluster short
        # five preemptable nodes of this type.
        scaler.preemptableNodeDeficit[c4_8xlarge] = 5
        # Queue exclusively preemptable work, so bin-packing alone would
        # request zero non-preemptable nodes.
        jobShapes = [Shape(wallTime=3600,
                           cores=2,
                           memory=h2b('1G'),
                           disk=h2b('2G'),
                           preemptable=True)] * 1000
        estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes,
                                                            defaultdict(int))
        # The preemptable estimate itself is irrelevant here; only the
        # deficit response matters: ceil(0.5 * 5) == 3 non-preemptable nodes.
        self.assertEqual(
            estimatedNodeCounts[self.provisioner.node_shapes_for_testing[1]],
            3)