Code example #1
File: clusterScalerTest.py  Project: vallurumk/toil
    def testNoLaunchingIfDeltaAlreadyMet(self):
        """
        Check that the scaler doesn't try to launch "0" more instances if
        the delta was able to be met by unignoring nodes.
        """
        # We have only one node type for simplicity
        self.provisioner.nodeTypes = ['c4.8xlarge']
        self.provisioner.nodeShapes = [c4_8xlarge]
        scaler = ClusterScaler(self.provisioner, self.leader, self.config)
        # Pretend there is one ignored worker in the cluster
        self.provisioner.getProvisionedWorkers = MagicMock(return_value=[
            Node('127.0.0.1',
                 '127.0.0.1',
                 'testNode',
                 datetime.datetime.now().isoformat(),
                 nodeType='c4.8xlarge',
                 preemptable=True)
        ])
        scaler.ignoredNodes.add('127.0.0.1')
        # Exercise the updateClusterSize logic
        self.provisioner.addNodes = MagicMock()
        scaler.updateClusterSize({c4_8xlarge: 1})
        self.assertFalse(self.provisioner.addNodes.called,
                         "addNodes was called when no new nodes were needed")
        self.assertEqual(
            len(scaler.ignoredNodes), 0,
            "The scaler didn't unignore an ignored node when "
            "scaling up")
Code example #2
File: clusterScalerTest.py  Project: vallurumk/toil
    def testPreemptableDeficitIsSet(self):
        """
        Make sure that updateClusterSize sets the preemptable deficit if
        it can't launch preemptable nodes properly. That way, the
        deficit can be communicated to the next run of
        estimateNodeCount.
        """
        # Mock out addNodes. We want to pretend it had trouble
        # launching all 5 nodes, and could only launch 3.
        self.provisioner.addNodes = MagicMock(return_value=3)
        # Pretend there are no nodes in the cluster right now
        self.provisioner.getProvisionedWorkers = MagicMock(return_value=[])
        # In this case, we want to explicitly set up the config so
        # that we can have preemptable and non-preemptable nodes of
        # the same type. That is the only situation where
        # preemptableCompensation applies.
        self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
        self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
        self.provisioner.nodeShapes = [c4_8xlarge_preemptable, c4_8xlarge]
        scaler = ClusterScaler(self.provisioner, self.leader, self.config)
        estimatedNodeCounts = {c4_8xlarge_preemptable: 5, c4_8xlarge: 0}
        scaler.updateClusterSize(estimatedNodeCounts)
        self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 2)
        self.provisioner.addNodes.assert_called_once()

        # OK, now pretend this is a while later and the nodes actually
        # launched properly. The deficit should disappear.
        self.provisioner.addNodes = MagicMock(return_value=5)
        scaler.updateClusterSize(estimatedNodeCounts)
        self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 0)
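
The deficit bookkeeping that this second example verifies can be sketched roughly as follows: when the provisioner reports fewer launched preemptable nodes than were requested, the shortfall is recorded per node type so a later sizing pass can compensate, and it is cleared once a launch succeeds in full. The helper name record_preemptable_deficit and the plain dict are assumptions for illustration; the real accounting lives inside Toil's ClusterScaler.

def record_preemptable_deficit(deficits, node_type, requested, launched):
    """Record how many preemptable nodes of node_type failed to launch (sketch)."""
    deficits[node_type] = max(requested - launched, 0)

deficits = {}
# First pass: asked for 5 preemptable c4.8xlarge nodes, only 3 came up.
record_preemptable_deficit(deficits, 'c4.8xlarge', requested=5, launched=3)
assert deficits['c4.8xlarge'] == 2

# A later pass launches all 5, so the deficit disappears.
record_preemptable_deficit(deficits, 'c4.8xlarge', requested=5, launched=5)
assert deficits['c4.8xlarge'] == 0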