def setUp(self):
    """Create a MetricList CLI instance, canned JSON fixture data, and
    redirect stdout so test methods can capture CLI output.

    Fix: the original assigned ``sys.stdout`` twice in a row —
    ``TextIOWrapper(BytesIO(), 'utf-8')`` was built and then immediately
    discarded by the ``StringIO.StringIO()`` assignment, leaking the
    wrapped buffer. The dead first assignment is removed; the effective
    runtime behavior (stdout == a StringIO instance) is unchanged.
    """
    self.cli = MetricList()
    # Canned API response used by tests that parse CLI JSON output.
    self.text = '''
               {
                   "result": [
                        {
                            "name": "BOUNDARY_MOCK_METRIC",
                            "defaultAggregate": "AVG",
                            "defaultResolutionMS": 1000,
                            "description": "BOUNDARY_MOCK_METRIC",
                            "displayName": "BOUNDARY_MOCK_METRIC",
                            "displayNameShort": "BOUNDARY_MOCK_METRIC",
                            "unit": "number",
                            "isDisabled": false,
                            "isBuiltin": false
                        }
                   ]
               }
               '''
    self.out = None
    self.json1 = None
    self.json2 = None

    # Save the real stdout so tearDown (elsewhere in the class) can
    # presumably restore it — TODO confirm a matching restore exists.
    self.old_stdout = sys.stdout
    # NOTE(review): StringIO.StringIO is the Python 2 module API; on
    # Python 3 this should be io.StringIO — confirm target interpreter
    # before changing, since the rest of the suite may rely on py2.
    sys.stdout = StringIO.StringIO()
def test_list_metric(self):
    """List all metrics via the CLI and verify the built-in ``CPU``
    metric is present with its expected catalog attributes."""
    runner = CLIRunner(MetricList())
    output = runner.get_output([])
    payload = json.loads(output)

    # Expected attribute values for the built-in CPU metric.
    expected = {
        'displayName': 'CPU Utilization',
        'displayNameShort': 'CPU',
        'unit': 'percent',
        'defaultAggregate': 'avg',
        'defaultResolutionMS': 1000,
        'description': 'Overall CPU utilization',
    }

    found = False
    for entry in payload['result']:
        if entry['name'] != 'CPU':
            continue
        found = True
        self.assertTrue(entry['isBuiltin'])
        self.assertFalse(entry['isDisabled'])
        for key, value in expected.items():
            self.assertEqual(value, entry[key])
    # The CPU metric must appear somewhere in the listing.
    self.assertTrue(found)