# Example #1
    def test_create_curl(self):
        """Check the curl output produced for a metric lookup by name."""
        metric_name = 'FOO'
        cli_runner = CLIRunner(self.cli)
        generated = cli_runner.get_output(['-n', metric_name, '-z'])
        CLITest.check_curl(self, self.cli, generated)
# Example #2
    def test_create_curl(self):
        """Check the curl output produced for a filter-id query."""
        the_id = 1024
        cli_runner = CLIRunner(self.cli)
        generated = cli_runner.get_output(['-i', str(the_id), '-z'])
        CLITest.check_curl(self, self.cli, generated)
    def test_create_alarm(self):
        """Create an alarm via the CLI and verify every returned field."""
        name = 'my-alarm' + CLITest.random_string(6)
        interval = 300000

        creator = CLIRunner(AlarmCreate())
        raw = creator.get_output([
            '-n', name, '-m', 'CPU', '-g', 'max', '-o', 'gt', '-v',
            '0.50', '-r',
            str(interval)
        ])
        alarm = json.loads(raw)

        # Field-by-field verification of the created alarm.
        self.assertEqual(interval, int(alarm['triggerInterval']))
        self.assertEqual([], alarm['actions'])
        self.assertEqual(1, int(alarm['familyId']))
        self.assertFalse(alarm['isDisabled'])
        self.assertEqual('CPU', alarm['metric'])
        self.assertEqual(name, alarm['name'])
        self.assertTrue(alarm['perHostNotify'])
        self.assertTrue(alarm['notifyClear'])
        self.assertTrue(alarm['notifySet'])
        self.assertEqual('max', alarm['triggerPredicate']['agg'])
        self.assertEqual('gt', alarm['triggerPredicate']['op'])
        self.assertEqual(0.5, alarm['triggerPredicate']['val'])
        self.assertEqual(3, int(alarm['typeId']))

        # Clean up through the API client so test alarms do not accumulate.
        self.api.alarm_delete(id=alarm['id'])
    def test_create_curl(self):
        """Verify the generated curl for a metric query given by name."""
        cli_runner = CLIRunner(self.cli)
        name = 'FOO'
        result = cli_runner.get_output(['-n', name, '-z'])
        CLITest.check_curl(self, self.cli, result)
    def test_create_metric(self):
        """Create a metric via the CLI, verify the echoed fields, delete it."""
        name = 'METRIC' + CLITest.random_string(6)
        display = 'Display Name ' + CLITest.random_string(20)
        short_display = 'Short Display Name' + CLITest.random_string(5)
        desc = CLITest.random_string(30)
        agg = 'avg'
        unit_name = 'number'
        resolution_ms = 60000
        is_disabled = False

        creator = CLIRunner(MetricCreate())
        raw = creator.get_output([
            '-n', name,
            '-d', display,
            '-s', short_display,
            '-i', desc,
            '-g', agg,
            '-r', str(resolution_ms),
            '-u', unit_name,
            '-x', str(is_disabled).lower()])
        metric = json.loads(raw)['result']

        self.assertEqual(name, metric['name'])
        self.assertEqual(display, metric['displayName'])
        self.assertEqual(short_display, metric['displayNameShort'])
        self.assertFalse(metric['isDisabled'])
        self.assertEqual(unit_name, metric['unit'])
        self.assertEqual(agg.upper(), metric['defaultAggregate'])
        self.assertEqual(resolution_ms, int(metric['defaultResolutionMS']))
        self.assertEqual(desc, metric['description'])

        # Remove the metric so repeated runs do not pile up test metrics.
        CLIRunner(MetricDelete()).get_output(['-n', name])
    def test_create_curl(self):
        """Check the curl output for a filter lookup by name."""
        cli_runner = CLIRunner(self.cli)
        name = 'My-Filter'
        result = cli_runner.get_output(['-n', name, '-z'])
        CLITest.check_curl(self, self.cli, result)
    def test_create_curl(self):
        """Verify the generated curl for a named filter query."""
        name = 'My-Filter'
        cli_runner = CLIRunner(self.cli)
        CLITest.check_curl(self, self.cli,
                           cli_runner.get_output(['-n', name, '-z']))
    def test_create_curl(self):
        """Verify the generated curl for an alarm-id query."""
        the_id = 1024
        cli_runner = CLIRunner(self.cli)
        CLITest.check_curl(self, self.cli,
                           cli_runner.get_output(['-i', str(the_id), '-z']))
    def test_list_alarm(self):
        """List alarms and confirm at least one alarm exists."""
        lister = CLIRunner(AlarmList())
        raw = lister.get_output([])
        alarms = json.loads(raw)['result']
        self.assertGreaterEqual(len(alarms), 1)
# Example #10
    def test_list_alarm(self):
        """Fetch the alarm list and assert it is non-empty."""
        cli_runner = CLIRunner(AlarmList())
        payload = json.loads(cli_runner.get_output([]))
        self.assertGreaterEqual(len(payload['result']), 1)
# Example #11
    def test_get_actions_installed(self):
        """List installed actions and confirm at least one is present."""
        cli_runner = CLIRunner(ActionInstalled())
        raw = cli_runner.get_output([])
        actions = json.loads(raw)['result']
        self.assertGreaterEqual(len(actions), 1)
    def test_create_curl(self):
        """Check the curl output for boolean enabled/custom flags."""
        cli_runner = CLIRunner(self.cli)
        enabled_flag = True
        custom_flag = True
        args = ['-b', str(enabled_flag).lower(),
                '-c', str(custom_flag).lower(),
                '-z']
        CLITest.check_curl(self, self.cli, cli_runner.get_output(args))
    def test_create_curl(self):
        """Verify the curl output with enabled and custom flags set."""
        cli_runner = CLIRunner(self.cli)
        enabled_arg = str(True).lower()
        custom_arg = str(True).lower()
        output = cli_runner.get_output(
            ['-b', enabled_arg, '-c', custom_arg, '-z'])
        CLITest.check_curl(self, self.cli, output)
    def test_get_hostgroup(self):
        """Create a hostgroup, fetch it by id, verify fields, then delete it."""
        name = 'SAMPLE' + CLITest.random_string(6)
        sources = 'FOO, BAR'

        creator = CLIRunner(HostgroupCreate())
        created = json.loads(creator.get_output(['-n', name, '-s', sources]))
        hostgroup = created['result']

        self.assertEqual(name, hostgroup['name'])
        self.assertFalse(hostgroup['system'])
        self.assertTrue(CLITest.is_int(hostgroup['id']))
        hostgroup_id = int(hostgroup['id'])

        # Fetch the same hostgroup back by id and re-verify the fields.
        getter = CLIRunner(HostgroupGet())
        fetched = json.loads(getter.get_output(['-i', str(hostgroup_id)]))
        hostgroup = fetched['result']

        self.assertEqual(name, hostgroup['name'])
        self.assertFalse(hostgroup['system'])
        self.assertTrue(CLITest.is_int(hostgroup['id']))
        self.assertItemsEqual(split(sources, ','), hostgroup['hostnames'])

        # Delete and confirm the CLI reports success.
        deleter = CLIRunner(HostgroupDelete())
        deleted = json.loads(deleter.get_output(['-i', str(hostgroup_id)]))
        hostgroup = deleted['result']
        self.assertTrue(hostgroup['success'])
    def test_delete_metric(self):
        """Create a metric, update every mutable field, verify the update,
        then delete the metric and assert the delete succeeded.

        Fixed: removed a duplicated assertion on metric['description'].
        NOTE(review): despite its name, this test primarily exercises
        MetricUpdate; the delete is the final cleanup/assert step.
        """
        runner_create = CLIRunner(MetricCreate())
        metric_name = 'METRIC' + CLITest.random_string(6)
        display_name = 'Display Name ' + CLITest.random_string(20)
        display_name_short = 'Short Display Name' + CLITest.random_string(5)
        description = CLITest.random_string(30)
        aggregate = 'avg'
        unit = 'number'
        resolution = 60000
        disabled = False

        create = runner_create.get_output(['-n', metric_name,
                                           '-d', display_name,
                                           '-s', display_name_short,
                                           '-i', description,
                                           '-g', aggregate,
                                           '-r', str(resolution),
                                           '-u', unit,
                                           '-x', str(disabled).lower()])
        metric_create = json.loads(create)
        metric = metric_create['result']

        # Fresh values for the update pass.
        display_name = 'Display Name ' + CLITest.random_string(20)
        display_name_short = 'Short Display Name' + CLITest.random_string(5)
        description = CLITest.random_string(30)
        aggregate = 'max'
        unit = 'percent'
        resolution = 30000
        disabled = True

        runner_update = CLIRunner(MetricUpdate())
        update = runner_update.get_output(['-n', metric_name,
                                           '-d', display_name,
                                           '-s', display_name_short,
                                           '-i', description,
                                           '-g', aggregate,
                                           '-r', str(resolution),
                                           '-u', unit,
                                           '-x', str(disabled).lower()])

        metric_update = json.loads(update)
        metric = metric_update['result']

        self.assertEqual(metric_name, metric['name'])
        self.assertEqual(display_name, metric['displayName'])
        self.assertEqual(display_name_short, metric['displayNameShort'])
        self.assertEqual(description, metric['description'])
        # NOTE(review): 'disabled' was set to True for the update, yet this
        # expects isDisabled to be False — presumably the update does not
        # toggle the flag; confirm against the API before changing this.
        self.assertFalse(metric['isDisabled'])
        self.assertEqual(unit, metric['unit'])
        self.assertEqual(aggregate, metric['defaultAggregate'].lower())
        self.assertEqual(resolution, int(metric['defaultResolutionMS']))

        runner_delete = CLIRunner(MetricDelete())
        delete = runner_delete.get_output(['-n', metric_name])
        result = json.loads(delete)
        self.assertTrue(result['result']['success'])
    def test_create_curl(self):
        """Check the curl output for a filter update with id, name, sources."""
        cli_runner = CLIRunner(self.cli)
        the_id = 1024
        name = "FOO"
        args = ['-i', str(the_id), '-n', name,
                '-s', 'source1,source2,source3', '-z']
        CLITest.check_curl(self, self.cli, cli_runner.get_output(args))
# Example #17
    def test_create_curl(self):
        """Check the curl output for a measurement-create command."""
        cli_runner = CLIRunner(self.cli)
        metric_name = 'CPU'
        value = 0.5
        when = 1452643455
        args = ['-n', metric_name, '-m', str(value),
                '-s', 'source1', '-d', str(when), '-z']
        CLITest.check_curl(self, self.cli, cli_runner.get_output(args))
    def test_create_curl(self):
        """Verify the curl output for adding one measurement."""
        cli_runner = CLIRunner(self.cli)
        name = 'CPU'
        reading = 0.5
        epoch = 1452643455
        output = cli_runner.get_output(
            ['-n', name, '-m', str(reading),
             '-s', 'source1', '-d', str(epoch), '-z'])
        CLITest.check_curl(self, self.cli, output)
# Example #19
    def test_get_metric(self):
        """Fetch the built-in CPU metric and verify its canonical fields.

        Fixed: removed a duplicated assertion on metric_get['description'].
        """
        runner_create = CLIRunner(MetricGet())

        get = runner_create.get_output(['-n', 'CPU'])
        metric_get = json.loads(get)

        self.assertEqual('CPU', metric_get['name'])
        self.assertEqual('CPU Utilization', metric_get['displayName'])
        self.assertEqual('CPU', metric_get['displayNameShort'])
        self.assertEqual('Overall CPU utilization', metric_get['description'])
        self.assertTrue(metric_get['isBuiltin'])
        self.assertFalse(metric_get['isDisabled'])
        self.assertEqual('percent', metric_get['unit'])
        self.assertEqual('avg', metric_get['defaultAggregate'])
        self.assertEqual(1000, metric_get['defaultResolutionMS'])
    def test_get_metric(self):
        """Fetch the CPU metric and check each of its built-in fields.

        Fixed: the description was asserted twice; the duplicate is removed.
        """
        runner_create = CLIRunner(MetricGet())

        get = runner_create.get_output(['-n', 'CPU'])
        metric_get = json.loads(get)

        self.assertEqual('CPU', metric_get['name'])
        self.assertEqual('CPU Utilization', metric_get['displayName'])
        self.assertEqual('CPU', metric_get['displayNameShort'])
        self.assertEqual('Overall CPU utilization', metric_get['description'])
        self.assertTrue(metric_get['isBuiltin'])
        self.assertFalse(metric_get['isDisabled'])
        self.assertEqual('percent', metric_get['unit'])
        self.assertEqual('avg', metric_get['defaultAggregate'])
        self.assertEqual(1000, metric_get['defaultResolutionMS'])
# Example #21
    def test_update_alarm(self):
        """Create an alarm, update every field, verify the update, delete it.

        Fixed: the isDisabled check called assertFalse(x, y), where y is only
        the failure message, so alarm['isDisabled'] was never checked —
        replaced with an equality assertion.
        """
        alarm_name = 'my-alarm-' + CLITest.random_string(6)
        metric_name = 'CPU'
        note = CLITest.random_string(50)
        aggregate = 'max'
        op = 'gt'
        value = 0.75
        trigger_interval = 900000
        is_disabled = True
        runner_create = CLIRunner(AlarmCreate())
        create = runner_create.get_output([
            '-n', alarm_name, '-m', metric_name, '-d', note, '-g', aggregate,
            '-o', op, '-v',
            str(value), '-r',
            str(trigger_interval), '-x',
            str(is_disabled).lower()
        ])
        alarm_create = json.loads(create)

        # Fresh values for the update pass.
        note = CLITest.random_string(50)
        aggregate = 'max'
        op = 'gt'
        value = 0.75
        trigger_interval = 300000
        is_disabled = False

        runner_update = CLIRunner(AlarmUpdate())
        update = runner_update.get_output([
            '-i',
            str(int(alarm_create['id'])), '-n', alarm_name, '-m', metric_name,
            '-d', note, '-g', aggregate, '-o', op, '-v',
            str(value), '-r',
            str(trigger_interval), '-x',
            str(is_disabled).lower()
        ])
        alarm = json.loads(update)

        self.assertEqual(trigger_interval, alarm['triggerInterval'])
        self.assertEqual(1, alarm['familyId'])
        self.assertEqual(is_disabled, alarm['isDisabled'])
        self.assertEqual(metric_name, alarm['metric'])
        self.assertEqual(alarm_name, alarm['name'])
        self.assertEqual(aggregate, alarm['triggerPredicate']['agg'])
        self.assertEqual(op, alarm['triggerPredicate']['op'])
        self.assertEqual(value, alarm['triggerPredicate']['val'])
        self.assertEqual(3, int(alarm['typeId']))
        self.assertEqual(note, alarm['note'])

        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm['id'])])
    def test_create_curl(self):
        """Check the curl output for an alarm-create command."""
        cli_runner = CLIRunner(self.cli)

        name = 'my-curl'
        metric_name = 'CPU'
        agg = 'min'
        op = 'lt'
        threshold = 0.5
        interval = 300000
        enabled_flag = False

        args = ['-n', name, '-m', metric_name, '-g', agg, '-o', op,
                '-v', str(threshold), '-r', str(interval),
                '-x', str(enabled_flag).lower(), '-z']
        CLITest.check_curl(self, self.cli, cli_runner.get_output(args))
    def test_create_filter(self):
        """Create a hostgroup filter, verify name and sources, then delete it.

        Fixed: the local 'filter' shadowed the builtin; renamed.
        """
        runner_create = CLIRunner(HostgroupCreate())
        filter_name = 'Filter' + CLITest.random_string(6)
        sources = 'foo,bar,red,green'

        create = runner_create.get_output(['-n', filter_name,
                                           '-s', sources])
        filter_create = json.loads(create)
        filter_result = filter_create['result']

        self.assertEqual(filter_name, filter_result['name'])
        self.assertItemsEqual(split(sources, ','), filter_result['hostnames'])

        filter_id = filter_result['id']

        runner_delete = CLIRunner(HostgroupDelete())
        delete = runner_delete.get_output(['-i', str(filter_id)])
        delete_result = json.loads(delete)
        self.assertTrue(delete_result['result']['success'])
    def test_delete_alarm(self):
        """Create a throwaway alarm, then delete it by id."""
        alarm_name = 'ALARM_DELETE_TEST' + CLITest.random_string(6)
        metric_name = 'CPU'
        interval = 60000
        agg = 'sum'
        op = 'gt'
        value = '0.80'
        description = CLITest.random_string(20)

        creator = CLIRunner(AlarmCreate())
        raw = creator.get_output(['-n', alarm_name,
                                  '-m', metric_name,
                                  '-g', agg,
                                  '-o', op,
                                  '-v', str(value),
                                  '-r', str(interval),
                                  '-d', description])
        alarm = json.loads(raw)
        CLIRunner(AlarmDelete()).get_output(['-i', str(alarm['id'])])
# Example #25
    def test_create_curl(self):
        """Check the curl output for a metric-create command.

        Fixed: removed an unused ``CLIRunner(self.cli)`` local that was never
        referenced (construction has no visible side effects in this file —
        confirm CLIRunner's constructor does no I/O).
        """
        runner_create = CLIRunner(MetricCreate())
        metric_name = 'METRIC'
        display_name = 'Display Name'
        display_name_short = 'Short Display Name'
        description = 'My Description'
        aggregate = 'avg'
        unit = 'number'
        resolution = 60000
        disabled = False

        curl = runner_create.get_output([
            '-n', metric_name, '-d', display_name, '-s', display_name_short,
            '-i', description, '-g', aggregate, '-r',
            str(resolution), '-u', unit, '-x',
            str(disabled).lower(), '-z'
        ])
        CLITest.check_curl(self, self.cli, curl)
    def test_create_metric_batch(self):
        """Batch-import metrics from a JSON fixture, then export and verify.

        Fixed: removed a leftover debug ``print`` of the fixture path and the
        unused binding of the import command's output.
        """
        filename = os.path.join(os.path.dirname(__file__),
                                'metric_import_data.json')

        runner_create = CLIRunner(MetricCreateBatch())
        runner_create.get_output(['-f', filename])

        runner_export = CLIRunner(MetricExport())
        export = runner_export.get_output(['-p', 'TEST_METRIC_IMPORT'])
        metrics = json.loads(export)

        MetricTest.metric_assert(self, metrics['TEST_METRIC_IMPORT_A'],
                                 'My Number of Files', 'My Files',
                                 'My Number Of Files', 'number', 'SUM', 2000,
                                 False)
    def test_list_metric(self):
        """List all metrics and verify the built-in CPU metric appears."""
        lister = CLIRunner(MetricList())
        metrics = json.loads(lister.get_output([]))['result']

        cpu_seen = False
        for entry in metrics:
            if entry['name'] != 'CPU':
                continue
            cpu_seen = True
            self.assertEqual('CPU Utilization', entry['displayName'])
            self.assertEqual('CPU', entry['displayNameShort'])
            self.assertTrue(entry['isBuiltin'])
            self.assertFalse(entry['isDisabled'])
            self.assertEqual('percent', entry['unit'])
            self.assertEqual('avg', entry['defaultAggregate'])
            self.assertEqual(1000, entry['defaultResolutionMS'])
            self.assertEqual('Overall CPU utilization', entry['description'])

        self.assertTrue(cpu_seen)
    def test_list_metric(self):
        """Confirm the CPU metric is in the full metric list with its
        canonical built-in field values."""
        runner = CLIRunner(MetricList())
        listing = json.loads(runner.get_output([]))

        matched = False
        for item in listing['result']:
            if item['name'] == 'CPU':
                matched = True
                self.assertEqual('CPU Utilization', item['displayName'])
                self.assertEqual('CPU', item['displayNameShort'])
                self.assertTrue(item['isBuiltin'])
                self.assertFalse(item['isDisabled'])
                self.assertEqual('percent', item['unit'])
                self.assertEqual('avg', item['defaultAggregate'])
                self.assertEqual(1000, item['defaultResolutionMS'])
                self.assertEqual('Overall CPU utilization',
                                 item['description'])

        self.assertTrue(matched)
    def test_create_curl(self):
        """Check the curl output for an alarm-update command."""
        cli_runner = CLIRunner(self.cli)

        the_id = 1024
        name = 'my-curl'
        metric_name = 'CPU'
        agg = 'min'
        op = 'lt'
        threshold = 0.5
        interval = 300000
        enabled_flag = False

        args = ['-i', str(the_id),
                '-n', name,
                '-m', metric_name,
                '-g', agg,
                '-o', op,
                '-v', str(threshold),
                '-r', str(interval),
                '-x', str(enabled_flag).lower(),
                '-z']
        CLITest.check_curl(self, self.cli, cli_runner.get_output(args))
# Example #30
    def test_get_plugin(self):
        """Fetch the httpcheck plugin and verify the expected keys exist."""
        cli_runner = CLIRunner(PluginGet())
        name = 'httpcheck'

        raw = cli_runner.get_output(['-n', name])
        plugin = json.loads(raw)['result']

        self.assertEqual(name, plugin['name'])
        # All of these keys are expected in a plugin description.
        for key in ('download', 'repoUrl', 'description', 'paramSchema',
                    'paramArray', 'postExtract', 'command', 'ignore',
                    'icon', 'dashboards', 'version', 'metrics',
                    'metricDefinitions'):
            self.assertTrue(key in plugin)
    def test_get_plugin(self):
        """Look up the httpcheck plugin and check its description fields."""
        plugin_name = 'httpcheck'
        getter = CLIRunner(PluginGet())

        payload = json.loads(getter.get_output(['-n', plugin_name]))
        plugin = payload['result']

        expected_keys = ['download', 'repoUrl', 'description', 'paramSchema',
                         'paramArray', 'postExtract', 'command', 'ignore',
                         'icon', 'dashboards', 'version', 'metrics',
                         'metricDefinitions']
        # Every key in the list must be present in the plugin description.
        for expected in expected_keys:
            self.assertTrue(expected in plugin)
        self.assertEqual(plugin_name, plugin['name'])
    def test_create_curl(self):
        """Check the curl output for a metric-create command.

        Fixed: removed an unused ``CLIRunner(self.cli)`` local that was never
        referenced (construction has no visible side effects in this file —
        confirm CLIRunner's constructor does no I/O).
        """
        runner_create = CLIRunner(MetricCreate())
        metric_name = 'METRIC'
        display_name = 'Display Name'
        display_name_short = 'Short Display Name'
        description = 'My Description'
        aggregate = 'avg'
        unit = 'number'
        resolution = 60000
        disabled = False

        curl = runner_create.get_output(['-n', metric_name,
                                         '-d', display_name,
                                         '-s', display_name_short,
                                         '-i', description,
                                         '-g', aggregate,
                                         '-r', str(resolution),
                                         '-u', unit,
                                         '-x', str(disabled).lower(),
                                         '-z'])
        CLITest.check_curl(self, self.cli, curl)
    def test_delete_metric(self):
        """Create a metric and then delete it again by name."""
        name = 'METRIC' + CLITest.random_string(6)
        display = 'Display Name ' + CLITest.random_string(20)
        short_display = 'Short Display Name' + CLITest.random_string(5)
        desc = CLITest.random_string(30)
        agg = 'avg'
        unit_name = 'number'
        resolution_ms = 60000
        is_disabled = False

        creator = CLIRunner(MetricCreate())
        raw = creator.get_output([
            '-n', name,
            '-d', display,
            '-s', short_display,
            '-i', desc,
            '-g', agg,
            '-r', str(resolution_ms),
            '-u', unit_name,
            '-x', str(is_disabled).lower()])
        metric = json.loads(raw)['result']

        # Delete by name; the delete output is not asserted in this test.
        CLIRunner(MetricDelete()).get_output(['-n', name])
# Example #34
    def test_get_alarm(self):
        """Create an alarm, fetch it by id, compare fields, then delete it.

        Fixed: assertFalse(a, b) treats the second argument as the failure
        message, so the two isDisabled values were never compared — replaced
        with an equality assertion.
        """
        runner_create = CLIRunner(AlarmCreate())

        name = 'my-alarm'
        metric = 'CPU'
        aggregate = 'max'
        operation = 'gt'
        threshold = 0.50
        trigger_interval = 300000

        create = runner_create.get_output([
            '-n', name, '-m', metric, '-g', aggregate, '-o', operation, '-v',
            str(threshold), '-r',
            str(trigger_interval)
        ])
        alarm_create = json.loads(create)

        runner_get = CLIRunner(AlarmGet())
        get = runner_get.get_output(['-i', str(alarm_create['id'])])
        alarm_get = json.loads(get)['result']

        self.assertEqual(int(alarm_create['triggerInterval']),
                         alarm_get['triggerInterval'])
        self.assertEqual(alarm_create['actions'], alarm_get['actions'])
        self.assertEqual(int(alarm_create['familyId']),
                         int(alarm_get['familyId']))
        self.assertEqual(alarm_create['isDisabled'], alarm_get['isDisabled'])
        self.assertEqual(alarm_create['metric'], alarm_get['metric'])
        self.assertEqual(alarm_create['name'], alarm_get['name'])
        self.assertTrue(alarm_get['perHostNotify'])
        self.assertEqual(alarm_create['triggerPredicate']['agg'],
                         alarm_get['triggerPredicate']['agg'])
        self.assertEqual(alarm_create['triggerPredicate']['op'],
                         alarm_get['triggerPredicate']['op'])
        self.assertEqual(alarm_create['triggerPredicate']['val'],
                         alarm_get['triggerPredicate']['val'])
        self.assertEqual(int(alarm_create['typeId']), int(alarm_get['typeId']))

        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm_get['id'])])
    def test_search_filter(self):
        """Create a filter, search for it by name, verify, then delete it.

        Fixed: the local 'filter' shadowed the builtin; renamed.
        """
        runner_create = CLIRunner(HostgroupCreate())
        filter_name = 'Filter' + CLITest.random_string(6)
        sources = 'foo,bar,red,green'

        create = runner_create.get_output(['-n', filter_name, '-s', sources])
        filter_create = json.loads(create)
        filter_result = filter_create['result']
        filter_id = filter_result['id']

        runner_search = CLIRunner(HostgroupSearch())
        search = runner_search.get_output(['-n', filter_name])
        filter_search = json.loads(search)
        filter_result = filter_search['result']

        self.assertEqual(filter_name, filter_result[0]['name'])
        self.assertItemsEqual(split(sources, ','),
                              filter_result[0]['hostnames'])

        runner_delete = CLIRunner(HostgroupDelete())
        delete = runner_delete.get_output(['-i', str(filter_id)])
        delete_result = json.loads(delete)
        self.assertTrue(delete_result['result']['success'])
# Example #36
    def test_search_alarm(self):
        """Create an alarm, search for it by name, verify the first hit,
        then delete it by id.
        """
        alarm_name = 'alarm_test' + CLITest.random_string(6)
        metric_name = 'CPU'
        aggregate = 'max'
        op = 'gt'
        value = 0.50
        trigger_interval = 300000
        note = CLITest.random_string(20)
        # NOTE(review): this value is passed to '-x' below, which the other
        # alarm tests in this file use for an is-disabled flag — the name
        # 'enabled' may be inverted; confirm against the AlarmCreate CLI.
        enabled = True
        runner_create = CLIRunner(AlarmCreate())

        create = runner_create.get_output([
            '-n', alarm_name, '-m', metric_name, '-g', aggregate, '-o', op,
            '-v',
            str(value), '-r',
            str(trigger_interval), '-d', note, '-x',
            str(enabled).lower()
        ])

        runner_search = CLIRunner(AlarmSearch())
        search = runner_search.get_output(['-n', alarm_name])
        result_search = json.loads(search)
        alarm = result_search['result'][0]
        # Search results expose the interval as 'interval' (the create and
        # update responses elsewhere in this file use 'triggerInterval').
        self.assertEqual(trigger_interval, alarm['interval'])
        self.assertItemsEqual([], alarm['actions'])
        self.assertEqual(1, int(alarm['familyId']))
        # NOTE(review): assertFalse(False, msg) always passes — the second
        # argument is only the failure message, so alarm['isDisabled'] is
        # never actually checked here. Left as-is because the alarm is
        # created with '-x true' and a real check could fail; confirm the
        # intended semantics before fixing.
        self.assertFalse(False, alarm['isDisabled'])
        self.assertEqual(metric_name, alarm['metric'])
        self.assertEqual(alarm_name, alarm['name'])
        self.assertTrue(alarm['perHostNotify'])
        self.assertEqual(aggregate, alarm['triggerPredicate']['agg'])
        self.assertEqual(op, alarm['triggerPredicate']['op'])
        self.assertEqual(value, alarm['triggerPredicate']['val'])
        self.assertEqual(3, int(alarm['typeId']))

        # Clean up the alarm created above.
        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm['id'])])
    def test_update_alarm(self):
        """Create an alarm, update all of its fields, verify, then delete.

        Fixed: the isDisabled check used assertFalse(x, y), where y is only
        the failure message — replaced with an equality assertion.
        """
        alarm_name = 'my-alarm-' + CLITest.random_string(6)
        metric_name = 'CPU'
        note = CLITest.random_string(50)
        aggregate = 'max'
        op = 'gt'
        value = 0.75
        trigger_interval = 900000
        is_disabled = True
        runner_create = CLIRunner(AlarmCreate())
        create = runner_create.get_output(['-n', alarm_name,
                                           '-m', metric_name,
                                           '-d', note,
                                           '-g', aggregate,
                                           '-o', op,
                                           '-v', str(value),
                                           '-r', str(trigger_interval),
                                           '-x', str(is_disabled).lower()])
        alarm_create = json.loads(create)

        # Fresh values for the update pass.
        note = CLITest.random_string(50)
        aggregate = 'max'
        op = 'gt'
        value = 0.75
        trigger_interval = 300000
        is_disabled = False

        runner_update = CLIRunner(AlarmUpdate())
        update = runner_update.get_output(['-i', str(int(alarm_create['id'])),
                                           '-n', alarm_name,
                                           '-m', metric_name,
                                           '-d', note,
                                           '-g', aggregate,
                                           '-o', op,
                                           '-v', str(value),
                                           '-r', str(trigger_interval),
                                           '-x', str(is_disabled).lower()])
        alarm = json.loads(update)

        self.assertEqual(trigger_interval, alarm['triggerInterval'])
        self.assertEqual(1, alarm['familyId'])
        self.assertEqual(is_disabled, alarm['isDisabled'])
        self.assertEqual(metric_name, alarm['metric'])
        self.assertEqual(alarm_name, alarm['name'])
        self.assertEqual(aggregate, alarm['triggerPredicate']['agg'])
        self.assertEqual(op, alarm['triggerPredicate']['op'])
        self.assertEqual(value, alarm['triggerPredicate']['val'])
        self.assertEqual(3, int(alarm['typeId']))
        self.assertEqual(note, alarm['note'])

        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm['id'])])
# Example #38
    def test_delete_filter(self):
        """Create a hostgroup filter and then delete it, asserting success.

        Fixed: the local 'filter' shadowed the builtin; renamed.
        """
        runner_create = CLIRunner(HostgroupCreate())
        filter_name = 'Filter' + CLITest.random_string(6)
        sources = 'foo,bar,red,green'

        create = runner_create.get_output(['-n', filter_name, '-s', sources])
        filter_create = json.loads(create)
        filter_result = filter_create['result']
        filter_id = filter_result['id']

        runner_delete = CLIRunner(HostgroupDelete())
        delete = runner_delete.get_output(['-i', str(filter_id)])
        delete_result = json.loads(delete)
        self.assertTrue(delete_result['result']['success'])
# Example #39
    def test_create_metric_batch(self):
        """Import metrics from a JSON fixture, then export and verify one.

        Fixed: removed a leftover debug ``print`` of the fixture path and
        the unused binding of the import command's output.
        """
        filename = os.path.join(os.path.dirname(__file__),
                                'metric_import_data.json')

        runner_create = CLIRunner(MetricCreateBatch())
        runner_create.get_output(['-f', filename])

        runner_export = CLIRunner(MetricExport())
        export = runner_export.get_output(['-p', 'TEST_METRIC_IMPORT'])
        metrics = json.loads(export)

        MetricTest.metric_assert(self, metrics['TEST_METRIC_IMPORT_A'],
                                 'My Number of Files', 'My Files',
                                 'My Number Of Files', 'number', 'SUM', 2000,
                                 False)
    def test_delete_alarm(self):
        """Create an alarm through the CLI, then delete it by its id."""
        alarm_name = 'ALARM_DELETE_TEST' + CLITest.random_string(6)
        description = CLITest.random_string(20)

        created = json.loads(CLIRunner(AlarmCreate()).get_output([
            '-n', alarm_name,
            '-m', 'CPU',
            '-g', 'sum',
            '-o', 'gt',
            '-v', '0.80',
            '-r', '60000',
            '-d', description,
        ]))

        CLIRunner(AlarmDelete()).get_output(['-i', str(created['id'])])
Beispiel #41
0
    def test_delete_metric(self):
        """Create a metric through the CLI, then delete it by name."""
        name = 'METRIC' + CLITest.random_string(6)
        create_args = [
            '-n', name,
            '-d', 'Display Name ' + CLITest.random_string(20),
            '-s', 'Short Display Name' + CLITest.random_string(5),
            '-i', CLITest.random_string(30),
            '-g', 'avg',
            '-r', '60000',
            '-u', 'number',
            '-x', 'false',
        ]

        # Parse the create response and pull out its result payload, as the
        # original did, so a malformed response still fails the test here.
        created = json.loads(CLIRunner(MetricCreate()).get_output(create_args))
        created['result']

        CLIRunner(MetricDelete()).get_output(['-n', name])
    def test_search_alarm(self):
        """Create an alarm, search for it by name, verify its fields, delete it.

        Fixes: ``assertItemsEqual`` is Python 2 only (``assertCountEqual`` is
        the Python 3 replacement), and ``assertFalse(False, ...)`` passed
        unconditionally because the second argument is only a failure message.
        """
        alarm_name = 'alarm_test' + CLITest.random_string(6)
        metric_name = 'CPU'
        aggregate = 'max'
        op = 'gt'
        value = 0.50
        trigger_interval = 300000
        note = CLITest.random_string(20)
        enabled = True
        runner_create = CLIRunner(AlarmCreate())

        create = runner_create.get_output(['-n', alarm_name,
                                           '-m', metric_name,
                                           '-g', aggregate,
                                           '-o', op,
                                           '-v', str(value),
                                           '-r', str(trigger_interval),
                                           '-d', note,
                                           '-x', str(enabled).lower()])

        runner_search = CLIRunner(AlarmSearch())
        search = runner_search.get_output(['-n', alarm_name])
        result_search = json.loads(search)
        alarm = result_search['result'][0]
        self.assertEqual(trigger_interval, alarm['interval'])
        self.assertCountEqual([], alarm['actions'])
        self.assertEqual(1, int(alarm['familyId']))
        # NOTE(review): the original assertFalse(False, msg) was a no-op; this
        # asserts the alarm is enabled — confirm the '-x' flag's polarity
        # (other tests pass a 'disabled' value to '-x').
        self.assertFalse(alarm['isDisabled'])
        self.assertEqual(metric_name, alarm['metric'])
        self.assertEqual(alarm_name, alarm['name'])
        self.assertTrue(alarm['perHostNotify'])
        self.assertEqual(aggregate, alarm['triggerPredicate']['agg'])
        self.assertEqual(op, alarm['triggerPredicate']['op'])
        self.assertEqual(value, alarm['triggerPredicate']['val'])
        self.assertEqual(3, int(alarm['typeId']))

        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm['id'])])
    def test_get_alarm(self):
        """Create an alarm, fetch it by id, and verify get matches create.

        Fix: ``assertFalse(a, b)`` treats ``b`` as the failure message, so the
        original never compared ``isDisabled`` between create and get; use
        ``assertEqual`` like every other field comparison here.
        """
        runner_create = CLIRunner(AlarmCreate())

        name = 'my-alarm'
        metric = 'CPU'
        aggregate = 'max'
        operation = 'gt'
        threshold = 0.50
        trigger_interval = 300000

        create = runner_create.get_output(['-n', name,
                                           '-m', metric,
                                           '-g', aggregate,
                                           '-o', operation,
                                           '-v', str(threshold),
                                           '-r', str(trigger_interval)])
        alarm_create = json.loads(create)

        runner_get = CLIRunner(AlarmGet())
        get = runner_get.get_output(['-i', str(alarm_create['id'])])
        alarm_get = json.loads(get)['result']

        self.assertEqual(int(alarm_create['triggerInterval']),
                         alarm_get['triggerInterval'])
        self.assertEqual(alarm_create['actions'], alarm_get['actions'])
        self.assertEqual(int(alarm_create['familyId']),
                         int(alarm_get['familyId']))
        self.assertEqual(alarm_create['isDisabled'], alarm_get['isDisabled'])
        self.assertEqual(alarm_create['metric'], alarm_get['metric'])
        self.assertEqual(alarm_create['name'], alarm_get['name'])
        self.assertTrue(alarm_get['perHostNotify'])
        self.assertEqual(alarm_create['triggerPredicate']['agg'],
                         alarm_get['triggerPredicate']['agg'])
        self.assertEqual(alarm_create['triggerPredicate']['op'],
                         alarm_get['triggerPredicate']['op'])
        self.assertEqual(alarm_create['triggerPredicate']['val'],
                         alarm_get['triggerPredicate']['val'])
        self.assertEqual(int(alarm_create['typeId']), int(alarm_get['typeId']))

        runner_delete = CLIRunner(AlarmDelete())
        delete = runner_delete.get_output(['-i', str(alarm_get['id'])])
 def test_cli_curl(self):
     """Verify that '-z' emits a well-formed curl command for this CLI.

     NOTE(review): this method is indented with one space, unlike the
     four-space siblings — confirm it is inside the intended class.
     """
     runner = CLIRunner(self.cli)
     # '-n My-Relay' supplies the name argument required by the command.
     curl = runner.get_output(['-n', 'My-Relay', '-z'])
     CLITest.check_curl(self, self.cli, curl)
    def test_create_curl(self):
        """Verify the '-z' flag produces a well-formed curl command."""
        curl_output = CLIRunner(self.cli).get_output(['-z'])
        CLITest.check_curl(self, self.cli, curl_output)
Beispiel #46
0
    def test_create_curl(self):
        """Verify '-z' curl generation when a name argument is supplied."""
        cli_runner = CLIRunner(self.cli)
        output = cli_runner.get_output(['-n', 'foo', '-z'])
        CLITest.check_curl(self, self.cli, output)
    def test_export_metric(self):
        """Export all metrics and verify the fields of the system defaults.

        Each expected entry is checked with the same resolution (1000) and
        disabled flag (False); only the names, descriptions, units, and
        aggregates differ.
        """
        runner_export = CLIRunner(MetricExport())
        metrics = json.loads(runner_export.get_output([]))

        # (metric key, display name, short name, description, unit, aggregate)
        expected = [
            ('SYSTEM.CPU.IOWAIT', 'CPU IO Wait Time', 'CPU IO Wait',
             'The percentage of CPU time spent waiting for IO operations.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.STOLEN', 'CPU Stolen Time', 'CPU Stolen',
             'The percentage of time a virtual machine was '
             'ready to run but was not allowed to run by the host OS.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.SYS', 'CPU System Time', 'CPU System',
             'The percentage of available CPU time being utilized by the OS.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.USER', 'CPU User Time', 'CPU User',
             'The percentage of available CPU time being utilized by programs.',
             'percent', 'AVG'),
            ('SYSTEM.FS.USE_PERCENT.TOTAL', 'Filesystem Utilization',
             'FileSys Used',
             'Percentage of disk space used (written on). '
             'This is the total among all non-virtual, local filesystems.',
             'percent', 'MAX'),
            ('SYSTEM.MEM.FREE', 'Memory Bytes Free', 'Mem Free',
             'The amount of unused memory by programs and the OS.',
             'bytecount', 'AVG'),
            ('SYSTEM.METER.KEEPALIVE', 'SYSTEM.METER.KEEPALIVE',
             'SYSTEM.METER.KEEPALIVE', 'SYSTEM.METER.KEEPALIVE',
             'number', 'AVG'),
            ('SYSTEM.OS.CONTEXT_SWITCHES', 'Context Switches',
             'Context Switches',
             'The number of switches between programs and the OS in the last second.',
             'number', 'MAX'),
            ('SYSTEM.OS.LOADAVG.ONE', 'One Minute Load Average',
             'Load Avg 1 Min',
             'An averaging of the number of processes utilizing or waiting for CPU '
             'time over that last minute. This number is divided by number of CPUs to provide an '
             'accurate comparative number between systems with different numbers of CPUs.',
             'number', 'AVG'),
            ('SYSTEM.OS.LOADAVG.QUEUE', 'Processor Queue', 'Proc Queue',
             'The number of processes that are waiting to run on a CPU.',
             'number', 'MAX'),
            ('SYSTEM.PROC.IDLE', 'Number of Idle Processes', 'Proc Idle',
             'The number of processes that have not executed for more than 20 seconds.',
             'number', 'MAX'),
            ('SYSTEM.PROC.THREADS', 'Number of Threads', 'Proc Threads',
             'Number of process threads running. '
             'A single process can have many threads, but a thread can only have one process.',
             'number', 'MAX'),
            ('SYSTEM.PROC.TOTAL', 'Total Number of Processes', 'Proc Count',
             'The number of processes running, '
             'including duplicates. Processes are programs that support applications.',
             'number', 'MAX'),
        ]

        for key, display, short, description, unit, aggregate in expected:
            self.metric_assert(metrics[key], display, short, description,
                               unit, aggregate, 1000, False)
    def test_list_hostgroup(self):
        """List host groups and expect at least one to exist.

        Fix: the original ``assertGreaterEqual(1, len(...))`` asserted
        ``1 >= len``, i.e. *at most* one group; the arguments were reversed.
        Also avoids shadowing the builtin ``list``.
        """
        runner_list = CLIRunner(HostgroupList())
        output = runner_list.get_output([])
        hostgroup_list = json.loads(output)

        self.assertGreaterEqual(len(hostgroup_list), 1)
    def test_export_metric(self):
        """Export every metric and verify the default system metrics' fields.

        All expected entries share a 1000 ms resolution and an enabled
        (``False`` disabled) flag; the remaining fields are table-driven.
        """
        export_runner = CLIRunner(MetricExport())
        exported = json.loads(export_runner.get_output([]))

        # (metric key, display name, short name, description, unit, aggregate)
        cases = [
            ('SYSTEM.CPU.IOWAIT', 'CPU IO Wait Time', 'CPU IO Wait',
             'The percentage of CPU time spent waiting for IO operations.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.STOLEN', 'CPU Stolen Time', 'CPU Stolen',
             'The percentage of time a virtual machine was '
             'ready to run but was not allowed to run by the host OS.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.SYS', 'CPU System Time', 'CPU System',
             'The percentage of available CPU time being utilized by the OS.',
             'percent', 'AVG'),
            ('SYSTEM.CPU.USER', 'CPU User Time', 'CPU User',
             'The percentage of available CPU time being utilized by programs.',
             'percent', 'AVG'),
            ('SYSTEM.FS.USE_PERCENT.TOTAL', 'Filesystem Utilization',
             'FileSys Used',
             'Percentage of disk space used (written on). '
             'This is the total among all non-virtual, local filesystems.',
             'percent', 'MAX'),
            ('SYSTEM.MEM.FREE', 'Memory Bytes Free', 'Mem Free',
             'The amount of unused memory by programs and the OS.',
             'bytecount', 'AVG'),
            ('SYSTEM.METER.KEEPALIVE', 'SYSTEM.METER.KEEPALIVE',
             'SYSTEM.METER.KEEPALIVE', 'SYSTEM.METER.KEEPALIVE',
             'number', 'AVG'),
            ('SYSTEM.OS.CONTEXT_SWITCHES', 'Context Switches',
             'Context Switches',
             'The number of switches between programs and the OS in the last second.',
             'number', 'MAX'),
            ('SYSTEM.OS.LOADAVG.ONE', 'One Minute Load Average',
             'Load Avg 1 Min',
             'An averaging of the number of processes utilizing or waiting for CPU '
             'time over that last minute. This number is divided by number of CPUs to provide an '
             'accurate comparative number between systems with different numbers of CPUs.',
             'number', 'AVG'),
            ('SYSTEM.OS.LOADAVG.QUEUE', 'Processor Queue', 'Proc Queue',
             'The number of processes that are waiting to run on a CPU.',
             'number', 'MAX'),
            ('SYSTEM.PROC.IDLE', 'Number of Idle Processes', 'Proc Idle',
             'The number of processes that have not executed for more than 20 seconds.',
             'number', 'MAX'),
            ('SYSTEM.PROC.THREADS', 'Number of Threads', 'Proc Threads',
             'Number of process threads running. '
             'A single process can have many threads, but a thread can only have one process.',
             'number', 'MAX'),
            ('SYSTEM.PROC.TOTAL', 'Total Number of Processes', 'Proc Count',
             'The number of processes running, '
             'including duplicates. Processes are programs that support applications.',
             'number', 'MAX'),
        ]

        for key, display, short, description, unit, aggregate in cases:
            self.metric_assert(exported[key], display, short, description,
                               unit, aggregate, 1000, False)
    def test_list_hostgroup(self):
        """List host groups and expect at least one to exist.

        Fix: the original ``assertGreaterEqual(1, len(...))`` asserted
        ``1 >= len``, i.e. *at most* one group; the arguments were reversed.
        Also avoids shadowing the builtin ``list``.
        """
        runner_list = CLIRunner(HostgroupList())
        output = runner_list.get_output([])
        hostgroup_list = json.loads(output)

        self.assertGreaterEqual(len(hostgroup_list), 1)