def test_series_loader(self):
     """Verify the background series loader populates memcache.

     Builds a finder with a short loader interval and memcache enabled,
     checks the configured TTL and max-value size are honoured, waits for
     a loader run and asserts the loader's pattern key is cached, then
     re-creates the finder with startup blocking disabled and checks the
     series loader lock behaviour.
     """
     query = Query('*')
     # Cache key the series loader writes results under for this
     # pattern/limit/offset combination.
     loader_memcache_key = influxgraph.utils.gen_memcache_pattern_key("_".join([
         query.pattern, str(self.default_nodes_limit), str(0)]))
     del self.finder
     _loader_interval = 2
     config = { 'influxdb' : { 'host' : 'localhost',
                               'port' : 8086,
                               'user' : 'root',
                               'pass' : 'root',
                               'db' : self.db_name,
                               'series_loader_interval': _loader_interval,
                               'memcache' : { 'host': 'localhost',
                                              'ttl' : 60,
                                              'max_value': 20,
                                              },
                               'log_level': 0,
                               'fill': 'previous',
                               },}
     # Clear any stale loader mutex so a fresh loader run is triggered.
     # NameError is expected when the optional memcache client library
     # is not importable in this environment.
     try:
         _memcache = memcache.Client([config['influxdb']['memcache']['host']])
         _memcache.delete(SERIES_LOADER_MUTEX_KEY)
     except NameError:
         pass
     finder = influxgraph.InfluxDBFinder(config)
     time.sleep(_loader_interval/2.0)
     # if finder.memcache:
     #     self.assertTrue(finder.memcache.get(SERIES_LOADER_MUTEX_KEY))
     self.assertTrue(finder.memcache)
     self.assertEqual(finder.memcache_ttl, 60,
                      msg="Configured TTL of %s sec, got %s sec TTL instead" % (
                          60, finder.memcache_ttl,))
     # 'max_value' is configured in MB; the client reports it in bytes.
     self.assertEqual(finder.memcache.server_max_value_length, 1024**2*20,
                      msg="Configured max value of %s MB, got %s instead" % (
                          1024**2*20, finder.memcache.server_max_value_length,))
     # Give series loader more than long enough to finish
     time.sleep(_loader_interval + 2)
     if finder.memcache:
         self.assertTrue(finder.memcache.get(loader_memcache_key))
     del finder
     config['influxdb']['loader_startup_block'] = False
     finder = influxgraph.InfluxDBFinder(config)
     # With startup blocking disabled the loader lock should be free;
     # an acquire failure is tolerated as the loader may be mid-run.
     try:
         self.assertTrue(_SERIES_LOADER_LOCK.acquire(block=False))
     except:
         pass
     else:
         _SERIES_LOADER_LOCK.release()
# Beispiel #2
 def test_field_template_with_value_field(self):
     """A 'field*' template with a single 'value' field should expose
     measurement names as leaf nodes rather than adding a 'value' level."""
     template = "env.host.measurement.field*"
     del self.finder
     measurements = ['cpuusage']
     fields = {'value': 1}
     tags = {'host': 'my_host1',
             'env': 'my_env1',
             }
     metrics = ['.'.join([tags['env'], tags['host'], m])
                for m in measurements]
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     self.write_data(measurements, tags, fields)
     self.config['influxdb']['templates'] = [template]
     self.finder = influxgraph.InfluxDBFinder(self.config)
     cpu_nodes = list(self.finder.find_nodes(Query('my_env1.my_host1.*')))
     expected = measurements
     self.assertEqual([n.name for n in cpu_nodes], expected)
     # The 'value' field collapses into the measurement node, so there
     # must be no deeper level in the tree.
     nodes = list(self.finder.find_nodes(Query('my_env1.my_host1.*.*')))
     self.assertEqual(nodes, [])
     _, data = self.finder.fetch_multi(cpu_nodes, int(self.start_time.strftime("%s")),
                                       int(self.end_time.strftime("%s")))
     for metric in metrics:
         # Only non-null, non-zero datapoints count towards the total.
         datapoints = [v for v in data[metric] if v]
         self.assertTrue(len(datapoints) == self.num_datapoints)
 def setUp(self):
     """Create test DB, seed series data and build a fresh finder."""
     # Num datapoints is number of non-zero non-null datapoints
     self.step, self.num_datapoints, self.db_name = 60, 31, 'integration_test'
     self.all_datapoints_num = 61
     # One-hour query window ending now.
     self.start_time, self.end_time = (datetime.datetime.utcnow() - datetime.timedelta(hours=1)), \
       datetime.datetime.utcnow()
     self.steps = int(round((int(self.end_time.strftime("%s")) - \
                             int(self.start_time.strftime("%s"))) * 1.0 / self.step)) + 1
     self.config = { 'influxdb': { 'db' : self.db_name,
                                   'log_level': 0,
                                   },
                     'statsd': { 'host': 'localhost' },
                     'search_index': 'index',
                     }
     self.client = InfluxDBClient(database=self.db_name)
     self.metric_prefix = "integration_test"
     self.nodes = ["leaf_node1", "leaf_node2"]
     self.series1, self.series2 = ".".join([self.metric_prefix, self.nodes[0]]), \
       ".".join([self.metric_prefix, self.nodes[1]])
     self.default_nodes_limit = LOADER_LIMIT
     self.series = [self.series1, self.series2,
                    'integration_test.agg_path.min',
                    'integration_test.agg_path.max',
                    'integration_test.agg_path.last',
                    'integration_test.agg_path.sum',
                    ]
     # Random values written per series by setup_db().
     self.series_values = [randint(1,100) for _ in self.series]
     self.setup_db()
     self.finder = influxgraph.InfluxDBFinder(self.config)
 def test_loader_limit(self):
     """Series loader should honour a configured 'loader_limit' and
     reject a non-numeric limit value at startup."""
     del self.finder
     config = { 'influxdb' : { 'host' : 'localhost',
                               'port' : 8086,
                               'user' : 'root',
                               'pass' : 'root',
                               'db' : self.db_name,
                               'loader_limit': 1,
                               'series_loader_interval': 1,
                               'log_level': 0,
                               'memcache' : { 'host': 'localhost',
                                              'ttl' : 60,
                                              },
                                              }}
     # Cache key the loader writes for pattern '*' with limit 1, offset 0.
     loader_memcache_key = influxgraph.utils.gen_memcache_pattern_key("_".join([
         '*', str(config['influxdb']['loader_limit']), str(0)]))
     # Clear stale cache entries; NameError is expected when the optional
     # memcache client library is not importable.
     try:
         _memcache = memcache.Client([config['influxdb']['memcache']['host']])
         _memcache.delete(loader_memcache_key)
         _memcache.delete(SERIES_LOADER_MUTEX_KEY)
     except NameError:
         pass
     self.finder = influxgraph.InfluxDBFinder(config)
     self.assertEqual(self.finder.loader_limit, config['influxdb']['loader_limit'])
     # Wait for at least one full loader run before checking the cache.
     time.sleep(config['influxdb']['series_loader_interval'] * 2)
     if self.finder.memcache:
         self.assertTrue(self.finder.memcache.get(loader_memcache_key))
     # A non-integer loader limit must be rejected at finder creation.
     config['influxdb']['loader_limit'] = 'bad_limit'
     self.assertRaises(Exception, influxgraph.InfluxDBFinder, config)
 def test_index_save_load(self):
     """Index written by save_index() can be reloaded from disk and must
     answer queries identically to the in-memory index."""
     self.finder.index = None
     # Remove any previous index file; absence (OSError) is fine.
     try:
         os.unlink(self.finder.index_path)
     except OSError:
         pass
     del self.finder
     config = { 'influxdb': { 'host' : 'localhost',
                              'port' : 8086,
                              'user' : 'root',
                              'pass' : 'root',
                              'db' : self.db_name,
                              'reindex_interval': 1,
                              'log_level': 0,
                              },
                }
     finder = influxgraph.InfluxDBFinder(config)
     finder.index_path = 'index'
     finder.save_index()
     index_path = finder.index_path
     # Deep-copy the in-memory index (via array round-trip) so it
     # survives finder deletion for the comparison below.
     finder_index = Node.from_array(finder.index.to_array())
     # Let a background reindex run rewrite the file before reloading it.
     time.sleep(config['influxdb']['reindex_interval'] + 1)
     del finder
     self.assertTrue(os.path.isfile('index'))
     # Reload index from file
     index_fh = open(index_path, 'rt')
     try:
         index = Node.from_file(index_fh)
     finally:
         index_fh.close()
     self.assertTrue(index is not None)
     # Reloaded and original index must agree at every tree depth.
     for query in ['*', '*.*', '*.*.*', '*.*.*.*']:
         self.assertEqual([path for (path, _) in index.query(query)],
                          [path for (path, _) in finder_index.query(query)])
# Beispiel #6
 def test_template_measurement_first(self):
     """Template with measurement as the first path component: the root
     query returns measurement names, deeper queries return tag values."""
     del self.finder
     template = "measurement.host.resource"
     self.config['influxdb']['templates'] = [template]
     measurements = ['load', 'idle',
                     'usage', 'user']
     tags = {'host': 'my_host',
             'resource': 'cpu',
             }
     fields = {'value': 1}
     self.write_data(measurements, tags, fields)
     self.finder = influxgraph.InfluxDBFinder(self.config)
     # Root level - measurement names.
     query = Query('*')
     found = sorted(node.name for node in self.finder.find_nodes(query))
     wanted = sorted(measurements)
     self.assertEqual(found, wanted,
                      msg="Got root branch query result %s - wanted %s" % (
                          found, wanted,))
     # Second level - host tag value.
     query = Query('%s.*' % (measurements[0]))
     found = sorted(node.name for node in self.finder.find_nodes(query))
     wanted = [tags['host']]
     self.assertEqual(found, wanted,
                      msg="Got query %s result %s - wanted %s" % (
                          query.pattern, found, wanted,))
     # Third level - resource tag value.
     query = Query('%s.%s.*' % (measurements[0], tags['host'],))
     found = sorted(node.name for node in self.finder.find_nodes(query))
     wanted = sorted([tags['resource']])
     self.assertEqual(found, wanted,
                      msg="Got query %s result %s - wanted %s" % (
                          query.pattern, found, wanted,))
 def test_multi_finder_index_build(self):
     """Test index build lock with multiple finders"""
     num_finders = 50
     fh = open(FILE_LOCK, 'w')
     del self.finder
     # Zero interval forces continuous reindexing so the file lock
     # stays contended while the finders are alive.
     self.config['influxdb']['reindex_interval'] = 0
     finders = []
     for _ in range(num_finders):
         _finder = influxgraph.InfluxDBFinder(self.config)
         time.sleep(.001)
         finders.append(_finder)
     # While any finder holds the build lock, a non-blocking exclusive
     # flock on the same file must raise IOError. The lock may be
     # momentarily free between builds, so retry up to 50 times.
     try:
         self.assertRaises(IOError, fcntl.flock,
                           fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
     except AssertionError:
         i = 0
         while i < 50:
             i += 1
             fcntl.flock(fh, fcntl.LOCK_UN)
             try:
                 self.assertRaises(IOError, fcntl.flock,
                                   fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
             except AssertionError:
                 continue
     finally:
         # Always release our lock and remove the lock file.
         fcntl.flock(fh, fcntl.LOCK_UN)
         fh.close()
         os.unlink(FILE_LOCK)
# Beispiel #8
 def test_index_save_load(self):
     """Saved index file is gzip-compressed on disk and reloads into a
     NodeTreeIndex that answers queries identically to the original."""
     config = { 'influxdb': { 'host' : 'localhost',
                              'port' : 8086,
                              'memcache' : {'host': 'localhost',},
                              'user' : 'root',
                              'pass' : 'root',
                              'db' : self.db_name,
                              },
                'statsd': {'host': 'localhost' },
                }
     finder = influxgraph.InfluxDBFinder(config)
     finder.index_path = 'index'
     finder.save_index()
     index_path = finder.index_path
     # Keep a reference to the in-memory index past finder deletion.
     finder_index = finder.index
     del finder
     self.assertTrue(os.path.isfile('index'))
     # Reload index from file
     index_fh = GzipFile(index_path, 'r')
     try:
         index = NodeTreeIndex.from_file(index_fh)
     finally:
         index_fh.close()
     self.assertTrue(index is not None)
     # Reloaded and original index must agree at every tree depth.
     for query in ['*', '*.*', '*.*.*', '*.*.*.*']:
         self.assertEqual(list(index.query(query)), list(finder_index.query(query)))
# Beispiel #9
 def setUp(self):
     """Create test DB, seed series data and build a fresh finder."""
     # Non-zero, non-null datapoints expected per series over one hour.
     self.step, self.num_datapoints, self.db_name = 60, 31, 'integration_test'
     # One-hour query window ending now.
     self.start_time, self.end_time = (datetime.datetime.utcnow() - datetime.timedelta(hours=1)), \
       datetime.datetime.utcnow()
     self.steps = int(round((int(self.end_time.strftime("%s")) - \
                             int(self.start_time.strftime("%s"))) * 1.0 / self.step)) + 1
     self.config = { 'influxdb': { 'host' : 'localhost',
                                    'port' : 8086,
                                    'user' : 'root',
                                    'pass' : 'root',
                                    'db' : self.db_name,
                                    # 'series_loader_interval': 1,
                                    },
                     'statsd': { 'host': 'localhost' },
                     'search_index': 'index',
                     }
     self.client = InfluxDBClient(database=self.db_name)
     self.metric_prefix = "integration_test"
     self.nodes = ["leaf_node1", "leaf_node2"]
     self.series1, self.series2 = ".".join([self.metric_prefix, self.nodes[0]]), \
       ".".join([self.metric_prefix, self.nodes[1]])
     self.default_nodes_limit = LOADER_LIMIT
     self.series = [self.series1, self.series2,
                    'integration_test.agg_path.min',
                    'integration_test.agg_path.max',
                    'integration_test.agg_path.last',
                    'integration_test.agg_path.sum',
                    ]
     self.setup_db()
     self.finder = influxgraph.InfluxDBFinder(self.config)
# Beispiel #10
 def test_template_drop_path_part(self):
     """A template that skips leading path parts ('..measurement*')
     should index the remainder of the metric path as measurement."""
     del self.finder
     # Filter out first part of metric, keep the remainder as
     # measurement name
     template = "..measurement*"
     self.config['influxdb']['templates'] = [self.template,
                                             template]
     measurements = ['my_host.cpu.load', 'my_host.cpu.idle',
                     'my_host.cpu.usage', 'my_host.cpu.user']
     fields = {"value": 1}
     self.write_data(measurements, {}, fields)
     self.finder = influxgraph.InfluxDBFinder(self.config)
     # Root shows both template trees: dropped-part data and the
     # default prefix from setUp's template.
     query = Query('*')
     nodes = sorted([n.name for n in self.finder.find_nodes(query)])
     expected = sorted(['my_host', self.metric_prefix])
     self.assertEqual(nodes, expected,
                      msg="Got root branch query result %s - wanted %s" % (
                          nodes, expected,))
     split_measurement = measurements[0].split('.')
     query = Query('%s.*' % (split_measurement[0]))
     nodes = [n.name for n in self.finder.find_nodes(query)]
     expected = [split_measurement[1]]
     self.assertEqual(nodes, expected,
                      msg="Got sub branch query result %s - wanted %s" % (
                          nodes, expected,))
     query = Query('%s.%s.*' % (split_measurement[0], split_measurement[1],))
     nodes = sorted([n.name for n in self.finder.find_nodes(query)])
     expected = sorted([m.split('.')[2] for m in measurements])
     self.assertEqual(nodes, expected,
                      msg="Got sub branch query result %s - wanted %s" % (
                          nodes, expected,))
# Beispiel #11
 def test_memcache_default_config_values(self):
     """Memcache configured with only a host should fall back to the
     default TTL and a 1 MB max value size.

     Bug fix: the max-value failure message previously interpolated
     ``1024**2 * finder.memcache.server_max_value_length``, multiplying
     the actual byte value by 1024**2 and misreporting it on failure.
     The message now reports the value as-is.
     """
     del self.finder
     config = {
         'influxdb': {
             'host': 'localhost',
             'port': 8086,
             'user': '******',
             'pass': '******',
             'db': self.db_name,
             'memcache': {
                 'host': 'localhost'
             },
             'log_level': 0,
             'fill': 'previous',
         },
     }
     finder = influxgraph.InfluxDBFinder(config)
     self.assertTrue(finder.memcache)
     self.assertEqual(
         finder.memcache_ttl,
         MEMCACHE_SERIES_DEFAULT_TTL,
         msg="Default TTL should be %s sec, got %s sec TTL instead" % (
             MEMCACHE_SERIES_DEFAULT_TTL,
             finder.memcache_ttl,
         ))
     # server_max_value_length is in bytes; the default is 1 MB.
     self.assertEqual(
         finder.memcache.server_max_value_length,
         1024**2 * 1,
         msg="Default max value should be 1 MB, got %s instead" %
         (finder.memcache.server_max_value_length, ))
# Beispiel #12
 def test_single_fetch_memcache_integration(self):
     """A single-node fetch should write the series data to memcache
     under the expected key, and a repeat fetch should return the full
     expected number of datapoints.

     Cleanup: removed the unused ``datapoints`` local that filtered
     ``data`` but was never asserted on.
     """
     self.config['influxdb']['memcache'] = {'host': 'localhost'}
     del self.config['search_index']
     self.finder = influxgraph.InfluxDBFinder(self.config)
     node = list(self.finder.find_nodes(Query(self.series1)))[0]
     aggregation_func = get_aggregation_func(
         node.path, self.finder.aggregation_functions)
     # Key the reader caches fetched data under for this time range
     # and aggregation function.
     memcache_key = gen_memcache_key(int(self.start_time.strftime("%s")),
                                     int(self.end_time.strftime("%s")),
                                     aggregation_func, [node.path])
     self.finder.memcache.delete(memcache_key)
     node.reader.fetch(int(self.start_time.strftime("%s")),
                       int(self.end_time.strftime("%s")))
     self.assertTrue(
         node.reader.memcache.get(memcache_key),
         msg="Expected data for %s to be in memcache after a fetch" %
         (node.path, ))
     # Second fetch - may be served from memcache - must still return
     # one datapoint per step over the window.
     time_info, data = node.reader.fetch(
         int(self.start_time.strftime("%s")),
         int(self.end_time.strftime("%s")))
     self.assertTrue(self.steps == len(data),
                     msg="Expected %s datapoints, got %s instead" % (
                         self.steps,
                         len(data),
                     ))
# Beispiel #13
 def test_fill_param_config(self):
     """A numeric 'fill' setting is applied as fill_param and used to
     fill null datapoints in fetched data."""
     self.config['influxdb']['fill'] = 0
     self.finder = influxgraph.InfluxDBFinder(self.config)
     self.assertEqual(self.finder.fill_param,
                      self.config['influxdb']['fill'])
     nodes = list(self.finder.find_nodes(Query(self.series1)))
     time_info, data = self.finder.fetch_multi(
         nodes, int(self.start_time.strftime("%s")),
         int(self.end_time.strftime("%s")))
     self.assertTrue(
         self.series1 in data,
         msg="Did not get data for requested series %s - got data for %s" %
         (
             self.series1,
             data.keys(),
         ))
     self.assertEqual(
         time_info, (int(self.start_time.strftime("%s")),
                     int(self.end_time.strftime("%s")), self.step),
         msg="Time info and step do not match our requested values")
     # With fill enabled, every step in the window has a datapoint.
     self.assertTrue(len(data[self.series1]) == self.all_datapoints_num,
                     msg="Expected %s datapoints - got %s" % (
                         self.all_datapoints_num,
                         len(data[self.series1]),
                     ))
     # Trailing datapoint must be the configured fill value, not null.
     self.assertTrue(
         data[self.series1][-1] == self.config['influxdb']['fill'])
# Beispiel #14
 def test_configured_deltas(self):
     """Configured 'deltas' map query window size to data interval; a
     1-second interval over a one-hour window yields 3601 datapoints.

     Cleanup: removed the unused ``paths`` local, which was built from
     the nodes but never asserted on.
     """
     del self.finder
     config = { 'influxdb': { 'host' : 'localhost',
                              'port' : 8086,
                              'user' : 'root',
                              'pass' : 'root',
                              'db' : self.db_name,
         # Set data interval to 1 second for queries
         # of one hour or less
         'deltas' : {3600: 1},},
         # 'search_index': 'index',
         }
     finder = influxgraph.InfluxDBFinder(config)
     self.assertTrue(finder.deltas)
     nodes = list(finder.find_nodes(Query(self.series1)))
     time_info, data = finder.fetch_multi(nodes,
                                          int(self.start_time.strftime("%s")),
                                          int(self.end_time.strftime("%s")))
     self.assertTrue(self.series1 in data,
                     msg="Did not get data for requested series %s - got data for %s" % (
                         self.series1, data.keys(),))
     # One hour at a 1 sec interval, inclusive of both endpoints.
     self.assertTrue(len(data[self.series1]) == 3601,
                     msg="Expected exactly %s data points - got %s instead" % (
                         3601, len(data[self.series1])))
# Beispiel #15
 def test_index_save_load_failure(self):
     """Finder startup must survive an unreadable or corrupt index file
     and rebuild the index instead of crashing.

     Bug fix: the Python 2 branch used ``int('0600')``, which parses the
     string as *decimal* 600 rather than octal 0o600, setting the wrong
     permission bits. The string is now parsed with base 8.
     """
     del self.finder.index
     del self.finder
     bad_index_path = 'bad_index'
     # Ensure the index file exists and is empty.
     try:
         os.unlink(bad_index_path)
     except OSError:
         pass
     finally:
         open(bad_index_path, 'wt').close()
     # Permission errors on all operations
     mask = 0000 if sys.version_info <= (2, ) else 0o000
     os.chmod(bad_index_path, mask)
     config = {
         'influxdb': {
             'host': 'localhost',
             'port': 8086,
             'memcache': {
                 'host': 'localhost',
             },
             'user': '******',
             'pass': '******',
             'db': self.db_name,
             'log_level': 0,
             'fill': 'previous',
         },
         'statsd': {
             'host': 'localhost'
         },
         'search_index': bad_index_path,
     }
     # Startup with an unreadable index file must not raise.
     finder = influxgraph.InfluxDBFinder(config)
     del finder
     # Restore owner read/write permission (0o600) - base-8 parse on the
     # Python 2 branch, octal literal on Python 3.
     mask = int('0600', 8) if sys.version_info <= (2, ) else 0o600
     os.chmod(bad_index_path, mask)
     # Corrupt data in index file
     with open(bad_index_path, 'wt') as index_fh:
         index_fh.write('fasdfa}\n')
     # Startup with corrupt index data must rebuild the index.
     finder = influxgraph.InfluxDBFinder(config)
     self.assertTrue(finder.index)
     try:
         os.unlink(bad_index_path)
     except OSError:
         pass
# Beispiel #16
 def test_multi_tag_values_multi_measurements(self):
     """Multiple values per tag across multiple measurements: every
     env/host/measurement/field combination appears in the index and
     returns data."""
     measurements = ['cpu-0', 'cpu-1', 'cpu-2', 'cpu-3']
     fields = {'load': 1, 'idle': 1,
               'usage': 1, 'user': 1,
     }
     tags = {'host': 'my_host1',
             'env': 'my_env1',
             }
     # Two datapoints per measurement inside the one-hour query window.
     data = [{
         "measurement": measurement,
         "tags": tags,
         "time": _time,
         "fields": fields,
         }
         for measurement in measurements
         for _time in [
             (self.end_time - datetime.timedelta(minutes=30)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.end_time - datetime.timedelta(minutes=2)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             ]]
     metrics = ['.'.join([tags['host'], m, f])
                for f in fields.keys()
                for m in measurements]
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     self.assertTrue(self.client.write_points(data))
     # Write the same points again under a second env tag value.
     tags_env2 = {'host': 'my_host1',
                  'env': 'my_env2',
                  }
     for d in data:
         d['tags'] = tags_env2
     self.assertTrue(self.client.write_points(data))
     template = "env.host.measurement.field*"
     self.config['influxdb']['templates'] = [template]
     self.finder = influxgraph.InfluxDBFinder(self.config)
     query = Query('*.*.*.*')
     nodes = list(self.finder.find_nodes(query))
     node_paths = sorted([n.path for n in nodes])
     tag_values = set(['.'.join([t['env'], t['host']])
                       for t in [tags, tags_env2]])
     # Expected paths: every env.host prefix crossed with each
     # measurement and single-level (dot-free) field name.
     _metrics = ['.'.join([t, m, f])
                 for t in tag_values
                 for f in fields.keys() if not '.' in f
                 for m in measurements]
     expected = sorted(_metrics)
     self.assertEqual(node_paths, expected,
                      msg="Expected %s nodes - got %s" % (
                          len(expected), len(node_paths)))
     _, multi_tag_data = self.finder.fetch_multi(nodes,
                                       int(self.start_time.strftime("%s")),
                                       int(self.end_time.strftime("%s")))
     for metric in _metrics:
         # Only non-null, non-zero datapoints count towards the total.
         datapoints = [v for v in multi_tag_data[metric] if v]
         self.assertTrue(len(datapoints) == self.num_datapoints,
                         msg="Expected %s datapoints for %s - got %s" % (
                             self.num_datapoints, metric, len(datapoints),))
# Beispiel #17
 def test_data_with_fields(self):
     """With a 'field*' template, dot-free field names become leaf
     nodes under their measurement; dotted fields ('user.io' etc.)
     expand into deeper sub-paths and are not expected at this level.

     Cleanup: removed the unused ``io_metrics`` local - it enumerated
     the dotted-field paths but was never asserted on.
     """
     del self.finder
     template = "host.measurement.field*"
     self.config['influxdb']['templates'] = [template]
     measurements = ['cpu-0', 'cpu-1', 'cpu-2', 'cpu-3']
     fields = {'load': 1, 'idle': 1,
               'usage': 1, 'user': 1,
               'user.io': 1, 'idle.io': 1,
               'load.io': 1, 'usage.io': 1,
     }
     tags = {'host': 'my_host',
             'env': 'my_env',
             }
     # Expected metric paths for the single-level (dot-free) fields.
     cpu_metrics = ['.'.join(['my_host', m, f])
                    for m in measurements
                    for f in ['load', 'usage', 'user', 'idle']]
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     self.write_data(measurements, tags, fields)
     self.finder = influxgraph.InfluxDBFinder(self.config)
     query = Query('%s.*' % (tags['host']))
     nodes = sorted([n.name for n in self.finder.find_nodes(query)])
     expected = measurements
     self.assertEqual(nodes, expected,
                      msg="Got query %s result %s - wanted %s" % (
                          query.pattern, nodes, expected,))
     query = Query('%s.*.*' % (tags['host'],))
     nodes = list(self.finder.find_nodes(query))
     node_paths = sorted([n.path for n in nodes])
     expected = sorted(cpu_metrics)
     self.assertEqual(node_paths, expected,
                      msg="Got query %s result %s - wanted %s" % (
                          query.pattern, node_paths, expected,))
     time_info, data = self.finder.fetch_multi(nodes,
                                               int(self.start_time.strftime("%s")),
                                               int(self.end_time.strftime("%s")))
     for metric in cpu_metrics:
         self.assertTrue(metric in data,
                         msg="Did not get data for requested series %s - got data for %s" % (
                             metric, data.keys(),))
         datapoints = [v for v in data[metric] if v]
         self.assertTrue(len(datapoints) == self.num_datapoints,
                         msg="Expected %s datapoints for %s - got %s" % (
                             self.num_datapoints, metric, len(datapoints),))
     # Second fetch must return identical datapoint counts (exercises
     # any caching layer on the fetch path).
     time_info, _data = self.finder.fetch_multi(nodes,
                                                int(self.start_time.strftime("%s")),
                                                int(self.end_time.strftime("%s")))
     for metric in cpu_metrics:
         datapoints = [v for v in _data[metric] if v]
         self.assertTrue(len(datapoints) == self.num_datapoints,
                         msg="Expected %s datapoints for %s - got %s" % (
                             self.num_datapoints, metric, len(datapoints),))
# Beispiel #18
 def test_tagged_data_no_template_config(self):
     """With no templates configured, only measurement names should
     appear at the root of the index."""
     del self.finder
     self.config['influxdb']['templates'] = None
     self.finder = influxgraph.InfluxDBFinder(self.config)
     found = sorted(node.name
                    for node in self.finder.find_nodes(Query('*')))
     wanted = sorted(self.measurements)
     self.assertEqual(found, wanted,
                      msg="Expected only measurements in index with "
                      "no templates configured, got %s" % (found,))
# Beispiel #19
 def test_create_root_log_file_should_fail(self):
     """Finder creation must still succeed when the configured log file
     path is unwritable (a random name directly under /)."""
     unwritable_log_path = '/' + str(uuid.uuid4())
     _config = {
         'influxdb': {
             'host': 'localhost',
             'port': 8086,
             'user': 'root',
             'pass': 'root',
             'db': self.db_name,
             'log_file': unwritable_log_path,
             'log_level': 'debug',
         },
     }
     finder = influxgraph.InfluxDBFinder(_config)
     self.assertTrue(finder)
# Beispiel #20
 def test_create_log_file_should_succeed(self):
     """A writable log file path should be accepted at finder creation."""
     config = {
         'influxdb': {
             'host': 'localhost',
             'port': 8086,
             'user': 'root',
             'pass': 'root',
             'db': self.db_name,
             'log_file': '/tmp/fakey',
             'log_level': 'debug',
         },
     }
     finder = influxgraph.InfluxDBFinder(config)
     self.assertTrue(finder)
# Beispiel #21
 def test_memcache_field_keys(self):
     """Field key list is cached at startup and repopulated in memcache
     by get_field_keys() after deletion."""
     self.config['influxdb']['memcache'] = {'host': 'localhost'}
     self.config['influxdb']['series_loader_interval'] = 2
     self.finder = influxgraph.InfluxDBFinder(self.config)
     # Wait one loader interval for the startup load to complete.
     time.sleep(2)
     self.assertTrue(self.finder.memcache.get(_MEMCACHE_FIELDS_KEY),
                     msg="Expected field key list to be loaded to cache "
                     "at startup")
     self.finder.memcache.delete(_MEMCACHE_FIELDS_KEY)
     # get_field_keys() must both return the list and re-cache it.
     keys_list = self.finder.get_field_keys()
     keys_memcache = self.finder.memcache.get(_MEMCACHE_FIELDS_KEY)
     self.assertEqual(keys_list, keys_memcache)
# Beispiel #22
 def test_field_data_part_or_no_template_match(self):
     """Series matching no template produce no nodes; once a matching
     template is added, the full tag/measurement/field tree appears."""
     del self.finder
     measurements = ['test']
     fields = {'field1': 1, 'field2': 2}
     tags = {'env': 'my_env',
             'region': 'my_region',
             'dc': 'dc1'
             }
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     self.write_data(measurements, tags, fields)
     # Template references a 'template_tag' tag the data does not have -
     # nothing should match.
     self.config['influxdb']['templates'] = ['env.template_tag.measurement.field*']
     self.finder = influxgraph.InfluxDBFinder(self.config)
     query = Query('*')
     nodes = [n.name for n in self.finder.find_nodes(query)]
     expected = []
     self.assertEqual(nodes, expected)
     # Adding a second template that does match the data makes the
     # env/region/measurement/field tree visible.
     self.config['influxdb']['templates'] = ['env.template_tag.measurement.field*',
                                             'env.region.measurement.field*']
     self.finder = influxgraph.InfluxDBFinder(self.config)
     query = Query('*')
     nodes = sorted([n.path for n in self.finder.find_nodes(query)])
     expected = [tags['env']]
     self.assertEqual(nodes, expected)
     query = Query('*.*')
     nodes = sorted([n.path for n in self.finder.find_nodes(query)])
     expected = sorted(['.'.join([tags['env'], tags['region']])])
     self.assertEqual(nodes, expected)
     query = Query('*.*.*')
     nodes = sorted([n.path for n in self.finder.find_nodes(query)])
     expected = sorted(['.'.join([tags['env'], tags['region'], measurements[0]])])
     self.assertEqual(nodes, expected)
     query = Query('*.*.*.*')
     nodes = sorted([n.path for n in self.finder.find_nodes(query)])
     expected = sorted(['.'.join([tags['env'], tags['region'], measurements[0], f])
                        for f in fields.keys()])
     self.assertEqual(nodes, expected)
# Beispiel #23
 def test_reindex(self):
     """Index should still be present after at least one background
     reindex run has had time to complete."""
     del self.finder
     _reindex_interval = 2
     config = {
         'influxdb': {
             'host': 'localhost',
             'port': 8086,
             'user': 'root',
             'pass': 'root',
             'db': self.db_name,
             'reindex_interval': _reindex_interval,
             'memcache': {
                 'host': 'localhost',
                 'ttl': 60,
                 'max_value': 20,
             },
         },
     }
     finder = influxgraph.InfluxDBFinder(config)
     # Sleep through one full reindex interval before checking.
     time.sleep(_reindex_interval)
     self.assertTrue(finder.index)
# Beispiel #24
 def test_template_multiple_tags(self):
     """Template mapping several tags into the path: the final path
     level should expose the field names."""
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     measurement = 'disk'
     tags = {'host': 'my_host',
             'path': '/',
             'fstype': 'ext4',
             }
     fields = {'free': 1,
               'used': 1,
               }
     self.write_data([measurement], tags, fields)
     # Filter on '*.disk.' series, then map host/path/fstype tags.
     self.config['influxdb']['templates'] = [
         "*.disk. host.measurement.path.fstype.field*"]
     self.finder = influxgraph.InfluxDBFinder(self.config)
     pattern = '%s.%s.%s.%s.*' % (
         tags['host'], measurement, tags['path'], tags['fstype'])
     found = list(self.finder.find_nodes(Query(pattern)))
     self.assertEqual(sorted(n.name for n in found), sorted(fields.keys()))
# Beispiel #25
 def test_non_greedy_field(self):
     """A non-greedy 'field' template entry (no trailing '*') should
     only match single-level field names, excluding dotted fields."""
     measurements = ['cpu-0', 'cpu-1', 'cpu-2', 'cpu-3']
     fields = {'load': 1, 'idle': 1,
               'usage': 1, 'user': 1,
     }
     tags = {'host': 'my_host',
             'env': 'my_env',
             }
     # Two datapoints per measurement inside the one-hour query window.
     data = [{
         "measurement": measurement,
         "tags": tags,
         "time": _time,
         "fields": fields,
         }
         for measurement in measurements
         for _time in [
             (self.end_time - datetime.timedelta(minutes=30)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.end_time - datetime.timedelta(minutes=2)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             ]]
     metrics = ['.'.join([tags['host'], m, f])
                for f in fields.keys()
                for m in measurements]
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     self.assertTrue(self.client.write_points(data))
     template = "host.measurement.field"
     self.config['influxdb']['templates'] = [template]
     self.finder = influxgraph.InfluxDBFinder(self.config)
     query = Query('%s.*.*' % (tags['host'],))
     nodes = list(self.finder.find_nodes(query))
     node_paths = sorted([n.path for n in nodes])
     # Only dot-free field names may appear with a non-greedy template.
     _metrics = ['.'.join([tags['host'], m, f])
                 for f in fields.keys() if not '.' in f
                 for m in measurements ]
     expected = sorted(_metrics)
     self.assertEqual(node_paths, expected,
                      msg="Expected nodes %s from template with non-greedy field - got %s" % (
                          expected, node_paths))
Beispiel #26
0
 def test_template_measurement_no_tags(self):
     """Series written without tags should not appear under a template
     requiring tag components - only the tagged series is visible."""
     template = "env.host.measurement.field*"
     del self.finder
     measurements = ['cpuusage']
     fields = {'value': 1}
     tags = {'host': 'my_host1',
             'env': 'my_env1',
             }
     self.client.drop_database(self.db_name)
     self.client.create_database(self.db_name)
     # Write the same data twice - once with tags, once without any tags
     self.write_data(measurements, tags, fields)
     self.write_data(measurements, {}, fields)
     self.config['influxdb']['templates'] = [template]
     self.finder = influxgraph.InfluxDBFinder(self.config)
     # Only the tagged series' env tag should appear at the tree root
     nodes = [n.name for n in self.finder.find_nodes(Query('*'))]
     expected = [tags['env']]
     self.assertEqual(nodes, expected)
     cpu_nodes = list(self.finder.find_nodes(Query('my_env1.my_host1.*')))
     self.assertEqual([n.name for n in cpu_nodes], measurements)
Beispiel #27
0
 def setUp(self):
     """Prepare test data, an InfluxDB client and a finder configured
     with a graphite template carrying default tags."""
     self.metric_prefix = "template_integration_test"
     self.paths = ['test_type', 'host']
     self.tags = {
         # Tags parsed from metric path
         self.paths[0]: self.metric_prefix,
         self.paths[1]: 'localhost',
         # Default tags not in metric path
         'env': 'int',
         'region': 'the_west',
         }
     # Filter on prefix, map path components to tags, add default tags
     self.template = "%s %s.measurement* env=int,region=the_west" % (
         self.metric_prefix, ".".join(self.paths))
     self.measurements = ['cpu', 'memory', 'load', 'iops']
     self.graphite_series = [
         ".".join([self.tags[p] for p in self.paths] + [m])
         for m in self.measurements]
     self.step = 60
     self.num_datapoints = 31
     self.db_name = 'integration_test'
     # One hour window ending now
     self.start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
     self.end_time = datetime.datetime.utcnow()
     _duration = int(self.end_time.strftime("%s")) - \
         int(self.start_time.strftime("%s"))
     self.steps = int(round(_duration * 1.0 / self.step)) + 1
     self.config = {'influxdb': {
         'host': 'localhost',
         'port': 8086,
         'user': '******',
         'pass': '******',
         'db': self.db_name,
         'templates': [
             self.template,
             ],
         },
         }
     self.client = InfluxDBClient(database=self.db_name)
     self.default_nodes_limit = LOADER_LIMIT
     self.setup_db()
     self.finder = influxgraph.InfluxDBFinder(self.config)
Beispiel #28
0
 def test_retention_policies(self):
     """Queries should select the retention policy whose interval matches
     the configured delta for the requested time range and read data
     from it."""
     del self.finder
     data_point_value = 5
     # Group-by interval (sec) -> retention policy name
     retention_policies = {60: 'default', 600: '10m', 1800: '30m'}
     config = { 'influxdb' : { 'host' : 'localhost',
                               'port' : 8086,
                               'user' : 'root',
                               'pass' : 'root',
                               'db' : self.db_name,
                               # Query range (sec) -> group-by interval (sec)
                               'deltas' : {
                                   1800: 600,
                                   3600: 1800,
                                           },
                               'retention_policies' : retention_policies,
                               }}
     self.client.create_retention_policy('10m', '1d', 1, database=self.db_name, default=False)
     self.client.create_retention_policy('30m', '1d', 1, database=self.db_name, default=False)
     # Seven points at ten-minute spacing ending at start_time
     write_data = [{
         "measurement": series,
         "tags": {},
         "time": _time,
         "fields": {
             "value": data_point_value,
             }
         }
         for series in self.series
         for _time in [
             (self.start_time - datetime.timedelta(minutes=60)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time - datetime.timedelta(minutes=50)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time - datetime.timedelta(minutes=40)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time - datetime.timedelta(minutes=30)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time - datetime.timedelta(minutes=20)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time - datetime.timedelta(minutes=10)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.start_time).strftime("%Y-%m-%dT%H:%M:%SZ"),
             ]]
     self.assertTrue(self.client.write_points(write_data, retention_policy='10m'))
     self.assertTrue(self.client.write_points(write_data, retention_policy='30m'))
     finder = influxgraph.InfluxDBFinder(config)
     time.sleep(1)
     nodes = list(finder.find_nodes(Query(self.series1)))
     paths = [node.path for node in nodes]
     self.assertEqual(paths, [self.series1],
                      msg="Did not get requested node %s, got %s" % (
                          self.series1, paths,))
     # 29 minute range -> 600 sec interval -> '10m' retention policy
     time_info, data = finder.fetch_multi(nodes,
                                          int((self.start_time - datetime.timedelta(minutes=29)).strftime("%s")),
                                          int(self.start_time.strftime("%s")))
     self.assertIn(self.series1, data,
                   msg="Did not get data for requested series %s - got data for %s" % (
                       self.series1, data.keys(),))
     data_points = [v for v in data[self.series1] if v]
     self.assertEqual(len(data_points), 3,
                      msg="Three datapoints in interval in retention policy, got %s from query" % (
                          len(data_points)))
     self.assertEqual(data_points[0], data_point_value)
     # 31 minute range -> 1800 sec interval -> '30m' retention policy
     time_info, data = finder.fetch_multi(nodes,
                                          int((self.start_time - datetime.timedelta(minutes=31)).strftime("%s")),
                                          int(self.start_time.strftime("%s")))
     self.assertIn(self.series1, data,
                   msg="Did not get data for requested series %s - got data for %s" % (
                       self.series1, data.keys(),))
     data_points = [v for v in data[self.series1] if v]
     self.assertEqual(data_points[0], data_point_value)
     self.assertEqual(len(data_points), 2,
                      msg="Two datapoints in interval in retention policy, got %s from query" % (
                          len(data_points)))
Beispiel #29
0
 def test_parent_branch_series(self):
     """Branch and leaf nodes of deeply nested series should resolve
     correctly for wildcard and brace-expansion queries."""
     prefix = 'branch_test_prefix'
     written_branches = ['branch_node1.sub_branch1.sub_branch2.sub_branch3',
                         'branch_node2.sub_branch11.sub_branch22.sub_branch33']
     leaf_nodes = ['leaf_node1', 'leaf_node2']
     written_series = [".".join([prefix,
                                 branch, leaf_node,])
                                 for branch in written_branches
                                 for leaf_node in leaf_nodes]
     data = [{
         "measurement": series,
         "tags": {},
         "time": _time,
         "fields": {
             "value": 1,
             }
         }
         for series in written_series
         for _time in [
             (self.end_time - datetime.timedelta(minutes=30)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             (self.end_time - datetime.timedelta(minutes=2)).strftime("%Y-%m-%dT%H:%M:%SZ"),
             ]]
     self.assertTrue(self.client.write_points(data))
     del self.finder
     config = { 'influxdb' : { 'host' : 'localhost',
                               'port' : 8086,
                               'user' : 'root',
                               'pass' : 'root',
                               'db' : self.db_name,
                               'memcache' : { 'host': 'localhost',
                                              'ttl' : 60,
                                              'max_value' : 20},
                               },}
     # Clear cached series list and loader mutex so this run starts cold;
     # NameError means no memcache module is installed - nothing to clear
     try:
         _memcache = memcache.Client([config['influxdb']['memcache']['host']])
         memcache_key = influxgraph.utils.gen_memcache_pattern_key("_".join([
             '*', str(self.default_nodes_limit), str(0)]))
         _memcache.delete(memcache_key)
         _memcache.delete(SERIES_LOADER_MUTEX_KEY)
     except NameError:
         pass
     finder = influxgraph.InfluxDBFinder(config)
     time.sleep(1)
     query = Query(prefix + '.*.*.*.*.' + leaf_nodes[0])
     nodes = list(finder.find_nodes(query))
     self.assertEqual(len(nodes), len(leaf_nodes),
                      msg="Did not get all leaf nodes for wildcard query")
     for node in nodes:
         self.assertTrue(node.is_leaf,
                         msg="Leaf node %s from query %s not marked as leaf" % (
                             node.name, query.pattern,))
     # Root of the tree is a single branch node
     query = Query(prefix)
     nodes = list(finder.find_nodes(query))
     expected = [prefix]
     branches = sorted([n.name for n in nodes])
     self.assertEqual(expected, branches,
                      msg="Expected branches %s, got %s" % (expected, branches,))
     self.assertFalse(nodes[0].is_leaf,
                      msg="Root branch node marked as leaf")
     # Intermediate branch query
     query = Query(prefix + '.branch_node*.sub_branch*.*')
     nodes = list(finder.find_nodes(query))
     expected = sorted([b.split('.')[2] for b in written_branches])
     branches = sorted([n.name for n in nodes])
     self.assertEqual(branches, expected,
                      msg="Got branches list %s - wanted %s" % (
                          branches, expected,))
     # Full depth wildcard query returns all leaves, once per branch
     query = Query(".".join([prefix, "branch_node*",
                             "sub_branch*", "sub_branch*", "sub_branch*",
                             "leaf*"]))
     nodes = list(finder.find_nodes(query))
     expected = sorted(leaf_nodes + leaf_nodes)
     found_leaves = sorted([n.name for n in nodes])
     self.assertEqual(found_leaves, expected,
                      msg="Expected leaf node list '%s' - got %s" % (
                          expected, found_leaves))
     for node in nodes:
         self.assertTrue(node.is_leaf,
                         msg="Leaf node %s is not marked as leaf node" % (node))
     # Brace expansion query matches the same leaves
     query = Query(".".join([prefix, "branch_node*",
                             "sub_branch*", "sub_branch*", "sub_branch*",
                             "{%s}" % (",".join(leaf_nodes),)]))
     nodes = list(finder.find_nodes(query))
     expected = sorted(leaf_nodes + leaf_nodes)
     found_leaves = sorted([n.name for n in nodes])
     self.assertEqual(found_leaves, expected,
                      msg="Expected leaf node list '%s' - got %s" % (
                          expected, found_leaves))
Beispiel #30
0
 def test_memcache_integration(self):
     """End to end memcache integration - series lists and fetched data
     should be cached, and cached results must match uncached ones."""
     del self.finder
     config = { 'influxdb' : { 'host' : 'localhost',
                               'port' : 8086,
                               'user' : 'root',
                               'pass' : 'root',
                               'db' : self.db_name,
                               'memcache' : { 'host': 'localhost',
                                              'ttl' : 60,
                                              'max_value' : 20},
                               },}
     try:
         _memcache = memcache.Client([config['influxdb']['memcache']['host']])
     except NameError:
         # No memcache module - no memcache integration tests
         return
     query, limit = Query('*'), 1
     # One cache key per paginated get_all_series offset
     memcache_keys = [influxgraph.utils.gen_memcache_pattern_key("_".join([
         query.pattern, str(limit), str(offset)]))
                      for offset in range(len(self.series))]
     # Start from a cold cache
     for _key in memcache_keys:
         _memcache.delete(_key)
     _memcache.delete(SERIES_LOADER_MUTEX_KEY)
     finder = influxgraph.InfluxDBFinder(config)
     self.assertTrue(finder.memcache)
     self.assertEqual(finder.memcache_ttl, 60,
                      msg="Configured TTL of %s sec, got %s sec TTL instead" % (
                          60, finder.memcache_ttl,))
     # 'max_value': 20 is configured in MB, client stores it in bytes
     self.assertEqual(finder.memcache.server_max_value_length, 1024**2*20,
                      msg="Configured max value of %s bytes, got %s instead" % (
                          1024**2*20, finder.memcache.server_max_value_length,))
     node_names = list(finder.get_all_series(
         limit=limit))
     self.assertIn(self.series[0], node_names,
                   msg="Node list does not contain series '%s' - %s" % (
                       self.series[0], node_names))
     if finder.memcache:
         for memcache_key in memcache_keys:
             self.assertTrue(finder.memcache.get(memcache_key),
                             msg="No memcache data for key %s" % (memcache_key,))
     nodes = sorted(finder.get_all_series(limit=limit))
     expected = sorted(self.series)
     self.assertEqual(nodes, expected,
                      msg="Did not get correct series list - "
                      "wanted %s series, got %s" % (
                          len(expected), len(nodes),))
     nodes = list(finder.find_nodes(Query(self.series1)))
     paths = [node.path for node in nodes]
     self.assertEqual(paths, [self.series1],
                      msg="Did not get requested node %s, got %s" % (
                          self.series1, paths,))
     time_info, data = finder.fetch_multi(nodes,
                                          int(self.start_time.strftime("%s")),
                                          int(self.end_time.strftime("%s")))
     self.assertIn(self.series1, data,
                   msg="Did not get data for requested series %s - got data for %s" % (
                       self.series1, data.keys(),))
     # Second identical fetch should be served from the cache and agree
     time_info, data_cached = finder.fetch_multi(nodes,
                                                 int(self.start_time.strftime("%s")),
                                                 int(self.end_time.strftime("%s")))
     self.assertEqual(len(data[self.series1]), len(data_cached[self.series1]),
                      msg="Cached data does not match uncached data for series %s" % (
                          self.series1))
     aggregation_func = list(set(influxgraph.utils.get_aggregation_func(
         path, finder.aggregation_functions) for path in paths))[0]
     memcache_key = influxgraph.utils.gen_memcache_key(
         int(self.start_time.strftime("%s")), int(self.end_time.strftime("%s")),
         aggregation_func, paths)
     if finder.memcache:
         self.assertTrue(finder.memcache.get(memcache_key),
                         msg="Got no memcache data for query %s with key %s" % (
                             query, memcache_key,))
     # Node reader fetch should hit the same cache as finder fetch_multi
     time_info, reader_data = nodes[0].reader.fetch(int(self.start_time.strftime("%s")),
                                                    int(self.end_time.strftime("%s")))
     self.assertEqual(len(data[self.series1]), len(reader_data),
                      msg="Reader cached data does not match finder cached data"
                      " for series %s" % (self.series1,))