def reloadStorageSchemas():
    global SCHEMAS
    try:
        SCHEMAS = loadStorageSchemas()
    except Exception:
        log.msg("Failed to reload storage SCHEMAS")
        log.err()
def reloadStorageSchemas():
    global schemas
    try:
        schemas = loadStorageSchemas()
    except Exception:
        log.msg("Failed to reload storage schemas")
        log.err()
def test_loadStorageSchemas_return_the_default_schema_last(self):
    from carbon.storage import loadStorageSchemas, defaultSchema
    schema_list = loadStorageSchemas()
    last_schema = schema_list[-1]
    self.assertEquals(last_schema.name, defaultSchema.name)
    self.assertEquals(last_schema.archives, defaultSchema.archives)
def reloadStorageSchemas(first_run=False):
    global schemas
    try:
        schemas = loadStorageSchemas()
    except Exception as e:
        if first_run:
            raise e
        log.msg("Failed to reload storage schemas")
        log.err()
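These reload helpers are normally driven on a timer rather than called directly. A minimal sketch of how reloadStorageSchemas (defined above) might be scheduled with Twisted's LoopingCall, which carbon's writer module already imports; the 60-second interval is an illustrative assumption, not a documented default:

from twisted.internet.task import LoopingCall

# Hypothetical wiring: re-read storage-schemas.conf once a minute so config
# edits are picked up without restarting the daemon.
schema_reload_task = LoopingCall(reloadStorageSchemas)
schema_reload_task.start(60, now=False)  # begins firing once the reactor runs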
def test_loadStorageSchemas_return_schemas(self):
    from carbon.storage import loadStorageSchemas, PatternSchema, Archive
    schema_list = loadStorageSchemas()
    self.assertEquals(len(schema_list), 3)
    expected = [
        PatternSchema('carbon', '^carbon\.', [Archive.fromString('60:90d')]),
        PatternSchema('default_1min_for_1day', '.*', [Archive.fromString('60s:1d')]),
    ]
    for schema, expected_schema in zip(schema_list[:-1], expected):
        self.assertEquals(schema.name, expected_schema.name)
        self.assertEquals(schema.pattern, expected_schema.pattern)
        for (archive, expected_archive) in zip(schema.archives, expected_schema.archives):
            self.assertEquals(archive.getTuple(), expected_archive.getTuple())
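The expectations in this test correspond to a storage-schemas.conf with a '^carbon\.' rule retained at 60:90d and a catch-all rule at 60s:1d, plus the built-in default schema appended last. A sketch of how a fixture might generate such a file before carbon.storage is imported; the temp-dir plumbing is an assumption, while the import ordering (set CONF_DIR first, then import) mirrors the snippets further down:

import os
import tempfile

conf_dir = tempfile.mkdtemp()
with open(os.path.join(conf_dir, 'storage-schemas.conf'), 'w') as f:
    f.write(
        "[carbon]\n"
        "pattern = ^carbon\\.\n"
        "retentions = 60:90d\n"
        "\n"
        "[default_1min_for_1day]\n"
        "pattern = .*\n"
        "retentions = 60s:1d\n"
    )

# Point carbon at this directory *before* importing carbon.storage so that
# loadStorageSchemas() reads the file we just wrote.
from carbon.conf import settings
settings.CONF_DIR = conf_dir
from carbon.storage import loadStorageSchemas
schemas = loadStorageSchemas()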
def __init__(self, args):
    # Injecting the Carbon Lib Path if needed
    if args.carbonlibdir is not None:
        sys.path.insert(0, args.carbonlibdir)
    try:
        from carbon import conf
        from carbon.conf import settings
        # set carbon config dir
        settings.CONF_DIR = args.carbonconfigdir
        from carbon.storage import loadStorageSchemas, loadAggregationSchemas, DefaultSchema, PatternSchema
    except ImportError as e:
        raise SystemExit('[ERROR] Can\'t find the carbon module, try using --carbonlibdir to explicitly include the path')
    self.DefaultSchema = DefaultSchema
    self.PatternSchema = PatternSchema
    # load schemas from storage-schemas.conf and storage-aggregation.conf using carbon module
    self.storage_schemas = loadStorageSchemas()
    self.aggregation_schemas = loadAggregationSchemas()
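Once a schema list is loaded like this, it is typically scanned in order to pick the retention policy for a metric name; because the default schema is appended last, it acts as the fallback. A rough sketch, assuming each schema object exposes a matches() predicate and that Archive.getTuple() returns (secondsPerPoint, points) as exercised in the tests above; pick_archives and the example metric name are made up for illustration:

def pick_archives(metric_name, storage_schemas):
    # Return the archive tuples of the first schema whose pattern matches.
    for schema in storage_schemas:
        if schema.matches(metric_name):
            return [archive.getTuple() for archive in schema.archives]
    return []

# e.g. pick_archives('carbon.agents.host-a.cpuUsage', loadStorageSchemas())
# would yield [(60, 129600)] under the '^carbon\.' 60:90d rule shown above.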
from carbon.storage import getFilesystemPath, loadStorageSchemas, \
    loadAggregationSchemas
from carbon.conf import settings
from carbon import log, events, instrumentation
from carbon.util import TokenBucket
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.application.service import Service

try:
    import signal
except ImportError:
    log.msg("Couldn't import signal module")

SCHEMAS = loadStorageSchemas()
AGGREGATION_SCHEMAS = loadAggregationSchemas()
CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95

# Initialize token buckets so that we can enforce rate limits on creates and
# updates if the config wants them.
CREATE_BUCKET = None
UPDATE_BUCKET = None
if settings.MAX_CREATES_PER_MINUTE != float('inf'):
    capacity = settings.MAX_CREATES_PER_MINUTE
    fill_rate = float(settings.MAX_CREATES_PER_MINUTE) / 60
    CREATE_BUCKET = TokenBucket(capacity, fill_rate)

if settings.MAX_UPDATES_PER_SECOND != float('inf'):
    capacity = settings.MAX_UPDATES_PER_SECOND
    fill_rate = settings.MAX_UPDATES_PER_SECOND
    UPDATE_BUCKET = TokenBucket(capacity, fill_rate)
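The bucket setup above converts the per-minute create limit into a per-second refill rate (MAX_CREATES_PER_MINUTE / 60). A self-contained sketch of the token-bucket idea, independent of carbon.util.TokenBucket's actual interface, which is not shown in these snippets:

import time

class SimpleTokenBucket(object):
    """Minimal token bucket: at most `capacity` tokens, refilled at fill_rate per second."""
    def __init__(self, capacity, fill_rate):
        self.capacity = float(capacity)
        self.fill_rate = float(fill_rate)
        self.tokens = float(capacity)
        self.last = time.time()

    def drain(self, cost=1):
        # Refill based on elapsed time, then spend `cost` tokens if available.
        now = time.time()
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.fill_rate)
        self.last = now
        if self.tokens >= cost:
            self.tokens -= cost
            return True
        return False

# e.g. MAX_CREATES_PER_MINUTE = 50 -> capacity 50, refill 50/60 tokens per second
creates = SimpleTokenBucket(50, 50.0 / 60)
if creates.drain():
    pass  # allowed to create a new whisper file right now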
try:
    from carbon import conf
    from carbon.conf import settings
except ImportError:
    raise SystemExit('[ERROR] Can\'t find the carbon module, try using --carbonlib to explicitly include the path')

# carbon.conf not seeing the config files so give it a nudge
settings.CONF_DIR = configPath
settings.LOCAL_DATA_DIR = storagePath

# import these once we have the settings figured out
from carbon.storage import loadStorageSchemas, loadAggregationSchemas

# Load the Defined Schemas from our config files
schemas = loadStorageSchemas()
agg_schemas = loadAggregationSchemas()


# check to see if a metric needs to be resized based on the current config
def processMetric(fullPath, schemas, agg_schemas):
    """
    Process a given metric and resize it if necessary.

    Parameters:
        fullPath    - full path to the metric whisper file
        schemas     - carbon storage schemas loaded from config
        agg_schemas - carbon storage aggregation schemas loaded from config
    """
    schema_config_args = ''
    schema_file_args = ''
def reloadStorageSchemas():
    global SCHEMAS
    try:
        SCHEMAS = loadStorageSchemas()
    except Exception as e:
        log.msg("Failed to reload storage SCHEMAS: %s" % (e))
import time
import whisper
from carbon import state
from carbon.cache import MetricCache
from carbon.storage import getFilesystemPath, loadStorageSchemas, \
    loadAggregationSchemas
from carbon.conf import settings
from carbon import log, events, instrumentation
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.application.service import Service

lastCreateInterval = 0
createCount = 0
schemas = loadStorageSchemas()
agg_schemas = loadAggregationSchemas()
CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95


def optimalWriteOrder():
    """Generates metrics with the most cached values first and applies a
    soft rate limit on new metrics"""
    global lastCreateInterval
    global createCount
    metrics = MetricCache.counts()

    t = time.time()
    metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
    log.debug("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))
def reloadStorageSchemas():
    global SCHEMAS
    try:
        SCHEMAS = loadStorageSchemas()
    except Exception, e:
        log.msg("Failed to reload storage SCHEMAS: %s" % (e))
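Besides running the reload on a timer, another common pattern is to trigger it on SIGHUP. A hedged sketch using the stdlib signal module; the handler name is made up, and this is not necessarily how carbon itself wires reloads:

import signal

def _handle_sighup(signum, frame):
    # Re-read storage-schemas.conf without restarting the process.
    reloadStorageSchemas()

# Only possible where SIGHUP exists (i.e. not on Windows).
if hasattr(signal, 'SIGHUP'):
    signal.signal(signal.SIGHUP, _handle_sighup)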