async def __anit__(self, core, layers, write=False):
    '''
    Async init for a snap-style transaction over a stack of layers.

    Args:
        core: the cortex that owns this snap (provides the data model).
        layers: the list of layers to access; the last entry is the write layer.
        write (bool): NOTE(review) — this parameter is accepted but never read
            in this body (self.write is unconditionally set to False below);
            confirm whether callers rely on it before removing.
    '''
    await s_base.Base.__anit__(self)
    # ExitStack collects sync cleanup callbacks; closed on fini below.
    self.stack = contextlib.ExitStack()
    self.user = None
    self.strict = True
    self.elevated = False
    self.canceled = False
    self.core = core
    self.model = core.model
    self.layers = layers
    # wlyr is the write layer (by convention, the last layer in the list).
    self.wlyr = self.layers[-1]
    self.bulk = False
    self.bulksops = []
    # variables used by the storm runtime
    self.vars = {}
    self.runt = {}
    self.debug = False  # Set to true to enable debug output.
    self.write = False  # True when the snap has a write lock on a layer.
    # Bounded caches: tag-node lookups and buid -> node resolution.
    self.tagcache = s_cache.FixedCache(self._addTagNode, size=10000)
    self.buidcache = s_cache.FixedCache(self._getNodeByBuid, size=100000)
    self.onfini(self.stack.close)
    self.changelog = []
    self.tagtype = core.model.type('ival')

    async def fini():
        # Commit every layer on teardown; log (but don't mask) commit errors
        # so one failing layer does not prevent the others from committing.
        for layr in self.layers:
            try:
                await layr.commit()
            except asyncio.CancelledError:
                raise
            except Exception:
                logger.exception('commit error for layer')
        # N.B. don't fini the layers here since they are owned by the cortex

    self.onfini(fini)
async def __anit__(self, auth, node):
    '''
    Async init for a user object backed by a hive node.

    Seeds default info values, wires edit callbacks for role/admin/locked
    changes, opens the per-user profile and vars sub-nodes, and builds the
    permission cache.
    '''
    await HiveIden.__anit__(self, auth, node)

    # Ensure all expected keys exist before wiring the onedit callbacks.
    self.info.setdefault('roles', ())
    self.info.setdefault('admin', False)
    self.info.setdefault('passwd', None)
    self.info.setdefault('locked', False)
    self.info.setdefault('archived', False)

    # onedit callbacks fire when these hive values are modified.
    self.roles = self.info.get('roles', onedit=self._onRolesEdit)
    self.admin = self.info.get('admin', onedit=self._onAdminEdit)
    self.locked = self.info.get('locked', onedit=self._onLockedEdit)

    # arbitrary profile data for application layer use
    prof = await self.node.open(('profile',))
    self.profile = await prof.dict()

    # vars cache for persistent user level data storage
    # TODO: max size check / max count check?
    pvars = await self.node.open(('vars',))
    self.pvars = await pvars.dict()

    # Flattened rule list and memoized permission checks.
    self.fullrules = []
    self.permcache = s_cache.FixedCache(self._calcPermAllow)

    self._initFullRules()
def test_cache_fixed(self):
    '''
    Verify FixedCache miss/hit accounting, FIFO eviction at maxsize,
    and clear() behavior.
    '''
    # Count how many times the miss callback fires for each key.
    hits = collections.defaultdict(int)

    def fetch(key):
        hits[key] += 1
        return key + 20

    cache = s_cache.FixedCache(maxsize=3, onmiss=fetch)

    # First access is a miss that populates the cache...
    self.false(30 in cache)
    self.eq(cache.get(30), 50)
    self.eq(len(cache), 1)
    self.true(30 in cache)

    # ...and every access after that is a hit (callback not re-fired).
    for _ in range(3):
        self.eq(cache.get(30), 50)
    self.eq(hits[30], 1)

    # Fill past maxsize=3 so the oldest entry (30) gets evicted.
    for key, valu in ((40, 60), (50, 70), (60, 80)):
        self.eq(cache.get(key), valu)
    self.eq(hits[30], 1)

    # 30 was evicted, so fetching it again is a fresh miss.
    self.eq(cache.get(30), 50)
    self.eq(hits[30], 2)

    # clear() drops everything; the next get misses again.
    cache.clear()
    self.eq(cache.get(30), 50)
    self.eq(hits[30], 3)
def test_lib_cache_fixed(self):
    '''
    Verify FixedCache FIFO eviction with size=2 and that clear()
    empties both the fifo and the backing dict.
    '''
    def lowerit(name):
        return name.lower()

    cache = s_cache.FixedCache(lowerit, size=2)

    # Two entries fit exactly.
    for inpt, outp in (('FOO', 'foo'), ('BAR', 'bar')):
        self.eq(outp, cache.get(inpt))

    self.len(2, cache.fifo)
    self.len(2, cache.cache)
    self.nn(cache.cache.get('FOO'))
    self.nn(cache.cache.get('BAR'))

    # A third entry evicts the oldest key ('FOO') in FIFO order.
    self.eq('baz', cache.get('BAZ'))
    self.len(2, cache.fifo)
    self.len(2, cache.cache)
    self.nn(cache.cache.get('BAR'))
    self.nn(cache.cache.get('BAZ'))

    # clear() empties both internal structures.
    cache.clear()
    self.len(0, cache.fifo)
    self.len(0, cache.cache)
def __init__(self, **opts):
    '''
    Initialize the storm runtime: register the comparator functions,
    comparator constructors, and operator handlers, then build the
    regex compilation cache.
    '''
    Configable.__init__(self)
    self.setConfOpts(opts)

    self.operfuncs = {}
    self.cmprctors = {}

    # Simple value comparators.
    for name, func in (
        ('eq', eq),
        ('lt', lt),
        ('gt', gt),
        ('le', le),
        ('ge', ge),
    ):
        self.setCmprFunc(name, func)

    # Comparator constructors (including interval and
    # interval-interval comparisons via 'ival').
    for name, ctor in (
        ('or', self._cmprCtorOr),
        ('and', self._cmprCtorAnd),
        ('tag', self._cmprCtorTag),
        ('seen', self._cmprCtorSeen),
        ('range', self._cmprCtorRange),
        ('ival', self._cmprCtorIval),
        ('in', self._cmprCtorIn),
        ('re', self._cmprCtorRe),
        ('has', self._cmprCtorHas),
    ):
        self.setCmprCtor(name, ctor)

    # Storm operator handlers.
    for name, func in (
        ('filt', self._stormOperFilt),
        ('opts', self._stormOperOpts),
        ('save', self._stormOperSave),
        ('load', self._stormOperLoad),
        ('clear', self._stormOperClear),
        ('guid', self._stormOperGuid),
        ('join', self._stormOperJoin),
        ('lift', self._stormOperLift),
        ('refs', self._stormOperRefs),
        ('limit', self._stormOperLimit),
        ('pivot', self._stormOperPivot),
        ('alltag', self._stormOperAllTag),
        ('addtag', self._stormOperAddTag),
        ('deltag', self._stormOperDelTag),
        ('totags', self._stormOperToTags),
        ('addnode', self._stormOperAddNode),
        ('delnode', self._stormOperDelNode),
        ('nexttag', self._stormOperNextSeq),
        ('setprop', self._stormOperSetProp),
        ('addxref', self._stormOperAddXref),
        ('fromtags', self._stormOperFromTags),
        ('jointags', self._stormOperJoinTags),
        ('show:cols', self._stormOperShowCols),
    ):
        self.setOperFunc(name, func)

    # Cache compiled regex objects.
    self._rt_regexcache = s_cache.FixedCache(1024, re.compile)
def test_cache_defval(self):
    '''
    Ensure default behaviors are covered: Cache and FixedCache return
    None on unknown keys, while OnDem raises KeyError.
    '''
    # Cache.get on a missing key yields None by default.
    cache = s_cache.Cache()
    self.none(cache.get('foo'))

    # FixedCache with no onmiss callback behaves the same way.
    fixed = s_cache.FixedCache(maxsize=10)
    self.none(fixed.get('foo'))

    # OnDem has no default and raises for unregistered names.
    ondem = s_cache.OnDem()
    with self.raises(KeyError) as cm:
        ondem.get('foo')
async def __anit__(self, view, user):
    '''
    Async init for a snap-style transaction bound to a view and user.

    Args:
        view: the View whose layers this snap reads and writes.
        user: the authenticated user running the snap (required).
    '''
    await s_base.Base.__anit__(self)
    # ExitStack collects sync cleanup callbacks; closed on fini below.
    self.stack = contextlib.ExitStack()

    assert user is not None

    self.strict = True
    self.elevated = False
    self.canceled = False

    self.core = view.core
    self.view = view
    self.user = user

    self.buidprefetch = self.view.isafork()

    # Layers are stored "bottom up": the write layer ends up last.
    self.layers = list(reversed(view.layers))
    self.wlyr = self.layers[-1]

    self.readonly = self.wlyr.readonly

    # variables used by the storm runtime
    self.vars = {}
    self.runt = {}

    self.debug = False  # Set to true to enable debug output.
    self.write = False  # True when the snap has a write lock on a layer.

    self._tagcachesize = 10000
    self._buidcachesize = 100000
    self.tagcache = s_cache.FixedCache(self._addTagNode, size=self._tagcachesize)
    # Keeps alive the most recently accessed node objects
    self.buidcache = collections.deque(maxlen=self._buidcachesize)
    # Weak map so live Node objects are shared while referenced elsewhere.
    self.livenodes = weakref.WeakValueDictionary()  # buid -> Node

    # Tracks warning keys already emitted so each fires at most once.
    self._warnonce_keys = set()

    self.onfini(self.stack.close)
    self.changelog = []
    self.tagtype = self.core.model.type('ival')
    self.trigson = self.core.trigson
async def __anit__(self, node, auth):
    '''
    Async init for a rule-bearing auth object backed by a hive node.

    Loads the rules list from the node, wires an edit callback for rule
    changes, and builds the memoized permission cache.
    '''
    await s_base.Base.__anit__(self)

    self.auth = auth
    self.node = node

    # Stores the AuthGate-specific instances of this ruler, by authgate tuple
    self.gaterulr = {}  # gate iden -> GateRuler

    # Name/iden come from the backing hive node.
    self.name = node.valu
    self.iden = node.name()

    self.info = await node.dict()
    # Fini the info dict along with this object.
    self.onfini(self.info)

    self.info.setdefault('rules', ())

    # onedit callback fires when the rules value is modified.
    self.rules = self.info.get('rules', onedit=self._onRulesEdit)
    self.permcache = s_cache.FixedCache(self._calcPermAllow)
async def __anit__(self, node, auth):
    '''
    Async init for a user object layered on HiveRuler.

    Seeds default info values, opens the nexus-backed profile and vars
    sub-nodes, and builds the permission cache.
    '''
    await HiveRuler.__anit__(self, node, auth)

    # Ensure all expected keys exist with sane defaults.
    self.info.setdefault('roles', ())
    self.info.setdefault('admin', False)
    self.info.setdefault('passwd', None)
    self.info.setdefault('locked', False)
    self.info.setdefault('archived', False)

    # arbitrary profile data for application layer use
    prof = await self.node.open(('profile', ))
    # nexs=True: changes are distributed via the nexus change log.
    self.profile = await prof.dict(nexs=True)

    # TODO: max size check / max count check?
    varz = await self.node.open(('vars', ))
    self.vars = await varz.dict(nexs=True)

    # Memoized permission checks.
    self.permcache = s_cache.FixedCache(self._allowed)
async def __anit__(self, auth, node):
    '''
    Async init for a user object backed by a hive node.

    Seeds default info values, wires edit callbacks for role/admin/locked
    changes, and builds the flattened rule list and permission cache.
    '''
    await HiveIden.__anit__(self, auth, node)

    # Ensure all expected keys exist before wiring the onedit callbacks.
    self.info.setdefault('roles', ())
    self.info.setdefault('admin', False)
    self.info.setdefault('passwd', None)
    self.info.setdefault('locked', False)

    # onedit callbacks fire when these hive values are modified.
    self.roles = self.info.get('roles', onedit=self._onRolesEdit)
    self.admin = self.info.get('admin', onedit=self._onAdminEdit)
    self.locked = self.info.get('locked', onedit=self._onLockedEdit)

    # Flattened rule list and memoized permission checks.
    self.fullrules = []
    self.permcache = s_cache.FixedCache(self._calcPermAllow)

    self._initFullRules()
async def __anit__(self, core, layers, user):
    '''
    Args:
        core (cortex): the cortex
        layers (List[Layer]): the list of layers to access, write layer last
    '''
    await s_base.Base.__anit__(self)
    # ExitStack collects sync cleanup callbacks; closed on fini below.
    self.stack = contextlib.ExitStack()

    assert user is not None

    self.strict = True
    self.elevated = False
    self.canceled = False

    self.core = core
    self.user = user
    self.model = core.model

    # it is optimal for a snap to have layers in "bottom up" order
    self.layers = list(reversed(layers))
    self.wlyr = self.layers[-1]

    # variables used by the storm runtime
    self.vars = {}
    self.runt = {}

    self.debug = False  # Set to true to enable debug output.
    self.write = False  # True when the snap has a write lock on a layer.

    self.tagcache = s_cache.FixedCache(self._addTagNode, size=10000)
    self.buidcache = collections.deque(
        maxlen=100000
    )  # Keeps alive the most recently accessed node objects
    # Weak map so live Node objects are shared while referenced elsewhere.
    self.livenodes = weakref.WeakValueDictionary()  # buid -> Node

    self.onfini(self.stack.close)
    self.changelog = []
    self.tagtype = core.model.type('ival')
return Parser(text).query() def parseEval(text): return Parser(text).eval() async def _forkedParseQuery(args): return await s_coro.forked(parseQuery, args[0], mode=args[1]) async def _forkedParseEval(text): return await s_coro.forked(parseEval, text) evalcache = s_cache.FixedCache(_forkedParseEval, size=100) querycache = s_cache.FixedCache(_forkedParseQuery, size=100) def massage_vartokn(x): return s_ast.Const('' if not x else (x[1:-1] if x[0] == "'" else ( unescape(x) if x[0] == '"' else x))) # For AstConverter, one-to-one replacements from lark to synapse AST terminalClassMap = { 'ABSPROP': s_ast.AbsProp, 'ABSPROPNOUNIV': s_ast.AbsProp, 'ALLTAGS': lambda _: s_ast.TagMatch(''), 'BREAK': lambda _: s_ast.BreakOper(), 'CONTINUE': lambda _: s_ast.ContinueOper(),