def test_link_refused(self):
    self.assertRaises(LinkRefused, s_telepath.openurl, 'tcp://127.0.0.1:1/foo')
    self.assertRaises(LinkRefused, s_telepath.openurl, 'ssl://127.0.0.1:1/foo')
    if s_thishost.get('platform') != 'windows':
        self.assertRaises(LinkRefused, s_telepath.openurl, 'local://newpnewpnewp/foo')
def _axonhost_confdefs():
    confdefs = (
        ('axonhost:autorun', {'type': 'int', 'defval': 0,
            'doc': 'Number of Axons to autostart.'}),
        ('axon:axonbus', {'type': 'str', 'defval': '',
            'doc': 'URL to an axonbus'}),
        ('axon:bytemax', {'type': 'int', 'defval': terabyte,
            'doc': 'Max size of each axon created by the host.'}),
        ('axon:listen', {'type': 'str', 'defval': 'tcp://0.0.0.0:0/axon',
            'doc': 'Default listener URLs for the axons created by this host'}),
        ('axon:tags', {'defval': (),
            'doc': 'Tuple of tag values for the axon to add when sharing over an Axon servicebus.'}),
        ('axon:syncopts', {'defval': {},
            'doc': 'kwarg options used when making a persistent sync directory for axons.'}),
        ('axon:clones', {'type': 'int', 'defval': 2,
            'doc': 'The default number of clones for an axon.'}),
        ('axonhost:maxsize', {'type': 'int', 'defval': 0,
            'doc': 'Max total allocations for Axons created by the host. '
                   'Only applies if set to a positive integer.'}),
        ('axon:hostname', {'type': 'str', 'defval': s_thishost.get('hostname'),
            'doc': 'AxonHost hostname'}),
    )
    return confdefs
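# For context: a minimal sketch of consuming a confdefs tuple like the one
# above, collecting each option's 'defval' into a plain defaults dict.
# load_confdef_defaults() is a hypothetical helper, not part of the synapse API.
def load_confdef_defaults(confdefs):
    return {name: info.get('defval') for name, info in confdefs}

# Example (assuming the module-level constants above are defined):
#   defaults = load_confdef_defaults(_axonhost_confdefs())
#   defaults['axon:clones']  ->  2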
def main(argv, outp=None):

    if outp is None:
        outp = s_output.OutPut()

    p = getArgParser()
    opts = p.parse_args(argv)

    log_level = os.getenv('SYN_DMON_LOG_LEVEL', opts.log_level)
    if log_level:  # pragma: no cover
        log_level = log_level.upper()
        if log_level not in LOG_LEVEL_CHOICES:
            raise ValueError('Invalid log level provided: {}'.format(log_level))
        logging.basicConfig(level=log_level,
                            format='%(asctime)s [%(levelname)s] %(message)s [%(filename)s:%(funcName)s]')
        logger.info('log level set to ' + log_level)

    if opts.lsboot:
        for path in lsboot():
            outp.printf(path)
        return

    if opts.onboot:
        plat = s_thishost.get('platform')
        if plat not in ('linux', 'darwin'):
            raise Exception('--onboot does not support platform: %s' % (plat,))

        for path in opts.configs:
            logger.info('onboot add: %s' % (path,))
            onboot(path)
        return

    if opts.noboot:
        for path in opts.configs:
            logger.info('onboot del: %s' % (path,))
            noboot(path)
        return

    dmon = s_daemon.Daemon()

    if opts.asboot:
        dmon.loadDmonFile(cfgfile)

    for path in opts.configs:
        dmon.loadDmonFile(path)

    dmon.main()
def thisHostMust(self, **props):  # pragma: no cover
    '''
    Requires a host having a specific property.

    Args:
        **props: Key/value pairs to require from thishost.

    Raises:
        unittest.SkipTest: If a required property does not match.
    '''
    for k, v in props.items():
        if s_thishost.get(k) != v:
            raise unittest.SkipTest('skip thishost: %s!=%r' % (k, v))
def log(self, level, mesg, **info):
    '''
    Implements the log event convention for an EventBus.

    Args:
        level (int): A python logger level for the event
        mesg (str): A log message
        **info: Additional log metadata
    '''
    info['time'] = s_common.now()
    info['host'] = s_thishost.get('hostname')
    info['level'] = level
    info['class'] = self.__class__.__name__

    self.fire('log', mesg=mesg, **info)
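# Usage sketch for the log event convention above: any object with this log()
# method (e.g. an EventBus subclass that mixes it in) fires a 'log' event that
# observers can subscribe to. The handler below is illustrative; in synapse,
# on() callbacks receive the full event message tuple.
#
#   def onlog(mesg):
#       print(mesg[1].get('level'), mesg[1].get('mesg'), mesg[1].get('host'))
#
#   bus.on('log', onlog)
#   bus.log(logging.WARNING, 'disk nearly full', free=512)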
def main(argv, outp=None):

    if outp is None:
        outp = s_output.OutPut()

    p = getArgParser()
    opts = p.parse_args(argv)

    if opts.log_level:
        logging.basicConfig(level=opts.log_level.upper())
        logger.info('log level set to ' + opts.log_level)

    if opts.lsboot:
        for path in lsboot():
            outp.printf(path)
        return

    if opts.onboot:
        plat = s_thishost.get('platform')
        if plat not in ('linux', 'darwin'):
            raise Exception('--onboot does not support platform: %s' % (plat,))

        for path in opts.configs:
            logger.info('onboot add: %s' % (path,))
            onboot(path)
        return

    if opts.noboot:
        for path in opts.configs:
            logger.info('onboot del: %s' % (path,))
            noboot(path)
        return

    dmon = s_daemon.Daemon()

    if opts.asboot:
        dmon.loadDmonFile(cfgfile)

    for path in opts.configs:
        dmon.loadDmonFile(path)

    dmon.main()
def __init__(self, datadir, **opts):
    s_eventbus.EventBus.__init__(self)

    self.datadir = gendir(datadir)
    self.opts = opts
    self.lock = threading.Lock()

    self.axons = {}
    self.axonbus = None
    self.axonforks = {}

    self.onfini(self._onAxonHostFini)

    self.opts.setdefault('autorun', 0)                            # how many axons to auto-start
    self.opts.setdefault('axonbus', '')                           # url to axonbus
    self.opts.setdefault('bytemax', terabyte)                     # by default make each Axon 1 Terabyte
    self.opts.setdefault('syncmax', gigabyte * 10)
    self.opts.setdefault('hostname', s_thishost.get('hostname'))  # allow override for testing

    url = self.opts.get('axonbus')
    if url:
        self.axonbus = s_service.openurl(url)
        self.axonbus.runSynSvc(guid(), self)

    for name in os.listdir(self.datadir):

        if not name.endswith('.axon'):
            continue

        iden, _ = name.split('.', 1)
        self._fireAxonIden(iden)

    # fire auto-run axons
    auto = self.opts.get('autorun')
    while len(self.axons) < auto:
        self.add()
def main(argv):

    p = getArgParser()
    opts = p.parse_args(argv)

    if opts.log_level:
        logging.basicConfig(level=opts.log_level.upper())
        logger.info('log level set to ' + opts.log_level)

    if opts.lsboot:
        for path in lsboot():
            print(path)
        return

    if opts.onboot:
        plat = s_thishost.get('platform')
        if plat not in ('linux', 'darwin'):
            raise Exception('--onboot does not support platform: %s' % (plat,))

        for path in opts.configs:
            logger.info('onboot add: %s' % (path,))
            onboot(path)
        return

    if opts.noboot:
        for path in opts.configs:
            logger.info('onboot del: %s' % (path,))
            noboot(path)
        return

    dmon = s_daemon.Daemon()

    if opts.asboot:
        dmon.loadDmonFile(cfgfile)

    for path in opts.configs:
        dmon.loadDmonFile(path)

    dmon.main()
def __init__(self, datadir, **opts):
    s_eventbus.EventBus.__init__(self)

    self.datadir = gendir(datadir)
    self.opts = opts
    self.lock = threading.Lock()

    self.axons = {}
    self.axonbus = None
    self.axonforks = {}

    self.onfini(self._onAxonHostFini)

    self.opts.setdefault('autorun', 0)                            # how many axons to auto-start
    self.opts.setdefault('axonbus', '')                           # url to axonbus
    self.opts.setdefault('bytemax', terabyte)                     # by default make each Axon 1 Terabyte
    self.opts.setdefault('syncmax', gigabyte * 10)
    self.opts.setdefault('hostname', s_thishost.get('hostname'))  # allow override for testing

    url = self.opts.get('axonbus')
    if url:
        self.axonbus = s_service.openurl(url)
        self.axonbus.runSynSvc(guid(), self)

    for name in os.listdir(self.datadir):

        if not name.endswith('.axon'):
            continue

        iden, _ = name.split('.', 1)
        self._fireAxonIden(iden)

    # fire auto-run axons
    auto = self.opts.get('autorun')
    while len(self.axons) < auto:
        self.add()
def test_thishost_hostname(self):
    self.assertIsNotNone(s_thishost.get('hostname'))
def test_thishost_platform(self):
    self.assertIsNotNone(s_thishost.get('platform'))
def test_thishost_hostname(self):
    self.nn(s_thishost.get('hostname'))
def thisHostMust(self, **props):
    for k, v in props.items():
        if s_thishost.get(k) != v:
            # unittest.skip() only builds a decorator; raise SkipTest to actually skip.
            raise unittest.SkipTest('skip thishost: %s!=%r' % (k, v))
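# Usage sketch: with the corrected "raise unittest.SkipTest(...)" above, a test
# can gate itself on a host property and be reported as skipped rather than
# silently continuing. The test class below is illustrative only.
import unittest

import synapse.lib.thishost as s_thishost

class HostGatedTest(unittest.TestCase):

    def thisHostMust(self, **props):
        for k, v in props.items():
            if s_thishost.get(k) != v:
                raise unittest.SkipTest('skip thishost: %s!=%r' % (k, v))

    def test_linux_only(self):
        self.thisHostMust(platform='linux')  # skipped everywhere but linux
        # ... linux-specific assertions would go here ...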
def test_thishost_ptrsize(self):
    self.nn(s_thishost.get('ptrsize'))
def __init__(self, axondir, **opts):
    s_eventbus.EventBus.__init__(self)

    self.inprog = {}
    self.axondir = gendir(axondir)
    self.clonedir = gendir(axondir, 'clones')

    self.clones = {}
    self.cloneinfo = {}
    self.clonehosts = set()
    self.clonelock = threading.Lock()

    self.readyclones = set()              # iden of each clone added as it comes up
    self.clonesready = threading.Event()  # set once all clones are up and running

    self.opts = opts
    self.axonbus = None

    self.iden = self.opts.get('iden')
    self.tags = self.opts.get('tags', ())

    self.opts.setdefault('ro', False)
    self.opts.setdefault('clone', '')    # are we a clone?
    self.opts.setdefault('clones', 2)    # how many clones do we want?
    self.opts.setdefault('axonbus', '')  # do we have an axon svcbus?

    self.opts.setdefault('hostname', s_thishost.get('hostname'))
    self.opts.setdefault('listen', 'tcp://0.0.0.0:0/axon')  # our default "ephemeral" listener

    # if we're a clone, we're read-only and have no clones
    if self.opts.get('clone'):
        self.opts['ro'] = True
        self.opts['clones'] = 0

    self.opts.setdefault('synckeep', threedays)
    self.opts.setdefault('syncsize', gigabyte * 10)

    corepath = os.path.join(self.axondir, 'axon.db')
    self.core = s_cortex.openurl('sqlite:///%s' % corepath)

    fd = genfile(axondir, 'axon.heap')

    self.link = None
    self.heap = s_heap.Heap(fd)
    self.dmon = s_daemon.Daemon()

    lisn = self.opts.get('listen')
    if lisn:
        self.link = self.dmon.listen(lisn)

    self.axfo = (self.iden, {})
    self.axthrs = set()

    self.setAxonInfo('link', self.link)
    self.setAxonInfo('opts', self.opts)

    self.dmon.share('axon', self)

    # create a reactor to unwrap core/heap sync events
    self.syncact = s_reactor.Reactor()
    self.syncact.act('heap:sync', self.heap.sync)
    self.syncact.act('core:sync', self.core.sync)

    # wrap core/heap sync events as axon:sync events
    self.core.on('core:sync', self._fireAxonSync)
    self.heap.on('heap:sync', self._fireAxonSync)

    # model details for the actual byte blobs
    self.core.addTufoForm('axon:blob', ptype='guid')
    self.core.addTufoProp('axon:blob', 'off', ptype='int', req=True)
    self.core.addTufoProp('axon:blob', 'size', ptype='int', req=True)
    self.core.addTufoProp('axon:blob', 'md5', ptype='hash:md5', req=True)
    self.core.addTufoProp('axon:blob', 'sha1', ptype='hash:sha1', req=True)
    self.core.addTufoProp('axon:blob', 'sha256', ptype='hash:sha256', req=True)
    self.core.addTufoProp('axon:blob', 'sha512', ptype='hash:sha512', req=True)

    self.core.addTufoForm('axon:clone', ptype='guid')

    dirname = gendir(axondir, 'sync')
    syncopts = self.opts.get('syncopts', {})

    self.syncdir = None

    self.onfini(self._onAxonFini)
    self.onfini(self.core.fini)
    self.onfini(self.heap.fini)
    self.onfini(self.dmon.fini)

    # if we're not a clone, create a sync dir
    if not self.opts.get('clone'):
        self.syncdir = s_persist.Dir(dirname, **syncopts)
        self.onfini(self.syncdir.fini)

        self.on('axon:sync', self.syncdir.add)

    self.axcthr = None

    # share last to avoid startup races
    busurl = self.opts.get('axonbus')
    if busurl:
        self.axonbus = s_service.openurl(busurl)

        props = {'link': self.link, 'tags': self.tags}
        self.axonbus.runSynSvc(self.iden, self, **props)

        self.axcthr = self._fireAxonClones()
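# To illustrate the sync plumbing above: heap and core emit heap:sync and
# core:sync events, which are wrapped as axon:sync events and persisted to the
# sync dir; a clone replays them by unwrapping each message through the same
# reactor. A minimal sketch, assuming Reactor.react() dispatches a message to
# the handler registered via act() (the replay helper itself is hypothetical).
def replay_axon_sync(axon, events):
    for mesg in events:
        # each axon:sync event carries the original core/heap sync message
        axon.syncact.react(mesg[1].get('mesg'))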
def test_thishost_platform(self):
    self.nn(s_thishost.get('platform'))
def thisHostMustNot(self, **props):
    for k, v in props.items():
        if s_thishost.get(k) == v:
            raise unittest.SkipTest('skip thishost: %s==%r' % (k, v))
def __init__(self, axondir, **opts):
    s_eventbus.EventBus.__init__(self)

    self.inprog = {}
    self.axondir = gendir(axondir)
    self.clonedir = gendir(axondir, 'clones')

    self.clones = {}
    self.cloneinfo = {}
    self.clonehosts = set()
    self.clonelock = threading.Lock()

    self.readyclones = set()              # iden of each clone added as it comes up
    self.clonesready = threading.Event()  # set once all clones are up and running

    self.opts = opts
    self.axonbus = None

    self.iden = self.opts.get('iden')
    self.tags = self.opts.get('tags', ())

    self.opts.setdefault('ro', False)
    self.opts.setdefault('clone', '')    # are we a clone?
    self.opts.setdefault('clones', 2)    # how many clones do we want?
    self.opts.setdefault('axonbus', '')  # do we have an axon svcbus?

    self.opts.setdefault('hostname', s_thishost.get('hostname'))
    self.opts.setdefault('listen', 'tcp://0.0.0.0:0/axon')  # our default "ephemeral" listener

    # if we're a clone, we're read-only and have no clones
    if self.opts.get('clone'):
        self.opts['ro'] = True
        self.opts['clones'] = 0

    self.opts.setdefault('synckeep', threedays)
    self.opts.setdefault('syncsize', gigabyte * 10)

    self.core = opts.get('core')
    if self.core is None:  # identity check; "== None" was non-idiomatic
        savefile = os.path.join(self.axondir, 'axon.save')
        self.core = s_cortex.openurl('ram:///', savefile=savefile)
        self.onfini(self.core.fini)

    fd = genfile(axondir, 'axon.heap')

    self.link = None
    self.heap = s_heap.Heap(fd)
    self.dmon = s_daemon.Daemon()

    lisn = self.opts.get('listen')
    if lisn:
        self.link = self.dmon.listen(lisn)

    self.axfo = (self.iden, {})
    self.axthrs = set()

    self.setAxonInfo('link', self.link)
    self.setAxonInfo('opts', self.opts)

    self.dmon.share('axon', self)

    # create a reactor to unwrap core/heap sync events
    self.syncact = s_reactor.Reactor()
    self.syncact.act('heap:sync', self.heap.sync)
    self.syncact.act('core:sync', self.core.sync)

    # wrap core/heap sync events as axon:sync events
    self.core.on('core:sync', self._fireAxonSync)
    self.heap.on('heap:sync', self._fireAxonSync)

    # model details for the actual byte blobs
    self.core.addTufoForm('axon:blob', ptype='guid')
    self.core.addTufoProp('axon:blob', 'ver', ptype='int', defval=1)
    self.core.addTufoProp('axon:blob', 'off', ptype='int', req=True)
    self.core.addTufoProp('axon:blob', 'size', ptype='int', req=True)
    self.core.addTufoProp('axon:blob', 'md5', ptype='hash:md5', req=True)
    self.core.addTufoProp('axon:blob', 'sha1', ptype='hash:sha1', req=True)
    self.core.addTufoProp('axon:blob', 'sha256', ptype='hash:sha256', req=True)
    self.core.addTufoProp('axon:blob', 'sha512', ptype='hash:sha512', req=True)

    self.core.addTufoForm('axon:clone', ptype='guid')

    dirname = gendir(axondir, 'sync')
    syncopts = self.opts.get('syncopts', {})

    self.syncdir = None

    # if we're not a clone, create a sync dir
    if not self.opts.get('clone'):
        self.syncdir = s_persist.Dir(dirname, **syncopts)
        self.onfini(self.syncdir.fini)

        self.on('axon:sync', self.syncdir.add)

    self.onfini(self._onAxonFini)
    self.onfini(self.core.fini)
    self.onfini(self.heap.fini)
    self.onfini(self.dmon.fini)

    # share last to avoid startup races
    busurl = self.opts.get('axonbus')
    if busurl:
        self.axonbus = s_service.openurl(busurl)

        props = {'link': self.link, 'tags': self.tags}
        self.axonbus.runSynSvc(self.iden, self, **props)

        self._fireAxonClones()
def test_thishost_ptrsize(self):
    self.assertIsNotNone(s_thishost.get('ptrsize'))
def test_thishost_ptrsize(self):
    self.assertIsNotNone(s_thishost.get('ptrsize'))
def thisHostMustNot(self, **props):
    for k, v in props.items():
        if s_thishost.get(k) == v:
            raise unittest.SkipTest('skip thishost: %s==%r' % (k, v))
def _axon_confdefs():
    confdefs = (
        ('axon:ro', {'type': 'bool', 'defval': 0,
            'doc': 'Axon read-only mode. Prevents allocating new space for writing data to the heap file.'}),
        ('axon:clone', {'type': 'bool', 'defval': 0,
            'doc': 'Flag to indicate the axon is to be a clone axon or not. Not usually directly set '
                   'by the user.'}),
        ('axon:clone:iden', {'type': 'str', 'defval': '',
            'doc': 'Iden of the axon that this is a clone of (only applies to clones). Not usually'
                   ' directly set by the user.'}),
        ('axon:clones', {'type': 'int', 'defval': 2,
            'doc': 'Number of clones to make of this axon on the axonbus.'}),
        ('axon:axonbus', {'type': 'str', 'defval': '',
            'doc': 'Axon servicebus used for making clones of an Axon.'}),
        ('axon:hostname', {'type': 'str', 'defval': s_thishost.get('hostname'),
            'doc': 'Hostname associated with an Axon.'}),
        ('axon:listen', {'type': 'str', 'defval': 'tcp://0.0.0.0:0/axon',
            'doc': 'Default listener URL for the axon'}),
        ('axon:tags', {'defval': (),
            'doc': 'Tuple of tag values for the axon over an Axon servicebus.'}),
        ('axon:iden', {'type': 'str', 'defval': None,
            'doc': 'Unique identifier for the axon. Not usually directly set by the user.'}),
        ('axon:syncopts', {'defval': {},
            'doc': 'kwarg options used when making a persistent sync directory.'}),
        ('axon:bytemax', {'type': 'int', 'defval': terabyte,
            'doc': 'Max size of data this axon is allowed to store.'}),
    )
    return confdefs
import os
import mmap
import ctypes
import ctypes.util
import threading

import synapse.lib.thisplat as s_thisplat
import synapse.lib.thishost as s_thishost

from synapse.eventbus import EventBus

libc = s_thisplat.getLibC()
ptrsize = s_thishost.get('ptrsize')

# TODO figure out how to use windows mmap for this
haspriv = getattr(mmap, 'MAP_PRIVATE', None) is not None
haspread = getattr(os, 'pread', None) is not None
hasremap = getattr(libc, 'mremap', None) is not None

def getAtomFile(fd, memok=True):
    '''
    Factory to construct the most optimal AtomFile for this platform.

    Example:

        atom = getAtomFile(fd)

        # provides thread safe routines for most optimal
        # offset based file I/O for this platform.
    '''
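# A sketch of the fallback the capability flags above exist to select: when
# os.pread is unavailable (haspread is False), positional reads must serialize
# seek+read behind a lock to stay thread safe. SeekReadAtom is illustrative,
# not one of the module's actual AtomFile classes.
import threading

class SeekReadAtom:

    def __init__(self, fd):
        self.fd = fd
        self.lock = threading.Lock()

    def readoff(self, off, size):
        # emulate pread: atomically seek and read under the lock
        with self.lock:
            self.fd.seek(off)
            return self.fd.read(size)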
def _memorylockloop(self):
    '''
    Separate thread loop that manages the prefaulting and locking of the memory backing the data file
    '''
    if not s_thishost.get('hasmemlocking'):
        return

    MAX_TOTAL_PERCENT = .90  # how much of all the RAM to take
    MAX_LOCK_AT_ONCE = s_const.gibibyte

    # Calculate a reasonable maximum amount of memory to lock

    s_thisplat.maximizeMaxLockedMemory()
    locked_ulimit = s_thisplat.getMaxLockedMemory()

    if locked_ulimit < s_const.gibibyte // 2:
        logger.warning(
            'Operating system limit of maximum amount of locked memory (currently %d) is \n'
            'too low for optimal performance.', locked_ulimit)

    logger.debug('memory locking thread started')

    # Note: available might be larger than max_total in a container
    max_total = s_thisplat.getTotalMemory()
    available = s_thisplat.getAvailableMemory()

    PAGESIZE = 4096
    max_to_lock = (min(locked_ulimit,
                       int(max_total * MAX_TOTAL_PERCENT),
                       int(available * MAX_TOTAL_PERCENT)) // PAGESIZE) * PAGESIZE

    self.max_could_lock = max_to_lock

    path = self.path.absolute() / 'data.mdb'  # Path to the file that gets mapped
    fh = open(path, 'r+b')
    fileno = fh.fileno()

    prev_memend = 0  # The last end of the file mapping, so we can start from there

    # Avoid spamming messages
    first_end = True
    limit_warned = False

    self.locking_memory = True

    self.resizeevent.set()

    while not self.isfini:

        self.resizeevent.wait()
        if self.isfini:
            break

        self.schedCallSafe(self.lockdoneevent.clear)
        self.resizeevent.clear()

        try:
            memstart, memlen = s_thisplat.getFileMappedRegion(path)
        except s_exc.NoSuchFile:
            logger.warning('map not found for %s', path)

            if not self.resizeevent.is_set():
                self.schedCallSafe(self.lockdoneevent.set)
            continue

        if memlen > max_to_lock:
            memlen = max_to_lock
            if not limit_warned:
                logger.warning('memory locking limit reached')
                limit_warned = True
            # Even in the event that we've hit our limit we still have to loop because further
            # mmaps may cause the base address to change, necessitating relocking what we can

        # The file might be a little bit smaller than the map because of rounding
        # (and mmap fails if you give it a too-long length)
        filesize = os.fstat(fileno).st_size
        goal_end = memstart + min(memlen, filesize)
        self.lock_goal = goal_end - memstart

        self.lock_progress = 0
        prev_memend = memstart

        # Actually do the prefaulting and locking.  Only do it a chunk at a time to maintain responsiveness.
        while prev_memend < goal_end:
            new_memend = min(prev_memend + MAX_LOCK_AT_ONCE, goal_end)
            memlen = new_memend - prev_memend
            PROT = 1        # PROT_READ
            FLAGS = 0x8001  # MAP_POPULATE | MAP_SHARED (Linux only) (for fast prefaulting)
            try:
                self.prefaulting = True
                with s_thisplat.mmap(0, length=new_memend - prev_memend, prot=PROT, flags=FLAGS,
                                     fd=fileno, offset=prev_memend - memstart):
                    s_thisplat.mlock(prev_memend, memlen)
            except OSError as e:
                logger.warning('error while attempting to lock memory of %s: %s', path, e)
                break
            finally:
                self.prefaulting = False

            prev_memend = new_memend
            self.lock_progress = prev_memend - memstart

        if first_end:
            first_end = False
            logger.info('completed prefaulting and locking slab')

        if not self.resizeevent.is_set():
            self.schedCallSafe(self.lockdoneevent.set)

    self.locking_memory = False
    logger.debug('memory locking thread ended')
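# Worked example of the max_to_lock computation above: the budget is the
# smallest of the ulimit and 90% of total/available RAM, rounded down to a
# page boundary because mlock operates on whole pages. Values are illustrative.
PAGESIZE = 4096
locked_ulimit = 8 * 1024 ** 3      # e.g. ulimit allows 8 GiB
max_total = 16 * 1024 ** 3         # 16 GiB of physical RAM
available = 10 * 1024 ** 3 + 4097  # deliberately not page aligned

budget = min(locked_ulimit, int(max_total * .90), int(available * .90))
max_to_lock = (budget // PAGESIZE) * PAGESIZE  # round down to a page multiple

assert max_to_lock <= budget and max_to_lock % PAGESIZE == 0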
def test_thishost_platform(self):
    self.assertIsNotNone(s_thishost.get('platform'))
def test_thishost_hostname(self):
    self.assertIsNotNone(s_thishost.get('hostname'))
def thisHostMust(self, **props):
    for k, v in props.items():
        if s_thishost.get(k) != v:
            # unittest.skip() only builds a decorator; raise SkipTest to actually skip.
            raise unittest.SkipTest('skip thishost: %s!=%r' % (k, v))