def init( self, parameters, resources ):
    """Initialize the admin actor: connect to backing stores and register RPC handlers.

    parameters: dict, must contain 'db' (Cassandra connection info for hcp_analytics).
    resources: dict mapping logical names to actor categories ('auditing', 'admin',
               'sensordir'); may also carry an optional 'metrics_url' override.
    """
    # Beach client used to manage/look up other actors in the 'hcp' realm.
    self.beach_api = Beach( self._beach_config_path, realm = 'hcp' )
    # Cassandra-backed storage, 'hcp_analytics' keyspace.
    self.db = CassDb( parameters[ 'db' ], 'hcp_analytics' )
    # Handles to peer actors; 30s timeout, 3 retries each.
    self.audit = self.getActorHandle( resources[ 'auditing' ], timeout = 30, nRetries = 3 )
    self.admin = self.getActorHandle( resources[ 'admin' ], timeout = 30, nRetries = 3 )
    self.sensorDir = self.getActorHandle( resources[ 'sensordir' ], timeout = 30, nRetries = 3 )
    self.genDefaultsIfNotPresent()
    # Cache the admin org id from the global config, if one is set.
    isSuccess, _oid = self.get_global_config( None )
    if isSuccess:
        self.admin_oid = uuid.UUID( str( _oid[ 'global/admin_oid' ] ) )
    else:
        self.admin_oid = None
    # RPC command -> handler registrations.
    self.handle( 'get_global_config', self.get_global_config )
    self.handle( 'get_org_config', self.get_org_config )
    self.handle( 'set_config', self.set_config )
    self.handle( 'deploy_org', self.deploy_org )
    self.handle( 'get_c2_cert', self.get_c2_cert )
    self.handle( 'get_root_cert', self.get_root_cert )
    self.handle( 'update_profile', self.update_profile )
    self.handle( 'get_profiles', self.get_profiles )
    self.handle( 'get_supported_events', self.get_supported_events )
    self.handle( 'get_capabilities', self.get_capabilities )
    self.handle( 'get_quick_detects', self.get_quick_detects )
    self.handle( 'del_sensor', self.del_sensor )
    self.handle( 'refresh_all_installers', self.refresh_all_installers )
    self.handle( 'set_installer_info', self.set_installer_info )
    self.handle( 'del_installer', self.del_installer )
    # Endpoint metrics are reported to; presumably opt-in via sendMetricsIfEnabled -- confirm.
    self.metricsUrl = resources.get( 'metrics_url', 'https://limacharlie.io/metrics/opensource' )
    # First report in 1-2 hours; the random jitter spreads load across deployments.
    self.schedule( ( 60 * 60 ) + random.randint( 0, 60 * 60 ) , self.sendMetricsIfEnabled )
def __init__(self, configFile, identifier='default', sync_frequency=15.0, logging_dest='/dev/log', realm='global', scale=None, actorsRoot=None):
    """Set up patrol state, logging and the Beach client; no actors are started here."""
    self._stopEvent = gevent.event.Event()
    # Configure logging first so everything after can emit messages.
    self._logger = None
    self._log_level = logging.INFO
    self._log_dest = logging_dest
    self._realm = realm
    self._initLogging(self._log_level, logging_dest)
    self._threads = gevent.pool.Group()
    self._owner = 'beach.patrol/%s' % identifier
    self._mutex = BoundedSemaphore(value=1)
    # Monitored actor definitions, kept in registration order.
    self._entries = OrderedDict()
    self._freq = sync_frequency
    self._updateFreq = 60 * 60
    self._patrolHash = None
    self._patrolUrl = None
    self._isMonitored = False
    self._originalTtl = None
    self._beach = Beach(configFile, realm=realm)
    self._scale = scale
    # Normalize the actors root so it can be prefixed straight onto actor names.
    root = actorsRoot
    if root is not None and not root.endswith('/'):
        root = root + '/'
    self._actorsRoot = root
def __init__(self, configFile, identifier='default', sync_frequency=15.0, logging_dest='/dev/log', realm='global', scale=None, actorsRoot=None):
    """Initialize patrol bookkeeping and connect the Beach client."""
    self._stopEvent = gevent.event.Event()
    # Logging is wired up before any other work so later steps can log.
    self._logger = None
    self._log_level = logging.INFO
    self._log_dest = logging_dest
    self._realm = realm
    self._initLogging(self._log_level, logging_dest)
    self._threads = gevent.pool.Group()
    self._owner = 'beach.patrol/%s' % identifier
    # Registered actor definitions (ordered) and currently-watched live actors.
    self._entries = OrderedDict()
    self._watch = {}
    self._freq = sync_frequency
    self._beach = Beach(configFile, realm=realm)
    self._scale = scale
    # Ensure a trailing slash so the root can be concatenated with actor names.
    root = actorsRoot
    if root is not None and not root.endswith('/'):
        root = root + '/'
    self._actorsRoot = root
def __init__(self, beach_config, token, timeout=1000 * 10):
    """Connect to the 'hcp' Beach realm and acquire a handle on the C2 admin actor."""
    self.token = token
    self.beach = Beach(beach_config, realm='hcp')
    # Fixed CLI identity used by the actor trust model.
    cli_ident = 'cli/955f6e63-9119-4ba6-a969-84b38bfbcc05'
    self.vHandle = self.beach.getActorHandle('c2/admin/1.0',
                                             ident=cli_ident,
                                             timeout=timeout,
                                             nRetries=3)
def test_beach_connection():
    '''Connect a Beach client to the simple single-node config and check
    exactly one node is visible.

    FIX: removed unused function-scope imports (yaml, _getIpv4ForIface)
    that were never referenced in the body.
    '''
    global beach
    beach = Beach( os.path.join( curFileDir, 'simple.yaml' ), realm = 'global' )
    # Give the client a moment to discover the node.
    time.sleep( 1 )
    assert 1 == beach.getNodeCount()
class HuntsManager(Actor):
    """Maintains Beach category registrations for hunters.

    Detects and investigations register/unregister themselves here so other
    analytics actors can look them up by category.
    """

    def init(self, parameters, resources):
        self.beach_api = Beach(self._beach_config_path, realm='hcp')
        self.handle('reg_detect', self.handleRegDetect)
        self.handle('reg_inv', self.handleRegInvestigation)
        # BUG FIX: 'unreg_detect' was bound to handleRegDetect, so an
        # unregister request re-registered the detect instead of removing it.
        self.handle('unreg_detect', self.handleUnRegDetect)
        self.handle('unreg_inv', self.handleUnRegInvestigation)

    def deinit(self):
        pass

    def handleRegDetect(self, msg):
        # Add the detect actor to its per-hunter category.
        uid = msg.data['uid']
        name = msg.data['name']
        hunter_type = msg.data['hunter_type']
        isSuccess = self.beach_api.addToCategory(
            uid, 'analytics/detects/%s/%s' % (name, hunter_type))
        self.log('registering detect %s to %s: %s' % (uid, name, isSuccess))
        return (isSuccess, )

    def handleUnRegDetect(self, msg):
        # Remove the detect actor from its per-hunter category.
        uid = msg.data['uid']
        name = msg.data['name']
        hunter_type = msg.data['hunter_type']
        isSuccess = self.beach_api.removeFromCategory(
            uid, 'analytics/detects/%s/%s' % (name, hunter_type))
        self.log('unregistering detect %s to %s: %s' % (uid, name, isSuccess))
        return (isSuccess, )

    def handleRegInvestigation(self, msg):
        # Register an investigation actor under its investigation id.
        uid = msg.data['uid']
        name = msg.data['name']
        isSuccess = self.beach_api.addToCategory(
            uid, 'analytics/inv_id/%s' % (name, ))
        self.log('registering inv %s to %s: %s' % (uid, name, isSuccess))
        return (isSuccess, )

    def handleUnRegInvestigation(self, msg):
        # Remove an investigation actor from its investigation id category.
        uid = msg.data['uid']
        name = msg.data['name']
        isSuccess = self.beach_api.removeFromCategory(
            uid, 'analytics/inv_id/%s' % (name, ))
        self.log('unregistering inv %s to %s: %s' % (uid, name, isSuccess))
        return (isSuccess, )
class HuntsManager( Actor ):
    '''Maintains Beach category registrations for hunters (detects and
    investigations register/unregister themselves here).'''

    def init( self, parameters, resources ):
        self.beach_api = Beach( self._beach_config_path, realm = 'hcp' )
        self.handle( 'reg_detect', self.handleRegDetect )
        self.handle( 'reg_inv', self.handleRegInvestigation )
        # BUG FIX: 'unreg_detect' was bound to handleRegDetect, so an
        # unregister request re-registered the detect instead of removing it.
        self.handle( 'unreg_detect', self.handleUnRegDetect )
        self.handle( 'unreg_inv', self.handleUnRegInvestigation )

    def deinit( self ):
        pass

    def handleRegDetect( self, msg ):
        # Add the detect actor to its per-hunter category.
        uid = msg.data[ 'uid' ]
        name = msg.data[ 'name' ]
        hunter_type = msg.data[ 'hunter_type' ]
        isSuccess = self.beach_api.addToCategory( uid, 'analytics/detects/%s/%s' % ( name, hunter_type ) )
        self.log( 'registering detect %s to %s: %s' % ( uid, name, isSuccess ) )
        return ( isSuccess, )

    def handleUnRegDetect( self, msg ):
        # Remove the detect actor from its per-hunter category.
        uid = msg.data[ 'uid' ]
        name = msg.data[ 'name' ]
        hunter_type = msg.data[ 'hunter_type' ]
        isSuccess = self.beach_api.removeFromCategory( uid, 'analytics/detects/%s/%s' % ( name, hunter_type ) )
        self.log( 'unregistering detect %s to %s: %s' % ( uid, name, isSuccess ) )
        return ( isSuccess, )

    def handleRegInvestigation( self, msg ):
        # Register an investigation actor under its investigation id.
        uid = msg.data[ 'uid' ]
        name = msg.data[ 'name' ]
        isSuccess = self.beach_api.addToCategory( uid, 'analytics/inv_id/%s' % ( name, ) )
        self.log( 'registering inv %s to %s: %s' % ( uid, name, isSuccess ) )
        return ( isSuccess, )

    def handleUnRegInvestigation( self, msg ):
        # Remove an investigation actor from its investigation id category.
        uid = msg.data[ 'uid' ]
        name = msg.data[ 'name' ]
        isSuccess = self.beach_api.removeFromCategory( uid, 'analytics/inv_id/%s' % ( name, ) )
        self.log( 'unregistering inv %s to %s: %s' % ( uid, name, isSuccess ) )
        return ( isSuccess, )
def __init__( self, configFile, identifier = 'default', sync_frequency = 15.0, logging_dest = '/dev/log', realm = 'global', scale = None, actorsRoot = None ):
    '''Prepare patrol state and connect the Beach client; nothing is spawned yet.'''
    self._stopEvent = gevent.event.Event()
    # Bring logging up first so later initialization can log.
    self._logger = None
    self._log_level = logging.INFO
    self._log_dest = logging_dest
    self._realm = realm
    self._initLogging( self._log_level, logging_dest )
    self._threads = gevent.pool.Group()
    self._owner = 'beach.patrol/%s' % ( identifier, )
    # Registered actor definitions (ordered) and the live actors we watch.
    self._entries = OrderedDict()
    self._watch = {}
    self._freq = sync_frequency
    self._beach = Beach( configFile, realm = realm )
    self._scale = scale
    # Guarantee a trailing slash so the root composes with actor names.
    if actorsRoot is not None and not actorsRoot.endswith( '/' ):
        actorsRoot = actorsRoot + '/'
    self._actorsRoot = actorsRoot
def __init__( self, beach_config, token, timeout = 1000 * 10 ):
    '''Open the 'hcp' Beach realm and get a handle on the C2 admin actor.'''
    self.token = token
    self.beach = Beach( beach_config, realm = 'hcp' )
    # Fixed CLI identity used by the actor trust model.
    cliIdent = 'cli/955f6e63-9119-4ba6-a969-84b38bfbcc05'
    self.vHandle = self.beach.getActorHandle( 'c2/admin/1.0',
                                              ident = cliIdent,
                                              timeout = timeout,
                                              nRetries = 3 )
def beach_cluster(request):
    # Pytest fixture: launch a host manager subprocess for the simple test
    # cluster and connect a module-global Beach client to it.
    global beach
    global h_hostmanager
    # Run the host manager out-of-process; '--log-level 10' == logging.DEBUG.
    h_hostmanager = subprocess.Popen([
        'python', '-m', 'beach.hostmanager',
        os.path.join(curFileDir, 'simple.yaml'), '--log-level', '10'
    ])
    beach = Beach(os.path.join(curFileDir, 'simple.yaml'), realm='global')

    def beach_teardown():
        # Close the client first, then ask the host manager to quit and
        # verify it exited cleanly.
        global beach
        global h_hostmanager
        beach.close()
        h_hostmanager.send_signal(signal.SIGQUIT)
        assert (0 == h_hostmanager.wait())

    request.addfinalizer(beach_teardown)
class Patrol(object):
    """Keeps a set of Beach actors alive.

    Actor definitions are registered with monitor(); start() discovers any
    pre-existing instances owned by this patrol, spawns the missing ones and
    then periodically reconciles the cluster directory against the watch list,
    relaunching fallen actors as configured.
    """

    def __init__(self, configFile, identifier='default', sync_frequency=15.0, logging_dest='/dev/log', realm='global', scale=None, actorsRoot=None):
        self._stopEvent = gevent.event.Event()
        self._logger = None
        self._log_level = logging.INFO
        self._log_dest = logging_dest
        self._realm = realm
        self._initLogging(self._log_level, logging_dest)
        self._threads = gevent.pool.Group()
        self._owner = 'beach.patrol/%s' % (identifier, )
        self._entries = OrderedDict()
        self._watch = {}
        self._freq = sync_frequency
        self._beach = Beach(configFile, realm=realm)
        self._scale = scale
        self._actorsRoot = actorsRoot
        if self._actorsRoot is not None and not self._actorsRoot.endswith('/'):
            self._actorsRoot += '/'

    def _initLogging(self, level, dest):
        # Route all patrol logging to syslog.
        self._logger = logging.getLogger()
        self._logger.setLevel(level)
        handler = logging.handlers.SysLogHandler(address=dest)
        handler.setFormatter(logging.Formatter("%(asctime)-15s %(message)s"))
        self._logger.addHandler(handler)

    def _log(self, msg):
        self._logger.info('%s : %s', self.__class__.__name__, msg)

    def _logCritical(self, msg):
        self._logger.error('%s : %s', self.__class__.__name__, msg)

    def _scanForExistingActors(self):
        # Returns a tally { actorName : count } of actors already running in
        # the cluster that are owned by this patrol, adding them to the watch.
        tally = {}
        mtd = self._beach.getAllNodeMetadata()
        for node_mtd in mtd.itervalues():
            if node_mtd is False:
                continue
            for aid, actor_mtd in node_mtd.get('data', {}).get('mtd', {}).iteritems():
                if self._stopEvent.wait(0):
                    break
                owner = actor_mtd.get('owner', None)
                if owner in self._entries:
                    # Looks like a version of that actor was maintained by us before
                    # so we'll add it to our roster.
                    self._watch[aid] = self._entries[owner]
                    self._log('adding pre-existing actor %s to patrol' % aid)
                    tally.setdefault(self._entries[owner].name, 0)
                    tally[self._entries[owner].name] += 1
        return tally

    def _initializeMissingActors(self, existing):
        # Spawn instances until every entry reaches its target count.
        # The scale may be a fixed int or a callable producing one.
        if type(self._scale) is int:
            currentScale = self._scale
        elif self._scale is not None:
            currentScale = self._scale()
        else:
            currentScale = None
        for actorEntry in self._entries.itervalues():
            if self._stopEvent.wait(0):
                break
            actorName = actorEntry.name
            current = existing.get(actorName, 0)
            targetNum = actorEntry.initialInstances
            if currentScale is not None and actorEntry.scalingFactor is not None:
                # Ceiling division of scale by factor, clamped to [initial, max].
                targetNum = int(currentScale / actorEntry.scalingFactor)
                if 0 != (currentScale % actorEntry.scalingFactor):
                    targetNum += 1
                if actorEntry.maxInstances is not None and targetNum > actorEntry.maxInstances:
                    # BUG FIX: previously read 'actor.maxInstances', an undefined
                    # name, which raised NameError whenever the cap applied.
                    targetNum = actorEntry.maxInstances
                if actorEntry.initialInstances is not None and targetNum < actorEntry.initialInstances:
                    targetNum = actorEntry.initialInstances
                self._log('actor %s scale %s / factor %s: %d' % (actorName, currentScale, actorEntry.scalingFactor, targetNum))
            if current < targetNum:
                self._log('actor %s has %d instances but requires %d, spawning' % (actorName, current, targetNum))
                for _ in range(targetNum - current):
                    status = self._beach.addActor(*(actorEntry.actorArgs[0]), **(actorEntry.actorArgs[1]))
                    self._log('actor launched: %s' % status)
                    if type(status) is dict and status.get('status', {}).get('success', False):
                        self._watch[status['data']['uid']] = actorEntry
            else:
                self._log('actor %s is satisfied' % actorName)

    def start(self):
        # Discover, top up, then hand off to the periodic sync greenlet.
        self._stopEvent.clear()
        self._log('starting, patrolling %d actors' % len(self._entries))
        self._log('discovering pre-existing actors')
        existing = self._scanForExistingActors()
        if self._stopEvent.wait(0):
            return
        self._log('%d pre-existing actors' % len(existing))
        self._initializeMissingActors(existing)
        if self._stopEvent.wait(0):
            return
        self._log('starting patrol')
        gevent.sleep(10)
        self._threads.add(gevent.spawn(self._sync))

    def stop(self):
        self._log('stopping patrol')
        self._stopEvent.set()
        self._threads.join(timeout=30)
        self._threads.kill(timeout=10)
        self._log('patrol stopped')

    def monitor(self, name, initialInstances, maxInstances=None, scalingFactor=None, relaunchOnFailure=True, onFailureCall=None, actorArgs=None, actorKwArgs=None):
        """Register an actor definition to be kept alive by this patrol.

        BUG FIX: actorArgs/actorKwArgs used mutable defaults ([] / {}) and
        actorKwArgs was mutated in place, leaking the 'owner' key across calls
        (and into the caller's dict). Defaults are now None and both are
        copied defensively; passing a list/dict still works as before.
        """
        actorArgs = [] if actorArgs is None else list(actorArgs)
        actorKwArgs = {} if actorKwArgs is None else dict(actorKwArgs)
        if self._actorsRoot is not None:
            actorArgs = [self._actorsRoot + actorArgs[0]] + actorArgs[1:]
        record = _PatrolEntry()
        record.name = name
        record.initialInstances = initialInstances
        record.maxInstances = maxInstances
        record.scalingFactor = scalingFactor
        record.relaunchOnFailure = relaunchOnFailure
        record.onFailureCall = onFailureCall
        actorKwArgs['owner'] = '%s/%s' % (self._owner, name)
        record.actorArgs = (actorArgs, actorKwArgs)
        self._entries['%s/%s' % (self._owner, name)] = record

    def _processFallenActor(self, actorEntry):
        # Relaunch a dead actor if its entry asks for it; returns True when a
        # replacement was successfully started.
        isRelaunch = False
        if actorEntry.relaunchOnFailure:
            self._log('actor is set to relaunch on failure')
            status = self._beach.addActor(*(actorEntry.actorArgs[0]), **(actorEntry.actorArgs[1]))
            if status is not False and status is not None and 'data' in status and 'uid' in status['data']:
                self._watch[status['data']['uid']] = actorEntry
                self._log('actor relaunched: %s' % status)
                isRelaunch = True
            else:
                self._log('failed to launch actor: %s' % status)
        else:
            self._log('actor is not set to relaunch on failure')
        return isRelaunch

    def _sync(self):
        # Periodic reconciliation loop: every self._freq seconds compare the
        # cluster directory against the watch list and handle fallen actors.
        while not self._stopEvent.wait(self._freq):
            self._log('running sync')
            directory = self._beach.getDirectory(timeout=120)
            if type(directory) is not dict:
                self._logCritical('error getting directory')
                continue
            self._log('found %d actors, testing for %d' % (len(directory['reverse']), len(self._watch)))
            # .keys() snapshots the ids (Python 2 list) so deleting from
            # self._watch while iterating is safe.
            for actorId in self._watch.keys():
                if self._stopEvent.wait(0):
                    break
                if actorId not in directory.get('reverse', {}):
                    self._log('actor %s has fallen' % actorId)
                    if self._processFallenActor(self._watch[actorId]):
                        del self._watch[actorId]

    def remove(self, name=None, isStopToo=True):
        """Stop watching (and optionally stop) actors; all of them when name is None.

        Returns the list of removed actor ids, or False if name is unknown.
        """
        removed = []
        if name is not None:
            k = '%s/%s' % (self._owner, name)
            if k not in self._entries:
                return False
            record = self._entries[k]
            # items() snapshots (Python 2 list), so deletion during iteration is safe.
            for uid, entry in self._watch.items():
                if entry == record:
                    del self._watch[uid]
                    removed.append(uid)
            if isStopToo:
                self._beach.stopActors(withId=removed)
        else:
            if self._beach.stopActors(withId=self._watch.keys()):
                removed = self._watch.keys()
                self._watch = {}
        return removed

    def loadFromUrl(self, url):
        # SECURITY NOTE: the patrol file content is executed with exec();
        # only ever load patrol definitions from trusted locations.
        if '://' in url:
            patrolFilePath = url
            if patrolFilePath.startswith('file://'):
                patrolFilePath = 'file://%s' % os.path.abspath(patrolFilePath[len('file://'):])
            patrolFile = urllib2.urlopen(patrolFilePath)
        else:
            patrolFilePath = os.path.abspath(url)
            patrolFile = open(patrolFilePath, 'r')
        # The patrol file calls Patrol(...) which maps to monitor().
        exec(patrolFile.read(), {
            'Patrol': self.monitor,
            '__file__': patrolFilePath
        })
def init(self, parameters):
    """Register category-management handlers for hunters.

    parameters: dict, must contain 'beach_config' (path to the Beach config).
    """
    self.beach_api = Beach(parameters['beach_config'], realm='hcp')
    self.handle('reg_detect', self.handleRegDetect)
    self.handle('reg_inv', self.handleRegInvestigation)
    # BUG FIX: 'unreg_detect' was bound to handleRegDetect (the registration
    # handler), so unregister requests re-registered the detect instead of
    # removing it.
    self.handle('unreg_detect', self.handleUnRegDetect)
    self.handle('unreg_inv', self.handleUnRegInvestigation)
class BeachShell ( cmd.Cmd ):
    '''Interactive command shell for administering a Beach cluster.'''

    intro = 'Welcome to Beach shell. Type help or ? to list commands.\n'
    prompt = '(beach) '

    def __init__( self, configFile, realm = None ):
        cmd.Cmd.__init__( self )
        # BUG FIX: the realm argument was accepted but ignored (self.realm was
        # hard-coded to 'global'). Honor it, defaulting to 'global' as before.
        self.realm = realm if realm else 'global'
        self.updatePrompt()
        self.beach = Beach( configFile )
        if self.realm != 'global':
            self.beach.setRealm( self.realm )

    def updatePrompt( self ):
        # Reflect the active realm in the prompt.
        if self.realm is not None:
            self.prompt = '(beach/%s) ' % self.realm
        else:
            self.prompt = '(beach/global) '

    def parse( self, parser, line ):
        # Wrap argparse so errors don't kill the interactive shell
        # (argparse raises SystemExit on bad input or -h).
        try:
            return parser.parse_args( shlex.split( line ) )
        except SystemExit:
            return None

    def do_exit( self, s ):
        self.beach.close()
        return True

    def do_quit( self, s ):
        self.beach.close()
        return True

    def emptyline( self ):
        # Do not repeat the last command on an empty line (cmd's default).
        pass

    def printOut( self, data ):
        print( json.dumps( data, indent = 4 ) )

    @report_errors
    def do_gen_key( self, s ):
        '''Generate a key that can be used as a beach private key.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( 'out',
                             type = str,
                             help = 'the path where to store the key.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        with open( arguments.out, 'w' ) as f:
            f.write( M2Crypto.Rand.rand_bytes( 0x20 ) )
        self.printOut( 'New private key written to %s.' % arguments.out )

    @report_errors
    def do_realm( self, s ):
        '''Switch the shell context to a specific realm.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( 'realm',
                             type = str,
                             default = 'global',
                             help = 'switch context to a specific realm.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        self.realm = arguments.realm
        if self.realm is None or self.realm.strip() == '':
            # BUG FIX: was 'self.ream', a typo that created a new attribute
            # instead of resetting the realm to 'global'.
            self.realm = 'global'
        self.updatePrompt()
        self.beach.setRealm( self.realm )

    @report_errors
    def do_get_dir( self, s ):
        '''Retrieve the directory of all Actors.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( '-c', '--category',
                             type = str,
                             dest = 'category',
                             default = None,
                             help = 'only show the directory for a specific category.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        category = arguments.category
        resp = self.beach.getDirectory()
        wanted = False
        if resp is not False and 'realms' in resp:
            wanted = resp[ 'realms' ].get( self.realm, {} )
            if category is not None:
                wanted = wanted.get( category, {} )
        self.printOut( wanted )

    @report_errors
    def do_flush( self, s ):
        '''Remove all Actors from all nodes in the cluster.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( '--confirm',
                             action = 'store_true',
                             help = 'This command flushes ALL ACTORS from the cluster REGARDLESS of the realm. '
                                    'Add this flag to confirm you understand this.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = 'Please confirm ( see command help )'
        if arguments.confirm:
            resp = self.beach.flush()
        self.printOut( resp )

    @report_errors
    def do_add_actor( self, s ):
        '''Add a new Actor to the cluster.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( '-n', '--name',
                             type = str,
                             dest = 'name',
                             required = True,
                             help = 'the name of the actor to spawn.' )
        parser.add_argument( '-c', '--category',
                             type = str,
                             dest = 'category',
                             required = True,
                             nargs = '+',
                             help = 'category or categories to add the Actor to.' )
        parser.add_argument( '-s', '--strategy',
                             type = str,
                             dest = 'strategy',
                             default = None,
                             help = 'the strategy to use to spawn the actor in the beach.' )
        parser.add_argument( '-sh', '--hint',
                             type = str,
                             dest = 'strat_hint',
                             default = None,
                             help = 'hint used as part of some strategies.' )
        parser.add_argument( '-p', '--params',
                             type = json.loads,
                             dest = 'params',
                             default = None,
                             help = 'parameters to provide to the Actor, as a JSON string.' )
        parser.add_argument( '-i', '--isisolated',
                             dest = 'isIsolated',
                             default = False,
                             action = 'store_true',
                             help = 'if the Actor should be started in isolation mode (standalone process).' )
        parser.add_argument( '-id', '--ident',
                             type = str,
                             dest = 'ident',
                             default = None,
                             help = 'identifier secret token used for Actor trust model.' )
        parser.add_argument( '-t', '--trusted',
                             type = str,
                             dest = 'trusted',
                             default = [],
                             action = 'append',
                             help = 'identifier token trusted by the Actor trust model.' )
        parser.add_argument( '-ll', '--log-level',
                             type = str,
                             dest = 'loglevel',
                             default = None,
                             help = 'custom logging level for actor.' )
        parser.add_argument( '-ld', '--log-dest',
                             type = str,
                             dest = 'logdest',
                             default = None,
                             help = 'custom logging destination for actor.' )
        parser.add_argument( '-o', '--concurrent',
                             type = int,
                             dest = 'n_concurrent',
                             required = False,
                             default = 1,
                             help = 'the number of concurrent requests handled by the actor.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.addActor( arguments.name, arguments.category,
                                    strategy = arguments.strategy,
                                    strategy_hint = arguments.strat_hint,
                                    parameters = arguments.params,
                                    isIsolated = arguments.isIsolated,
                                    secretIdent = arguments.ident,
                                    trustedIdents = arguments.trusted,
                                    n_concurrent = arguments.n_concurrent,
                                    # BUG FIX: argparse stores these under dest
                                    # 'loglevel'/'logdest'; reading
                                    # 'arguments.log_level' raised AttributeError.
                                    log_level = arguments.loglevel,
                                    log_dest = arguments.logdest )
        self.printOut( resp )

    @report_errors
    def do_stop_actor( self, s ):
        '''Stop a specific set of actors.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( '-i', '--id',
                             type = str,
                             dest = 'id',
                             required = False,
                             nargs = '+',
                             help = 'the IDs of actors to stop.' )
        parser.add_argument( '-c', '--category',
                             type = str,
                             dest = 'cat',
                             required = False,
                             nargs = '+',
                             help = 'the categories of actors to stop.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        if arguments.id is None and arguments.cat is None:
            # BUG FIX: was 'argparse.error(...)' -- the argparse module has no
            # error() function (AttributeError). Report and bail out instead.
            self.printOut( 'Must specify one of -i or -c.' )
            return
        resp = self.beach.stopActors( withId = arguments.id, withCategory = arguments.cat )
        self.printOut( resp )

    @report_errors
    def do_get_cluster_health( self, s ):
        '''Retrieve the health information of all nodes of the cluster.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.getClusterHealth()
        self.printOut( resp )

    @report_errors
    def do_get_load_info( self, s ):
        '''Retrieve the number of free handlers per actor.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.getLoadInfo()
        self.printOut( resp )

    @report_errors
    def do_get_mtd( self, s ):
        '''Retrieve metadata from all nodes.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        # CONSISTENCY FIX: the parser was created but never used; parse the
        # (empty) argument list like every other command so '-h' works and
        # stray arguments are rejected.
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.getAllNodeMetadata()
        self.printOut( resp )

    @report_errors
    def do_remove_from_category( self, s ):
        '''Remove an Actor from a category.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        # FIX: help text previously said "add" (copy-paste from add_to_category).
        parser.add_argument( '-i', '--id',
                             type = str,
                             dest = 'id',
                             required = True,
                             help = 'the ID of the actor to remove from the category.' )
        parser.add_argument( '-c', '--category',
                             type = str,
                             dest = 'category',
                             required = True,
                             help = 'category to remove the actor from.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.removeFromCategory( arguments.id, arguments.category )
        self.printOut( resp )

    @report_errors
    def do_add_to_category( self, s ):
        '''Add an Actor to a category.'''
        parser = argparse.ArgumentParser( prog = inspect.stack()[0][3][ 3 : ] )
        parser.add_argument( '-i', '--id',
                             type = str,
                             dest = 'id',
                             required = True,
                             help = 'the ID of the actor to add to the category.' )
        parser.add_argument( '-c', '--category',
                             type = str,
                             dest = 'category',
                             required = True,
                             help = 'category to add the actor to.' )
        arguments = self.parse( parser, s )
        if arguments is None: return
        resp = self.beach.addToCategory( arguments.id, arguments.category )
        self.printOut( resp )
r'/hostobjects', 'HostObjects', r'/detects_data', 'JsonDetects', r'/detects', 'ViewDetects', r'/detect', 'ViewDetect', r'/hostchanges', 'HostChanges' ) web.config.debug = False app = web.application( urls, globals() ) render = web.template.render( 'templates', base = 'base', globals = { 'json' : json, 'tsToTime' : tsToTime, '_x_' : _x_, '_xm_' : _xm_, 'hex' : hex, 'sanitize' : sanitizeJson } ) eventRender = web.template.render( 'templates/custom_events', globals = { 'json' : json, 'tsToTime' : tsToTime, '_x_' : _x_, '_xm_' : _xm_, 'hex' : hex, 'sanitize' : sanitizeJson } ) if len( sys.argv ) < 2: print( "Usage: python app.py beach_config [listen_port]" ) sys.exit() beach = Beach( sys.argv[ 1 ], realm = 'hcp' ) del( sys.argv[ 1 ] ) model = beach.getActorHandle( 'models', nRetries = 3, timeout = 30, ident = 'lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903' ) app.run()
import os
import sys
from beach.beach_api import Beach
import logging

# Repository root, two levels up from this file.
REPO_ROOT = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), '..', '..' )

# Warn (but don't abort) when not root; parts of the Cloud-in-a-Can setup need it.
if os.geteuid() != 0:
    print( 'Not currently running as root. If you meant to run this as part of the Cloud-in-a-Can you should run this with sudo.' )

# Beach config comes from argv[ 1 ] when given, otherwise the bundled sample.
if 1 < len( sys.argv ):
    BEACH_CONFIG_FILE = os.path.abspath( sys.argv[ 1 ] )
else:
    BEACH_CONFIG_FILE = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), 'sample_cluster.yaml' )

beach = Beach( BEACH_CONFIG_FILE, realm = 'hcp' )

# Start from a clean slate: remove all actors currently in the cluster.
if not beach.flush():
    print( "Could not flush Beach cluster. Are you sure it is running?" )
    sys.exit(-1)

#######################################
# BeaconProcessor
# This actor will process incoming
# beacons from the sensors.
# Parameters:
# state_db: these are the connection
#   details for the mysql database
#   used to store the low-importance
#   data tracked at runtime.
# _priv_key: the C2 private key.
class BEAdmin(object):
    """Client-side wrapper around the C2 admin actor's RPC interface.

    Every public method issues one request through _query(), which stamps the
    session token onto the payload.
    """

    token = None

    def __init__(self, beach_config, token, timeout=1000 * 10):
        self.token = token
        self.empty_uuid = uuid.UUID(bytes="\x00" * 16)
        self.beach = Beach(beach_config, realm='hcp')
        self.vHandle = self.beach.getActorHandle(
            'c2/admin/1.0',
            ident='cli/955f6e63-9119-4ba6-a969-84b38bfbcc05',
            timeout=timeout,
            nRetries=3)

    def _query(self, cmd, data=None):
        # BUG FIX: 'data' used a mutable default ({}) that was then mutated
        # in place (data['token'] = ...), so the single shared default dict
        # leaked state across calls made without an explicit payload.
        if data is None:
            data = {}
        data['token'] = self.token
        response = self.vHandle.request(cmd, data)
        return response

    def testConnection(self):
        return self._query('ping')

    def hcp_getAgentStates(self, aid=None, hostname=None):
        # Only include the filters that were actually supplied.
        filters = {}
        if aid is not None:
            filters['aid'] = aid
        if hostname is not None:
            filters['hostname'] = hostname
        return self._query('hcp.get_agent_states', filters)

    def hcp_getTaskings(self, oid=None):
        return self._query('hcp.get_taskings', {'oid': oid})

    def hcp_addTasking(self, mask, moduleId, hashStr):
        return self._query('hcp.add_tasking', {
            'mask': mask,
            'module_id': int(moduleId),
            'hash': hashStr
        })

    def hcp_delTasking(self, mask, moduleId):
        return self._query('hcp.remove_tasking', {
            'mask': mask,
            'module_id': int(moduleId)
        })

    def hcp_getModules(self):
        return self._query('hcp.get_modules')

    def hcp_addModule(self, moduleId, binary, signature, description):
        # The module hash is computed here so callers can't supply a stale one.
        return self._query(
            'hcp.add_module', {
                'module_id': moduleId,
                'bin': binary,
                'signature': signature,
                'hash': hashlib.sha256(binary).hexdigest(),
                'description': description
            })

    def hcp_delModule(self, moduleId, hashStr):
        return self._query('hcp.remove_module', {
            'module_id': moduleId,
            'hash': hashStr
        })

    def hcp_getInstallers(self, oid=None, iid=None, hash=None, withContent=False):
        # NOTE: 'hash' shadows the builtin but is kept for interface compatibility.
        return self._query('hcp.get_installers', {
            'with_content': withContent,
            'oid': oid,
            'iid': iid,
            'hash': hash
        })

    def hcp_addInstaller(self, oid, iid, description, installer):
        return self._query(
            'hcp.add_installer', {
                'oid': oid,
                'iid': iid,
                'description': description,
                'installer': installer
            })

    def hcp_delInstaller(self, oid, iid, hash):
        return self._query('hcp.remove_installer', {
            'oid': oid,
            'iid': iid,
            'hash': hash
        })

    def hcp_getWhitelist(self, oid=None, iid=None):
        return self._query('hcp.get_whitelist', {'oid': oid, 'iid': iid})

    def hcp_addWhitelist(self, oid, iid, bootstrap):
        return self._query('hcp.add_whitelist', {
            'oid': oid,
            'iid': iid,
            'bootstrap': bootstrap
        })

    def hcp_delWhitelist(self, oid, iid):
        return self._query('hcp.remove_whitelist', {'oid': oid, 'iid': iid})

    def hbs_getProfiles(self, oid=[]):
        # 'oid' default is never mutated, so the shared default list is safe here.
        return self._query('hbs.get_profiles', {'oid': oid})

    def hbs_addProfile(self, mask, config, tag):
        return self._query('hbs.set_profile', {
            'mask': mask,
            'module_configs': config,
            'tag': tag
        })

    def hbs_delProfile(self, mask):
        return self._query('hbs.del_profile', {'mask': mask})

    def hbs_taskAgent(self, toAgent, task, key, id, expiry=None, investigationId=None):
        """Sign and send a tasking to a specific agent.

        Returns None when 'task' is not an rSequence; otherwise the actor's
        response. The identity block is built from the AgentId, substituting
        the empty UUID / 0 for missing fields, then serialized and signed.
        """
        # Make sure it's a valid agentid
        a = AgentId(toAgent)
        if not type(task) is rSequence:
            return None
        s = Signing(key)
        r = rpcm(isHumanReadable=True, isDebug=True)
        tags = Symbols()
        if investigationId is not None and '' != investigationId:
            task.addStringA(tags.hbs.INVESTIGATION_ID, investigationId)
        toSign = (rSequence().addSequence(
            tags.base.HCP_IDENT,
            rSequence().addBuffer(
                tags.base.HCP_SENSOR_ID,
                (a.sensor_id if a.sensor_id is not None else self.empty_uuid).bytes
            ).addBuffer(
                tags.base.HCP_ORG_ID,
                (a.org_id if a.org_id is not None else self.empty_uuid).bytes
            ).addBuffer(
                tags.base.HCP_INSTALLER_ID,
                (a.ins_id if a.ins_id is not None else self.empty_uuid).bytes
            ).addInt32(
                tags.base.HCP_ARCHITECTURE,
                a.architecture if a.architecture is not None else 0
            ).addInt32(
                tags.base.HCP_PLATFORM,
                a.platform if a.platform is not None else 0
            )
        ).addSequence(tags.hbs.NOTIFICATION, task).addInt32(tags.hbs.NOTIFICATION_ID, id))
        if None != expiry:
            toSign.addTimestamp(tags.base.EXPIRY, int(expiry))
        toSign = r.serialise(toSign)
        sig = s.sign(toSign)
        # Envelope: the serialized payload plus its signature.
        final = r.serialise(rSequence().addBuffer(
            tags.base.BINARY, toSign).addBuffer(tags.base.SIGNATURE, sig))
        return self._query('hbs.task_agent', {
            'task': final,
            'aid': str(a),
            'expiry': expiry
        })

    def hbs_addKey(self, oid, key):
        return self._query('hbs.add_key', {'oid': oid, 'key': key})
class BeachShell(cmd.Cmd):
    """Interactive command shell for administering a Beach cluster."""

    intro = "Welcome to Beach shell. Type help or ? to list commands.\n"
    prompt = "(beach) "

    def __init__(self, configFile):
        cmd.Cmd.__init__(self)
        self.realm = "global"
        self.updatePrompt()
        self.beach = Beach(configFile)

    def updatePrompt(self):
        # Reflect the active realm in the prompt.
        if self.realm is not None:
            self.prompt = "(beach/%s) " % self.realm
        else:
            self.prompt = "(beach/global) "

    def parse(self, parser, line):
        # Wrap argparse so errors (SystemExit) don't kill the interactive shell.
        try:
            return parser.parse_args(shlex.split(line))
        except SystemExit:
            return None

    def do_exit(self, s):
        self.beach.close()
        return True

    def do_quit(self, s):
        self.beach.close()
        return True

    def emptyline(self):
        # Do not repeat the last command on an empty line (cmd's default).
        pass

    def printOut(self, data):
        print(json.dumps(data, indent=4))

    @report_errors
    def do_realm(self, s):
        """Switch the shell context to a specific realm."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument("realm", type=str, default="global", help="switch context to a specific realm.")
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        self.realm = arguments.realm
        if self.realm is None or self.realm.strip() == "":
            # BUG FIX: was 'self.ream', a typo that created a new attribute
            # instead of resetting the realm to 'global'.
            self.realm = "global"
        self.updatePrompt()
        self.beach.setRealm(self.realm)

    @report_errors
    def do_get_dir(self, s):
        # DOC FIX: docstring previously said "Retrieve a specific user's
        # profile by UID." (copy-paste error).
        """Retrieve the directory of all Actors."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            "-c",
            "--category",
            type=str,
            dest="category",
            default=None,
            help="only show the directory for a specific category.",
        )
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        category = arguments.category
        resp = self.beach.getDirectory()
        wanted = False
        if isMessageSuccess(resp) and "realms" in resp:
            wanted = resp["realms"].get(self.realm, {})
            if category is not None:
                wanted = wanted.get(category, {})
        self.printOut(wanted)

    @report_errors
    def do_flush(self, s):
        # DOC FIX: docstring previously described an unrelated command.
        """Remove all Actors from all nodes in the cluster."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            "--confirm",
            action="store_true",
            help="This command flushes ALL ACTORS from the cluster REGARDLESS of the realm. "
                 "Add this flag to confirm you understand this.",
        )
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = "Please confirm ( see command help )"
        if arguments.confirm:
            resp = self.beach.flush()
        self.printOut(resp)

    @report_errors
    def do_add_actor(self, s):
        # DOC FIX: docstring previously described an unrelated command.
        """Add a new Actor to the cluster."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            "-n", "--name", type=str, dest="name", required=True, help="the name of the actor to spawn."
        )
        parser.add_argument(
            "-c",
            "--category",
            type=str,
            dest="category",
            required=True,
            # HELP FIX: previously said "only show the directory for a
            # specific category." (copied from do_get_dir).
            help="category to add the Actor to.",
        )
        parser.add_argument(
            "-s",
            "--strategy",
            type=str,
            dest="strategy",
            default=None,
            help="the strategy to use to spawn the actor in the beach.",
        )
        parser.add_argument(
            "-sh", "--hint", type=str, dest="strat_hint", default=None, help="hint used as part of some strategies."
        )
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.addActor(arguments.name, arguments.category,
                                   arguments.strategy, arguments.strat_hint)
        self.printOut(resp)

    @report_errors
    def do_stop_actor(self, s):
        """Stop a specific set of actors."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            "-i", "--id", type=str, dest="id", required=False, nargs="+", help="the IDs of actors to stop."
        )
        parser.add_argument(
            "-c",
            "--category",
            type=str,
            dest="cat",
            required=False,
            nargs="+",
            help="the categories of actors to stop.",
        )
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        if arguments.id is None and arguments.cat is None:
            # BUG FIX: was 'argparse.error(...)' -- the argparse module has no
            # error() function (AttributeError). Report and bail out instead.
            self.printOut("Must specify one of -i or -c.")
            return
        resp = self.beach.stopActors(withId=arguments.id, withCategory=arguments.cat)
        self.printOut(resp)

    @report_errors
    def do_get_cluster_health(self, s):
        """Retrieve the health information of all nodes of the cluster."""
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.getClusterHealth()
        self.printOut(resp)
dest='source') parser.add_argument( '-i', '--ident', type=str, required=False, default='endpointproxy/8e7a890b-8016-4396-b012-aec73d055dd6', help='Beach identity to use to request list of endpoints.', dest='ident') parser.add_argument( '-u', '--update', type=int, required=False, default=60, help='refresh list of available endpoints every X seconds.', dest='update') arguments = parser.parse_args() currentEndpoints = Set() beach = Beach(arguments.config, realm='hcp') endpointActors = beach.getActorHandle('c2/endpoint', nRetries=3, timeout=30, ident=arguments.ident) updateEndpoints(endpointActors, arguments.update) proxy = LcEndpointProxy(arguments.source, currentEndpoints) proxy.start() gevent.wait()
# This will set the host as a beach node. import sys import os import json import yaml import time # Adding the beach lib directory relatively for this example curFileDir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(curFileDir, '..', '..')) from beach.beach_api import Beach print("Connecting to example beach.") beach = Beach(os.path.join(curFileDir, 'multinode.yaml'), realm='global') print("Creating ping actor in resource beach node.") a1 = beach.addActor('Ping', 'pingers', strategy='resource') print(json.dumps(a1, indent=4)) print("Creating pong actor in affinity( pingers ) beach node.") a2 = beach.addActor('Pong', 'pongers', strategy='affinity', strategy_hint='pingers') print(json.dumps(a2, indent=4)) print("Creating pong actor in isolation.") a3 = beach.addActor('Pong', 'pongers', isIsolated=True) print(json.dumps(a3, indent=4))
import json
import yaml
import time

# Make the beach package importable when running straight from the repo tree.
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_THIS_DIR, '..', '..'))

from beach.beach_api import Beach
from beach.utils import _getIpv4ForIface

print("Connecting to example beach.")
# Note that we usually would not need to add the extraTmpSeedNode parameter in production
# since your config file would be populated. Here to keep the config file simple and
# portable we add our IP manually to operate in single-node mode.
beach = Beach(os.path.join(_THIS_DIR, 'simple.yaml'), realm='global')

print("Creating ping actor in random beach node.")
ping_actor = beach.addActor('Ping', 'pingers')
print(json.dumps(ping_actor, indent=4))

print("Creating pong actor in random beach node.")
pong_actor = beach.addActor('Pong', 'pongers')
print(json.dumps(pong_actor, indent=4))

print("Idling for a few seconds...")
time.sleep(30)

print("Querying for beach directory.")
cluster_directory = beach.getDirectory()
print(json.dumps(cluster_directory, indent=4))
import os import sys from beach.beach_api import Beach import logging if 1 < len(sys.argv): BEACH_CONFIG_FILE = os.path.abspath(sys.argv[1]) else: BEACH_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample_cluster.yaml") beach = Beach(BEACH_CONFIG_FILE, realm="hcp") ####################################### # BeaconProcessor # This actor will process incoming # beacons from the sensors. # Parameters: # state_db: these are the connection # details for the mysql database # used to store the low-importance # data tracked at runtime. ####################################### print( beach.addActor( "c2/BeaconProcessor", "c2/beacon/1.0", parameters={ "state_db": {"url": "hcp-state-db", "db": "hcp", "user": "******", "password": "******"}, "priv_key": os.path.join(os.path.dirname(os.path.abspath(__file__)), "hcp", "c2.priv.pem"), }, secretIdent="beacon/09ba97ab-5557-4030-9db0-1dbe7f2b9cfd",
# web.py template renderers for the UI.
# renderFullPage renders complete pages on the 'base_full' layout; the globals
# dict exposes helpers (json, timestamp formatting, escaping, markdown, enums)
# to the templates.
renderFullPage = web.template.render( 'templates', base = 'base_full', globals = { 'json' : json, 'msTsToTime' : msTsToTime, '_x_' : _x_, '_xm_' : _xm_, 'hex' : hex, 'sanitize' : sanitizeJson, 'EventInterpreter' : EventInterpreter, 'md' : doMarkdown, 'sorted' : sorted, 'InvestigationNature' : InvestigationNature, 'InvestigationConclusion' : InvestigationConclusion } )
# Renderer for per-event-type template snippets (no base layout).
eventRender = web.template.render( 'templates/custom_events', globals = { 'json' : json, 'msTsToTime' : msTsToTime, '_x_' : _x_, '_xm_' : _xm_, 'hex' : hex, 'sanitize' : sanitizeJson, 'EventInterpreter' : EventInterpreter, 'sorted' : sorted } )

if len( sys.argv ) < 2:
    print( "Usage: python app.py beach_config [listen_port]" )
    sys.exit()

# argv[1] is the Beach cluster config; it is removed from argv afterwards,
# presumably so the web framework only sees the optional listen port -- confirm.
beach = Beach( sys.argv[ 1 ], realm = 'hcp' )
del( sys.argv[ 1 ] )

# Handles to the backend actor categories used by the web app.
model = beach.getActorHandle( 'models', nRetries = 3, timeout = 30, ident = 'lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903' )
capabilities = beach.getActorHandle( 'analytics/capabilitymanager', nRetries = 3, timeout = 60, ident = 'lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903' )
sensordir = beach.getActorHandle( 'c2/sensordir', nRetries = 3, timeout = 30, ident = 'lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903' )

app.run()
# Whether the REST bridge should also accept GET requests.
ENABLE_GET = args.withGet

if args.sslSelfSigned:
    # Only generate a certificate when neither the cert nor the key exists.
    if not os.path.isfile(args.sslCert) and not os.path.isfile(
            args.sslKey):
        print("Generating self-signed certs.")
        # NOTE(review): user-supplied file paths are interpolated straight
        # into a shell command; paths with spaces/metacharacters will break
        # or be interpreted by the shell. Consider subprocess.run([...]).
        if 0 != os.system(
                'openssl req -x509 -days 36500 -newkey rsa:4096 -keyout %s -out %s -nodes -sha256 -subj "/C=US/ST=CA/L=Mountain View/O=refractionPOINT/CN=restbridge.beach" > /dev/null 2>&1'
                % (args.sslKey, args.sslCert)):
            print("Failed to generate self-signed certificate.")

if os.path.isfile(args.sslCert) and os.path.isfile(args.sslKey):
    print("Using SSL cert/key: %s and %s" % (args.sslCert, args.sslKey))
    from web.wsgiserver import CherryPyWSGIServer
    CherryPyWSGIServer.ssl_certificate = args.sslCert
    CherryPyWSGIServer.ssl_private_key = args.sslKey
else:
    # Fall back to plain HTTP when no cert/key pair is available.
    print("No SSL cert/key at %s and %s so using normal HTTP." %
          (args.sslCert, args.sslKey))

# web.py routing: every path is handled by the Bridge class.
urls = (
    r'/(.*)',
    'Bridge',
)
web.config.debug = False
app = web.application(urls, globals())
beach = Beach(args.configFile, realm=args.realm)
# Cache of actor handles -- presumably keyed by category; confirm in Bridge.
handle_cache = {}
os.chdir(g_current_dir)
app.run()
'public_ip': str( environment.get('X-FORWARDED-FOR', environment.get('REMOTE_ADDR', ''))) } resp = vHandle.request('beacon', data=clean_params) if resp.isSuccess: isSuccess = True response = '<html><h1>%s</h1></html>' % resp.data['resp'] if isSuccess: status = '200 OK' start_response(status, [('Content-Type', 'text/html'), ('Content-Length', str(len(response)))]) return [str(response)] beach = Beach(sys.argv[1], realm='hcp') vHandle = beach.getActorHandle( 'c2/beacon', nRetries=3, timeout=30, ident='http/5bc10821-2d3f-413a-81ee-30759b9f863b') server = wsgi.WSGIServer(('', 80), handle_beacon, spawn=100) server.serve_forever()
g_metrics = info print("Done processing metris.") def periodicUpdate(): while True: updateMetrics() gevent.sleep(SEC_PER_GEN) ############################################################################### # BOILER PLATE ############################################################################### urls = (r'/', 'Index', r'/info', 'GetClusterInfo') web.config.debug = False app = web.application(urls, globals()) render = web.template.render('%s/templates/' % g_current_dir, globals={}) if len(sys.argv) < 2: print("Dashboard takes single argument: cluster config file.") sys.exit() beach = Beach(sys.argv[-1]) sys.argv.pop() os.chdir(g_current_dir) gevent.spawn(periodicUpdate) app.run()
dest='is_display_responses', help='if specified, all actor responses will be displayed') args = parser.parse_args() print("Launching LimaCharlie Appliance Test") printStep('BASE PLATFORM') if 0 == os.system('cqlsh %s -e "desc keyspaces" > /dev/null 2>&1' % getLocalIp()): printSuccess("Cassandra is running.") else: printFailure("Cassandra doesn't seem to be running.") BEACH = Beach(BEACH_CONFIG, 'hcp') resp = BEACH.getClusterHealth() if 0 != len(resp) and resp.values()[0] is not None: printSuccess('Beach cluster is running with %d node(s).' % len(resp)) else: printFailure("Beach cluster doesn't seem to be running.") if 0 == os.system( 'ps -elf | grep -E ".*endpoint_proxy.*" | grep -v grep > /dev/null' ): printSuccess('Sensor proxy is running.') else: printFailure("Sensor proxy doesn't seem to be running.") if 0 == os.system(
class Patrol ( object ):
    '''Maintains a roster of Beach actors.

    Discovers pre-existing actors owned by this patrol, spawns missing
    instances up to a (possibly scale-driven) target and periodically
    relaunches actors that have fallen off the cluster directory.
    '''

    def __init__( self, configFile,
                  identifier = 'default',
                  sync_frequency = 15.0,
                  logging_dest = '/dev/log',
                  realm = 'global',
                  scale = None,
                  actorsRoot = None ):
        '''Create a patrol.

        configFile: path to the Beach cluster config file.
        identifier: distinguishes this patrol's ownership tag from others.
        sync_frequency: seconds between roster checks.
        logging_dest: syslog destination for patrol logs.
        realm: Beach realm this patrol operates in.
        scale: int or callable returning the current scale metric, or None.
        actorsRoot: optional prefix applied to actor source paths.
        '''
        self._stopEvent = gevent.event.Event()
        self._logger = None
        self._log_level = logging.INFO
        self._log_dest = logging_dest
        self._realm = realm
        self._initLogging( self._log_level, logging_dest )
        self._threads = gevent.pool.Group()
        self._owner = 'beach.patrol/%s' % ( identifier, )
        self._entries = OrderedDict()
        # actorId -> _PatrolEntry for every live actor we are responsible for.
        self._watch = {}
        self._freq = sync_frequency
        self._beach = Beach( configFile, realm = realm )
        self._scale = scale
        self._actorsRoot = actorsRoot
        if self._actorsRoot is not None and not self._actorsRoot.endswith( '/' ):
            self._actorsRoot += '/'

    def _initLogging( self, level, dest ):
        # Route all patrol logs to syslog with a timestamped format.
        self._logger = logging.getLogger()
        self._logger.setLevel( level )
        handler = logging.handlers.SysLogHandler( address = dest )
        handler.setFormatter( logging.Formatter( "%(asctime)-15s %(message)s" ) )
        self._logger.addHandler( handler )

    def _log( self, msg ):
        self._logger.info( '%s : %s', self.__class__.__name__, msg )

    def _logCritical( self, msg ):
        self._logger.error( '%s : %s', self.__class__.__name__, msg )

    def _scanForExistingActors( self ):
        '''Return a tally { actorName : count } of already-running actors
        previously owned by this patrol, adding them to the watch list.'''
        tally = {}
        mtd = self._beach.getAllNodeMetadata()
        for node_mtd in mtd.itervalues():
            if node_mtd is False: continue
            for aid, actor_mtd in node_mtd.get( 'data', {} ).get( 'mtd', {} ).iteritems():
                if self._stopEvent.wait( 0 ): break
                owner = actor_mtd.get( 'owner', None )
                if owner in self._entries:
                    # Looks like a version of that actor was maintained by us before
                    # so we'll add it to our roster.
                    self._watch[ aid ] = self._entries[ owner ]
                    self._log( 'adding pre-existing actor %s to patrol' % aid )
                    tally.setdefault( self._entries[ owner ].name, 0 )
                    tally[ self._entries[ owner ].name ] += 1
        return tally

    def _initializeMissingActors( self, existing ):
        '''Spawn instances until each patrolled actor reaches its target count.

        existing: tally from _scanForExistingActors.
        '''
        # Current scale metric: static int, result of the callable, or None.
        if type( self._scale ) is int:
            currentScale = self._scale
        elif self._scale is not None:
            currentScale = self._scale()
        else:
            currentScale = None
        for actorEntry in self._entries.itervalues():
            if self._stopEvent.wait( 0 ): break
            actorName = actorEntry.name
            current = existing.get( actorName, 0 )
            targetNum = actorEntry.initialInstances
            if currentScale is not None and actorEntry.scalingFactor is not None:
                # Round scale / factor up, then clamp to [initial, max].
                targetNum = int( currentScale / actorEntry.scalingFactor )
                if 0 != ( currentScale % actorEntry.scalingFactor ):
                    targetNum += 1
                if actorEntry.maxInstances is not None and targetNum > actorEntry.maxInstances:
                    # FIX: was 'actor.maxInstances' -- 'actor' is undefined in
                    # this scope and raised NameError whenever the cap applied.
                    targetNum = actorEntry.maxInstances
                if actorEntry.initialInstances is not None and targetNum < actorEntry.initialInstances:
                    targetNum = actorEntry.initialInstances
                self._log( 'actor %s scale %s / factor %s: %d' % ( actorName, currentScale, actorEntry.scalingFactor, targetNum ) )
            if current < targetNum:
                self._log( 'actor %s has %d instances but requires %d, spawning' % ( actorName, current, targetNum ) )
                for _ in range( targetNum - current ):
                    status = self._beach.addActor( *(actorEntry.actorArgs[ 0 ]), **(actorEntry.actorArgs[ 1 ]) )
                    self._log( 'actor launched: %s' % status )
                    if type( status ) is dict and status.get( 'status', {} ).get( 'success', False ):
                        self._watch[ status[ 'data' ][ 'uid' ] ] = actorEntry
            else:
                self._log( 'actor %s is satisfied' % actorName )

    def start( self ):
        '''Discover/spawn the roster then launch the background sync loop.'''
        self._stopEvent.clear()
        self._log( 'starting, patrolling %d actors' % len( self._entries ) )
        self._log( 'discovering pre-existing actors' )
        existing = self._scanForExistingActors()
        if self._stopEvent.wait( 0 ): return
        self._log( '%d pre-existing actors' % len( existing ) )
        self._initializeMissingActors( existing )
        if self._stopEvent.wait( 0 ): return
        self._log( 'starting patrol' )
        # Grace period before the first sync pass.
        gevent.sleep(10)
        self._threads.add( gevent.spawn( self._sync ) )

    def stop( self ):
        '''Signal the sync loop to stop and reap its greenlet.'''
        self._log( 'stopping patrol' )
        self._stopEvent.set()
        self._threads.join( timeout = 30 )
        self._threads.kill( timeout = 10 )
        self._log( 'patrol stopped' )

    def monitor( self, name, initialInstances,
                 maxInstances = None,
                 scalingFactor = None,
                 relaunchOnFailure = True,
                 onFailureCall = None,
                 actorArgs = None,
                 actorKwArgs = None ):
        '''Register an actor for patrolling (does not spawn it yet).

        actorArgs / actorKwArgs are the positional and keyword arguments
        forwarded to Beach.addActor for every instance.
        '''
        # FIX: the defaults were the mutable literals [] / {}. actorKwArgs in
        # particular was mutated below ('owner' key), so every entry created
        # with the default shared -- and clobbered -- the same dict.
        actorArgs = list( actorArgs ) if actorArgs is not None else []
        actorKwArgs = dict( actorKwArgs ) if actorKwArgs is not None else {}
        if self._actorsRoot is not None:
            actorArgs = [ self._actorsRoot + actorArgs[ 0 ] ] + actorArgs[ 1 : ]
        record = _PatrolEntry()
        record.name = name
        record.initialInstances = initialInstances
        record.maxInstances = maxInstances
        record.scalingFactor = scalingFactor
        record.relaunchOnFailure = relaunchOnFailure
        record.onFailureCall = onFailureCall
        actorKwArgs[ 'owner' ] = '%s/%s' % ( self._owner, name )
        record.actorArgs = ( actorArgs, actorKwArgs )
        self._entries[ '%s/%s' % ( self._owner, name ) ] = record

    def _processFallenActor( self, actorEntry ):
        '''Relaunch a fallen actor if its entry asks for it; return True on relaunch.'''
        isRelaunch = False
        if actorEntry.relaunchOnFailure:
            self._log( 'actor is set to relaunch on failure' )
            status = self._beach.addActor( *(actorEntry.actorArgs[ 0 ]), **(actorEntry.actorArgs[ 1 ]) )
            if status is not False and status is not None and 'data' in status and 'uid' in status[ 'data' ]:
                self._watch[ status[ 'data' ][ 'uid' ] ] = actorEntry
                self._log( 'actor relaunched: %s' % status )
                isRelaunch = True
            else:
                self._log( 'failed to launch actor: %s' % status )
        else:
            self._log( 'actor is not set to relaunch on failure' )
        return isRelaunch

    def _sync( self ):
        '''Background loop: every self._freq seconds, compare the watch list
        against the cluster directory and handle fallen actors.'''
        while not self._stopEvent.wait( self._freq ):
            self._log( 'running sync' )
            directory = self._beach.getDirectory( timeout = 120 )
            if type( directory ) is not dict:
                self._logCritical( 'error getting directory' )
                continue
            self._log( 'found %d actors, testing for %d' % ( len( directory[ 'reverse' ] ), len( self._watch ) ) )
            # Iterate over a snapshot since entries may be deleted below.
            for actorId in list( self._watch.keys() ):
                if self._stopEvent.wait( 0 ): break
                if actorId not in directory.get( 'reverse', {} ):
                    self._log( 'actor %s has fallen' % actorId )
                    if self._processFallenActor( self._watch[ actorId ] ):
                        del( self._watch[ actorId ] )

    def remove( self, name = None, isStopToo = True ):
        '''Stop patrolling one entry (by name) or everything; return the
        removed actor ids, or False if the name is unknown.'''
        removed = []
        if name is not None:
            k = '%s/%s' % ( self._owner, name )
            if k not in self._entries:
                return False
            record = self._entries[ k ]
            # Snapshot: we delete from the dict while scanning it.
            for uid, entry in list( self._watch.items() ):
                if entry == record:
                    del( self._watch[ uid ] )
                    removed.append( uid )
            if isStopToo:
                self._beach.stopActors( withId = removed )
        else:
            # NOTE(review): the no-name branch always stops the actors,
            # regardless of isStopToo; preserved as-is.
            if self._beach.stopActors( withId = self._watch.keys() ):
                removed = self._watch.keys()
                self._watch = {}
        return removed

    def loadFromUrl( self, url ):
        '''Load and execute a patrol definition from a URL or local path.

        The file is executed with 'Patrol' bound to self.monitor.
        '''
        if '://' in url:
            patrolFilePath = url
            if patrolFilePath.startswith( 'file://' ):
                patrolFilePath = 'file://%s' % os.path.abspath( patrolFilePath[ len( 'file://' ) : ] )
            patrolFile = urllib2.urlopen( patrolFilePath )
        else:
            patrolFilePath = os.path.abspath( url )
            patrolFile = open( patrolFilePath, 'r' )
        # NOTE(review): exec of fetched content -- only point this at trusted
        # patrol files; anything at the URL runs with full privileges.
        exec( patrolFile.read(), { 'Patrol' : self.monitor, '__file__' : patrolFilePath } )
userConf = yaml.parse(f.read()) if 'config' in userConf: conf = userConf['config'] else: conf = args.config if conf is None: parser.error('no config specified and ~/.beach is not a valid config.') else: if args.req_realm is None and args.req_cat is None and args.req_cmd is None and args.req_ident is None: app = BeachShell(conf) app.cmdloop() elif args.req_realm is None or args.req_cat is None or args.req_cmd is None: parser.error('--req-* components missing to execute a request.') else: beach = Beach(conf, realm=args.req_realm) h = beach.getActorHandle(args.req_cat, ident=args.req_ident, timeout=args.req_timeout) if 0 == h.getNumAvailable(): h.close() beach.close() eprint("no actors available in category") sys.exit(1) if args.is_broadcast: futures = h.requestFromAll(args.req_cmd, data=args.req_data) else: resp = h.request(args.req_cmd, data=args.req_data) h.close()
def init( self, parameters, resources ):
    # Actor entry point: connect to the Beach cluster and register the
    # detection/investigation (un)registration request handlers.
    self.beach_api = Beach( self._beach_config_path, realm = 'hcp' )
    self.handle( 'reg_detect', self.handleRegDetect )
    self.handle( 'reg_inv', self.handleRegInvestigation )
    # NOTE(review): 'unreg_detect' is bound to handleRegDetect, the same
    # callback as 'reg_detect' -- looks like a copy/paste slip; confirm
    # whether a handleUnRegDetect was intended.
    self.handle( 'unreg_detect', self.handleRegDetect )
    self.handle( 'unreg_inv', self.handleUnRegInvestigation )
def init(self, parameters, resources):
    """Actor entry point: connect to Beach and register request handlers."""
    self.beach_api = Beach(self._beach_config_path, realm='hcp')
    # Table-driven registration; order matches the original wiring.
    # NOTE(review): 'unreg_detect' is bound to handleRegDetect, same as
    # 'reg_detect' -- preserved as-is, possibly a copy/paste slip upstream.
    handlers = (
        ('reg_detect', self.handleRegDetect),
        ('reg_inv', self.handleRegInvestigation),
        ('unreg_detect', self.handleRegDetect),
        ('unreg_inv', self.handleUnRegInvestigation),
    )
    for command, callback in handlers:
        self.handle(command, callback)
class BeachShell(cmd.Cmd):
    """Interactive command shell for administering a Beach cluster."""

    intro = 'Welcome to Beach shell. Type help or ? to list commands.\n'
    prompt = '(beach) '

    def __init__(self, configFile, realm=None):
        # NOTE(review): 'realm' is accepted but the shell always starts in
        # 'global'; use the 'realm' command to switch.
        cmd.Cmd.__init__(self)
        self.realm = 'global'
        self.updatePrompt()
        self.beach = Beach(configFile)

    def updatePrompt(self):
        """Reflect the current realm in the shell prompt."""
        if self.realm is not None:
            self.prompt = '(beach/%s) ' % self.realm
        else:
            self.prompt = '(beach/global) '

    def parse(self, parser, line):
        """Parse a command line with argparse; return None instead of exiting."""
        try:
            return parser.parse_args(shlex.split(line))
        except SystemExit:
            # argparse calls sys.exit() on error/--help; stay in the shell.
            return None

    def do_exit(self, s):
        self.beach.close()
        return True

    def do_quit(self, s):
        self.beach.close()
        return True

    def emptyline(self):
        # Default cmd.Cmd behavior repeats the last command; do nothing instead.
        pass

    def printOut(self, data):
        print(json.dumps(data, indent=4))

    @report_errors
    def do_gen_key(self, s):
        '''Generate a key that can be used as a beach private key.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('out',
                            type=str,
                            help='the path where to store the key.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        with open(arguments.out, 'w') as f:
            f.write(M2Crypto.Rand.rand_bytes(0x20))
        self.printOut('New private key written to %s.' % arguments.out)

    @report_errors
    def do_realm(self, s):
        '''Login as a specific user.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('realm',
                            type=str,
                            default='global',
                            help='switch context to a specific realm.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        self.realm = arguments.realm
        if self.realm is None or self.realm.strip() == '':
            # FIX: was 'self.ream = ...' (typo) -- the blank-realm fallback
            # never actually reset self.realm.
            self.realm = 'global'
        self.updatePrompt()
        self.beach.setRealm(self.realm)

    @report_errors
    def do_get_dir(self, s):
        '''Retrieve the directory of all Actors.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            '-c',
            '--category',
            type=str,
            dest='category',
            default=None,
            help='only show the directory for a specific category.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        category = arguments.category
        resp = self.beach.getDirectory()
        wanted = False
        if resp is not False and 'realms' in resp:
            wanted = resp['realms'].get(self.realm, {})
            if category is not None:
                wanted = wanted.get(category, {})
        self.printOut(wanted)

    @report_errors
    def do_flush(self, s):
        '''Remove all Actors from all nodes in the cluster.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument(
            '--confirm',
            action='store_true',
            help=
            'This command flushes ALL ACTORS from the cluster REGARDLESS of the realm. '
            'Add this flag to confirm you understand this.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = 'Please confirm ( see command help )'
        if arguments.confirm:
            resp = self.beach.flush()
        self.printOut(resp)

    @report_errors
    def do_add_actor(self, s):
        '''Add a new Actor to the cluster.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('-n',
                            '--name',
                            type=str,
                            dest='name',
                            required=True,
                            help='the name of the actor to spawn.')
        parser.add_argument('-c',
                            '--category',
                            type=str,
                            dest='category',
                            required=True,
                            nargs='+',
                            help='category or categories to add the Actor to.')
        parser.add_argument(
            '-s',
            '--strategy',
            type=str,
            dest='strategy',
            default=None,
            help='the strategy to use to spawn the actor in the beach.')
        parser.add_argument('-sh',
                            '--hint',
                            type=str,
                            dest='strat_hint',
                            default=None,
                            help='hint used as part of some strategies.')
        parser.add_argument(
            '-p',
            '--params',
            type=json.loads,
            dest='params',
            default=None,
            help='parameters to provide to the Actor, as a JSON string.')
        parser.add_argument(
            '-i',
            '--isisolated',
            dest='isIsolated',
            default=False,
            action='store_true',
            help=
            'if the Actor should be started in isolation mode (standalone process).'
        )
        parser.add_argument(
            '-id',
            '--ident',
            type=str,
            dest='ident',
            default=None,
            help='identifier secret token used for Actor trust model.')
        parser.add_argument(
            '-t',
            '--trusted',
            type=str,
            dest='trusted',
            default=[],
            action='append',
            help='identifier token trusted by the Actor trust model.')
        parser.add_argument('-ll',
                            '--log-level',
                            type=str,
                            dest='loglevel',
                            default=None,
                            help='custom logging level for actor.')
        parser.add_argument('-ld',
                            '--log-dest',
                            type=str,
                            dest='logdest',
                            default=None,
                            help='custom logging destination for actor.')
        parser.add_argument(
            '-o',
            '--concurrent',
            type=int,
            dest='n_concurrent',
            required=False,
            default=1,
            help='the number of concurrent requests handled by the actor.')
        parser.add_argument(
            '-d',
            '--isdrainable',
            dest='isDrainable',
            default=False,
            action='store_true',
            help='if the Actor can be requested to drain gracefully.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        # FIX: was 'arguments.log_level' / 'arguments.log_dest' -- the argparse
        # dests are 'loglevel' / 'logdest', so the old code raised
        # AttributeError whenever this command ran.
        resp = self.beach.addActor(arguments.name,
                                   arguments.category,
                                   strategy=arguments.strategy,
                                   strategy_hint=arguments.strat_hint,
                                   parameters=arguments.params,
                                   isIsolated=arguments.isIsolated,
                                   secretIdent=arguments.ident,
                                   trustedIdents=arguments.trusted,
                                   n_concurrent=arguments.n_concurrent,
                                   is_drainable=arguments.isDrainable,
                                   log_level=arguments.loglevel,
                                   log_dest=arguments.logdest)
        self.printOut(resp)

    @report_errors
    def do_stop_actor(self, s):
        '''Stop a specific set of actors.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('-i',
                            '--id',
                            type=str,
                            dest='id',
                            required=False,
                            nargs='+',
                            help='the IDs of actors to stop.')
        parser.add_argument('-c',
                            '--category',
                            type=str,
                            dest='cat',
                            required=False,
                            nargs='+',
                            help='the categories of actors to stop.')
        parser.add_argument(
            '-d',
            '--delay',
            type=int,
            dest='delay',
            required=False,
            default=None,
            help='the number of seconds between stopping each actor.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        if arguments.id is None and arguments.cat is None:
            # FIX: was 'argparse.error(...)' -- the argparse *module* has no
            # error(); the method lives on the parser instance.
            parser.error('Must specify one of -i or -c.')
        resp = self.beach.stopActors(withId=arguments.id,
                                     withCategory=arguments.cat,
                                     delay=arguments.delay)
        self.printOut(resp)

    @report_errors
    def do_get_cluster_health(self, s):
        '''Retrieve the health information of all nodes of the cluster.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.getClusterHealth()
        self.printOut(resp)

    @report_errors
    def do_get_load_info(self, s):
        '''Retrieve the number of free handlers per actor.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.getLoadInfo()
        self.printOut(resp)

    @report_errors
    def do_get_mtd(self, s):
        '''Retrieve metadata from all nodes.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        resp = self.beach.getAllNodeMetadata()
        self.printOut(resp)

    @report_errors
    def do_remove_from_category(self, s):
        '''Remove an Actor from a category.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('-i',
                            '--id',
                            type=str,
                            dest='id',
                            required=True,
                            help='the ID of the actor to add to the category.')
        parser.add_argument('-c',
                            '--category',
                            type=str,
                            dest='category',
                            required=True,
                            help='category to add the actor to.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.removeFromCategory(arguments.id, arguments.category)
        self.printOut(resp)

    @report_errors
    def do_add_to_category(self, s):
        '''Add an Actor to a category.'''
        parser = argparse.ArgumentParser(prog=inspect.stack()[0][3][3:])
        parser.add_argument('-i',
                            '--id',
                            type=str,
                            dest='id',
                            required=True,
                            help='the ID of the actor to add to the category.')
        parser.add_argument('-c',
                            '--category',
                            type=str,
                            dest='category',
                            required=True,
                            help='category to add the actor to.')
        arguments = self.parse(parser, s)
        if arguments is None:
            return
        resp = self.beach.addToCategory(arguments.id, arguments.category)
        self.printOut(resp)
def __init__(self, configFile, realm=None):
    """Initialize the shell and connect to the Beach cluster.

    realm is accepted (with a harmless default) for interface consistency
    with the other shell variants; the shell always starts in 'global' --
    use the realm command to switch.
    """
    cmd.Cmd.__init__(self)
    self.realm = "global"
    self.updatePrompt()
    self.beach = Beach(configFile)
def __init__(self, configFile, realm=None):
    # Initialize the interactive shell and connect to the Beach cluster.
    # NOTE(review): 'realm' is accepted but ignored -- the shell always
    # starts in 'global'; confirm whether it should seed self.realm.
    cmd.Cmd.__init__(self)
    self.realm = 'global'
    self.updatePrompt()
    self.beach = Beach(configFile)
class BEAdmin( object ):
    '''Thin RPC client for the c2/admin actor: every method wraps one
    admin command, automatically stamping the auth token on the payload.'''
    token = None

    def __init__( self, beach_config, token, timeout = 1000 * 10 ):
        self.token = token
        self.beach = Beach( beach_config, realm = 'hcp' )
        self.vHandle = self.beach.getActorHandle( 'c2/admin/1.0',
                                                  ident = 'cli/955f6e63-9119-4ba6-a969-84b38bfbcc05',
                                                  timeout = timeout,
                                                  nRetries = 3 )

    def _query( self, cmd, data = None ):
        '''Send one admin command; the auth token is added to the payload.

        FIX: the default was the mutable literal {} which this method then
        mutated (data[ 'token' ] = ...), so all default-argument calls shared
        one dict across the object's lifetime.
        '''
        if data is None:
            data = {}
        data[ 'token' ] = self.token
        response = self.vHandle.request( cmd, data )
        return response

    def testConnection( self ):
        return self._query( 'ping' )

    def hcp_getAgentStates( self, aid = None, hostname = None ):
        # Both filters are optional; omitted ones are not sent at all.
        filters = {}
        if aid is not None:
            filters[ 'agent_id' ] = aid
        if hostname is not None:
            filters[ 'hostname' ] = hostname
        return self._query( 'hcp.get_agent_states', filters )

    def hcp_getPeriod( self ):
        return self._query( 'hcp.get_period' )

    def hcp_setPeriod( self, period ):
        return self._query( 'hcp.set_period', { 'period' : int( period ) } )

    def hcp_getEnrollmentRules( self ):
        return self._query( 'hcp.get_enrollment_rules' )

    def hcp_addEnrollmentRule( self, mask, externalIp, internalIp, newOrg, newSubnet, hostname ):
        return self._query( 'hcp.add_enrollment_rule', { 'mask' : mask,
                                                         'external_ip' : externalIp,
                                                         'internal_ip' : internalIp,
                                                         'new_org' : newOrg,
                                                         'new_subnet' : newSubnet,
                                                         'hostname' : hostname } )

    def hcp_delEnrollmentRule( self, mask, externalIp, internalIp, newOrg, newSubnet, hostname ):
        return self._query( 'hcp.del_enrollment_rule', { 'mask' : mask,
                                                         'external_ip' : externalIp,
                                                         'internal_ip' : internalIp,
                                                         'new_org' : newOrg,
                                                         'new_subnet' : newSubnet,
                                                         'hostname' : hostname } )

    def hcp_getTaskings( self ):
        return self._query( 'hcp.get_taskings' )

    def hcp_addTasking( self, mask, moduleId, hashStr ):
        return self._query( 'hcp.add_tasking', { 'mask' : mask, 'module_id' : int( moduleId ), 'hash' : hashStr } )

    def hcp_delTasking( self, mask, moduleId, hashStr ):
        return self._query( 'hcp.remove_tasking', { 'mask' : mask, 'module_id' : int( moduleId ), 'hash' : hashStr } )

    def hcp_getModules( self ):
        return self._query( 'hcp.get_modules' )

    def hcp_addModule( self, moduleId, binary, signature, description ):
        # The module hash is computed server-side-compatible: sha256 of the binary.
        return self._query( 'hcp.add_module', { 'module_id' : moduleId,
                                                'bin' : binary,
                                                'signature' : signature,
                                                'hash' : hashlib.sha256( binary ).hexdigest(),
                                                'description' : description } )

    def hcp_delModule( self, moduleId, hashStr ):
        return self._query( 'hcp.remove_module', { 'module_id' : moduleId, 'hash' : hashStr } )

    def hcp_relocAgent( self, agentid, newOrg, newSubnet ):
        return self._query( 'hcp.reloc_agent', { 'agentid' : agentid, 'new_org' : newOrg, 'new_subnet' : newSubnet } )

    def hcp_getRelocations( self ):
        return self._query( 'hcp.get_relocations' )

    def hbs_getPeriod( self ):
        return self._query( 'hbs.get_period' )

    def hbs_setPeriod( self, period ):
        return self._query( 'hbs.set_period', { 'period' : int( period ) } )

    def hbs_getProfiles( self ):
        return self._query( 'hbs.get_profiles' )

    def hbs_addProfile( self, mask, config ):
        return self._query( 'hbs.set_profile', { 'mask' : mask, 'module_configs' : config } )

    def hbs_delProfile( self, mask ):
        return self._query( 'hbs.del_profile', { 'mask' : mask } )

    def hbs_taskAgent( self, toAgent, task, key, id, expiry = None, investigationId = None ):
        '''Sign a task with the HBS key and send it to one agent.

        Returns None when the agent id is invalid or the task is not an
        rSequence; otherwise the admin actor's response.
        '''
        # Make sure it's a valid agentid
        a = AgentId( toAgent )
        if not a.isValid:
            return None
        if not type( task ) is rSequence:
            return None
        s = Signing( key )
        r = rpcm( isHumanReadable = True, isDebug = True )
        tags = Symbols()
        if investigationId is not None and '' != investigationId:
            task.addStringA( tags.hbs.INVESTIGATION_ID, investigationId )
        toSign = ( rSequence().addSequence( tags.base.HCP_ID,
                                            rSequence().addInt8( tags.base.HCP_ID_ORG, a.org )
                                                       .addInt8( tags.base.HCP_ID_SUBNET, a.subnet )
                                                       .addInt32( tags.base.HCP_ID_UNIQUE, a.unique )
                                                       .addInt8( tags.base.HCP_ID_PLATFORM, a.platform )
                                                       .addInt8( tags.base.HCP_ID_CONFIG, a.config ) )
                              .addSequence( tags.hbs.NOTIFICATION, task )
                              .addInt32( tags.hbs.NOTIFICATION_ID, id ) )
        if expiry is not None:
            toSign.addTimestamp( tags.base.EXPIRY, int( expiry ) )
        toSign = r.serialise( toSign )
        sig = s.sign( toSign )
        # Envelope: the serialized payload plus its signature.
        final = r.serialise( rSequence().addBuffer( tags.base.BINARY, toSign )
                                        .addBuffer( tags.base.SIGNATURE, sig ) )
        return self._query( 'hbs.task_agent', { 'task' : final, 'agentid' : str( a ) } )
globals={ 'json': json, 'msTsToTime': msTsToTime, '_x_': _x_, '_xm_': _xm_, 'hex': hex, 'sanitize': sanitizeJson, 'EventInterpreter': EventInterpreter, 'sorted': sorted }) if len(sys.argv) < 2: print("Usage: python app.py beach_config [listen_port]") sys.exit() beach = Beach(sys.argv[1], realm='hcp') del (sys.argv[1]) model = beach.getActorHandle('models', nRetries=3, timeout=30, ident='lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903') capabilities = beach.getActorHandle( 'analytics/capabilitymanager', nRetries=3, timeout=60, ident='lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903') sensordir = beach.getActorHandle( 'c2/sensordir', nRetries=3, timeout=30, ident='lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903')
import os import sys from beach.beach_api import Beach import logging REPO_ROOT = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), '..', '..' ) if 1 < len( sys.argv ): BEACH_CONFIG_FILE = os.path.abspath( sys.argv[ 1 ] ) else: BEACH_CONFIG_FILE = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), 'sample_cluster.yaml' ) beach = Beach( BEACH_CONFIG_FILE, realm = 'hcp' ) ####################################### # BeaconProcessor # This actor will process incoming # beacons from the sensors. # Parameters: # state_db: these are the connection # details for the mysql database # used to store the low-importance # data tracked at runtime. # _priv_key: the C2 private key. # task_back_timeout: the number of # seconds to wait during each # beacon to give a chance to any # detects to generate tasks for # the sensor to process right away. ####################################### print( beach.addActor( 'c2/BeaconProcessor',
import os import sys from beach.beach_api import Beach import logging if 1 < len(sys.argv): BEACH_CONFIG_FILE = os.path.abspath(sys.argv[1]) else: BEACH_CONFIG_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_cluster.yaml') beach = Beach(BEACH_CONFIG_FILE, realm='hcp') ####################################### # BeaconProcessor # This actor will process incoming # beacons from the sensors. # Parameters: # state_db: these are the connection # details for the mysql database # used to store the low-importance # data tracked at runtime. ####################################### print( beach.addActor('c2/BeaconProcessor', 'c2/beacon/1.0', parameters={ 'state_db': { 'url': 'hcp-state-db', 'db': 'hcp', 'user': '******',
import os
import sys
from beach.beach_api import Beach
import logging

REPO_ROOT = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), '..', '..' )

# Warn (but don't abort) when not running as root; the Cloud-in-a-Can flow
# expects this script under sudo.
if os.geteuid() != 0:
    print( 'Not currently running as root. If you meant to run this as part of the Cloud-in-a-Can you should run this with sudo.' )

# Cluster config comes from argv[1] when provided, else the bundled sample.
if 1 < len( sys.argv ):
    BEACH_CONFIG_FILE = os.path.abspath( sys.argv[ 1 ] )
else:
    BEACH_CONFIG_FILE = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), 'sample_cluster.yaml' )

beach = Beach( BEACH_CONFIG_FILE, realm = 'hcp' )

# Start from a clean slate: remove all existing actors before (re)deploying.
if not beach.flush():
    print( "Could not flush Beach cluster. Are you sure it is running?" )
    sys.exit(-1)

#######################################
# BeaconProcessor
# This actor will process incoming
# beacons from the sensors.
# Parameters:
#   state_db: these are the connection
#     details for the mysql database
#     used to store the low-importance
#     data tracked at runtime.
#   deployment_key: The deployment key
# This will set the host as a beach node. import sys import os import json import yaml import time # Adding the beach lib directory relatively for this example curFileDir = os.path.dirname( os.path.abspath( __file__ ) ) sys.path.append( os.path.join( curFileDir, '..', '..' ) ) from beach.beach_api import Beach print( "Connecting to example beach." ) beach = Beach( os.path.join( curFileDir, 'multinode.yaml' ), realm = 'global' ) print( "Creating ping actor in resource beach node." ) a1 = beach.addActor( 'Ping', 'pingers', strategy = 'resource' ) print( json.dumps( a1, indent = 4 ) ) print( "Creating pong actor in affinity( pingers ) beach node." ) a2 = beach.addActor( 'Pong', 'pongers', strategy = 'affinity', strategy_hint = 'pingers' ) print( json.dumps( a2, indent = 4 ) ) print( "Creating pong actor in isolation." ) a3 = beach.addActor( 'Pong', 'pongers', isIsolated = True ) print( json.dumps( a3, indent = 4 ) ) print( "Idling for a few seconds..." ) time.sleep( 15 )
def __init__( self, configFile, realm = None ):
    # Initialize the interactive shell and connect to the Beach cluster.
    # NOTE(review): 'realm' is accepted but ignored -- the shell always
    # starts in 'global'; confirm whether it should seed self.realm.
    cmd.Cmd.__init__( self )
    self.realm = 'global'
    self.updatePrompt()
    self.beach = Beach( configFile )
class BEAdmin(object):
    """Thin RPC client for the 'c2/admin/1.0' Beach actor.

    Every public method wraps a single admin command; the auth token given
    at construction is attached to every request by _query().
    """

    token = None

    def __init__(self, beach_config, token, timeout=1000 * 10):
        self.token = token
        self.beach = Beach(beach_config, realm='hcp')
        self.vHandle = self.beach.getActorHandle(
            'c2/admin/1.0',
            ident='cli/955f6e63-9119-4ba6-a969-84b38bfbcc05',
            timeout=timeout,
            nRetries=3)

    def _query(self, cmd, data={}):
        """Send 'cmd' with 'data' (plus the auth token) to the admin actor.

        Bug fix: the original wrote the token directly into 'data', which
        both polluted the shared mutable default dict (the token of the
        first instance leaked into every later token-less call) and
        mutated dicts owned by callers. We now work on a copy.
        """
        data = dict(data)
        data['token'] = self.token
        return self.vHandle.request(cmd, data)

    def testConnection(self):
        return self._query('ping')

    def hcp_getAgentStates(self, aid=None, hostname=None):
        # Only include filters that were actually supplied.
        filters = {}
        if aid is not None:
            filters['agent_id'] = aid
        if hostname is not None:
            filters['hostname'] = hostname
        return self._query('hcp.get_agent_states', filters)

    def hcp_getEnrollmentRules(self):
        return self._query('hcp.get_enrollment_rules')

    def hcp_addEnrollmentRule(self, mask, externalIp, internalIp, newOrg,
                              newSubnet, hostname):
        return self._query(
            'hcp.add_enrollment_rule', {
                'mask': mask,
                'external_ip': externalIp,
                'internal_ip': internalIp,
                'new_org': newOrg,
                'new_subnet': newSubnet,
                'hostname': hostname
            })

    def hcp_delEnrollmentRule(self, mask, externalIp, internalIp, hostname):
        return self._query(
            'hcp.del_enrollment_rule', {
                'mask': mask,
                'external_ip': externalIp,
                'internal_ip': internalIp,
                'hostname': hostname
            })

    def hcp_getTaskings(self):
        return self._query('hcp.get_taskings')

    def hcp_addTasking(self, mask, moduleId, hashStr):
        return self._query('hcp.add_tasking', {
            'mask': mask,
            'module_id': int(moduleId),
            'hash': hashStr
        })

    def hcp_delTasking(self, mask, moduleId, hashStr):
        return self._query('hcp.remove_tasking', {
            'mask': mask,
            'module_id': int(moduleId),
            'hash': hashStr
        })

    def hcp_getModules(self):
        return self._query('hcp.get_modules')

    def hcp_addModule(self, moduleId, binary, signature, description):
        # The module hash is derived server-side verifiable: sha256 of the
        # binary payload itself.
        return self._query(
            'hcp.add_module', {
                'module_id': moduleId,
                'bin': binary,
                'signature': signature,
                'hash': hashlib.sha256(binary).hexdigest(),
                'description': description
            })

    def hcp_delModule(self, moduleId, hashStr):
        return self._query('hcp.remove_module', {
            'module_id': moduleId,
            'hash': hashStr
        })

    def hcp_relocAgent(self, agentid, newOrg, newSubnet):
        return self._query('hcp.reloc_agent', {
            'agentid': agentid,
            'new_org': newOrg,
            'new_subnet': newSubnet
        })

    def hcp_getRelocations(self):
        return self._query('hcp.get_relocations')

    def hbs_getProfiles(self):
        return self._query('hbs.get_profiles')

    def hbs_addProfile(self, mask, config):
        return self._query('hbs.set_profile', {
            'mask': mask,
            'module_configs': config
        })

    def hbs_delProfile(self, mask):
        return self._query('hbs.del_profile', {'mask': mask})

    def hbs_taskAgent(self, toAgent, task, key, id, expiry=None,
                      investigationId=None):
        """Sign and send a task to a single agent.

        Returns None when 'toAgent' is not a valid AgentId or 'task' is
        not an rSequence; otherwise the actor response.
        """
        a = AgentId(toAgent)
        if not a.isValid:
            return None
        # Exact-type check kept on purpose: subclasses are rejected, as in
        # the original implementation.
        if type(task) is not rSequence:
            return None
        s = Signing(key)
        r = rpcm(isHumanReadable=True, isDebug=True)
        tags = Symbols()
        if investigationId is not None and '' != investigationId:
            task.addStringA(tags.hbs.INVESTIGATION_ID, investigationId)
        # Envelope: the full agent identity, the notification payload and
        # a notification id, all of which get signed together.
        toSign = (rSequence().addSequence(
            tags.base.HCP_ID,
            rSequence().addInt8(tags.base.HCP_ID_ORG, a.org).addInt8(
                tags.base.HCP_ID_SUBNET,
                a.subnet).addInt32(tags.base.HCP_ID_UNIQUE,
                                   a.unique).addInt8(
                                       tags.base.HCP_ID_PLATFORM,
                                       a.platform).addInt8(
                                           tags.base.HCP_ID_CONFIG,
                                           a.config)).addSequence(
                                               tags.hbs.NOTIFICATION,
                                               task).addInt32(
                                                   tags.hbs.NOTIFICATION_ID,
                                                   id))
        if expiry is not None:
            toSign.addTimestamp(tags.base.EXPIRY, int(expiry))
        toSign = r.serialise(toSign)
        sig = s.sign(toSign)
        final = r.serialise(rSequence().addBuffer(
            tags.base.BINARY, toSign).addBuffer(tags.base.SIGNATURE, sig))
        return self._query('hbs.task_agent', {
            'task': final,
            'agentid': str(a),
            'expiry': expiry
        })
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# URL routing table, kept as (pattern, handler-class-name) pairs and then
# flattened into the interleaved tuple layout web.py expects.
_ROUTES = ((r'/', 'Index'),
           (r'/dashboard', 'Dashboard'),
           (r'/sensor', 'Sensor'),
           (r'/search', 'Search'),
           (r'/sensor_state', 'SensorState'),
           (r'/timeline', 'Timeline'),
           (r'/objsearch', 'ObjSearch'),
           (r'/obj', 'ObjViewer'),
           (r'/lastevents', 'LastEvents'),
           (r'/event', 'EventView'),
           (r'/hostobjects', 'HostObjects'),
           (r'/detects_data', 'JsonDetects'),
           (r'/detects', 'ViewDetects'),
           (r'/detect', 'ViewDetect'))
urls = tuple(part for route in _ROUTES for part in route)

web.config.debug = False
app = web.application(urls, globals())
render = web.template.render('templates',
                             base='base',
                             globals={
                                 'json': json,
                                 'tsToTime': tsToTime
                             })

if len(sys.argv) < 2:
    print("Usage: python app.py beach_config [listen_port]")
    sys.exit()

# First argument is the beach config; consume it so web.py can interpret
# the (optional) remaining argument as the listen port.
beach = Beach(sys.argv[1], realm='hcp')
del sys.argv[1]
model = beach.getActorHandle('models',
                             nRetries=3,
                             timeout=30,
                             ident='lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903')

app.run()
import json
import yaml
import time

# Make the beach package importable relative to this example.
# NOTE(review): 'os' and 'sys' are used below; their imports appear
# earlier in the file, before this excerpt.
curFileDir = os.path.dirname( os.path.abspath( __file__ ) )
sys.path.append( os.path.join( curFileDir, '..', '..' ) )

from beach.beach_api import Beach
from beach.utils import _getIpv4ForIface

print( "Connecting to example beach." )
# We usually would not need the extraTmpSeedNode parameter in production
# since the config file would be populated. Here, to keep the config file
# simple and portable, we add our IP manually to operate in single-node mode.
beach = Beach( os.path.join( curFileDir, 'simple.yaml' ), realm = 'global' )

print( "Creating ping actor in random beach node." )
a1 = beach.addActor( 'Ping', 'pingers' )
print( json.dumps( a1, indent = 4 ) )

print( "Creating pong actor in random beach node." )
a2 = beach.addActor( 'Pong', 'pongers' )
print( json.dumps( a2, indent = 4 ) )

print( "Idling for a few seconds..." )
time.sleep( 30 )

print( "Querying for beach directory." )
d = beach.getDirectory()
print( json.dumps( d, indent = 4 ) )
params = urlparse.parse_qs("".join(params).lstrip("&"), strict_parsing=True) except: params = {} if "pl" in params: clean_params = { "payload": params["pl"][0], "public_ip": str(environment.get("X-FORWARDED-FOR", environment.get("REMOTE_ADDR", ""))), } resp = vHandle.request("beacon", data=clean_params) if resp.isSuccess: isSuccess = True response = "<html><h1>%s</h1></html>" % resp.data["resp"] if isSuccess: status = "200 OK" start_response(status, [("Content-Type", "text/html"), ("Content-Length", str(len(response)))]) return [str(response)] beach = Beach(sys.argv[1], realm="hcp") vHandle = beach.getActorHandle("c2/beacon", nRetries=3, timeout=30, ident="http/5bc10821-2d3f-413a-81ee-30759b9f863b") server = wsgi.WSGIServer(("", 80), handle_beacon, spawn=100) server.serve_forever()
if not info.isSuccess: raiseUnavailable(str(info)) return info.data ############################################################################### # BOILER PLATE ############################################################################### os.chdir(os.path.dirname(os.path.abspath(__file__))) urls = (r'/', 'Index', r'/sensorstate', 'SensorState', r'/timeline', 'Timeline', r'/lastevents', 'LastEvents', r'/detects', 'Detects', r'/hostchanges', 'HostChanges', r'/objectloc', 'ObjectLocation') web.config.debug = False app = web.application(urls, globals()) if len(sys.argv) < 2: print("Usage: python app.py beach_config [listen_port]") sys.exit() beach = Beach(sys.argv[1], realm='hcp') del (sys.argv[1]) model = beach.getActorHandle('models', nRetries=3, timeout=30, ident='rest/be41bb0f-449a-45e9-87d8-ef4533336a2d') app.run()
class Patrol(object):
    """Keeps a set of Beach actors alive at a target cardinality.

    A patrol is configured (via monitor() or loadFromUrl()) with named actor
    entries; a background sync loop periodically counts the live instances
    owned by this patrol and spawns any that are missing. The target count
    can optionally scale with an external load metric.
    """

    def __init__(self,
                 configFile,
                 identifier='default',
                 sync_frequency=15.0,
                 logging_dest='/dev/log',
                 realm='global',
                 scale=None,
                 actorsRoot=None):
        self._stopEvent = gevent.event.Event()
        self._logger = None
        self._log_level = logging.INFO
        self._log_dest = logging_dest
        self._realm = realm
        self._initLogging(self._log_level, logging_dest)
        self._threads = gevent.pool.Group()
        # Ownership tag attached to every actor this patrol spawns; used to
        # recognize our own actors when scanning the cluster.
        self._owner = 'beach.patrol/%s' % (identifier, )
        self._mutex = BoundedSemaphore(value=1)
        self._entries = OrderedDict()
        self._freq = sync_frequency
        self._updateFreq = 60 * 60
        self._patrolHash = None
        self._patrolUrl = None
        self._isMonitored = False
        self._originalTtl = None
        self._beach = Beach(configFile, realm=realm)
        # 'scale' may be a constant int or a callable returning the current
        # load metric (see _getEffectiveScale).
        self._scale = scale
        self._actorsRoot = actorsRoot
        if self._actorsRoot is not None and not self._actorsRoot.endswith('/'):
            self._actorsRoot += '/'

    def _initLogging(self, level, dest):
        # Dedicated syslog logger; handlers are reset so repeated
        # construction does not stack duplicate handlers.
        self._logger = logging.getLogger('beach.patrol')
        self._logger.handlers = []
        self._logger.setLevel(level)
        handler = logging.handlers.SysLogHandler(address=dest)
        handler.setFormatter(logging.Formatter("%(asctime)-15s %(message)s"))
        self._logger.addHandler(handler)
        self._logger.propagate = False

    def _log(self, msg):
        self._logger.info('%s : %s', self.__class__.__name__, msg)

    def _logCritical(self, msg):
        self._logger.error('%s : %s', self.__class__.__name__, msg)

    def _scanForExistingActors(self):
        """Return {entry_name: live_instance_count} for actors we own."""
        tally = {}
        mtd = self._beach.getAllNodeMetadata()
        for node_mtd in mtd.itervalues():
            # Nodes that failed to respond report False.
            if node_mtd is False:
                continue
            for aid, actor_mtd in node_mtd.get('data', {}).get('mtd', {}).iteritems():
                if self._stopEvent.wait(0):
                    break
                owner = actor_mtd.get('owner', None)
                if owner in self._entries:
                    tally.setdefault(self._entries[owner].name, 0)
                    tally[self._entries[owner].name] += 1
        return tally

    def _getTargetActorNum(self, actorEntry, currentScale):
        """Compute how many instances of this entry should be running."""
        if callable(actorEntry.initialInstances):
            targetNum = actorEntry.initialInstances()
        else:
            targetNum = actorEntry.initialInstances
        if currentScale is not None and actorEntry.scalingFactor is not None:
            preScaleTarget = targetNum
            # One instance per scalingFactor units of load, rounded up.
            targetNum = int(currentScale / actorEntry.scalingFactor)
            if 0 != (currentScale % actorEntry.scalingFactor):
                targetNum += 1
            if actorEntry.maxInstances is not None and targetNum > actorEntry.maxInstances:
                # Bug fix: this previously read "actor.maxInstances" --
                # 'actor' is undefined in this scope and raised a NameError
                # whenever the cap was hit.
                targetNum = actorEntry.maxInstances
            if preScaleTarget is not None and targetNum < preScaleTarget:
                # Never scale below the configured baseline.
                targetNum = preScaleTarget
        # If we're only spawning a single actor, it must not be drained
        # so that availability is maintained.
        if 1 == targetNum and actorEntry.actorArgs[1].get('is_drainable', False):
            actorEntry.actorArgs[1]['is_drainable'] = False
            self._log(
                'actor %s was set to drainable but only starting once instance to turning it off'
                % (actorEntry.name, ))
        return targetNum

    def _getEffectiveScale(self):
        """Resolve the scale metric: constant, callable, or None."""
        if type(self._scale) is int:
            currentScale = self._scale
        elif self._scale is not None:
            currentScale = self._scale()
        else:
            currentScale = None
        return currentScale

    def _lockDirCache(self):
        # We do a single refresh of the directory.
        # Don't force since it may have already gotten a recent snapshot.
        self._beach.getDirectory(isForce=True)
        # To remove the directory jitter we will suspend directory refresh
        # temporarily by inflating the cache TTL.
        self._originalTtl = self._beach._dirCacheTtl
        self._beach._dirCacheTtl = 60 * 5

    def _unlockDirCache(self):
        # Restore the original ttl.
        self._beach._dirCacheTtl = self._originalTtl

    def _initializeMissingActors(self, existing):
        """Spawn instances for every entry below its target count.

        'existing' is the tally produced by _scanForExistingActors().
        """
        currentScale = self._getEffectiveScale()
        self._lockDirCache()
        try:
            for actorEntry in self._entries.itervalues():
                if self._stopEvent.wait(0):
                    break
                actorName = actorEntry.name
                current = existing.get(actorName, 0)
                # This uses the allNodeMetadata which is NEVER cached.
                targetNum = self._getTargetActorNum(actorEntry, currentScale)
                if current < targetNum:
                    self._log(
                        'actor %s has %d instances but requires %d, spawning'
                        % (actorName, current, targetNum))
                    for _ in range(targetNum - current):
                        self._spawnNewActor(actorEntry)
        finally:
            self._unlockDirCache()

    def start(self):
        """Begin the periodic sync loop in a green thread."""
        self._stopEvent.clear()
        self._log('starting, patrolling %d actors' % len(self._entries))
        self._log('starting patrol')
        gevent.sleep(10)
        self._threads.add(
            gevent.spawn(withLogException(self._sync, patrol=self)))

    def stop(self):
        """Signal the loops to stop and reap the green threads."""
        self._log('stopping patrol')
        self._stopEvent.set()
        self._threads.join(timeout=30)
        self._threads.kill(timeout=10)
        self._log('patrol stopped')

    def monitor(self,
                name,
                initialInstances,
                maxInstances=None,
                scalingFactor=None,
                onFailureCall=None,
                actorArgs=[],
                actorKwArgs={}):
        """Register an actor entry to be kept alive by this patrol.

        initialInstances may be an int or a callable returning one.
        """
        # Copy both containers: we modify them below, and the original code
        # wrote 'owner' straight into the shared mutable default dict (and
        # into dicts owned by callers).
        actorArgs = list(actorArgs)
        actorKwArgs = dict(actorKwArgs)
        if self._actorsRoot is not None:
            actorArgs = [self._actorsRoot + actorArgs[0]] + actorArgs[1:]
        record = _PatrolEntry()
        record.name = name
        record.initialInstances = initialInstances
        record.maxInstances = maxInstances
        record.scalingFactor = scalingFactor
        record.onFailureCall = onFailureCall
        # If the time to drain is dynamic we keep a copy of the function.
        ttd = actorKwArgs.get('time_to_drain', None)
        if callable(ttd):
            record.timeToDrainFunc = ttd
        actorKwArgs['owner'] = '%s/%s' % (self._owner, name)
        record.actorArgs = (actorArgs, actorKwArgs)
        self._entries['%s/%s' % (self._owner, name)] = record

    def _spawnNewActor(self, actorEntry):
        """Spawn one instance of the entry; returns True on success."""
        kwArgs = actorEntry.actorArgs[1]
        if actorEntry.timeToDrainFunc is not None:
            # Re-evaluate the dynamic drain time per spawn, on a copy.
            kwArgs = kwArgs.copy()
            kwArgs['time_to_drain'] = actorEntry.timeToDrainFunc()
        status = self._beach.addActor(*(actorEntry.actorArgs[0]), **kwArgs)
        if status is not False and status is not None and 'data' in status and 'uid' in status['data']:
            self._log('actor launched: %s' % status)
            return True
        elif status is False:
            self._log(
                'timeout waiting for actor to launch: will wait until next sync if it came online'
            )
        else:
            self._log('failed to launch actor: %s' % status)
        return False

    def _sync(self):
        # Main loop: every self._freq seconds, reconcile live actors
        # against the configured targets.
        while not self._stopEvent.wait(self._freq):
            with self._mutex:
                self._log('running sync')
                self._initializeMissingActors(self._scanForExistingActors())

    def remove(self, name=None):
        """Forget one entry (by name) or all entries; returns success."""
        with self._mutex:
            if name is not None:
                k = '%s/%s' % (self._owner, name)
                if k not in self._entries:
                    return False
                del self._entries[k]
            else:
                self._entries = OrderedDict()
        return True

    def _getPatrolFromUrl(self, url):
        """Fetch a patrol definition; returns (content, resolved_path)."""
        if '://' in url:
            patrolFilePath = url
            if patrolFilePath.startswith('file://'):
                patrolFilePath = 'file://%s' % os.path.abspath(
                    patrolFilePath[len('file://'):])
            patrolFile = urllib2.urlopen(patrolFilePath)
        else:
            patrolFilePath = os.path.abspath(url)
            patrolFile = open(patrolFilePath, 'r')
        return patrolFile.read(), patrolFilePath

    def loadFromUrl(self, url, isMonitorForUpdates=False):
        """Load patrol entries from a patrol script at 'url'.

        The script is executed with 'Patrol' bound to self.monitor.
        NOTE(review): exec of remotely-fetched content is inherently
        trusting of the patrol source.
        """
        patrolContent, patrolFilePath = self._getPatrolFromUrl(url)
        self._patrolUrl = url
        self._patrolHash = hashlib.sha256(patrolContent).hexdigest()
        exec(
            patrolContent, {
                'Patrol': self.monitor,
                '__file__': patrolFilePath,
                'NUM_CPU_CORES': multiprocessing.cpu_count,
                'NUM_NODES': self._beach.getNodeCount
            })
        if isMonitorForUpdates and not self._isMonitored:
            self._isMonitored = True
            self._threads.add(
                gevent.spawn(withLogException(self._updatePatrol,
                                              patrol=self)))

    def _updatePatrol(self):
        # Periodically re-fetch the patrol script and reload it when its
        # hash changes.
        # NOTE(review): the early 'return's below end monitoring on the
        # first failed fetch or unchanged check, as in the original code;
        # 'continue' may have been intended -- behavior kept as-is.
        while not self._stopEvent.wait(self._updateFreq):
            try:
                patrolContent, patrolFilePath = self._getPatrolFromUrl(
                    self._patrolUrl)
            except:
                return
            if self._patrolHash == hashlib.sha256(patrolContent).hexdigest():
                return
            with self._mutex:
                self._entries = OrderedDict()
                # Bug fix: this previously read "self._patrolUrl = url"
                # where 'url' is undefined here (NameError on every
                # refresh). The URL does not change across refreshes, so
                # the assignment is dropped.
                self._patrolHash = hashlib.sha256(patrolContent).hexdigest()
                exec(patrolContent, {
                    'Patrol': self.monitor,
                    '__file__': patrolFilePath
                })
class DeploymentManager( Actor ): def init( self, parameters, resources ): self.beach_api = Beach( self._beach_config_path, realm = 'hcp' ) self.db = CassDb( parameters[ 'db' ], 'hcp_analytics' ) self.audit = self.getActorHandle( resources[ 'auditing' ], timeout = 30, nRetries = 3 ) self.admin = self.getActorHandle( resources[ 'admin' ], timeout = 30, nRetries = 3 ) self.sensorDir = self.getActorHandle( resources[ 'sensordir' ], timeout = 30, nRetries = 3 ) self.genDefaultsIfNotPresent() isSuccess, _oid = self.get_global_config( None ) if isSuccess: self.admin_oid = uuid.UUID( str( _oid[ 'global/admin_oid' ] ) ) else: self.admin_oid = None self.handle( 'get_global_config', self.get_global_config ) self.handle( 'get_org_config', self.get_org_config ) self.handle( 'set_config', self.set_config ) self.handle( 'deploy_org', self.deploy_org ) self.handle( 'get_c2_cert', self.get_c2_cert ) self.handle( 'get_root_cert', self.get_root_cert ) self.handle( 'update_profile', self.update_profile ) self.handle( 'get_profiles', self.get_profiles ) self.handle( 'get_supported_events', self.get_supported_events ) self.handle( 'get_capabilities', self.get_capabilities ) self.handle( 'get_quick_detects', self.get_quick_detects ) self.handle( 'del_sensor', self.del_sensor ) self.handle( 'refresh_all_installers', self.refresh_all_installers ) self.handle( 'set_installer_info', self.set_installer_info ) self.handle( 'del_installer', self.del_installer ) self.metricsUrl = resources.get( 'metrics_url', 'https://limacharlie.io/metrics/opensource' ) self.schedule( ( 60 * 60 ) + random.randint( 0, 60 * 60 ) , self.sendMetricsIfEnabled ) def deinit( self ): self.db.shutdown() def sendMetricsIfEnabled( self ): status, conf = self.get_global_config( None ) if status is True and '0' != conf.get( 'global/send_metrics', '0' ): # Metrics upload is enabled. 
self.log( 'Reporting metrics to %s' % self.metricsUrl ) metrics = {} metrics[ 'deployment_id' ] = conf.get( 'global/deployment_id', '' ) sensorReq = self.admin.request( 'hcp.get_agent_states', {} ) if sensorReq.isSuccess: sensors = sensorReq.data.get( 'agents', {} ) metrics[ 'n_sensors' ] = len( sensors ) del( sensorReq ) dirReq = self.sensorDir.request( 'get_dir', {} ) if dirReq.isSuccess: metrics[ 'n_online_sensors' ] = len( dirReq.data.get( 'dir', {} ) ) del( dirReq ) metrics[ 'n_nodes' ] = self.beach_api.getNodeCount() # Get node health and anonymize the node IPs tmpHealth = self.beach_api.getClusterHealth() metrics[ 'nodes_health' ] = {} nodeCount = 0 for nodeIp, health in tmpHealth.iteritems(): nodeCount += 1 metrics[ 'nodes_health' ][ str( nodeCount ) ] = health # All metrics gathered, send them. try: req = urllib2.Request( self.metricsUrl ) req.add_header( 'Content-Type', 'application/json' ) req.add_header( 'User-Agent', 'lc_cloud' ) response = urllib2.urlopen( req, json.dumps( metrics ) ) except: self.log( 'failed to send metrics: %s' % traceback.format_exc() ) def generateKey( self ): key = { 'pub' : None, 'pubDer' : None, 'pri' : None, 'priDer' : None, } r = M2Crypto.RSA.gen_key( 2048, 65537 ) tmpHandle, tmpPath = tempfile.mkstemp() r.save_pub_key( tmpPath ) with open( tmpPath, 'rb' ) as f: key[ 'pub' ] = f.read() os.close( tmpHandle ) os.unlink( tmpPath ) tmpHandle, tmpPath = tempfile.mkstemp() r.save_key( tmpPath, None ) with open( tmpPath, 'rb' ) as f: key[ 'pri' ] = f.read() os.system( 'openssl rsa -in %s -out %s.pub.der -outform DER -pubout' % ( tmpPath, tmpPath ) ) with open( '%s.pub.der' % tmpPath, 'rb' ) as f: key[ 'pubDer' ] = f.read() os.close( tmpHandle ) os.unlink( tmpPath ) os.unlink( '%s.pub.der' % tmpPath ) tmpHandle, tmpPath = tempfile.mkstemp() r.save_key_der( tmpPath ) with open( tmpPath, 'rb' ) as f: key[ 'priDer' ] = f.read() os.close( tmpHandle ) os.unlink( tmpPath ) return key def generateCert( self ): cert = { 'key' : None, 'cert' 
: None, } tmpHandle, tmpPath = tempfile.mkstemp() os.system( 'openssl req -x509 -days 36500 -newkey rsa:4096 -keyout %s_key.pem -out %s_cert.pem -nodes -sha256 -subj "/C=US/ST=CA/L=Mountain View/O=refractionPOINT/CN=rp_c2_dev"' % ( tmpPath, tmpPath ) ) with open( '%s_key.pem' % tmpPath, 'rb' ) as f: cert[ 'key' ] = f.read() os.close( tmpHandle ) os.unlink( '%s_key.pem' % tmpPath ) with open( '%s_cert.pem' % tmpPath, 'rb' ) as f: cert[ 'cert' ] = f.read() os.unlink( '%s_cert.pem' % tmpPath ) return cert def packKey( self, key ): return base64.b64encode( msgpack.packb( key ) ) def unpackKey( self, key ): return msgpack.unpackb( base64.b64decode( key ) ) def getMaskFor( self, oid, binName ): aid = AgentId( '0.0.0.0.0' ) aid.org_id = oid if 'x64' in binName: aid.architecture = AgentId.ARCHITECTURE_X64 else: aid.architecture = AgentId.ARCHITECTURE_X86 if 'osx' in binName: aid.platform = AgentId.PLATFORM_MACOS elif 'win' in binName: aid.platform = AgentId.PLATFORM_WINDOWS elif 'ios' in binName: aid.platform = AgentId.PLATFORM_IOS elif 'android' in binName: aid.platform = AgentId.PLATFORM_ANDROID elif 'ubuntu' in binName or 'centos' in binName or 'linux' in binName: aid.platform = AgentId.PLATFORM_LINUX return aid def getProfileFor( self, oid, platform, isHumanReadable = False ): aid = AgentId( ( oid, '0', '0', platform, None ) ) resp = self.admin.request( 'hbs.get_profiles', { 'oid' : oid, 'is_compiled' : True } ) realProfile = None if resp.isSuccess: for profile in resp.data[ 'profiles' ]: if aid.asString() == AgentId( profile[ 'mask' ] ).asString(): r = rpcm( isHumanReadable = isHumanReadable, isDebug = False, isDetailedDeserialize = False ) if isHumanReadable: r.loadSymbols( Symbols.lookups ) r.setBuffer( profile[ 'original_configs' ] ) return r.deserialise( isList = True ) return None def setProfileFor( self, oid, platform, profile ): aid = AgentId( ( oid, '0', '0', platform, None ) ) r = rpcm( isHumanReadable = True, isDebug = False, isDetailedDeserialize = False ) 
r.loadSymbols( Symbols.lookups ) humanProfile = r.serialise( profile ) if humanProfile is not None: r.setBuffer( humanProfile ) humanProfile = r.deserialise( isList = True ) resp = self.admin.request( 'hbs.set_profile', { 'module_configs' : profile, 'mask' : aid, 'original' : humanProfile } ) return resp return False def getSensorPackage( self ): packages = {} info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/sensorpackage', ) ) if not info or info[ 0 ] is None or info[ 0 ] == '': self.log( 'no sensor package defined' ) else: pkgUrl = urllib2.urlopen( info[ 0 ] ) zipPackage = ZipFile( BytesIO( pkgUrl.read() ) ) packages = { name: zipPackage.read( name ) for name in zipPackage.namelist() } return packages def genDefaultsIfNotPresent( self ): isNeedDefaults = False # Root Key is the canary for needing to generate defaults info = self.db.getOne( 'SELECT conf, value FROM configs WHERE conf = %s', ( 'key/root', ) ) if info is None or '' == info[ 1 ]: isNeedDefaults = True if isNeedDefaults: self.log( 'missing defaults, generating them' ) rootKey = self.packKey( self.generateKey() ) c2Cert = self.packKey( self.generateCert() ) secret = str( uuid.uuid4() ) primaryDomain = 'rp_c2_dev' primaryPort = '443' secondaryDomain = '127.0.0.1' secondaryPort = '443' adminOid = uuid.uuid4() uiDomain = 'limacharlie' defaultWelcome = '''#### New LimaCharlie Deployment Checklist This is a quick checklist of things you likely want to customize with your new deployment: 1. Using the top left menu, head over to the [configuration page](/configs). 1. You'll want to fill in as many of the config values as you can, but more critically 1. Primary and Secondary domains 1. Sensor Package, unless you know what you're doing, use the latest release link that shows up 1. UI Domain, set that to the IP (or DNS if setup) of the LC install (and with port, default 8888) 1. 
Now head out to the [profile page](/profile), it's the link with your user name (default admin@limacharlie) 1. A default organization (ADMIN_ORG) was already created for you, any users member of that organization will automatically be administrator 1. Create your first organization where you can add new users and enroll sensors 1. Enter the name of your organization in Create Org 1. This will generate new keys and installers for this organization 1. Add yourself (admin@limacharlie) or a new unprivileged user you created to this new organization 1. Select the new organization in the Member Organizations, enter the email of the user to add and click Add Selected 1. Alternatively, as an admin you can arbitrarily join any organization by usng the All Organization panel and click Join Organization 1. If you head over to the [sensor configuration](/sensor_configs) page, you can customize the collectors enabled and the events automatically sent to the cloud, but sane defaults should already be set 1. Go to the [installers and logs page](/manage) 1. There you will find the installers (they're all the same), download one and run it on a relevant host to get your first sensor running 1. On the same page, you will find the installation keys for each organization you're a member of, use it when launching your sensor to make sure it enrolls in the right organization 1. You should not be able to see your sensor enrolled in the [sensors page](/sensors) ''' defaultPolicy = '''### How your data is handled All raw data from sensors, in the form of events, are kept private and under the control of the system owner, as assumed by User membership into the Organization listed as owner of the sensor. Events will at no point be looked at or exported outside the Service without the express authorization of a member of the owning Organization, even for debugging purposes. 
For troubleshooting purposes an operator of the Service may request access, but it is up to you as owner to approve or deny. If access is approved, auditing of the access will be visible through the general audit log visible to you as the operator temporarily joining the Organization requiring troubleshooting. Extracted information, in the form of Objects (as seen through the Service) will be deemed shareable when its source has been anonymized. This means that the information tuple ( PROCESS_NAME, MyServerNumber2, explorer.exe ) is NOT shareable. but the information tuple ( PROCESS_NAME, explorer.exe, "seen on 3000 hosts" ) IS shareable. However, Object sharing to other users of the Service is done on a if-seen-by-organization basis. This means that "explorer.exe" will only be visible to a User if that User is a member of an Organization that has observed that process on one of its sensors. Therefore, you running "some_unique_executable_to_you.exe" on one of your sensors, where that executable is unique and has never been observed anywhere else, will not result in the sharing of the existence of the executable with Users not member of your Organization. We believe this sharing policy strikes a good balance between privacy and information sharing between users of the Service allowing for a better visibility and investigative power. ''' try: resp = json.loads( urllib2.urlopen( 'https://api.github.com/repos/refractionPOINT/limacharlie/releases/latest' ).read() ) sensorPackage = resp[ 'assets' ][ 0 ][ 'browser_download_url' ] except: sensorpackage = '' self.admin_oid = adminOid self.log( 'loading admin oid' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/admin_oid', str( adminOid ) ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'New admin oid generated.' 
} ) self.log( 'loading ui domain' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/uidomain', uiDomain ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting ui domain.' } ) self.log( 'loading whatsnew' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/whatsnew', defaultWelcome ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting whatsnew text.' } ) self.log( 'loading policy' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/policy', defaultPolicy ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting policy text.' } ) self.log( 'loading outage display' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/outagetext', '' ) ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/outagestate', '0' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting outagetext info.' } ) self.log( 'loading current latest sensor package version' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/sensorpackage', sensorPackage ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting sensor package.' } ) self.log( 'loading root key' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'key/root', rootKey ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'New root key pair generated.' } ) self.log( 'loading c2 cert' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'key/c2', c2Cert ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'New c2 cert generated.' 
} ) self.log( 'loading primary domain and port' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/primary', primaryDomain ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting primary domain: %s.' % primaryDomain } ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/primary_port', primaryPort ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting primary port: %s.' % primaryPort } ) self.log( 'loading secondary domain and port' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/secondary', secondaryDomain ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting secondary domain: %s.' % secondaryDomain } ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/secondary_port', secondaryPort ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting secondary port: %s.' % secondaryPort } ) self.log( 'loading metrics upload' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/send_metrics', '0' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting metrics upload.' } ) self.log( 'loading deployment id' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/deployment_id', str(uuid.uuid4()) ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting metrics upload.' } ) self.log( 'loading modeling level' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/modeling_level', '10' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting modeling level.' 
} ) self.log( 'loading logging dir' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/logging_dir', '' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting logging dir.' } ) self.log( 'loading s3 bucket' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/s3_bucket', '' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting s3 bucket.' } ) self.log( 'loading aws key id' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/aws_key_id', '' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting aws key id.' } ) self.log( 'loading aws secret key id' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/aws_secret_key_id', '' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting aws secret key id.' } ) self.log( 'loading 2fa mode' ) self.db.execute( 'INSERT INTO configs ( conf, value ) VALUES ( %s, %s )', ( 'global/2fa_mode', 'on' ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Setting 2fa mode.' 
} ) def obfuscate( self, buffer, key ): obf = BytesIO() index = 0 for hx in buffer: obf.write( chr( ( ( ord( key[ index % len( key ) ] ) ^ ( index % 255 ) ) ^ ( STATIC_STORE_MAX_SIZE % 255 ) ) ^ ord( hx ) ) ) index = index + 1 return obf.getvalue() def setSensorConfig( self, sensor, config ): config = self.obfuscate( rpcm().serialise( config ), OBFUSCATION_KEY ) magic = "\xFA\x57\xF0\x0D" + ( "\x00" * ( len( config ) - 4 ) ) if magic not in sensor: return None sensor = sensor.replace( magic, config ) return sensor def genBinariesForOrg( self, sensorPackage, oid ): rootPub = None rootPri = None hbsPub = None c2Cert = None iid = uuid.uuid4() info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'key/root', ) ) if not info or not info[ 0 ]: self.log( 'failed to get root key' ) return False rootKey = self.unpackKey( info[ 0 ] ) rootPub = rootKey[ 'pubDer' ] rootPri = rootKey[ 'priDer' ] del( rootKey ) info = self.db.getOne( 'SELECT pub FROM hbs_keys WHERE oid = %s', ( oid, ) ) if not info or not info[ 0 ]: self.log( 'failed to get hbs key' ) return False hbsPub = info[ 0 ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'key/c2', ) ) if not info or not info[ 0 ]: self.log( 'failed to get c2 cert' ) return False c2Cert = self.unpackKey( info[ 0 ] )[ 'cert' ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/primary', ) ) if not info or not info[ 0 ]: self.log( 'failed to get primary domain' ) return False primaryDomain = info[ 0 ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/primary_port', ) ) if not info or not info[ 0 ]: self.log( 'failed to get primary port' ) return False primaryPort = int( info[ 0 ] ) info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/secondary', ) ) if not info or not info[ 0 ]: self.log( 'failed to get secondary domain' ) return False secondaryDomain = info[ 0 ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf 
= %s', ( 'global/secondary_port', ) ) if not info or not info[ 0 ]: self.log( 'failed to get secondary port' ) return False secondaryPort = int( info[ 0 ] ) _ = Symbols hbsConfig = ( rSequence().addBuffer( _.hbs.ROOT_PUBLIC_KEY, hbsPub ) ) signing = Signing( rootPri ) installersToLoad = {} hbsToLoad = {} kernelToLoad = {} for binName, binary in sensorPackage.iteritems(): if binName.startswith( 'hcp_' ): installersToLoad[ binName ] = binary elif binName.startswith( 'hbs_' ) and 'release' in binName: patched = self.setSensorConfig( binary, hbsConfig ) hbsToLoad[ binName ] = ( patched, signing.sign( patched ), hashlib.sha256( patched ).hexdigest() ) elif binName.startswith( 'kernel_' ) and 'release' in binName: kernelToLoad[ binName ] = ( binary, signing.sign( binary ), hashlib.sha256( binary ).hexdigest() ) self.log( 'binaries for %s have been generated, loading them' % oid ) resp = self.admin.request( 'hcp.remove_installer', { 'oid' : oid } ) if not resp.isSuccess: self.log( 'error wiping previous installers: %s' % resp ) return False for binName, binary in installersToLoad.iteritems(): resp = self.admin.request( 'hcp.add_installer', { 'oid' : oid, 'iid' : iid, 'description' : binName, 'installer' : binary } ) if not resp.isSuccess: self.log( 'error loading new installer for %s' % oid ) return False resp = self.admin.request( 'hcp.remove_tasking', { 'oid' : oid } ) if not resp.isSuccess: self.log( 'error wiping previous taskings: %s' % resp ) return False for binName, binInfo in hbsToLoad.iteritems(): binary, binSig, binHash = binInfo aid = self.getMaskFor( oid, binName ) resp = self.admin.request( 'hcp.add_module', { 'module_id' : HcpModuleId.HBS, 'hash' : binHash, 'bin' : binary, 'signature' : binSig } ) if resp.isSuccess: resp = self.admin.request( 'hcp.add_tasking', { 'mask' : aid.asString(), 'module_id' : HcpModuleId.HBS, 'hash' : binHash } ) if not resp.isSuccess: self.log( 'error tasking new hbs module: %s' % resp ) return False else: self.log( 'error adding 
new hbs module: %s' % resp ) return False for binName, binInfo in kernelToLoad.iteritems(): binary, binSig, binHash = binInfo aid = self.getMaskFor( oid, binName ) resp = self.admin.request( 'hcp.add_module', { 'module_id' : HcpModuleId.KERNEL_ACQ, 'hash' : binHash, 'bin' : binary, 'signature' : binSig } ) if resp.isSuccess: resp = self.admin.request( 'hcp.add_tasking', { 'mask' : aid.asString(), 'module_id' : HcpModuleId.KERNEL_ACQ, 'hash' : binHash } ) if not resp.isSuccess: self.log( 'error tasking new kernel module: %s' % resp ) return False else: self.log( 'error adding new kernel module: %s' % resp ) return False return True def get_global_config( self, msg ): globalConf = { 'global/primary' : '', 'global/secondary' : '', 'global/primary_port' : '', 'global/secondary_port' : '', 'global/sensorpackage' : '', 'global/paging_user' : '', 'global/paging_from' : '', 'global/paging_password' : '', 'global/virustotalkey' : '', 'global/uidomain' : '', 'global/admin_oid' : '', 'global/whatsnew' : '', 'global/outagetext' : '', 'global/outagestate' : '1', 'global/policy' : '', 'global/send_metrics' : '0', 'global/deployment_id' : '', 'global/modeling_level' : 10, 'global/2fa_mode' : 'on', 'global/logging_dir' : '', 'global/s3_bucket' : '', 'global/aws_key_id' : '', 'global/aws_secret_key_id' : '', } info = self.db.execute( 'SELECT conf, value FROM configs WHERE conf IN %s', ( globalConf.keys(), ) ) for row in info: globalConf[ row[ 0 ] ] = row[ 1 ] # Make sure the configs that need to be integers are always integers try: globalConf[ 'global/modeling_level' ] = int( globalConf[ 'global/modeling_level' ] ) except: self.log( "Invalid modeling_level: %s" % globalConf[ 'global/modeling_level' ] ) globalConf[ 'global/modeling_level' ] = 10 return ( True, globalConf ) def get_org_config( self, msg ): oid = uuid.UUID( msg.data[ 'oid' ] ) orgConf = { '%s/slack_token' % oid : '', '%s/slack_bot_token' % oid : '', '%s/webhook_secret' % oid : '', '%s/webhook_dest' % oid : '', } info 
= self.db.execute( 'SELECT conf, value FROM configs WHERE conf IN %s', ( orgConf.keys(), ) ) for row in info: orgConf[ row[ 0 ] ] = row[ 1 ] return ( True, orgConf ) def set_config( self, msg ): req = msg.data conf = req[ 'conf' ] value = req[ 'value' ] byUser = req[ 'by' ] info = self.db.execute( 'UPDATE configs SET value = %s WHERE conf = %s', ( str( value ), conf ) ) try: oid = uuid.UUID( conf.split( '/' )[ 0 ] ) except: oid = None if oid is None: self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'conf_change', 'msg' : 'Config %s was changed by %s.' % ( conf, byUser ) } ) else: self.audit.shoot( 'record', { 'oid' : oid, 'etype' : 'conf_change', 'msg' : 'Config %s was changed by %s.' % ( conf, byUser ) } ) return ( True, {} ) def deploy_org( self, msg ): req = msg.data isGenerateKey = req.get( 'is_generate_key', True ) isSkipProfiles = req.get( 'skip_profiles', False ) oid = uuid.UUID( req[ 'oid' ] ) if isGenerateKey: key = self.generateKey() resp = self.admin.request( 'hbs.add_key', { 'oid' : oid, 'key' : key[ 'priDer' ], 'pub_key' : key[ 'pubDer' ] } ) if not resp.isSuccess: return ( False, resp.error ) packages = self.getSensorPackage() if 0 == len( packages ): return ( False, 'no binaries in package or no package configured' ) if not self.genBinariesForOrg( packages, oid ): return ( False, 'error generating binaries for org' ) if not isSkipProfiles: resp = self.setProfileFor( oid, AgentId.PLATFORM_WINDOWS, SensorConfig.getDefaultWindowsProfile().toProfile() ) if not resp.isSuccess: return ( False, 'error setting default windows profile: %s' % resp ) resp = self.setProfileFor( oid, AgentId.PLATFORM_MACOS, SensorConfig.getDefaultOsxProfile().toProfile() ) if not resp.isSuccess: return ( False, 'error setting default osx profile: %s' % resp ) resp = self.setProfileFor( oid, AgentId.PLATFORM_LINUX, SensorConfig.getDefaultLinuxProfile().toProfile() ) if not resp.isSuccess: return ( False, 'error setting default linux profile: %s' % resp ) return ( 
True, {} ) def get_c2_cert( self, msg ): req = msg.data info = self.db.getOne( 'SELECT conf, value FROM configs WHERE conf = %s', ( 'key/c2', ) ) if info is not None: return ( True, self.unpackKey( info[ 1 ] ) ) return ( False, 'not found' ) def get_root_cert( self, msg ): req = msg.data info = self.db.getOne( 'SELECT conf, value FROM configs WHERE conf = %s', ( 'key/root', ) ) if info is not None: return ( True, self.unpackKey( info[ 1 ] ) ) return ( False, 'not found' ) def update_profile( self, msg ): req = msg.data oid = uuid.UUID( req[ 'oid' ] ) platform = req[ 'platform' ].lower() if 'win' in platform: platform = AgentId.PLATFORM_WINDOWS elif 'osx' in platform or 'mac' in platform: platform = AgentId.PLATFORM_MACOS elif 'lin' in platform: platform = AgentId.PLATFORM_LINUX else: return ( False, 'unknown platform: %s' % platform ) profile = SensorConfig() for colId, status in req[ 'collectors' ].iteritems(): if status is False: profile.collectors[ colId ].disable() else: profile.collectors[ colId ].enable() for eventId, status in req[ 'exfil' ].iteritems(): if status is True: profile.collectors[ 0 ].addExfil( eventId ) profile.collectors[ 11 ].setFrequency( req[ 'os_delta' ] ) resp = self.setProfileFor( oid, platform, profile.toProfile() ) if not resp.isSuccess: return ( False, 'error setting default linux profile: %s' % resp ) return ( True, ) def get_profiles( self, msg ): req = msg.data oid = req[ 'oid' ] isHumanReadable = req.get( 'is_human_readable', False ) profiles = {} profiles[ 'win' if not isHumanReadable else 'Windows' ] = self.getProfileFor( oid, AgentId.PLATFORM_WINDOWS, isHumanReadable = isHumanReadable ) profiles[ 'osx' if not isHumanReadable else 'MacOS' ] = self.getProfileFor( oid, AgentId.PLATFORM_MACOS, isHumanReadable = isHumanReadable ) profiles[ 'lin' if not isHumanReadable else 'Linux' ] = self.getProfileFor( oid, AgentId.PLATFORM_LINUX, isHumanReadable = isHumanReadable ) return ( True, profiles ) def get_supported_events( self, msg ): 
allEvents = {} for attrName, attrVal in Symbols.notification.__dict__.iteritems(): if attrName == 'lookups': continue allEvents[ attrName ] = int( attrVal ) return ( True, allEvents ) def get_capabilities( self, msg ): req = msg.data info = self.db.getOne( 'SELECT conf, value FROM configs WHERE conf = %s', ( 'global/capabilities', ) ) if info is not None: return ( True, { 'capabilities' : info[ 1 ] } ) return ( False, 'not found' ) def get_quick_detects( self, msg ): req = msg.data info = self.db.getOne( 'SELECT conf, value FROM configs WHERE conf = %s', ( 'global/quick_detects', ) ) if info is not None: return ( True, { 'detects' : info[ 1 ] } ) return ( False, 'not found' ) def del_sensor( self, msg ): req = msg.data sid = AgentId( req[ 'sid' ] ).sensor_id self.db.execute( 'DELETE FROM sensor_states WHERE sid = %s', ( sid, ) ) return ( True, ) def refresh_all_installers( self, msg ): resp = self.admin.request( 'hcp.get_whitelist', {} ) if not resp.isSuccess: return ( False, resp.error ) results = [] for entry in resp.data[ 'whitelist' ]: entry[ 'desc' ] = entry[ 'description' ] results.append( self.set_installer_info( None, optEntry = entry ) ) self.audit.shoot( 'record', { 'oid' : self.admin_oid, 'etype' : 'whitelist_refresh', 'msg' : 'All installation keys have been refreshed.' } ) return ( True, results ) def set_installer_info( self, msg, optEntry = None ): if optEntry is not None: req = optEntry else: req = msg.data oid = uuid.UUID( req[ 'oid' ] ) iid = req.get( 'iid', None ) tags = req.get( 'tags', [] ) desc = req.get( 'desc', '' ) if iid is None: # This should be a brand new installer whitelist entry. iid = uuid.uuid4() else: # This entry should already exist. 
resp = self.admin.request( 'hcp.get_whitelist', { 'oid' : oid, 'iid' : iid, } ) if not resp.isSuccess: return ( False, resp.error ) if 0 == len( resp.data[ 'whitelist' ] ): return ( False, 'unknown installer' ) iid = uuid.UUID( iid ) info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'key/root', ) ) if not info or not info[ 0 ]: self.log( 'failed to get root key' ) return ( False, 'error getting root key' ) rootKey = self.unpackKey( info[ 0 ] ) rootPub = rootKey[ 'pubDer' ] rootPri = rootKey[ 'priDer' ] del( rootKey ) info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/primary', ) ) if not info or not info[ 0 ]: self.log( 'failed to get primary domain' ) return ( False, 'error getting primary domain' ) primaryDomain = info[ 0 ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/primary_port', ) ) if not info or not info[ 0 ]: self.log( 'failed to get primary port' ) return ( False, 'error getting primary port' ) primaryPort = int( info[ 0 ] ) info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/secondary', ) ) if not info or not info[ 0 ]: self.log( 'failed to get secondary domain' ) return ( False, 'error getting secondary domain' ) secondaryDomain = info[ 0 ] info = self.db.getOne( 'SELECT value FROM configs WHERE conf = %s', ( 'global/secondary_port', ) ) if not info or not info[ 0 ]: self.log( 'failed to get secondary port' ) return ( False, 'error getting secondary port' ) secondaryPort = int( info[ 0 ] ) _ = Symbols bootstrap = ( rSequence().addStringA( _.hcp.PRIMARY_URL, primaryDomain ) .addInt16( _.hcp.PRIMARY_PORT, primaryPort ) .addStringA( _.hcp.SECONDARY_URL, secondaryDomain ) .addInt16( _.hcp.SECONDARY_PORT, secondaryPort ) .addSequence( _.base.HCP_IDENT, rSequence().addBuffer( _.base.HCP_ORG_ID, oid.bytes ) .addBuffer( _.base.HCP_INSTALLER_ID, iid.bytes ) .addBuffer( _.base.HCP_SENSOR_ID, uuid.UUID( '00000000-0000-0000-0000-000000000000' ).bytes ) 
.addInt32( _.base.HCP_PLATFORM, 0 ) .addInt32( _.base.HCP_ARCHITECTURE, 0 ) ) .addBuffer( _.hcp.ROOT_PUBLIC_KEY, rootPub ) ) bootstrap = base64.b64encode( rpcm().serialise( bootstrap ) ) resp = self.admin.request( 'hcp.add_whitelist', { 'oid' : oid, 'iid' : iid, 'bootstrap' : bootstrap, 'description' : desc, 'tags' : tags } ) if not resp.isSuccess: return ( False, resp.error ) return ( True, { 'oid' : oid, 'iid' : iid } ) def del_installer( self, msg ): req = msg.data oid = req[ 'oid' ] iid = req[ 'iid' ] resp = self.admin.request( 'hcp.remove_whitelist', { 'oid' : oid, 'iid' : iid } ) if not resp.isSuccess: return ( False, resp.error ) return ( True, )