def run(self):
    self.logutil.log('Starting Execution')
    self.active = True
    channel = self.settings.get('channel')
    time.sleep(3)

    self.logutil.log('Submitting Drone Task Request')
    # Task drones to capture beacon request packets: filter on the frame
    # control field and on the command identifier byte at offset 7
    # (0x07, Beacon Request).
    parameters = {'callback': self.config.upload_url,
                  'filter': {'fcf': (0x0300, 0x0300),
                             'byteoffset': (7, 0xff, 0x07)}}
    uuid_task1 = self.taskDrone(droneIndexList=[0],
                                task_plugin='CapturePlugin',
                                task_channel=channel,
                                task_parameters=parameters,
                                module_index=self.moduleIndex())
    if uuid_task1 is False:
        self.logutil.log('Failed to Task Drone')
    else:
        self.logutil.log('Successfully tasked Drone with UUID: {0}'.format(uuid_task1))

    # Get packets from the database and run statistics.
    while self.active:
        datetime_now = datetime.utcnow()
        datetime_t30 = datetime_now - timedelta(seconds=30)
        datetime_t120 = datetime_now - timedelta(seconds=120)
        # Beacon requests seen in the last 30 seconds.
        n30 = self.getPackets(valueFilterList=[('datetime', '>', dateToMicro(datetime_t30))],
                              uuidFilterList=[uuid_task1], count=True)
        # Beacon requests seen between 120 and 30 seconds ago.
        n120 = self.getPackets(valueFilterList=[('datetime', '<', dateToMicro(datetime_t30)),
                                                ('datetime', '>', dateToMicro(datetime_t120))],
                               uuidFilterList=[uuid_task1], count=True)
        # 30-120 seconds ago is a 90 second range, i.e. three 30-second intervals.
        an90 = n120 / 3.0
        self.logutil.log("debug: Found {0} beacon requests in the last 30 seconds, "
                         "and {1} per 30 seconds on average over the prior 90 seconds "
                         "(absolute {2}).".format(n30, an90, n120))

        # Compare the last 30 seconds against a moving average of how many of
        # these we typically see in a given time; if we're suddenly and
        # significantly higher than that, we're concerned.
        if n30 > 2 and n30 > (an90 * 1.5):
            self.logutil.log(">>>>>ALERT: Noticed increased beacon requests. "
                             "(n30={0}, an90={1})".format(n30, an90))
            self.registerEvent(name='IncreasedBeaconRequestDetection',
                               details={'channel': channel, 'n30': n30,
                                        'n120': n120, 'an90': an90})
            self.generateAlert('IncreasedBeaconRequestDetection')

        # TODO: Look for cyclic patterns that indicate a slower scan, perhaps
        # one that is switching across all the channels.
        time.sleep(10)
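# A minimal, self-contained sketch of the spike-detection rule above, pulled
# out as a pure function so the threshold logic can be exercised in isolation.
# The helper name, the floor/ratio parameters, and the example values are
# hypothetical, not part of the original module.
def is_beacon_request_spike(n30, n120, floor=2, ratio=1.5):
    """True when the last 30 seconds exceed both an absolute floor and
    `ratio` times the per-30-second average of the prior 90 seconds."""
    an90 = n120 / 3.0  # the 30-120s window spans three 30-second intervals
    return n30 > floor and n30 > (an90 * ratio)

# Example: 9 requests in the last 30s against 6 over the prior 90s
# (an average of 2 per 30s) trips the rule; 2 against 6 does not.
assert is_beacon_request_spike(9, 6)
assert not is_beacon_request_spike(2, 6)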
def registerEvent(self, name, details={}, related_packets=[], related_uuids=[]):
    event_data = {'module': self.name,
                  'name': name,
                  'details': details,
                  'related_packets': related_packets,
                  'related_uuids': related_uuids,
                  'datetime': dateToMicro(datetime.utcnow())}
    return self.database.storeEvent(event_data)
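# dateToMicro() is used for every stored timestamp. A rough sketch of what it
# is assumed to do here (convert a UTC datetime to integer microseconds since
# the Unix epoch); the real helper is defined elsewhere in the codebase and
# may differ.
import calendar
from datetime import datetime

def date_to_micro_sketch(dt):
    """Convert a UTC datetime to integer microseconds since the epoch."""
    return calendar.timegm(dt.utctimetuple()) * 1000000 + dt.microsecond

# e.g. date_to_micro_sketch(datetime(1970, 1, 1, 0, 0, 1)) == 1000000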
def do_callback(self, uuid, cburl, pkt):
    # Prepare the packet for JSON transport: tag it with the task UUID,
    # flatten the timestamp, and base64-encode the raw bytes.
    pkt['uuid'] = uuid
    pkt['datetime'] = dateToMicro(pkt['datetime'])
    pkt['bytes'] = base64.b64encode(pkt['bytes'])
    # Kill KillerBee's backwards-compatible integer keys.
    if 0 in pkt: del pkt[0]
    if 1 in pkt: del pkt[1]
    if 2 in pkt: del pkt[2]
    http_headers = {'Content-Type': 'application/json', 'User-Agent': 'Drone'}
    post_data_json = json.dumps({'uuid': uuid, 'pkt': pkt})
    post_object = urllib2.Request(cburl, post_data_json, http_headers)
    try:
        response = urllib2.urlopen(post_object)
        self.logutil.debug('Successful Data Upload task {0} data to: {1}'.format(uuid, cburl))
    except IOError:
        self.logutil.debug('Failed Data Upload task {0} to: {1}'.format(uuid, cburl))
    self.callbacks += 1
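# do_callback() POSTs JSON with base64-encoded packet bytes to the callback
# URL. A minimal sketch of how a receiving endpoint could decode that body;
# the function name and framework-free style are assumptions, not the
# project's actual server code.
import base64
import json

def decode_drone_upload(request_body):
    """Decode one do_callback() POST body back into (uuid, pkt dict)."""
    payload = json.loads(request_body)
    pkt = payload['pkt']
    pkt['bytes'] = base64.b64decode(pkt['bytes'])  # undo the upload encoding
    return payload['uuid'], pkt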
def run(self):
    self.logutil.log('Starting Execution')
    self.active = True
    self.start_time = dateToMicro(datetime.utcnow())
    while self.active:
        if DEV_DEBUG:
            self.logutil.debug('Checking for new rules')
        time.sleep(3)
        # Check the server for new rules ('GET /rules/updatecheck') and, if
        # there are any, load them.
        '''
        if self.wids.checkNewRules():
            new_rules = self.wids.getNewRules()
        '''
        # Evaluate each rule serially.
        self.logutil.debug('Evaluating rules')
        for RuleObject in self.rules:
            self.evaluateRule(RuleObject)
    self.logutil.log('Terminating Execution')
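# The rule-refresh step above is stubbed out. A hedged sketch of what polling
# 'GET /rules/updatecheck' could look like, assuming the server answers with a
# JSON body containing an 'updated' flag; the endpoint shape, function name,
# and wids_url parameter are assumptions, not the project's actual API.
import json
import urllib2

def check_new_rules_sketch(wids_url):
    """Return True if the WIDS server reports that new rules are available."""
    try:
        response = urllib2.urlopen(wids_url.rstrip('/') + '/rules/updatecheck')
        return json.loads(response.read()).get('updated', False)
    except IOError:
        return False  # treat an unreachable server as 'no new rules' for now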
def storeAlert(self, alert_name):
    return self.storeElement(Alert({'name': alert_name,
                                    'datetime': dateToMicro(datetime.utcnow())}))