def trigger_reinvestigation(candidates):
    '''Kick off reinvestigations for endpoints popped from *candidates*,
    bounded by the remaining concurrent-investigation budget.

    NOTE(review): the in-flight investigation counter is not incremented
    here — presumably it is recomputed elsewhere each cycle; confirm.
    '''
    # get random order of things that are known
    budget = (schedule_func.controller['max_concurrent_reinvestigations'] -
              schedule_func.s.investigations)
    for _ in range(budget):
        if not candidates:
            continue
        chosen = candidates.pop()
        schedule_func.logger.info(
            'Starting reinvestigation on: {0} {1}'.format(
                chosen.name, chosen.state))
        chosen.reinvestigate()
        chosen.p_prev_states.append((chosen.state, int(time.time())))
        mirrored = Actions(chosen, schedule_func.s.sdnc).mirror_endpoint()
        if not mirrored:
            schedule_func.logger.warning(
                'Unable to mirror the endpoint: {0}'.format(chosen.name))
            continue
        try:
            # Count the new capture in redis; best-effort only.
            schedule_func.s.r.hincrby('vent_plugin_counts', 'ncapture')
        except Exception as e:  # pragma: no cover
            schedule_func.logger.error(
                'Failed to update count of plugins because: {0}'.format(
                    str(e)))
    return
def mirror_endpoint(self, endpoint):
    '''Ask the SDN controller to mirror *endpoint*; bump the
    network-tools counter on success, warn on failure.'''
    if Actions(endpoint, self.sdnc).mirror_endpoint():
        self.prc.inc_network_tools_counts()
        return
    self.logger.warning('Unable to mirror the endpoint: {0}'.format(
        endpoint.name))
def test_Actions():
    """ Tests Actions """
    ep = Endpoint('foo')
    ep.endpoint_data = {'mac': '00:00:00:00:00:00'}
    actions = Actions(ep, SDNConnect().sdnc)
    # Exercise every endpoint operation once.
    for op in (actions.mirror_endpoint,
               actions.unmirror_endpoint,
               actions.shutdown_endpoint):
        op()
def mirror_endpoint(self, endpoint):
    '''Ask the SDN controller to mirror *endpoint*; record the capture
    count in redis on success, warn on failure.'''
    if not Actions(endpoint, self.sdnc).mirror_endpoint():
        self.logger.warning('Unable to mirror the endpoint: {0}'.format(
            endpoint.name))
        return
    try:
        # Best-effort bookkeeping; never let redis trouble break mirroring.
        self.r.hincrby('network_tools_counts', 'ncapture')
    except Exception as e:  # pragma: no cover
        self.logger.error(
            'Failed to update count of plugins because: {0}'.format(
                str(e)))
def test_Actions():
    """ Tests Actions """
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
    }
    sdn_connect = SDNConnect(Config().get_config())
    actions = Actions(ep, sdn_connect.sdnc)
    # Exercise every endpoint operation once.
    for op in (actions.mirror_endpoint,
               actions.unmirror_endpoint,
               actions.shutdown_endpoint):
        op()
def test_Actions_nosdn():
    """ Tests Actions with no SDN controller """
    ep = Endpoint('foo')
    ep.endpoint_data = {
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
    }
    connect = SDNConnect()
    connect.sdnc = None  # simulate a missing SDN controller
    actions = Actions(ep, connect.sdnc)
    for op in (actions.mirror_endpoint,
               actions.unmirror_endpoint,
               actions.shutdown_endpoint):
        op()
def trigger_reinvestigation(candidates):
    '''Start reinvestigations for endpoints popped from *candidates*,
    bounded by the remaining concurrent-investigation budget.'''
    # get random order of things that are known
    budget = (func.controller['max_concurrent_reinvestigations'] -
              func.s.investigations)
    for _ in range(budget):
        if not candidates:
            continue
        chosen = candidates.pop()
        func.logger.info('Starting reinvestigation on: {0} {1}'.format(
            chosen.name, chosen.state))
        chosen.reinvestigate()
        # Claim an investigation slot for this endpoint.
        func.s.investigations += 1
        chosen.p_prev_states.append((chosen.state, int(time.time())))
        if not Actions(chosen, func.s.sdnc).mirror_endpoint():
            func.logger.warning(
                'Unable to mirror the endpoint: {0}'.format(chosen.name))
    return
def find_new_machines(self, machines):
    '''parse switch structure to find new machines added to network since last call

    For each polled machine dict: locate the matching endpoint by hash,
    drive its active/inactive state transitions, and create endpoints for
    machines never seen before. Persists via store_endpoints() at the end.
    '''
    # Refresh the cached endpoint list before diffing against this poll.
    self.get_stored_endpoints()
    for machine in machines:
        # The hash serves as the endpoint's stable identity/name.
        h = Endpoint.make_hash(machine)
        # Linear scan for an existing endpoint with that name.
        ep = None
        for endpoint in self.endpoints:
            if h == endpoint.name:
                ep = endpoint
        if ep is not None and ep.endpoint_data != machine and not ep.ignore:
            # Known endpoint whose data changed (and is not ignored).
            self.logger.info('Endpoint changed: {0}:{1}'.format(
                h, machine))
            ep.endpoint_data = deepcopy(machine)
            if ep.state == 'inactive' and machine['active'] == 1:
                # Came back online: resume the remembered next state if any.
                if ep.p_next_state in ['known', 'abnormal']:
                    ep.trigger(ep.p_next_state)
                else:
                    ep.unknown()
                ep.p_prev_states.append((ep.state, int(time.time())))
            elif ep.state != 'inactive' and machine['active'] == 0:
                # Went offline: stop any active mirror first.
                if ep.state in ['mirroring', 'reinvestigating']:
                    status = Actions(ep, self.sdnc).unmirror_endpoint()
                    if not status:
                        self.logger.warning(
                            'Unable to unmirror the endpoint: {0}'.format(
                                ep.name))
                    # Release this endpoint's investigation slot.
                    self.investigations -= 1
                    # Remember what to resume on reactivation.
                    if ep.state == 'mirroring':
                        ep.p_next_state = 'mirror'
                    elif ep.state == 'reinvestigating':
                        ep.p_next_state = 'reinvestigate'
                if ep.state in ['known', 'abnormal']:
                    ep.p_next_state = ep.state
                ep.inactive()
                ep.p_prev_states.append((ep.state, int(time.time())))
        elif ep is None:
            # Never seen before: create and track a new endpoint.
            self.logger.info('Detected new endpoint: {0}:{1}'.format(
                h, machine))
            m = Endpoint(h)
            m.p_prev_states.append((m.state, int(time.time())))
            m.endpoint_data = deepcopy(machine)
            self.endpoints.append(m)
    self.store_endpoints()
    return
def handler_action_update_acls(my_obj):
    '''Force-apply the ACL rules keyed by IP in *my_obj* to the first
    endpoint known for each IP; always returns an empty dict.'''
    for ip, rules in my_obj.items():
        endpoints = self.s.endpoints_by_ip(ip)
        if not endpoints:
            continue
        endpoint = endpoints[0]
        try:
            status = Actions(endpoint, self.s.sdnc).update_acls(
                rules_file=self.controller['RULES_FILE'],
                endpoints=endpoints,
                force_apply_rules=rules)
            if not status:
                self.logger.warning(
                    'Unable to apply rules: {0} to endpoint: {1}'.format(
                        rules, endpoint.name))
        except Exception as e:
            self.logger.error(
                'Unable to apply rules: {0} to endpoint: {1} because {2}'.format(
                    rules, endpoint.name, str(e)))
    return {}
def find_new_machines(self, machines):
    '''parse switch structure to find new machines added to network since last call

    Enriches each machine dict (ether vendor, batched rDNS), diffs it
    against the known endpoints, creates/updates endpoints, and re-applies
    automated ACLs when the endpoint set changed.
    '''
    change_acls = False
    machine_ips = set()
    # Pass 1: enrich records and collect every IP for one batched DNS
    # resolution below.
    for machine in machines:
        machine['ether_vendor'] = get_ether_vendor(
            machine['mac'],
            '/poseidon/poseidon/metadata/nmap-mac-prefixes.txt')
        machine_ips.update(self._parse_machine_ip(machine))
        if 'controller_type' not in machine:
            machine.update({'controller_type': 'none', 'controller': ''})
    if machine_ips:
        self.logger.debug('resolving %s' % machine_ips)
        resolved_machine_ips = self.dns_resolver.resolve_ips(
            list(machine_ips))
        self.logger.debug('resolver results %s', resolved_machine_ips)
        for machine in machines:
            self._update_machine_rdns(machine, resolved_machine_ips)
    # Pass 2: match machines to endpoints and drive state transitions.
    for machine in machines:
        # A machine on a configured trunk port hashes differently so it is
        # tracked as a distinct endpoint.
        trunk = False
        for sw in self.trunk_ports:
            if sw == machine['segment'] and self.trunk_ports[sw].split(
                    ',')[1] == str(
                        machine['port']) and self.trunk_ports[sw].split(
                        ',')[0] == machine['mac']:
                trunk = True
        h = Endpoint.make_hash(machine, trunk=trunk)
        ep = self.endpoints.get(h, None)
        if ep is None:
            # New endpoint: record it and flag ACL recalculation.
            change_acls = True
            m = endpoint_factory(h)
            m.endpoint_data = deepcopy(machine)
            self.endpoints[m.name] = m
            self.logger.info('Detected new endpoint: {0}:{1}'.format(
                m.name, machine))
        else:
            # Merge newly learned IPs into the stored record before diffing.
            self.merge_machine_ip(ep.endpoint_data, machine)
        if ep and ep.endpoint_data != machine and not ep.ignore:
            diff_txt = self._diff_machine(ep.endpoint_data, machine)
            self.logger.info('Endpoint changed: {0}:{1}'.format(
                h, diff_txt))
            change_acls = True
            ep.endpoint_data = deepcopy(machine)
            if ep.state == 'inactive' and machine['active'] == 1:
                ep.reactivate()
            elif ep.state != 'inactive' and machine['active'] == 0:
                # Going offline: stop mirroring before deactivating.
                if ep.mirror_active():
                    self.unmirror_endpoint(ep)
                ep.deactivate()
    if change_acls and self.controller['AUTOMATED_ACLS']:
        status = Actions(None, self.sdnc).update_acls(
            rules_file=self.controller['RULES_FILE'],
            endpoints=self.endpoints.values())
        # update_acls may return a list; status[1] then holds the applied
        # changes as (rule, mac, segment, port, ...) records.
        if isinstance(status, list):
            self.logger.info(
                'Automated ACLs did the following: \n{0}'.format(status[1]))
            for item in status[1]:
                machine = {
                    'mac': item[1],
                    'segment': item[2],
                    'port': item[3]
                }
                h = Endpoint.make_hash(machine)
                ep = self.endpoints.get(h, None)
                if ep:
                    # Record the applied rule with a timestamp.
                    ep.acl_data.append(
                        ((item[0], item[4], item[5]), int(time.time())))
    self.refresh_endpoints()
def process(self):
    '''Main monitor loop: consume rabbit messages, apply ML verdicts to
    endpoints, mirror queued endpoints within the investigation budget,
    and time out mirrors that never reported back.

    Runs until the SIGINT handler sets CTRL_C['STOP'].
    '''
    global CTRL_C
    signal.signal(signal.SIGINT, partial(self.signal_handler))
    while not CTRL_C['STOP']:
        time.sleep(1)
        found_work, item = self.get_q_item()
        ml_returns = {}
        if found_work and item[0] == self.controller[
                'FA_RABBIT_ROUTING_KEY']:
            # FAUCET events are just accumulated for later processing.
            self.faucet_event.append(self.format_rabbit_message(item))
            self.logger.debug('Faucet event: {0}'.format(
                self.faucet_event))
        elif found_work:
            msg = self.format_rabbit_message(item)
            if 'data' in msg:
                ml_returns = msg['data']
            if ml_returns:
                self.logger.info('ML results: {0}'.format(ml_returns))
            # extras keeps results for devices that are not yet endpoints.
            extras = deepcopy(ml_returns)
            # process results from ml output and update impacted endpoints
            for ep in self.s.endpoints.values():
                if ep.name in ml_returns:
                    del extras[ep.name]
                if ep.name in ml_returns and 'valid' in ml_returns[
                        ep.name] and not ep.ignore:
                    if ep.state in ['mirroring', 'reinvestigating']:
                        status = Actions(
                            ep, self.s.sdnc).unmirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to unmirror the endpoint: {0}'.
                                format(ep.name))
                    if ml_returns[ep.name]['valid']:
                        ml_decision = None
                        if 'decisions' in ml_returns[
                                ep.name] and 'behavior' in ml_returns[
                                    ep.name]['decisions']:
                            ml_decision = ml_returns[
                                ep.name]['decisions']['behavior']
                        if ml_decision == 'normal':
                            ep.known()
                        else:
                            ep.abnormal()
                    else:
                        ep.unknown()
                    ep.p_prev_states.append((ep.state, int(time.time())))
            extra_machines = []
            self.logger.debug('extra devices: {0}'.format(extras))
            # BUG FIX: extras is a dict keyed by endpoint name (see the
            # `del extras[ep.name]` above), so iterating it directly yields
            # key strings and device['valid'] raised TypeError; iterate
            # the result payloads instead.
            for device in extras.values():
                if device['valid']:
                    extra_machine = {
                        'mac': device['source_mac'],
                        'segment': NO_DATA,
                        'port': NO_DATA,
                        'tenant': NO_DATA,
                        'active': 0,
                        'name': None
                    }
                    try:
                        source_ip = ipaddress.ip_address(
                            device['source_ip'])
                    except ValueError:
                        source_ip = None
                    if source_ip:
                        # Store under 'ipv4' or 'ipv6' per address version.
                        extra_machine['ipv%u' % source_ip.version] = str(
                            source_ip)
                    extra_machines.append(extra_machine)
            self.s.find_new_machines(extra_machines)
        # Work out which queued endpoints may start this cycle.
        queued_endpoints = [
            endpoint for endpoint in self.s.endpoints.values()
            if not endpoint.ignore and endpoint.state == 'queued'
            and endpoint.p_next_state != 'inactive'
        ]
        # Recompute the number of in-flight investigations from state.
        self.s.investigations = len([
            endpoint for endpoint in self.s.endpoints.values()
            if endpoint.state in ['mirroring', 'reinvestigating']
        ])
        # mirror things in the order they got added to the queue
        queued_endpoints = sorted(
            queued_endpoints, key=lambda x: x.p_prev_states[-1][1])
        investigation_budget = max(
            self.controller['max_concurrent_reinvestigations'] -
            self.s.investigations, 0)
        self.logger.debug(
            'investigations {0}, budget {1}, queued {2}'.format(
                str(self.s.investigations), str(investigation_budget),
                str(len(queued_endpoints))))
        for endpoint in queued_endpoints[:investigation_budget]:
            endpoint.trigger(endpoint.p_next_state)
            endpoint.p_next_state = None
            endpoint.p_prev_states.append(
                (endpoint.state, int(time.time())))
            status = Actions(endpoint, self.s.sdnc).mirror_endpoint()
            if status:
                try:
                    # Best-effort capture bookkeeping in redis.
                    if self.s.r:
                        self.s.r.hincrby('vent_plugin_counts', 'ncapture')
                except Exception as e:  # pragma: no cover
                    self.logger.error(
                        'Failed to update count of plugins because: {0}'.
                        format(str(e)))
            else:
                self.logger.warning(
                    'Unable to mirror the endpoint: {0}'.format(
                        endpoint.name))
        for endpoint in self.s.endpoints.values():
            if not endpoint.ignore:
                if self.s.sdnc:
                    if endpoint.state == 'unknown':
                        endpoint.p_next_state = 'mirror'
                        endpoint.queue()
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                    elif endpoint.state in [
                            'mirroring', 'reinvestigating'
                    ]:
                        cur_time = int(time.time())
                        # timeout after 2 times the reinvestigation frequency
                        # in case something didn't report back, put back in an
                        # unknown state
                        if cur_time - endpoint.p_prev_states[-1][
                                1] > 2 * self.controller[
                                    'reinvestigation_frequency']:
                            self.logger.debug(
                                'timing out: {0} and setting to unknown'.
                                format(endpoint.name))
                            status = Actions(
                                endpoint, self.s.sdnc).unmirror_endpoint()
                            if not status:
                                self.logger.warning(
                                    'Unable to unmirror the endpoint: {0}'.
                                    format(endpoint.name))
                            endpoint.unknown()
                            endpoint.p_prev_states.append(
                                (endpoint.state, int(time.time())))
                else:
                    # No SDN controller: treat everything as known.
                    if endpoint.state != 'known':
                        endpoint.known()
        self.s.store_endpoints()
    return
def format_rabbit_message(self, item):
    '''
    read a message off the rabbit_q
    the message should be item = (routing_key,msg)

    Dispatches on routing_key to mutate endpoint state; returns a dict of
    payload data for decider results and FAUCET events, otherwise {}.
    '''
    ret_val = {}
    routing_key, my_obj = item
    self.logger.debug('rabbit_message:{0}'.format(my_obj))
    my_obj = json.loads(my_obj)
    self.logger.debug('routing_key:{0}'.format(routing_key))
    # Endpoint names queued for deletion; processed at the end.
    remove_list = []
    if routing_key == 'poseidon.algos.decider':
        self.logger.debug('decider value:{0}'.format(my_obj))
        for name, message in my_obj.items():
            endpoint = self.s.endpoints.get(name, None)
            if endpoint and message.get('plugin', None) == 'ncapture':
                # A finished capture drops the endpoint back to unknown.
                endpoint.trigger('unknown')
                endpoint.p_next_state = None
                endpoint.p_prev_states.append(
                    (endpoint.state, int(time.time())))
                if message.get('valid', False):
                    ret_val.update(my_obj)
                else:
                    ret_val = {}
                # Only the first matching ncapture message is acted on.
                break
    elif routing_key == 'poseidon.action.ignore':
        for name in my_obj:
            endpoint = self.s.endpoints.get(name, None)
            if endpoint:
                endpoint.ignore = True
    elif routing_key == 'poseidon.action.clear.ignored':
        for name in my_obj:
            endpoint = self.s.endpoints.get(name, None)
            if endpoint:
                endpoint.ignore = False
    elif routing_key == 'poseidon.action.change':
        # my_obj is an iterable of (endpoint name, target state) pairs.
        for name, state in my_obj:
            endpoint = self.s.endpoints.get(name, None)
            if endpoint:
                try:
                    # Leaving a mirroring state for anything other than a
                    # mirror/reinvestigate target requires unmirroring first.
                    if state != 'mirror' and state != 'reinvestigate' and (
                            endpoint.state == 'mirroring' or
                            endpoint.state == 'reinvestigating'):
                        status = Actions(endpoint,
                                         self.s.sdnc).unmirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to unmirror the endpoint: {0}'.
                                format(endpoint.name))
                    endpoint.trigger(state)
                    endpoint.p_next_state = None
                    endpoint.p_prev_states.append(
                        (endpoint.state, int(time.time())))
                    # Entering a mirroring state starts the mirror and
                    # records the capture count in redis (best effort).
                    if endpoint.state == 'mirroring' or endpoint.state == 'reinvestigating':
                        status = Actions(endpoint,
                                         self.s.sdnc).mirror_endpoint()
                        if status:
                            try:
                                self.s.r.hincrby('vent_plugin_counts',
                                                 'ncapture')
                            except Exception as e:  # pragma: no cover
                                self.logger.error(
                                    'Failed to update count of plugins because: {0}'
                                    .format(str(e)))
                        else:
                            self.logger.warning(
                                'Unable to mirror the endpoint: {0}'.
                                format(endpoint.name))
                except Exception as e:  # pragma: no cover
                    self.logger.error(
                        'Unable to change endpoint {0} because: {1}'.
                        format(endpoint.name, str(e)))
    elif routing_key == 'poseidon.action.update_acls':
        # my_obj maps IP -> rules to force-apply.
        for ip in my_obj:
            rules = my_obj[ip]
            endpoints = self.s.endpoints_by_ip(ip)
            if endpoints:
                endpoint = endpoints[0]
                try:
                    status = Actions(endpoint, self.s.sdnc).update_acls(
                        rules_file=self.controller['RULES_FILE'],
                        endpoints=endpoints,
                        force_apply_rules=rules)
                    if not status:
                        self.logger.warning(
                            'Unable to apply rules: {0} to endpoint: {1}'.
                            format(rules, endpoint.name))
                except Exception as e:
                    self.logger.error(
                        'Unable to apply rules: {0} to endpoint: {1} because {2}'
                        .format(rules, endpoint.name, str(e)))
    elif routing_key == 'poseidon.action.remove':
        remove_list = [name for name in my_obj]
    elif routing_key == 'poseidon.action.remove.ignored':
        remove_list = [
            endpoint.name for endpoint in self.s.endpoints.values()
            if endpoint.ignore
        ]
    elif routing_key == 'poseidon.action.remove.inactives':
        remove_list = [
            endpoint.name for endpoint in self.s.endpoints.values()
            if endpoint.state == 'inactive'
        ]
    elif routing_key == self.controller['FA_RABBIT_ROUTING_KEY']:
        self.logger.debug('FAUCET Event:{0}'.format(my_obj))
        ret_val.update(my_obj)
    # Perform any deletions collected above.
    for endpoint_name in remove_list:
        if endpoint_name in self.s.endpoints:
            del self.s.endpoints[endpoint_name]
    return ret_val
def find_new_machines(self, machines):
    '''parse switch structure to find new machines added to network since last call

    Enriches each machine dict, diffs it against known endpoints, drives
    active/inactive transitions, and re-applies automated ACLs when the
    endpoint set changed. Persists via store_endpoints() at the end.

    Fix: ep.acl_data.append() was called with two arguments (the rule
    tuple and the timestamp), which raises TypeError — list.append takes
    exactly one argument. The record is now appended as a single
    (rule, timestamp) tuple.
    '''
    change_acls = False
    for machine in machines:
        machine['ether_vendor'] = get_ether_vendor(
            machine['mac'],
            '/poseidon/poseidon/metadata/nmap-mac-prefixes.txt')
        machine.update(self._parse_machine_ip(machine))
        if 'controller_type' not in machine:  # idiomatic `not in`
            machine.update({'controller_type': 'none', 'controller': ''})
        # A machine on a configured trunk port hashes differently so it is
        # tracked as a distinct endpoint.
        trunk = False
        for sw in self.trunk_ports:
            if sw == machine['segment'] and self.trunk_ports[sw].split(
                    ',')[1] == str(
                        machine['port']) and self.trunk_ports[sw].split(
                        ',')[0] == machine['mac']:
                trunk = True
        h = Endpoint.make_hash(machine, trunk=trunk)
        ep = self.endpoints.get(h, None)
        if ep is None:
            # New endpoint: record it and flag ACL recalculation.
            change_acls = True
            m = endpoint_factory(h)
            m.p_prev_states.append((m.state, int(time.time())))
            m.endpoint_data = deepcopy(machine)
            self.endpoints[m.name] = m
            self.logger.info('Detected new endpoint: {0}:{1}'.format(
                m.name, machine))
        else:
            # Merge newly learned IPs into the stored record before diffing.
            self.merge_machine_ip(ep.endpoint_data, machine)
        if ep and ep.endpoint_data != machine and not ep.ignore:
            diff_txt = self._diff_machine(ep.endpoint_data, machine)
            self.logger.info('Endpoint changed: {0}:{1}'.format(
                h, diff_txt))
            change_acls = True
            ep.endpoint_data = deepcopy(machine)
            if ep.state == 'inactive' and machine['active'] == 1:
                # Came back online: resume the remembered next state if any.
                if ep.p_next_state in ['known', 'abnormal']:
                    ep.trigger(ep.p_next_state)
                else:
                    ep.unknown()
                ep.p_prev_states.append((ep.state, int(time.time())))
            elif ep.state != 'inactive' and machine['active'] == 0:
                # Went offline: stop any active mirror first.
                if ep.state in ['mirroring', 'reinvestigating']:
                    status = Actions(ep, self.sdnc).unmirror_endpoint()
                    if not status:
                        self.logger.warning(
                            'Unable to unmirror the endpoint: {0}'.format(
                                ep.name))
                    # Remember what to resume on reactivation.
                    if ep.state == 'mirroring':
                        ep.p_next_state = 'mirror'
                    elif ep.state == 'reinvestigating':
                        ep.p_next_state = 'reinvestigate'
                if ep.state in ['known', 'abnormal']:
                    ep.p_next_state = ep.state
                ep.inactive()
                ep.p_prev_states.append((ep.state, int(time.time())))
    if change_acls and self.controller['AUTOMATED_ACLS']:
        status = Actions(None, self.sdnc).update_acls(
            rules_file=self.controller['RULES_FILE'],
            endpoints=self.endpoints.values())
        # update_acls may return a list; status[1] then holds the applied
        # changes as (rule, mac, segment, port, ...) records.
        if isinstance(status, list):
            self.logger.info(
                'Automated ACLs did the following: {0}'.format(status[1]))
            for item in status[1]:
                machine = {
                    'mac': item[1],
                    'segment': item[2],
                    'port': item[3]
                }
                h = Endpoint.make_hash(machine)
                ep = self.endpoints.get(h, None)
                if ep:
                    # FIX: append a single (rule, timestamp) record.
                    ep.acl_data.append(
                        ((item[0], item[4], item[5]), int(time.time())))
    self.store_endpoints()
    self.get_stored_endpoints()
def format_rabbit_message(self, item):
    '''
    read a message off the rabbit_q
    the message should be item = (routing_key,msg)

    Dispatches on routing_key to mutate endpoint state (self.s.endpoints
    is a list here); returns payload data for decider results and FAUCET
    events, otherwise {}.
    '''
    ret_val = {}
    routing_key, my_obj = item
    self.logger.debug('rabbit_message:{0}'.format(my_obj))
    my_obj = json.loads(my_obj)
    self.logger.debug('routing_key:{0}'.format(routing_key))
    if routing_key == 'poseidon.algos.decider':
        self.logger.debug('decider value:{0}'.format(my_obj))
        # TODO if valid response then send along otherwise nothing
        for key in my_obj:
            ret_val[key] = my_obj[key]
    elif routing_key == 'poseidon.action.ignore':
        for name in my_obj:
            for endpoint in self.s.endpoints:
                if name == endpoint.name:
                    endpoint.ignore = True
    elif routing_key == 'poseidon.action.clear.ignored':
        for name in my_obj:
            for endpoint in self.s.endpoints:
                if name == endpoint.name:
                    endpoint.ignore = False
    elif routing_key == 'poseidon.action.change':
        # my_obj is an iterable of (endpoint name, target state) pairs.
        for name, state in my_obj:
            for endpoint in self.s.endpoints:
                if name == endpoint.name:
                    try:
                        # Leaving a mirroring state for any non-mirroring
                        # target requires unmirroring first.
                        if state != 'mirror' and state != 'reinvestigate' and (
                                endpoint.state == 'mirroring' or
                                endpoint.state == 'reinvestigating'):
                            status = Actions(
                                endpoint, self.s.sdnc).unmirror_endpoint()
                            if not status:
                                self.logger.warning(
                                    'Unable to unmirror the endpoint: {0}'.
                                    format(endpoint.name))
                        endpoint.trigger(state)
                        endpoint.p_next_state = None
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                        # Entering a mirroring state starts the mirror and
                        # records the capture count in redis (best effort).
                        if endpoint.state == 'mirroring' or endpoint.state == 'reinvestigating':
                            status = Actions(
                                endpoint, self.s.sdnc).mirror_endpoint()
                            if status:
                                try:
                                    # NOTE(review): self.r is not set
                                    # anywhere visible here; possibly this
                                    # should be self.s.r — confirm.
                                    self.r.hincrby('vent_plugin_counts',
                                                   'ncapture')
                                except Exception as e:  # pragma: no cover
                                    self.logger.error(
                                        'Failed to update count of plugins because: {0}'
                                        .format(str(e)))
                            else:
                                self.logger.warning(
                                    'Unable to mirror the endpoint: {0}'.
                                    format(endpoint.name))
                    except Exception as e:  # pragma: no cover
                        self.logger.error(
                            'Unable to change endpoint {0} because: {1}'.
                            format(endpoint.name, str(e)))
    elif routing_key == 'poseidon.action.remove':
        # Collect first, then remove, to avoid mutating while scanning.
        remove_list = []
        for name in my_obj:
            for endpoint in self.s.endpoints:
                if name == endpoint.name:
                    remove_list.append(endpoint)
        for endpoint in remove_list:
            self.s.endpoints.remove(endpoint)
    elif routing_key == 'poseidon.action.remove.ignored':
        remove_list = []
        for endpoint in self.s.endpoints:
            if endpoint.ignore:
                remove_list.append(endpoint)
        for endpoint in remove_list:
            self.s.endpoints.remove(endpoint)
    elif routing_key == 'poseidon.action.remove.inactives':
        remove_list = []
        for endpoint in self.s.endpoints:
            if endpoint.state == 'inactive':
                remove_list.append(endpoint)
        for endpoint in remove_list:
            self.s.endpoints.remove(endpoint)
    elif routing_key == self.controller['FA_RABBIT_ROUTING_KEY']:
        self.logger.debug('FAUCET Event:{0}'.format(my_obj))
        for key in my_obj:
            ret_val[key] = my_obj[key]
    return ret_val
def find_new_machines(self, machines):
    '''parse switch structure to find new machines added to network since last call

    Enriches each machine dict (ether vendor, rDNS, subnet), matches it
    to an existing endpoint by hash, drives active/inactive transitions,
    and creates endpoints for machines never seen before.

    Fixes:
    - string comparisons used `is not` (identity), which only works by the
      accident of small-string interning and raises SyntaxWarning on
      modern CPython; replaced with `!=`.
    - the IPv6 /64 prefix was rebuilt by joining hextets with '.', which
      produces an invalid IPv6 prefix; rejoin with ':'.
    '''
    for machine in machines:
        machine['ether_vendor'] = get_ether_vendor(
            machine['mac'],
            '/poseidon/poseidon/metadata/nmap-mac-prefixes.txt')
        if 'ipv4' in machine and machine['ipv4'] and machine[
                'ipv4'] != 'None' and machine['ipv4'] != '0':
            machine['ipv4_rdns'] = get_rdns_lookup(machine['ipv4'])
            # Assume a /24: drop the last octet.
            machine['ipv4_subnet'] = '.'.join(
                machine['ipv4'].split('.')[:-1]) + '.0/24'
        else:
            machine['ipv4_rdns'] = 'NO DATA'
            machine['ipv4_subnet'] = 'NO DATA'
        if 'ipv6' in machine and machine['ipv6'] and machine[
                'ipv6'] != 'None' and machine['ipv6'] != '0':
            machine['ipv6_rdns'] = get_rdns_lookup(machine['ipv6'])
            # Assume a /64: keep the first four hextets (joined with ':').
            machine['ipv6_subnet'] = ':'.join(
                machine['ipv6'].split(':')[0:4]) + '::0/64'
        else:
            machine['ipv6_rdns'] = 'NO DATA'
            machine['ipv6_subnet'] = 'NO DATA'
        if 'controller_type' not in machine:
            machine['controller_type'] = 'none'
            machine['controller'] = ''
        # A machine on a configured trunk port hashes differently so it is
        # tracked as a distinct endpoint.
        trunk = False
        for sw in self.trunk_ports:
            if sw == machine['segment'] and self.trunk_ports[sw].split(
                    ',')[1] == str(
                        machine['port']) and self.trunk_ports[sw].split(
                        ',')[0] == machine['mac']:
                trunk = True
        h = Endpoint.make_hash(machine, trunk=trunk)
        # Linear scan for an existing endpoint with that name.
        ep = None
        for endpoint in self.endpoints:
            if h == endpoint.name:
                ep = endpoint
        if ep is not None and ep.endpoint_data != machine and not ep.ignore:
            self.logger.info('Endpoint changed: {0}:{1}'.format(
                h, machine))
            ep.endpoint_data = deepcopy(machine)
            if ep.state == 'inactive' and machine['active'] == 1:
                # Came back online: resume the remembered next state if any.
                if ep.p_next_state in ['known', 'abnormal']:
                    ep.trigger(ep.p_next_state)
                else:
                    ep.unknown()
                ep.p_prev_states.append((ep.state, int(time.time())))
            elif ep.state != 'inactive' and machine['active'] == 0:
                # Went offline: stop any active mirror first.
                if ep.state in ['mirroring', 'reinvestigating']:
                    status = Actions(ep, self.sdnc).unmirror_endpoint()
                    if not status:
                        self.logger.warning(
                            'Unable to unmirror the endpoint: {0}'.format(
                                ep.name))
                    # Release this endpoint's investigation slot.
                    self.investigations -= 1
                    # Remember what to resume on reactivation.
                    if ep.state == 'mirroring':
                        ep.p_next_state = 'mirror'
                    elif ep.state == 'reinvestigating':
                        ep.p_next_state = 'reinvestigate'
                if ep.state in ['known', 'abnormal']:
                    ep.p_next_state = ep.state
                ep.inactive()
                ep.p_prev_states.append((ep.state, int(time.time())))
        elif ep is None:
            # Never seen before: create and track a new endpoint.
            self.logger.info('Detected new endpoint: {0}:{1}'.format(
                h, machine))
            m = Endpoint(h)
            m.p_prev_states.append((m.state, int(time.time())))
            m.endpoint_data = deepcopy(machine)
            self.endpoints.append(m)
    self.store_endpoints()
    return
def unmirror_endpoint(self, endpoint):
    '''Stop mirroring *endpoint* via the SDN controller; warn on failure.'''
    ok = Actions(endpoint, self.sdnc).unmirror_endpoint()
    if not ok:
        self.logger.warning('Unable to unmirror the endpoint: {0}'.format(
            endpoint.name))
def process(self):
    '''Main monitor loop: consume rabbit messages, apply ML results to
    endpoints, mirror queued endpoints within the investigation budget,
    and time out mirrors that never reported back.

    Runs until the SIGINT handler sets CTRL_C['STOP'].
    '''
    global CTRL_C
    signal.signal(signal.SIGINT, partial(self.signal_handler))
    while not CTRL_C['STOP']:
        time.sleep(1)
        found_work, item = self.get_q_item()
        ml_returns = {}
        if found_work and item[0] == self.controller[
                'FA_RABBIT_ROUTING_KEY']:
            # FAUCET events are just accumulated for later processing.
            self.faucet_event.append(self.format_rabbit_message(item))
            self.logger.debug('Faucet event: {0}'.format(
                self.faucet_event))
        elif found_work:
            ml_returns = self.format_rabbit_message(item)
            if ml_returns:
                self.logger.info('ML results: {0}'.format(ml_returns))
            # extras keeps results for devices that are not yet endpoints.
            extras = deepcopy(ml_returns)
            # process results from ml output and update impacted endpoints
            for ep in self.s.endpoints:
                if ep.name in ml_returns:
                    del extras[ep.name]
                if ep.name in ml_returns and 'valid' in ml_returns[
                        ep.name] and not ep.ignore:
                    if ep.state in ['mirroring', 'reinvestigating']:
                        status = Actions(
                            ep, self.s.sdnc).unmirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to unmirror the endpoint: {0}'.
                                format(ep.name))
                        # Release this endpoint's investigation slot.
                        self.s.investigations -= 1
                    if ml_returns[ep.name]['valid']:
                        ml_decision = None
                        if 'decisions' in ml_returns[
                                ep.name] and 'behavior' in ml_returns[
                                    ep.name]['decisions']:
                            ml_decision = ml_returns[
                                ep.name]['decisions']['behavior']
                        if ml_decision == 'normal':
                            ep.known()
                        else:
                            ep.abnormal()
                    else:
                        ep.unknown()
                    ep.p_prev_states.append((ep.state, int(time.time())))
            # Register unknown devices reported by ML as placeholder
            # machines so they get tracked.
            extra_machines = []
            for device in extras:
                if extras[device]['valid']:
                    extra_machine = {
                        'mac': extras[device]['source_mac'],
                        'segment': 'NO DATA',
                        'port': 'NO DATA',
                        'tenant': 'NO DATA',
                        'active': 0,
                        'name': None
                    }
                    # Crude v4/v6 discrimination on the source IP.
                    if ':' in extras[device]['source_ip']:
                        extra_machine['ipv6'] = extras[device]['source_ip']
                        extra_machine['ipv4'] = 0
                    else:
                        extra_machine['ipv4'] = extras[device]['source_ip']
                        extra_machine['ipv6'] = 0
                    extra_machines.append(extra_machine)
            self.s.find_new_machines(extra_machines)
        # mirror things in the order they got added to the queue
        queued_endpoints = []
        for endpoint in self.s.endpoints:
            if not endpoint.ignore:
                if endpoint.state == 'queued':
                    queued_endpoints.append(
                        (endpoint.name, endpoint.p_prev_states[-1][1]))
        queued_endpoints = sorted(queued_endpoints, key=lambda x: x[1])
        for ep in queued_endpoints:
            for endpoint in self.s.endpoints:
                if ep[0] == endpoint.name:
                    # Only start new mirrors while under budget.
                    if self.s.investigations < self.controller[
                            'max_concurrent_reinvestigations']:
                        self.s.investigations += 1
                        endpoint.trigger(endpoint.p_next_state)
                        endpoint.p_next_state = None
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                        status = Actions(endpoint,
                                         self.s.sdnc).mirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to mirror the endpoint: {0}'.
                                format(endpoint.name))
        for endpoint in self.s.endpoints:
            if not endpoint.ignore:
                if self.s.sdnc:
                    if endpoint.state == 'unknown':
                        endpoint.p_next_state = 'mirror'
                        endpoint.queue()
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                    elif endpoint.state in [
                            'mirroring', 'reinvestigating'
                    ]:
                        cur_time = int(time.time())
                        # timeout after 2 times the reinvestigation frequency
                        # in case something didn't report back, put back in an
                        # unknown state
                        if cur_time - endpoint.p_prev_states[-1][
                                1] > 2 * self.controller[
                                    'reinvestigation_frequency']:
                            status = Actions(
                                endpoint, self.s.sdnc).unmirror_endpoint()
                            if not status:
                                self.logger.warning(
                                    'Unable to unmirror the endpoint: {0}'.
                                    format(endpoint.name))
                            endpoint.unknown()
                            self.s.investigations -= 1
                            endpoint.p_prev_states.append(
                                (endpoint.state, int(time.time())))
                else:
                    # No SDN controller: treat everything as known.
                    if endpoint.state != 'known':
                        endpoint.known()
    return
def test_Actions_nosdn():
    """ Tests Actions with no SDN controller """
    ep = endpoint_factory('foo')
    ep.endpoint_data = {
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
    }
    connect = SDNConnect(Config().get_config())
    connect.sdnc = None  # simulate a missing SDN controller
    actions = Actions(ep, connect.sdnc)
    # Exercise every endpoint operation once.
    for op in (actions.mirror_endpoint,
               actions.unmirror_endpoint,
               actions.coprocess_endpoint,
               actions.uncoprocess_endpoint,
               actions.shutdown_endpoint):
        op()
def process(self):
    '''Main monitor loop: consume rabbit messages, apply ML results to
    endpoints, and drive the mirror/queue state machine under the
    configured investigation budget.

    Runs until the SIGINT handler sets CTRL_C['STOP'].
    '''
    global CTRL_C
    signal.signal(signal.SIGINT, partial(self.signal_handler))
    while not CTRL_C['STOP']:
        time.sleep(1)
        found_work, item = self.get_q_item()
        ml_returns = {}
        if found_work and item[0] == self.controller[
                'FA_RABBIT_ROUTING_KEY']:
            # FAUCET events are just accumulated for later processing.
            self.faucet_event.append(self.format_rabbit_message(item))
            self.logger.debug('Faucet event: {0}'.format(
                self.faucet_event))
        elif found_work:
            ml_returns = self.format_rabbit_message(item)
            self.logger.info('ML results: {0}'.format(ml_returns))
            # process results from ml output and update impacted endpoints
            for ep in self.s.endpoints:
                if ep.name in ml_returns and 'valid' in ml_returns[
                        ep.name] and not ep.ignore:
                    if ep.state in ['mirroring', 'reinvestigating']:
                        status = Actions(
                            ep, self.s.sdnc).unmirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to unmirror the endpoint: {0}'.
                                format(ep.name))
                        # Release this endpoint's investigation slot.
                        self.s.investigations -= 1
                    if ml_returns[ep.name]['valid']:
                        ml_decision = None
                        if 'decisions' in ml_returns[
                                ep.name] and 'behavior' in ml_returns[
                                    ep.name]['decisions']:
                            ml_decision = ml_returns[
                                ep.name]['decisions']['behavior']
                        if ml_decision == 'normal':
                            ep.known()
                        else:
                            ep.abnormal()
                    else:
                        ep.unknown()
                    ep.p_prev_states.append((ep.state, int(time.time())))
        for endpoint in self.s.endpoints:
            if not endpoint.ignore:
                if endpoint.state == 'queued':
                    # Promote queued endpoints while under budget.
                    if self.s.investigations < self.controller[
                            'max_concurrent_reinvestigations']:
                        self.s.investigations += 1
                        endpoint.trigger(endpoint.p_next_state)
                        endpoint.p_next_state = None
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                        status = Actions(endpoint,
                                         self.s.sdnc).mirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to mirror the endpoint: {0}'.
                                format(endpoint.name))
                elif endpoint.state == 'unknown':
                    # move to mirroring state
                    if self.s.investigations < self.controller[
                            'max_concurrent_reinvestigations']:
                        self.s.investigations += 1
                        endpoint.mirror()
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                        status = Actions(endpoint,
                                         self.s.sdnc).mirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to mirror the endpoint: {0}'.
                                format(endpoint.name))
                    else:
                        # Over budget: remember intent and queue instead.
                        endpoint.p_next_state = 'mirror'
                        endpoint.queue()
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))
                elif endpoint.state in ['mirroring', 'reinvestigating']:
                    cur_time = int(time.time())
                    # timeout after 2 times the reinvestigation frequency
                    # in case something didn't report back, put back in an
                    # unknown state
                    if cur_time - endpoint.p_prev_states[-1][
                            1] > 2 * self.controller[
                                'reinvestigation_frequency']:
                        status = Actions(endpoint,
                                         self.s.sdnc).unmirror_endpoint()
                        if not status:
                            self.logger.warning(
                                'Unable to unmirror the endpoint: {0}'.
                                format(endpoint.name))
                        endpoint.unknown()
                        self.s.investigations -= 1
                        endpoint.p_prev_states.append(
                            (endpoint.state, int(time.time())))