def test_FaucetProxy():
    """
    Tests Faucet
    """
    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)
    proxy.update_acls()

    proxy = FaucetProxy(controller)
    proxy.rabbit_enabled = False
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)
    proxy.update_acls()

    controller = Config().get_config()
    controller['MIRROR_PORTS'] = '{"foo":1}'
    controller['ignore_vlans'] = ['foo']
    controller['ignore_ports'] = [1]
    proxy = FaucetProxy(controller)

def test_get_endpoints():
    config_dir = '/etc/faucet'
    log_dir = '/var/log/faucet'
    if not os.path.exists(config_dir):
        config_dir = os.path.join(os.getcwd(), 'faucet')
    if not os.path.exists(log_dir):
        log_dir = os.path.join(os.getcwd(), 'faucet')
    try:
        f = open(os.path.join(log_dir, 'faucet.log'), 'r')
    except FileNotFoundError:
        f = open(os.path.join(log_dir, 'faucet.log'), 'w')
        f.write('Nov 19 18:52:31 faucet.valve INFO DPID 123917682135854 (0x70b3d56cd32e) L2 learned b8:27:eb:ff:39:15 (L2 type 0x0800, L3 src 192.168.1.40) on Port 2 on VLAN 200 (2 hosts total)\n')
        f.write('Nov 19 18:52:31 faucet.valve INFO DPID 123917682135854 (0x70b3d56cd32e) L2 learned b8:27:eb:ff:39:15 (L2 type 0x0800, L3 src 192.168.1.40) on Port 2 on VLAN 200 (2 hosts total)\n')
        f.write('Nov 19 18:52:31 faucet.valve INFO DPID 123917682135854 (0x70b3d56cd32e) L2 learned b8:27:eb:ff:39:15 (L2 type 0x0800, L3 src 192.168.1.40) on Port 2 on VLAN 300 (2 hosts total)\n')
        f.write('Nov 19 18:52:31 faucet.valve INFO DPID 123917682135854 (0x70b3d56cd32e) L2 learned b8:27:eb:cc:39:15 (L2 type 0x0800, L3 src 192.168.1.40) on Port 5 on VLAN 200 (2 hosts total)\n')
        f.write('Nov 19 18:52:31 faucet.valve INFO DPID 123917682135854 (0x70b3d56cd32e) L2 learned b8:27:eb:cc:39:15 (L2 type 0x0800, L3 src 192.168.1.50) on Port 2 on VLAN 200 (2 hosts total)\n')
        f.write(
            'May 01 17:59:50 faucet.valve INFO DPID 1 (0x1) 1 recently active hosts on VLAN 100, expired [00:00:00:00:00:01 on Port 1]\n')
        f.write(
            'May 01 18:02:15 faucet.valve INFO DPID 1 (0x1) 0 recently active hosts on VLAN 100, expired [00:00:00:00:00:02 on Port 2]\n')
        f.write('foo\n')
    f.close()
    try:
        f = open(os.path.join(config_dir, 'faucet.yaml'), 'r')
    except FileNotFoundError:
        f = open(os.path.join(config_dir, 'faucet.yaml'), 'w')
        f.write('vlans:\n')
        f.write('    open:\n')
        f.write('        vid: 100\n')
        f.write('dps:\n')
        f.write('    switch1:\n')
        f.write('        dp_id: 0x70b3d56cd32e\n')
        f.write('        hardware: "ZodiacFX"\n')
        f.write('        proactive_learn: True\n')
        f.write('        interfaces:\n')
        f.write('            1:\n')
        f.write('                native_vlan: open\n')
        f.write('            2:\n')
        f.write('                native_vlan: open\n')
        f.write('            3:\n')
        f.write('                mirror: 1\n')
        f.write('                native_vlan: open\n')
        f.write('            4:\n')
        f.write('                native_vlan: open\n')
        f.write('            5:\n')
        f.write('                native_vlan: open\n')
        f.write('            6:\n')
        f.write('                native_vlan: open')
    f.close()
    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    proxy.check_connection()
    a = proxy.get_endpoints()
    assert isinstance(a, list)

    proxy = FaucetProxy(controller)
    a = proxy.get_endpoints(messages=[
        {'dp_name': 'switch', 'L2_LEARN': {'l3_src_ip': '10.0.0.1', 'eth_src': '00:00:00:00:00:00', 'port_no': 1, 'vid': '100'}},
        {'version': 1, 'time': 1525205350.0357792, 'dp_id': 1, 'dp_name': 'switch-1', 'event_id': 5,
         'PORT_CHANGE': {'port_no': 1, 'reason': 'MODIFY', 'status': False}},
        {}])
    assert isinstance(a, list)

def __init__(self):
    self.logger = logger
    self.faucet_event = []
    self.controller = Config().get_config()
    self.controller['max_concurrent_reinvestigations'] = 10
    self.s = SDNConnect(self.controller)
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.known()
    self.s.endpoints[endpoint.name] = endpoint
    endpoint = endpoint_factory('foo2')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.known()
    self.s.endpoints[endpoint.name] = endpoint
    self.s.store_endpoints()
    self.s.get_stored_endpoints()

def __init__(self):
    self.fa_rabbit_routing_key = 'foo'
    self.logger = logger
    self.controller = Config().get_config()
    self.s = SDNConnect(self.controller)
    self.faucet_event = []
    self.s.sdnc = MockParser()

def test_FaucetProxy():
    """
    Tests Faucet
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        faucetconfgetsetter_cl = FaucetLocalConfGetSetter
        faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
            tmpdir, 'faucet.yaml')
        shutil.copy(SAMPLE_CONFIG, faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
        proxy = _get_proxy(faucetconfgetsetter_cl)
        proxy.shutdown_ip('10.0.0.9')
        proxy.shutdown_endpoint()
        proxy.mirror_mac('00:00:00:00:00:00', None, None)
        proxy.mirror_mac('00:00:00:00:00:01', None, None)
        proxy.unmirror_mac('00:00:00:00:00:00', None, None)
        proxy.update_acls()

        proxy = _get_proxy(faucetconfgetsetter_cl)
        proxy.shutdown_ip('10.0.0.9')
        proxy.shutdown_endpoint()
        proxy.mirror_mac('00:00:00:00:00:00', None, None)
        proxy.mirror_mac('00:00:00:00:00:01', None, None)
        proxy.unmirror_mac('00:00:00:00:00:00', None, None)
        proxy.update_acls()

        controller = Config().get_config()
        controller['MIRROR_PORTS'] = {'foo': 1}
        controller['ignore_vlans'] = ['foo']
        controller['ignore_ports'] = [1]
        proxy = _get_proxy(faucetconfgetsetter_cl, controller)

def __init__(self):
    self.logger = logger
    self.faucet_event = []
    self.controller = Config().get_config()
    self.controller['max_concurrent_reinvestigations'] = 10
    self.s = SDNConnect()
    if 'POSEIDON_TRAVIS' in os.environ:
        self.s.r = redis.StrictRedis(host='localhost',
                                     port=6379,
                                     db=0,
                                     decode_responses=True)
    endpoint = Endpoint('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.known()
    self.s.endpoints.append(endpoint)
    endpoint = Endpoint('foo2')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.known()
    self.s.endpoints.append(endpoint)
    self.s.store_endpoints()
    self.s.get_stored_endpoints()

def test_FaucetProxy():
    """
    Tests Faucet
    """
    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)

    proxy = FaucetProxy(controller)
    proxy.rabbit_enabled = False
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)

    proxy = FaucetProxy(controller)
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None,
                     messages=[{'dp_name': 'switch', 'L2_LEARN': {'l3_src_ip': '10.0.0.1', 'eth_src': '00:00:00:00:00:00', 'port_no': 1, 'vid': '100'}},
                               {'version': 1, 'time': 1525205350.0357792, 'dp_id': 1, 'dp_name': 'switch-1', 'event_id': 5,
                                'PORT_CHANGE': {'port_no': 1, 'reason': 'MODIFY', 'status': False}},
                               {}])
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None,
                       messages=[{'dp_name': 'switch', 'L2_LEARN': {'l3_src_ip': '10.0.0.1', 'eth_src': '00:00:00:00:00:00', 'port_no': 1, 'vid': '100'}},
                                 {'version': 1, 'time': 1525205350.0357792, 'dp_id': 1, 'dp_name': 'switch-1', 'event_id': 5,
                                  'PORT_CHANGE': {'port_no': 1, 'reason': 'MODIFY', 'status': False}},
                                 {}])

def __init__(self, skip_rabbit):
    self.faucet_event = []
    self.m_queue = queue.Queue()
    self.skip_rabbit = skip_rabbit
    self.logger = logger

    # get config options
    self.controller = Config().get_config()

    # timer class to call things periodically in own thread
    self.schedule = schedule

    # setup prometheus
    self.prom = Prometheus()
    try:
        self.prom.initialize_metrics()
    except Exception as e:  # pragma: no cover
        self.logger.debug(
            'Prometheus metrics are already initialized: {0}'.format(str(e)))
    Prometheus.start()

    # initialize sdnconnect
    self.s = SDNConnect()

    # retrieve endpoints from redis
    self.s.get_stored_endpoints()

    # set all retrieved endpoints to inactive at the start
    for endpoint in self.s.endpoints:
        if not endpoint.ignore:
            if endpoint.state != 'inactive':
                if endpoint.state == 'mirroring':
                    endpoint.p_next_state = 'mirror'
                elif endpoint.state == 'reinvestigating':
                    endpoint.p_next_state = 'reinvestigate'
                elif endpoint.state == 'queued':
                    endpoint.p_next_state = 'queue'
                elif endpoint.state in ['known', 'abnormal']:
                    endpoint.p_next_state = endpoint.state
                endpoint.endpoint_data['active'] = 0
                endpoint.inactive()
                endpoint.p_prev_states.append(
                    (endpoint.state, int(time.time())))

    # store changes to state
    self.s.store_endpoints()

    # schedule periodic scan of endpoints thread
    self.schedule.every(self.controller['scan_frequency']).seconds.do(
        partial(schedule_job_kickurl, func=self))

    # schedule periodic reinvestigations thread
    self.schedule.every(self.controller['reinvestigation_frequency']).seconds.do(
        partial(schedule_job_reinvestigation, func=self))

    # schedule all threads
    self.schedule_thread = threading.Thread(
        target=partial(schedule_thread_worker, schedule=self.schedule),
        name='st_worker')

def __init__(self):
    self.states = ['active', 'inactive', 'known', 'unknown', 'mirroring',
                   'abnormal', 'shutdown', 'reinvestigating', 'queued']
    self.controller = Config().get_config()
    self.sdnc = SDNConnect(self.controller)
    self.sdnc.get_stored_endpoints()

class Logger:
    """
    Base logger class that handles logging.

    Outputs to the console, a Poseidon-specific log file, and a
    user-specified syslog. To log, create a logger:
    logger1 = logging.getLogger('mpapp.area1')
    """
    host = os.getenv('SYSLOG_HOST', 'NOT_CONFIGURED')
    port = int(os.getenv('SYSLOG_PORT', 514))

    level_int = {'CRITICAL': 50, 'ERROR': 40,
                 'WARNING': 30, 'INFO': 20, 'DEBUG': 10}

    controller = Config().get_config()

    # setup existing loggers
    logging.getLogger('schedule').setLevel(logging.ERROR)

    use_file_logger = True
    # ensure log file exists
    try:
        if not os.path.exists('/var/log/poseidon'):
            os.makedirs('/var/log/poseidon')
        if not os.path.exists('/var/log/poseidon/poseidon.log'):
            with open('/var/log/poseidon/poseidon.log', 'w'):
                pass

        # set up logging to file
        logging.basicConfig(
            level=level_int[controller['logger_level'].upper()],
            format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
            filename='/var/log/poseidon/poseidon.log',
            filemode='a')
    except Exception as e:  # pragma: no cover
        use_file_logger = False

    # define a Handler which writes INFO messages or higher to sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('[%(levelname)s] %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # don't try to connect to a syslog address if one was not supplied
    if host != 'NOT_CONFIGURED':  # pragma: no cover
        # if a syslog address was supplied, log to it
        syslog = logging.handlers.SysLogHandler(
            address=(host, port), socktype=socket.SOCK_STREAM)
        f_format = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
        f_formatter = logging.Formatter(f_format)
        syslog.setFormatter(f_formatter)
        logging.getLogger('').addHandler(syslog)

def __init__(self, endpoint, iterations=1):
    self.logger = logging.getLogger('collector')
    self.controller = Config().get_config()
    self.endpoint = endpoint
    self.id = endpoint.name
    self.mac = endpoint.endpoint_data['mac']
    self.nic = self.controller['collector_nic']
    self.interval = str(self.controller['reinvestigation_frequency'])
    self.iterations = str(iterations)

def __init__(self):
    self.r = None
    self.first_time = True
    self.sdnc = None
    self.controller = Config().get_config()
    self.logger = logger
    self.get_sdn_context()
    self.endpoints = []
    self.investigations = 0
    self.connect_redis()

def __init__(self, controller=None):
    self.states = ['active', 'inactive', 'known', 'unknown', 'mirroring',
                   'shutdown', 'reinvestigating', 'queued']
    if controller is None:
        self.controller = Config().get_config()
    else:
        self.controller = controller
    self.sdnc = SDNConnect(self.controller, logger, first_time=False)
    self.sdnc.get_stored_endpoints()

def test_unmirror_endpoint():
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    s.unmirror_endpoint(endpoint)

def test_clear_filters():
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    s.clear_filters()

    controller = Config().get_config()
    controller['TYPE'] = 'bcf'
    s = SDNConnect(controller)
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    s.clear_filters()

def test_endpoint_by_name():
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoint = s.endpoint_by_name('foo')
    assert endpoint is None
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    endpoint2 = s.endpoint_by_name('foo')
    assert endpoint == endpoint2

def __init__(self):
    self.r = None
    self.first_time = True
    self.sdnc = None
    self.controller = Config().get_config()
    trunk_ports = self.controller['trunk_ports']
    if isinstance(trunk_ports, str):
        self.trunk_ports = json.loads(trunk_ports)
    else:
        self.trunk_ports = trunk_ports
    self.logger = logger
    self.get_sdn_context()
    self.endpoints = []
    self.investigations = 0
    self.connect_redis()

def __init__(self):
    self.logger = logger
    self.fa_rabbit_routing_key = 'FAUCET.Event'
    self.faucet_event = None
    self.controller = Config().get_config()
    self.s = SDNConnect(self.controller)
    self.s.controller['TYPE'] = 'None'
    self.s.get_sdn_context()
    self.s.controller['TYPE'] = 'bcf'
    self.s.get_sdn_context()
    self.s.controller['TYPE'] = 'faucet'
    self.s.get_sdn_context()
    if 'POSEIDON_TRAVIS' in os.environ:
        self.s.r = redis.StrictRedis(host='localhost',
                                     port=6379,
                                     db=0,
                                     decode_responses=True)
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.p_prev_states.append((endpoint.state, int(time.time())))
    self.s.endpoints[endpoint.name] = endpoint
    endpoint = endpoint_factory('foo2')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.p_next_state = 'mirror'
    endpoint.queue()
    endpoint.p_prev_states.append((endpoint.state, int(time.time())))
    self.s.endpoints[endpoint.name] = endpoint
    endpoint = endpoint_factory('foo3')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    self.s.endpoints[endpoint.name] = endpoint
    self.s.store_endpoints()
    self.s.get_stored_endpoints()

def test_Actions():
    """
    Tests Actions
    """
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    controller = Config().get_config()
    s = SDNConnect(controller)
    a = Actions(endpoint, s.sdnc)
    a.mirror_endpoint()
    a.unmirror_endpoint()
    a.shutdown_endpoint()

def test_Parser():
    """
    Tests Parser
    """
    def check_config(obj, path, endpoints):
        obj.config(path, 'mirror', 1, 't1-1')
        obj.config(path, 'mirror', 2, 0x1)
        obj.config(path, 'mirror', 2, 't1-1')
        obj.config(path, 'mirror', 5, 't2-1')
        obj.config(path, 'mirror', 6, 'bad')
        obj.config(path, 'unmirror', None, None)
        obj.config(path, 'unmirror', 1, 't1-1')
        obj.config(path, 'shutdown', None, None)
        obj.config(path, 'apply_acls', None, None)
        obj.config(path, 'apply_acls', 1, 't1-1', endpoints=endpoints,
                   rules_file=os.path.join(os.getcwd(), 'rules.yaml'))
        obj.config(path, 'unknown', None, None)
        obj.log(os.path.join(log_dir, 'faucet.log'))

    config_dir = '/etc/faucet'
    log_dir = '/var/log/faucet'
    if not os.path.exists(config_dir):
        config_dir = os.path.join(os.getcwd(), 'faucet')
    if not os.path.exists(log_dir):
        log_dir = os.path.join(os.getcwd(), 'faucet')
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 't1-1',
        'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    endpoint.metadata = {
        'mac_addresses': {'00:00:00:00:00:00': {'1551805502.0': {'labels': ['developer workstation'], 'behavior': 'normal'}}},
        'ipv4_addresses': {'0.0.0.0': {'os': 'windows'}},
        'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
    endpoints = [endpoint]
    parser = Parser(mirror_ports={'t1-1': 2})
    parser2 = Parser()
    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    check_config(parser, os.path.join(config_dir, 'faucet.yaml'), endpoints)
    check_config(parser2, os.path.join(config_dir, 'faucet.yaml'), endpoints)
    check_config(proxy, os.path.join(config_dir, 'faucet.yaml'), endpoints)
    check_config(parser, os.path.join(
        os.getcwd(), 'tests/sample_faucet_config.yaml'), endpoints)
    check_config(parser2, os.path.join(
        os.getcwd(), 'tests/sample_faucet_config.yaml'), endpoints)
    check_config(proxy, os.path.join(
        os.getcwd(), 'tests/sample_faucet_config.yaml'), endpoints)

def __init__(self):
    self.logger = logger
    self.fa_rabbit_routing_key = 'FAUCET.Event'
    self.faucet_event = None
    self.controller = Config().get_config()
    self.s = SDNConnect(self.controller)
    self.s.controller['TYPE'] = 'None'
    self.s.get_sdn_context()
    self.s.controller['TYPE'] = 'bcf'
    self.s.get_sdn_context()
    self.s.controller['TYPE'] = 'faucet'
    self.s.get_sdn_context()
    self.job_queue = queue.Queue()
    self.m_queue = queue.Queue()
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.mirror()
    endpoint.p_prev_states.append((endpoint.state, int(time.time())))
    self.s.endpoints[endpoint.name] = endpoint
    endpoint = endpoint_factory('foo2')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    endpoint.p_next_state = 'mirror'
    endpoint.queue()
    endpoint.p_prev_states.append((endpoint.state, int(time.time())))
    self.s.endpoints[endpoint.name] = endpoint
    endpoint = endpoint_factory('foo3')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    self.s.endpoints[endpoint.name] = endpoint
    self.s.store_endpoints()
    self.s.get_stored_endpoints()
    self.results = 0

def test_endpoints_by_ip():
    controller = Config().get_config()
    s = SDNConnect(controller)
    endpoints = s.endpoints_by_ip('10.0.0.1')
    assert endpoints == []
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo',
        'port': '1', 'ipv4': '10.0.0.1', 'ipv6': 'None'}
    s.endpoints[endpoint.name] = endpoint
    endpoint2 = s.endpoints_by_ip('10.0.0.1')
    assert [endpoint] == endpoint2

def test_Parser():
    """
    Tests Parser
    """
    config_dir = '/etc/faucet'
    log_dir = '/var/log/faucet'
    if not os.path.exists(config_dir):
        config_dir = os.path.join(os.getcwd(), 'faucet')
    if not os.path.exists(log_dir):
        log_dir = os.path.join(os.getcwd(), 'faucet')
    parser = Parser()
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 1, 'switch1')
    parser.config(os.path.join(config_dir, 'faucet.yaml'),
                  'mirror', 2, 0x70b3d56cd32e)
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 2, 'switch1')
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 5, 'switch1')
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 6, 'bad')
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'unmirror', None, None)
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'shutdown', None, None)
    parser.config(os.path.join(config_dir, 'faucet.yaml'), 'unknown', None, None)
    parser.log(os.path.join(log_dir, 'faucet.log'))

    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 1, 'switch1')
    proxy.config(os.path.join(config_dir, 'faucet.yaml'),
                 'mirror', 2, 0x70b3d56cd32e)
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 2, 'switch1')
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 5, 'switch1')
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'mirror', 6, 'bad')
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'unmirror', None, None)
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'shutdown', None, None)
    proxy.config(os.path.join(config_dir, 'faucet.yaml'), 'unknown', None, None)
    proxy.log(os.path.join(log_dir, 'faucet.log'))

def __init__(self, endpoint, switch, iterations=1):
    self.logger = logging.getLogger('collector')
    self.controller = Config().get_config()
    self.endpoint = endpoint
    self.id = endpoint.name
    self.mac = endpoint.endpoint_data['mac']
    nic = self.controller['collector_nic']
    try:
        eval_nic = ast.literal_eval(nic)
        if switch in eval_nic:
            self.nic = eval_nic[switch]
        else:
            self.logger.error(
                'Failed to get collector nic for the switch: {0}'.format(switch))
    except ValueError:
        self.nic = nic
    self.interval = str(self.controller['reinvestigation_frequency'])
    self.iterations = str(iterations)

def __init__(self, skip_rabbit, controller=None):
    self.faucet_event = []
    self.m_queue = queue.Queue()
    self.job_queue = queue.Queue()
    self.skip_rabbit = skip_rabbit
    self.logger = logger
    self.rabbit_channel_connection_local = None
    self.rabbit_channel_connection_local_fa = None

    # get config options
    if controller is None:
        self.controller = Config().get_config()
    else:
        self.controller = controller

    # timer class to call things periodically in own thread
    self.schedule = schedule

    # setup prometheus
    self.prom = Prometheus()
    try:
        self.prom.initialize_metrics()
    except Exception as e:  # pragma: no cover
        self.logger.debug(
            'Prometheus metrics are already initialized: {0}'.format(str(e)))
    Prometheus.start()

    # initialize sdnconnect
    self.s = SDNConnect(self.controller)

    # schedule periodic scan of endpoints thread
    self.schedule.every(self.controller['scan_frequency']).seconds.do(
        self.schedule_job_kickurl)

    # schedule periodic reinvestigations thread
    self.schedule.every(self.controller['reinvestigation_frequency']).seconds.do(
        self.schedule_job_reinvestigation)

    # schedule all threads
    self.schedule_thread = threading.Thread(
        target=partial(schedule_thread_worker, schedule=self.schedule),
        name='st_worker')

def test_FaucetProxy():
    """
    Tests Faucet
    """
    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)

    proxy = FaucetProxy(controller)
    proxy.rabbit_enabled = False
    proxy.shutdown_ip('10.0.0.9')
    proxy.shutdown_endpoint()
    proxy.mirror_mac('00:00:00:00:00:00', None, None)
    proxy.mirror_mac('00:00:00:00:00:01', None, None)
    proxy.unmirror_mac('00:00:00:00:00:00', None, None)

def test_Actions_nosdn():
    """
    Tests Actions with no SDN controller
    """
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    controller = Config().get_config()
    s = SDNConnect(controller)
    s.sdnc = None
    a = Actions(endpoint, s.sdnc)
    a.mirror_endpoint()
    a.unmirror_endpoint()
    a.coprocess_endpoint()
    a.uncoprocess_endpoint()
    a.shutdown_endpoint()

def test_parse_metadata():
    controller = Config().get_config()
    s = SDNConnect(controller)
    mac_info = {
        b'poseidon_hash': 'myhash',
    }
    ml_info = {
        b'labels': b'["foo", "bar"]',
        b'confidences': b'[1.0, 2.0]',
        'myhash': b'{"pcap_labels": "mylabels", "decisions": {"behavior": "definitely"}}',
    }
    assert s.parse_metadata(mac_info, ml_info) == {
        'confidences': [1.0, 2.0],
        'labels': ['foo', 'bar'],
        'pcap_labels': 'mylabels',
        'behavior': 'definitely',
    }

def test_show_endpoints():
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo',
        'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    endpoint.metadata = {
        'mac_addresses': {
            '00:00:00:00:00:00': {
                '1551805502': {
                    'labels': ['developer workstation'],
                    'behavior': 'normal'}}},
        'ipv4_addresses': {'0.0.0.0': {'os': 'windows'}},
        'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
    controller = Config().get_config()
    s = SDNConnect(controller)
    s.endpoints[endpoint.name] = endpoint
    s.show_endpoints('all')
    s.show_endpoints('state active')
    s.show_endpoints('state ignored')
    s.show_endpoints('state unknown')
    s.show_endpoints('os windows')
    s.show_endpoints('role developer-workstation')
    s.show_endpoints('behavior normal')

def test_merge_machine():
    controller = Config().get_config()
    s = SDNConnect(controller)
    old_machine = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo',
        'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    new_machine = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo',
        'port': '1', 'ipv4': '', 'ipv6': ''}
    s.merge_machine_ip(old_machine, new_machine)
    assert old_machine['ipv4'] == new_machine['ipv4']
    assert old_machine['ipv6'] == new_machine['ipv6']