def signal_handler(self, sig, frame):
    ''' hopefully eat a CTRL_C and signal system shutdown

    Clears any mirrors/filter rules left on the SDN controller, flags the
    shared CTRL_C state so other loops stop, cancels scheduled jobs, closes
    rabbit connections and exits.

    Args:
        sig: signal number delivered by the OS (renamed from `signal` to
             avoid shadowing the stdlib `signal` module).
        frame: current stack frame at delivery time (unused).
    '''
    global CTRL_C
    # best-effort cleanup of controller-side state before shutting down
    if isinstance(self.s.sdnc, FaucetProxy):
        Parser().clear_mirrors(self.controller['CONFIG_FILE'])
    elif isinstance(self.s.sdnc, BcfProxy):
        self.logger.debug('removing bcf filter rules')
        retval = self.s.sdnc.remove_filter_rules()
        self.logger.debug('removed filter rules: {0}'.format(retval))
    CTRL_C['STOP'] = True
    self.logger.debug('CTRL-C: {0}'.format(CTRL_C))
    try:
        # iterate over a snapshot: cancel_job removes the job from
        # schedule.jobs, so mutating the live list while iterating it
        # would silently skip every other job
        for job in list(self.schedule.jobs):
            self.logger.debug('CTRLC:{0}'.format(job))
            self.schedule.cancel_job(job)
        self.rabbit_channel_connection_local.close()
        self.rabbit_channel_connection_local_fa.close()
        self.logger.debug('SHUTTING DOWN')
        self.logger.debug('EXITING')
        sys.exit()
    except Exception as e:  # pragma: no cover
        self.logger.debug(
            'Failed to handle signal properly because: {0}'.format(str(e)))
def __init__(self, controller, *args, **kwargs):
    '''Initializes Faucet object.'''
    # MIRROR_PORTS may arrive JSON-encoded (string) or already parsed.
    raw_mirrors = controller['MIRROR_PORTS']
    self.mirror_ports = (
        json.loads(raw_mirrors) if isinstance(raw_mirrors, str) else raw_mirrors)
    self.rabbit_enabled = controller['RABBIT_ENABLED']
    self.learn_pub_adds = controller['LEARN_PUBLIC_ADDRESSES']
    self.reinvestigation_frequency = controller['reinvestigation_frequency']
    self.max_concurrent_reinvestigations = controller[
        'max_concurrent_reinvestigations']
    self.config_file = controller['CONFIG_FILE']
    self.log_file = controller['LOG_FILE']
    self.host = controller['URI']
    self.user = controller['USER']
    self.pw = controller['PASS']
    super(FaucetProxy, self).__init__(
        self.host,
        self.user,
        self.pw,
        self.config_file,
        self.log_file,
        self.mirror_ports,
        self.rabbit_enabled,
        self.learn_pub_adds,
        self.reinvestigation_frequency,
        self.max_concurrent_reinvestigations,
        *args,
        **kwargs)
    self.logger = logging.getLogger('faucet')
    self.mac_table = {}
    # start from a clean slate: drop any mirrors left over in the config
    Parser().clear_mirrors(self.config_file)
def test_Parser():
    """ Tests Parser """
    def check_config(obj, path, endpoints):
        # exercise every config action, including invalid ports/switches
        for port, switch in ((1, 't1-1'), (2, 0x1), (2, 't1-1'),
                             (5, 't2-1'), (6, 'bad')):
            obj.config(path, 'mirror', port, switch)
        obj.config(path, 'unmirror', None, None)
        obj.config(path, 'unmirror', 1, 't1-1')
        obj.config(path, 'shutdown', None, None)
        obj.config(path, 'apply_acls', None, None)
        obj.config(path, 'apply_acls', 1, 't1-1', endpoints=endpoints,
                   rules_file=os.path.join(os.getcwd(), 'rules.yaml'))
        obj.config(path, 'unknown', None, None)
        obj.log(os.path.join(log_dir, 'faucet.log'))

    # fall back to in-repo dirs when the system faucet dirs are absent
    config_dir = '/etc/faucet'
    log_dir = '/var/log/faucet'
    if not os.path.exists(config_dir):
        config_dir = os.path.join(os.getcwd(), 'faucet')
    if not os.path.exists(log_dir):
        log_dir = os.path.join(os.getcwd(), 'faucet')

    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 't1-1',
        'port': '1', 'ipv4': '0.0.0.0', 'ipv6': '1212::1'}
    endpoint.metadata = {
        'mac_addresses': {'00:00:00:00:00:00': {'1551805502.0': {
            'labels': ['developer workstation'], 'behavior': 'normal'}}},
        'ipv4_addresses': {'0.0.0.0': {'os': 'windows'}},
        'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
    endpoints = [endpoint]

    parser = Parser(mirror_ports={'t1-1': 2})
    parser2 = Parser()
    controller = Config().get_config()
    proxy = FaucetProxy(controller)

    # run the full action matrix against both config files with each object
    for path in (os.path.join(config_dir, 'faucet.yaml'),
                 os.path.join(os.getcwd(), 'tests/sample_faucet_config.yaml')):
        for obj in (parser, parser2, proxy):
            check_config(obj, path, endpoints)
def clear_filters(self):
    ''' Clear any existing capture filters from the SDN controller. '''
    sdnc = self.sdnc
    if isinstance(sdnc, FaucetProxy):
        # faucet: mirrors live in the config file, so rewrite it
        Parser().clear_mirrors(self.controller['CONFIG_FILE'])
    elif isinstance(sdnc, BcfProxy):
        # bcf: filter rules are removed via the controller API
        self.logger.debug('removing bcf filter rules')
        removed = self.sdnc.remove_filter_rules()
        self.logger.debug('removed filter rules: {0}'.format(removed))
def main(skip_rabbit=False):  # pragma: no cover
    """Set up rabbit connections, run the monitor loop, and clean up on exit.

    Args:
        skip_rabbit: when True, skip the internal rabbit connection
                     (the FA rabbit connection is still made if enabled).
    """
    def _connect_rabbit(pmain, host, port, exchange, binding_key):
        # connect one rabbit channel and start consuming into pmain.m_queue;
        # returns (channel, connection, consumer_thread)
        rabbit = Rabbit()
        queue_name = 'poseidon_main'
        retval = rabbit.make_rabbit_connection(
            host, port, exchange, queue_name, binding_key)
        channel, connection = retval[0], retval[1]
        thread = rabbit.start_channel(
            channel, rabbit_callback, queue_name, pmain.m_queue)
        return channel, connection, thread

    # setup rabbit and monitoring of the network
    pmain = Monitor(skip_rabbit=skip_rabbit)
    if not skip_rabbit:
        channel, connection, thread = _connect_rabbit(
            pmain,
            pmain.controller['rabbit_server'],
            int(pmain.controller['rabbit_port']),
            'topic-poseidon-internal',
            ['poseidon.algos.#', 'poseidon.action.#'])
        pmain.rabbit_channel_local = channel
        pmain.rabbit_channel_connection_local = connection
        pmain.rabbit_thread = thread
    if pmain.controller['FA_RABBIT_ENABLED']:
        channel, connection, thread = _connect_rabbit(
            pmain,
            pmain.controller['FA_RABBIT_HOST'],
            pmain.controller['FA_RABBIT_PORT'],
            pmain.controller['FA_RABBIT_EXCHANGE'],
            [pmain.controller['FA_RABBIT_ROUTING_KEY']+'.#'])
        pmain.rabbit_channel_local = channel
        pmain.rabbit_channel_connection_local_fa = connection
        pmain.rabbit_thread = thread
    pmain.schedule_thread.start()

    # loop here until told not to
    try:
        pmain.process()
    except Exception as e:
        logger.error('process() exception: {0}'.format(str(e)))

    # best-effort cleanup of controller-side state before exiting
    if isinstance(pmain.s.sdnc, FaucetProxy):
        Parser().clear_mirrors(pmain.controller['CONFIG_FILE'])
    elif isinstance(pmain.s.sdnc, BcfProxy):
        pmain.s.sdnc.remove_filter_rules()
    pmain.logger.debug('SHUTTING DOWN')
    pmain.logger.debug('EXITING')
    sys.exit(0)
def test_ignore_events():
    parser = Parser(ignore_vlans=[999], ignore_ports={'switch99': 11})
    # (dp_name, vid, port_no, stack_descr, expect_ignored)
    cases = (
        ('switch123', 999, 123, None, True),        # ignored vlan
        ('switch123', 333, 123, None, False),
        ('switch99', 333, 11, None, True),          # ignored port
        ('switch99', 333, 99, None, False),
        ('switch99', 333, 99, 'something', True),   # stack event
    )
    for message_type in ('L2_LEARN', 'L2_EXPIRE', 'PORT_CHANGE'):
        for dp_name, vid, port_no, stack_descr, expect_ignored in cases:
            payload = {'vid': vid, 'port_no': port_no}
            if stack_descr is not None:
                payload['stack_descr'] = stack_descr
            event = {'dp_name': dp_name, message_type: payload}
            assert bool(parser.ignore_event(event)) == expect_ignored
        # unrecognized message types are always ignored
        assert parser.ignore_event({
            'dp_name': 'switch123',
            'UNKNOWN': {'vid': 123, 'port_no': 123}})
def __init__(self, skip_rabbit):
    """Initialize the monitor: config, metrics, SDN connection, endpoint
    state reset, and periodic job scheduling.

    Args:
        skip_rabbit: when truthy, the caller will skip rabbit setup
                     (stored for later use; not acted on here).
    """
    # faucet_event: buffer of events received from faucet
    self.faucet_event = []
    # m_queue: work queue fed by the rabbit consumer callbacks
    self.m_queue = queue.Queue()
    self.skip_rabbit = skip_rabbit
    self.logger = logger

    # get config options
    self.controller = Config().get_config()

    # timer class to call things periodically in own thread
    self.schedule = schedule

    # setup prometheus
    self.prom = Prometheus()
    try:
        self.prom.initialize_metrics()
    except Exception as e:  # pragma: no cover
        # re-initialization raises; safe to continue with existing metrics
        self.logger.debug(
            'Prometheus metrics are already initialized: {0}'.format(
                str(e)))
    Prometheus.start()

    # initialize sdnconnect
    self.s = SDNConnect()

    # cleanup any old filters
    if isinstance(self.s.sdnc, FaucetProxy):
        Parser().clear_mirrors(self.controller['CONFIG_FILE'])
    elif isinstance(self.s.sdnc, BcfProxy):
        self.s.sdnc.remove_filter_rules()

    # retrieve endpoints from redis
    self.s.get_stored_endpoints()

    # set all retrieved endpoints to inactive at the start;
    # remember the state they were in (via p_next_state) so in-flight
    # mirror/reinvestigate/queue work can be resumed later
    for endpoint in self.s.endpoints:
        if not endpoint.ignore:
            if endpoint.state != 'inactive':
                if endpoint.state == 'mirroring':
                    endpoint.p_next_state = 'mirror'
                elif endpoint.state == 'reinvestigating':
                    endpoint.p_next_state = 'reinvestigate'
                elif endpoint.state == 'queued':
                    endpoint.p_next_state = 'queue'
                elif endpoint.state in ['known', 'abnormal']:
                    endpoint.p_next_state = endpoint.state
                endpoint.endpoint_data['active'] = 0
                endpoint.inactive()
                endpoint.p_prev_states.append(
                    (endpoint.state, int(time.time())))
    # store changes to state
    self.s.store_endpoints()

    # schedule periodic scan of endpoints thread
    self.schedule.every(self.controller['scan_frequency']).seconds.do(
        partial(schedule_job_kickurl, func=self))

    # schedule periodic reinvestigations thread
    self.schedule.every(
        self.controller['reinvestigation_frequency']).seconds.do(
            partial(schedule_job_reinvestigation, func=self))

    # schedule all threads (thread is created here; caller starts it)
    self.schedule_thread = threading.Thread(
        target=partial(
            schedule_thread_worker,
            schedule=self.schedule),
        name='st_worker')
def test_Parser():
    """ Tests Parser """
    # fall back to in-repo dirs when the system faucet dirs are absent
    config_dir = '/etc/faucet'
    log_dir = '/var/log/faucet'
    if not os.path.exists(config_dir):
        config_dir = os.path.join(os.getcwd(), 'faucet')
    if not os.path.exists(log_dir):
        log_dir = os.path.join(os.getcwd(), 'faucet')
    yaml_path = os.path.join(config_dir, 'faucet.yaml')
    log_path = os.path.join(log_dir, 'faucet.log')

    # (action, port, switch) triples covering valid and invalid inputs
    ops = (
        ('mirror', 1, 'switch1'),
        ('mirror', 2, 0x70b3d56cd32e),
        ('mirror', 2, 'switch1'),
        ('mirror', 5, 'switch1'),
        ('mirror', 6, 'bad'),
        ('unmirror', None, None),
        ('shutdown', None, None),
        ('unknown', None, None),
    )

    parser = Parser()
    for action, port, switch in ops:
        parser.config(yaml_path, action, port, switch)
    parser.log(log_path)

    controller = Config().get_config()
    proxy = FaucetProxy(controller)
    for action, port, switch in ops:
        proxy.config(yaml_path, action, port, switch)
    proxy.log(log_path)
def test_get_config_file():
    # None falls back to the default faucet config path
    default_path = Parser.get_config_file(None)
    assert default_path == '/etc/faucet/faucet.yaml'
def test_clear_mirrors():
    """clear_mirrors rewrites the config file in place, so run it against a
    throwaway copy instead of mutating the tracked sample fixture."""
    import shutil
    import tempfile
    sample = os.path.join(os.getcwd(), 'tests/sample_faucet_config.yaml')
    with tempfile.TemporaryDirectory() as tmpdir:
        copied = shutil.copy(sample, tmpdir)
        Parser.clear_mirrors(copied)
def test_parse_rules():
    # parsing the sample config should complete without raising
    sample_config = os.path.join(
        os.getcwd(), 'tests/sample_faucet_config.yaml')
    Parser.parse_rules(sample_config)
def _get_parser(faucetconfgetsetter_cl=FaucetLocalConfGetSetter, **kwargs):
    """Build a Parser wired to the given faucet config getter/setter class."""
    kwargs['faucetconfgetsetter_cl'] = faucetconfgetsetter_cl
    return Parser(**kwargs)
def test_clear_mirrors():
    # operate on a throwaway copy so the sample fixture is never mutated
    with tempfile.TemporaryDirectory() as tmpdir:
        target = os.path.join(tmpdir, os.path.basename(SAMPLE_CONFIG))
        shutil.copy(SAMPLE_CONFIG, tmpdir)
        Parser.clear_mirrors(target)