Example #1
def test_update_history():
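    # Exercise update_history() on an endpoint that already has MAC, IPv4 and
    # IPv6 metadata, using a client constructed without a logger.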
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo',
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
        'ipv4': '0.0.0.0',
        'ipv6': '1212::1'
    }
    endpoint.metadata = {
        'mac_addresses': {
            '00:00:00:00:00:00': {
                '1551805502': {
                    'labels': ['developer workstation'],
                    'behavior': 'normal'
                }
            }
        },
        'ipv4_addresses': {
            '0.0.0.0': {
                'os': 'windows'
            }
        },
        'ipv6_addresses': {
            '1212::1': {
                'os': 'windows'
            }
        }
    }
    metadata = {123: {'behavior': 'normal'}}
    prc = PoseidonRedisClient(None)
    prc.update_history(endpoint, {'00:00:00:00:00:00': metadata},
                       {'0.0.0.0': metadata}, {'1212::1': metadata})
Example #2
def test_update_history():
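    # Same update_history() call as Example #1, but with a real logger attached
    # and metadata entries that carry arbitrary keys.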
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo',
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
        'ipv4': '0.0.0.0',
        'ipv6': '1212::1'
    }
    endpoint.metadata = {
        'mac_addresses': {
            '00:00:00:00:00:00': {
                '1551805502': {
                    'labels': ['developer workstation']
                }
            }
        },
        'ipv4_addresses': {
            '0.0.0.0': {
                'os': 'windows'
            }
        },
        'ipv6_addresses': {
            '1212::1': {
                'os': 'windows'
            }
        }
    }
    metadata = {123: {'foo': 'bar'}}
    logger = logging.getLogger('test')
    prc = PoseidonRedisClient(logger)
    prc.update_history(endpoint, {'00:00:00:00:00:00': metadata},
                       {'0.0.0.0': metadata}, {'1212::1': metadata})
Example #3
def test_parse_metadata():
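    # parse_metadata() looks up the endpoint's poseidon_hash in the networkml
    # results and flattens the matching JSON blob into a single dict.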
    prc = PoseidonRedisClient(None)
    mac_info = {
        b'poseidon_hash': 'myhash',
    }
    ml_info = {
        'myhash':
        b'{"pcap_labels": "mylabels", "classification": {"labels": ["foo", "bar"], "confidences": [1.0, 2.0]}, "decisions": {"behavior": "definitely"}}',
    }
    assert prc.parse_metadata(mac_info, ml_info) == {
        'confidences': [1.0, 2.0],
        'labels': ['foo', 'bar'],
        'pcap_labels': 'mylabels',
        'behavior': 'definitely'
    }
Example #4
def test_parse_networkml_metadata():
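    # parse_networkml_metadata() flattens the networkml JSON for a matching
    # poseidon_hash; a non-matching hash yields an empty dict.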
    logger = logging.getLogger('test')
    prc = PoseidonRedisClient(logger)
    mac_info = {
        b'poseidon_hash': 'myhash',
    }
    ml_info = {
        'myhash':
        b'{"pcap_labels": "mylabels", "classification": {"labels": ["foo", "bar"], "confidences": [1.0, 2.0]}}',
    }
    assert prc.parse_networkml_metadata(mac_info, ml_info) == {
        'confidences': [1.0, 2.0],
        'labels': ['foo', 'bar'],
        'pcap_labels': 'mylabels'
    }
    ml_info = {
        'notmyhash':
        b'{"pcap_labels": "mylabels", "classification": {"labels": ["foo", "bar"], "confidences": [1.0, 2.0]}}',
    }
    assert prc.parse_networkml_metadata(mac_info, ml_info) == {}
Example #5
# Constructor of SDNConnect: parse the trunk-port config, attach the SDN
# controller proxy and connect to Redis before restoring default endpoints.
def __init__(self, controller, first_time=True):
    self.controller = controller
    self.r = None
    self.first_time = first_time
    self.sdnc = None
    self.endpoints = {}
    trunk_ports = self.controller['trunk_ports']
    if isinstance(trunk_ports, str):
        self.trunk_ports = json.loads(trunk_ports)
    else:
        self.trunk_ports = trunk_ports
    self.logger = logger  # 'logger' is expected to be defined at module scope
    self.get_sdn_context()
    self.prc = PoseidonRedisClient(self.logger)
    self.prc.connect()
    if self.first_time:
        self.endpoints = {}
        self.investigations = 0
        self.coprocessing = 0
        self.clear_filters()
        self.default_endpoints()
Example #6
def test_redis_smoke(redis_my, redis_my_proc):
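    # Round-trip smoke test against a live Redis fixture: store an endpoint,
    # read it back and check that endpoint_data and metadata survive intact.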
    logger = logging.getLogger('test')
    logger.setLevel(logging.DEBUG)
    prc = PoseidonRedisClient(logger,
                              host='localhost',
                              port=redis_my_proc.port)
    prc.connect()
    prc.r.flushall()
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo',
        'mac': '00:00:00:00:00:00',
        'segment': 'foo',
        'port': '1',
        'ipv4': '0.0.0.0',
        'ipv6': '1212::1'
    }
    endpoint.metadata = {
        'mac_addresses': {
            '00:00:00:00:00:00': {
                '1551805502': {
                    'labels': ['developer workstation'],
                    'behavior': 'normal'
                }
            }
        },
        'ipv4_addresses': {
            '0.0.0.0': {
                'os': 'windows'
            }
        },
        'ipv6_addresses': {
            '1212::1': {
                'os': 'windows'
            }
        }
    }
    endpoints = {endpoint.name: endpoint}
    prc.store_endpoints(endpoints)
    stored_endpoints = prc.get_stored_endpoints()
    stored_endpoint = stored_endpoints[endpoint.name]
    assert endpoint.endpoint_data == stored_endpoint.endpoint_data
    assert endpoint.metadata == stored_endpoint.metadata
Example #7
class SDNConnect:
    def __init__(self, controller, first_time=True):
        self.controller = controller
        self.r = None
        self.first_time = first_time
        self.sdnc = None
        self.endpoints = {}
        trunk_ports = self.controller['trunk_ports']
        if isinstance(trunk_ports, str):
            self.trunk_ports = json.loads(trunk_ports)
        else:
            self.trunk_ports = trunk_ports
        self.logger = logger
        self.get_sdn_context()
        self.prc = PoseidonRedisClient(self.logger)
        self.prc.connect()
        if self.first_time:
            self.endpoints = {}
            self.investigations = 0
            self.coprocessing = 0
            self.clear_filters()
            self.default_endpoints()

    def mirror_endpoint(self, endpoint):
        ''' mirror an endpoint. '''
        status = Actions(endpoint, self.sdnc).mirror_endpoint()
        if status:
            self.prc.inc_network_tools_counts()
        else:
            self.logger.warning('Unable to mirror the endpoint: {0}'.format(
                endpoint.name))

    def unmirror_endpoint(self, endpoint):
        ''' unmirror an endpoint. '''
        status = Actions(endpoint, self.sdnc).unmirror_endpoint()
        if not status:
            self.logger.warning('Unable to unmirror the endpoint: {0}'.format(
                endpoint.name))

    def clear_filters(self):
        ''' clear any existing filters. '''
        if isinstance(self.sdnc, FaucetProxy):
            self.sdnc.clear_mirrors()

    def default_endpoints(self):
        ''' set endpoints to default state. '''
        self.get_stored_endpoints()
        for endpoint in self.endpoints.values():
            if not endpoint.ignore:
                if endpoint.state != 'inactive':
                    if endpoint.state == 'mirroring':
                        endpoint.p_next_state = 'mirror'
                    elif endpoint.state == 'reinvestigating':
                        endpoint.p_next_state = 'reinvestigate'
                    elif endpoint.state == 'queued':
                        endpoint.p_next_state = 'queue'
                    elif endpoint.state in ['known', 'abnormal']:
                        endpoint.p_next_state = endpoint.state
                    endpoint.endpoint_data['active'] = 0
                    endpoint.inactive()  # pytype: disable=attribute-error
                    endpoint.p_prev_states.append(
                        (endpoint.state, int(time.time())))
        self.store_endpoints()

    def get_stored_endpoints(self):
        ''' load existing endpoints from Redis. '''
        new_endpoints = self.prc.get_stored_endpoints()
        if new_endpoints:
            self.endpoints = new_endpoints

    def get_sdn_context(self):
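        # Pick the controller proxy from config; Faucet is the only real backend.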
        controller_type = self.controller.get('TYPE', None)
        if controller_type == 'faucet':
            self.sdnc = FaucetProxy(self.controller)
        elif controller_type == 'None':
            self.sdnc = None
        else:
            self.logger.error('Unknown SDN controller config: {0}'.format(
                self.controller))

    def endpoint_by_name(self, name):
        return self.endpoints.get(name, None)

    def endpoint_by_hash(self, hash_id):
        return self.endpoint_by_name(hash_id)

    def endpoints_by_ip(self, ip):
        endpoints = [
            endpoint for endpoint in self.endpoints.values()
            if ip == endpoint.endpoint_data.get('ipv4', None)
            or ip == endpoint.endpoint_data.get('ipv6', None)
        ]
        return endpoints

    def endpoints_by_mac(self, mac):
        endpoints = [
            endpoint for endpoint in self.endpoints.values()
            if mac == endpoint.endpoint_data['mac']
        ]
        return endpoints

    @staticmethod
    def _connect_rabbit():
        # Rabbit settings
        exchange = 'topic-poseidon-internal'
        exchange_type = 'topic'

        # Starting rabbit connection
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='RABBIT_SERVER'))

        channel = connection.channel()
        channel.exchange_declare(exchange=exchange,
                                 exchange_type=exchange_type)

        return channel, exchange, connection

    @staticmethod
    def publish_action(action, message):
        try:
            channel, exchange, connection = SDNConnect._connect_rabbit()
            channel.basic_publish(exchange=exchange,
                                  routing_key=action,
                                  body=message)
            connection.close()
        except Exception as e:  # pragma: no cover
            print(str(e))

    def show_endpoints(self, arg):
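        # arg is either 'all' or '<filter> <value>', e.g. 'state active' or 'os windows'.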
        endpoints = []
        if arg == 'all':
            endpoints = list(self.endpoints.values())
        else:
            show_type, arg = arg.split(' ', 1)
            for endpoint in self.endpoints.values():
                if show_type == 'state':
                    if arg == 'active' and endpoint.state != 'inactive':
                        endpoints.append(endpoint)
                    elif arg == 'ignored' and endpoint.ignore:
                        endpoints.append(endpoint)
                    elif endpoint.state == arg:
                        endpoints.append(endpoint)
                elif show_type in ['os', 'behavior', 'role']:
                    mac_addresses = endpoint.metadata.get(
                        'mac_addresses', None)
                    endpoint_mac = endpoint.endpoint_data['mac']
                    if endpoint_mac and mac_addresses and endpoint_mac in mac_addresses:
                        timestamps = mac_addresses[endpoint_mac]
                        try:
                            newest = timestamps[sorted(timestamps)[-1]]
                        except IndexError:
                            newest = None
                        if newest:
                            if 'labels' in newest:
                                if arg.replace('-', ' ') == newest['labels'][0].lower():
                                    endpoints.append(endpoint)
                            if 'behavior' in newest:
                                if arg == newest['behavior'].lower():
                                    endpoints.append(endpoint)

                    # filter by operating system
                    for ip_field in MACHINE_IP_FIELDS:
                        ip_addresses_field = '_'.join((ip_field, 'addresses'))
                        ip_addresses = endpoint.metadata.get(
                            ip_addresses_field, None)
                        machine_ip = endpoint.endpoint_data.get(ip_field, None)
                        if machine_ip and ip_addresses and machine_ip in ip_addresses:
                            metadata = ip_addresses[machine_ip]
                            os = metadata.get('os', None)
                            if os and os.lower() == arg:
                                endpoints.append(endpoint)
        return endpoints

    def check_endpoints(self, messages=None):
        if not self.sdnc:
            return

        retval = {}
        retval['machines'] = None
        retval['resp'] = 'bad'

        current = None
        parsed = None

        try:
            current = self.sdnc.get_endpoints(messages=messages)
            parsed = self.sdnc.format_endpoints(current)
            retval['machines'] = parsed
            retval['resp'] = 'ok'
        except Exception as e:  # pragma: no cover
            self.logger.error(
                'Could not establish connection to controller because {0}.'.
                format(e))
            retval['controller'] = 'Could not establish connection to controller'

        self.find_new_machines(parsed)

    @staticmethod
    def _diff_machine(machine_a, machine_b):
        def _machine_strlines(machine):
            return str(json.dumps(machine, indent=2)).splitlines()

        machine_a_strlines = _machine_strlines(machine_a)
        machine_b_strlines = _machine_strlines(machine_b)
        return '\n'.join(
            difflib.unified_diff(machine_a_strlines, machine_b_strlines, n=1))

    @staticmethod
    def _parse_machine_ip(machine):
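        # Build normalized IP fields per address family: the validated address,
        # its rDNS name and covering subnet, with NO_DATA fallbacks elsewhere.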
        machine_ip_data = {}
        for ip_field, fields in MACHINE_IP_FIELDS.items():
            try:
                raw_field = machine.get(ip_field, None)
                machine_ip = ipaddress.ip_address(raw_field)
                machine_subnet = ipaddress.ip_network(machine_ip).supernet(
                    new_prefix=MACHINE_IP_PREFIXES[ip_field])
            except ValueError:
                machine_ip = None
                machine_subnet = None
            machine_ip_data[ip_field] = ''
            if machine_ip:
                machine_ip_data.update({
                    ip_field:
                    str(machine_ip),
                    '_'.join((ip_field, 'rdns')):
                    get_rdns_lookup(str(machine_ip)),
                    '_'.join((ip_field, 'subnet')):
                    str(machine_subnet)
                })
            for field in fields:
                if field not in machine_ip_data:
                    machine_ip_data[field] = NO_DATA
        return machine_ip_data

    @staticmethod
    def merge_machine_ip(old_machine, new_machine):
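        # Carry IP data forward from the old record when the new sighting has
        # no address for that family.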
        for ip_field, fields in MACHINE_IP_FIELDS.items():
            ip = new_machine.get(ip_field, None)
            old_ip = old_machine.get(ip_field, None)
            if not ip and old_ip:
                new_machine[ip_field] = old_ip
                for field in fields:
                    if field in old_machine:
                        new_machine[field] = old_machine[field]

    def find_new_machines(self, machines):
        '''parse switch structure to find new machines added to network
        since last call'''
        change_acls = False

        for machine in machines:
            machine['ether_vendor'] = get_ether_vendor(
                machine['mac'],
                '/poseidon/poseidon/metadata/nmap-mac-prefixes.txt')
            machine.update(self._parse_machine_ip(machine))
            if 'controller_type' not in machine:
                machine.update({'controller_type': 'none', 'controller': ''})
            # Entries in trunk_ports map a segment name to a '<mac>,<port>' string.
            trunk = False
            for sw, trunk_conf in self.trunk_ports.items():
                trunk_mac, trunk_port = trunk_conf.split(',')[0], trunk_conf.split(',')[1]
                if (sw == machine['segment'] and trunk_port == str(machine['port'])
                        and trunk_mac == machine['mac']):
                    trunk = True

            h = Endpoint.make_hash(machine, trunk=trunk)
            ep = self.endpoints.get(h, None)
            if ep is None:
                change_acls = True
                m = endpoint_factory(h)
                m.p_prev_states.append((m.state, int(time.time())))
                m.endpoint_data = deepcopy(machine)
                self.endpoints[m.name] = m
                self.logger.info('Detected new endpoint: {0}:{1}'.format(
                    m.name, machine))
            else:
                self.merge_machine_ip(ep.endpoint_data, machine)

            if ep and ep.endpoint_data != machine and not ep.ignore:
                diff_txt = self._diff_machine(ep.endpoint_data, machine)
                self.logger.info('Endpoint changed: {0}:{1}'.format(
                    h, diff_txt))
                change_acls = True
                ep.endpoint_data = deepcopy(machine)
                if ep.state == 'inactive' and machine['active'] == 1:
                    if ep.p_next_state in ['known', 'abnormal']:
                        # pytype: disable=attribute-error
                        ep.trigger(ep.p_next_state)
                    else:
                        ep.unknown()  # pytype: disable=attribute-error
                    ep.p_prev_states.append((ep.state, int(time.time())))
                elif ep.state != 'inactive' and machine['active'] == 0:
                    if ep.state in ['mirroring', 'reinvestigating']:
                        self.unmirror_endpoint(ep)
                        if ep.state == 'mirroring':
                            ep.p_next_state = 'mirror'
                        elif ep.state == 'reinvestigating':
                            ep.p_next_state = 'reinvestigate'
                    if ep.state in ['known', 'abnormal']:
                        ep.p_next_state = ep.state
                    ep.inactive()  # pytype: disable=attribute-error
                    ep.p_prev_states.append((ep.state, int(time.time())))

        if change_acls and self.controller['AUTOMATED_ACLS']:
            status = Actions(None, self.sdnc).update_acls(
                rules_file=self.controller['RULES_FILE'],
                endpoints=self.endpoints.values())
            if isinstance(status, list):
                self.logger.info(
                    'Automated ACLs did the following: {0}'.format(status[1]))
                for item in status[1]:
                    machine = {
                        'mac': item[1],
                        'segment': item[2],
                        'port': item[3]
                    }
                    h = Endpoint.make_hash(machine)
                    ep = self.endpoints.get(h, None)
                    if ep:
                        ep.acl_data.append(
                            ((item[0], item[4], item[5]), int(time.time())))
        self.refresh_endpoints()

    def store_endpoints(self):
        ''' store current endpoints in Redis. '''
        self.prc.store_endpoints(self.endpoints)

    def refresh_endpoints(self):
        self.logger.debug('refresh endpoints')
        self.store_endpoints()
        self.get_stored_endpoints()
Example #8
def test_update_networkml(redis_my, redis_my_proc):
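    # End-to-end correlation test: store networkml and p0f results in Redis,
    # then verify they are merged into the endpoint metadata on store_endpoints().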
    logger = logging.getLogger('test')
    logger.setLevel(logging.DEBUG)
    prc = PoseidonRedisClient(logger,
                              host='localhost',
                              port=redis_my_proc.port)
    prc.connect()
    prc.r.flushall()
    source_mac = '00:00:00:00:00:00'
    ipv4 = '1.2.3.4'
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo',
        'mac': source_mac,
        'segment': 'foo',
        'port': '1',
        'ipv4': ipv4,
        'ipv6': '1212::1'
    }
    endpoints = {endpoint.name: endpoint}
    prc.store_endpoints(endpoints)
    networkml_results = {
        'id': '',
        'type': 'metadata',
        'results': {
            'tool': 'networkml',
            'version': 'aversion'
        },
        'file_path': '/files/trace_%s_05-06_23_50_49.pcap' % endpoint.name,
        'data': {
            endpoint.name: {
                'valid': 'true',
                'pcap_labels': 'null',
                'decisions': {
                    'investigate': 'false'
                },
                'classification': {
                    'labels': ['role1', 'role2', 'role3'],
                    'confidences': [0.9, 0.8, 0.7]
                },
                'timestamp': 999.123,
                'source_ip': ipv4,
                'source_mac': source_mac
            },
            'pcap': 'trace_%s_2020-05-06_23_50_49.pcap' % endpoint.name
        }
    }
    prc.store_tool_result(networkml_results, 'networkml')
    good_pof_results = {
        ipv4: {
            'full_os': 'Linux 2.2.x-3.x',
            'short_os': 'Linux',
            'link': 'Ethernet or modem',
            'raw_mtu': '1500',
            'mac': source_mac
        }
    }
    prc.store_p0f_result(good_pof_results)
    prc.store_endpoints(endpoints)
    stored_endpoints = prc.get_stored_endpoints()
    stored_endpoint = stored_endpoints[endpoint.name]
    timestamp = list(
        stored_endpoint.metadata['mac_addresses'][source_mac].keys())[0]
    correlated_metadata = {
        'mac_addresses': {
            source_mac: {
                timestamp: {
                    'labels': ['role1', 'role2', 'role3'],
                    'confidences': [0.9, 0.8, 0.7],
                    'pcap_labels': 'null'
                }
            }
        },
        'ipv4_addresses': {
            ipv4: {
                'os': 'Linux'
            }
        },
        'ipv6_addresses': {
            '1212::1': {}
        }
    }
    assert endpoint.metadata == correlated_metadata
    bad_pof_results = {
        ipv4: {
            'full_os': '',
            'short_os': '',
            'link': '',
            'raw_mtu': '',
            'mac': source_mac
        }
    }
    prc.store_p0f_result(bad_pof_results)
    prc.store_endpoints(endpoints)
    stored_endpoints = prc.get_stored_endpoints()
    stored_endpoint = stored_endpoints[endpoint.name]
    # empty p0f doesn't overwrite
    assert endpoint.metadata == correlated_metadata
Example #9
def test_update_networkml(redis_my, redis_my_proc):
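    # Variant of Example #8 in which the networkml results include a 'behavior'
    # decision that must be carried into the per-MAC metadata.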
    logger = logging.getLogger('test')
    logger.setLevel(logging.DEBUG)
    prc = PoseidonRedisClient(logger,
                              host='localhost',
                              port=redis_my_proc.port)
    prc.connect()
    prc.r.flushall()
    source_mac = '00:00:00:00:00:00'
    ipv4 = '1.2.3.4'
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {
        'tenant': 'foo',
        'mac': source_mac,
        'segment': 'foo',
        'port': '1',
        'ipv4': ipv4,
        'ipv6': '1212::1'
    }
    endpoints = {endpoint.name: endpoint}
    prc.store_endpoints(endpoints)
    networkml_results = {
        "id": "",
        "type": "metadata",
        "results": {
            "tool": "networkml",
            "version": "aversion"
        },
        "file_path": "/files/trace_%s_05-06_23_50_49.pcap" % endpoint.name,
        "data": {
            endpoint.name: {
                "valid": "true",
                "pcap_labels": "null",
                "decisions": {
                    "behavior": "normal",
                    "investigate": "false"
                },
                "classification": {
                    "labels": ["role1", "role2", "role3"],
                    "confidences": [0.9, 0.8, 0.7]
                },
                "timestamp": 999.123,
                "source_ip": ipv4,
                "source_mac": source_mac
            },
            "pcap": "trace_%s_2020-05-06_23_50_49.pcap" % endpoint.name
        }
    }
    prc.store_tool_result(networkml_results, 'networkml')
    good_pof_results = {
        ipv4: {
            "full_os": "Linux 2.2.x-3.x",
            "short_os": "Linux",
            "link": "Ethernet or modem",
            "raw_mtu": "1500",
            "mac": source_mac
        }
    }
    prc.store_p0f_result(good_pof_results)
    prc.store_endpoints(endpoints)
    stored_endpoints = prc.get_stored_endpoints()
    stored_endpoint = stored_endpoints[endpoint.name]
    timestamp = list(
        stored_endpoint.metadata['mac_addresses'][source_mac].keys())[0]
    correlated_metadata = {
        'mac_addresses': {
            source_mac: {
                timestamp: {
                    'labels': ['role1', 'role2', 'role3'],
                    'confidences': [0.9, 0.8, 0.7],
                    'behavior': 'normal',
                    'pcap_labels': 'null'
                }
            }
        },
        'ipv4_addresses': {
            ipv4: {
                'os': 'Linux'
            }
        },
        'ipv6_addresses': {
            '1212::1': {}
        }
    }
    assert endpoint.metadata == correlated_metadata
    bad_pof_results = {
        ipv4: {
            "full_os": "",
            "short_os": "",
            "link": "",
            "raw_mtu": "",
            "mac": source_mac
        }
    }
    prc.store_p0f_result(bad_pof_results)
    prc.store_endpoints(endpoints)
    stored_endpoints = prc.get_stored_endpoints()
    stored_endpoint = stored_endpoints[endpoint.name]
    # empty p0f doesn't overwrite
    assert endpoint.metadata == correlated_metadata