class BGPDataConsumer(object):
    """docstring for BGPDataAggregator"""
    def __init__(
        self,
        route_collector="rrc00",
        rpki_validator="rpki-validator.realmv6.org:8282",
    ):
        self.rc = route_collector

        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])

        # self._start_rtr_manager()

        self.stream = BGPStream()
        self.rec = BGPRecord()

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def _start_rtr_manager(self):
        self.mgr.start()
        # Wait for the initial RTR sync; give up after ~30 seconds.
        waited = 0.0
        while not self.mgr.is_synced():
            sleep(0.2)
            waited += 0.2
            if waited >= 30:
                print("Connection error")
                exit(1)
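A minimal usage sketch for the consumer above, assuming the surrounding module already imports RTRManager, BGPStream, BGPRecord and sleep (the snippet omits its imports):

consumer = BGPDataConsumer(
    route_collector="rrc00",
    rpki_validator="rpki-validator.realmv6.org:8282",
)
consumer._start_rtr_manager()  # __init__ leaves this call commented out, so start the RTR sync explicitly
# ... read records from consumer.stream into consumer.rec here ...
del consumer  # __del__ stops the RTR manager if it is still synced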
Example #2
    def test(self):
        """
        Loads a test file that includes crafted BGP updates as
        input and expected messages as output.
        """
        RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
        RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")
        RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq")
        RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
        RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(RABBITMQ_USER,
                                                     RABBITMQ_PASS,
                                                     RABBITMQ_HOST,
                                                     RABBITMQ_PORT)
        RPKI_VALIDATOR_HOST = os.getenv("RPKI_VALIDATOR_HOST", "routinator")
        RPKI_VALIDATOR_PORT = os.getenv("RPKI_VALIDATOR_PORT", 3323)

        # check RPKI RTR manager connectivity
        while True:
            try:
                rtrmanager = RTRManager(RPKI_VALIDATOR_HOST,
                                        RPKI_VALIDATOR_PORT)
                rtrmanager.start()
                print("Connected to RPKI VALIDATOR '{}:{}'".format(
                    RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT))
                rtrmanager.stop()
                break
            except Exception:
                print("Could not connect to RPKI VALIDATOR '{}:{}'".format(
                    RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT))
                print("Retrying in 30 seconds...")
                time.sleep(30)

        # exchanges
        self.update_exchange = Exchange("bgp-update",
                                        type="direct",
                                        durable=False,
                                        delivery_mode=1)

        self.hijack_exchange = Exchange("hijack-update",
                                        type="direct",
                                        durable=False,
                                        delivery_mode=1)

        self.pg_amq_bridge = Exchange("amq.direct",
                                      type="direct",
                                      durable=True,
                                      delivery_mode=1)

        # queues
        self.update_queue = Queue(
            "detection-testing",
            exchange=self.pg_amq_bridge,
            routing_key="update-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        self.hijack_queue = Queue(
            "hijack-testing",
            exchange=self.hijack_exchange,
            routing_key="update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        self.hijack_db_queue = Queue(
            "hijack-db-testing",
            exchange=self.pg_amq_bridge,
            routing_key="hijack-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        with Connection(RABBITMQ_URI) as connection:
            print("Waiting for pg_amq exchange..")
            Tester.waitExchange(self.pg_amq_bridge, connection.default_channel)
            print("Waiting for hijack exchange..")
            Tester.waitExchange(self.hijack_exchange,
                                connection.default_channel)
            print("Waiting for update exchange..")
            Tester.waitExchange(self.update_exchange,
                                connection.default_channel)

            # query database for the states of the processes
            db_con = self.getDbConnection()
            db_cur = db_con.cursor()
            query = "SELECT name FROM process_states WHERE running=True"
            running_modules = set()
            # wait until all 5 modules are running
            while len(running_modules) < 5:
                db_cur.execute(query)
                entries = db_cur.fetchall()
                for entry in entries:
                    running_modules.add(entry[0])
                db_con.commit()
                print("Running modules: {}".format(running_modules))
                print("{}/5 modules are running.".format(len(running_modules)))
                time.sleep(1)

            Tester.config_request_rpc(connection)

            time.sleep(10)

            for testfile in os.listdir("testfiles/"):
                self.clear()

                self.curr_test = testfile
                self.messages = {}
                # load test
                with open("testfiles/{}".format(testfile), "r") as f:
                    self.messages = json.load(f)

                send_len = len(self.messages)

                with nested(
                        connection.Consumer(
                            self.hijack_queue,
                            callbacks=[self.validate_message],
                            accept=["ujson"],
                        ),
                        connection.Consumer(
                            self.update_queue,
                            callbacks=[self.validate_message],
                            accept=["ujson", "txtjson"],
                        ),
                        connection.Consumer(
                            self.hijack_db_queue,
                            callbacks=[self.validate_message],
                            accept=["ujson", "txtjson"],
                        ),
                ):
                    send_cnt = 0
                    # send and validate all messages in the messages.json file
                    while send_cnt < send_len:
                        self.curr_idx = send_cnt
                        self.send_next_message(connection)
                        send_cnt += 1
                        # sleep until we receive all expected messages
                        while self.curr_idx != send_cnt:
                            time.sleep(0.1)
                            try:
                                connection.drain_events(timeout=10)
                            except socket.timeout:
                                # avoid infinite loop by timeout
                                assert False, "Consumer timeout"

            connection.close()

        time.sleep(5)
        self.supervisor.supervisor.stopAllProcesses()

        self.waitProcess("listener", 0)  # 0 STOPPED
        self.waitProcess("clock", 0)  # 0 STOPPED
        self.waitProcess("detection", 0)  # 0 STOPPED
        self.waitProcess("configuration", 0)  # 0 STOPPED
        self.waitProcess("database", 0)  # 0 STOPPED
        self.waitProcess("observer", 0)  # 0 STOPPED
Example #3
    def test(self):
        """
        Loads a test file that includes crafted BGP updates as
        input and expected messages as output.
        """
        RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
        RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")
        RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq")
        RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
        RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(
            RABBITMQ_USER, RABBITMQ_PASS, RABBITMQ_HOST, RABBITMQ_PORT
        )
        RPKI_VALIDATOR_HOST = os.getenv("RPKI_VALIDATOR_HOST", "routinator")
        RPKI_VALIDATOR_PORT = os.getenv("RPKI_VALIDATOR_PORT", 3323)

        # check RPKI RTR manager connectivity
        while True:
            try:
                rtrmanager = RTRManager(RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)
                rtrmanager.start()
                print(
                    "Connected to RPKI VALIDATOR '{}:{}'".format(
                        RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                    )
                )
                rtrmanager.stop()
                break
            except Exception:
                print(
                    "Could not connect to RPKI VALIDATOR '{}:{}'".format(
                        RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                    )
                )
                print("Retrying in 30 seconds...")
                time.sleep(30)

        # exchanges
        self.update_exchange = Exchange(
            "bgp-update", type="direct", durable=False, delivery_mode=1
        )

        self.hijack_exchange = Exchange(
            "hijack-update", type="direct", durable=False, delivery_mode=1
        )

        self.pg_amq_bridge = Exchange(
            "amq.direct", type="direct", durable=True, delivery_mode=1
        )

        # queues
        self.update_queue = Queue(
            "detection-testing",
            exchange=self.pg_amq_bridge,
            routing_key="update-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        self.hijack_queue = Queue(
            "hijack-testing",
            exchange=self.hijack_exchange,
            routing_key="update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        self.hijack_db_queue = Queue(
            "hijack-db-testing",
            exchange=self.pg_amq_bridge,
            routing_key="hijack-update",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )

        with Connection(RABBITMQ_URI) as connection:
            # print("Waiting for pg_amq exchange..")
            # Tester.waitExchange(self.pg_amq_bridge, connection.default_channel)
            # print("Waiting for hijack exchange..")
            # Tester.waitExchange(self.hijack_exchange, connection.default_channel)
            # print("Waiting for update exchange..")
            # Tester.waitExchange(self.update_exchange, connection.default_channel)

            # wait for the data worker dependencies to start
            wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

            while True:
                try:
                    r = requests.get(
                        "http://{}:{}/config".format(CONFIGURATION_HOST, REST_PORT)
                    )
                    result = r.json()
                    assert len(result) > 0
                    break
                except Exception:
                    print("exception")
                time.sleep(1)

            time.sleep(1)

            for testfile in os.listdir("testfiles/"):
                self.clear()

                self.curr_test = testfile
                self.messages = {}
                # load test
                with open("testfiles/{}".format(testfile), "r") as f:
                    self.messages = json.load(f)

                send_len = len(self.messages)

                with nested(
                    connection.Consumer(
                        self.hijack_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson"],
                    ),
                    connection.Consumer(
                        self.update_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson", "txtjson"],
                    ),
                    connection.Consumer(
                        self.hijack_db_queue,
                        callbacks=[self.validate_message],
                        accept=["ujson", "txtjson"],
                    ),
                ):
                    send_cnt = 0
                    # send and validate all messages in the messages.json file
                    while send_cnt < send_len:
                        self.curr_idx = send_cnt
                        self.send_next_message(connection)
                        send_cnt += 1
                        # sleep until we receive all expected messages
                        while self.curr_idx != send_cnt:
                            time.sleep(0.1)
                            try:
                                connection.drain_events(timeout=10)
                            except socket.timeout:
                                # avoid infinite loop by timeout
                                assert False, "Consumer timeout"
Example #4
from rtrlib import RTRManager, PfxvState

def callback(pfx_record, data):
    print(pfx_record)

mgr = RTRManager('rpki-validator.realmv6.org', 8282)
mgr.start()
result = mgr.validate(55803, '223.25.52.0', 23)

mgr.for_each_ipv4_record(callback, None)

mgr.stop()

print('\n--', result)
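The result returned by validate() exposes a state object; its integer value follows the valid/unknown/invalid ordering that the aggregator examples below rely on (0: valid, 1: unknown, 2: invalid). A short hedged sketch of inspecting it, reusing the validator endpoint from Example #4:

from rtrlib import RTRManager

mgr = RTRManager('rpki-validator.realmv6.org', 8282)
mgr.start()
result = mgr.validate(55803, '223.25.52.0', 23)

# Map the enum ordering (0: valid, 1: unknown, 2: invalid) to a label;
# this mirrors how the aggregator examples index their counters.
labels = {0: 'valid', 1: 'unknown', 2: 'invalid'}
print(labels.get(result.state.value, 'unexpected state'))

mgr.stop()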
Example #5
class BGPLocalAggregator(object):
    """docstring for BGPDataAggregator"""
    def __init__(self,
                 filters={'collector': ['rrc00']},
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db"):
        self.stream = BGPStream(
            '/Users/mx/Projects/Uni/bgp-group/bgp_dump.txt')
        self.filters = filters
        self.route_table = dict()
        self.i = 0

        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()

        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))

        # for filter_type, filter_array in filters.items():
        #     for filter_value in filter_array:
        #         self.stream.add_filter(filter_type, filter_value)

        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = defaultdict(int)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)

        # self.db = DBConnector(db, read_only=False)

        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()

        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def start_collecting(self, start_timestamp, end_timestamp=0):
        # self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = self.stream.get_next_record()
        while (rec):
            if rec.status == "valid":
                # self.i += 1
                # if self.i % 1000000 == 0:
                #     print(self.i // 1000000, end=' ')

                elem = rec.get_next_elem()
                while (elem):
                    origin_asn = ""
                    if elem.type in ('R', 'A'):
                        origin_asn = elem.fields['as-path'].split(' ')[-1]

                    try:
                        origin_asn = int(origin_asn)
                    except ValueError:
                        elem = rec.get_next_elem()
                        continue

                    prefix = elem.fields['prefix']
                    ip, mask_len = split_prefix(prefix)

                    # Check if v4 or v6
                    is_v4 = check_ipv4(ip)

                    validated = self.mgr.validate(origin_asn, ip, mask_len)
                    old_elem = self.route_table[rec.collector][(
                        elem.peer_asn, elem.peer_address)].get(prefix)
                    if elem.type in ('R', 'A'):
                        self.route_table[rec.collector][(
                            elem.peer_asn, elem.peer_address)][prefix] = Route(
                                origin_asn, rec.collector, prefix, is_v4,
                                validated.state.value)

                        if old_elem:
                            if old_elem.type != validated.state.value:
                                """Make use of the fact that:
                                    0: valid in enum
                                    1: unknown in enum
                                    2: invalid in enum
                                We designed the namedtuple the way to represent that. So valid is a pos 3
                                and so on.
                                """
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + old_elem.type] -= 1
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + validated.state] += 1
                        else:
                            if not self.metadata_vp[rec.collector].get(
                                    elem.peer_asn):
                                """Init the metadata-entry if it not exists already"""
                                self.metadata_vp[rec.collector][elem.peer_asn] = \
                                    [elem.peer_asn, rec.collector, rec.time, 0, 0, 0]

                            # Update the vantage point metadata the same way as above.
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + validated.state.value] += 1
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time

                            self.peers[rec.collector][elem.peer_asn] += 1

                            if is_v4:
                                self.prefix4[rec.collector][prefix] += 1
                            else:
                                self.prefix6[rec.collector][prefix] += 1

                    elif elem.type == 'W':
                        if old_elem:

                            # Reduce the number of IPv4/v6 Addresses for this prefix
                            if is_v4:
                                self.prefix4[rec.collector][prefix] -= 1
                                if self.prefix4[rec.collector][prefix] == 0:
                                    del (self.prefix4[rec.collector][prefix])
                            else:
                                self.prefix6[rec.collector][prefix] -= 1
                                if self.prefix6[rec.collector][prefix] == 0:
                                    del (self.prefix6[rec.collector][prefix])

                            # Reduce number of prefixes belonging to this ASN
                            self.peers[rec.collector][elem.peer_asn] -= 1
                            if self.peers[rec.collector][elem.peer_asn] == 0:
                                del (self.peers[rec.collector][elem.peer_asn])

                            # Update the metadata valid/unknown/invalid count
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + old_elem.type] -= 1

                            # Update the metadata timestamp
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time

                            # Remove the entry from the route_table
                            self.route_table[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)].pop(prefix, None)

                        else:
                            # TODO: log withdrawals that have no matching route entry.
                            pass

                    elem = rec.get_next_elem()

            rec = self.stream.get_next_record()
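The aggregator classes in Examples #5 and #6 call split_prefix() and check_ipv4() helpers whose definitions are not part of the scraped snippets; a minimal sketch of what they might look like, inferred from how they are used (an assumption, not the original helpers):

import ipaddress

def split_prefix(prefix):
    # "223.25.52.0/23" -> ("223.25.52.0", 23)
    ip, mask_len = prefix.split("/")
    return ip, int(mask_len)

def check_ipv4(ip):
    # True for an IPv4 address string, False for IPv6.
    return isinstance(ipaddress.ip_address(ip), ipaddress.IPv4Address)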
Example #6
class BGPDataAggregator(object):
    """docstring for BGPDataAggregator"""
    def __init__(
        self,
        filters={"collector": ["rrc00"]},
        rpki_validator="rpki-validator.realmv6.org:8282",
        settings_file="../settings.json",
    ):
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0

        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()

        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))

        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)

        for collector in filters["collector"]:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = RouteCollectorMeta(None, 0, 0, 0, 0)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)

        settings = get_settings(settings_file)
        settings["db"]["password"] = os.environ["PGPASS"]

        self.db = DBConnector(settings["db"])

        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()

        self.start_collecting(start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def push_data(self, timestamp):
        print("UPDATE:", timestamp)
        self.db.update_vp_meta(self.metadata_vp)
        for rc in self.metadata_rc.keys():
            self.metadata_rc[rc] = RouteCollectorMeta(
                rc,
                timestamp,
                len(self.peers[rc].keys()),
                len(self.prefix4[rc]),
                len(self.prefix6[rc]),
            )
        self.db.update_rc_meta(self.metadata_rc)

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        next_timestamp = init_next_timestamp(start_timestamp, 5)
        print("Next Push to DB at:", next_timestamp)
        self.stream.start()
        rec = BGPRecord()
        while (self.stream.get_next_record(rec)):
            if rec.status == "valid":
                if rec.time >= next_timestamp:
                    self.push_data(next_timestamp)
                    next_timestamp += 300

                elem = rec.get_next_elem()
                while elem:
                    origin_asn = ""
                    if elem.type is "R" or elem.type is "A":
                        origin_asn = elem.fields["as-path"].split(" ")[-1]

                    try:
                        origin_asn = int(origin_asn)
                    except ValueError:
                        elem = rec.get_next_elem()
                        continue

                    prefix = elem.fields["prefix"]
                    ip, mask_len = split_prefix(prefix)

                    # Check if v4 or v6
                    is_v4 = check_ipv4(ip)

                    validated = self.mgr.validate(origin_asn, ip, mask_len)
                    old_elem = self.route_table[rec.collector][(
                        elem.peer_asn, elem.peer_address)].get(prefix)
                    if elem.type is "R" or elem.type is "A":
                        self.route_table[rec.collector][(
                            elem.peer_asn, elem.peer_address)][prefix] = Route(
                                origin_asn,
                                rec.collector,
                                prefix,
                                is_v4,
                                validated.state.value,
                            )

                        if old_elem:
                            if old_elem.type != validated.state.value:
                                """Make use of the fact that:
                                    0: valid in enum
                                    1: unknown in enum
                                    2: invalid in enum
                                We designed the namedtuple the way to represent
                                that. So valid is a pos 3 and so on.
                                """
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn,
                                    elem.peer_address)][4 + old_elem.type] -= 1
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn, elem.peer_address
                                )][4 + validated.state.value] += 1
                        else:
                            if not self.metadata_vp[rec.collector].get(
                                (elem.peer_asn, elem.peer_address)):
                                """Init the metadata-entry if it not exists already"""
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn, elem.peer_address)] = [
                                        elem.peer_asn,
                                        elem.peer_address,
                                        rec.collector,
                                        next_timestamp,
                                        0,
                                        0,
                                        0,
                                    ]

                            # Update the vantage point metadata the same way as above.
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][4 +
                                                    validated.state.value] += 1
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][3] = next_timestamp

                            self.peers[rec.collector][elem.peer_asn] += 1

                            if is_v4:
                                self.prefix4[rec.collector][prefix] += 1
                            else:
                                self.prefix6[rec.collector][prefix] += 1

                    elif elem.type is "W":
                        if old_elem:

                            # Reduce the number of IPv4/v6 Addresses for this prefix
                            if is_v4:
                                self.prefix4[rec.collector][prefix] -= 1
                                if self.prefix4[rec.collector][prefix] == 0:
                                    del (self.prefix4[rec.collector][prefix])
                            else:
                                self.prefix6[rec.collector][prefix] -= 1
                                if self.prefix6[rec.collector][prefix] == 0:
                                    del (self.prefix6[rec.collector][prefix])

                            # Reduce number of prefixes belonging to this ASN
                            self.peers[rec.collector][elem.peer_asn] -= 1
                            if self.peers[rec.collector][elem.peer_asn] == 0:
                                del (self.peers[rec.collector][elem.peer_asn])

                            # Update the metadata valid/unknown/invalid count
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][4 + old_elem.type] -= 1

                            # Update the metadata timestamp
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][3] = next_timestamp

                            # Remove the entry from the route_table
                            self.route_table[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)].pop(prefix, None)

                        else:
                            # TODO: log withdrawals that have no matching route entry.
                            pass

                    elem = rec.get_next_elem()
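Example #6 also constructs RouteCollectorMeta objects whose definition is not shown; judging from the push_data() call they could be a namedtuple along these lines (field names are guesses inferred from usage):

from collections import namedtuple

# Guessed field layout: collector name, last push timestamp, number of peers,
# number of IPv4 prefixes, number of IPv6 prefixes (see push_data() above).
RouteCollectorMeta = namedtuple(
    "RouteCollectorMeta",
    ["name", "timestamp", "peers", "prefixes_v4", "prefixes_v6"],
)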
Example #7
class BGPCounter(object):
    """docstring for BGPDataAggregator"""
    def __init__(self,
                 filters={'collector': ['rrc00']},
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db"):
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0

        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)

        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)

        # self.db = DBConnector(db, read_only=False)

        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()

        self.counter = Counter()

        start_timestamp = self.get_push_timestamp(datetime.now(timezone.utc))
        # self.start_collecting(start_timestamp, int(datetime.now(timezone.utc).strftime("%s")))
        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def get_push_timestamp(self, start_time):
        hours = [0, 8, 16, 24]
        # find the most recent push boundary (0/8/16 h UTC) before start_time
        for i in range(len(hours) - 1):
            if hours[i + 1] > start_time.hour:
                break

        start_time = start_time.replace(hour=hours[i],
                                        minute=0,
                                        second=0,
                                        microsecond=0)

        return int(start_time.timestamp())

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = BGPRecord()
        act_dump = "unknown"
        while (self.stream.get_next_record(rec)):
            self.i += 1
            if self.i % 10000 == 0:
                print(self.i)
            if rec.status == "valid":
                if (act_dump != rec.dump_position):
                    act_dump = rec.dump_position
                    print('Dump Position:', rec.dump_position)
                elem = rec.get_next_elem()
                while (elem):

                    self.counter.update([elem.type])  # elem.type is a single character, e.g. 'A', 'W', 'R'

                    elem = rec.get_next_elem()

        print(self.counter)
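To illustrate the rounding performed by get_push_timestamp(), a small hedged check that bypasses __init__ so no BGPStream or RTR session is opened:

from datetime import datetime, timezone

counter = BGPCounter.__new__(BGPCounter)  # skip __init__: no stream, no RTR session
ts = counter.get_push_timestamp(datetime(2021, 1, 1, 13, 45, tzinfo=timezone.utc))
# 13:45 UTC lies between the 08:00 and 16:00 boundaries, so the previous
# boundary (08:00 UTC) is returned as a Unix timestamp.
assert ts == int(datetime(2021, 1, 1, 8, 0, tzinfo=timezone.utc).timestamp())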