Example #1
 def test_invalid_entry(self, verify_epoch):
     rev_info = self._create_rev_info("1-1", 1, 2)
     verify_epoch.return_value = ConnectedHashTree.EPOCH_PAST
     rev_cache = RevCache()
     # Call
     ntools.assert_false(rev_cache.add(rev_info))
     assert_these_calls(verify_epoch, [call(rev_info.p.epoch)])
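
For context, here is a minimal sketch of the behaviour this test pins down: add() is expected to reject a revocation whose epoch check reports a past epoch, before the cache is touched. The class and constants below are hypothetical stand-ins for illustration, not the SCION RevCache implementation.

# Hypothetical stand-in; the real RevCache lives in the SCION codebase.
EPOCH_OK, EPOCH_PAST, EPOCH_FUTURE = 0, 1, 2

class EpochCheckingCache:
    def __init__(self, verify_epoch):
        self._cache = {}
        self._verify_epoch = verify_epoch  # injected so tests can stub it out

    def add(self, key, epoch, rev_info):
        if self._verify_epoch(epoch) != EPOCH_OK:
            return False  # stale or not-yet-valid revocation: nothing is cached
        self._cache[key] = rev_info
        return True

# Mirrors the test above: a past epoch makes add() return False.
cache = EpochCheckingCache(verify_epoch=lambda _epoch: EPOCH_PAST)
assert cache.add(("1-1", 1), 2, "rev_info") is False
assert ("1-1", 1) not in cache._cache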
Example #2
    def __init__(self,
                 conf_dir,
                 addr,
                 api_addr,
                 run_local_api=False,
                 port=None,
                 prom_export=None):
        """
        Initialize an instance of the class SCIONDaemon.
        """
        super().__init__("sciond",
                         conf_dir,
                         prom_export=prom_export,
                         public=[(addr, port)])
        up_labels = {**self._labels, "type": "up"} if self._labels else None
        down_labels = {
            **self._labels, "type": "down"
        } if self._labels else None
        core_labels = {
            **self._labels, "type": "core"
        } if self._labels else None
        self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                         labels=up_labels)
        self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                           labels=down_labels)
        self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                           labels=core_labels)
        self.peer_revs = RevCache()
        # Keep track of requested paths.
        self.requested_paths = ExpiringDict(self.MAX_REQS, self.PATH_REQ_TOUT)
        self.req_path_lock = threading.Lock()
        self._api_sock = None
        self.daemon_thread = None
        os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
        self.api_addr = (api_addr or os.path.join(
            SCIOND_API_SOCKDIR, "%s.sock" % self.addr.isd_as))

        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REPLY: self.handle_path_reply,
                PMT.REVOCATION: self.handle_revocation,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }

        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation
            },
        }

        if run_local_api:
            self._api_sock = ReliableSocket(bind_unix=(self.api_addr,
                                                       "sciond"))
            self._socks.add(self._api_sock, self.handle_accept)
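
The CTRL_PLD_CLASS_MAP and SCMP_PLD_CLASS_MAP dictionaries above are two-level dispatch tables: payload class, then message type, mapped to a bound handler method. They are presumably consumed by the message-handling code in the SCIONElement base class; the helper below is only an illustrative sketch of that lookup, with placeholder names (dispatch, pld_class, msg_type) that are not part of the SCION API.

# Illustrative dispatch sketch; not the actual SCIONElement routing code.
def dispatch(class_map, pld_class, msg_type, cpld, meta):
    handler = class_map.get(pld_class, {}).get(msg_type)
    if handler is None:
        raise KeyError("no handler registered for (%r, %r)" % (pld_class, msg_type))
    return handler(cpld, meta)

# e.g. dispatch(self.CTRL_PLD_CLASS_MAP, PayloadClass.PATH, PMT.REPLY, cpld, meta)
# would end up calling self.handle_path_reply(cpld, meta).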
Example #3
 def test_invalid_entry(self):
     rev_info = self._create_rev_info("1-ff00:0:300",
                                      1,
                                      timestamp=int(time.time()) - 20)
     rev_cache = RevCache()
     # Call
     ntools.assert_false(rev_cache.add(rev_info))
     assert_these_calls(rev_info.rev_info().active, [call()])
Example #4
File: base.py Project: ercanucan/scion
 def __init__(self, server_id, conf_dir, prom_export=None):
     """
     :param str server_id: server identifier.
     :param str conf_dir: configuration directory.
     :param str prom_export: prometheus export address.
     """
     super().__init__(server_id, conf_dir, prom_export=prom_export)
     down_labels = {
         **self._labels, "type": "down"
     } if self._labels else None
     core_labels = {
         **self._labels, "type": "core"
     } if self._labels else None
     self.down_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO,
                                        labels=down_labels)
     self.core_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO,
                                        labels=core_labels)
     self.pending_req = defaultdict(list)  # Dict of pending requests.
     self.pen_req_lock = threading.Lock()
     self._request_logger = None
     # Used when l/cPS doesn't have up/dw-path.
     self.waiting_targets = defaultdict(list)
     self.revocations = RevCache(labels=self._labels)
     # A mapping from (hash tree root of AS, IFID) to segments
     self.htroot_if2seg = ExpiringDict(1000, HASHTREE_TTL)
     self.htroot_if2seglock = Lock()
     self.CTRL_PLD_CLASS_MAP = {
         PayloadClass.PATH: {
             PMT.REQUEST: self.path_resolution,
             PMT.REPLY: self.handle_path_segment_record,
             PMT.REG: self.handle_path_segment_record,
             PMT.REVOCATION: self._handle_revocation,
             PMT.SYNC: self.handle_path_segment_record,
         },
         PayloadClass.CERT: {
             CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
             CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
             CertMgmtType.TRC_REPLY: self.process_trc_reply,
             CertMgmtType.TRC_REQ: self.process_trc_request,
         },
     }
     self.SCMP_PLD_CLASS_MAP = {
         SCMPClass.PATH: {
             SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
         },
     }
     self._segs_to_zk = ExpiringDict(1000, self.SEGS_TO_ZK_TTL)
     self._revs_to_zk = ExpiringDict(1000, HASHTREE_EPOCH_TIME)
     self._zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                   [(self.addr.host, self._port)])
     self.zk = Zookeeper(self.topology.isd_as, PATH_SERVICE,
                         self._zkid.copy().pack(), self.topology.zookeepers)
     self.zk.retry("Joining party", self.zk.party_setup)
     self.path_cache = ZkSharedCache(self.zk, self.ZK_PATH_CACHE_PATH,
                                     self._handle_paths_from_zk)
     self.rev_cache = ZkSharedCache(self.zk, self.ZK_REV_CACHE_PATH,
                                    self._rev_entries_handler)
     self._init_request_logger()
Example #5
 def test_expired_entry(self):
     key = ("1-1", 1)
     default = "default"
     rev_info = "rev_info"
     rev_cache = RevCache()
     rev_cache._cache[key] = rev_info
     rev_cache._validate_entry = create_mock_full(return_value=False)
     # Call
     ntools.eq_(rev_cache.get(key, default=default), default)
     # Tests
     assert_these_calls(rev_cache._validate_entry, [call(rev_info)])
Example #6
 def test_expired_entry(self):
     key = ("1-ff00:0:300", 1)
     default = "default"
     rev_info = "rev_info"
     srev_info = create_mock_full({"rev_info()": rev_info})
     rev_cache = RevCache()
     rev_cache._cache[key] = srev_info
     rev_cache._check_active = create_mock_full(return_value=False)
     # Call
     ntools.eq_(rev_cache.get(key, default=default), default)
     # Tests
     assert_these_calls(rev_cache._check_active, [call(srev_info)])
Example #7
 def test(self):
     key = ("1-ff00:0:300", 1)
     rev_info = self._create_rev_info(key[0], key[1])
     rev_cache = RevCache()
     rev_cache.get = create_mock()
     rev_cache.get.return_value = None
     # Call
     ntools.assert_true(rev_cache.add(rev_info))
     # Tests
     ntools.eq_(rev_cache._cache[key], rev_info)
     assert_these_calls(rev_cache.get, [call(key)])
     assert_these_calls(rev_info.rev_info().active, [call()])
Example #8
 def test(self, verify_epoch):
     key = ("1-1", 1)
     rev_info = self._create_rev_info(key[0], key[1], 2)
     verify_epoch.return_value = ConnectedHashTree.EPOCH_OK
     rev_cache = RevCache()
     rev_cache.get = create_mock()
     rev_cache.get.return_value = None
     # Call
     ntools.assert_true(rev_cache.add(rev_info))
     # Tests
     ntools.eq_(rev_cache._cache[key], rev_info)
     assert_these_calls(rev_cache.get, [call(key)])
     assert_these_calls(verify_epoch, [call(rev_info.p.epoch)])
Example #9
 def test_older_entry_exists(self):
     key = ("1-ff00:0:300", 1)
     now = int(time.time())
     rev_info1 = self._create_rev_info(key[0], key[1], timestamp=now)
     rev_info2 = self._create_rev_info(key[0], key[1], timestamp=now + 1)
     rev_cache = RevCache()
     rev_cache.get = create_mock_full(return_value=rev_info1)
     rev_cache._cache[key] = rev_info1
     # Call
     ntools.assert_true(rev_cache.add(rev_info2))
     # Tests
     ntools.eq_(rev_cache._cache[key], rev_info2)
     assert_these_calls(rev_info2.rev_info().active, [call()])
     assert_these_calls(rev_cache.get, [call(key), call().rev_info()])
Example #10
 def test_same_entry_exists(self, verify_epoch):
     key = ("1-1", 1)
     rev_info1 = self._create_rev_info(key[0], key[1], 1)
     rev_info2 = self._create_rev_info(key[0], key[1], 1)
     verify_epoch.return_value = ConnectedHashTree.EPOCH_OK
     rev_cache = RevCache()
     rev_cache.get = create_mock_full(return_value=rev_info1)
     rev_cache._cache[key] = rev_info1
     # Call
     ntools.assert_false(rev_cache.add(rev_info2))
     # Tests
     ntools.eq_(rev_cache._cache[key], rev_info1)
     assert_these_calls(verify_epoch, [call(rev_info2.p.epoch)])
     assert_these_calls(rev_cache.get, [call(key)])
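
Examples #9 and #10 together pin down the replacement rule: the cache keeps at most one revocation per (ISD-AS, IFID) key, and an incoming revocation only replaces the stored one when it is strictly newer. The sketch below is a hypothetical illustration of that rule, with a plain timestamp standing in for whatever freshness measure the cached objects expose.

# Hypothetical sketch of "newest revocation wins" semantics.
class NewestOnlyCache:
    def __init__(self):
        self._cache = {}

    def add(self, key, timestamp, rev_info):
        stored = self._cache.get(key)
        if stored is not None and timestamp <= stored[0]:
            return False  # same or older revocation: keep the existing entry
        self._cache[key] = (timestamp, rev_info)
        return True

c = NewestOnlyCache()
assert c.add(("1-ff00:0:300", 1), 100, "old") is True
assert c.add(("1-ff00:0:300", 1), 101, "new") is True   # newer replaces older
assert c.add(("1-ff00:0:300", 1), 101, "dup") is False  # same timestamp is rejected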
Example #11
    def __init__(self,
                 conf_dir,
                 addr,
                 api_addr,
                 run_local_api=False,
                 port=None):
        """
        Initialize an instance of the class SCIONDaemon.
        """
        super().__init__("sciond", conf_dir, host_addr=addr, port=port)
        # TODO replace by pathstore instance
        self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                         max_res_no=self.MAX_SEG_NO)
        self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                           max_res_no=self.MAX_SEG_NO)
        self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                           max_res_no=self.MAX_SEG_NO)
        self.peer_revs = RevCache()
        req_name = "SCIONDaemon Requests %s" % self.addr.isd_as
        self.requests = RequestHandler.start(
            req_name,
            self._check_segments,
            self._fetch_segments,
            self._reply_segments,
            ttl=self.TIMEOUT,
            key_map=self._req_key_map,
        )
        self._api_sock = None
        self.daemon_thread = None
        os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
        self.api_addr = (api_addr or os.path.join(
            SCIOND_API_SOCKDIR, "%s.sock" % self.addr.isd_as))

        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REPLY: self.handle_path_reply,
                PMT.REVOCATION: self.handle_revocation,
            }
        }

        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation
            },
        }

        if run_local_api:
            self._api_sock = ReliableSocket(bind=(self.api_addr, "sciond"))
            self._socks.add(self._api_sock, self.handle_accept)
Example #12
    def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
                 port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
        """
        Initialize an instance of the class SCIONDaemon.
        """
        super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export, public=[(addr, port)])
        up_labels = {**self._labels, "type": "up"} if self._labels else None
        down_labels = {**self._labels, "type": "down"} if self._labels else None
        core_labels = {**self._labels, "type": "core"} if self._labels else None
        self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
        self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
        self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
        self.rev_cache = RevCache()
        # Keep track of requested paths.
        self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
        self.req_path_lock = threading.Lock()
        self._api_sock = None
        self.daemon_thread = None
        os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
        self.api_addr = (api_addr or get_default_sciond_path())
        if delete_sock:
            try:
                os.remove(self.api_addr)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    logging.error("Could not delete socket %s: %s" % (self.api_addr, e))

        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REPLY: self.handle_path_reply,
                PMT.REVOCATION: self.handle_revocation,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }

        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH:
                {SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
        }

        if run_local_api:
            self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
            self._socks.add(self._api_sock, self.handle_accept)
Example #13
 def __init__(self, server_id, conf_dir):
     """
     :param str server_id: server identifier.
     :param str conf_dir: configuration directory.
     """
     super().__init__(server_id, conf_dir)
     self.down_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
     self.core_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
     self.pending_req = defaultdict(list)  # Dict of pending requests.
     # Used when l/cPS doesn't have up/dw-path.
     self.waiting_targets = defaultdict(list)
     self.revocations = RevCache()
     # A mapping from (hash tree root of AS, IFID) to segments
     self.htroot_if2seg = ExpiringDict(1000, HASHTREE_TTL)
     self.htroot_if2seglock = Lock()
     self.CTRL_PLD_CLASS_MAP = {
         PayloadClass.PATH: {
             PMT.REQUEST: self.path_resolution,
             PMT.REPLY: self.handle_path_segment_record,
             PMT.REG: self.handle_path_segment_record,
             PMT.REVOCATION: self._handle_revocation,
             PMT.SYNC: self.handle_path_segment_record,
         },
     }
     self.SCMP_PLD_CLASS_MAP = {
         SCMPClass.PATH: {
             SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
         },
     }
     self._segs_to_zk = deque()
     self._revs_to_zk = deque()
     self._zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                   [(self.addr.host, self._port)])
     self.zk = Zookeeper(self.topology.isd_as, PATH_SERVICE,
                         self._zkid.copy().pack(), self.topology.zookeepers)
     self.zk.retry("Joining party", self.zk.party_setup)
     self.path_cache = ZkSharedCache(self.zk, self.ZK_PATH_CACHE_PATH,
                                     self._cached_entries_handler)
     self.rev_cache = ZkSharedCache(self.zk, self.ZK_REV_CACHE_PATH,
                                    self._rev_entries_handler)
Example #14
    def test_with_free_up(self):
        key1 = ("1-ff00:0:300", 1)
        key2 = ("1-ff00:0:301", 1)
        now = int(time.time())
        rev_info1 = self._create_rev_info(key1[0], key1[1], timestamp=now)
        rev_info2 = self._create_rev_info(key2[0], key2[1], timestamp=now + 1)

        def check_active_side_effect(srev_info):
            del rev_cache._cache[(srev_info.rev_info().isd_as(),
                                  srev_info.rev_info().p.ifID)]
            return False

        rev_cache = RevCache(capacity=1)
        rev_cache._cache[key1] = rev_info1
        rev_cache._check_active = create_mock()
        rev_cache._check_active.side_effect = check_active_side_effect
        rev_cache.get = create_mock()
        rev_cache.get.return_value = None
        # Call
        ntools.assert_true(rev_cache.add(rev_info2))
        # Tests
        ntools.eq_(rev_cache._cache[key2], rev_info2)
        ntools.assert_true(key1 not in rev_cache._cache)
        assert_these_calls(rev_info2.rev_info().active, [call()])
        assert_these_calls(rev_cache.get, [call(key2)])
Example #15
 def test_with_no_free_up(self):
     key1 = ("1-ff00:0:300", 1)
     key2 = ("1-ff00:0:301", 1)
     now = int(time.time())
     rev_info1 = self._create_rev_info(key1[0], key1[1], timestamp=now)
     rev_info2 = self._create_rev_info(key2[0], key2[1], timestamp=now + 1)
     rev_cache = RevCache(capacity=1)
     rev_cache._cache[key1] = rev_info1
     rev_cache._check_active = create_mock_full(return_value=True)
     rev_cache.get = create_mock()
     rev_cache.get.return_value = None
     # Call
     ntools.assert_false(rev_cache.add(rev_info2))
     # Tests
     ntools.assert_true(key1 in rev_cache._cache)
     assert_these_calls(rev_info2.rev_info().active, [call()])
     assert_these_calls(rev_cache.get, [call(key2)])
Example #16
 def test_with_no_free_up(self, verify_epoch):
     key1 = ("1-1", 1)
     key2 = ("1-2", 1)
     rev_info1 = self._create_rev_info(key1[0], key1[1], 1)
     rev_info2 = self._create_rev_info(key2[0], key2[1], 2)
     verify_epoch.return_value = ConnectedHashTree.EPOCH_OK
     rev_cache = RevCache(capacity=1)
     rev_cache._cache[key1] = rev_info1
     rev_cache._validate_entry = create_mock_full(return_value=True)
     rev_cache.get = create_mock()
     rev_cache.get.return_value = None
     # Call
     ntools.assert_false(rev_cache.add(rev_info2))
     # Tests
     ntools.assert_true(key1 in rev_cache._cache)
     assert_these_calls(verify_epoch, [call(rev_info2.p.epoch)])
     assert_these_calls(rev_cache.get, [call(key2)])
Example #17
    def test_with_free_up(self, verify_epoch):
        key1 = ("1-1", 1)
        key2 = ("1-2", 1)
        rev_info1 = self._create_rev_info(key1[0], key1[1], 1)
        rev_info2 = self._create_rev_info(key2[0], key2[1], 2)
        verify_epoch.return_value = ConnectedHashTree.EPOCH_OK

        def validate_entry_side_effect(rev_info):
            del rev_cache._cache[(rev_info.isd_as(), rev_info.p.ifID)]
            return False

        rev_cache = RevCache(capacity=1)
        rev_cache._cache[key1] = rev_info1
        rev_cache._validate_entry = create_mock()
        rev_cache._validate_entry.side_effect = validate_entry_side_effect
        rev_cache.get = create_mock()
        rev_cache.get.return_value = None
        # Call
        ntools.assert_true(rev_cache.add(rev_info2))
        # Tests
        ntools.eq_(rev_cache._cache[key2], rev_info2)
        ntools.assert_true(key1 not in rev_cache._cache)
        assert_these_calls(verify_epoch, [call(rev_info2.p.epoch)])
        assert_these_calls(rev_cache.get, [call(key2)])
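
Examples #14 through #17 exercise the capacity path: when the cache is full, add() first tries to evict entries that are no longer active (or no longer valid) to make room, and only fails if nothing could be freed. The class below is a minimal, hypothetical sketch of that eviction-then-insert flow; is_active is a caller-supplied predicate standing in for the cache's own activity check.

# Hypothetical sketch of capacity handling with best-effort eviction.
class BoundedCache:
    def __init__(self, capacity, is_active):
        self.capacity = capacity
        self.is_active = is_active
        self._cache = {}

    def add(self, key, rev_info):
        if key not in self._cache and len(self._cache) >= self.capacity:
            # Drop entries that are no longer active to free up space.
            for k in [k for k, v in self._cache.items() if not self.is_active(v)]:
                del self._cache[k]
            if len(self._cache) >= self.capacity:
                return False  # cache is full and nothing could be freed
        self._cache[key] = rev_info
        return True

full = BoundedCache(capacity=1, is_active=lambda _v: True)
full.add(("1-ff00:0:300", 1), "r1")
assert full.add(("1-ff00:0:301", 1), "r2") is False  # mirrors test_with_no_free_up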
Example #18
File: base.py Project: stschwar/scion
class PathServer(SCIONElement, metaclass=ABCMeta):
    """
    The SCION Path Server.
    """
    SERVICE_TYPE = PATH_SERVICE
    MAX_SEG_NO = 5  # TODO: replace by config variable.
    # ZK path for incoming PATHs
    ZK_PATH_CACHE_PATH = "path_cache"
    # ZK path for incoming REVs
    ZK_REV_CACHE_PATH = "rev_cache"
    # Max number of segments per propagation packet
    PROP_LIMIT = 5
    # Max number of segments per ZK cache entry
    ZK_SHARE_LIMIT = 10
    # Time to store revocations in zookeeper
    ZK_REV_OBJ_MAX_AGE = HASHTREE_EPOCH_TIME
    # TTL of segments in the queue for ZK (in seconds)
    SEGS_TO_ZK_TTL = 10 * 60

    def __init__(self,
                 server_id,
                 conf_dir,
                 spki_cache_dir=GEN_CACHE_PATH,
                 prom_export=None):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        :param str prom_export: prometheus export address.
        """
        super().__init__(server_id,
                         conf_dir,
                         spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export)
        self.config = self._load_as_conf()
        down_labels = {
            **self._labels, "type": "down"
        } if self._labels else None
        core_labels = {
            **self._labels, "type": "core"
        } if self._labels else None
        self.down_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO,
                                           labels=down_labels)
        self.core_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO,
                                           labels=core_labels)
        # Dict of pending requests.
        self.pending_req = defaultdict(
            lambda: ExpiringDict(1000, PATH_REQ_TOUT))
        self.pen_req_lock = threading.Lock()
        self._request_logger = None
        # Used when l/cPS doesn't have up/dw-path.
        self.waiting_targets = defaultdict(list)
        self.revocations = RevCache(labels=self._labels)
        # A mapping from (hash tree root of AS, IFID) to segments
        self.htroot_if2seg = ExpiringDict(1000,
                                          self.config.revocation_tree_ttl)
        self.htroot_if2seglock = Lock()
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.IFSTATE_INFOS: self.handle_ifstate_infos,
                PMT.REQUEST: self.path_resolution,
                PMT.REPLY: self.handle_path_reply,
                PMT.REG: self.handle_seg_recs,
                PMT.REVOCATION: self._handle_revocation,
                PMT.SYNC: self.handle_seg_recs,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }
        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
            },
        }
        self._segs_to_zk = ExpiringDict(1000, self.SEGS_TO_ZK_TTL)
        self._revs_to_zk = ExpiringDict(1000, HASHTREE_EPOCH_TIME)
        self._zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                      [(self.addr.host, self._port)])
        self.zk = Zookeeper(self.topology.isd_as, PATH_SERVICE,
                            self._zkid.copy().pack(), self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        self.path_cache = ZkSharedCache(self.zk, self.ZK_PATH_CACHE_PATH,
                                        self._handle_paths_from_zk)
        self.rev_cache = ZkSharedCache(self.zk, self.ZK_REV_CACHE_PATH,
                                       self._rev_entries_handler)
        self._init_request_logger()

    def worker(self):
        """
        Worker thread that takes care of reading shared paths from ZK, and
        handling master election for core servers.
        """
        worker_cycle = 1.0
        start = SCIONTime.get_time()
        while self.run_flag.is_set():
            sleep_interval(start, worker_cycle, "cPS.worker cycle",
                           self._quiet_startup())
            start = SCIONTime.get_time()
            try:
                self.zk.wait_connected()
                self.path_cache.process()
                self.rev_cache.process()
                # Try to become a master.
                ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
                if ret:  # Either got the lock, or already had it.
                    if ret == ZK_LOCK_SUCCESS:
                        logging.info("Became master")
                    self.path_cache.expire(self.config.propagation_time * 10)
                    self.rev_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
            except ZkNoConnection:
                logging.warning('worker(): ZkNoConnection')
                pass
            self._update_master()
            self._propagate_and_sync()
            self._handle_pending_requests()
            self._update_metrics()

    def _update_master(self):
        pass

    def _rev_entries_handler(self, raw_entries):
        for raw in raw_entries:
            rev_info = RevocationInfo.from_raw(raw)
            try:
                rev_info.validate()
            except SCIONBaseError as e:
                logging.warning("Failed to validate RevInfo from zk: %s\n%s",
                                e, rev_info.short_desc())
                continue
            self._remove_revoked_segments(rev_info)

    def _add_rev_mappings(self, pcb):
        """
        Add interface revocation token to segment ID mappings.
        """
        segment_id = pcb.get_hops_hash()
        with self.htroot_if2seglock:
            for asm in pcb.iter_asms():
                hof = asm.pcbm(0).hof()
                egress_h = (asm.p.hashTreeRoot, hof.egress_if)
                self.htroot_if2seg.setdefault(egress_h, set()).add(segment_id)
                ingress_h = (asm.p.hashTreeRoot, hof.ingress_if)
                self.htroot_if2seg.setdefault(ingress_h, set()).add(segment_id)

    @abstractmethod
    def _handle_up_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _handle_down_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _handle_core_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    def _add_segment(self, pcb, seg_db, name, reverse=False):
        res = seg_db.update(pcb, reverse=reverse)
        if res == DBResult.ENTRY_ADDED:
            self._add_rev_mappings(pcb)
            logging.info("%s-Segment registered: %s", name, pcb.short_id())
            return True
        elif res == DBResult.ENTRY_UPDATED:
            self._add_rev_mappings(pcb)
            logging.debug("%s-Segment updated: %s", name, pcb.short_id())
        return False

    def handle_ifstate_infos(self, cpld, meta):
        """
        Handles IFStateInfos.

        :param IFStatePayload infos: The state info objects.
        """
        pmgt = cpld.union
        infos = pmgt.union
        assert isinstance(infos, IFStatePayload), type(infos)
        for info in infos.iter_infos():
            if not info.p.active and info.p.revInfo:
                rev_info = info.rev_info()
                try:
                    rev_info.validate()
                except SCIONBaseError as e:
                    logging.warning(
                        "Failed to validate IFStateInfo RevInfo from %s: %s\n%s",
                        meta, e, rev_info.short_desc())
                    continue
                self._handle_revocation(CtrlPayload(PathMgmt(info.rev_info())),
                                        meta)

    def _handle_scmp_revocation(self, pld, meta):
        rev_info = RevocationInfo.from_raw(pld.info.rev_info)
        try:
            rev_info.validate()
        except SCIONBaseError as e:
            logging.warning("Failed to validate SCMP RevInfo from %s: %s\n%s",
                            meta, e, rev_info.short_desc())
            return
        self._handle_revocation(CtrlPayload(PathMgmt(rev_info)), meta)

    def _handle_revocation(self, cpld, meta):
        """
        Handles a revocation of a segment, interface or hop.

        :param rev_info: The RevocationInfo object.
        """
        pmgt = cpld.union
        rev_info = pmgt.union
        assert isinstance(rev_info, RevocationInfo), type(rev_info)
        # Validate before checking for presence in self.revocations, as that will trigger an assert
        # failure if the rev_info is invalid.
        try:
            rev_info.validate()
        except SCIONBaseError as e:
            # Validation already done in the IFStateInfo and SCMP paths, so a failure here means
            # it's from a CtrlPld.
            logging.warning(
                "Failed to validate CtrlPld RevInfo from %s: %s\n%s", meta, e,
                rev_info.short_desc())
            return

        if rev_info in self.revocations:
            return
        logging.debug("Received revocation from %s: %s", meta,
                      rev_info.short_desc())
        try:
            rev_info.validate()
        except SCIONBaseError as e:
            logging.warning("Failed to validate RevInfo from %s: %s", meta, e)
            return
        if meta.ia[0] != self.addr.isd_as[0]:
            logging.info(
                "Dropping revocation received from a different ISD. Src: %s RevInfo: %s"
                % (meta, rev_info.short_desc()))
            return
        self.revocations.add(rev_info)
        self._revs_to_zk[rev_info] = rev_info.copy().pack(
        )  # have to pack copy
        # Remove segments that contain the revoked interface.
        self._remove_revoked_segments(rev_info)
        # Forward revocation to other path servers.
        self._forward_revocation(rev_info, meta)

    def _remove_revoked_segments(self, rev_info):
        """
        Try the previous and next hashes as possible AS tokens,
        and delete any segment that matches.

        :param rev_info: The revocation info
        :type rev_info: RevocationInfo
        """
        if ConnectedHashTree.verify_epoch(
                rev_info.p.epoch) != ConnectedHashTree.EPOCH_OK:
            return
        (hash01, hash12) = ConnectedHashTree.get_possible_hashes(rev_info)
        if_id = rev_info.p.ifID

        with self.htroot_if2seglock:
            down_segs_removed = 0
            core_segs_removed = 0
            up_segs_removed = 0
            for h in (hash01, hash12):
                for sid in self.htroot_if2seg.pop((h, if_id), []):
                    if self.down_segments.delete(
                            sid) == DBResult.ENTRY_DELETED:
                        down_segs_removed += 1
                    if self.core_segments.delete(
                            sid) == DBResult.ENTRY_DELETED:
                        core_segs_removed += 1
                    if not self.topology.is_core_as:
                        if (self.up_segments.delete(sid) ==
                                DBResult.ENTRY_DELETED):
                            up_segs_removed += 1
            logging.debug(
                "Removed segments revoked by [%s]: UP: %d DOWN: %d CORE: %d" %
                (rev_info.short_desc(), up_segs_removed, down_segs_removed,
                 core_segs_removed))

    @abstractmethod
    def _forward_revocation(self, rev_info, meta):
        """
        Forwards a revocation to other path servers that need to be notified.

        :param rev_info: The RevInfo object.
        :param meta: The MessageMeta object.
        """
        raise NotImplementedError

    def _send_path_segments(self,
                            req,
                            req_id,
                            meta,
                            logger,
                            up=None,
                            core=None,
                            down=None):
        """
        Sends path-segments to requester (depending on Path Server's location).
        """
        up = up or set()
        core = core or set()
        down = down or set()
        all_segs = up | core | down
        if not all_segs:
            logger.warning("No segments to send for request: %s from: %s" %
                           (req.short_desc(), meta))
            return
        revs_to_add = self._peer_revs_for_segs(all_segs)
        recs = PathSegmentRecords.from_values(
            {
                PST.UP: up,
                PST.CORE: core,
                PST.DOWN: down
            }, revs_to_add)
        pld = PathSegmentReply.from_values(req.copy(), recs)
        self.send_meta(CtrlPayload(PathMgmt(pld), req_id=req_id), meta)
        logger.info("Sending PATH_REPLY with %d segment(s).", len(all_segs))

    def _peer_revs_for_segs(self, segs):
        """Returns a list of peer revocations for segments in 'segs'."""
        def _handle_one_seg(seg):
            for asm in seg.iter_asms():
                for pcbm in asm.iter_pcbms(1):
                    hof = pcbm.hof()
                    for if_id in [hof.ingress_if, hof.egress_if]:
                        rev_info = self.revocations.get((asm.isd_as(), if_id))
                        if rev_info:
                            revs_to_add.add(rev_info.copy())
                            return

        revs_to_add = set()
        for seg in segs:
            _handle_one_seg(seg)

        return list(revs_to_add)

    def _handle_pending_requests(self):
        rem_keys = []
        # Serve pending requests.
        with self.pen_req_lock:
            for key in self.pending_req:
                for req_key, (req, req_id, meta,
                              logger) in self.pending_req[key].items():
                    if self.path_resolution(CtrlPayload(PathMgmt(req),
                                                        req_id=req_id),
                                            meta,
                                            new_request=False,
                                            logger=logger):
                        meta.close()
                        del self.pending_req[key][req_key]
                if not self.pending_req[key]:
                    rem_keys.append(key)
            for key in rem_keys:
                del self.pending_req[key]

    def _handle_paths_from_zk(self, raw_entries):
        """
        Handles cached paths through ZK, passed as a list.
        """
        for raw in raw_entries:
            recs = PathSegmentRecords.from_raw(raw)
            for type_, pcb in recs.iter_pcbs():
                seg_meta = PathSegMeta(pcb,
                                       self.continue_seg_processing,
                                       type_=type_,
                                       params={'from_zk': True})
                self._process_path_seg(seg_meta)
        if raw_entries:
            logging.debug("Processed %s segments from ZK", len(raw_entries))

    def handle_path_reply(self, cpld, meta):
        pmgt = cpld.union
        reply = pmgt.union
        assert isinstance(reply, PathSegmentReply), type(reply)
        self._handle_seg_recs(reply.recs(), cpld.req_id, meta)

    def handle_seg_recs(self, cpld, meta):
        pmgt = cpld.union
        seg_recs = pmgt.union
        self._handle_seg_recs(seg_recs, cpld.req_id, meta)

    def _handle_seg_recs(self, seg_recs, req_id, meta):
        """
        Handles paths received from the network.
        """
        assert isinstance(seg_recs, PathSegmentRecords), type(seg_recs)
        params = self._dispatch_params(seg_recs, meta)
        # Add revocations for peer interfaces included in the path segments.
        for rev_info in seg_recs.iter_rev_infos():
            self.revocations.add(rev_info)
        # Verify pcbs and process them
        for type_, pcb in seg_recs.iter_pcbs():
            seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta,
                                   type_, params)
            self._process_path_seg(seg_meta, req_id)

    def continue_seg_processing(self, seg_meta):
        """
        For every path segment (that can be verified) received from the network
        or ZK this function gets called to continue the processing for the
        segment.
        The segment is added to pathdb and pending requests are checked.
        """
        pcb = seg_meta.seg
        logging.debug("Successfully verified PCB %s" % pcb.short_id())
        type_ = seg_meta.type
        params = seg_meta.params
        self.handle_ext(pcb)
        self._dispatch_segment_record(type_, pcb, **params)
        self._handle_pending_requests()

    def handle_ext(self, pcb):
        """
        Handle beacon extensions.
        """
        # Handle PCB extensions:
        for asm in pcb.iter_asms():
            pol = asm.routing_pol_ext()
            if pol:
                self.handle_routing_pol_ext(pol)

    def handle_routing_pol_ext(self, ext):
        # TODO(Sezer): Implement extension handling
        logging.debug("Routing policy extension: %s" % ext)

    def _dispatch_segment_record(self, type_, seg, **kwargs):
        # Check that segment does not contain a revoked interface.
        if not self._validate_segment(seg):
            return
        handle_map = {
            PST.UP: self._handle_up_segment_record,
            PST.CORE: self._handle_core_segment_record,
            PST.DOWN: self._handle_down_segment_record,
        }
        handle_map[type_](seg, **kwargs)

    def _validate_segment(self, seg):
        """
        Check segment for revoked upstream/downstream interfaces.

        :param seg: The PathSegment object.
        :return: False, if the path segment contains a revoked upstream/
            downstream interface (not peer). True otherwise.
        """
        for asm in seg.iter_asms():
            pcbm = asm.pcbm(0)
            for if_id in [pcbm.hof().ingress_if, pcbm.hof().egress_if]:
                rev_info = self.revocations.get((asm.isd_as(), if_id))
                if rev_info:
                    logging.debug(
                        "Found revoked interface (%d, %s) in segment %s." %
                        (rev_info.p.ifID, rev_info.isd_as(), seg.short_desc()))
                    return False
        return True

    def _dispatch_params(self, pld, meta):
        return {}

    def _propagate_and_sync(self):
        self._share_via_zk()
        self._share_revs_via_zk()

    def _gen_prop_recs(self, container, limit=PROP_LIMIT):
        count = 0
        pcbs = defaultdict(list)
        while container:
            try:
                _, (type_, pcb) = container.popitem(last=False)
            except KeyError:
                continue
            count += 1
            pcbs[type_].append(pcb.copy())
            if count >= limit:
                yield (pcbs)
                count = 0
                pcbs = defaultdict(list)
        if pcbs:
            yield (pcbs)

    @abstractmethod
    def path_resolution(self,
                        path_request,
                        meta,
                        new_request=True,
                        logger=None):
        """
        Handles all types of path request.
        """
        raise NotImplementedError

    def _handle_waiting_targets(self, pcb):
        """
        Handle any queries that are waiting for a path to any core AS in an ISD.
        """
        dst_ia = pcb.first_ia()
        if not self.is_core_as(dst_ia):
            logging.warning("Invalid waiting target, not a core AS: %s",
                            dst_ia)
            return
        self._send_waiting_queries(dst_ia[0], pcb)

    def _send_waiting_queries(self, dst_isd, pcb):
        targets = self.waiting_targets[dst_isd]
        if not targets:
            return
        path = pcb.get_path(reverse_direction=True)
        src_ia = pcb.first_ia()
        while targets:
            (seg_req, logger) = targets.pop(0)
            meta = self._build_meta(ia=src_ia,
                                    path=path,
                                    host=SVCType.PS_A,
                                    reuse=True)
            req_id = mk_ctrl_req_id()
            self.send_meta(CtrlPayload(PathMgmt(seg_req), req_id=req_id), meta)
            logger.info("Waiting request (%s) sent to %s via %s [id: %016x]",
                        seg_req.short_desc(), meta, pcb.short_desc(), req_id)

    def _share_via_zk(self):
        if not self._segs_to_zk:
            return
        logging.info("Sharing %d segment(s) via ZK", len(self._segs_to_zk))
        for pcb_dict in self._gen_prop_recs(self._segs_to_zk,
                                            limit=self.ZK_SHARE_LIMIT):
            seg_recs = PathSegmentRecords.from_values(pcb_dict)
            self._zk_write(seg_recs.pack())

    def _share_revs_via_zk(self):
        if not self._revs_to_zk:
            return
        logging.info("Sharing %d revocation(s) via ZK", len(self._revs_to_zk))
        while self._revs_to_zk:
            try:
                data = self._revs_to_zk.popitem(last=False)[1]
            except KeyError:
                continue
            self._zk_write_rev(data)

    def _zk_write(self, data):
        hash_ = crypto_hash(data).hex()
        try:
            self.path_cache.store("%s-%s" % (hash_, SCIONTime.get_time()),
                                  data)
        except ZkNoConnection:
            logging.warning("Unable to store segment(s) in shared path: "
                            "no connection to ZK")

    def _zk_write_rev(self, data):
        hash_ = crypto_hash(data).hex()
        try:
            self.rev_cache.store("%s-%s" % (hash_, SCIONTime.get_time()), data)
        except ZkNoConnection:
            logging.warning("Unable to store revocation(s) in shared path: "
                            "no connection to ZK")

    def _init_request_logger(self):
        """
        Initializes the request logger.
        """
        self._request_logger = logging.getLogger("RequestLogger")
        # Create new formatter to include the request in the log.
        formatter = Rfc3339Formatter(
            "%(asctime)s [%(levelname)s] (%(threadName)s) %(message)s "
            "{id=%(id)s, from=%(from)s}")
        add_formatter('RequestLogger', formatter)

    def get_request_logger(self, req_id, meta):
        """
        Returns a logger adapter for a request.
        """
        # Create a logger for the request to log with context.
        return logging.LoggerAdapter(self._request_logger, {
            "id": req_id,
            "from": str(meta)
        })

    def _init_metrics(self):
        super()._init_metrics()
        REQS_TOTAL.labels(**self._labels).inc(0)
        REQS_PENDING.labels(**self._labels).set(0)
        SEGS_TO_ZK.labels(**self._labels).set(0)
        REVS_TO_ZK.labels(**self._labels).set(0)
        HT_ROOT_MAPPTINGS.labels(**self._labels).set(0)
        IS_MASTER.labels(**self._labels).set(0)

    def _update_metrics(self):
        """
        Updates all Gauge metrics. Subclasses can update their own metrics but must
        call the superclass' implementation.
        """
        if not self._labels:
            return
        # Update pending requests metric.
        # XXX(shitz): This could become a performance problem should there ever be
        # a large amount of pending requests (>100'000).
        total_pending = 0
        with self.pen_req_lock:
            for reqs in self.pending_req.values():
                total_pending += len(reqs)
        REQS_PENDING.labels(**self._labels).set(total_pending)
        # Update SEGS_TO_ZK and REVS_TO_ZK metrics.
        SEGS_TO_ZK.labels(**self._labels).set(len(self._segs_to_zk))
        REVS_TO_ZK.labels(**self._labels).set(len(self._revs_to_zk))
        # Update HT_ROOT_MAPPTINGS metric.
        HT_ROOT_MAPPTINGS.labels(**self._labels).set(len(self.htroot_if2seg))
        # Update IS_MASTER metric.
        IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))

    def run(self):
        """
        Run an instance of the Path Server.
        """
        threading.Thread(target=thread_safety_net,
                         args=(self.worker, ),
                         name="PS.worker",
                         daemon=True).start()
        threading.Thread(target=thread_safety_net,
                         args=(self._check_trc_cert_reqs, ),
                         name="Elem.check_trc_cert_reqs",
                         daemon=True).start()
        super().run()
Example #19
 def test_not_revoked(self):
     pcb = self._mk_pcb()
     inst = Mock()
     inst.check_revoked_interface = SCIONElement.check_revoked_interface
     ntools.eq_(inst.check_revoked_interface(inst, pcb, RevCache()), True)
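
Example #19 only asserts the "nothing revoked" outcome. For intuition, the check presumably follows the same pattern as _validate_segment() in Example #18: walk the segment's AS markings and look each ingress/egress interface up in the revocation cache. The helper below is an assumption-labelled sketch, not SCIONElement's actual method; asm_ifids stands in for the (ISD-AS, interface ID) pairs the real code derives from pcb.iter_asms().

# Hypothetical sketch of an "is this segment free of revoked interfaces?" check.
def segment_clear_of_revocations(asm_ifids, rev_cache):
    for isd_as, if_id in asm_ifids:
        if rev_cache.get((isd_as, if_id)):
            return False  # a hop of this segment uses a revoked interface
    return True

# With an empty cache, as in the test above, every segment passes the check.
assert segment_clear_of_revocations([("1-ff00:0:300", 1)], {}) is True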
Example #20
class SCIONDaemon(SCIONElement):
    """
    The SCION Daemon used for retrieving and combining paths.
    """
    MAX_REQS = 1024
    # Time a path segment is cached at a host (in seconds).
    SEGMENT_TTL = 300
    # Empty Path TTL
    EMPTY_PATH_TTL = SEGMENT_TTL

    def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
                 port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
        """
        Initialize an instance of the class SCIONDaemon.
        """
        super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export, public=[(addr, port)])
        up_labels = {**self._labels, "type": "up"} if self._labels else None
        down_labels = {**self._labels, "type": "down"} if self._labels else None
        core_labels = {**self._labels, "type": "core"} if self._labels else None
        self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
        self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
        self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
        self.rev_cache = RevCache()
        # Keep track of requested paths.
        self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
        self.req_path_lock = threading.Lock()
        self._api_sock = None
        self.daemon_thread = None
        os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
        self.api_addr = (api_addr or get_default_sciond_path())
        if delete_sock:
            try:
                os.remove(self.api_addr)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    logging.error("Could not delete socket %s: %s" % (self.api_addr, e))

        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REPLY: self.handle_path_reply,
                PMT.REVOCATION: self.handle_revocation,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }

        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH:
                {SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
        }

        if run_local_api:
            self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
            self._socks.add(self._api_sock, self.handle_accept)

    @classmethod
    def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
        """
        Initializes and starts a SCIOND instance.
        """
        inst = cls(conf_dir, addr, api_addr, run_local_api, port)
        name = "SCIONDaemon.run %s" % inst.addr.isd_as
        inst.daemon_thread = threading.Thread(
            target=thread_safety_net, args=(inst.run,), name=name, daemon=True)
        inst.daemon_thread.start()
        logging.debug("sciond started with api_addr = %s", inst.api_addr)

    def _get_msg_meta(self, packet, addr, sock):
        if sock != self._udp_sock:
            return packet, SockOnlyMetadata.from_values(sock)  # API socket
        else:
            return super()._get_msg_meta(packet, addr, sock)

    def handle_msg_meta(self, msg, meta):
        """
        Main routine to handle incoming SCION messages.
        """
        if isinstance(meta, SockOnlyMetadata):  # From SCIOND API
            try:
                sciond_msg = SCIONDMsg.from_raw(msg)
            except SCIONParseError as err:
                logging.error(str(err))
                return
            self.api_handle_request(sciond_msg, meta)
            return
        super().handle_msg_meta(msg, meta)

    def handle_path_reply(self, cpld, meta):
        """
        Handle path reply from local path server.
        """
        pmgt = cpld.union
        path_reply = pmgt.union
        assert isinstance(path_reply, PathSegmentReply), type(path_reply)
        recs = path_reply.recs()
        for srev_info in recs.iter_srev_infos():
            self.check_revocation(srev_info, lambda x: self.continue_revocation_processing(
                                  srev_info) if not x else False, meta)

        req = path_reply.req()
        key = req.dst_ia(), req.flags()
        with self.req_path_lock:
            r = self.requested_paths.get(key)
            if r:
                r.notify_reply(path_reply)
            else:
                logging.warning("No outstanding request found for %s", key)
        for type_, pcb in recs.iter_pcbs():
            seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
                                   meta, type_, params=(r,))
            self._process_path_seg(seg_meta, cpld.req_id)

    def continue_revocation_processing(self, srev_info):
        self.rev_cache.add(srev_info)
        self.remove_revoked_segments(srev_info.rev_info())

    def continue_seg_processing(self, seg_meta):
        """
        For every path segment (that can be verified) received from the path
        server this function gets called to continue the processing for the
        segment.
        The segment is added to pathdb and pending requests are checked.
        """
        pcb = seg_meta.seg
        type_ = seg_meta.type
        # Check that segment does not contain a revoked interface.
        if not self.check_revoked_interface(pcb, self.rev_cache):
            return
        map_ = {
            PST.UP: self._handle_up_seg,
            PST.DOWN: self._handle_down_seg,
            PST.CORE: self._handle_core_seg,
        }
        map_[type_](pcb)
        r = seg_meta.params[0]
        if r:
            r.verified_segment()

    def _handle_up_seg(self, pcb):
        if self.addr.isd_as != pcb.last_ia():
            return None
        if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Up segment added: %s", pcb.short_desc())
            return pcb.first_ia()
        return None

    def _handle_down_seg(self, pcb):
        last_ia = pcb.last_ia()
        if self.addr.isd_as == last_ia:
            return None
        if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Down segment added: %s", pcb.short_desc())
            return last_ia
        return None

    def _handle_core_seg(self, pcb):
        if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Core segment added: %s", pcb.short_desc())
            return pcb.first_ia()
        return None

    def api_handle_request(self, msg, meta):
        """
        Handle local API's requests.
        """
        mtype = msg.type()
        if mtype == SMT.PATH_REQUEST:
            threading.Thread(
                target=thread_safety_net,
                args=(self._api_handle_path_request, msg, meta),
                daemon=True).start()
        elif mtype == SMT.REVOCATION:
            self._api_handle_rev_notification(msg, meta)
        elif mtype == SMT.AS_REQUEST:
            self._api_handle_as_request(msg, meta)
        elif mtype == SMT.IF_REQUEST:
            self._api_handle_if_request(msg, meta)
        elif mtype == SMT.SERVICE_REQUEST:
            self._api_handle_service_request(msg, meta)
        elif mtype == SMT.SEGTYPEHOP_REQUEST:
            self._api_handle_seg_type_request(msg, meta)
        else:
            logging.warning(
                "API: type %s not supported.", TypeBase.to_str(mtype))

    def _api_handle_path_request(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDPathRequest), type(request)
        req_id = pld.id

        dst_ia = request.dst_ia()
        src_ia = request.src_ia()
        if not src_ia:
            src_ia = self.addr.isd_as
        thread = threading.current_thread()
        thread.name = "SCIONDaemon API id:%s %s -> %s" % (
            thread.ident, src_ia, dst_ia)
        paths, error = self.get_paths(dst_ia, flush=request.p.flags.refresh)
        if request.p.maxPaths:
            paths = paths[:request.p.maxPaths]

        reply_entries = []
        for path_meta in paths:
            fwd_if = path_meta.fwd_path().get_fwd_if()
            # Set dummy host addr if path is empty.
            haddr, port = None, None
            if fwd_if:
                br = self.ifid2br[fwd_if]
                haddr, port = br.int_addrs.public[0]
            addrs = [haddr] if haddr else []
            first_hop = HostInfo.from_values(addrs, port)
            reply_entry = SCIONDPathReplyEntry.from_values(
                path_meta, first_hop)
            reply_entries.append(reply_entry)
        logging.debug("Replying to api request for %s with %d paths:\n%s",
                      dst_ia, len(paths), "\n".join([p.short_desc() for p in paths]))
        self._send_path_reply(req_id, reply_entries, error, meta)

    def _send_path_reply(self, req_id, reply_entries, error, meta):
        path_reply = SCIONDMsg(SCIONDPathReply.from_values(reply_entries, error), req_id)
        self.send_meta(path_reply.pack(), meta)

    def _api_handle_as_request(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDASInfoRequest), type(request)
        req_ia = request.isd_as()
        if not req_ia or req_ia.is_zero() or req_ia == self.addr.isd_as:
            # Request is for the local AS.
            reply_entry = SCIONDASInfoReplyEntry.from_values(
                self.addr.isd_as, self.is_core_as(), self.topology.mtu)
        else:
            # Request is for a remote AS.
            reply_entry = SCIONDASInfoReplyEntry.from_values(req_ia, self.is_core_as(req_ia))
        as_reply = SCIONDMsg(SCIONDASInfoReply.from_values([reply_entry]), pld.id)
        self.send_meta(as_reply.pack(), meta)

    def _api_handle_if_request(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDIFInfoRequest), type(request)
        all_brs = request.all_brs()
        if_list = []
        if not all_brs:
            if_list = list(request.iter_ids())
        if_entries = []
        for if_id, br in self.ifid2br.items():
            if all_brs or if_id in if_list:
                br_addr, br_port = br.int_addrs.public[0]
                info = HostInfo.from_values([br_addr], br_port)
                reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info)
                if_entries.append(reply_entry)
        if_reply = SCIONDMsg(SCIONDIFInfoReply.from_values(if_entries), pld.id)
        self.send_meta(if_reply.pack(), meta)

    def _api_handle_service_request(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDServiceInfoRequest), type(request)
        all_svcs = request.all_services()
        svc_list = []
        if not all_svcs:
            svc_list = list(request.iter_service_types())
        svc_entries = []
        for svc_type in ServiceType.all():
            if all_svcs or svc_type in svc_list:
                lookup_res = self.dns_query_topo(svc_type)
                host_infos = []
                for addr, port in lookup_res:
                    host_infos.append(HostInfo.from_values([addr], port))
                reply_entry = SCIONDServiceInfoReplyEntry.from_values(
                    svc_type, host_infos)
                svc_entries.append(reply_entry)
        svc_reply = SCIONDMsg(SCIONDServiceInfoReply.from_values(svc_entries), pld.id)
        self.send_meta(svc_reply.pack(), meta)

    def _api_handle_rev_notification(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDRevNotification), type(request)
        self.handle_revocation(CtrlPayload(PathMgmt(request.srev_info())), meta, pld)

    def _api_handle_seg_type_request(self, pld, meta):
        request = pld.union
        assert isinstance(request, SCIONDSegTypeHopRequest), type(request)
        segmentType = request.p.type
        db = []
        if segmentType == PST.CORE:
            db = self.core_segments
        elif segmentType == PST.UP:
            db = self.up_segments
        elif segmentType == PST.DOWN:
            db = self.down_segments
        else:
            logging.error("Requesting segment type %s unrecognized.", segmentType)

        seg_entries = []
        for segment in db(full=True):
            if_list = []
            for asm in segment.iter_asms():
                isd_as = asm.isd_as()
                hof = asm.pcbm(0).hof()
                egress = hof.egress_if
                ingress = hof.ingress_if
                if ingress:
                    if_list.append(PathInterface.from_values(isd_as, ingress))
                if egress:
                    if_list.append(PathInterface.from_values(isd_as, egress))
            reply_entry = SCIONDSegTypeHopReplyEntry.from_values(
                if_list, segment.get_timestamp(), segment.get_expiration_time())
            seg_entries.append(reply_entry)
        seg_reply = SCIONDMsg(
            SCIONDSegTypeHopReply.from_values(seg_entries), pld.id)
        self.send_meta(seg_reply.pack(), meta)

    def handle_scmp_revocation(self, pld, meta):
        srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
        self.handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)

    def handle_revocation(self, cpld, meta, pld=None):
        pmgt = cpld.union
        srev_info = pmgt.union
        rev_info = srev_info.rev_info()
        assert isinstance(rev_info, RevocationInfo), type(rev_info)
        logging.debug("Received revocation: %s from %s", srev_info.short_desc(), meta)
        self.check_revocation(srev_info,
                              lambda e: self.process_revocation(e, srev_info, meta, pld), meta)

    def process_revocation(self, error, srev_info, meta, pld):
        rev_info = srev_info.rev_info()
        status = None
        if error is None:
            status = SCIONDRevReplyStatus.VALID
            self.rev_cache.add(srev_info)
            self.remove_revoked_segments(rev_info)
        else:
            if type(error) == RevInfoValidationError:
                logging.error("Failed to validate RevInfo %s from %s: %s",
                              srev_info.short_desc(), meta, error)
                status = SCIONDRevReplyStatus.INVALID
            elif type(error) == RevInfoExpiredError:
                logging.info("Ignoring expired RevInfo %s from %s",
                             srev_info.short_desc(), meta)
                status = SCIONDRevReplyStatus.STALE
            elif type(error) == SignedRevInfoCertFetchError:
                logging.error("Failed to fetch certificate for SignedRevInfo %s from %s: %s",
                              srev_info.short_desc(), meta, error)
                status = SCIONDRevReplyStatus.UNKNOWN
            elif type(error) == SignedRevInfoVerificationError:
                logging.error("Failed to verify SRevInfo %s from %s: %s",
                              srev_info.short_desc(), meta, error)
                status = SCIONDRevReplyStatus.SIGFAIL
            elif type(error) == SCIONBaseError:
                logging.error("Revocation check failed for %s from %s:\n%s",
                              srev_info.short_desc(), meta, error)
                status = SCIONDRevReplyStatus.UNKNOWN

        if pld:
            rev_reply = SCIONDMsg(SCIONDRevReply.from_values(status), pld.id)
            self.send_meta(rev_reply.pack(), meta)

    def remove_revoked_segments(self, rev_info):
        # Go through all segment databases and remove affected segments.
        removed_up = removed_core = removed_down = 0
        if rev_info.p.linkType == LinkType.CORE:
            removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
        elif rev_info.p.linkType in [LinkType.PARENT, LinkType.CHILD]:
            removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
            removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
        elif rev_info.p.linkType != LinkType.PEER:
            logging.error("Bad RevInfo link type: %s", rev_info.p.linkType)

        logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." %
                     (removed_up, removed_core, removed_down))

    def _remove_revoked_pcbs(self, db, rev_info):
        """
        Removes all segments from 'db' that have a revoked upstream PCBMarking.

        :param db: The PathSegmentDB.
        :type db: :class:`lib.path_db.PathSegmentDB`
        :param rev_info: The revocation info
        :type rev_info: RevocationInfo

        :returns: The number of deletions.
        :rtype: int
        """

        to_remove = []
        for segment in db(full=True):
            for asm in segment.iter_asms():
                if self._check_revocation_for_asm(rev_info, asm, verify_all=False):
                    logging.debug("Removing segment: %s" % segment.short_desc())
                    to_remove.append(segment.get_hops_hash())
        return db.delete_all(to_remove)
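    # A minimal stdlib sketch (not the SCION PathSegmentDB API) of the same
    # collect-then-delete pattern used above: gather the IDs of affected
    # segments while iterating, then delete them in one pass so iteration and
    # mutation never interleave.
    #
    #     db = {"segA": {1, 2}, "segB": {3}}   # segment ID -> IFs it crosses
    #     revoked_if = 2
    #     to_remove = [sid for sid, ifs in db.items() if revoked_if in ifs]
    #     for sid in to_remove:                # db.delete_all(to_remove) above
    #         del db[sid]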

    def _flush_path_dbs(self):
        self.core_segments.flush()
        self.down_segments.flush()
        self.up_segments.flush()

    def get_paths(self, dst_ia, flags=(), flush=False):
        """Return a list of paths."""
        logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s",
                      dst_ia, flags, flush)
        if flush:
            logging.info("Flushing PathDBs.")
            self._flush_path_dbs()
        if self.addr.isd_as == dst_ia or (
                self.addr.isd_as.any_as() == dst_ia and
                self.topology.is_core_as):
            # Either the destination is the local AS, or the destination is any
            # core AS in this ISD, and the local AS is in the core
            empty = SCIONPath()
            exp_time = int(time.time()) + self.EMPTY_PATH_TTL
            empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu, exp_time)
            return [empty_meta], SCIONDPathReplyError.OK
        paths = self.path_resolution(dst_ia, flags=flags)
        if not paths:
            key = dst_ia, flags
            with self.req_path_lock:
                r = self.requested_paths.get(key)
                if r is None:
                    # No previous outstanding request
                    req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags)
                    r = RequestState(req.copy())
                    self.requested_paths[key] = r
                    self._fetch_segments(req)
            # Wait until event gets set.
            timeout = not r.e.wait(PATH_REQ_TOUT)
            with self.req_path_lock:
                if timeout:
                    r.done()
                if key in self.requested_paths:
                    del self.requested_paths[key]
            if timeout:
                logging.error("Query timed out for %s", dst_ia)
                return [], SCIONDPathReplyError.PS_TIMEOUT
            # Check if we can fulfill the path request.
            paths = self.path_resolution(dst_ia, flags=flags)
            if not paths:
                logging.error("No paths found for %s", dst_ia)
                return [], SCIONDPathReplyError.NO_PATHS
        return paths, SCIONDPathReplyError.OK
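    # A rough sketch of the request-coalescing pattern above, assuming (as the
    # code suggests) that RequestState wraps a threading.Event `e` which the
    # path-reply handler sets once segments for this key arrive:
    #
    #     r = requested.get(key)
    #     if r is None:                         # no outstanding request yet
    #         r = RequestState(req)             # r.e = threading.Event()
    #         requested[key] = r
    #         send_request(req)
    #     timed_out = not r.e.wait(PATH_REQ_TOUT)
    #
    # Concurrent callers asking for the same (dst_ia, flags) key therefore
    # share one network request and all wake up on the same event.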

    def path_resolution(self, dst_ia, flags=()):
        # dst as == 0 means any core AS in the specified ISD.
        dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
        sibra = PATH_FLAG_SIBRA in flags
        if self.topology.is_core_as:
            if dst_is_core:
                ret = self._resolve_core_core(dst_ia, sibra=sibra)
            else:
                ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
        elif dst_is_core:
            ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
        elif sibra:
            ret = self._resolve_not_core_not_core_sibra(dst_ia)
        else:
            ret = self._resolve_not_core_not_core_scion(dst_ia)
        if not sibra:
            return ret
        # FIXME(kormat): Strip off PCBs, and just return sibra reservation
        # blocks
        return self._sibra_strip_pcbs(self._strip_nones(ret))

    def _resolve_core_core(self, dst_ia, sibra=False):
        """Resolve path from core to core."""
        res = set()
        for cseg in self.core_segments(last_ia=self.addr.isd_as, sibra=sibra,
                                       **dst_ia.params()):
            res.add((None, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_core_not_core(self, dst_ia, sibra=False):
        """Resolve path from core to non-core."""
        res = set()
        # First check whether there is a direct path.
        for dseg in self.down_segments(
                first_ia=self.addr.isd_as, last_ia=dst_ia, sibra=sibra):
            res.add((None, None, dseg))
        # Check core-down combination.
        for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
            dseg_ia = dseg.first_ia()
            if self.addr.isd_as == dseg_ia:
                # Direct down-segments were already handled above.
                continue
            for cseg in self.core_segments(
                    first_ia=dseg_ia, last_ia=self.addr.isd_as, sibra=sibra):
                res.add((None, cseg, dseg))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_core(self, dst_ia, sibra=False):
        """Resolve path from non-core to core."""
        res = set()
        params = dst_ia.params()
        params["sibra"] = sibra
        if dst_ia[0] == self.addr.isd_as[0]:
            # Dst in local ISD. First check whether DST is a (super)-parent.
            for useg in self.up_segments(**params):
                res.add((useg, None, None))
        # Check whether dst is known core AS.
        for cseg in self.core_segments(**params):
            # Check whether we have an up-segment connected to this core segment.
            for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
                res.add((useg, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_not_core_scion(self, dst_ia):
        """Resolve SCION path from non-core to non-core."""
        up_segs = self.up_segments()
        down_segs = self.down_segments(last_ia=dst_ia)
        core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
        full_paths = build_shortcut_paths(
            up_segs, down_segs, self.rev_cache)
        tuples = []
        for up_seg in up_segs:
            for down_seg in down_segs:
                tuples.append((up_seg, None, down_seg))
                for core_seg in core_segs:
                    tuples.append((up_seg, core_seg, down_seg))
        full_paths.extend(tuples_to_full_paths(tuples))
        return full_paths

    def _resolve_not_core_not_core_sibra(self, dst_ia):
        """Resolve SIBRA path from non-core to non-core."""
        res = set()
        up_segs = set(self.up_segments(sibra=True))
        down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
        for up_seg, down_seg in product(up_segs, down_segs):
            src_core_ia = up_seg.first_ia()
            dst_core_ia = down_seg.first_ia()
            if src_core_ia == dst_core_ia:
                res.add((up_seg, down_seg))
                continue
            for core_seg in self.core_segments(first_ia=dst_core_ia,
                                               last_ia=src_core_ia, sibra=True):
                res.add((up_seg, core_seg, down_seg))
        return res

    def _strip_nones(self, set_):
        """Strip None entries from a set of tuples"""
        res = []
        for tup in set_:
            res.append(tuple(filter(None, tup)))
        return res

    def _sibra_strip_pcbs(self, paths):
        ret = []
        for pcbs in paths:
            resvs = []
            for pcb in pcbs:
                resvs.append(self._sibra_strip_pcb(pcb))
            ret.append(resvs)
        return ret

    def _sibra_strip_pcb(self, pcb):
        assert pcb.is_sibra()
        pcb_ext = pcb.sibra_ext
        resv_info = pcb_ext.info
        resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
        asms = pcb.iter_asms()
        if pcb_ext.p.up:
            asms = reversed(list(asms))
        iflist = []
        for sof, asm in zip(pcb_ext.iter_sofs(), asms):
            resv.sofs.append(sof)
            iflist.extend(self._sibra_add_ifs(
                asm.isd_as(), sof, resv_info.fwd_dir))
        assert resv.num_hops == len(resv.sofs)
        return pcb_ext.p.id, resv, iflist

    def _sibra_add_ifs(self, isd_as, sof, fwd):
        def _add(ifid):
            if ifid:
                ret.append((isd_as, ifid))
        ret = []
        if fwd:
            _add(sof.ingress)
            _add(sof.egress)
        else:
            _add(sof.egress)
            _add(sof.ingress)
        return ret

    def _wait_for_events(self, events, deadline):
        """
        Wait on a set of events, but only until the specified deadline. Returns
        the number of events that happened while waiting.
        """
        count = 0
        for e in events:
            if e.wait(max(0, deadline - SCIONTime.get_time())):
                count += 1
        return count
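    # E.g. with a deadline five seconds away and three events, each e.wait()
    # receives only the time remaining until the deadline, so the loop returns
    # within roughly five seconds however many events never fire.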

    def _fetch_segments(self, req):
        """
        Fetch the requested path segments from the local path service.
        """
        try:
            addr, port = self.dns_query_topo(ServiceType.PS)[0]
        except SCIONServiceLookupError:
            log_exception("Error querying path service:")
            return
        req_id = mk_ctrl_req_id()
        logging.debug("Sending path request (%s) to [%s]:%s [id: %016x]",
                      req.short_desc(), addr, port, req_id)
        meta = self._build_meta(host=addr, port=port)
        self.send_meta(CtrlPayload(PathMgmt(req), req_id=req_id), meta)

    def _calc_core_segs(self, dst_isd, up_segs, down_segs):
        """
        Calculate all possible core segments joining the provided up and down
        segments. Returns a list of all matching core segments that are known
        locally.
        """
        src_core_ases = set()
        dst_core_ases = set()
        for seg in up_segs:
            src_core_ases.add(seg.first_ia()[1])
        for seg in down_segs:
            dst_core_ases.add(seg.first_ia()[1])
        # Generate all possible AS pairs
        as_pairs = list(product(src_core_ases, dst_core_ases))
        return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs)

    def _find_core_segs(self, src_isd, dst_isd, as_pairs):
        """
        Given a set of AS pairs across 2 ISDs, return the core segments
        connecting those pairs
        """
        core_segs = []
        for src_core_as, dst_core_as in as_pairs:
            src_ia = ISD_AS.from_values(src_isd, src_core_as)
            dst_ia = ISD_AS.from_values(dst_isd, dst_core_as)
            if src_ia == dst_ia:
                continue
            seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia)
            if seg:
                core_segs.extend(seg)
        return core_segs

    def run(self):
        """
        Run an instance of the SCION daemon.
        """
        threading.Thread(
            target=thread_safety_net, args=(self._check_trc_cert_reqs,),
            name="Elem.check_trc_cert_reqs", daemon=True).start()
        super().run()
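The resolution helpers above all follow the same pattern: enumerate (up, core, down) combinations whose core endpoints line up, then hand each tuple to tuples_to_full_paths. A rough, self-contained sketch of that combination step, using plain Python objects with hypothetical first_ia/last_ia attributes rather than the SCION segment API:

from itertools import product


def combine_segments(up_segs, core_segs, down_segs):
    """Yield (up, core, down) tuples whose core-AS endpoints match."""
    for up, down in product(up_segs, down_segs):
        if up.first_ia == down.first_ia:
            # Up- and down-segment meet at the same core AS; no core segment needed.
            yield (up, None, down)
            continue
        for core in core_segs:
            # Mirrors the lookups above: the core segment must run from the
            # down-segment's core AS (first_ia) to the up-segment's core AS (last_ia).
            if core.first_ia == down.first_ia and core.last_ia == up.first_ia:
                yield (up, core, down)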
Example #21
0
File: base.py Project: xabarass/scion
    def __init__(self,
                 server_id,
                 conf_dir,
                 spki_cache_dir=GEN_CACHE_PATH,
                 prom_export=None,
                 sciond_path=None):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        :param str prom_export: prometheus export address.
        :param str sciond_path: path to sciond socket.
        """
        super().__init__(server_id,
                         conf_dir,
                         spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export,
                         sciond_path=sciond_path)
        self.config = self._load_as_conf()
        self.master_key_0 = get_master_key(self.conf_dir, MASTER_KEY_0)
        self.master_key_1 = get_master_key(self.conf_dir, MASTER_KEY_1)
        # TODO: add 2 policies
        self.path_policy = PathPolicy.from_file(
            os.path.join(conf_dir, PATH_POLICY_FILE))
        self.signing_key = get_sig_key(self.conf_dir)
        self.of_gen_key = kdf(self.master_key_0, b"Derive OF Key")
        # Amount of time units a HOF is valid (time unit is EXP_TIME_UNIT).
        self.default_hof_exp_time = int(self.config.segment_ttl /
                                        EXP_TIME_UNIT)
        self.ifid_state = {}
        for ifid in self.ifid2br:
            self.ifid_state[ifid] = InterfaceState()
        self.ifid_state_lock = RLock()
        self.if_revocations = {}
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PCB: {
                PayloadClass.PCB: self.handle_pcb
            },
            PayloadClass.IFID: {
                PayloadClass.IFID: self.handle_ifid_packet
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
            PayloadClass.PATH: {
                PMT.IFSTATE_REQ: self._handle_ifstate_request,
                PMT.REVOCATION: self._handle_revocation,
            },
        }
        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
            },
        }

        zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                [(self.addr.host, self._port)]).pack()
        self.zk = Zookeeper(self.addr.isd_as, self.SERVICE_TYPE, zkid,
                            self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        self.pcb_cache = ZkSharedCache(self.zk, self.ZK_PCB_CACHE_PATH,
                                       self._handle_pcbs_from_zk)
        self.revobjs_cache = ZkSharedCache(self.zk, self.ZK_REVOCATIONS_PATH,
                                           self.process_rev_objects)
        self.local_rev_cache = RevCache()
        self._rev_seg_lock = RLock()
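The caches set up above (RevCache, ZkSharedCache) hold revocation objects only for a bounded lifetime. As a rough illustration only, and not the real RevCache API, a minimal TTL-keyed cache can be sketched with a plain dict:

import time


class TTLCache:
    """Toy cache whose entries expire `ttl` seconds after insertion."""

    def __init__(self, ttl):
        self._ttl = ttl
        self._store = {}  # key -> (expiry timestamp, value)

    def add(self, key, value):
        self._store[key] = (time.time() + self._ttl, value)

    def get(self, key, default=None):
        entry = self._store.get(key)
        if entry is None or entry[0] < time.time():
            # Drop expired entries lazily on lookup.
            self._store.pop(key, None)
            return default
        return entry[1]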
Example #22
0
File: base.py Project: xabarass/scion
class BeaconServer(SCIONElement, metaclass=ABCMeta):
    """
    The SCION PathConstructionBeacon Server.
    """
    SERVICE_TYPE = ServiceType.BS
    # ZK path for incoming PCBs
    ZK_PCB_CACHE_PATH = "pcb_cache"
    # ZK path for revocations.
    ZK_REVOCATIONS_PATH = "rev_cache"
    # Time revocation objects are cached in memory (in seconds).
    ZK_REV_OBJ_MAX_AGE = MIN_REVOCATION_TTL
    # Revocation TTL
    REVOCATION_TTL = MIN_REVOCATION_TTL
    # Overlap between consecutive revocations (seconds).
    REVOCATION_OVERLAP = 2
    # Interval to check for timed-out interfaces (seconds).
    IF_TIMEOUT_INTERVAL = 1
    # Interval between keep-alive messages (seconds).
    IFID_INTERVAL = 1
    # Interval between two consecutive requests (in seconds).
    CERT_REQ_RATE = 10

    def __init__(self,
                 server_id,
                 conf_dir,
                 spki_cache_dir=GEN_CACHE_PATH,
                 prom_export=None,
                 sciond_path=None):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        :param str prom_export: prometheus export address.
        :param str sciond_path: path to sciond socket.
        """
        super().__init__(server_id,
                         conf_dir,
                         spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export,
                         sciond_path=sciond_path)
        self.config = self._load_as_conf()
        self.master_key_0 = get_master_key(self.conf_dir, MASTER_KEY_0)
        self.master_key_1 = get_master_key(self.conf_dir, MASTER_KEY_1)
        # TODO: add 2 policies
        self.path_policy = PathPolicy.from_file(
            os.path.join(conf_dir, PATH_POLICY_FILE))
        self.signing_key = get_sig_key(self.conf_dir)
        self.of_gen_key = kdf(self.master_key_0, b"Derive OF Key")
        # Amount of time units a HOF is valid (time unit is EXP_TIME_UNIT).
        self.default_hof_exp_time = int(self.config.segment_ttl /
                                        EXP_TIME_UNIT)
        self.ifid_state = {}
        for ifid in self.ifid2br:
            self.ifid_state[ifid] = InterfaceState()
        self.ifid_state_lock = RLock()
        self.if_revocations = {}
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PCB: {
                PayloadClass.PCB: self.handle_pcb
            },
            PayloadClass.IFID: {
                PayloadClass.IFID: self.handle_ifid_packet
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
            PayloadClass.PATH: {
                PMT.IFSTATE_REQ: self._handle_ifstate_request,
                PMT.REVOCATION: self._handle_revocation,
            },
        }
        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
            },
        }

        zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                [(self.addr.host, self._port)]).pack()
        self.zk = Zookeeper(self.addr.isd_as, self.SERVICE_TYPE, zkid,
                            self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        self.pcb_cache = ZkSharedCache(self.zk, self.ZK_PCB_CACHE_PATH,
                                       self._handle_pcbs_from_zk)
        self.revobjs_cache = ZkSharedCache(self.zk, self.ZK_REVOCATIONS_PATH,
                                           self.process_rev_objects)
        self.local_rev_cache = RevCache()
        self._rev_seg_lock = RLock()

    def propagate_downstream_pcb(self, pcb):
        """
        Propagates the beacon to all children.

        :param pcb: path segment.
        :type pcb: PathSegment
        """
        propagated_pcbs = defaultdict(list)
        prop_cnt = 0
        for intf in self.topology.child_interfaces:
            if not intf.to_if_id:
                continue
            new_pcb, meta = self._mk_prop_pcb_meta(pcb.copy(), intf.isd_as,
                                                   intf.if_id)
            if not new_pcb:
                continue
            self.send_meta(CtrlPayload(new_pcb.pcb()), meta)
            propagated_pcbs[(intf.isd_as, intf.if_id)].append(pcb.short_id())
            prop_cnt += 1
        if self._labels:
            BEACONS_PROPAGATED.labels(**self._labels,
                                      type="down").inc(prop_cnt)
        return propagated_pcbs

    def _mk_prop_pcb_meta(self, pcb, dst_ia, egress_if):
        ts = pcb.get_timestamp()
        asm = self._create_asm(pcb.ifID, egress_if, ts, pcb.last_hof())
        if not asm:
            return None, None
        pcb.add_asm(asm, ProtoSignType.ED25519, self.addr.isd_as.pack())
        pcb.sign(self.signing_key)
        one_hop_path = self._create_one_hop_path(egress_if)
        return pcb, self._build_meta(ia=dst_ia,
                                     host=SVCType.BS_A,
                                     path=one_hop_path,
                                     one_hop=True)

    def _create_one_hop_path(self, egress_if):
        ts = int(SCIONTime.get_time())
        info = InfoOpaqueField.from_values(ts, self.addr.isd_as[0], hops=2)
        hf1 = HopOpaqueField.from_values(OneHopPathExt.HOF_EXP_TIME, 0,
                                         egress_if)
        hf1.set_mac(self.of_gen_key, ts, None)
        # Return a path where second HF is empty.
        return SCIONPath.from_values(info, [hf1, HopOpaqueField()])

    def hof_exp_time(self, ts):
        """
        Return the ExpTime based on IF timestamp and the certificate chain/TRC.
        The certificate chain must be valid for the entire HOF lifetime.

        :param int ts: IF timestamp
        :return: HF ExpTime
        :rtype: int
        """
        cert_exp = self._get_my_cert().as_cert.expiration_time
        max_exp_time = int((cert_exp - ts) / EXP_TIME_UNIT)
        return min(max_exp_time, self.default_hof_exp_time)
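    # Worked example of the formula above, with purely illustrative numbers
    # (EXP_TIME_UNIT is whatever the protocol defines; 300 s is assumed here):
    #
    #     ts       = 1_600_000_000           # IF timestamp
    #     cert_exp = 1_600_090_000           # chain expires 90 000 s later
    #     max_exp  = (cert_exp - ts) // 300  # = 300 time units
    #     exp_time = min(max_exp, default_hof_exp_time)
    #
    # i.e. a HOF may never outlive the certificate chain that vouches for it.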

    def _mk_if_info(self, if_id):
        """
        Small helper method to make it easier to deal with ingress/egress
        interface being 0 while building ASMarkings.
        """
        d = {"remote_ia": ISD_AS.from_values(0, 0), "remote_if": 0, "mtu": 0}
        if not if_id:
            return d
        br = self.ifid2br[if_id]
        d["remote_ia"] = br.interfaces[if_id].isd_as
        d["remote_if"] = br.interfaces[if_id].to_if_id
        d["mtu"] = br.interfaces[if_id].mtu
        return d

    @abstractmethod
    def handle_pcbs_propagation(self):
        """
        Main loop to propagate received beacons.
        """
        raise NotImplementedError

    def _log_propagations(self, propagated_pcbs):
        for (isd_as, if_id), pcbs in propagated_pcbs.items():
            logging.debug("Propagated %d PCBs to %s via %s (%s)", len(pcbs),
                          isd_as, if_id, ", ".join(pcbs))

    def _handle_pcbs_from_zk(self, pcbs):
        """
        Handles cached pcbs through ZK, passed as a list.
        """
        for pcb in pcbs:
            try:
                pcb = PCB.from_raw(pcb)
            except SCIONParseError as e:
                logging.error("Unable to parse raw pcb: %s", e)
                continue
            self.handle_pcb(CtrlPayload(pcb))
        if pcbs:
            logging.debug("Processed %s PCBs from ZK", len(pcbs))

    def handle_pcb(self, cpld, meta=None):
        """
        Handles pcbs received from the network.
        """
        pcb = cpld.union
        assert isinstance(pcb, PCB), type(pcb)
        pcb = pcb.pseg()
        if meta:
            pcb.ifID = meta.path.get_hof().ingress_if
        try:
            self.path_policy.check_filters(pcb)
        except SCIONPathPolicyViolated as e:
            logging.debug("Segment dropped due to path policy: %s\n%s" %
                          (e, pcb.short_desc()))
            return
        if not self._filter_pcb(pcb):
            logging.debug("Segment dropped due to looping: %s" %
                          pcb.short_desc())
            return
        seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta)
        self._process_path_seg(seg_meta, cpld.req_id)

    def continue_seg_processing(self, seg_meta):
        """
        Called for every verified PCB received from the network or ZK to
        continue processing it.
        """
        pseg = seg_meta.seg
        logging.debug("Successfully verified PCB %s", pseg.short_id())
        if seg_meta.meta:
            # Segment was received from network, not from zk. Share segment
            # with other beacon servers in this AS.
            entry_name = "%s-%s" % (pseg.get_hops_hash(hex=True), time.time())
            try:
                self.pcb_cache.store(entry_name, pseg.pcb().copy().pack())
            except ZkNoConnection:
                logging.error("Unable to store PCB in shared cache: "
                              "no connection to ZK")
        self.handle_ext(pseg)
        self._handle_verified_beacon(pseg)

    def _filter_pcb(self, pcb, dst_ia=None):
        return True

    def handle_ext(self, pcb):
        """
        Handle beacon extensions.
        """
        # Handle PCB extensions
        for asm in pcb.iter_asms():
            pol = asm.routing_pol_ext()
            if pol:
                self.handle_routing_pol_ext(pol)

    def handle_routing_pol_ext(self, ext):
        # TODO(Sezer): Implement routing policy extension handling
        logging.debug("Routing policy extension: %s" % ext)

    @abstractmethod
    def register_segments(self):
        """
        Registers paths according to the received beacons.
        """
        raise NotImplementedError

    def _log_registrations(self, registrations, seg_type):
        reg_cnt = 0
        for (dst_meta, dst_type), pcbs in registrations.items():
            reg_cnt += len(pcbs)
            logging.debug("Registered %d %s-segments @ %s:%s (%s)", len(pcbs),
                          seg_type, dst_type.upper(), dst_meta,
                          ", ".join(pcbs))
        if self._labels:
            SEGMENTS_REGISTERED.labels(**self._labels,
                                       type=seg_type).inc(reg_cnt)

    def _create_asm(self, in_if, out_if, ts, prev_hof):
        pcbms = list(self._create_pcbms(in_if, out_if, ts, prev_hof))
        if not pcbms:
            return None
        chain = self._get_my_cert()
        _, cert_ver = chain.get_leaf_isd_as_ver()
        return ASMarking.from_values(self.addr.isd_as,
                                     self._get_my_trc().version, cert_ver,
                                     pcbms, self.topology.mtu)

    def _create_pcbms(self, in_if, out_if, ts, prev_hof):
        up_pcbm = self._create_pcbm(in_if, out_if, ts, prev_hof)
        if not up_pcbm:
            return
        yield up_pcbm
        for intf in sorted(self.topology.peer_interfaces):
            in_if = intf.if_id
            with self.ifid_state_lock:
                if (not self.ifid_state[in_if].is_active()
                        and not self._quiet_startup()):
                    continue
            peer_pcbm = self._create_pcbm(in_if,
                                          out_if,
                                          ts,
                                          up_pcbm.hof(),
                                          xover=True)
            if peer_pcbm:
                yield peer_pcbm

    def _create_pcbm(self, in_if, out_if, ts, prev_hof, xover=False):
        in_info = self._mk_if_info(in_if)
        if in_info["remote_ia"].int() and not in_info["remote_if"]:
            return None
        out_info = self._mk_if_info(out_if)
        if out_info["remote_ia"].int() and not out_info["remote_if"]:
            return None
        exp_time = self.hof_exp_time(ts)
        if exp_time <= 0:
            logging.error("Invalid hop field expiration time value: %s",
                          exp_time)
            return None
        hof = HopOpaqueField.from_values(exp_time, in_if, out_if, xover=xover)
        hof.set_mac(self.of_gen_key, ts, prev_hof)
        return PCBMarking.from_values(in_info["remote_ia"],
                                      in_info["remote_if"], in_info["mtu"],
                                      out_info["remote_ia"],
                                      out_info["remote_if"], hof)

    def _terminate_pcb(self, pcb):
        """
        Copies a PCB, terminates it and adds the segment ID.

        Terminating a PCB means adding an opaque field with the egress IF set
        to 0, i.e., there is no AS to forward a packet containing this path
        segment to.
        """
        pcb = pcb.copy()
        asm = self._create_asm(pcb.ifID, 0, pcb.get_timestamp(),
                               pcb.last_hof())
        if not asm:
            return None
        pcb.add_asm(asm, ProtoSignType.ED25519, self.addr.isd_as.pack())
        return pcb

    def handle_ifid_packet(self, cpld, meta):
        """
        Update the interface state for the corresponding interface.

        :param cpld: The CtrlPayload containing the IFIDPayload.
        :type cpld: CtrlPayload
        """
        pld = cpld.union
        assert isinstance(pld, IFIDPayload), type(pld)
        ifid = meta.pkt.path.get_hof().ingress_if
        with self.ifid_state_lock:
            if ifid not in self.ifid_state:
                raise SCIONKeyError("Invalid IF %d in IFIDPayload" % ifid)
            br = self.ifid2br[ifid]
            br.interfaces[ifid].to_if_id = pld.p.origIF
            prev_state = self.ifid_state[ifid].update()
            if prev_state == InterfaceState.INACTIVE:
                logging.info("IF %d activated.", ifid)
            elif prev_state in [
                    InterfaceState.TIMED_OUT, InterfaceState.REVOKED
            ]:
                logging.info("IF %d came back up.", ifid)
            if prev_state != InterfaceState.ACTIVE:
                if self.zk.have_lock():
                    # Inform BRs about the interface coming up.
                    metas = []
                    for br in self.topology.border_routers:
                        br_addr, br_port = br.ctrl_addrs.public
                        metas.append(
                            UDPMetadata.from_values(host=br_addr,
                                                    port=br_port))
                    info = IFStateInfo.from_values(ifid, True)
                    self._send_ifstate_update([info], metas)

    def run(self):
        """
        Run an instance of the Beacon Server.
        """
        threading.Thread(target=thread_safety_net,
                         args=(self.worker, ),
                         name="BS.worker",
                         daemon=True).start()
        # https://github.com/scionproto/scion/issues/308:
        threading.Thread(target=thread_safety_net,
                         args=(self._send_ifid_updates, ),
                         name="BS._send_if_updates",
                         daemon=True).start()
        threading.Thread(target=thread_safety_net,
                         args=(self._handle_if_timeouts, ),
                         name="BS._handle_if_timeouts",
                         daemon=True).start()
        threading.Thread(target=thread_safety_net,
                         args=(self._check_trc_cert_reqs, ),
                         name="Elem.check_trc_cert_reqs",
                         daemon=True).start()
        threading.Thread(target=thread_safety_net,
                         args=(self._check_local_cert, ),
                         name="BS._check_local_cert",
                         daemon=True).start()
        super().run()

    def worker(self):
        """
        Worker thread that takes care of reading shared PCBs from ZK, and
        propagating PCBS/registering paths when master.
        """
        last_propagation = last_registration = 0
        worker_cycle = 1.0
        start = time.time()
        while self.run_flag.is_set():
            sleep_interval(start, worker_cycle, "BS.worker cycle",
                           self._quiet_startup())
            start = time.time()
            # Update IS_MASTER metric.
            if self._labels:
                IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))
            try:
                self.zk.wait_connected()
                self.pcb_cache.process()
                self.revobjs_cache.process()
                self.handle_rev_objs()

                ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
                if not ret:  # Failed to get the lock
                    continue
                elif ret == ZK_LOCK_SUCCESS:
                    logging.info("Became master")
                    self._became_master()
                self.pcb_cache.expire(self.config.propagation_time * 10)
                self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
            except ZkNoConnection:
                continue
            now = time.time()
            if now - last_propagation >= self.config.propagation_time:
                self.handle_pcbs_propagation()
                last_propagation = now
            if (self.config.registers_paths and
                    now - last_registration >= self.config.registration_time):
                try:
                    self.register_segments()
                except SCIONKeyError as e:
                    logging.error("Error while registering segments: %s", e)
                last_registration = now

    def _became_master(self):
        """
        Called when a BS becomes the new master. Resets some state that will be
        rebuilt over time.
        """
        # Reset all timed-out and revoked interfaces to inactive.
        with self.ifid_state_lock:
            for (_, ifstate) in self.ifid_state.items():
                if not ifstate.is_active():
                    ifstate.reset()

    def _get_my_trc(self):
        return self.trust_store.get_trc(self.addr.isd_as[0])

    def _get_my_cert(self):
        return self.trust_store.get_cert(self.addr.isd_as)

    @abstractmethod
    def _handle_verified_beacon(self, pcb):
        """
        Once a beacon has been verified, place it into the right containers.

        :param pcb: verified path segment.
        :type pcb: PathSegment
        """
        raise NotImplementedError

    def process_rev_objects(self, rev_infos):
        """
        Processes revocation infos stored in Zookeeper.
        """
        with self._rev_seg_lock:
            for raw in rev_infos:
                try:
                    srev_info = SignedRevInfo.from_raw(raw)
                except SCIONParseError as e:
                    logging.error("Error parsing revocation info from ZK: %s",
                                  e)
                    continue
                self.check_revocation(
                    srev_info,
                    lambda x: self.local_rev_cache.add(srev_info)
                    if not x else False)

    def _issue_revocations(self, revoked_ifs):
        """
        Store a RevocationInfo in ZK and send a revocation to all BRs.

        :param list revoked_ifs: A list of interfaces that need to be revoked.
        """
        # Only the master BS issues revocations.
        if not self.zk.have_lock():
            return
        # Process revoked interfaces.
        infos = []
        for if_id in revoked_ifs:
            br = self.ifid2br[if_id]
            rev_info = RevocationInfo.from_values(
                self.addr.isd_as, if_id, br.interfaces[if_id].link_type,
                int(time.time()), self.REVOCATION_TTL)
            logging.info("Issuing revocation: %s", rev_info.short_desc())
            if self._labels:
                REVOCATIONS_ISSUED.labels(**self._labels).inc()
            chain = self._get_my_cert()
            _, cert_ver = chain.get_leaf_isd_as_ver()
            src = DefaultSignSrc.from_values(
                rev_info.isd_as(), cert_ver,
                self._get_my_trc().version).pack()
            srev_info = SignedRevInfo.from_values(rev_info.copy().pack(),
                                                  ProtoSignType.ED25519, src)
            srev_info.sign(self.signing_key)
            # Add to revocation cache
            self.if_revocations[if_id] = srev_info
            self._process_revocation(srev_info)
            infos.append(IFStateInfo.from_values(if_id, False, srev_info))
        metas = []
        # Add all BRs.
        for br in self.topology.border_routers:
            br_addr, br_port = br.ctrl_addrs.public
            metas.append(UDPMetadata.from_values(host=br_addr, port=br_port))
        # Add local path server.
        if self.topology.path_servers:
            try:
                addr, port = self.dns_query_topo(ServiceType.PS)[0]
            except SCIONServiceLookupError:
                addr, port = None, None
            # Create a meta if there is a local path service
            if addr:
                metas.append(UDPMetadata.from_values(host=addr, port=port))
        self._send_ifstate_update(infos, metas)

    def _handle_scmp_revocation(self, pld, meta):
        srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
        self._handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)

    def _handle_revocation(self, cpld, meta):
        pmgt = cpld.union
        srev_info = pmgt.union
        rev_info = srev_info.rev_info()
        assert isinstance(rev_info, RevocationInfo), type(rev_info)
        logging.debug("Received revocation from %s: %s", meta,
                      rev_info.short_desc())
        self.check_revocation(
            srev_info, lambda x: self._process_revocation(srev_info)
            if not x else False, meta)

    def handle_rev_objs(self):
        with self._rev_seg_lock:
            for srev_info in self.local_rev_cache.values():
                self._remove_revoked_pcbs(srev_info.rev_info())

    def _process_revocation(self, srev_info):
        """
        Removes PCBs containing a revoked interface and sends the revocation
        to the local PS.

        :param srev_info: The signed RevocationInfo object
        :type srev_info: SignedRevInfo
        """
        rev_info = srev_info.rev_info()
        assert isinstance(rev_info, RevocationInfo), type(rev_info)
        if_id = rev_info.p.ifID
        if not if_id:
            logging.error("Trying to revoke IF with ID 0.")
            return
        with self._rev_seg_lock:
            self.local_rev_cache.add(srev_info.copy())
        srev_info_packed = srev_info.copy().pack()
        entry_name = "%s:%s" % (hash(srev_info_packed), time.time())
        try:
            self.revobjs_cache.store(entry_name, srev_info_packed)
        except ZkNoConnection as exc:
            logging.error("Unable to store revocation in shared cache "
                          "(no ZK connection): %s" % exc)
        self._remove_revoked_pcbs(rev_info)

    @abstractmethod
    def _remove_revoked_pcbs(self, rev_info):
        """
        Removes the PCBs containing the revoked interface.

        :param rev_info: The RevocationInfo object.
        :type rev_info: RevocationInfo
        """
        raise NotImplementedError

    def _pcb_list_to_remove(self, candidates, rev_info):
        """
        Calculates the list of PCBs to remove.
        Called by _remove_revoked_pcbs.

        :param candidates: Candidate PCBs.
        :type candidates: List
        :param rev_info: The RevocationInfo object.
        :type rev_info: RevocationInfo
        """
        to_remove = []
        if not rev_info.active():
            return to_remove
        processed = set()
        for cand in candidates:
            if cand.id in processed:
                continue
            processed.add(cand.id)

            # If the interface on which we received the PCB is
            # revoked, then the corresponding pcb needs to be removed.
            if (self.addr.isd_as == rev_info.isd_as()
                    and cand.pcb.ifID == rev_info.p.ifID):
                to_remove.append(cand.id)

            for asm in cand.pcb.iter_asms():
                if self._check_revocation_for_asm(rev_info, asm, False):
                    to_remove.append(cand.id)

        return to_remove

    def _handle_if_timeouts(self):
        """
        Periodically checks each interface state and issues an IF revocation if
        no keep-alive message was received for IFID_TOUT.
        """
        while self.run_flag.is_set():
            start_time = time.time()
            with self.ifid_state_lock:
                to_revoke = []
                for (ifid, if_state) in self.ifid_state.items():
                    if self._labels:
                        metric = IF_STATE.labels(ifid=ifid, **self._labels)
                        if if_state.is_active():
                            metric.set(0)
                        elif if_state.is_revoked():
                            metric.set(1)
                        else:
                            metric.set(2)
                    if not if_state.is_expired():
                        # Interface hasn't timed out
                        self.if_revocations.pop(ifid, None)
                        continue
                    srev_info = self.if_revocations.get(ifid, None)
                    if if_state.is_revoked() and srev_info:
                        # The interface stays revoked until the revocation timestamp plus
                        # the revocation TTL; issue a new revocation REVOCATION_OVERLAP
                        # seconds before the current one expires.
                        rev_info = srev_info.rev_info()
                        if (rev_info.p.timestamp + rev_info.p.ttl -
                                self.REVOCATION_OVERLAP > start_time):
                            # Interface has already been revoked within the REVOCATION_TTL -
                            # REVOCATION_OVERLAP period
                            continue
                    if not if_state.is_revoked():
                        logging.info("IF %d went down.", ifid)
                    to_revoke.append(ifid)
                    if_state.revoke_if_expired()
                if to_revoke:
                    self._issue_revocations(to_revoke)
            sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL,
                           "Handle IF timeouts")

    def _handle_ifstate_request(self, cpld, meta):
        # Only master replies to ifstate requests.
        pmgt = cpld.union
        req = pmgt.union
        assert isinstance(req, IFStateRequest), type(req)
        if not self.zk.have_lock():
            return
        with self.ifid_state_lock:
            infos = []
            for (ifid, state) in self.ifid_state.items():
                # Don't include inactive interfaces in update.
                if state.is_inactive():
                    continue
                srev_info = None
                if state.is_revoked():
                    srev_info = self.if_revocations.get(ifid, None)
                    if not srev_info:
                        logging.warning(
                            "No revocation in cache for revoked IFID: %s",
                            ifid)
                        continue
                infos.append(
                    IFStateInfo.from_values(ifid, state.is_active(),
                                            srev_info))
            if not infos and not self._quiet_startup():
                logging.warning(
                    "No IF state info to put in IFState update for %s.", meta)
                return
        self._send_ifstate_update(infos, [meta])

    def _send_ifstate_update(self, state_infos, server_metas):
        payload = CtrlPayload(PathMgmt(
            IFStatePayload.from_values(state_infos)))
        for meta in server_metas:
            logging.debug("IFState update to %s:%s", meta.host, meta.port)
            self.send_meta(payload.copy(), meta)

    def _send_ifid_updates(self):
        start = time.time()
        while self.run_flag.is_set():
            sleep_interval(start, self.IFID_INTERVAL,
                           "BS._send_ifid_updates cycle")
            start = time.time()

            # only master sends keep-alive messages
            if not self.zk.have_lock():
                continue

            # send keep-alives on all known BR interfaces
            for ifid in self.ifid2br:
                br = self.ifid2br[ifid]
                br_addr, br_port = br.int_addrs.public
                one_hop_path = self._create_one_hop_path(ifid)
                meta = self._build_meta(ia=br.interfaces[ifid].isd_as,
                                        host=SVCType.BS_M,
                                        path=one_hop_path,
                                        one_hop=True)
                self.send_meta(CtrlPayload(IFIDPayload.from_values(ifid)),
                               meta, (br_addr, br_port))

    def _check_local_cert(self):
        while self.run_flag.is_set():
            chain = self._get_my_cert()
            exp = min(chain.as_cert.expiration_time,
                      chain.core_as_cert.expiration_time)
            diff = exp - int(time.time())
            if diff > self.config.segment_ttl:
                time.sleep(diff - self.config.segment_ttl)
                continue
            cs_meta = self._get_cs()
            req = CertChainRequest.from_values(self.addr.isd_as,
                                               chain.as_cert.version + 1,
                                               cache_only=True)
            logging.info("Request new certificate chain. Req: %s", req)
            self.send_meta(CtrlPayload(CertMgmt(req)), cs_meta)
            cs_meta.close()
            time.sleep(self.CERT_REQ_RATE)

    def _init_metrics(self):
        super()._init_metrics()
        for type_ in ("core", "up", "down"):
            BEACONS_PROPAGATED.labels(**self._labels, type=type_).inc(0)
            SEGMENTS_REGISTERED.labels(**self._labels, type=type_).inc(0)
        REVOCATIONS_ISSUED.labels(**self._labels).inc(0)
        IS_MASTER.labels(**self._labels).set(0)
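In _handle_if_timeouts above, an existing revocation is refreshed only once its validity window (timestamp + TTL) comes within REVOCATION_OVERLAP seconds of expiring. A small self-contained sketch of that check, with hypothetical plain arguments instead of the SignedRevInfo fields:

def needs_reissue(rev_timestamp, rev_ttl, overlap, now):
    """True once the active revocation is within `overlap` seconds of expiring."""
    return rev_timestamp + rev_ttl - overlap <= now


# A revocation issued at t=1000 with a 10 s TTL and a 2 s overlap is
# refreshed from t=1008 onwards:
assert not needs_reissue(1000, 10, 2, 1007)
assert needs_reissue(1000, 10, 2, 1008)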
Example #23
0
File: base.py Project: forstern/scion
class PathServer(SCIONElement, metaclass=ABCMeta):
    """
    The SCION Path Server.
    """
    SERVICE_TYPE = PATH_SERVICE
    MAX_SEG_NO = 5  # TODO: replace by config variable.
    # ZK path for incoming PATHs
    ZK_PATH_CACHE_PATH = "path_cache"
    # ZK path for incoming REVs
    ZK_REV_CACHE_PATH = "rev_cache"
    # Max number of segments per propagation packet
    PROP_LIMIT = 5
    # Max number of segments per ZK cache entry
    ZK_SHARE_LIMIT = 10
    # Time to store revocations in zookeeper
    ZK_REV_OBJ_MAX_AGE = HASHTREE_EPOCH_TIME

    def __init__(self, server_id, conf_dir):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        """
        super().__init__(server_id, conf_dir)
        self.down_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
        self.core_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
        self.pending_req = defaultdict(list)  # Dict of pending requests.
        self.pen_req_lock = threading.Lock()
        # Used when the local/core PS doesn't have an up- or down-path.
        self.waiting_targets = defaultdict(list)
        self.revocations = RevCache()
        # A mapping from (hash tree root of AS, IFID) to segments
        self.htroot_if2seg = ExpiringDict(1000, HASHTREE_TTL)
        self.htroot_if2seglock = Lock()
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REQUEST: self.path_resolution,
                PMT.REPLY: self.handle_path_segment_record,
                PMT.REG: self.handle_path_segment_record,
                PMT.REVOCATION: self._handle_revocation,
                PMT.SYNC: self.handle_path_segment_record,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }
        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
            },
        }
        self._segs_to_zk = deque()
        self._revs_to_zk = deque()
        self._zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                      [(self.addr.host, self._port)])
        self.zk = Zookeeper(self.topology.isd_as, PATH_SERVICE,
                            self._zkid.copy().pack(), self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        self.path_cache = ZkSharedCache(self.zk, self.ZK_PATH_CACHE_PATH,
                                        self._handle_paths_from_zk)
        self.rev_cache = ZkSharedCache(self.zk, self.ZK_REV_CACHE_PATH,
                                       self._rev_entries_handler)

    def worker(self):
        """
        Worker thread that takes care of reading shared paths from ZK, and
        handling master election for core servers.
        """
        worker_cycle = 1.0
        start = SCIONTime.get_time()
        was_master = False
        while self.run_flag.is_set():
            sleep_interval(start, worker_cycle, "cPS.worker cycle",
                           self._quiet_startup())
            start = SCIONTime.get_time()
            try:
                self.zk.wait_connected()
                self.path_cache.process()
                self.rev_cache.process()
                # Try to become a master.
                is_master = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
                if is_master:
                    if not was_master:
                        logging.info("Became master")
                    self.path_cache.expire(self.config.propagation_time * 10)
                    self.rev_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
                    was_master = True
                else:
                    was_master = False
            except ZkNoConnection:
                logging.warning('worker(): ZkNoConnection')
            self._update_master()
            self._propagate_and_sync()
            self._handle_pending_requests()

    def _update_master(self):
        pass

    def _rev_entries_handler(self, raw_entries):
        for raw in raw_entries:
            rev_info = RevocationInfo.from_raw(raw)
            self._remove_revoked_segments(rev_info)

    def _add_rev_mappings(self, pcb):
        """
        Add interface revocation token (hash tree root, IFID) to segment ID mappings.
        """
        segment_id = pcb.get_hops_hash()
        with self.htroot_if2seglock:
            for asm in pcb.iter_asms():
                hof = asm.pcbm(0).hof()
                egress_h = (asm.p.hashTreeRoot, hof.egress_if)
                self.htroot_if2seg.setdefault(egress_h, set()).add(segment_id)
                ingress_h = (asm.p.hashTreeRoot, hof.ingress_if)
                self.htroot_if2seg.setdefault(ingress_h, set()).add(segment_id)
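    # The mapping built above is a plain reverse index: for every AS marking,
    # both (hashTreeRoot, ingress IF) and (hashTreeRoot, egress IF) point at
    # the set of segment IDs crossing that interface, e.g.
    #
    #     index = {}
    #     index.setdefault((root, if_id), set()).add(segment_id)
    #
    # so _remove_revoked_segments can later pop exactly the affected segment
    # IDs for a revoked (hash, IFID) pair.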

    @abstractmethod
    def _handle_up_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _handle_down_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _handle_core_segment_record(self, pcb, **kwargs):
        raise NotImplementedError

    def _add_segment(self, pcb, seg_db, name, reverse=False):
        res = seg_db.update(pcb, reverse=reverse)
        if res == DBResult.ENTRY_ADDED:
            self._add_rev_mappings(pcb)
            logging.info("%s-Segment registered: %s", name, pcb.short_desc())
            return True
        elif res == DBResult.ENTRY_UPDATED:
            self._add_rev_mappings(pcb)
            logging.debug("%s-Segment updated: %s", name, pcb.short_desc())
        return False

    def _handle_scmp_revocation(self, pld, meta):
        rev_info = RevocationInfo.from_raw(pld.info.rev_info)
        self._handle_revocation(rev_info, meta)

    def _handle_revocation(self, rev_info, meta):
        """
        Handles a revocation of a segment, interface or hop.

        :param rev_info: The RevocationInfo object.
        """
        assert isinstance(rev_info, RevocationInfo)
        if not self._validate_revocation(rev_info):
            return
        if meta.ia[0] != self.addr.isd_as[0]:
            logging.info("Dropping revocation received from a different ISD.")
            return

        if rev_info in self.revocations:
            logging.debug("Already received revocation. Dropping...")
            return False
        self.revocations.add(rev_info)
        logging.debug("Received revocation from %s:\n%s", meta.get_addr(),
                      rev_info)
        self._revs_to_zk.append(rev_info.copy().pack())  # have to pack copy
        # Remove segments that contain the revoked interface.
        self._remove_revoked_segments(rev_info)
        # Forward revocation to other path servers.
        self._forward_revocation(rev_info, meta)

    def _remove_revoked_segments(self, rev_info):
        """
        Try the previous and next hashes as possible AS tokens, and delete any
        segment that matches.

        :param rev_info: The revocation info
        :type rev_info: RevocationInfo
        """
        if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
            return
        (hash01, hash12) = ConnectedHashTree.get_possible_hashes(rev_info)
        if_id = rev_info.p.ifID

        with self.htroot_if2seglock:
            down_segs_removed = 0
            core_segs_removed = 0
            up_segs_removed = 0
            for h in (hash01, hash12):
                for sid in self.htroot_if2seg.pop((h, if_id), []):
                    if self.down_segments.delete(
                            sid) == DBResult.ENTRY_DELETED:
                        down_segs_removed += 1
                    if self.core_segments.delete(
                            sid) == DBResult.ENTRY_DELETED:
                        core_segs_removed += 1
                    if not self.topology.is_core_as:
                        if (self.up_segments.delete(sid) ==
                                DBResult.ENTRY_DELETED):
                            up_segs_removed += 1
            logging.info(
                "Removed segments containing IF %d: "
                "UP: %d DOWN: %d CORE: %d" %
                (if_id, up_segs_removed, down_segs_removed, core_segs_removed))

    @abstractmethod
    def _forward_revocation(self, rev_info, meta):
        """
        Forwards a revocation to other path servers that need to be notified.

        :param rev_info: The RevInfo object.
        :param meta: The MessageMeta object.
        """
        raise NotImplementedError

    def _send_path_segments(self, req, meta, up=None, core=None, down=None):
        """
        Sends path segments to the requester (depending on the Path Server's
        location).
        """
        up = up or set()
        core = core or set()
        down = down or set()
        all_segs = up | core | down
        if not all_segs:
            logging.warning("No segments to send")
            return
        revs_to_add = self._peer_revs_for_segs(all_segs)
        pld = PathRecordsReply.from_values(
            {
                PST.UP: up,
                PST.CORE: core,
                PST.DOWN: down
            }, revs_to_add)
        self.send_meta(pld, meta)
        logging.info(
            "Sending PATH_REPLY with %d segment(s) to:%s "
            "port:%s in response to: %s",
            len(all_segs),
            meta.get_addr(),
            meta.port,
            req.short_desc(),
        )

    def _peer_revs_for_segs(self, segs):
        """Returns a list of peer revocations for segments in 'segs'."""
        def _handle_one_seg(seg):
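            # iter_pcbms(1) skips the primary hop entry and yields only the
            # peer markings; the first applicable peer revocation found for
            # this segment is added, then we return.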
            for asm in seg.iter_asms():
                for pcbm in asm.iter_pcbms(1):
                    hof = pcbm.hof()
                    for if_id in [hof.ingress_if, hof.egress_if]:
                        rev_info = self.revocations.get((asm.isd_as(), if_id))
                        if rev_info:
                            revs_to_add.add(rev_info.copy())
                            return

        revs_to_add = set()
        for seg in segs:
            _handle_one_seg(seg)

        return list(revs_to_add)

    def _handle_pending_requests(self):
        rem_keys = []
        # Serve pending requests.
        with self.pen_req_lock:
            for key in self.pending_req:
                to_remove = []
                for req, meta in self.pending_req[key]:
                    if self.path_resolution(req, meta, new_request=False):
                        meta.close()
                        to_remove.append((req, meta))
                # Clean state.
                for req_meta in to_remove:
                    self.pending_req[key].remove(req_meta)
                if not self.pending_req[key]:
                    rem_keys.append(key)
            for key in rem_keys:
                del self.pending_req[key]

    def _handle_paths_from_zk(self, raw_entries):
        """
        Handles path segments cached via ZK, passed as a list of raw entries.
        """
        for raw in raw_entries:
            recs = PathSegmentRecords.from_raw(raw)
            for type_, pcb in recs.iter_pcbs():
                seg_meta = PathSegMeta(pcb,
                                       self.continue_seg_processing,
                                       type_=type_,
                                       params={'from_zk': True})
                self.process_path_seg(seg_meta)
        logging.debug("Processed %s segments from ZK", len(raw_entries))

    def handle_path_segment_record(self, seg_recs, meta):
        """
        Handles paths received from the network.
        """
        params = self._dispatch_params(seg_recs, meta)
        # Add revocations for peer interfaces included in the path segments.
        for rev_info in seg_recs.iter_rev_infos():
            self.revocations.add(rev_info)
        # Verify pcbs and process them
        for type_, pcb in seg_recs.iter_pcbs():
            seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta,
                                   type_, params)
            self.process_path_seg(seg_meta)

    def continue_seg_processing(self, seg_meta):
        """
        For every path segment (that can be verified) received from the network
        or ZK, this function gets called to continue processing the segment.
        The segment is added to the path DB and pending requests are checked.
        """
        pcb = seg_meta.seg
        type_ = seg_meta.type
        params = seg_meta.params
        self._dispatch_segment_record(type_, pcb, **params)
        self._handle_pending_requests()

    def _dispatch_segment_record(self, type_, seg, **kwargs):
        # Check that segment does not contain a revoked interface.
        if not self._validate_segment(seg):
            logging.debug("Not adding segment due to revoked interface:\n%s" %
                          seg.short_desc())
            return
        handle_map = {
            PST.UP: self._handle_up_segment_record,
            PST.CORE: self._handle_core_segment_record,
            PST.DOWN: self._handle_down_segment_record,
        }
        handle_map[type_](seg, **kwargs)

    def _validate_segment(self, seg):
        """
        Check segment for revoked upstream/downstream interfaces.

        :param seg: The PathSegment object.
        :return: False, if the path segment contains a revoked upstream/
            downstream interface (not peer). True otherwise.
        """
        for asm in seg.iter_asms():
            pcbm = asm.pcbm(0)
            for if_id in [pcbm.p.inIF, pcbm.p.outIF]:
                rev_info = self.revocations.get((asm.isd_as(), if_id))
                if rev_info:
                    logging.debug("Found revoked interface (%d) in segment "
                                  "%s." % (rev_info.p.ifID, seg.short_desc()))
                    return False
        return True

    def _dispatch_params(self, pld, meta):
        return {}

    def _propagate_and_sync(self):
        self._share_via_zk()
        self._share_revs_via_zk()

    def _gen_prop_recs(self, queue, limit=PROP_LIMIT):
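        # Drain the queue into batches of at most `limit` PCBs, grouped by
        # segment type, yielding each batch as soon as it is full and a final
        # partial batch at the end.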
        count = 0
        pcbs = defaultdict(list)
        while queue:
            count += 1
            type_, pcb = queue.popleft()
            pcbs[type_].append(pcb.copy())
            if count >= limit:
                yield pcbs
                count = 0
                pcbs = defaultdict(list)
        if pcbs:
            yield pcbs

    @abstractmethod
    def path_resolution(self, path_request, meta, new_request):
        """
        Handles all types of path request.
        """
        raise NotImplementedError

    def _handle_waiting_targets(self, pcb):
        """
        Handle any queries that are waiting for a path to any core AS in an ISD.
        """
        dst_ia = pcb.first_ia()
        if not self.is_core_as(dst_ia):
            logging.warning("Invalid waiting target, not a core AS: %s",
                            dst_ia)
            return
        self._send_waiting_queries(dst_ia[0], pcb)

    def _send_waiting_queries(self, dst_isd, pcb):
        targets = self.waiting_targets[dst_isd]
        if not targets:
            return
        path = pcb.get_path(reverse_direction=True)
        src_ia = pcb.first_ia()
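        # Each queued segment request is sent to the path service anycast
        # address in the core AS that originated the beacon, using the
        # reversed beacon path.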
        while targets:
            seg_req = targets.pop(0)
            meta = self.DefaultMeta.from_values(ia=src_ia,
                                                path=path,
                                                host=SVCType.PS_A)
            self.send_meta(seg_req, meta)
            logging.info("Waiting request (%s) sent via %s",
                         seg_req.short_desc(), pcb.short_desc())

    def _share_via_zk(self):
        if not self._segs_to_zk:
            return
        logging.info("Sharing %d segment(s) via ZK", len(self._segs_to_zk))
        for pcb_dict in self._gen_prop_recs(self._segs_to_zk,
                                            limit=self.ZK_SHARE_LIMIT):
            seg_recs = PathSegmentRecords.from_values(pcb_dict)
            self._zk_write(seg_recs.pack())

    def _share_revs_via_zk(self):
        if not self._revs_to_zk:
            return
        logging.info("Sharing %d revocation(s) via ZK", len(self._revs_to_zk))
        while self._revs_to_zk:
            self._zk_write_rev(self._revs_to_zk.popleft())

    def _zk_write(self, data):
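        # Entries are keyed by content hash plus the current time, so repeated
        # shares of the same data are stored under distinct names.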
        hash_ = SHA256.new(data).hexdigest()
        try:
            self.path_cache.store("%s-%s" % (hash_, SCIONTime.get_time()),
                                  data)
        except ZkNoConnection:
            logging.warning("Unable to store segment(s) in shared path: "
                            "no connection to ZK")

    def _zk_write_rev(self, data):
        hash_ = SHA256.new(data).hexdigest()
        try:
            self.rev_cache.store("%s-%s" % (hash_, SCIONTime.get_time()), data)
        except ZkNoConnection:
            logging.warning("Unable to store revocation(s) in shared path: "
                            "no connection to ZK")

    def run(self):
        """
        Run an instance of the Path Server.
        """
        threading.Thread(target=thread_safety_net,
                         args=(self.worker, ),
                         name="PS.worker",
                         daemon=True).start()
        super().run()
Example #24
0
class SCIONDaemon(SCIONElement):
    """
    The SCION Daemon used for retrieving and combining paths.
    """
    # Max time for a path lookup to succeed/fail.
    PATH_REQ_TOUT = 2
    MAX_REQS = 1024
    # Time a path segment is cached at a host (in seconds).
    SEGMENT_TTL = 300

    def __init__(self,
                 conf_dir,
                 addr,
                 api_addr,
                 run_local_api=False,
                 port=None):
        """
        Initialize an instance of the class SCIONDaemon.
        """
        super().__init__("sciond", conf_dir, host_addr=addr, port=port)
        # TODO replace by pathstore instance
        self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL)
        self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL)
        self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL)
        self.peer_revs = RevCache()
        # Keep track of requested paths.
        self.requested_paths = ExpiringDict(self.MAX_REQS, self.PATH_REQ_TOUT)
        self.req_path_lock = threading.Lock()
        self._api_sock = None
        self.daemon_thread = None
        os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
        self.api_addr = (api_addr or os.path.join(
            SCIOND_API_SOCKDIR, "%s.sock" % self.addr.isd_as))

        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PATH: {
                PMT.REPLY: self.handle_path_reply,
                PMT.REVOCATION: self.handle_revocation,
            },
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
        }

        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation
            },
        }

        if run_local_api:
            self._api_sock = ReliableSocket(bind=(self.api_addr, "sciond"))
            self._socks.add(self._api_sock, self.handle_accept)

    @classmethod
    def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
        """
        Initializes, starts, and returns a SCIONDaemon object.

        Example of usage:
        sd = SCIONDaemon.start(conf_dir, addr)
        paths = sd.get_paths(isd_as)
        """
        inst = cls(conf_dir, addr, api_addr, run_local_api, port)
        name = "SCIONDaemon.run %s" % inst.addr.isd_as
        inst.daemon_thread = threading.Thread(target=thread_safety_net,
                                              args=(inst.run, ),
                                              name=name,
                                              daemon=True)
        inst.daemon_thread.start()
        logging.debug("sciond started with api_addr = %s", inst.api_addr)
        return inst

    def _get_msg_meta(self, packet, addr, sock):
        if sock != self._udp_sock:
            return packet, SockOnlyMetadata.from_values(sock)  # API socket
        else:
            return super()._get_msg_meta(packet, addr, sock)

    def handle_msg_meta(self, msg, meta):
        """
        Main routine to handle incoming SCION messages.
        """
        if isinstance(meta, SockOnlyMetadata):  # From SCIOND API
            try:
                sciond_msg = parse_sciond_msg(msg)
            except SCIONParseError as err:
                logging.error(str(err))
                return
            self.api_handle_request(sciond_msg, meta)
            return
        super().handle_msg_meta(msg, meta)

    def handle_path_reply(self, path_reply, meta):
        """
        Handle path reply from local path server.
        """
        for rev_info in path_reply.iter_rev_infos():
            self.peer_revs.add(rev_info)

        for type_, pcb in path_reply.iter_pcbs():
            seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta,
                                   type_)
            self.process_path_seg(seg_meta)

    def continue_seg_processing(self, seg_meta):
        """
        For every path segment (that can be verified) received from the path
        server, this function gets called to continue processing the segment.
        The segment is added to the path DB and pending requests are checked.
        """
        pcb = seg_meta.seg
        type_ = seg_meta.type
        map_ = {
            PST.UP: self._handle_up_seg,
            PST.DOWN: self._handle_down_seg,
            PST.CORE: self._handle_core_seg,
        }
        ret = map_[type_](pcb)
        if not ret:
            return
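        # A newly stored segment may satisfy outstanding path requests: re-run
        # resolution for each pending key and wake the waiting get_paths()
        # call by setting its event.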
        with self.req_path_lock:
            # .items() makes a copy on an expiring dict, so deleting entries is safe.
            for key, e in self.requested_paths.items():
                if self.path_resolution(*key):
                    e.set()
                    del self.requested_paths[key]

    def _handle_up_seg(self, pcb):
        if self.addr.isd_as != pcb.last_ia():
            return None
        if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Up segment added: %s", pcb.short_desc())
            return pcb.first_ia()
        return None

    def _handle_down_seg(self, pcb):
        last_ia = pcb.last_ia()
        if self.addr.isd_as == last_ia:
            return None
        if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Down segment added: %s", pcb.short_desc())
            return last_ia
        return None

    def _handle_core_seg(self, pcb):
        if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED:
            logging.debug("Core segment added: %s", pcb.short_desc())
            return pcb.first_ia()
        return None

    def api_handle_request(self, msg, meta):
        """
        Handle local API's requests.
        """
        if msg.MSG_TYPE == SMT.PATH_REQUEST:
            threading.Thread(target=thread_safety_net,
                             args=(self._api_handle_path_request, msg, meta),
                             daemon=True).start()
        elif msg.MSG_TYPE == SMT.REVOCATION:
            self.handle_revocation(msg.rev_info(), meta)
        elif msg.MSG_TYPE == SMT.AS_REQUEST:
            self._api_handle_as_request(msg, meta)
        elif msg.MSG_TYPE == SMT.IF_REQUEST:
            self._api_handle_if_request(msg, meta)
        elif msg.MSG_TYPE == SMT.SERVICE_REQUEST:
            self._api_handle_service_request(msg, meta)
        else:
            logging.warning("API: type %s not supported.",
                            TypeBase.to_str(msg.MSG_TYPE))

    def _api_handle_path_request(self, request, meta):
        req_id = request.id
        if request.p.flags.sibra:
            logging.warning(
                "Requesting SIBRA paths over SCIOND API not supported yet.")
            self._send_path_reply(req_id, [], SCIONDPathReplyError.INTERNAL,
                                  meta)
            return

        dst_ia = request.dst_ia()
        src_ia = request.src_ia()
        if not src_ia:
            src_ia = self.addr.isd_as
        thread = threading.current_thread()
        thread.name = "SCIONDaemon API id:%s %s -> %s" % (thread.ident, src_ia,
                                                          dst_ia)
        paths, error = self.get_paths(dst_ia, flush=request.p.flags.flush)
        if request.p.maxPaths:
            paths = paths[:request.p.maxPaths]
        logging.debug("Replying to api request for %s with %d paths", dst_ia,
                      len(paths))
        reply_entries = []
        for path_meta in paths:
            fwd_if = path_meta.fwd_path().get_fwd_if()
            # Set dummy host addr if path is empty.
            haddr, port = None, None
            if fwd_if:
                br = self.ifid2br[fwd_if]
                haddr, port = br.addr, br.port
            addrs = [haddr] if haddr else []
            first_hop = HostInfo.from_values(addrs, port)
            reply_entry = SCIONDPathReplyEntry.from_values(
                path_meta, first_hop)
            reply_entries.append(reply_entry)
        self._send_path_reply(req_id, reply_entries, error, meta)

    def _send_path_reply(self, req_id, reply_entries, error, meta):
        path_reply = SCIONDPathReply.from_values(req_id, reply_entries, error)
        self.send_meta(path_reply.pack_full(), meta)

    def _api_handle_as_request(self, request, meta):
        remote_as = request.isd_as()
        if remote_as:
            reply_entry = SCIONDASInfoReplyEntry.from_values(
                remote_as, self.is_core_as(remote_as))
        else:
            reply_entry = SCIONDASInfoReplyEntry.from_values(
                self.addr.isd_as, self.is_core_as(), self.topology.mtu)
        as_reply = SCIONDASInfoReply.from_values(request.id, [reply_entry])
        self.send_meta(as_reply.pack_full(), meta)

    def _api_handle_if_request(self, request, meta):
        all_brs = request.all_brs()
        if_list = []
        if not all_brs:
            if_list = list(request.iter_ids())
        if_entries = []
        for if_id, br in self.ifid2br.items():
            if all_brs or if_id in if_list:
                info = HostInfo.from_values([br.addr], br.port)
                reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info)
                if_entries.append(reply_entry)
        if_reply = SCIONDIFInfoReply.from_values(request.id, if_entries)
        self.send_meta(if_reply.pack_full(), meta)

    def _api_handle_service_request(self, request, meta):
        all_svcs = request.all_services()
        svc_list = []
        if not all_svcs:
            svc_list = list(request.iter_service_types())
        svc_entries = []
        for svc_type in ServiceType.all():
            if all_svcs or svc_type in svc_list:
                lookup_res = self.dns_query_topo(svc_type)
                host_infos = []
                for addr, port in lookup_res:
                    host_infos.append(HostInfo.from_values([addr], port))
                reply_entry = SCIONDServiceInfoReplyEntry.from_values(
                    svc_type, host_infos)
                svc_entries.append(reply_entry)
        svc_reply = SCIONDServiceInfoReply.from_values(request.id, svc_entries)
        self.send_meta(svc_reply.pack_full(), meta)

    def handle_scmp_revocation(self, pld, meta):
        rev_info = RevocationInfo.from_raw(pld.info.rev_info)
        self.handle_revocation(rev_info, meta)

    def handle_revocation(self, rev_info, meta):
        assert isinstance(rev_info, RevocationInfo)
        if not self._validate_revocation(rev_info):
            return
        # Go through all segment databases and remove affected segments.
        removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
        removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
        removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
        logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." %
                     (removed_up, removed_core, removed_down))

    def _remove_revoked_pcbs(self, db, rev_info):
        """
        Removes all segments from 'db' that contain an AS entry for which the
        revocation proof in 'rev_info' verifies.

        :param db: The PathSegmentDB.
        :type db: :class:`lib.path_db.PathSegmentDB`
        :param rev_info: The revocation info
        :type rev_info: RevocationInfo

        :returns: The number of deletions.
        :rtype: int
        """

        if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
            logging.debug(
                "Failed to verify epoch: rev_info epoch %d, current epoch %d."
                % (rev_info.p.epoch, ConnectedHashTree.get_current_epoch()))
            return 0

        to_remove = []
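        # Collect the hop-hashes of all segments with an AS entry the
        # revocation verifies against, then delete them in one batch.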
        for segment in db(full=True):
            for asm in segment.iter_asms():
                if self._verify_revocation_for_asm(rev_info, asm):
                    logging.debug("Removing segment: %s" %
                                  segment.short_desc())
                    to_remove.append(segment.get_hops_hash())
        return db.delete_all(to_remove)

    def _flush_path_dbs(self):
        self.core_segments.flush()
        self.down_segments.flush()
        self.up_segments.flush()

    def get_paths(self, dst_ia, flags=(), flush=False):
        """Return a list of paths."""
        logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s",
                      dst_ia, flags, flush)
        if flush:
            logging.info("Flushing PathDBs.")
            self._flush_path_dbs()
        if self.addr.isd_as == dst_ia or (self.addr.isd_as.any_as() == dst_ia
                                          and self.topology.is_core_as):
            # Either the destination is the local AS, or the destination is any
            # core AS in this ISD, and the local AS is in the core
            empty = SCIONPath()
            empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu)
            return [empty_meta], SCIONDPathReplyError.OK
        paths = self.path_resolution(dst_ia, flags=flags)
        if not paths:
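            # No cached paths: register an event for this (dst_ia, flags) key
            # (unless a request is already outstanding), ask the path server
            # for segments, and wait until the reply arrives or the request
            # times out.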
            key = dst_ia, flags
            with self.req_path_lock:
                if key not in self.requested_paths:
                    # No previous outstanding request
                    self.requested_paths[key] = threading.Event()
                    self._fetch_segments(key)
                e = self.requested_paths[key]
            if not e.wait(self.PATH_REQ_TOUT):
                logging.error("Query timed out for %s", dst_ia)
                return [], SCIONDPathReplyError.PS_TIMEOUT
            paths = self.path_resolution(dst_ia, flags=flags)
        error_code = (SCIONDPathReplyError.OK
                      if paths else SCIONDPathReplyError.NO_PATHS)
        return paths, error_code

    def path_resolution(self, dst_ia, flags=()):
        # dst as == 0 means any core AS in the specified ISD.
        dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
        sibra = PATH_FLAG_SIBRA in flags
        if self.topology.is_core_as:
            if dst_is_core:
                ret = self._resolve_core_core(dst_ia, sibra=sibra)
            else:
                ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
        elif dst_is_core:
            ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
        elif sibra:
            ret = self._resolve_not_core_not_core_sibra(dst_ia)
        else:
            ret = self._resolve_not_core_not_core_scion(dst_ia)
        if not sibra:
            return ret
        # FIXME(kormat): Strip off PCBs, and just return sibra reservation
        # blocks
        return self._sibra_strip_pcbs(self._strip_nones(ret))

    def _resolve_core_core(self, dst_ia, sibra=False):
        """Resolve path from core to core."""
        res = set()
        for cseg in self.core_segments(last_ia=self.addr.isd_as,
                                       sibra=sibra,
                                       **dst_ia.params()):
            res.add((None, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_core_not_core(self, dst_ia, sibra=False):
        """Resolve path from core to non-core."""
        res = set()
        # First check whether there is a direct path.
        for dseg in self.down_segments(first_ia=self.addr.isd_as,
                                       last_ia=dst_ia,
                                       sibra=sibra):
            res.add((None, None, dseg))
        # Check core-down combination.
        for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
            dseg_ia = dseg.first_ia()
            if self.addr.isd_as == dseg_ia:
                # Direct down-segments from the local AS were already handled
                # above, so skip the core-segment lookup for them.
                continue
            for cseg in self.core_segments(first_ia=dseg_ia,
                                           last_ia=self.addr.isd_as,
                                           sibra=sibra):
                res.add((None, cseg, dseg))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_core(self, dst_ia, sibra=False):
        """Resolve path from non-core to core."""
        res = set()
        params = dst_ia.params()
        params["sibra"] = sibra
        if dst_ia[0] == self.addr.isd_as[0]:
            # Dst in local ISD. First check whether DST is a (super)-parent.
            for useg in self.up_segments(**params):
                res.add((useg, None, None))
        # Check whether dst is known core AS.
        for cseg in self.core_segments(**params):
            # Check do we have an up-seg that is connected to core_seg.
            for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
                res.add((useg, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_not_core_scion(self, dst_ia):
        """Resolve SCION path from non-core to non-core."""
        up_segs = self.up_segments()
        down_segs = self.down_segments(last_ia=dst_ia)
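        # Join the gathered up- and down-segments both as shortcut paths (via
        # crossover/peering links) and as end-to-end combinations of up-,
        # (optional) core-, and down-segments through every matching core
        # segment.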
        core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
        full_paths = build_shortcut_paths(up_segs, down_segs, self.peer_revs)
        tuples = []
        for up_seg in up_segs:
            for down_seg in down_segs:
                tuples.append((up_seg, None, down_seg))
                for core_seg in core_segs:
                    tuples.append((up_seg, core_seg, down_seg))
        full_paths.extend(tuples_to_full_paths(tuples))
        return full_paths

    def _resolve_not_core_not_core_sibra(self, dst_ia):
        """Resolve SIBRA path from non-core to non-core."""
        res = set()
        up_segs = set(self.up_segments(sibra=True))
        down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
        for up_seg, down_seg in product(up_segs, down_segs):
            src_core_ia = up_seg.first_ia()
            dst_core_ia = down_seg.first_ia()
            if src_core_ia == dst_core_ia:
                res.add((up_seg, down_seg))
                continue
            for core_seg in self.core_segments(first_ia=dst_core_ia,
                                               last_ia=src_core_ia,
                                               sibra=True):
                res.add((up_seg, core_seg, down_seg))
        return res

    def _strip_nones(self, set_):
        """Strip None entries from a set of tuples"""
        res = []
        for tup in set_:
            res.append(tuple(filter(None, tup)))
        return res

    def _sibra_strip_pcbs(self, paths):
        ret = []
        for pcbs in paths:
            resvs = []
            for pcb in pcbs:
                resvs.append(self._sibra_strip_pcb(pcb))
            ret.append(resvs)
        return ret

    def _sibra_strip_pcb(self, pcb):
        assert pcb.is_sibra()
        pcb_ext = pcb.sibra_ext
        resv_info = pcb_ext.info
        resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
        asms = pcb.iter_asms()
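        # For up-segments the AS entries are reversed so that they line up
        # with the SIBRA opaque fields (SOFs) zipped below.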
        if pcb_ext.p.up:
            asms = reversed(list(asms))
        iflist = []
        for sof, asm in zip(pcb_ext.iter_sofs(), asms):
            resv.sofs.append(sof)
            iflist.extend(
                self._sibra_add_ifs(asm.isd_as(), sof, resv_info.fwd_dir))
        assert resv.num_hops == len(resv.sofs)
        return pcb_ext.p.id, resv, iflist

    def _sibra_add_ifs(self, isd_as, sof, fwd):
        def _add(ifid):
            if ifid:
                ret.append((isd_as, ifid))

        ret = []
        if fwd:
            _add(sof.ingress)
            _add(sof.egress)
        else:
            _add(sof.egress)
            _add(sof.ingress)
        return ret

    def _wait_for_events(self, events, deadline):
        """
        Wait on a set of events, but only until the specified deadline. Returns
        the number of events that happened while waiting.
        """
        count = 0
        for e in events:
            if e.wait(max(0, deadline - SCIONTime.get_time())):
                count += 1
        return count

    def _fetch_segments(self, key):
        """
        Called to fetch the requested path.
        """
        dst_ia, flags = key
        try:
            addr, port = self.dns_query_topo(PATH_SERVICE)[0]
        except SCIONServiceLookupError:
            log_exception("Error querying path service:")
            return
        req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags)
        logging.debug("Sending path request: %s", req.short_desc())
        meta = self.DefaultMeta.from_values(host=addr, port=port)
        self.send_meta(req, meta)

    def _calc_core_segs(self, dst_isd, up_segs, down_segs):
        """
        Calculate all possible core segments joining the provided up and down
        segments. Returns a list of all known core segments connecting the
        resulting AS pairs.
        """
        src_core_ases = set()
        dst_core_ases = set()
        for seg in up_segs:
            src_core_ases.add(seg.first_ia()[1])
        for seg in down_segs:
            dst_core_ases.add(seg.first_ia()[1])
        # Generate all possible AS pairs
        as_pairs = list(product(src_core_ases, dst_core_ases))
        return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs)

    def _find_core_segs(self, src_isd, dst_isd, as_pairs):
        """
        Given a set of AS pairs across 2 ISDs, return the core segments
        connecting those pairs.
        """
        core_segs = []
        for src_core_as, dst_core_as in as_pairs:
            src_ia = ISD_AS.from_values(src_isd, src_core_as)
            dst_ia = ISD_AS.from_values(dst_isd, dst_core_as)
            if src_ia == dst_ia:
                continue
            seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia)
            if seg:
                core_segs.extend(seg)
        return core_segs
Example #25
0
 def test_missing_entry(self):
     key = ("1-ff00:0:300", 1)
     default = "default"
     rev_cache = RevCache()
     # Call
     ntools.eq_(rev_cache.get(key, default=default), default)