def _remove_revoked_pcbs(self, db, rev_info):
    """
    Removes all segments from 'db' that contain an IF token for which
    rev_token is a preimage (within 20 calls).

    :param db: The PathSegmentDB.
    :type db: :class:`lib.path_db.PathSegmentDB`
    :param rev_info: The revocation info
    :type rev_info: RevocationInfo
    :returns: The number of deletions.
    :rtype: int
    """
    # NOTE(review): verify_epoch() is used as a boolean here, while other
    # call sites in this file compare its result against EPOCH_* status
    # constants — confirm which revision of the API this function targets.
    if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
        logging.debug(
            "Failed to verify epoch: rev_info epoch %d,current epoch %d." %
            (rev_info.p.epoch, ConnectedHashTree.get_current_epoch()))
        return 0
    to_remove = []
    # Collect the hop-hash of every stored segment carrying an AS marking
    # that rev_info revokes.
    for segment in db(full=True):
        for asm in segment.iter_asms():
            if self._verify_revocation_for_asm(rev_info, asm):
                logging.debug("Removing segment: %s" % segment.short_desc())
                # NOTE(review): a segment with several matching ASMs is
                # appended once per match — presumably delete_all tolerates
                # duplicate keys; verify.
                to_remove.append(segment.get_hops_hash())
    return db.delete_all(to_remove)
def _create_next_tree(self):
    """
    Background loop that pre-computes the hash tree for the upcoming TTL
    window and publishes it in self._next_tree, from where
    _maintain_hash_tree() swaps it in on window rollover.
    """
    last_ttl_window = 0
    while self.run_flag.is_set():
        start = time.time()
        cur_ttl_window = ConnectedHashTree.get_ttl_window()
        # Wake up HASHTREE_UPDATE_WINDOW seconds before the window ends.
        time_to_sleep = (ConnectedHashTree.get_time_till_next_ttl() -
                         HASHTREE_UPDATE_WINDOW)
        # A tree for this window was already produced: sleep through to
        # the next window instead of recomputing.
        if cur_ttl_window == last_ttl_window:
            time_to_sleep += HASHTREE_TTL
        if time_to_sleep > 0:
            sleep_interval(start, time_to_sleep, "BS._create_next_tree",
                           self._quiet_startup())
        # at this point, there should be <= HASHTREE_UPDATE_WINDOW
        # seconds left in current ttl
        logging.info("Started computing hashtree for next TTL window (%d)",
                     cur_ttl_window + 2)
        last_ttl_window = ConnectedHashTree.get_ttl_window()
        ht_start = time.time()
        ifs = list(self.ifid2br.keys())
        tree = ConnectedHashTree.get_next_tree(self.addr.isd_as, ifs,
                                               self.hashtree_gen_key,
                                               HashType.SHA256)
        ht_end = time.time()
        # Publish under the lock; the worker thread consumes it.
        with self._hash_tree_lock:
            self._next_tree = tree
        logging.info(
            "Finished computing hashtree for TTL window %d in %.3fs" %
            (cur_ttl_window + 2, ht_end - ht_start))
def _create_next_tree(self):
    """
    Background loop that pre-computes the hash tree for the upcoming TTL
    window, using the TTL from self.config.revocation_tree_ttl, and
    publishes it in self._next_tree for _maintain_hash_tree() to swap in.
    """
    last_ttl_window = 0
    ttl = self.config.revocation_tree_ttl
    # Start computing one third of a window before the rollover.
    update_window = ttl // 3
    while self.run_flag.is_set():
        start = time.time()
        cur_ttl_window = ConnectedHashTree.get_ttl_window(ttl)
        time_to_sleep = ConnectedHashTree.time_until_next_window(
            ttl) - update_window
        # A tree for this window was already produced: sleep through to
        # the next window instead of recomputing.
        if cur_ttl_window == last_ttl_window:
            time_to_sleep += ttl
        if time_to_sleep > 0:
            sleep_interval(start, time_to_sleep, "BS._create_next_tree",
                           self._quiet_startup())
        # at this point, there should be <= update_window
        # seconds left in current ttl
        logging.info("Started computing hashtree for next TTL window (%d)",
                     cur_ttl_window + 2)
        last_ttl_window = ConnectedHashTree.get_ttl_window(ttl)
        ht_start = time.time()
        ifs = list(self.ifid2br.keys())
        tree = ConnectedHashTree.get_next_tree(self.addr.isd_as, ifs,
                                               self.hashtree_gen_key, ttl,
                                               HashType.SHA256)
        ht_end = time.time()
        # Publish under the lock; the worker thread consumes it.
        with self._hash_tree_lock:
            self._next_tree = tree
        logging.info(
            "Finished computing hashtree for TTL window %d in %.3fs" %
            (cur_ttl_window + 2, ht_end - ht_start))
def _pcb_list_to_remove(self, candidates, rev_info):
    """
    Calculates the list of PCBs to remove. Called by _remove_revoked_pcbs.

    :param candidates: Candidate PCBs.
    :type candidates: List
    :param rev_info: The RevocationInfo object.
    :type rev_info: RevocationInfo
    :returns: List of candidate ids to remove, without duplicates.
    """
    # The epoch check depends only on rev_info, so evaluate it once
    # instead of once per candidate.
    if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
        return []
    # The hash-tree root verification is likewise loop-invariant; hoisting
    # it also avoids re-taking the hash-tree lock for every candidate.
    root_verify = ConnectedHashTree.verify(rev_info, self._get_ht_root())
    to_remove = []
    processed = set()
    for cand in candidates:
        if cand.id in processed:
            continue
        processed.add(cand.id)
        # If the interface on which we received the PCB is
        # revoked, then the corresponding pcb needs to be removed.
        if (self.addr.isd_as == rev_info.isd_as() and
                cand.pcb.p.ifID == rev_info.p.ifID and root_verify):
            to_remove.append(cand.id)
            # Already scheduled for removal; no need to scan its ASMs.
            continue
        for asm in cand.pcb.iter_asms():
            if self._verify_revocation_for_asm(rev_info, asm, False):
                to_remove.append(cand.id)
                # One matching ASM suffices; avoid duplicate ids.
                break
    return to_remove
def _remove_revoked_segments(self, rev_info):
    """
    Try the previous and next hashes as possible astokens, and delete any
    segment that matches.

    :param rev_info: The revocation info
    :type rev_info: RevocationInfo
    """
    if ConnectedHashTree.verify_epoch(
            rev_info.p.epoch) != ConnectedHashTree.EPOCH_OK:
        return
    (hash01, hash12) = ConnectedHashTree.get_possible_hashes(rev_info)
    if_id = rev_info.p.ifID
    with self.htroot_if2seglock:
        down_segs_removed = 0
        core_segs_removed = 0
        up_segs_removed = 0
        # A segment may be indexed under either candidate hash, so try
        # both; pop() also removes the index entry itself.
        for h in (hash01, hash12):
            for sid in self.htroot_if2seg.pop((h, if_id), []):
                if self.down_segments.delete(
                        sid) == DBResult.ENTRY_DELETED:
                    down_segs_removed += 1
                if self.core_segments.delete(
                        sid) == DBResult.ENTRY_DELETED:
                    core_segs_removed += 1
                # Up-segment DB is only consulted for non-core ASes.
                if not self.topology.is_core_as:
                    if (self.up_segments.delete(sid) ==
                            DBResult.ENTRY_DELETED):
                        up_segs_removed += 1
    logging.debug(
        "Removed segments revoked by [%s]: UP: %d DOWN: %d CORE: %d" %
        (rev_info.short_desc(), up_segs_removed, down_segs_removed,
         core_segs_removed))
def test_different_epoch(self, time):
    """Just past the first epoch boundary, both epoch 0 and epoch 1
    verify as EPOCH_OK (epoch 0 presumably still inside the tolerance
    window — confirm against verify_epoch's tolerance handling)."""
    # Setup
    time.return_value = HASHTREE_EPOCH_TIME + 1
    # Call and test
    ntools.eq_(ConnectedHashTree.verify_epoch(0), ConnectedHashTree.EPOCH_OK)
    ntools.eq_(ConnectedHashTree.verify_epoch(1), ConnectedHashTree.EPOCH_OK)
def test_same_epoch(self, time):
    """Past the tolerance window of epoch 1: epoch 1 still verifies as
    EPOCH_OK, while epoch 2 is reported as EPOCH_FUTURE."""
    # Setup
    time.return_value = HASHTREE_EPOCH_TIME + HASHTREE_EPOCH_TOLERANCE + 1
    # Call and tests
    ntools.eq_(ConnectedHashTree.verify_epoch(1), ConnectedHashTree.EPOCH_OK)
    ntools.eq_(ConnectedHashTree.verify_epoch(2),
               ConnectedHashTree.EPOCH_FUTURE)
def handle_revocation(self, cpld, meta):
    """
    Validate an incoming RevocationInfo and purge affected path segments
    from the up/core/down segment databases.

    :param cpld: Control payload wrapping the RevocationInfo.
    :param meta: Metadata of the sender (used for logging only here).
    :returns: A SCIONDRevReplyStatus value describing the outcome.
    """
    pmgt = cpld.union
    rev_info = pmgt.union
    assert isinstance(rev_info, RevocationInfo), type(rev_info)
    logging.debug("Revocation info received: %s", rev_info.short_desc())
    try:
        rev_info.validate()
    except SCIONBaseError as e:
        logging.warning("Failed to validate RevInfo from %s: %s", meta, e)
        return SCIONDRevReplyStatus.INVALID
    # Verify epoch information and on failure return directly
    epoch_status = ConnectedHashTree.verify_epoch(rev_info.p.epoch)
    if epoch_status == ConnectedHashTree.EPOCH_PAST:
        logging.error(
            "Failed to verify epoch: epoch in the past %d,current epoch %d." %
            (rev_info.p.epoch, ConnectedHashTree.get_current_epoch()))
        return SCIONDRevReplyStatus.INVALID
    if epoch_status == ConnectedHashTree.EPOCH_FUTURE:
        logging.warning(
            "Failed to verify epoch: epoch in the future %d,current epoch %d."
            % (rev_info.p.epoch, ConnectedHashTree.get_current_epoch()))
        return SCIONDRevReplyStatus.INVALID
    # Near-past epochs are treated as stale rather than invalid.
    if epoch_status == ConnectedHashTree.EPOCH_NEAR_PAST:
        logging.info(
            "Failed to verify epoch: epoch in the near past %d, current epoch %d."
            % (rev_info.p.epoch, ConnectedHashTree.get_current_epoch()))
        return SCIONDRevReplyStatus.STALE
    self.peer_revs.add(rev_info)
    # Go through all segment databases and remove affected segments.
    removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
    removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
    removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
    logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments."
                 % (removed_up, removed_core, removed_down))
    total = removed_up + removed_core + removed_down
    if total > 0:
        return SCIONDRevReplyStatus.VALID
    else:
        # FIXME(scrye): UNKNOWN is returned in the following situations:
        #  - No matching segments exist
        #  - Matching segments exist, but the revoked interface is part of
        #    a peering link; new path queries sent to SCIOND won't use the
        #    link, but nothing is immediately revoked
        #  - A hash check failed and prevented the revocation from taking
        #    place
        #
        # This should be fixed in the future to provide clearer meaning
        # behind why the revocation could not be validated.
        #
        # For now, if applications receive an UNKNOWN reply to a revocation,
        # they should strongly consider flushing paths containing the
        # interface.
        return SCIONDRevReplyStatus.UNKNOWN
def test(self):
    """A proof generated in window T verifies against the root of T."""
    leaf_ifs = [23, 35, 120]
    tree = ConnectedHashTree(ISD_AS("1-11"), leaf_ifs, b"qwerty",
                             HashType.SHA256)
    tree_root = tree.get_root()
    # Prove interface 120 and check the proof against the current root.
    revocation_proof = tree.get_proof(120)
    ntools.eq_(ConnectedHashTree.verify(revocation_proof, tree_root), True)
def worker(self):
    """
    Worker thread that takes care of reading shared PCBs from ZK, and
    propagating PCBS/registering paths when master.
    """
    last_propagation = last_registration = 0
    last_ttl_window = ConnectedHashTree.get_ttl_window(
        self.config.revocation_tree_ttl)
    worker_cycle = 1.0
    start = time.time()
    while self.run_flag.is_set():
        sleep_interval(start, worker_cycle, "BS.worker cycle",
                       self._quiet_startup())
        start = time.time()
        # Update IS_MASTER metric.
        if self._labels:
            IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))
        try:
            self.zk.wait_connected()
            self.pcb_cache.process()
            self.revobjs_cache.process()
            self.handle_rev_objs()
            # Roll the hash tree over when a new TTL window starts.
            cur_ttl_window = ConnectedHashTree.get_ttl_window(
                self.config.revocation_tree_ttl)
            if cur_ttl_window != last_ttl_window:
                self._maintain_hash_tree()
                last_ttl_window = cur_ttl_window
            ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
            if not ret:
                # Failed to get the lock
                continue
            elif ret == ZK_LOCK_SUCCESS:
                logging.info("Became master")
                self._became_master()
            self.pcb_cache.expire(self.config.propagation_time * 10)
            self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
        except ZkNoConnection:
            continue
        now = time.time()
        if now - last_propagation >= self.config.propagation_time:
            self.handle_pcbs_propagation()
            last_propagation = now
        if (self.config.registers_paths and
                now - last_registration >= self.config.registration_time):
            try:
                self.register_segments()
            except SCIONKeyError as e:
                logging.error("Error while registering segments: %s", e)
                pass
            last_registration = now
def worker(self):
    """
    Worker thread that takes care of reading shared PCBs from ZK, and
    propagating PCBS/registering paths when master.
    """
    last_propagation = last_registration = 0
    last_ttl_window = ConnectedHashTree.get_ttl_window()
    worker_cycle = 1.0
    was_master = False
    start = time.time()
    while self.run_flag.is_set():
        sleep_interval(start, worker_cycle, "BS.worker cycle",
                       self._quiet_startup())
        start = time.time()
        try:
            self.process_pcb_queue()
            self.handle_unverified_beacons()
            self.zk.wait_connected()
            self.pcb_cache.process()
            self.revobjs_cache.process()
            self.handle_rev_objs()
            # Roll the hash tree over when a new TTL window starts.
            cur_ttl_window = ConnectedHashTree.get_ttl_window()
            if cur_ttl_window != last_ttl_window:
                self._maintain_hash_tree()
                last_ttl_window = cur_ttl_window
            if not self.zk.get_lock(lock_timeout=0, conn_timeout=0):
                was_master = False
                continue
            # Run master initialization only on the transition into
            # mastership, not on every cycle we hold the lock.
            if not was_master:
                self._became_master()
                was_master = True
            self.pcb_cache.expire(self.config.propagation_time * 10)
            self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
        except ZkNoConnection:
            continue
        now = time.time()
        if now - last_propagation >= self.config.propagation_time:
            self.handle_pcbs_propagation()
            last_propagation = now
        if (self.config.registers_paths and
                now - last_registration >= self.config.registration_time):
            try:
                self.register_segments()
            except SCIONKeyError as e:
                logging.error("Register_segments: %s", e)
                pass
            last_registration = now
def add_rev_infos(self, rev_infos):  # pragma: no cover
    """
    Appends a list of revocations to the PCB. Replaces existing
    revocations with newer ones.

    NOTE(review): incoming entries whose (isdas, ifID) key already exists
    are dropped regardless of epoch, so "replaces with newer ones" may
    not hold for same-key updates — verify intended behavior.

    :param rev_infos: RevocationInfo objects to append.
    """
    if not rev_infos:
        return
    existing = {}
    current_epoch = ConnectedHashTree.get_current_epoch()
    # Disown the current entries so they can be re-adopted after the list
    # is re-initialized below; silently drop already-expired ones.
    for i in range(len(self.p.exts.revInfos)):
        orphan = self.p.exts.revInfos.disown(i)
        info_p = orphan.get()
        if info_p.epoch >= current_epoch:
            existing[(info_p.isdas, info_p.ifID)] = orphan
    # Remove revocations for which we already have a newer one.
    filtered = []
    for info in rev_infos:
        if (info.p.epoch >= current_epoch and
                (info.p.isdas, info.p.ifID) not in existing):
            filtered.append(info)
    # Rebuild the list: re-adopted entries first, then the new ones.
    self.p.exts.init("revInfos", len(existing) + len(filtered))
    for i, orphan in enumerate(existing.values()):
        self.p.exts.revInfos.adopt(i, orphan)
    n_existing = len(existing)
    for i, info in enumerate(filtered):
        self.p.exts.revInfos[n_existing + i] = info.p
def test(self, hash_func_for_type):
    """get_possible_hashes() folds the leaf with its siblings, then
    combines the result with prevRoot / nextRoot to produce the two
    candidate connected-tree hashes."""
    # Setup
    siblings = []
    siblings.append(create_mock_full({"isLeft": True, "hash": "10s10"}))
    siblings.append(create_mock_full({"isLeft": False, "hash": "30s300"}))
    p = create_mock_full({
        "ifID": 2,
        "epoch": 0,
        "nonce": b"s20",
        "siblings": siblings,
        "prevRoot": "p",
        "nextRoot": "n",
        "hashType": 0
    })
    rev_info = create_mock_full({"p": p})
    # Successive outputs of the mocked hash function, one per fold step.
    hashes = [
        "20s20", "10s1020s20", "10s1020s2030s300", "p10s1020s2030s300",
        "10s1020s2030s300n"
    ]
    hash_func = create_mock_full(side_effect=hashes)
    hash_func_for_type.return_value = hash_func
    # Call
    hash01, hash12 = ConnectedHashTree.get_possible_hashes(rev_info)
    # Tests
    ntools.eq_(hash01, "p10s1020s2030s300")
    ntools.eq_(hash12, "10s1020s2030s300n")
def _verify_revocation_for_asm(self, rev_info, as_marking, verify_all=True):
    """
    Check whether rev_info revokes an interface of the given AS marking.

    :param rev_info: The RevocationInfo object.
    :param as_marking: The ASMarking object.
    :param verify_all: If true, verify all PCBMs (including peers),
        otherwise only verify the up/down hop.
    :return: True, if the revocation successfully revokes an upstream
        interface in the AS marking, False otherwise.
    """
    # The revocation must target this AS and verify against the marking's
    # hash-tree root before any interface comparison is meaningful.
    if rev_info.isd_as() != as_marking.isd_as():
        return False
    if not ConnectedHashTree.verify(rev_info, as_marking.p.hashTreeRoot):
        return False
    revoked_if = rev_info.p.ifID
    for pcbm in as_marking.iter_pcbms():
        hof = pcbm.hof()
        if revoked_if in (hof.ingress_if, hof.egress_if):
            return True
        if not verify_all:
            # Only the first (up/down) hop was requested.
            return False
    return False
def test(self):
    """update() shifts the connected tree's window: the old T-root becomes
    the new T-1 root, and the old T+1 root becomes the new T-root."""
    # Setup
    isd_as = ISD_AS("1-11")
    if_ids = [23, 35, 120]
    initial_seed = b"qwerty"
    inst = ConnectedHashTree(isd_as, if_ids, initial_seed)
    root1_before_update = inst._ht1._nodes[0]
    root2_before_update = inst._ht2._nodes[0]
    # Call
    new_tree = inst.get_next_tree(isd_as, if_ids, b"new!!seed")
    inst.update(new_tree)
    # Tests
    root0_after_update = inst._ht0_root
    root1_after_update = inst._ht1._nodes[0]
    ntools.eq_(root1_before_update, root0_after_update)
    ntools.eq_(root2_before_update, root1_after_update)
def test(self):
    """get_possible_hashes() here receives the hash function explicitly;
    the mocked hash yields one digest per fold step, ending with the
    prevRoot- and nextRoot-combined candidates."""
    # Setup
    siblings = []
    siblings.append(create_mock_full({"isLeft": True, "hash": "10s10"}))
    siblings.append(create_mock_full({"isLeft": False, "hash": "30s300"}))
    p = create_mock_full({
        "ifID": 2,
        "epoch": 0,
        "nonce": b"s20",
        "siblings": siblings,
        "prevRoot": "p",
        "nextRoot": "n"
    })
    revProof = create_mock_full({"p": p})
    # Successive digests returned by the mocked hash object.
    hashes = [
        "20s20", "10s1020s20", "10s1020s2030s300", "p10s1020s2030s300",
        "10s1020s2030s300n"
    ]
    hash_new = create_mock_full({"digest()...": hashes})
    hash_func = create_mock_full({"new()": hash_new})
    # Call
    hash01, hash12 = ConnectedHashTree.get_possible_hashes(
        revProof, hash_func)
    # Tests
    ntools.eq_(hash01, "p10s1020s2030s300")
    ntools.eq_(hash12, "10s1020s2030s300n")
def add(self, rev_info):
    """
    Adds rev_info to the cache and returns True if the operation succeeds.
    """
    # NOTE(review): verify_epoch() is used as a boolean here, while other
    # call sites compare against EPOCH_* constants — confirm API revision.
    if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
        return False
    with self._lock:
        key = _mk_key(rev_info)
        stored_info = self.get(key)
        if not stored_info:
            # Try to free up space in case the cache reaches the cap limit.
            if len(self._cache) >= self._capacity:
                for info in list(self._cache.values()):
                    self._validate_entry(info)
            # Couldn't free up enough space...
            if len(self._cache) >= self._capacity:
                logging.error("Revocation cache full!.")
                return False
            self._cache[key] = rev_info
            if self._labels:
                REVS_ADDED.labels(**self._labels).inc()
                REVS_TOTAL.labels(**self._labels).inc()
                REVS_BYTES.labels(**self._labels).inc(len(rev_info))
            return True
        # Replace the stored entry only with a strictly newer epoch.
        if rev_info.p.epoch > stored_info.p.epoch:
            self._cache[key] = rev_info
            if self._labels:
                REVS_ADDED.labels(**self._labels).inc()
                REVS_REMOVED.labels(**self._labels).inc()
                REVS_BYTES.labels(**self._labels).inc(len(rev_info) -
                                                      len(stored_info))
            return True
        return False
def _handle_if_timeouts(self):
    """
    Periodically checks each interface state and issues an if revocation,
    if no keep-alive message was received for IFID_TOUT.
    """
    # Tracks the epoch in which each interface was last revoked, so a
    # revocation is re-issued at most once per epoch.
    if_id_last_revoked = defaultdict(int)
    while self.run_flag.is_set():
        start_time = time.time()
        with self.ifid_state_lock:
            to_revoke = []
            for (if_id, if_state) in self.ifid_state.items():
                cur_epoch = ConnectedHashTree.get_current_epoch()
                if not if_state.is_expired() or (
                        if_state.is_revoked() and
                        if_id_last_revoked[if_id] == cur_epoch):
                    # Either the interface hasn't timed out, or it's already revoked for this
                    # epoch
                    continue
                if_id_last_revoked[if_id] = cur_epoch
                if not if_state.is_revoked():
                    logging.info("IF %d went down.", if_id)
                to_revoke.append(if_id)
                if_state.revoke_if_expired()
            # Issue the collected revocations as one batch.
            self._issue_revocations(to_revoke)
        sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL,
                       "Handle IF timeouts")
def _validate_entry(self, rev_info, cur_epoch=None):  # pragma: no cover
    """Evict rev_info from the cache if its epoch no longer verifies.

    :returns: True if the entry is still valid, False if it was removed.
    """
    if ConnectedHashTree.verify_epoch(rev_info.p.epoch, cur_epoch):
        return True
    # Entry expired: drop it and keep the metrics in sync, if enabled.
    del self._cache[_mk_key(rev_info)]
    if self._labels:
        REVS_REMOVED.labels(**self._labels).inc()
        REVS_TOTAL.labels(**self._labels).dec()
        REVS_BYTES.labels(**self._labels).dec(len(rev_info))
    return False
def test_one_timestep(self):
    # Check that the revocation proof is verifiable across T and T+1.
    # Setup
    isd_as = ISD_AS("1-11")
    if_ids = [23, 35, 120]
    initial_seed = b"qwerty"
    inst = ConnectedHashTree(isd_as, if_ids, initial_seed)
    root = inst.get_root()
    # Call: advance the tree one TTL window while keeping the old root.
    next_tree = inst.get_next_tree(isd_as, if_ids, b"new!!seed")
    inst.update(next_tree)
    # Tests: a proof from the updated tree still verifies against the
    # pre-update root.
    proof = inst.get_proof(35)  # if_id = 35.
    ntools.eq_(ConnectedHashTree.verify(proof, root), True)
def _maintain_hash_tree(self):
    """
    Maintain the hashtree. Update the windows in the connected tree.
    """
    with self._hash_tree_lock:
        if self._next_tree is not None:
            self._hash_tree.update(self._next_tree)
            # Consume the pre-computed tree so the builder thread knows
            # to produce a fresh one.
            self._next_tree = None
        else:
            # No pre-computed replacement available — treated as fatal.
            logging.critical("Did not create hashtree in time; dying")
            kill_self()
    logging.info("New Hash Tree TTL window beginning: %s",
                 ConnectedHashTree.get_ttl_window())
def _add_peer_revs(self, segments): """ Adds revocations to revoked peering interfaces in segments. :returns: Set with modified segments. Elements of the original set stay untouched. """ # TODO(shitz): This could be optimized, by keeping a map of (ISD_AS, IF) # -> RevocationInfo for peer revocations. modified_segs = set() current_epoch = ConnectedHashTree.get_current_epoch() for segment in segments: seg_id = segment.get_hops_hash() with self.pcb_cache_lock: if seg_id in self.pcb_cache: cached_seg = self.pcb_cache[seg_id] logging.debug("Adding segment from PCB cache to response:" " %s" % cached_seg.short_desc()) modified_segs.add(cached_seg) continue revs_to_add = set() for rev_info in list(self.revocations): if rev_info.p.epoch < current_epoch: self.revocations.pop(rev_info) continue for asm in segment.iter_asms(): if asm.isd_as() != rev_info.isd_as(): continue for pcbm in asm.iter_pcbms(1): hof = pcbm.hof() if rev_info.p.ifID in [ hof.ingress_if, hof.egress_if ]: revs_to_add.add(rev_info.copy()) if revs_to_add: new_seg = segment.copy() new_seg.add_rev_infos(list(revs_to_add)) logging.debug("Adding revocations to PCB: %s" % new_seg.short_desc()) self.pcb_cache[seg_id] = new_seg modified_segs.add(new_seg) else: modified_segs.add(segment) return modified_segs
def _validate_segment(self, seg):
    """
    Check segment for revoked upstream/downstream interfaces.

    :param seg: The PathSegment object.
    :return: False, if the path segment contains a revoked upstream/
        downstream interface (not peer). True otherwise.
    """
    # NOTE(review): verify_epoch() is used as a boolean here, while other
    # call sites compare against EPOCH_* constants — confirm API revision.
    for rev_info in list(self.revocations):
        # Drop revocations whose epoch no longer verifies as we scan.
        if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
            self.revocations.pop(rev_info)
            continue
        for asm in seg.iter_asms():
            # pcbm(0) is the up/down hop marking; peer PCBMs are ignored.
            pcbm = asm.pcbm(0)
            if (rev_info.isd_as() == asm.isd_as() and
                    rev_info.p.ifID in [pcbm.p.inIF, pcbm.p.outIF]):
                logging.debug("Found revoked interface (%d) in segment "
                              "%s." % (rev_info.p.ifID, seg.short_desc()))
                return False
    return True
def _handle_if_timeouts(self):
    """
    Periodically checks each interface state and issues an if revocation,
    if no keep-alive message was received for IFID_TOUT.
    """
    # Tracks the epoch in which each interface was last revoked, so the
    # revocation is re-issued at most once per epoch.
    if_id_last_revoked = defaultdict(int)
    while self.run_flag.is_set():
        start_time = time.time()
        with self.ifid_state_lock:
            for (if_id, if_state) in self.ifid_state.items():
                cur_epoch = ConnectedHashTree.get_current_epoch()
                # Check if interface has timed-out.
                if ((if_state.is_expired() or if_state.is_revoked()) and
                        (if_id_last_revoked[if_id] != cur_epoch)):
                    if_id_last_revoked[if_id] = cur_epoch
                    if not if_state.is_revoked():
                        logging.info("IF %d appears to be down.", if_id)
                    # Issue immediately per interface (no batching here).
                    self._issue_revocation(if_id)
                    if_state.revoke_if_expired()
        sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL,
                       "Handle IF timeouts")
def _skip_peer(cls, peer_rev, ht_root):  # pragma: no cover
    """Return True if peer_rev is a verified, epoch-valid revocation
    against ht_root (i.e. the peer interface should be skipped)."""
    if not peer_rev:
        return False
    # NOTE(review): verify_epoch() treated as boolean here; a sibling
    # revision compares its result against EPOCH_OK — confirm which
    # API revision this function targets.
    return (ConnectedHashTree.verify_epoch(peer_rev.p.epoch) and
            ConnectedHashTree.verify(peer_rev, ht_root))
def _skip_peer(peer_rev, ht_root):  # pragma: no cover
    """Return True iff peer_rev is present, its epoch verifies as
    EPOCH_OK, and it verifies against ht_root."""
    if not peer_rev:
        return False
    epoch_ok = (ConnectedHashTree.verify_epoch(peer_rev.p.epoch) ==
                ConnectedHashTree.EPOCH_OK)
    if not epoch_ok:
        return False
    return ConnectedHashTree.verify(peer_rev, ht_root)
class BeaconServer(SCIONElement, metaclass=ABCMeta): """ The SCION PathConstructionBeacon Server. Attributes: if2rev_tokens: Contains the currently used revocation token hash-chain for each interface. """ SERVICE_TYPE = BEACON_SERVICE # Amount of time units a HOF is valid (time unit is EXP_TIME_UNIT). HOF_EXP_TIME = 63 # Timeout for TRC or Certificate requests. REQUESTS_TIMEOUT = 10 # ZK path for incoming PCBs ZK_PCB_CACHE_PATH = "pcb_cache" # ZK path for revocations. ZK_REVOCATIONS_PATH = "rev_cache" # Time revocation objects are cached in memory (in seconds). ZK_REV_OBJ_MAX_AGE = HASHTREE_EPOCH_TIME # Interval to checked for timed out interfaces. IF_TIMEOUT_INTERVAL = 1 def __init__(self, server_id, conf_dir): """ :param str server_id: server identifier. :param str conf_dir: configuration directory. """ super().__init__(server_id, conf_dir) # TODO: add 2 policies self.path_policy = PathPolicy.from_file( os.path.join(conf_dir, PATH_POLICY_FILE)) self.unverified_beacons = deque() self.trc_requests = {} self.trcs = {} sig_key_file = get_sig_key_file_path(self.conf_dir) self.signing_key = base64.b64decode(read_file(sig_key_file)) self.of_gen_key = PBKDF2(self.config.master_as_key, b"Derive OF Key") self.hashtree_gen_key = PBKDF2(self.config.master_as_key, b"Derive hashtree Key") logging.info(self.config.__dict__) self._hash_tree = None self._hash_tree_lock = Lock() self._next_tree = None self._init_hash_tree() self.ifid_state = {} for ifid in self.ifid2br: self.ifid_state[ifid] = InterfaceState() self.ifid_state_lock = RLock() self.CTRL_PLD_CLASS_MAP = { PayloadClass.PCB: { None: self.handle_pcb }, PayloadClass.IFID: { None: self.handle_ifid_packet }, PayloadClass.CERT: { CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_rep, CertMgmtType.TRC_REPLY: self.process_trc_rep, }, PayloadClass.PATH: { PMT.IFSTATE_REQ: self._handle_ifstate_request, PMT.REVOCATION: self._handle_revocation, }, } self.SCMP_PLD_CLASS_MAP = { SCMPClass.PATH: { SCMPPathClass.REVOKED_IF: 
self._handle_scmp_revocation, }, } zkid = ZkID.from_values(self.addr.isd_as, self.id, [(self.addr.host, self._port)]).pack() self.zk = Zookeeper(self.addr.isd_as, BEACON_SERVICE, zkid, self.topology.zookeepers) self.zk.retry("Joining party", self.zk.party_setup) self.incoming_pcbs = deque() self.pcb_cache = ZkSharedCache(self.zk, self.ZK_PCB_CACHE_PATH, self.process_pcbs) self.revobjs_cache = ZkSharedCache(self.zk, self.ZK_REVOCATIONS_PATH, self.process_rev_objects) self.local_rev_cache = ExpiringDict( 1000, HASHTREE_EPOCH_TIME + HASHTREE_EPOCH_TOLERANCE) self.local_rev_cache_lock = Lock() def _init_hash_tree(self): ifs = list(self.ifid2br.keys()) self._hash_tree = ConnectedHashTree(self.addr.isd_as, ifs, self.hashtree_gen_key) def _get_ht_proof(self, if_id): with self._hash_tree_lock: return self._hash_tree.get_proof(if_id) def _get_ht_root(self): with self._hash_tree_lock: return self._hash_tree.get_root() def propagate_downstream_pcb(self, pcb): """ Propagates the beacon to all children. :param pcb: path segment. 
:type pcb: PathSegment """ for r in self.topology.child_border_routers: if not r.interface.to_if_id: continue new_pcb, meta = self._mk_prop_pcb_meta(pcb.copy(), r.interface.isd_as, r.interface.if_id) if not new_pcb: continue self.send_meta(new_pcb, meta) logging.info("Downstream PCB propagated to %s via IF %s", r.interface.isd_as, r.interface.if_id) def _mk_prop_pcb_meta(self, pcb, dst_ia, egress_if): ts = pcb.get_timestamp() asm = self._create_asm(pcb.p.ifID, egress_if, ts, pcb.last_hof()) if not asm: return None, None pcb.add_asm(asm) pcb.sign(self.signing_key) one_hop_path = self._create_one_hop_path(egress_if) if self.DefaultMeta == TCPMetadata: return pcb, self.DefaultMeta.from_values(ia=dst_ia, host=SVCType.BS_A, path=one_hop_path, flags=TCPFlags.ONEHOPPATH) return pcb, UDPMetadata.from_values(ia=dst_ia, host=SVCType.BS_A, path=one_hop_path, ext_hdrs=[OneHopPathExt()]) def _create_one_hop_path(self, egress_if): ts = int(SCIONTime.get_time()) info = InfoOpaqueField.from_values(ts, self.addr.isd_as[0], hops=2) hf1 = HopOpaqueField.from_values(self.HOF_EXP_TIME, 0, egress_if) hf1.set_mac(self.of_gen_key, ts, None) # Return a path where second HF is empty. return SCIONPath.from_values(info, [hf1, HopOpaqueField()]) def _mk_if_info(self, if_id): """ Small helper method to make it easier to deal with ingress/egress interface being 0 while building ASMarkings. """ d = {"remote_ia": ISD_AS.from_values(0, 0), "remote_if": 0, "mtu": 0} if not if_id: return d br = self.ifid2br[if_id] d["remote_ia"] = br.interface.isd_as d["remote_if"] = br.interface.to_if_id d["mtu"] = br.interface.mtu return d @abstractmethod def handle_pcbs_propagation(self): """ Main loop to propagate received beacons. 
""" raise NotImplementedError def handle_pcb(self, pcb, meta): """Receives beacon and stores it for processing.""" pcb.p.ifID = meta.path.get_hof().ingress_if if not self.path_policy.check_filters(pcb): return self.incoming_pcbs.append(pcb) meta.close() entry_name = "%s-%s" % (pcb.get_hops_hash(hex=True), time.time()) try: self.pcb_cache.store(entry_name, pcb.copy().pack()) except ZkNoConnection: logging.error("Unable to store PCB in shared cache: " "no connection to ZK") def handle_ext(self, pcb): """ Handle beacon extensions. """ # Handle PCB extensions: if pcb.is_sibra(): logging.debug("%s", pcb.sibra_ext) @abstractmethod def process_pcbs(self, pcbs, raw=True): """ Processes new beacons and appends them to beacon list. """ raise NotImplementedError def process_pcb_queue(self): pcbs = [] while self.incoming_pcbs: pcbs.append(self.incoming_pcbs.popleft()) self.process_pcbs(pcbs, raw=False) logging.debug("Processed %d pcbs from incoming queue", len(pcbs)) @abstractmethod def register_segments(self): """ Registers paths according to the received beacons. 
""" raise NotImplementedError def _create_asm(self, in_if, out_if, ts, prev_hof): pcbms = list(self._create_pcbms(in_if, out_if, ts, prev_hof)) if not pcbms: return None chain = self._get_my_cert() _, cert_ver = chain.get_leaf_isd_as_ver() return ASMarking.from_values(self.addr.isd_as, self._get_my_trc().version, cert_ver, pcbms, self._get_ht_root(), self.topology.mtu, chain) def _create_pcbms(self, in_if, out_if, ts, prev_hof): up_pcbm = self._create_pcbm(in_if, out_if, ts, prev_hof) if not up_pcbm: return yield up_pcbm for br in sorted(self.topology.peer_border_routers): in_if = br.interface.if_id with self.ifid_state_lock: if (not self.ifid_state[in_if].is_active() and not self._quiet_startup()): logging.warning('Peer ifid:%d inactive (not added).', in_if) continue peer_pcbm = self._create_pcbm(in_if, out_if, ts, up_pcbm.hof(), xover=True) if peer_pcbm: yield peer_pcbm def _create_pcbm(self, in_if, out_if, ts, prev_hof, xover=False): in_info = self._mk_if_info(in_if) if in_info["remote_ia"].int() and not in_info["remote_if"]: return None out_info = self._mk_if_info(out_if) if out_info["remote_ia"].int() and not out_info["remote_if"]: return None hof = HopOpaqueField.from_values(self.HOF_EXP_TIME, in_if, out_if, xover=xover) hof.set_mac(self.of_gen_key, ts, prev_hof) return PCBMarking.from_values(in_info["remote_ia"], in_info["remote_if"], in_info["mtu"], out_info["remote_ia"], out_info["remote_if"], hof) def _terminate_pcb(self, pcb): """ Copies a PCB, terminates it and adds the segment ID. Terminating a PCB means adding a opaque field with the egress IF set to 0, i.e., there is no AS to forward a packet containing this path segment to. """ pcb = pcb.copy() asm = self._create_asm(pcb.p.ifID, 0, pcb.get_timestamp(), pcb.last_hof()) if not asm: return None pcb.add_asm(asm) return pcb def handle_ifid_packet(self, pld, meta): """ Update the interface state for the corresponding interface. :param pld: The IFIDPayload. 
:type pld: IFIDPayload """ ifid = pld.p.relayIF with self.ifid_state_lock: if ifid not in self.ifid_state: raise SCIONKeyError("Invalid IF %d in IFIDPayload" % ifid) br = self.ifid2br[ifid] br.interface.to_if_id = pld.p.origIF prev_state = self.ifid_state[ifid].update() if prev_state == InterfaceState.INACTIVE: logging.info("IF %d activated", ifid) elif prev_state in [ InterfaceState.TIMED_OUT, InterfaceState.REVOKED ]: logging.info("IF %d came back up.", ifid) if not prev_state == InterfaceState.ACTIVE: if self.zk.have_lock(): # Inform BRs about the interface coming up. state_info = IFStateInfo.from_values( ifid, True, self._get_ht_proof(ifid)) pld = IFStatePayload.from_values([state_info]) for br in self.topology.get_all_border_routers(): meta = UDPMetadata.from_values(host=br.addr, port=br.port) self.send_meta(pld.copy(), meta, (br.addr, br.port)) def run(self): """ Run an instance of the Beacon Server. """ threading.Thread(target=thread_safety_net, args=(self.worker, ), name="BS.worker", daemon=True).start() # https://github.com/netsec-ethz/scion/issues/308: threading.Thread(target=thread_safety_net, args=(self._handle_if_timeouts, ), name="BS._handle_if_timeouts", daemon=True).start() threading.Thread(target=thread_safety_net, args=(self._create_next_tree, ), name="BS._create_next_tree", daemon=True).start() super().run() def _create_next_tree(self): last_ttl_window = 0 while self.run_flag.is_set(): start = time.time() cur_ttl_window = ConnectedHashTree.get_ttl_window() time_to_sleep = (ConnectedHashTree.get_time_till_next_ttl() - HASHTREE_UPDATE_WINDOW) if cur_ttl_window == last_ttl_window: time_to_sleep += HASHTREE_TTL if time_to_sleep > 0: sleep_interval(start, time_to_sleep, "BS._create_next_tree", self._quiet_startup()) # at this point, there should be <= HASHTREE_UPDATE_WINDOW # seconds left in current ttl logging.info("Started computing hashtree for next ttl") last_ttl_window = ConnectedHashTree.get_ttl_window() ifs = list(self.ifid2br.keys()) tree = 
ConnectedHashTree.get_next_tree(self.addr.isd_as, ifs, self.hashtree_gen_key) with self._hash_tree_lock: self._next_tree = tree def _maintain_hash_tree(self): """ Maintain the hashtree. Update the the windows in the connected tree """ with self._hash_tree_lock: if self._next_tree is not None: self._hash_tree.update(self._next_tree) self._next_tree = None else: logging.critical("Did not create hashtree in time; dying") kill_self() logging.info("New Hash Tree TTL beginning") def worker(self): """ Worker thread that takes care of reading shared PCBs from ZK, and propagating PCBS/registering paths when master. """ last_propagation = last_registration = 0 last_ttl_window = ConnectedHashTree.get_ttl_window() worker_cycle = 1.0 was_master = False start = time.time() while self.run_flag.is_set(): sleep_interval(start, worker_cycle, "BS.worker cycle", self._quiet_startup()) start = time.time() try: self.process_pcb_queue() self.handle_unverified_beacons() self.zk.wait_connected() self.pcb_cache.process() self.revobjs_cache.process() self.handle_rev_objs() cur_ttl_window = ConnectedHashTree.get_ttl_window() if cur_ttl_window != last_ttl_window: self._maintain_hash_tree() last_ttl_window = cur_ttl_window if not self.zk.get_lock(lock_timeout=0, conn_timeout=0): was_master = False continue if not was_master: self._became_master() was_master = True self.pcb_cache.expire(self.config.propagation_time * 10) self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE) except ZkNoConnection: continue now = time.time() if now - last_propagation >= self.config.propagation_time: self.handle_pcbs_propagation() last_propagation = now if (self.config.registers_paths and now - last_registration >= self.config.registration_time): try: self.register_segments() except SCIONKeyError as e: logging.error("Register_segments: %s", e) pass last_registration = now def _became_master(self): """ Called when a BS becomes the new master. Resets some state that will be rebuilt over time. 
""" # Reset all timed-out and revoked interfaces to inactive. with self.ifid_state_lock: for (_, ifstate) in self.ifid_state.items(): if not ifstate.is_active(): ifstate.reset() def _try_to_verify_beacon(self, pcb, quiet=False): """ Try to verify a beacon. :param pcb: path segment to verify. :type pcb: PathSegment """ assert isinstance(pcb, PathSegment) asm = pcb.asm(-1) if self._check_trc(asm.isd_as(), asm.p.trcVer): if self._verify_beacon(pcb): self._handle_verified_beacon(pcb) else: logging.warning("Invalid beacon. %s", pcb) else: if not quiet: logging.warning("Certificate(s) or TRC missing for pcb: %s", pcb.short_desc()) self.unverified_beacons.append(pcb) @abstractmethod def _check_trc(self, isd_as, trc_ver): """ Return True or False whether the necessary Certificate and TRC files are found. :param ISD_AS isd_is: ISD-AS identifier. :param int trc_ver: TRC file version. """ raise NotImplementedError def _get_my_trc(self): return self.trust_store.get_trc(self.addr.isd_as[0]) def _get_my_cert(self): return self.trust_store.get_cert(self.addr.isd_as) def _get_trc(self, isd_as, trc_ver): """ Get TRC from local storage or memory. :param ISD_AS isd_as: ISD-AS identifier. :param int trc_ver: TRC file version. 
""" trc = self.trust_store.get_trc(isd_as[0], trc_ver) if not trc: # Requesting TRC file from cert server trc_tuple = isd_as[0], trc_ver now = int(time.time()) if (trc_tuple not in self.trc_requests or (now - self.trc_requests[trc_tuple] > self.REQUESTS_TIMEOUT)): trc_req = TRCRequest.from_values(isd_as, trc_ver) logging.info("Requesting %sv%s TRC", isd_as[0], trc_ver) try: addr, port = self.dns_query_topo(CERTIFICATE_SERVICE)[0] except SCIONServiceLookupError as e: logging.warning("Sending TRC request failed: %s", e) return None meta = UDPMetadata.from_values(host=addr, port=port) self.send_meta(trc_req, meta) self.trc_requests[trc_tuple] = now return None return trc def _verify_beacon(self, pcb): """ Once the necessary certificate and TRC files have been found, verify the beacons. :param pcb: path segment to verify. :type pcb: PathSegment """ assert isinstance(pcb, PathSegment) asm = pcb.asm(-1) cert_ia = asm.isd_as() trc = self.trust_store.get_trc(cert_ia[0], asm.p.trcVer) return verify_sig_chain_trc(pcb.sig_pack(), asm.p.sig, str(cert_ia), asm.chain(), trc, asm.p.trcVer) @abstractmethod def _handle_verified_beacon(self, pcb): """ Once a beacon has been verified, place it into the right containers. :param pcb: verified path segment. :type pcb: PathSegment """ raise NotImplementedError @abstractmethod def process_cert_chain_rep(self, cert_chain_rep, meta): """ Process the Certificate chain reply. """ raise NotImplementedError def process_trc_rep(self, rep, meta): """ Process the TRC reply. :param rep: TRC reply. :type rep: TRCReply """ logging.info("TRC reply received for %s", rep.trc.get_isd_ver()) self.trust_store.add_trc(rep.trc) rep_key = rep.trc.get_isd_ver() if rep_key in self.trc_requests: del self.trc_requests[rep_key] def handle_unverified_beacons(self): """ Handle beacons which are waiting to be verified. 
""" for _ in range(len(self.unverified_beacons)): pcb = self.unverified_beacons.popleft() self._try_to_verify_beacon(pcb, quiet=True) def process_rev_objects(self, rev_infos): """ Processes revocation infos stored in Zookeeper. """ with self.local_rev_cache_lock: for raw in rev_infos: try: rev_info = RevocationInfo.from_raw(raw) except SCIONParseError as e: logging.error( "Error processing revocation info from ZK: %s", e) continue self.local_rev_cache[rev_info] = rev_info.copy() def _issue_revocation(self, if_id): """ Store a RevocationInfo in ZK and send a revocation to all BRs. :param if_id: The interface that needs to be revoked. :type if_id: int """ # Only the master BS issues revocations. if not self.zk.have_lock(): return rev_info = self._get_ht_proof(if_id) logging.error("Issuing revocation for IF %d.", if_id) # Issue revocation to all BRs. info = IFStateInfo.from_values(if_id, False, rev_info) pld = IFStatePayload.from_values([info]) for br in self.topology.get_all_border_routers(): meta = UDPMetadata.from_values(host=br.addr, port=br.port) self.send_meta(pld.copy(), meta, (br.addr, br.port)) self._process_revocation(rev_info) self._send_rev_to_local_ps(rev_info) def _send_rev_to_local_ps(self, rev_info): """ Sends the given revocation to its local path server. :param rev_info: The RevocationInfo object :type rev_info: RevocationInfo """ if self.zk.have_lock() and self.topology.path_servers: try: addr, port = self.dns_query_topo(PATH_SERVICE)[0] except SCIONServiceLookupError: # If there are no local path servers, stop here. 
return logging.info("Sending revocation to local PS.") meta = UDPMetadata.from_values(host=addr, port=port) self.send_meta(rev_info.copy(), meta) def _handle_scmp_revocation(self, pld, meta): rev_info = RevocationInfo.from_raw(pld.info.rev_info) logging.info("Received revocation via SCMP:\n%s", rev_info.short_desc()) self._process_revocation(rev_info) def _handle_revocation(self, rev_info, meta): logging.info("Received revocation via TCP/UDP:\n%s", rev_info.short_desc()) if not self._validate_revocation(rev_info): return self._process_revocation(rev_info) def handle_rev_objs(self): with self.local_rev_cache_lock: for rev_info in self.local_rev_cache.values(): self._remove_revoked_pcbs(rev_info) def _process_revocation(self, rev_info): """ Removes PCBs containing a revoked interface and sends the revocation to the local PS. :param rev_info: The RevocationInfo object :type rev_info: RevocationInfo """ assert isinstance(rev_info, RevocationInfo) if_id = rev_info.p.ifID if not if_id: logging.error("Trying to revoke IF with ID 0.") return with self.local_rev_cache_lock: self.local_rev_cache[rev_info] = rev_info.copy() logging.info("Storing revocation in ZK.") rev_token = rev_info.copy().pack() entry_name = "%s:%s" % (hash(rev_token), time.time()) try: self.revobjs_cache.store(entry_name, rev_token) except ZkNoConnection as exc: logging.error("Unable to store revocation in shared cache " "(no ZK connection): %s" % exc) self._remove_revoked_pcbs(rev_info) @abstractmethod def _remove_revoked_pcbs(self, rev_info): """ Removes the PCBs containing the revoked interface. :param rev_info: The RevocationInfo object. :type rev_info: RevocationInfo """ raise NotImplementedError def _pcb_list_to_remove(self, candidates, rev_info): """ Calculates the list of PCBs to remove. Called by _remove_revoked_pcbs. :param candidates: Candidate PCBs. :type candidates: List :param rev_info: The RevocationInfo object. 
:type rev_info: RevocationInfo """ to_remove = [] processed = set() for cand in candidates: if cand.id in processed: continue processed.add(cand.id) if not ConnectedHashTree.verify_epoch(rev_info.p.epoch): continue # If the interface on which we received the PCB is # revoked, then the corresponding pcb needs to be removed, if # the proof can be verified with the own AS's root for the current # epoch and the if_id of the interface on which pcb was received # matches that in the rev_info root_verify = ConnectedHashTree.verify(rev_info, self._get_ht_root()) if (self.addr.isd_as == rev_info.isd_as() and cand.pcb.p.ifID == rev_info.p.ifID and root_verify): to_remove.append(cand.id) for asm in cand.pcb.iter_asms(): if self._verify_revocation_for_asm(rev_info, asm, False): to_remove.append(cand.id) return to_remove def _handle_if_timeouts(self): """ Periodically checks each interface state and issues an if revocation, if no keep-alive message was received for IFID_TOUT. """ if_id_last_revoked = defaultdict(int) while self.run_flag.is_set(): start_time = time.time() with self.ifid_state_lock: for (if_id, if_state) in self.ifid_state.items(): cur_epoch = ConnectedHashTree.get_current_epoch() # Check if interface has timed-out. if ((if_state.is_expired() or if_state.is_revoked()) and (if_id_last_revoked[if_id] != cur_epoch)): if_id_last_revoked[if_id] = cur_epoch if not if_state.is_revoked(): logging.info("IF %d appears to be down.", if_id) self._issue_revocation(if_id) if_state.revoke_if_expired() sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL, "Handle IF timeouts") def _handle_ifstate_request(self, req, meta): # Only master replies to ifstate requests. 
if not self.zk.have_lock(): return assert isinstance(req, IFStateRequest) logging.debug("Received ifstate req:\n%s", req) infos = [] with self.ifid_state_lock: if req.p.ifID == IFStateRequest.ALL_INTERFACES: ifid_states = self.ifid_state.items() elif req.p.ifID in self.ifid_state: ifid_states = [(req.p.ifID, self.ifid_state[req.p.ifID])] else: logging.error( "Received ifstate request from %s for unknown " "interface %s.", meta.get_addr(), req.p.ifID) return for (ifid, state) in ifid_states: # Don't include inactive interfaces in response. if state.is_inactive(): continue info = IFStateInfo.from_values(ifid, state.is_active(), self._get_ht_proof(ifid)) infos.append(info) if not infos and not self._quiet_startup(): logging.warning("No IF state info to put in response.") return payload = IFStatePayload.from_values(infos) self.send_meta(payload, meta, (meta.host, meta.port))
def _init_hash_tree(self):
    """Build the initial connected hash tree covering every local interface.

    Reads the interface IDs from ``self.ifid2br`` and stores the resulting
    tree in ``self._hash_tree``.
    """
    interface_ids = list(self.ifid2br.keys())
    self._hash_tree = ConnectedHashTree(
        self.addr.isd_as, interface_ids, self.hashtree_gen_key,
        HashType.SHA256)
class BeaconServer(SCIONElement, metaclass=ABCMeta):
    """
    The SCION PathConstructionBeacon Server.

    Propagates PCBs (path-construction beacons), maintains the revocation
    hash tree, and coordinates master election through Zookeeper.

    Attributes:
        if2rev_tokens: Contains the currently used revocation token
            hash-chain for each interface.
    """
    SERVICE_TYPE = BEACON_SERVICE
    # Amount of time units a HOF is valid (time unit is EXP_TIME_UNIT).
    HOF_EXP_TIME = 63
    # ZK path for incoming PCBs.
    ZK_PCB_CACHE_PATH = "pcb_cache"
    # ZK path for revocations.
    ZK_REVOCATIONS_PATH = "rev_cache"
    # Time revocation objects are cached in memory (in seconds).
    ZK_REV_OBJ_MAX_AGE = HASHTREE_EPOCH_TIME
    # Interval (seconds) between checks for timed-out interfaces.
    IF_TIMEOUT_INTERVAL = 1

    def __init__(self, server_id, conf_dir):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        """
        super().__init__(server_id, conf_dir)
        # TODO: add 2 policies
        self.path_policy = PathPolicy.from_file(
            os.path.join(conf_dir, PATH_POLICY_FILE))
        self.signing_key = get_sig_key(self.conf_dir)
        # Separate keys derived from the AS master key: one for opaque-field
        # MACs, one for the revocation hash tree.
        self.of_gen_key = kdf(self.config.master_as_key, b"Derive OF Key")
        self.hashtree_gen_key = kdf(self.config.master_as_key,
                                    b"Derive hashtree Key")
        logging.info(self.config.__dict__)
        # Revocation hash tree state; _next_tree is pre-computed in a
        # background thread and swapped in by _maintain_hash_tree().
        self._hash_tree = None
        self._hash_tree_lock = Lock()
        self._next_tree = None
        self._init_hash_tree()
        # Per-interface liveness state, guarded by ifid_state_lock.
        self.ifid_state = {}
        for ifid in self.ifid2br:
            self.ifid_state[ifid] = InterfaceState()
        self.ifid_state_lock = RLock()
        # Dispatch tables: control payloads and SCMP payloads to handlers.
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.PCB: {None: self.handle_pcb},
            PayloadClass.IFID: {None: self.handle_ifid_packet},
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
            },
            PayloadClass.PATH: {
                PMT.IFSTATE_REQ: self._handle_ifstate_request,
                PMT.REVOCATION: self._handle_revocation,
            },
        }
        self.SCMP_PLD_CLASS_MAP = {
            SCMPClass.PATH: {
                SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
            },
        }
        # Zookeeper coordination: join the BS party and set up shared caches
        # for PCBs and revocation objects.
        zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                [(self.addr.host, self._port)]).pack()
        self.zk = Zookeeper(self.addr.isd_as, BEACON_SERVICE, zkid,
                            self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        self.pcb_cache = ZkSharedCache(self.zk, self.ZK_PCB_CACHE_PATH,
                                       self._handle_pcbs_from_zk)
        self.revobjs_cache = ZkSharedCache(self.zk, self.ZK_REVOCATIONS_PATH,
                                           self.process_rev_objects)
        self.local_rev_cache = ExpiringDict(
            1000, HASHTREE_EPOCH_TIME + HASHTREE_EPOCH_TOLERANCE)
        self._rev_seg_lock = RLock()

    def _init_hash_tree(self):
        """Build the initial connected hash tree over all local interfaces."""
        ifs = list(self.ifid2br.keys())
        self._hash_tree = ConnectedHashTree(self.addr.isd_as, ifs,
                                            self.hashtree_gen_key,
                                            HashType.SHA256)

    def _get_ht_proof(self, if_id):
        """Return the hash-tree (revocation) proof for interface `if_id`."""
        with self._hash_tree_lock:
            return self._hash_tree.get_proof(if_id)

    def _get_ht_root(self):
        """Return the current root of the connected hash tree."""
        with self._hash_tree_lock:
            return self._hash_tree.get_root()

    def propagate_downstream_pcb(self, pcb):
        """
        Propagates the beacon to all children.

        :param pcb: path segment.
        :type pcb: PathSegment
        :returns: dict mapping (isd_as, if_id) to the list of short PCB ids
            propagated on that interface.
        """
        propagated_pcbs = defaultdict(list)
        for intf in self.topology.child_interfaces:
            # Skip interfaces whose remote side is not yet known.
            if not intf.to_if_id:
                continue
            new_pcb, meta = self._mk_prop_pcb_meta(pcb.copy(), intf.isd_as,
                                                   intf.if_id)
            if not new_pcb:
                continue
            self.send_meta(new_pcb, meta)
            propagated_pcbs[(intf.isd_as, intf.if_id)].append(pcb.short_id())
        return propagated_pcbs

    def _mk_prop_pcb_meta(self, pcb, dst_ia, egress_if):
        """Extend `pcb` with this AS's marking, sign it, and build the
        one-hop metadata needed to send it to `dst_ia` via `egress_if`.

        Returns (None, None) if no AS marking could be created.
        """
        ts = pcb.get_timestamp()
        asm = self._create_asm(pcb.p.ifID, egress_if, ts, pcb.last_hof())
        if not asm:
            return None, None
        pcb.add_asm(asm)
        pcb.sign(self.signing_key)
        one_hop_path = self._create_one_hop_path(egress_if)
        return pcb, self._build_meta(ia=dst_ia, host=SVCType.BS_A,
                                     path=one_hop_path, one_hop=True)

    def _create_one_hop_path(self, egress_if):
        """Create a 2-hop path whose second hop field is left empty, used to
        reach the neighbor AS's beacon service directly."""
        ts = int(SCIONTime.get_time())
        info = InfoOpaqueField.from_values(ts, self.addr.isd_as[0], hops=2)
        hf1 = HopOpaqueField.from_values(self.HOF_EXP_TIME, 0, egress_if)
        hf1.set_mac(self.of_gen_key, ts, None)
        # Return a path where second HF is empty.
        return SCIONPath.from_values(info, [hf1, HopOpaqueField()])

    def _mk_if_info(self, if_id):
        """
        Small helper method to make it easier to deal with ingress/egress
        interface being 0 while building ASMarkings.
        """
        d = {"remote_ia": ISD_AS.from_values(0, 0), "remote_if": 0, "mtu": 0}
        if not if_id:
            return d
        br = self.ifid2br[if_id]
        d["remote_ia"] = br.interfaces[if_id].isd_as
        d["remote_if"] = br.interfaces[if_id].to_if_id
        d["mtu"] = br.interfaces[if_id].mtu
        return d

    @abstractmethod
    def handle_pcbs_propagation(self):
        """
        Main loop to propagate received beacons.
        """
        raise NotImplementedError

    def _log_propagations(self, propagated_pcbs):
        """Log, per (ISD-AS, interface), which PCBs were propagated."""
        for (isd_as, if_id), pcbs in propagated_pcbs.items():
            logging.debug("Propagated %d PCBs to %s via %s (%s)", len(pcbs),
                          isd_as, if_id, ", ".join(pcbs))

    def _handle_pcbs_from_zk(self, pcbs):
        """
        Handles cached pcbs through ZK, passed as a list of raw (packed)
        path segments.
        """
        for pcb in pcbs:
            try:
                pcb = PathSegment.from_raw(pcb)
            except SCIONParseError as e:
                logging.error("Unable to parse raw pcb: %s", e)
                continue
            self.handle_pcb(pcb)
        if pcbs:
            logging.debug("Processed %s PCBs from ZK", len(pcbs))

    def handle_pcb(self, pcb, meta=None):
        """
        Handles pcbs received from the network.

        If `meta` is set the PCB came over the network (not ZK) and its
        ingress interface is recorded before filtering.
        """
        if meta:
            pcb.p.ifID = meta.path.get_hof().ingress_if
        try:
            self.path_policy.check_filters(pcb)
        except SCIONPathPolicyViolated as e:
            logging.debug("Segment dropped due to path policy: %s\n%s" %
                          (e, pcb.short_desc()))
            return
        if not self._filter_pcb(pcb):
            logging.debug("Segment dropped due to looping: %s" %
                          pcb.short_desc())
            return
        # Defer further processing until the segment is verified.
        seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta)
        self._process_path_seg(seg_meta)

    def continue_seg_processing(self, seg_meta):
        """
        For every verified pcb received from the network or ZK
        this function gets called to continue the processing for the pcb.
        """
        pcb = seg_meta.seg
        logging.debug("Successfully verified PCB %s", pcb.short_id())
        if seg_meta.meta:
            # Segment was received from network, not from zk. Share segment
            # with other beacon servers in this AS.
            entry_name = "%s-%s" % (pcb.get_hops_hash(hex=True), time.time())
            try:
                self.pcb_cache.store(entry_name, pcb.copy().pack())
            except ZkNoConnection:
                logging.error("Unable to store PCB in shared cache: "
                              "no connection to ZK")
        self.handle_ext(pcb)
        self._handle_verified_beacon(pcb)

    def _filter_pcb(self, pcb, dst_ia=None):
        """Base implementation accepts every PCB; subclasses presumably
        override this to drop looping segments — TODO confirm."""
        return True

    def handle_ext(self, pcb):
        """
        Handle beacon extensions.
        """
        # Handle PCB extensions
        if pcb.is_sibra():
            logging.debug("%s", pcb.sibra_ext)
        for asm in pcb.iter_asms():
            pol = asm.routing_pol_ext()
            if pol:
                self.handle_routing_pol_ext(pol)

    def handle_routing_pol_ext(self, ext):
        # TODO(Sezer): Implement routing policy extension handling
        logging.debug("Routing policy extension: %s" % ext)

    @abstractmethod
    def register_segments(self):
        """
        Registers paths according to the received beacons.
        """
        raise NotImplementedError

    def _log_registrations(self, registrations, seg_type):
        """Log, per destination, which segments of `seg_type` were registered."""
        for (dst_meta, dst_type), pcbs in registrations.items():
            logging.debug("Registered %d %s-segments @ %s:%s (%s)",
                          len(pcbs), seg_type, dst_type.upper(), dst_meta,
                          ", ".join(pcbs))

    def _create_asm(self, in_if, out_if, ts, prev_hof):
        """Create this AS's marking (PCB markings + trust info + hash-tree
        root) for the given ingress/egress interfaces.

        Returns None when no PCB markings could be created.
        """
        pcbms = list(self._create_pcbms(in_if, out_if, ts, prev_hof))
        if not pcbms:
            return None
        chain = self._get_my_cert()
        _, cert_ver = chain.get_leaf_isd_as_ver()
        return ASMarking.from_values(
            self.addr.isd_as, self._get_my_trc().version, cert_ver, pcbms,
            self._get_ht_root(), self.topology.mtu)

    def _create_pcbms(self, in_if, out_if, ts, prev_hof):
        """Yield the up-PCB marking followed by one marking per active peer
        interface; yields nothing if the up marking cannot be built."""
        up_pcbm = self._create_pcbm(in_if, out_if, ts, prev_hof)
        if not up_pcbm:
            return
        yield up_pcbm
        for intf in sorted(self.topology.peer_interfaces):
            in_if = intf.if_id
            with self.ifid_state_lock:
                # Skip inactive peers (unless still in quiet startup).
                if (not self.ifid_state[in_if].is_active() and
                        not self._quiet_startup()):
                    continue
            peer_pcbm = self._create_pcbm(in_if, out_if, ts, up_pcbm.hof(),
                                          xover=True)
            if peer_pcbm:
                yield peer_pcbm

    def _create_pcbm(self, in_if, out_if, ts, prev_hof, xover=False):
        """Create a single PCB marking with a MACed hop field; returns None
        when the remote side of a non-zero interface is unknown."""
        in_info = self._mk_if_info(in_if)
        if in_info["remote_ia"].int() and not in_info["remote_if"]:
            return None
        out_info = self._mk_if_info(out_if)
        if out_info["remote_ia"].int() and not out_info["remote_if"]:
            return None
        hof = HopOpaqueField.from_values(self.HOF_EXP_TIME, in_if, out_if,
                                         xover=xover)
        hof.set_mac(self.of_gen_key, ts, prev_hof)
        return PCBMarking.from_values(
            in_info["remote_ia"], in_info["remote_if"], in_info["mtu"],
            out_info["remote_ia"], out_info["remote_if"], hof)

    def _terminate_pcb(self, pcb):
        """
        Copies a PCB, terminates it and adds the segment ID.

        Terminating a PCB means adding a opaque field with the egress IF set
        to 0, i.e., there is no AS to forward a packet containing this path
        segment to.
        """
        pcb = pcb.copy()
        asm = self._create_asm(pcb.p.ifID, 0, pcb.get_timestamp(),
                               pcb.last_hof())
        if not asm:
            return None
        pcb.add_asm(asm)
        return pcb

    def handle_ifid_packet(self, pld, meta):
        """
        Update the interface state for the corresponding interface.

        :param pld: The IFIDPayload.
        :type pld: IFIDPayload
        :raises SCIONKeyError: if the payload names an unknown interface.
        """
        ifid = pld.p.relayIF
        with self.ifid_state_lock:
            if ifid not in self.ifid_state:
                raise SCIONKeyError("Invalid IF %d in IFIDPayload" % ifid)
            br = self.ifid2br[ifid]
            br.interfaces[ifid].to_if_id = pld.p.origIF
            prev_state = self.ifid_state[ifid].update()
            if prev_state == InterfaceState.INACTIVE:
                logging.info("IF %d activated", ifid)
            elif prev_state in [InterfaceState.TIMED_OUT,
                                InterfaceState.REVOKED]:
                logging.info("IF %d came back up.", ifid)
            if not prev_state == InterfaceState.ACTIVE:
                # Only the ZK master informs the BRs of the state change.
                if self.zk.have_lock():
                    # Inform BRs about the interface coming up.
                    state_info = IFStateInfo.from_values(
                        ifid, True, self._get_ht_proof(ifid))
                    pld = IFStatePayload.from_values([state_info])
                    for br in self.topology.border_routers:
                        br_addr, br_port = br.int_addrs[0].public[0]
                        meta = UDPMetadata.from_values(host=br_addr,
                                                       port=br_port)
                        self.send_meta(pld.copy(), meta, (br_addr, br_port))

    def run(self):
        """
        Run an instance of the Beacon Server.

        Starts the worker, interface-timeout, hash-tree and TRC/cert request
        threads, then enters the base-class run loop.
        """
        threading.Thread(
            target=thread_safety_net, args=(self.worker,),
            name="BS.worker", daemon=True).start()
        # https://github.com/netsec-ethz/scion/issues/308:
        threading.Thread(
            target=thread_safety_net, args=(self._handle_if_timeouts,),
            name="BS._handle_if_timeouts", daemon=True).start()
        threading.Thread(
            target=thread_safety_net, args=(self._create_next_tree,),
            name="BS._create_next_tree", daemon=True).start()
        threading.Thread(
            target=thread_safety_net, args=(self._check_trc_cert_reqs,),
            name="Elem.check_trc_cert_reqs", daemon=True).start()
        super().run()

    def _create_next_tree(self):
        """Background loop: shortly before each TTL window ends, pre-compute
        the hash tree for the next window and park it in `_next_tree`."""
        last_ttl_window = 0
        while self.run_flag.is_set():
            start = time.time()
            cur_ttl_window = ConnectedHashTree.get_ttl_window()
            time_to_sleep = (ConnectedHashTree.get_time_till_next_ttl() -
                             HASHTREE_UPDATE_WINDOW)
            # If we already computed a tree for this window, wait out a full
            # extra TTL before computing the next one.
            if cur_ttl_window == last_ttl_window:
                time_to_sleep += HASHTREE_TTL
            if time_to_sleep > 0:
                sleep_interval(start, time_to_sleep, "BS._create_next_tree",
                               self._quiet_startup())
            # at this point, there should be <= HASHTREE_UPDATE_WINDOW
            # seconds left in current ttl
            logging.info("Started computing hashtree for next TTL window (%d)",
                         cur_ttl_window + 2)
            last_ttl_window = ConnectedHashTree.get_ttl_window()
            ht_start = time.time()
            ifs = list(self.ifid2br.keys())
            tree = ConnectedHashTree.get_next_tree(
                self.addr.isd_as, ifs, self.hashtree_gen_key, HashType.SHA256)
            ht_end = time.time()
            with self._hash_tree_lock:
                self._next_tree = tree
            logging.info(
                "Finished computing hashtree for TTL window %d in %.3fs" %
                (cur_ttl_window + 2, ht_end - ht_start))

    def _maintain_hash_tree(self):
        """
        Maintain the hashtree. Update the windows in the connected tree by
        swapping in the pre-computed next tree; dies if it is missing.
        """
        with self._hash_tree_lock:
            if self._next_tree is not None:
                self._hash_tree.update(self._next_tree)
                self._next_tree = None
            else:
                logging.critical("Did not create hashtree in time; dying")
                kill_self()
        logging.info("New Hash Tree TTL window beginning: %s",
                     ConnectedHashTree.get_ttl_window())

    def worker(self):
        """
        Worker thread that takes care of reading shared PCBs from ZK, and
        propagating PCBS/registering paths when master.
        """
        last_propagation = last_registration = 0
        last_ttl_window = ConnectedHashTree.get_ttl_window()
        worker_cycle = 1.0
        start = time.time()
        while self.run_flag.is_set():
            sleep_interval(start, worker_cycle, "BS.worker cycle",
                           self._quiet_startup())
            start = time.time()
            try:
                self.zk.wait_connected()
                self.pcb_cache.process()
                self.revobjs_cache.process()
                self.handle_rev_objs()
                # Rotate the hash tree on TTL-window boundaries.
                cur_ttl_window = ConnectedHashTree.get_ttl_window()
                if cur_ttl_window != last_ttl_window:
                    self._maintain_hash_tree()
                    last_ttl_window = cur_ttl_window
                ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
                if not ret:
                    # Failed to get the lock
                    continue
                elif ret == ZK_LOCK_SUCCESS:
                    logging.info("Became master")
                    self._became_master()
                self.pcb_cache.expire(self.config.propagation_time * 10)
                self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
            except ZkNoConnection:
                continue
            # Master-only periodic duties: propagation and registration.
            now = time.time()
            if now - last_propagation >= self.config.propagation_time:
                self.handle_pcbs_propagation()
                last_propagation = now
            if (self.config.registers_paths and
                    now - last_registration >= self.config.registration_time):
                try:
                    self.register_segments()
                except SCIONKeyError as e:
                    logging.error("Error while registering segments: %s", e)
                    pass
                last_registration = now

    def _became_master(self):
        """
        Called when a BS becomes the new master. Resets some state that will
        be rebuilt over time.
        """
        # Reset all timed-out and revoked interfaces to inactive.
        with self.ifid_state_lock:
            for (_, ifstate) in self.ifid_state.items():
                if not ifstate.is_active():
                    ifstate.reset()

    def _get_my_trc(self):
        """Return this AS's TRC from the trust store."""
        return self.trust_store.get_trc(self.addr.isd_as[0])

    def _get_my_cert(self):
        """Return this AS's certificate chain from the trust store."""
        return self.trust_store.get_cert(self.addr.isd_as)

    @abstractmethod
    def _handle_verified_beacon(self, pcb):
        """
        Once a beacon has been verified, place it into the right containers.

        :param pcb: verified path segment.
        :type pcb: PathSegment
        """
        raise NotImplementedError

    def process_rev_objects(self, rev_infos):
        """
        Processes revocation infos stored in Zookeeper.

        Unparseable entries are logged and skipped.
        """
        with self._rev_seg_lock:
            for raw in rev_infos:
                try:
                    rev_info = RevocationInfo.from_raw(raw)
                except SCIONParseError as e:
                    logging.error(
                        "Error processing revocation info from ZK: %s", e)
                    continue
                self.local_rev_cache[rev_info] = rev_info.copy()

    def _issue_revocation(self, if_id):
        """
        Store a RevocationInfo in ZK and send a revocation to all BRs.

        :param if_id: The interface that needs to be revoked.
        :type if_id: int
        """
        # Only the master BS issues revocations.
        if not self.zk.have_lock():
            return
        rev_info = self._get_ht_proof(if_id)
        logging.info("Issuing revocation: %s", rev_info.short_desc())
        # Issue revocation to all BRs.
        info = IFStateInfo.from_values(if_id, False, rev_info)
        pld = IFStatePayload.from_values([info])
        for br in self.topology.border_routers:
            br_addr, br_port = br.int_addrs[0].public[0]
            meta = UDPMetadata.from_values(host=br_addr, port=br_port)
            self.send_meta(pld.copy(), meta, (br_addr, br_port))
        self._process_revocation(rev_info)
        self._send_rev_to_local_ps(rev_info)

    def _send_rev_to_local_ps(self, rev_info):
        """
        Sends the given revocation to its local path server.

        Only the ZK master sends, and only if the topology lists local path
        servers.

        :param rev_info: The RevocationInfo object
        :type rev_info: RevocationInfo
        """
        if self.zk.have_lock() and self.topology.path_servers:
            try:
                addr, port = self.dns_query_topo(PATH_SERVICE)[0]
            except SCIONServiceLookupError:
                # If there are no local path servers, stop here.
                return
            meta = UDPMetadata.from_values(host=addr, port=port)
            self.send_meta(rev_info.copy(), meta)

    def _handle_scmp_revocation(self, pld, meta):
        """Parse and process a revocation delivered via SCMP."""
        rev_info = RevocationInfo.from_raw(pld.info.rev_info)
        logging.debug("Received revocation via SCMP: %s (from %s)",
                      rev_info.short_desc(), meta)
        self._process_revocation(rev_info)

    def _handle_revocation(self, rev_info, meta):
        """Validate and process a revocation delivered via TCP/UDP."""
        logging.debug("Received revocation via TCP/UDP: %s (from %s)",
                      rev_info.short_desc(), meta)
        if not self._validate_revocation(rev_info):
            return
        self._process_revocation(rev_info)

    def handle_rev_objs(self):
        """Re-apply every cached revocation to the stored path segments."""
        with self._rev_seg_lock:
            for rev_info in self.local_rev_cache.values():
                self._remove_revoked_pcbs(rev_info)

    def _process_revocation(self, rev_info):
        """
        Removes PCBs containing a revoked interface and sends the revocation
        to the local PS.

        :param rev_info: The RevocationInfo object
        :type rev_info: RevocationInfo
        """
        assert isinstance(rev_info, RevocationInfo)
        if_id = rev_info.p.ifID
        if not if_id:
            logging.error("Trying to revoke IF with ID 0.")
            return
        with self._rev_seg_lock:
            self.local_rev_cache[rev_info] = rev_info.copy()
        # Share the revocation with other beacon servers via ZK; failures
        # are best-effort (logged, not fatal).
        rev_token = rev_info.copy().pack()
        entry_name = "%s:%s" % (hash(rev_token), time.time())
        try:
            self.revobjs_cache.store(entry_name, rev_token)
        except ZkNoConnection as exc:
            logging.error("Unable to store revocation in shared cache "
                          "(no ZK connection): %s" % exc)
        self._remove_revoked_pcbs(rev_info)

    @abstractmethod
    def _remove_revoked_pcbs(self, rev_info):
        """
        Removes the PCBs containing the revoked interface.

        :param rev_info: The RevocationInfo object.
        :type rev_info: RevocationInfo
        """
        raise NotImplementedError

    def _pcb_list_to_remove(self, candidates, rev_info):
        """
        Calculates the list of PCBs to remove.
        Called by _remove_revoked_pcbs.

        :param candidates: Candidate PCBs.
        :type candidates: List
        :param rev_info: The RevocationInfo object.
        :type rev_info: RevocationInfo
        :returns: list of candidate ids to remove (may contain duplicates).
        """
        to_remove = []
        processed = set()
        for cand in candidates:
            if cand.id in processed:
                continue
            processed.add(cand.id)
            # Skip revocations whose epoch is no longer (or not yet) valid.
            if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
                continue
            # If the interface on which we received the PCB is
            # revoked, then the corresponding pcb needs to be removed.
            root_verify = ConnectedHashTree.verify(rev_info,
                                                   self._get_ht_root())
            if (self.addr.isd_as == rev_info.isd_as() and
                    cand.pcb.p.ifID == rev_info.p.ifID and root_verify):
                to_remove.append(cand.id)
            for asm in cand.pcb.iter_asms():
                if self._verify_revocation_for_asm(rev_info, asm, False):
                    to_remove.append(cand.id)
        return to_remove

    def _handle_if_timeouts(self):
        """
        Periodically checks each interface state and issues an if revocation,
        if no keep-alive message was received for IFID_TOUT.
        """
        if_id_last_revoked = defaultdict(int)
        while self.run_flag.is_set():
            start_time = time.time()
            with self.ifid_state_lock:
                for (if_id, if_state) in self.ifid_state.items():
                    cur_epoch = ConnectedHashTree.get_current_epoch()
                    if not if_state.is_expired() or (
                            if_state.is_revoked() and
                            if_id_last_revoked[if_id] == cur_epoch):
                        # Either the interface hasn't timed out, or it's
                        # already revoked for this epoch
                        continue
                    if_id_last_revoked[if_id] = cur_epoch
                    if not if_state.is_revoked():
                        logging.info("IF %d went down.", if_id)
                    # Re-issue the revocation once per epoch until the
                    # interface comes back up.
                    self._issue_revocation(if_id)
                    if_state.revoke_if_expired()
            sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL,
                           "Handle IF timeouts")

    def _handle_ifstate_request(self, req, meta):
        """Reply to an interface-state request with the states (and proofs)
        of the requested active interfaces; master-only."""
        # Only master replies to ifstate requests.
        if not self.zk.have_lock():
            return
        assert isinstance(req, IFStateRequest)
        infos = []
        with self.ifid_state_lock:
            if req.p.ifID == IFStateRequest.ALL_INTERFACES:
                ifid_states = self.ifid_state.items()
            elif req.p.ifID in self.ifid_state:
                ifid_states = [(req.p.ifID, self.ifid_state[req.p.ifID])]
            else:
                logging.error("Received ifstate request from %s for unknown "
                              "interface %s.", meta, req.p.ifID)
                return
            for (ifid, state) in ifid_states:
                # Don't include inactive interfaces in response.
                if state.is_inactive():
                    continue
                info = IFStateInfo.from_values(ifid, state.is_active(),
                                               self._get_ht_proof(ifid))
                infos.append(info)
        if not infos and not self._quiet_startup():
            logging.warning("No IF state info to put in response. Req: %s" %
                            req.short_desc())
            return
        payload = IFStatePayload.from_values(infos)
        self.send_meta(payload, meta, (meta.host, meta.port))
def test_two_timesteps(self):
    """A proof drawn at window T+2 must NOT verify against the root from T."""
    # Setup: build a tree and remember its root at window T.
    isd_as = ISD_AS("1-11")
    if_ids = [23, 35, 120]
    inst = ConnectedHashTree(isd_as, if_ids, b"qwerty", HashType.SHA256)
    stale_root = inst.get_root()
    # Call: advance the tree twice (T -> T+1 -> T+2).
    for seed in (b"newseed.@1", b"newseed.@2"):
        next_tree = inst.get_next_tree(isd_as, if_ids, seed, HashType.SHA256)
        inst.update(next_tree)
    # Tests: a current proof for if_id 35 fails against the stale root.
    proof = inst.get_proof(35)
    ntools.eq_(ConnectedHashTree.verify(proof, stale_root), False)