class DNSCachingClient(DNSClient):
    """
    Caching variant of the DNS client.

    Answers are kept in a bounded, expiring cache so repeated lookups for
    the same relative name do not hit the DNS servers every time.
    """

    def __init__(self, dns_servers, domain, lifetime=5.0):  # pragma: no cover
        """
        :param list dns_servers: DNS server IP addresses as strings.
            E.g. ``["127.0.0.1", "8.8.8.8"]``
        :param string domain: The DNS domain to query.
        :param float lifetime:
            Number of seconds in total to try resolving before failing.
        """
        super().__init__(dns_servers, domain, lifetime=lifetime)
        # Bounded LRU-with-TTL cache of qname -> answer list.
        self.cache = ExpiringDict(max_len=DNS_CACHE_MAX_SIZE,
                                  max_age_seconds=DNS_CACHE_MAX_AGE)

    def query(self, qname, fallback=None, quiet=False):
        """
        Return the cached answer for ``qname`` if present; otherwise query
        the underlying DNS client and cache whatever it yields (the fallback
        is cached too, so a failing server is not hammered on every call).

        :param string qname: A relative DNS record to query. E.g. ``"bs"``
        :param list fallback:
            If provided, and the DNS query fails, use this as the answer
            instead.
        :param bool quiet: If set, don't log warnings/errors.
        :returns: A list of `Host address <HostAddrBase>`_ objects.
        :raises:
            DNSLibTimeout: No responses received.
            DNSLibNxDomain: Name doesn't exist.
            DNSLibError: Unexpected error.
        """
        cached = self.cache.get(qname)
        if cached is not None:
            # Cache hit: shuffle in place for crude load-balancing.
            shuffle(cached)
            return cached
        result = fallback
        try:
            result = super().query(qname)
        except DNSLibBaseError as e:
            if fallback is None:
                raise
            # Minor failures are warnings; anything else is an error.
            if isinstance(e, DNSLibMinorError):
                level = logging.WARN
            else:
                level = logging.ERROR
            if not quiet:
                logging.log(
                    level,
                    "DNS failure, using fallback value for %s: %s", qname, e)
        self.cache[qname] = result
        shuffle(result)
        return result
class CertServer(SCIONElement):
    """
    The SCION Certificate Server.

    Serves certificate chains and TRCs, and answers first-order DRKey
    requests. Objects learned locally are shared with the other certificate
    servers of this AS through Zookeeper caches.
    """
    SERVICE_TYPE = CERTIFICATE_SERVICE
    # ZK path for incoming cert chains
    ZK_CC_CACHE_PATH = "cert_chain_cache"
    # ZK path for incoming TRCs
    ZK_TRC_CACHE_PATH = "trc_cache"
    # ZK path for incoming DRKeys
    ZK_DRKEY_PATH = "drkey_cache"

    def __init__(self, server_id, conf_dir, spki_cache_dir=GEN_CACHE_PATH,
                 prom_export=None):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        :param str prom_export: prometheus export address.
        """
        super().__init__(server_id, conf_dir, spki_cache_dir=spki_cache_dir,
                         prom_export=prom_export)
        self.config = self._load_as_conf()
        # Per-request-type prometheus label sets (None if metrics disabled).
        cc_labels = {**self._labels, "type": "cc"} if self._labels else None
        trc_labels = {**self._labels, "type": "trc"} if self._labels else None
        drkey_labels = {
            **self._labels, "type": "drkey"
        } if self._labels else None
        # Request handlers park requests that cannot be answered yet and
        # replay them once the missing object shows up (check/fetch/reply).
        self.cc_requests = RequestHandler.start(
            "CC Requests", self._check_cc, self._fetch_cc, self._reply_cc,
            labels=cc_labels,
        )
        self.trc_requests = RequestHandler.start(
            "TRC Requests", self._check_trc, self._fetch_trc, self._reply_trc,
            labels=trc_labels,
        )
        self.drkey_protocol_requests = RequestHandler.start(
            "DRKey Requests", self._check_drkey, self._fetch_drkey,
            self._reply_proto_drkey, labels=drkey_labels,
        )
        # Dispatch table for incoming control payloads.
        self.CTRL_PLD_CLASS_MAP = {
            PayloadClass.CERT: {
                CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
                CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
                CertMgmtType.TRC_REQ: self.process_trc_request,
                CertMgmtType.TRC_REPLY: self.process_trc_reply,
            },
            PayloadClass.DRKEY: {
                DRKeyMgmtType.FIRST_ORDER_REQUEST: self.process_drkey_request,
                DRKeyMgmtType.FIRST_ORDER_REPLY: self.process_drkey_reply,
            },
        }
        zkid = ZkID.from_values(self.addr.isd_as, self.id,
                                [(self.addr.host, self._port)]).pack()
        self.zk = Zookeeper(self.topology.isd_as, CERTIFICATE_SERVICE, zkid,
                            self.topology.zookeepers)
        self.zk.retry("Joining party", self.zk.party_setup)
        # Shared caches replicate TRCs/chains/DRKeys between the AS's CSes.
        self.trc_cache = ZkSharedCache(self.zk, self.ZK_TRC_CACHE_PATH,
                                       self._cached_trcs_handler)
        self.cc_cache = ZkSharedCache(self.zk, self.ZK_CC_CACHE_PATH,
                                      self._cached_certs_handler)
        self.drkey_cache = ZkSharedCache(self.zk, self.ZK_DRKEY_PATH,
                                         self._cached_drkeys_handler)
        self.signing_key = get_sig_key(self.conf_dir)
        self.private_key = get_enc_key(self.conf_dir)
        # Expiring stores for DRKey secret values and first-order DRKeys.
        self.drkey_secrets = ExpiringDict(DRKEY_MAX_SV, DRKEY_MAX_TTL)
        self.first_order_drkeys = ExpiringDict(DRKEY_MAX_KEYS, DRKEY_MAX_TTL)

    def worker(self):
        """
        Worker thread that takes care of reading shared entries from ZK, and
        handling master election.
        """
        worker_cycle = 1.0
        start = SCIONTime.get_time()
        while self.run_flag.is_set():
            sleep_interval(start, worker_cycle, "CS.worker cycle",
                           self._quiet_startup())
            start = SCIONTime.get_time()
            # Update IS_MASTER metric.
            if self._labels:
                IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))
            try:
                self.zk.wait_connected()
                self.trc_cache.process()
                self.cc_cache.process()
                self.drkey_cache.process()
                # Try to become a master.
                ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
                if ret:  # Either got the lock, or already had it.
                    if ret == ZK_LOCK_SUCCESS:
                        logging.info("Became master")
                    # Only the master expires stale shared-cache entries.
                    self.trc_cache.expire(worker_cycle * 10)
                    self.cc_cache.expire(worker_cycle * 10)
                    self.drkey_cache.expire(worker_cycle * 10)
            except ZkNoConnection:
                # Best-effort cycle; retry on the next iteration.
                logging.warning('worker(): ZkNoConnection')

    def _cached_trcs_handler(self, raw_entries):
        """
        Handles cached (through ZK) TRCs, passed as a list.
        """
        for raw in raw_entries:
            trc = TRC.from_raw(raw.decode('utf-8'))
            rep = CtrlPayload(CertMgmt(TRCReply.from_values(trc)))
            # from_zk=True prevents re-sharing what we just read from ZK.
            self.process_trc_reply(rep, None, from_zk=True)
        if len(raw_entries) > 0:
            logging.debug("Processed %s trcs from ZK", len(raw_entries))

    def _cached_certs_handler(self, raw_entries):
        """
        Handles cached (through ZK) chains, passed as a list.
        """
        for raw in raw_entries:
            cert = CertificateChain.from_raw(raw.decode('utf-8'))
            rep = CtrlPayload(CertMgmt(CertChainReply.from_values(cert)))
            self.process_cert_chain_reply(rep, None, from_zk=True)
        if len(raw_entries) > 0:
            logging.debug("Processed %s certs from ZK", len(raw_entries))

    def _cached_drkeys_handler(self, raw_entries):
        """Handles cached (through ZK) DRKey replies, passed as a list."""
        for raw in raw_entries:
            msg = CtrlPayload(DRKeyMgmt(DRKeyReply.from_raw(raw)))
            self.process_drkey_reply(msg, None, from_zk=True)

    def _share_object(self, pld, is_trc):
        """
        Share a TRC or certificate chain (via ZK) with the other cert servers.

        :param pld: the TRC or CertificateChain to share.
        :param bool is_trc: True for TRCs, False for certificate chains.
        """
        pld_packed = pld.pack()
        pld_hash = crypto_hash(pld_packed).hex()
        try:
            if is_trc:
                self.trc_cache.store(
                    "%s-%s" % (pld_hash, SCIONTime.get_time()), pld_packed)
            else:
                self.cc_cache.store("%s-%s" % (pld_hash, SCIONTime.get_time()),
                                    pld_packed)
        except ZkNoConnection:
            # BUG FIX: the conditional must be parenthesized, otherwise the
            # expression parses as `("..." % "TRC") if is_trc else "CC"` and
            # the CC branch logs just the bare string "CC".
            logging.warning("Unable to store %s in shared path: "
                            "no connection to ZK" %
                            ("TRC" if is_trc else "CC"))
            return
        logging.debug("%s stored in ZK: %s" % ("TRC" if is_trc else "CC",
                                               pld_hash))

    def process_cert_chain_request(self, cpld, meta):
        """Process a certificate chain request."""
        cmgt = cpld.union
        req = cmgt.union
        assert isinstance(req, CertChainRequest), type(req)
        key = req.isd_as(), req.p.version
        logging.info("Cert chain request received for %sv%s from %s", *key,
                     meta)
        REQS_TOTAL.labels(**self._labels, type="cc").inc()
        local = meta.ia == self.addr.isd_as
        if not self._check_cc(key):
            # Only queue requests from local entities; remote requesters are
            # expected to ask an AS that actually has the chain.
            if not local:
                logging.warning(
                    "Dropping CC request from %s for %sv%s: "
                    "CC not found && requester is not local)", meta, *key)
            else:
                self.cc_requests.put((key, (meta, req, cpld.req_id)))
            return
        self._reply_cc(key, (meta, req, cpld.req_id))

    def process_cert_chain_reply(self, cpld, meta, from_zk=False):
        """Process a certificate chain reply."""
        cmgt = cpld.union
        rep = cmgt.union
        assert isinstance(rep, CertChainReply), type(rep)
        ia_ver = rep.chain.get_leaf_isd_as_ver()
        logging.info("Cert chain reply received for %sv%s (ZK: %s)" %
                     (ia_ver[0], ia_ver[1], from_zk))
        self.trust_store.add_cert(rep.chain)
        if not from_zk:
            self._share_object(rep.chain, is_trc=False)
        # Reply to all requests for this certificate chain
        self.cc_requests.put((ia_ver, None))

    def _check_cc(self, key):
        """Return whether the requested certificate chain is in the store."""
        isd_as, ver = key
        # NEWEST_VERSION maps to None, i.e. "whatever is newest" in the store.
        ver = None if ver == CertChainRequest.NEWEST_VERSION else ver
        cert_chain = self.trust_store.get_cert(isd_as, ver)
        if cert_chain:
            return True
        logging.debug('Cert chain not found for %sv%s', *key)
        return False

    def _fetch_cc(self, key, req_info):
        """Fetch a missing certificate chain from the owning AS."""
        # Do not attempt to fetch the CertChain from a remote AS if the
        # cacheOnly flag is set.
        _, orig_req, _ = req_info
        if orig_req.p.cacheOnly:
            return
        self._send_cc_request(*key)

    def _send_cc_request(self, isd_as, ver):
        """Send a cache-only certificate chain request towards `isd_as`."""
        req = CertChainRequest.from_values(isd_as, ver, cache_only=True)
        path_meta = self._get_path_via_sciond(isd_as)
        if path_meta:
            meta = self._build_meta(isd_as, host=SVCType.CS_A,
                                    path=path_meta.fwd_path())
            req_id = mk_ctrl_req_id()
            self.send_meta(CtrlPayload(CertMgmt(req), req_id=req_id), meta)
            logging.info(
                "Cert chain request sent to %s via [%s]: %s [id: %016x]",
                meta, path_meta.short_desc(), req.short_desc(), req_id)
        else:
            logging.warning(
                "Cert chain request (for %s) not sent: "
                "no path found", req.short_desc())

    def _reply_cc(self, key, req_info):
        """Send the requested certificate chain back to the requester."""
        isd_as, ver = key
        ver = None if ver == CertChainRequest.NEWEST_VERSION else ver
        meta = req_info[0]
        req_id = req_info[2]
        cert_chain = self.trust_store.get_cert(isd_as, ver)
        self.send_meta(
            CtrlPayload(CertMgmt(CertChainReply.from_values(cert_chain)),
                        req_id=req_id), meta)
        logging.info("Cert chain for %sv%s sent to %s [id: %016x]",
                     isd_as, ver, meta, req_id)

    def process_trc_request(self, cpld, meta):
        """Process a TRC request."""
        cmgt = cpld.union
        req = cmgt.union
        assert isinstance(req, TRCRequest), type(req)
        key = req.isd_as()[0], req.p.version
        logging.info("TRC request received for %sv%s from %s [id: %s]", *key,
                     meta, cpld.req_id_str())
        REQS_TOTAL.labels(**self._labels, type="trc").inc()
        local = meta.ia == self.addr.isd_as
        if not self._check_trc(key):
            if not local:
                logging.warning(
                    "Dropping TRC request from %s for %sv%s: "
                    "TRC not found && requester is not local)", meta, *key)
            else:
                self.trc_requests.put((key, (meta, req, cpld.req_id)))
            return
        self._reply_trc(key, (meta, req, cpld.req_id))

    def process_trc_reply(self, cpld, meta, from_zk=False):
        """
        Process a TRC reply.

        :param trc_rep: TRC reply.
        :type trc_rep: TRCReply
        """
        cmgt = cpld.union
        trc_rep = cmgt.union
        assert isinstance(trc_rep, TRCReply), type(trc_rep)
        isd, ver = trc_rep.trc.get_isd_ver()
        logging.info("TRCReply received for ISD %sv%s, ZK: %s [id: %s]",
                     isd, ver, from_zk, cpld.req_id_str())
        self.trust_store.add_trc(trc_rep.trc)
        if not from_zk:
            self._share_object(trc_rep.trc, is_trc=True)
        # Reply to all requests for this TRC
        self.trc_requests.put(((isd, ver), None))

    def _check_trc(self, key):
        """Return whether the requested TRC is in the store."""
        isd, ver = key
        ver = None if ver == TRCRequest.NEWEST_VERSION else ver
        trc = self.trust_store.get_trc(isd, ver)
        if trc:
            return True
        logging.debug('TRC not found for %sv%s', *key)
        return False

    def _fetch_trc(self, key, req_info):
        """Fetch a missing TRC from the owning ISD."""
        # Do not attempt to fetch the TRC from a remote AS if the cacheOnly
        # flag is set.
        _, orig_req, _ = req_info
        if orig_req.p.cacheOnly:
            return
        self._send_trc_request(*key)

    def _send_trc_request(self, isd, ver):
        """Send a cache-only TRC request towards a core AS of `isd`."""
        trc_req = TRCRequest.from_values(isd, ver, cache_only=True)
        path_meta = self._get_path_via_sciond(trc_req.isd_as())
        if path_meta:
            meta = self._build_meta(path_meta.dst_ia(), host=SVCType.CS_A,
                                    path=path_meta.fwd_path())
            req_id = mk_ctrl_req_id()
            self.send_meta(CtrlPayload(CertMgmt(trc_req), req_id=req_id),
                           meta)
            logging.info("TRC request sent to %s via [%s]: %s [id: %016x]",
                         meta, path_meta.short_desc(), trc_req.short_desc(),
                         req_id)
        else:
            logging.warning("TRC request not sent for %s: no path found.",
                            trc_req.short_desc())

    def _reply_trc(self, key, req_info):
        """Send the requested TRC back to the requester."""
        isd, ver = key
        ver = None if ver == TRCRequest.NEWEST_VERSION else ver
        meta = req_info[0]
        req_id = req_info[2]
        trc = self.trust_store.get_trc(isd, ver)
        self.send_meta(
            CtrlPayload(CertMgmt(TRCReply.from_values(trc)), req_id=req_id),
            meta)
        logging.info("TRC for %sv%s sent to %s [id: %016x]",
                     isd, ver, meta, req_id)

    def process_drkey_request(self, cpld, meta):
        """
        Process first order DRKey requests from other ASes.

        :param DRKeyRequest req: the DRKey request
        :param UDPMetadata meta: the metadata
        """
        dpld = cpld.union
        req = dpld.union
        assert isinstance(req, DRKeyRequest), type(req)
        logging.info("DRKeyRequest received from %s: %s [id: %s]", meta,
                     req.short_desc(), cpld.req_id_str())
        REQS_TOTAL.labels(**self._labels, type="drkey").inc()
        try:
            cert = self._verify_drkey_request(req, meta)
        except SCIONVerificationError as e:
            logging.warning("Invalid DRKeyRequest from %s. Reason %s: %s",
                            meta, e, req.short_desc())
            return
        sv = self._get_drkey_secret(get_drkey_exp_time(req.p.flags.prefetch))
        cert_version = self.trust_store.get_cert(
            self.addr.isd_as).certs[0].version
        trc_version = self.trust_store.get_trc(self.addr.isd_as[0]).version
        rep = get_drkey_reply(sv, self.addr.isd_as, meta.ia, self.private_key,
                              self.signing_key, cert_version, cert,
                              trc_version)
        self.send_meta(CtrlPayload(DRKeyMgmt(rep), req_id=cpld.req_id), meta)
        logging.info("DRKeyReply sent to %s: %s [id: %s]", meta,
                     req.short_desc(), cpld.req_id_str())

    def _verify_drkey_request(self, req, meta):
        """
        Verify that the first order DRKey request is legit.
        I.e. the signature is valid, the correct ISD AS is queried,
        timestamp is recent.

        :param DRKeyRequest req: the first order DRKey request.
        :param UDPMetadata meta: the metadata.
        :returns Certificate of the requester.
        :rtype: Certificate
        :raises: SCIONVerificationError
        """
        if self.addr.isd_as != req.isd_as:
            raise SCIONVerificationError("Request for other ISD-AS: %s" %
                                         req.isd_as)
        if drkey_time() - req.p.timestamp > DRKEY_REQUEST_TIMEOUT:
            raise SCIONVerificationError(
                "Expired request from %s. %ss old. Max %ss" % (
                    meta.ia, drkey_time() - req.p.timestamp,
                    DRKEY_REQUEST_TIMEOUT))
        trc = self.trust_store.get_trc(meta.ia[0])
        chain = self.trust_store.get_cert(meta.ia, req.p.certVer)
        err = []
        # Kick off fetches for missing trust material before failing, so a
        # retried request can succeed.
        if not chain:
            self._send_cc_request(meta.ia, req.p.certVer)
            err.append("Certificate not present for %s(v: %s)" %
                       (meta.ia, req.p.certVer))
        if not trc:
            self._send_trc_request(meta.ia[0], req.p.trcVer)
            err.append("TRC not present for %s(v: %s)" %
                       (meta.ia[0], req.p.trcVer))
        if err:
            raise SCIONVerificationError(", ".join(err))
        raw = drkey_signing_input_req(req.isd_as, req.p.flags.prefetch,
                                      req.p.timestamp)
        try:
            verify_sig_chain_trc(raw, req.p.signature, meta.ia, chain, trc)
        except SCIONVerificationError as e:
            raise SCIONVerificationError(str(e))
        return chain.certs[0]

    def process_drkey_reply(self, cpld, meta, from_zk=False):
        """
        Process first order DRKey reply from other ASes.

        :param DRKeyReply rep: the received DRKey reply
        :param UDPMetadata meta: the metadata
        :param Bool from_zk: if the reply has been received from Zookeeper
        """
        dpld = cpld.union
        rep = dpld.union
        assert isinstance(rep, DRKeyReply), type(rep)
        logging.info("DRKeyReply received from %s: %s [id: %s]", meta,
                     rep.short_desc(), cpld.req_id_str())
        src = meta or "ZK"
        try:
            cert = self._verify_drkey_reply(rep, meta)
            raw = decrypt_drkey(rep.p.cipher, self.private_key,
                                cert.subject_enc_key_raw)
        except SCIONVerificationError as e:
            logging.info("Invalid DRKeyReply from %s. Reason %s: %s", src,
                         e, rep.short_desc())
            return
        except CryptoError as e:
            logging.info("Unable to decrypt DRKeyReply from %s. Reason %s: %s",
                         src, e, rep.short_desc())
            return
        drkey = FirstOrderDRKey(rep.isd_as, self.addr.isd_as, rep.p.expTime,
                                raw)
        self.first_order_drkeys[drkey] = drkey
        if not from_zk:
            pld_packed = rep.copy().pack()
            try:
                self.drkey_cache.store("%s-%s" % (rep.isd_as, rep.p.expTime),
                                       pld_packed)
            except ZkNoConnection:
                logging.warning("Unable to store DRKey for %s in shared path: "
                                "no connection to ZK" % rep.isd_as)
                return
        self.drkey_protocol_requests.put((drkey, None))

    def _verify_drkey_reply(self, rep, meta):
        """
        Verify that the first order DRKey reply is legit.
        I.e. the signature matches, timestamp is recent.

        :param DRKeyReply rep: the first order DRKey reply.
        :param UDPMetadata meta: the metadata.
        :returns Certificate of the responder.
        :rtype: Certificate
        :raises: SCIONVerificationError
        """
        if meta and meta.ia != rep.isd_as:
            raise SCIONVerificationError("Response from other ISD-AS: %s" %
                                         rep.isd_as)
        if drkey_time() - rep.p.timestamp > DRKEY_REQUEST_TIMEOUT:
            raise SCIONVerificationError(
                "Expired reply from %s. %ss old. Max %ss" % (
                    rep.isd_as, drkey_time() - rep.p.timestamp,
                    DRKEY_REQUEST_TIMEOUT))
        trc = self.trust_store.get_trc(rep.isd_as[0])
        chain = self.trust_store.get_cert(rep.isd_as, rep.p.certVerSrc)
        err = []
        if not chain:
            self._send_cc_request(rep.isd_as, rep.p.certVerSrc)
            err.append("Certificate not present for %s(v: %s)" %
                       (rep.isd_as, rep.p.certVerSrc))
        if not trc:
            self._send_trc_request(rep.isd_as[0], rep.p.trcVer)
            err.append("TRC not present for %s(v: %s)" %
                       (rep.isd_as[0], rep.p.trcVer))
        if err:
            raise SCIONVerificationError(", ".join(err))
        raw = get_signing_input_rep(rep.isd_as, rep.p.timestamp, rep.p.expTime,
                                    rep.p.cipher)
        try:
            verify_sig_chain_trc(raw, rep.p.signature, rep.isd_as, chain, trc)
        except SCIONVerificationError as e:
            raise SCIONVerificationError(str(e))
        return chain.certs[0]

    def _check_drkey(self, drkey):
        """
        Check if first order DRKey with the same (SrcIA, DstIA, expTime)
        is available.

        :param FirstOrderDRKey drkey: the searched DRKey.
        :returns: if the first order DRKey is available.
        :rtype: Bool
        """
        return drkey in self.first_order_drkeys

    def _fetch_drkey(self, drkey, _):
        """
        Fetch missing first order DRKey with the same (SrcIA, DstIA, expTime).

        :param FirstOrderDRKey drkey: The missing DRKey.
        """
        cert = self.trust_store.get_cert(self.addr.isd_as)
        trc = self.trust_store.get_trc(self.addr.isd_as[0])
        if not cert or not trc:
            logging.warning(
                "DRKeyRequest for %s not sent. Own CertChain/TRC not present.",
                drkey.src_ia)
            return
        req = get_drkey_request(drkey.src_ia, False, self.signing_key,
                                cert.certs[0].version, trc.version)
        path_meta = self._get_path_via_sciond(drkey.src_ia)
        if path_meta:
            meta = self._build_meta(drkey.src_ia, host=SVCType.CS_A,
                                    path=path_meta.fwd_path())
            req_id = mk_ctrl_req_id()
            # BUG FIX: req_id was generated and logged but never attached to
            # the payload; pass it so the logged id matches the wire, in line
            # with _send_cc_request/_send_trc_request.
            self.send_meta(CtrlPayload(DRKeyMgmt(req), req_id=req_id), meta)
            logging.info("DRKeyRequest (%s) sent to %s via %s [id: %016x]",
                         req.short_desc(), meta, path_meta, req_id)
        else:
            logging.warning("DRKeyRequest (for %s) not sent",
                            req.short_desc())

    def _reply_proto_drkey(self, drkey, meta):
        pass  # TODO(roosd): implement in future PR

    def _get_drkey_secret(self, exp_time):
        """
        Get the drkey secret. A new secret is initialized if no secret
        is found.

        :param int exp_time: expiration time of the drkey secret
        :return: the according drkey secret
        :rtype: DRKeySecretValue
        """
        sv = self.drkey_secrets.get(exp_time)
        if not sv:
            sv = DRKeySecretValue(
                kdf(self.config.master_as_key, b"Derive DRKey Key"), exp_time)
            self.drkey_secrets[sv.exp_time] = sv
        return sv

    def _init_metrics(self):
        """Pre-register all metric label combinations at zero."""
        super()._init_metrics()
        for type_ in ("trc", "cc", "drkey"):
            REQS_TOTAL.labels(**self._labels, type=type_).inc(0)
        IS_MASTER.labels(**self._labels).set(0)

    def run(self):
        """
        Run an instance of the Cert Server.
        """
        threading.Thread(
            target=thread_safety_net, args=(self.worker,),
            name="CS.worker", daemon=True).start()
        super().run()
class SCIONDaemon(SCIONElement): """ The SCION Daemon used for retrieving and combining paths. """ MAX_REQS = 1024 # Time a path segment is cached at a host (in seconds). SEGMENT_TTL = 300 # Empty Path TTL EMPTY_PATH_TTL = SEGMENT_TTL def __init__(self, conf_dir, addr, api_addr, run_local_api=False, port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False): """ Initialize an instance of the class SCIONDaemon. """ super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir, prom_export=prom_export, public=[(addr, port)]) up_labels = {**self._labels, "type": "up"} if self._labels else None down_labels = {**self._labels, "type": "down"} if self._labels else None core_labels = {**self._labels, "type": "core"} if self._labels else None self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels) self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels) self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels) self.rev_cache = RevCache() # Keep track of requested paths. 
self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT) self.req_path_lock = threading.Lock() self._api_sock = None self.daemon_thread = None os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True) self.api_addr = (api_addr or get_default_sciond_path()) if delete_sock: try: os.remove(self.api_addr) except OSError as e: if e.errno != errno.ENOENT: logging.error("Could not delete socket %s: %s" % (self.api_addr, e)) self.CTRL_PLD_CLASS_MAP = { PayloadClass.PATH: { PMT.REPLY: self.handle_path_reply, PMT.REVOCATION: self.handle_revocation, }, PayloadClass.CERT: { CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request, CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply, CertMgmtType.TRC_REPLY: self.process_trc_reply, CertMgmtType.TRC_REQ: self.process_trc_request, }, } self.SCMP_PLD_CLASS_MAP = { SCMPClass.PATH: {SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation}, } if run_local_api: self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond")) self._socks.add(self._api_sock, self.handle_accept) @classmethod def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0): """ Initializes and starts a SCIOND instance. """ inst = cls(conf_dir, addr, api_addr, run_local_api, port) name = "SCIONDaemon.run %s" % inst.addr.isd_as inst.daemon_thread = threading.Thread( target=thread_safety_net, args=(inst.run,), name=name, daemon=True) inst.daemon_thread.start() logging.debug("sciond started with api_addr = %s", inst.api_addr) def _get_msg_meta(self, packet, addr, sock): if sock != self._udp_sock: return packet, SockOnlyMetadata.from_values(sock) # API socket else: return super()._get_msg_meta(packet, addr, sock) def handle_msg_meta(self, msg, meta): """ Main routine to handle incoming SCION messages. 
""" if isinstance(meta, SockOnlyMetadata): # From SCIOND API try: sciond_msg = SCIONDMsg.from_raw(msg) except SCIONParseError as err: logging.error(str(err)) return self.api_handle_request(sciond_msg, meta) return super().handle_msg_meta(msg, meta) def handle_path_reply(self, cpld, meta): """ Handle path reply from local path server. """ pmgt = cpld.union path_reply = pmgt.union assert isinstance(path_reply, PathSegmentReply), type(path_reply) recs = path_reply.recs() for srev_info in recs.iter_srev_infos(): self.check_revocation(srev_info, lambda x: self.continue_revocation_processing( srev_info) if not x else False, meta) req = path_reply.req() key = req.dst_ia(), req.flags() with self.req_path_lock: r = self.requested_paths.get(key) if r: r.notify_reply(path_reply) else: logging.warning("No outstanding request found for %s", key) for type_, pcb in recs.iter_pcbs(): seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta, type_, params=(r,)) self._process_path_seg(seg_meta, cpld.req_id) def continue_revocation_processing(self, srev_info): self.rev_cache.add(srev_info) self.remove_revoked_segments(srev_info.rev_info()) def continue_seg_processing(self, seg_meta): """ For every path segment(that can be verified) received from the path server this function gets called to continue the processing for the segment. The segment is added to pathdb and pending requests are checked. """ pcb = seg_meta.seg type_ = seg_meta.type # Check that segment does not contain a revoked interface. 
if not self.check_revoked_interface(pcb, self.rev_cache): return map_ = { PST.UP: self._handle_up_seg, PST.DOWN: self._handle_down_seg, PST.CORE: self._handle_core_seg, } map_[type_](pcb) r = seg_meta.params[0] if r: r.verified_segment() def _handle_up_seg(self, pcb): if self.addr.isd_as != pcb.last_ia(): return None if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED: logging.debug("Up segment added: %s", pcb.short_desc()) return pcb.first_ia() return None def _handle_down_seg(self, pcb): last_ia = pcb.last_ia() if self.addr.isd_as == last_ia: return None if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED: logging.debug("Down segment added: %s", pcb.short_desc()) return last_ia return None def _handle_core_seg(self, pcb): if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED: logging.debug("Core segment added: %s", pcb.short_desc()) return pcb.first_ia() return None def api_handle_request(self, msg, meta): """ Handle local API's requests. """ mtype = msg.type() if mtype == SMT.PATH_REQUEST: threading.Thread( target=thread_safety_net, args=(self._api_handle_path_request, msg, meta), daemon=True).start() elif mtype == SMT.REVOCATION: self._api_handle_rev_notification(msg, meta) elif mtype == SMT.AS_REQUEST: self._api_handle_as_request(msg, meta) elif mtype == SMT.IF_REQUEST: self._api_handle_if_request(msg, meta) elif mtype == SMT.SERVICE_REQUEST: self._api_handle_service_request(msg, meta) elif mtype == SMT.SEGTYPEHOP_REQUEST: self._api_handle_seg_type_request(msg, meta) else: logging.warning( "API: type %s not supported.", TypeBase.to_str(mtype)) def _api_handle_path_request(self, pld, meta): request = pld.union assert isinstance(request, SCIONDPathRequest), type(request) req_id = pld.id dst_ia = request.dst_ia() src_ia = request.src_ia() if not src_ia: src_ia = self.addr.isd_as thread = threading.current_thread() thread.name = "SCIONDaemon API id:%s %s -> %s" % ( thread.ident, src_ia, dst_ia) paths, error = self.get_paths(dst_ia, 
flush=request.p.flags.refresh) if request.p.maxPaths: paths = paths[:request.p.maxPaths] reply_entries = [] for path_meta in paths: fwd_if = path_meta.fwd_path().get_fwd_if() # Set dummy host addr if path is empty. haddr, port = None, None if fwd_if: br = self.ifid2br[fwd_if] haddr, port = br.int_addrs.public[0] addrs = [haddr] if haddr else [] first_hop = HostInfo.from_values(addrs, port) reply_entry = SCIONDPathReplyEntry.from_values( path_meta, first_hop) reply_entries.append(reply_entry) logging.debug("Replying to api request for %s with %d paths:\n%s", dst_ia, len(paths), "\n".join([p.short_desc() for p in paths])) self._send_path_reply(req_id, reply_entries, error, meta) def _send_path_reply(self, req_id, reply_entries, error, meta): path_reply = SCIONDMsg(SCIONDPathReply.from_values(reply_entries, error), req_id) self.send_meta(path_reply.pack(), meta) def _api_handle_as_request(self, pld, meta): request = pld.union assert isinstance(request, SCIONDASInfoRequest), type(request) req_ia = request.isd_as() if not req_ia or req_ia.is_zero() or req_ia == self.addr.isd_as: # Request is for the local AS. reply_entry = SCIONDASInfoReplyEntry.from_values( self.addr.isd_as, self.is_core_as(), self.topology.mtu) else: # Request is for a remote AS. 
reply_entry = SCIONDASInfoReplyEntry.from_values(req_ia, self.is_core_as(req_ia)) as_reply = SCIONDMsg(SCIONDASInfoReply.from_values([reply_entry]), pld.id) self.send_meta(as_reply.pack(), meta) def _api_handle_if_request(self, pld, meta): request = pld.union assert isinstance(request, SCIONDIFInfoRequest), type(request) all_brs = request.all_brs() if_list = [] if not all_brs: if_list = list(request.iter_ids()) if_entries = [] for if_id, br in self.ifid2br.items(): if all_brs or if_id in if_list: br_addr, br_port = br.int_addrs.public[0] info = HostInfo.from_values([br_addr], br_port) reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info) if_entries.append(reply_entry) if_reply = SCIONDMsg(SCIONDIFInfoReply.from_values(if_entries), pld.id) self.send_meta(if_reply.pack(), meta) def _api_handle_service_request(self, pld, meta): request = pld.union assert isinstance(request, SCIONDServiceInfoRequest), type(request) all_svcs = request.all_services() svc_list = [] if not all_svcs: svc_list = list(request.iter_service_types()) svc_entries = [] for svc_type in ServiceType.all(): if all_svcs or svc_type in svc_list: lookup_res = self.dns_query_topo(svc_type) host_infos = [] for addr, port in lookup_res: host_infos.append(HostInfo.from_values([addr], port)) reply_entry = SCIONDServiceInfoReplyEntry.from_values( svc_type, host_infos) svc_entries.append(reply_entry) svc_reply = SCIONDMsg(SCIONDServiceInfoReply.from_values(svc_entries), pld.id) self.send_meta(svc_reply.pack(), meta) def _api_handle_rev_notification(self, pld, meta): request = pld.union assert isinstance(request, SCIONDRevNotification), type(request) self.handle_revocation(CtrlPayload(PathMgmt(request.srev_info())), meta, pld) def _api_handle_seg_type_request(self, pld, meta): request = pld.union assert isinstance(request, SCIONDSegTypeHopRequest), type(request) segmentType = request.p.type db = [] if segmentType == PST.CORE: db = self.core_segments elif segmentType == PST.UP: db = self.up_segments elif 
segmentType == PST.DOWN: db = self.down_segments else: logging.error("Requesting segment type %s unrecognized.", segmentType) seg_entries = [] for segment in db(full=True): if_list = [] for asm in segment.iter_asms(): isd_as = asm.isd_as() hof = asm.pcbm(0).hof() egress = hof.egress_if ingress = hof.ingress_if if ingress: if_list.append(PathInterface.from_values(isd_as, ingress)) if egress: if_list.append(PathInterface.from_values(isd_as, egress)) reply_entry = SCIONDSegTypeHopReplyEntry.from_values( if_list, segment.get_timestamp(), segment.get_expiration_time()) seg_entries.append(reply_entry) seg_reply = SCIONDMsg( SCIONDSegTypeHopReply.from_values(seg_entries), pld.id) self.send_meta(seg_reply.pack(), meta) def handle_scmp_revocation(self, pld, meta): srev_info = SignedRevInfo.from_raw(pld.info.srev_info) self.handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta) def handle_revocation(self, cpld, meta, pld=None): pmgt = cpld.union srev_info = pmgt.union rev_info = srev_info.rev_info() assert isinstance(rev_info, RevocationInfo), type(rev_info) logging.debug("Received revocation: %s from %s", srev_info.short_desc(), meta) self.check_revocation(srev_info, lambda e: self.process_revocation(e, srev_info, meta, pld), meta) def process_revocation(self, error, srev_info, meta, pld): rev_info = srev_info.rev_info() status = None if error is None: status = SCIONDRevReplyStatus.VALID self.rev_cache.add(srev_info) self.remove_revoked_segments(rev_info) else: if type(error) == RevInfoValidationError: logging.error("Failed to validate RevInfo %s from %s: %s", srev_info.short_desc(), meta, error) status = SCIONDRevReplyStatus.INVALID if type(error) == RevInfoExpiredError: logging.info("Ignoring expired Revinfo, %s from %s", srev_info.short_desc(), meta) status = SCIONDRevReplyStatus.STALE if type(error) == SignedRevInfoCertFetchError: logging.error("Failed to fetch certificate for SignedRevInfo %s from %s: %s", srev_info.short_desc(), meta, error) status = 
SCIONDRevReplyStatus.UNKNOWN if type(error) == SignedRevInfoVerificationError: logging.error("Failed to verify SRevInfo %s from %s: %s", srev_info.short_desc(), meta, error) status = SCIONDRevReplyStatus.SIGFAIL if type(error) == SCIONBaseError: logging.error("Revocation check failed for %s from %s:\n%s", srev_info.short_desc(), meta, error) status = SCIONDRevReplyStatus.UNKNOWN if pld: rev_reply = SCIONDMsg(SCIONDRevReply.from_values(status), pld.id) self.send_meta(rev_reply.pack(), meta) def remove_revoked_segments(self, rev_info): # Go through all segment databases and remove affected segments. removed_up = removed_core = removed_down = 0 if rev_info.p.linkType == LinkType.CORE: removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info) elif rev_info.p.linkType in [LinkType.PARENT, LinkType.CHILD]: removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info) removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info) elif rev_info.p.linkType != LinkType.PEER: logging.error("Bad RevInfo link type: %s", rev_info.p.linkType) logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." % (removed_up, removed_core, removed_down)) def _remove_revoked_pcbs(self, db, rev_info): """ Removes all segments from 'db' that have a revoked upstream PCBMarking. :param db: The PathSegmentDB. :type db: :class:`lib.path_db.PathSegmentDB` :param rev_info: The revocation info :type rev_info: RevocationInfo :returns: The number of deletions. 
:rtype: int """ to_remove = [] for segment in db(full=True): for asm in segment.iter_asms(): if self._check_revocation_for_asm(rev_info, asm, verify_all=False): logging.debug("Removing segment: %s" % segment.short_desc()) to_remove.append(segment.get_hops_hash()) return db.delete_all(to_remove) def _flush_path_dbs(self): self.core_segments.flush() self.down_segments.flush() self.up_segments.flush() def get_paths(self, dst_ia, flags=(), flush=False): """Return a list of paths.""" logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s", dst_ia, flags, flush) if flush: logging.info("Flushing PathDBs.") self._flush_path_dbs() if self.addr.isd_as == dst_ia or ( self.addr.isd_as.any_as() == dst_ia and self.topology.is_core_as): # Either the destination is the local AS, or the destination is any # core AS in this ISD, and the local AS is in the core empty = SCIONPath() exp_time = int(time.time()) + self.EMPTY_PATH_TTL empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu, exp_time) return [empty_meta], SCIONDPathReplyError.OK paths = self.path_resolution(dst_ia, flags=flags) if not paths: key = dst_ia, flags with self.req_path_lock: r = self.requested_paths.get(key) if r is None: # No previous outstanding request req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags) r = RequestState(req.copy()) self.requested_paths[key] = r self._fetch_segments(req) # Wait until event gets set. timeout = not r.e.wait(PATH_REQ_TOUT) with self.req_path_lock: if timeout: r.done() if key in self.requested_paths: del self.requested_paths[key] if timeout: logging.error("Query timed out for %s", dst_ia) return [], SCIONDPathReplyError.PS_TIMEOUT # Check if we can fulfill the path request. 
            paths = self.path_resolution(dst_ia, flags=flags)
            if not paths:
                logging.error("No paths found for %s", dst_ia)
                return [], SCIONDPathReplyError.NO_PATHS
        return paths, SCIONDPathReplyError.OK

    def path_resolution(self, dst_ia, flags=()):
        """
        Resolve paths to ``dst_ia`` from the locally cached segments,
        dispatching on whether the local AS and the destination are core.

        :param dst_ia: destination ISD-AS; AS == 0 means any core AS in the ISD.
        :param flags: path flags; PATH_FLAG_SIBRA selects SIBRA resolution.
        """
        # dst as == 0 means any core AS in the specified ISD.
        dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
        sibra = PATH_FLAG_SIBRA in flags
        if self.topology.is_core_as:
            if dst_is_core:
                ret = self._resolve_core_core(dst_ia, sibra=sibra)
            else:
                ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
        elif dst_is_core:
            ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
        elif sibra:
            ret = self._resolve_not_core_not_core_sibra(dst_ia)
        else:
            ret = self._resolve_not_core_not_core_scion(dst_ia)
        if not sibra:
            return ret
        # FIXME(kormat): Strip off PCBs, and just return sibra reservation
        # blocks
        return self._sibra_strip_pcbs(self._strip_nones(ret))

    def _resolve_core_core(self, dst_ia, sibra=False):
        """Resolve path from core to core."""
        res = set()
        for cseg in self.core_segments(last_ia=self.addr.isd_as,
                                       sibra=sibra, **dst_ia.params()):
            # Core paths only need a core segment: (up, core, down) = (None, cseg, None).
            res.add((None, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_core_not_core(self, dst_ia, sibra=False):
        """Resolve path from core to non-core."""
        res = set()
        # First check whether there is a direct path.
        for dseg in self.down_segments(
                first_ia=self.addr.isd_as, last_ia=dst_ia, sibra=sibra):
            res.add((None, None, dseg))
        # Check core-down combination.
        for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
            dseg_ia = dseg.first_ia()
            if self.addr.isd_as == dseg_ia:
                # NOTE(review): `pass` makes this check a no-op — the core-segment
                # lookup below still runs for down-segments starting at the local
                # AS (already handled by the direct-path loop above). Looks like
                # `continue` was intended; confirm.
                pass
            for cseg in self.core_segments(
                    first_ia=dseg_ia, last_ia=self.addr.isd_as, sibra=sibra):
                res.add((None, cseg, dseg))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_core(self, dst_ia, sibra=False):
        """Resolve path from non-core to core."""
        res = set()
        params = dst_ia.params()
        params["sibra"] = sibra
        if dst_ia[0] == self.addr.isd_as[0]:
            # Dst in local ISD. First check whether DST is a (super)-parent.
            for useg in self.up_segments(**params):
                res.add((useg, None, None))
        # Check whether dst is known core AS.
        for cseg in self.core_segments(**params):
            # Check do we have an up-seg that is connected to core_seg.
            for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
                res.add((useg, cseg, None))
        if sibra:
            return res
        return tuples_to_full_paths(res)

    def _resolve_not_core_not_core_scion(self, dst_ia):
        """Resolve SCION path from non-core to non-core."""
        up_segs = self.up_segments()
        down_segs = self.down_segments(last_ia=dst_ia)
        core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
        full_paths = build_shortcut_paths(
            up_segs, down_segs, self.rev_cache)
        tuples = []
        for up_seg in up_segs:
            for down_seg in down_segs:
                # Up+down without a core segment (same core AS), plus every
                # up+core+down combination.
                tuples.append((up_seg, None, down_seg))
                for core_seg in core_segs:
                    tuples.append((up_seg, core_seg, down_seg))
        full_paths.extend(tuples_to_full_paths(tuples))
        return full_paths

    def _resolve_not_core_not_core_sibra(self, dst_ia):
        """Resolve SIBRA path from non-core to non-core."""
        res = set()
        up_segs = set(self.up_segments(sibra=True))
        down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
        for up_seg, down_seg in product(up_segs, down_segs):
            src_core_ia = up_seg.first_ia()
            dst_core_ia = down_seg.first_ia()
            if src_core_ia == dst_core_ia:
                # Both segments meet at the same core AS; no core segment needed.
                res.add((up_seg, down_seg))
                continue
            for core_seg in self.core_segments(first_ia=dst_core_ia,
                                               last_ia=src_core_ia, sibra=True):
                res.add((up_seg, core_seg, down_seg))
        return res

    def _strip_nones(self, set_):
        """Strip None entries from a set of tuples"""
        res = []
        for tup in set_:
            res.append(tuple(filter(None, tup)))
        return res

    def _sibra_strip_pcbs(self, paths):
        """Map every PCB in every path to its SIBRA (id, resv block, iflist) triple."""
        ret = []
        for pcbs in paths:
            resvs = []
            for pcb in pcbs:
                resvs.append(self._sibra_strip_pcb(pcb))
            ret.append(resvs)
        return ret

    def _sibra_strip_pcb(self, pcb):
        """
        Extract the SIBRA reservation from a single PCB.

        :returns: tuple of (reservation id, ResvBlockSteady, interface list).
        """
        assert pcb.is_sibra()
        pcb_ext = pcb.sibra_ext
        resv_info = pcb_ext.info
        resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
        asms = pcb.iter_asms()
        if pcb_ext.p.up:
            # Up-segments are traversed in reverse AS order.
            asms = reversed(list(asms))
        iflist = []
        for sof, asm in zip(pcb_ext.iter_sofs(), asms):
            resv.sofs.append(sof)
            iflist.extend(self._sibra_add_ifs(
                asm.isd_as(), sof, resv_info.fwd_dir))
        assert resv.num_hops == len(resv.sofs)
        return pcb_ext.p.id, resv, iflist

    def _sibra_add_ifs(self, isd_as, sof, fwd):
        """
        List the (isd_as, ifid) pairs a SOF crosses, in traversal order
        (ingress->egress when ``fwd``, else egress->ingress); zero ifids
        (segment endpoints) are skipped.
        """
        def _add(ifid):
            if ifid:
                ret.append((isd_as, ifid))
        ret = []
        if fwd:
            _add(sof.ingress)
            _add(sof.egress)
        else:
            _add(sof.egress)
            _add(sof.ingress)
        return ret

    def _wait_for_events(self, events, deadline):
        """
        Wait on a set of events, but only until the specified deadline. Returns
        the number of events that happened while waiting.
        """
        count = 0
        for e in events:
            if e.wait(max(0, deadline - SCIONTime.get_time())):
                count += 1
        return count

    def _fetch_segments(self, req):
        """
        Called to fetch the requested path.
        """
        try:
            addr, port = self.dns_query_topo(ServiceType.PS)[0]
        except SCIONServiceLookupError:
            log_exception("Error querying path service:")
            return
        req_id = mk_ctrl_req_id()
        logging.debug("Sending path request (%s) to [%s]:%s [id: %016x]",
                      req.short_desc(), addr, port, req_id)
        meta = self._build_meta(host=addr, port=port)
        self.send_meta(CtrlPayload(PathMgmt(req), req_id=req_id), meta)

    def _calc_core_segs(self, dst_isd, up_segs, down_segs):
        """
        Calculate all possible core segments joining the provided up and down
        segments. Returns a list of all known core segments connecting the
        core ASes reachable via the up and down segments.
""" src_core_ases = set() dst_core_ases = set() for seg in up_segs: src_core_ases.add(seg.first_ia()[1]) for seg in down_segs: dst_core_ases.add(seg.first_ia()[1]) # Generate all possible AS pairs as_pairs = list(product(src_core_ases, dst_core_ases)) return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs) def _find_core_segs(self, src_isd, dst_isd, as_pairs): """ Given a set of AS pairs across 2 ISDs, return the core segments connecting those pairs """ core_segs = [] for src_core_as, dst_core_as in as_pairs: src_ia = ISD_AS.from_values(src_isd, src_core_as) dst_ia = ISD_AS.from_values(dst_isd, dst_core_as) if src_ia == dst_ia: continue seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia) if seg: core_segs.extend(seg) return core_segs def run(self): """ Run an instance of the SCION daemon. """ threading.Thread( target=thread_safety_net, args=(self._check_trc_cert_reqs,), name="Elem.check_trc_cert_reqs", daemon=True).start() super().run()
class SCIONElement(object):
    """
    Base class for the different kind of servers the SCION infrastructure
    provides.

    :ivar `Topology` topology: the topology of the AS as seen by the server.
    :ivar `Config` config: the configuration of the AS in which the server is
        located.
    :ivar dict ifid2br: map of interface ID to RouterElement.
    :ivar `SCIONAddr` addr: the server's address.
    """
    # Service type string; subclasses set this to look up their own topology entry.
    SERVICE_TYPE = None
    STARTUP_QUIET_PERIOD = STARTUP_QUIET_PERIOD
    # When True, control messages are sent over TCP instead of UDP.
    USE_TCP = False
    # Timeout for TRC or Certificate requests.
    TRC_CC_REQ_TIMEOUT = 3

    def __init__(self, server_id, conf_dir, public=None, bind=None,
                 spki_cache_dir=GEN_CACHE_PATH, prom_export=None):
        """
        :param str server_id: server identifier.
        :param str conf_dir: configuration directory.
        :param list public:
            (host_addr, port) of the element's public address
            (i.e. the address visible to other network elements).
        :param list bind:
            (host_addr, port) of the element's bind address, if any
            (i.e. the address the element uses to identify itself to the local
            operating system, if it differs from the public address due to
            NAT).
        :param str spki_cache_dir:
            Path for caching TRCs and certificate chains.
        :param str prom_export:
            String of the form 'addr:port' specifying the prometheus endpoint.
            If no string is provided, no metrics are exported.
        """
        self.id = server_id
        self.conf_dir = conf_dir
        self.ifid2br = {}
        self.topology = Topology.from_file(
            os.path.join(self.conf_dir, TOPO_FILE))
        # Labels attached to every exported metric.
        self._labels = {"server_id": self.id,
                        "isd_as": str(self.topology.isd_as)}
        # Must be over-ridden by child classes:
        self.CTRL_PLD_CLASS_MAP = {}
        self.SCMP_PLD_CLASS_MAP = {}
        self.public = public
        self.bind = bind
        if self.SERVICE_TYPE:
            # Fall back to the addresses from our own topology entry when the
            # caller didn't supply them explicitly.
            own_config = self.topology.get_own_config(self.SERVICE_TYPE,
                                                      server_id)
            if public is None:
                self.public = own_config.public
            if bind is None:
                self.bind = own_config.bind
        self.init_ifid2br()
        self.trust_store = TrustStore(self.conf_dir, spki_cache_dir,
                                      self.id, self._labels)
        self.total_dropped = 0
        self._core_ases = defaultdict(list)  # Mapping ISD_ID->list of core ASes
        self.init_core_ases()
        self.run_flag = threading.Event()
        self.run_flag.set()
        self.stopped_flag = threading.Event()
        self.stopped_flag.clear()
        self._in_buf = queue.Queue(MAX_QUEUE)
        self._socks = SocketMgr()
        self._startup = time.time()
        if self.USE_TCP:
            self._DefaultMeta = TCPMetadata
        else:
            self._DefaultMeta = UDPMetadata
        # Segments awaiting TRC/cert verification; entries expire after 1h.
        self.unverified_segs = ExpiringDict(500, 60 * 60)
        self.unv_segs_lock = threading.RLock()
        # Outstanding TRC/cert requests: key -> (request time, requester meta).
        self.requested_trcs = {}
        self.req_trcs_lock = threading.Lock()
        self.requested_certs = {}
        self.req_certs_lock = threading.Lock()
        # TODO(jonghoonkwon): Fix me to setup sockets for multiple public addresses
        host_addr, self._port = self.public[0]
        self.addr = SCIONAddr.from_values(self.topology.isd_as, host_addr)
        if prom_export:
            self._export_metrics(prom_export)
            self._init_metrics()
        self._setup_sockets(True)
        lib_sciond.init(os.path.join(SCIOND_API_SOCKDIR,
                                     "sd%s.sock" % self.addr.isd_as))

    def _load_as_conf(self):
        """Load and return the AS configuration from the config directory."""
        return Config.from_file(os.path.join(self.conf_dir, AS_CONF_FILE))

    def _setup_sockets(self, init):
        """
        Setup incoming socket and register with dispatcher
        """
        self._tcp_sock = None
        self._tcp_new_conns = queue.Queue(MAX_QUEUE)  # New TCP connections.
        if self._port is None:
            # No scion socket desired.
            return
        svc = SERVICE_TO_SVC_A.get(self.SERVICE_TYPE)
        # Setup TCP "accept" socket.
        self._setup_tcp_accept_socket(svc)
        # Setup UDP socket
        if self.bind:
            # TODO(jonghoonkwon): Fix me to setup socket for a proper bind address,
            # if the element has more than one bind addresses
            host_addr, b_port = self.bind[0]
            b_addr = SCIONAddr.from_values(self.topology.isd_as, host_addr)
            self._udp_sock = ReliableSocket(
                reg=(self.addr, self._port, init, svc),
                bind_ip=(b_addr, b_port))
        else:
            self._udp_sock = ReliableSocket(
                reg=(self.addr, self._port, init, svc))
        if not self._udp_sock.registered:
            # Dispatcher registration failed; run without a UDP socket.
            self._udp_sock = None
            return
        if self._labels:
            CONNECTED_TO_DISPATCHER.labels(**self._labels).set(1)
        # The dispatcher may have assigned us a port; record the actual one.
        self._port = self._udp_sock.port
        self._socks.add(self._udp_sock, self.handle_recv)

    def _setup_tcp_accept_socket(self, svc):
        """
        Bind and listen on the TCP accept socket (only when USE_TCP is set),
        retrying while the dispatcher comes up; kills the process on failure.
        """
        if not self.USE_TCP:
            return
        MAX_TRIES = 40
        for i in range(MAX_TRIES):
            try:
                self._tcp_sock = SCIONTCPSocket()
                self._tcp_sock.setsockopt(SockOpt.SOF_REUSEADDR)
                self._tcp_sock.set_recv_tout(TCP_ACCEPT_POLLING_TOUT)
                self._tcp_sock.bind((self.addr, self._port), svc=svc)
                self._tcp_sock.listen()
                break
            except SCIONTCPError as e:
                logging.warning("TCP: Cannot connect to LWIP socket: %s" % e)
                time.sleep(1)  # Wait for dispatcher
        else:
            # All retries exhausted.
            logging.critical("TCP: cannot init TCP socket.")
            kill_self()

    def init_ifid2br(self):
        """Build the interface-ID -> border-router map from the topology."""
        for br in self.topology.border_routers:
            for if_id in br.interfaces:
                self.ifid2br[if_id] = br

    def init_core_ases(self):
        """
        Initializes dict of core ASes.
        """
        for trc in self.trust_store.get_trcs():
            self._core_ases[trc.isd] = trc.get_core_ases()

    def is_core_as(self, isd_as=None):
        """Return whether ``isd_as`` (default: the local AS) is a core AS."""
        if not isd_as:
            isd_as = self.addr.isd_as
        return isd_as in self._core_ases[isd_as[0]]

    def _update_core_ases(self, trc):
        """
        When a new trc is received, this function is called to
        update the core ases map
        """
        self._core_ases[trc.isd] = trc.get_core_ases()

    def get_border_addr(self, ifid):
        """Return (addr, port) of the border router owning interface ``ifid``."""
        br = self.ifid2br[ifid]
        addr_idx = br.interfaces[ifid].addr_idx
        br_addr, br_port = br.int_addrs[addr_idx].public[0]
        return br_addr, br_port

    def handle_msg_meta(self, msg, meta):
        """
        Main routine to handle incoming SCION messages.
        """
        if isinstance(meta, SCMPMetadata):
            handler = self._get_scmp_handler(meta.pkt)
        else:
            handler = self._get_ctrl_handler(msg)
        if not handler:
            logging.error("handler not found: %s", msg)
            return
        try:
            # SIBRA operates on parsed packets.
            if (isinstance(meta, UDPMetadata) and
                    msg.type() == PayloadClass.SIBRA):
                handler(meta.pkt)
            else:
                handler(msg, meta)
        except SCIONBaseError:
            log_exception("Error handling message:\n%s" % msg)

    def _check_trc_cert_reqs(self):
        """Periodically re-check outstanding TRC and certificate requests."""
        check_cyle = 1.0
        while self.run_flag.is_set():
            start = time.time()
            self._check_cert_reqs()
            self._check_trc_reqs()
            sleep_interval(start, check_cyle, "Elem._check_trc_cert_reqs cycle")

    def _check_trc_reqs(self):
        """
        Checks if TRC requests timeout and resends requests if so.
""" with self.req_trcs_lock: now = time.time() for (isd, ver), (req_time, meta) in self.requested_trcs.items(): if now - req_time >= self.TRC_CC_REQ_TIMEOUT: trc_req = TRCRequest.from_values(isd, ver, cache_only=True) meta = meta or self._get_cs() req_id = mk_ctrl_req_id() logging.info("Re-Requesting TRC from %s: %s [id: %016x]", meta, trc_req.short_desc(), req_id) self.send_meta(CtrlPayload(CertMgmt(trc_req), req_id=req_id), meta) self.requested_trcs[(isd, ver)] = (time.time(), meta) if self._labels: PENDING_TRC_REQS_TOTAL.labels(**self._labels).set(len(self.requested_trcs)) def _check_cert_reqs(self): """ Checks if certificate requests timeout and resends requests if so. """ with self.req_certs_lock: now = time.time() for (isd_as, ver), (req_time, meta) in self.requested_certs.items(): if now - req_time >= self.TRC_CC_REQ_TIMEOUT: cert_req = CertChainRequest.from_values(isd_as, ver, cache_only=True) meta = meta or self._get_cs() req_id = mk_ctrl_req_id() logging.info("Re-Requesting CERTCHAIN from %s: %s [id: %016x]", meta, cert_req.short_desc(), req_id) self.send_meta(CtrlPayload(CertMgmt(cert_req), req_id=req_id), meta) self.requested_certs[(isd_as, ver)] = (time.time(), meta) if self._labels: PENDING_CERT_REQS_TOTAL.labels(**self._labels).set( len(self.requested_certs)) def _process_path_seg(self, seg_meta, req_id=None): """ When a pcb or path segment is received, this function is called to find missing TRCs and certs and request them. :param seg_meta: PathSegMeta object that contains pcb/path segment """ meta_str = str(seg_meta.meta) if seg_meta.meta else "ZK" req_str = "[id: %016x]" % req_id if req_id else "" logging.debug("Handling PCB from %s: %s %s", meta_str, seg_meta.seg.short_desc(), req_str) with self.unv_segs_lock: # Close the meta of the previous seg_meta, if there was one. 
prev_meta = self.unverified_segs.get(seg_meta.id) if prev_meta and prev_meta.meta: prev_meta.meta.close() self.unverified_segs[seg_meta.id] = seg_meta if self._labels: UNV_SEGS_TOTAL.labels(**self._labels).set(len(self.unverified_segs)) # Find missing TRCs and certificates missing_trcs = self._missing_trc_versions(seg_meta.trc_vers) missing_certs = self._missing_cert_versions(seg_meta.cert_vers) # Update missing TRCs/certs map seg_meta.missing_trcs.update(missing_trcs) seg_meta.missing_certs.update(missing_certs) # If all necessary TRCs/certs available, try to verify if seg_meta.verifiable(): self._try_to_verify_seg(seg_meta) return # Otherwise request missing trcs, certs self._request_missing_trcs(seg_meta) self._request_missing_certs(seg_meta) if seg_meta.meta: seg_meta.meta.close() def _try_to_verify_seg(self, seg_meta): """ If this pcb/path segment can be verified, call the function to process a verified pcb/path segment """ try: self._verify_path_seg(seg_meta) except SCIONVerificationError as e: logging.error("Signature verification failed for %s: %s" % (seg_meta.seg.short_id(), e)) return with self.unv_segs_lock: self.unverified_segs.pop(seg_meta.id, None) if self._labels: UNV_SEGS_TOTAL.labels(**self._labels).set(len(self.unverified_segs)) if seg_meta.meta: seg_meta.meta.close() seg_meta.callback(seg_meta) def _get_cs(self): """ Lookup certificate servers address and return meta. """ try: addr, port = self.dns_query_topo(CERTIFICATE_SERVICE)[0] except SCIONServiceLookupError as e: logging.warning("Lookup for certificate service failed: %s", e) return None return UDPMetadata.from_values(host=addr, port=port) def _request_missing_trcs(self, seg_meta): """ For all missing TRCs which are missing to verify this pcb/path segment, request them. Request is sent to certificate server, if the pcb/path segment was received by zk. Otherwise the sender of this pcb/path segment is asked. 
""" missing_trcs = set() with seg_meta.miss_trc_lock: missing_trcs = seg_meta.missing_trcs.copy() if not missing_trcs: return for isd, ver in missing_trcs: with self.req_trcs_lock: req_time, meta = self.requested_trcs.get((isd, ver), (None, None)) if meta: # There is already an outstanding request for the missing TRC # from somewhere else than than the local CS if seg_meta.meta: # Update the stored meta with the latest known server that has the TRC. self.requested_trcs[(isd, ver)] = (req_time, seg_meta.meta) continue if req_time and not seg_meta.meta: # There is already an outstanding request for the missing TRC # to the local CS and we don't have a new meta. continue trc_req = TRCRequest.from_values(isd, ver, cache_only=True) meta = seg_meta.meta or self._get_cs() if not meta: logging.error("Couldn't find a CS to request TRC for PCB %s", seg_meta.seg.short_id()) continue req_id = mk_ctrl_req_id() logging.info("Requesting %sv%s TRC from %s, for PCB %s [id: %016x]", isd, ver, meta, seg_meta.seg.short_id(), req_id) with self.req_trcs_lock: self.requested_trcs[(isd, ver)] = (time.time(), seg_meta.meta) if self._labels: PENDING_TRC_REQS_TOTAL.labels(**self._labels).set(len(self.requested_trcs)) self.send_meta(CtrlPayload(CertMgmt(trc_req), req_id=req_id), meta) def _request_missing_certs(self, seg_meta): """ For all missing CCs which are missing to verify this pcb/path segment, request them. Request is sent to certificate server, if the pcb/path segment was received by zk. Otherwise the sender of this pcb/path segment is asked. 
""" missing_certs = set() with seg_meta.miss_cert_lock: missing_certs = seg_meta.missing_certs.copy() if not missing_certs: return for isd_as, ver in missing_certs: with self.req_certs_lock: req_time, meta = self.requested_certs.get((isd_as, ver), (None, None)) if meta: # There is already an outstanding request for the missing cert # from somewhere else than than the local CS if seg_meta.meta: # Update the stored meta with the latest known server that has the cert. self.requested_certs[(isd_as, ver)] = (req_time, seg_meta.meta) continue if req_time and not seg_meta.meta: # There is already an outstanding request for the missing cert # to the local CS and we don't have a new meta. continue cert_req = CertChainRequest.from_values(isd_as, ver, cache_only=True) meta = seg_meta.meta or self._get_cs() if not meta: logging.error("Couldn't find a CS to request CERTCHAIN for PCB %s", seg_meta.seg.short_id()) continue req_id = mk_ctrl_req_id() logging.info("Requesting %sv%s CERTCHAIN from %s for PCB %s [id: %016x]", isd_as, ver, meta, seg_meta.seg.short_id(), req_id) with self.req_certs_lock: self.requested_certs[(isd_as, ver)] = (time.time(), seg_meta.meta) if self._labels: PENDING_CERT_REQS_TOTAL.labels(**self._labels).set(len(self.requested_certs)) self.send_meta(CtrlPayload(CertMgmt(cert_req), req_id=req_id), meta) def _missing_trc_versions(self, trc_versions): """ Check which intermediate trcs are missing and return their versions. 
        :returns: the missing TRCs' versions as a set of (isd, version) pairs.
        :rtype: set
        """
        missing_trcs = set()
        for isd, versions in trc_versions.items():
            # If not local TRC, only request versions contained in ASMarkings
            # NOTE(review): `is not` compares int identity, not equality; this
            # relies on small-int interning. `!=` is the safe comparison — confirm.
            if isd is not self.topology.isd_as[0]:
                for ver in versions:
                    if self.trust_store.get_trc(isd, ver) is None:
                        missing_trcs.add((isd, ver))
                continue
            # Local TRC: request every version between the newest local one and
            # the newest requested one.
            max_req_ver = max(versions)
            max_local_ver = self.trust_store.get_trc(isd)
            lower_ver = 0
            if max_local_ver is None:
                # This should never happen
                logging.critical("Local TRC not found!")
                kill_self()
            lower_ver = max_local_ver.version + 1
            for ver in range(lower_ver, max_req_ver + 1):
                missing_trcs.add((isd, ver))
        return missing_trcs

    def _missing_cert_versions(self, cert_versions):
        """
        Check which certificates are missing and return their versions.

        :returns: the missing certs' versions as a set of (isd_as, version)
            pairs.
        :rtype: set
        """
        missing_certs = set()
        for isd_as, versions in cert_versions.items():
            for ver in versions:
                if self.trust_store.get_cert(isd_as, ver) is None:
                    missing_certs.add((isd_as, ver))
        return missing_certs

    def process_trc_reply(self, cpld, meta):
        """
        Process the TRC reply.

        :param cpld: control payload wrapping a TRCReply.
        :param meta: metadata of the sender; closed on entry.
        """
        meta.close()
        cmgt = cpld.union
        rep = cmgt.union
        assert isinstance(rep, TRCReply), type(rep)
        isd, ver = rep.trc.get_isd_ver()
        logging.info("TRC reply received for %sv%s from %s [id: %s]",
                     isd, ver, meta, cpld.req_id_str())
        self.trust_store.add_trc(rep.trc, True)
        # Update core ases for isd this trc belongs to
        max_local_ver = self.trust_store.get_trc(rep.trc.isd)
        if max_local_ver.version == rep.trc.version:
            self._update_core_ases(rep.trc)
        with self.req_trcs_lock:
            self.requested_trcs.pop((isd, ver), None)
            if self._labels:
                PENDING_TRC_REQS_TOTAL.labels(**self._labels).set(
                    len(self.requested_trcs))
        # Send trc to CS
        if meta.get_addr().isd_as != self.addr.isd_as:
            cs_meta = self._get_cs()
            self.send_meta(CtrlPayload(CertMgmt(rep)), cs_meta)
            cs_meta.close()
        # Remove received TRC from map
        self._check_segs_with_rec_trc(isd, ver)

    def _check_segs_with_rec_trc(self, isd, ver):
        """
        When a trc reply is received, this method is called to check which
        segments can be verified. For all segments that can be verified,
        the processing is continued.
""" with self.unv_segs_lock: for seg_meta in list(self.unverified_segs.values()): with seg_meta.miss_trc_lock: seg_meta.missing_trcs.discard((isd, ver)) # If all required trcs and certs are received if seg_meta.verifiable(): self._try_to_verify_seg(seg_meta) def process_trc_request(self, cpld, meta): """Process a TRC request.""" cmgt = cpld.union req = cmgt.union assert isinstance(req, TRCRequest), type(req) isd, ver = req.isd_as()[0], req.p.version logging.info("TRC request received for %sv%s from %s [id: %s]" % (isd, ver, meta, cpld.req_id_str())) trc = self.trust_store.get_trc(isd, ver) if trc: self.send_meta( CtrlPayload(CertMgmt(TRCReply.from_values(trc)), req_id=cpld.req_id), meta) else: logging.warning("Could not find requested TRC %sv%s [id: %s]" % (isd, ver, cpld.req_id_str())) def process_cert_chain_reply(self, cpld, meta): """Process a certificate chain reply.""" cmgt = cpld.union rep = cmgt.union assert isinstance(rep, CertChainReply), type(rep) meta.close() isd_as, ver = rep.chain.get_leaf_isd_as_ver() logging.info("Cert chain reply received for %sv%s from %s [id: %s]", isd_as, ver, meta, cpld.req_id_str()) self.trust_store.add_cert(rep.chain, True) with self.req_certs_lock: self.requested_certs.pop((isd_as, ver), None) if self._labels: PENDING_CERT_REQS_TOTAL.labels(**self._labels).set(len(self.requested_certs)) # Send cc to CS if meta.get_addr().isd_as != self.addr.isd_as: cs_meta = self._get_cs() self.send_meta(CtrlPayload(CertMgmt(rep)), cs_meta) cs_meta.close() # Remove received cert chain from map self._check_segs_with_rec_cert(isd_as, ver) def _check_segs_with_rec_cert(self, isd_as, ver): """ When a CC reply is received, this method is called to check which segments can be verified. For all segments that can be verified, the processing is continued. 
""" with self.unv_segs_lock: for seg_meta in list(self.unverified_segs.values()): with seg_meta.miss_cert_lock: seg_meta.missing_certs.discard((isd_as, ver)) # If all required trcs and certs are received. if seg_meta.verifiable(): self._try_to_verify_seg(seg_meta) def process_cert_chain_request(self, cpld, meta): """Process a certificate chain request.""" cmgt = cpld.union req = cmgt.union assert isinstance(req, CertChainRequest), type(req) isd_as, ver = req.isd_as(), req.p.version logging.info("Cert chain request received for %sv%s from %s [id: %s]" % (isd_as, ver, meta, cpld.req_id_str())) cert = self.trust_store.get_cert(isd_as, ver) if cert: self.send_meta( CtrlPayload(CertMgmt(CertChainReply.from_values(cert)), req_id=cpld.req_id), meta) else: logging.warning("Could not find requested certificate %sv%s [id: %s]" % (isd_as, ver, cpld.req_id_str())) def _verify_path_seg(self, seg_meta): """ Signature verification for all AS markings within this pcb/path segment. This function is called, when all TRCs and CCs used within this pcb/path segment are available. """ seg = seg_meta.seg exp_time = seg.get_expiration_time() for i, asm in enumerate(seg.iter_asms()): cert_ia = asm.isd_as() trc = self.trust_store.get_trc(cert_ia[0], asm.p.trcVer) chain = self.trust_store.get_cert(asm.isd_as(), asm.p.certVer) self._verify_exp_time(exp_time, chain) verify_chain_trc(cert_ia, chain, trc) seg.verify(chain.as_cert.subject_sig_key_raw, i) def _verify_exp_time(self, exp_time, chain): """ Verify that certificate chain cover the expiration time. 
        :raises SCIONVerificationError
        """
        # chain is only verifiable if
        # TRC.exp_time >= CoreCert.exp_time >= LeafCert.exp_time
        if chain.as_cert.expiration_time < exp_time:
            raise SCIONVerificationError(
                "Certificate chain %sv%s expires before path segment" %
                chain.get_leaf_isd_as_ver())

    def _get_ctrl_handler(self, msg):
        """
        Look up the handler for a control payload via CTRL_PLD_CLASS_MAP.
        Returns None (after logging) when class or type is unsupported.
        """
        pclass = msg.type()
        try:
            type_map = self.CTRL_PLD_CLASS_MAP[pclass]
        except KeyError:
            logging.error("Control payload class not supported: %s\n%s",
                          pclass, msg)
            return None
        ptype = msg.inner_type()
        try:
            return type_map[ptype]
        except KeyError:
            logging.error("%s control payload type not supported: %s\n%s",
                          pclass, ptype, msg)
        return None

    def _get_scmp_handler(self, pkt):
        """
        Look up the handler for an SCMP packet via SCMP_PLD_CLASS_MAP.
        Returns None (after logging) when class or type is unsupported.
        """
        scmp = pkt.l4_hdr
        try:
            type_map = self.SCMP_PLD_CLASS_MAP[scmp.class_]
        except KeyError:
            logging.error("SCMP class not supported: %s(%s)\n%s",
                          scmp.class_, SCMPClass.to_str(scmp.class_), pkt)
            return None
        try:
            return type_map[scmp.type]
        except KeyError:
            logging.error("SCMP %s type not supported: %s(%s)\n%s", scmp.type,
                          scmp.class_, scmp_type_name(scmp.class_, scmp.type),
                          pkt)
        return None

    def _parse_packet(self, packet):
        """
        Parse and validate a raw packet; returns the parsed packet, or None on
        parse/validation errors (SCMP errors are answered where possible).
        """
        try:
            pkt = SCIONL4Packet(packet)
        except SCMPError as e:
            self._scmp_parse_error(packet, e)
            return None
        except SCIONBaseError:
            log_exception("Error parsing packet: %s" % hex_str(packet),
                          level=logging.ERROR)
            return None
        try:
            pkt.validate(len(packet))
        except SCMPError as e:
            self._scmp_validate_error(pkt, e)
            return None
        except SCIONChecksumFailed:
            # NOTE(review): the log says "Dropping", but execution falls
            # through to `return pkt`, so the packet is NOT dropped — a
            # `return None` here may have been intended; confirm.
            logging.debug("Dropping packet due to failed checksum:\n%s", pkt)
        return pkt

    def _scmp_parse_error(self, packet, e):
        """Handle an SCMP error raised while parsing a raw packet (log + drop)."""
        HDR_TYPE_OFFSET = 6
        if packet[HDR_TYPE_OFFSET] == L4Proto.SCMP:
            # Ideally, never respond to an SCMP error with an SCMP error.
            # However, if parsing failed, we can (at best) only determine if
            # it's an SCMP packet, so just drop SCMP packets on parse error.
            logging.warning("Dropping SCMP packet due to parse error. %s", e)
            return
        # For now, none of these can be properly handled, so just log and drop
        # the packet. In the future, the "x Not Supported" errors might be
        # handlable in the case of deprecating old versions.
        DROP = SCMPBadVersion, SCMPBadSrcType, SCMPBadDstType
        assert isinstance(e, DROP), type(e)
        logging.warning("Dropping packet due to parse error: %s", e)

    def _scmp_validate_error(self, pkt, e):
        """Answer a validation failure with an appropriate SCMP error packet."""
        if pkt.cmn_hdr.next_hdr == L4Proto.SCMP and pkt.ext_hdrs[0].error:
            # Never respond to an SCMP error with an SCMP error.
            logging.info(
                "Dropping SCMP error packet due to validation error. %s", e)
            return
        if isinstance(e, (SCMPBadIOFOffset, SCMPBadHOFOffset)):
            # Can't handle normally, as the packet isn't reversible.
            # NOTE(review): _scmp_bad_path_metadata returns None for non-local
            # sources; `reply` is still used below in that case — confirm this
            # path cannot be reached with a None reply.
            reply = self._scmp_bad_path_metadata(pkt, e)
        else:
            logging.warning("Error: %s", type(e))
            reply = pkt.reversed_copy()
            args = ()
            if isinstance(e, SCMPUnspecified):
                args = (str(e),)
            elif isinstance(e, (SCMPOversizePkt, SCMPBadPktLen)):
                args = (e.args[1],)  # the relevant MTU.
            elif isinstance(e, (SCMPTooManyHopByHop, SCMPBadExtOrder,
                                SCMPBadHopByHop)):
                args = e.args
                if isinstance(e, SCMPBadExtOrder):
                    # Delete the problematic extension.
                    del reply.ext_hdrs[args[0]]
            reply.convert_to_scmp_error(self.addr, e.CLASS, e.TYPE, pkt, *args)
        if pkt.addrs.src.isd_as == self.addr.isd_as:
            # No path needed for a local reply.
            reply.path = SCIONPath()
        next_hop, port = self.get_first_hop(reply)
        reply.update()
        self.send(reply, next_hop, port)

    def _scmp_bad_path_metadata(self, pkt, e):
        """
        Handle a packet with an invalid IOF/HOF offset in the common header.
        As the path can't be used, a response can only be sent if the source
        is local (as that doesn't require a path).
        """
        if pkt.addrs.src.isd_as != self.addr.isd_as:
            logging.warning(
                "Invalid path metadata in packet from "
                "non-local source, dropping: %s\n%s\n%s\n%s",
                e, pkt.cmn_hdr, pkt.addrs, pkt.path)
            return
        reply = copy.deepcopy(pkt)
        # Remove existing path before reversing.
        reply.path = SCIONPath()
        reply.reverse()
        reply.convert_to_scmp_error(self.addr, e.CLASS, e.TYPE, pkt)
        reply.update()
        logging.warning(
            "Invalid path metadata in packet from "
            "local source, sending SCMP error: %s\n%s\n%s\n%s",
            e, pkt.cmn_hdr, pkt.addrs, pkt.path)
        return reply

    def get_first_hop(self, spkt):
        """
        Returns first hop addr of down-path or end-host addr.
        """
        return self._get_first_hop(spkt.path, spkt.addrs.dst, spkt.ext_hdrs)

    def _get_first_hop(self, path, dst, ext_hdrs=()):
        """
        Resolve the (addr, port) of the next hop for ``path``/``dst``;
        extension headers may override the forwarding interface. Returns
        (None, None) when no first hop can be determined.
        """
        if_id = self._ext_first_hop(ext_hdrs)
        if if_id is None:
            if len(path) == 0:
                # Empty path: destination must be in the local AS.
                return self._empty_first_hop(dst)
            if_id = path.get_fwd_if()
        if if_id in self.ifid2br:
            return self.get_border_addr(if_id)
        logging.error("Unable to find first hop:\n%s", path)
        return None, None

    def _ext_first_hop(self, ext_hdrs):
        """Return the first interface ID dictated by an extension header, if any."""
        for hdr in ext_hdrs:
            if_id = hdr.get_next_ifid()
            if if_id is not None:
                return if_id

    def _empty_first_hop(self, dst):
        """
        First hop for a packet without a path: deliver directly within the
        local AS, resolving SVC addresses via DNS. Returns (None, None) for
        remote destinations.
        """
        if dst.isd_as != self.addr.isd_as:
            logging.error("Packet to remote AS w/o path, dst: %s", dst)
            return None, None
        host = dst.host
        if host.TYPE == AddrType.SVC:
            host = self.dns_query_topo(SVC_TO_SERVICE[host.addr])[0][0]
        return host, SCION_UDP_EH_DATA_PORT

    def _build_packet(self, dst_host=None, path=None, ext_hdrs=(), dst_ia=None,
                      payload=None, dst_port=0):
        """
        Build a SCION UDP packet from this element to the given destination,
        defaulting every missing piece (no host, empty path, local ISD-AS,
        raw payload).
        """
        if dst_host is None:
            dst_host = HostAddrNone()
        if dst_ia is None:
            dst_ia = self.addr.isd_as
        if path is None:
            path = SCIONPath()
        if payload is None:
            payload = PayloadRaw()
        dst_addr = SCIONAddr.from_values(dst_ia, dst_host)
        cmn_hdr, addr_hdr = build_base_hdrs(dst_addr, self.addr)
        udp_hdr = SCIONUDPHeader.from_values(
            self.addr, self._port, dst_addr, dst_port)
        return SCIONL4Packet.from_values(
            cmn_hdr, addr_hdr, path, ext_hdrs, udp_hdr, payload)

    def send(self, packet, dst, dst_port):
        """
        Send *packet* to *dst* (to port *dst_port*) using the local socket.
        Calling ``packet.pack()`` should return :class:`bytes`, and
        ``dst.__str__()`` should return a string representing an IP address.

        :param packet: the packet to be sent to the destination.
        :param str dst: the destination IP address.
        :param int dst_port: the destination port number.
        :returns: False when no UDP socket is available, else the socket's
            send result.
        """
        assert not isinstance(packet.addrs.src.host, HostAddrNone), \
            type(packet.addrs.src.host)
        assert not isinstance(packet.addrs.dst.host, HostAddrNone), \
            type(packet.addrs.dst.host)
        assert isinstance(packet, SCIONBasePacket), type(packet)
        assert isinstance(dst_port, int), type(dst_port)
        if not self._udp_sock:
            return False
        return self._udp_sock.send(packet.pack(), (dst, dst_port))

    def send_meta(self, msg, meta, next_hop_port=None):
        """
        Send ``msg`` to the peer described by ``meta``, dispatching on the
        metadata type (TCP, raw socket, or UDP).

        :param next_hop_port: optional (addr, port) override; only valid for
            UDP metadata.
        """
        if isinstance(meta, TCPMetadata):
            assert not next_hop_port, next_hop_port
            return self._send_meta_tcp(msg, meta)
        elif isinstance(meta, SockOnlyMetadata):
            assert not next_hop_port, next_hop_port
            return meta.sock.send(msg)
        elif isinstance(meta, UDPMetadata):
            dst_port = meta.port
        else:
            # NOTE(review): instances generally have no __name__ attribute;
            # type(meta).__name__ was probably intended — confirm.
            logging.error("Unsupported metadata: %s" % meta.__name__)
            return False
        pkt = self._build_packet(meta.host, meta.path, meta.ext_hdrs,
                                 meta.ia, msg, dst_port)
        if not next_hop_port:
            next_hop_port = self.get_first_hop(pkt)
        if next_hop_port == (None, None):
            logging.error("Can't find first hop, dropping packet\n%s", pkt)
            return False
        return self.send(pkt, *next_hop_port)

    def _send_meta_tcp(self, msg, meta):
        """Send over TCP, lazily establishing the connection on first use."""
        if not meta.sock:
            tcp_sock = self._tcp_sock_from_meta(meta)
            meta.sock = tcp_sock
            self._tcp_conns_put(tcp_sock)
        return meta.sock.send_msg(msg.pack())

    def _tcp_sock_from_meta(self, meta):
        """
        Open a TCP connection to the peer in ``meta``; on failure return an
        inactive TCPSocketWrapper instead of raising.
        """
        assert meta.host
        dst = meta.get_addr()
        first_ip, first_port = self._get_first_hop(meta.path, dst)
        active = True
        try:
            # Create low-level TCP socket and connect
            sock = SCIONTCPSocket()
            sock.bind((self.addr, 0))
            sock.connect(dst, meta.port, meta.path, first_ip, first_port,
                         flags=meta.flags)
        except SCIONTCPError:
            log_exception("TCP: connection init error, marking socket inactive")
            sock = None
            active = False
        # Create and return TCPSocketWrapper
        return TCPSocketWrapper(sock, dst, meta.path, active)

    def _tcp_conns_put(self, sock):
        """
        Enqueue a new TCP connection; when the queue is full, evict (and
        close) the oldest connection until the put succeeds.
        """
        dropped = 0
        while True:
            try:
                self._tcp_new_conns.put(sock, block=False)
            except queue.Full:
                # Queue full: drop the oldest waiting connection and retry.
                old_sock = self._tcp_new_conns.get_nowait()
                old_sock.close()
                logging.error("TCP: _tcp_new_conns is full. Closing old socket")
                dropped += 1
            else:
                break
        if dropped > 0:
            logging.warning("%d TCP connection(s) dropped" % dropped)

    def run(self):
        """
        Main routine to receive packets and pass them to
        :func:`handle_request()`.
        """
        self._tcp_start()
        # Receive packets on a daemon thread; process them on this thread.
        threading.Thread(
            target=thread_safety_net, args=(self.packet_recv,),
            name="Elem.packet_recv", daemon=True).start()
        try:
            self._packet_process()
        except SCIONBaseError:
            log_exception("Error processing packet.")
        finally:
            self.stop()

    def packet_put(self, packet, addr, sock):
        """
        Try to put incoming packet in queue
        If queue is full, drop oldest packet in queue
        """
        msg, meta = self._get_msg_meta(packet, addr, sock)
        if msg is None:
            # Unparseable packet (or no payload) - nothing to enqueue.
            return
        self._in_buf_put((msg, meta))

    def _in_buf_put(self, item):
        """
        Put a (msg, meta) tuple into the input buffer, evicting the oldest
        entries when full, and keep the prometheus buffer gauges in sync.
        """
        dropped = 0
        while True:
            try:
                self._in_buf.put(item, block=False)
                if self._labels:
                    PKT_BUF_BYTES.labels(**self._labels).inc(len(item[0]))
            except queue.Full:
                # Buffer full: evict the oldest message and retry the put.
                msg, _ = self._in_buf.get_nowait()
                dropped += 1
                if self._labels:
                    PKTS_DROPPED_TOTAL.labels(**self._labels).inc()
                    PKT_BUF_BYTES.labels(**self._labels).dec(len(msg))
            else:
                break
            finally:
                # Update the queue-size gauge on every attempt, success or not.
                if self._labels:
                    PKT_BUF_TOTAL.labels(**self._labels).set(self._in_buf.qsize())
        if dropped > 0:
            self.total_dropped += dropped
            logging.warning("%d packet(s) dropped (%d total dropped so far)",
                            dropped, self.total_dropped)

    def _get_msg_meta(self, packet, addr, sock):
        """
        Parse a raw packet and build reply metadata from its reversed header.

        :returns: (payload, meta) tuple; payload is None if the packet or its
            payload could not be parsed, meta is None if no metadata could be
            constructed.
        """
        pkt = self._parse_packet(packet)
        if not pkt:
            logging.error("Cannot parse packet:\n%s" % packet)
            return None, None
        # Create metadata:
        rev_pkt = pkt.reversed_copy()
        # Skip OneHopPathExt (if exists)
        exts = []
        for e in rev_pkt.ext_hdrs:
            if not isinstance(e, OneHopPathExt):
                exts.append(e)
        if rev_pkt.l4_hdr.TYPE == L4Proto.UDP:
            meta = UDPMetadata.from_values(ia=rev_pkt.addrs.dst.isd_as,
                                           host=rev_pkt.addrs.dst.host,
                                           path=rev_pkt.path,
                                           ext_hdrs=exts,
                                           port=rev_pkt.l4_hdr.dst_port)
        elif rev_pkt.l4_hdr.TYPE == L4Proto.SCMP:
            meta = SCMPMetadata.from_values(ia=rev_pkt.addrs.dst.isd_as,
                                            host=rev_pkt.addrs.dst.host,
                                            path=rev_pkt.path,
                                            ext_hdrs=exts)
        else:
            logging.error("Cannot create meta for: %s" % pkt)
            return None, None
        # FIXME(PSz): for now it is needed by SIBRA service.
        meta.pkt = pkt
        try:
            pkt.parse_payload()
        except SCIONParseError as e:
            logging.error("Cannot parse payload\n Error: %s\n Pkt: %s", e, pkt)
            # Payload unusable, but meta is still valid for error handling.
            return None, meta
        return pkt.get_payload(), meta

    def handle_accept(self, sock):
        """
        Callback to handle a ready listening socket
        """
        s = sock.accept()
        if not s:
            logging.error("accept failed")
            return
        self._socks.add(s, self.handle_recv)

    def handle_recv(self, sock):
        """
        Callback to handle a ready recving socket
        """
        packet, addr = sock.recv()
        if packet is None:
            # Peer closed the socket - deregister and clean up.
            self._socks.remove(sock)
            sock.close()
            if sock == self._udp_sock:
                # Lost the dispatcher connection; packet_recv will reconnect.
                self._udp_sock = None
                if self._labels:
                    CONNECTED_TO_DISPATCHER.labels(**self._labels).set(0)
            return
        self.packet_put(packet, addr, sock)

    def packet_recv(self):
        """
        Read packets from sockets, and put them into a :class:`queue.Queue`.
        """
        while self.run_flag.is_set():
            if not self._udp_sock:
                # Reconnect to the dispatcher if the UDP socket was lost.
                self._setup_sockets(False)
            for sock, callback in self._socks.select_(timeout=0.1):
                callback(sock)
            self._tcp_socks_update()
        self._socks.close()
        self.stopped_flag.set()

    def _packet_process(self):
        """
        Read packets from a :class:`queue.Queue`, and process them.
""" while self.run_flag.is_set(): try: msg, meta = self._in_buf.get(timeout=1.0) if self._labels: PKT_BUF_BYTES.labels(**self._labels).dec(len(msg)) PKT_BUF_TOTAL.labels(**self._labels).set(self._in_buf.qsize()) self.handle_msg_meta(msg, meta) except queue.Empty: continue def _tcp_start(self): if not self.USE_TCP: return if not self._tcp_sock: logging.warning("TCP: accept socket is unset, port:%d", self._port) return threading.Thread( target=thread_safety_net, args=(self._tcp_accept_loop,), name="Elem._tcp_accept_loop", daemon=True).start() def _tcp_accept_loop(self): while self.run_flag.is_set(): try: logging.debug("TCP: waiting for connections") self._tcp_conns_put(TCPSocketWrapper(*self._tcp_sock.accept())) logging.debug("TCP: accepted connection") except SCIONTCPTimeout: pass except SCIONTCPError: log_exception("TCP: error on accept()") logging.error("TCP: leaving the accept loop") break try: self._tcp_sock.close() except SCIONTCPError: log_exception("TCP: error on closing _tcp_sock") def _tcp_socks_update(self): if not self.USE_TCP: return self._socks.remove_inactive() self._tcp_add_waiting() def _tcp_add_waiting(self): while True: try: self._socks.add(self._tcp_new_conns.get_nowait(), self._tcp_handle_recv) except queue.Empty: break def _tcp_handle_recv(self, sock): """ Callback to handle a ready recving socket """ msg, meta = sock.get_msg_meta() logging.debug("tcp_handle_recv:%s, %s", msg, meta) if msg is None and meta is None: self._socks.remove(sock) sock.close() return if msg: self._in_buf_put((msg, meta)) def _tcp_clean(self): if not hasattr(self, "_tcp_sock") or not self._tcp_sock: return # Close all TCP sockets. while not self._tcp_new_conns.empty(): try: tcp_sock = self._tcp_new_conns.get_nowait() except queue.Empty: break tcp_sock.close() def stop(self): """Shut down the daemon thread.""" # Signal that the thread should stop self.run_flag.clear() # Wait for the thread to finish self.stopped_flag.wait(5) # Close tcp sockets. 
        self._tcp_clean()

    def _quiet_startup(self):
        """True while still inside the startup grace period (mute warnings)."""
        return (time.time() - self._startup) < self.STARTUP_QUIET_PERIOD

    def dns_query_topo(self, qname):
        """
        Look up the addresses of a service's servers.

        Despite the name, no DNS query is performed here: the answer is built
        from the local topology entries only (see the FIXME below about the
        upcoming discovery service).

        :param str qname: Service to query for.
        :returns: List of (addr, port) tuples for the service's servers.
        :raises SCIONServiceLookupError: if the topology has no servers for
            the requested service.
        """
        assert qname in SERVICE_TYPES
        service_map = {
            BEACON_SERVICE: self.topology.beacon_servers,
            CERTIFICATE_SERVICE: self.topology.certificate_servers,
            PATH_SERVICE: self.topology.path_servers,
            SIBRA_SERVICE: self.topology.sibra_servers,
        }
        # Generate fallback from local topology
        results = []
        for srv in service_map[qname]:
            addr, port = srv.public[0]
            results.append((addr, port))
        # FIXME(kormat): replace with new discovery service when that's ready.
        if not results:
            # No results from local topology either
            raise SCIONServiceLookupError("No %s servers found" % qname)
        return results

    def _verify_revocation_for_asm(self, rev_info, as_marking, verify_all=True):
        """
        Verifies a revocation for a given AS marking.

        :param rev_info: The RevocationInfo object.
        :param as_marking: The ASMarking object.
        :param verify_all: If true, verify all PCBMs (including peers),
            otherwise only verify the up/down hop.
        :return: True, if the revocation successfully revokes an upstream
            interface in the AS marking, False otherwise.
        """
        if rev_info.isd_as() != as_marking.isd_as():
            return False
        if not ConnectedHashTree.verify(rev_info, as_marking.p.hashTreeRoot):
            logging.error("Revocation verification failed. %s", rev_info)
            return False
        for pcbm in as_marking.iter_pcbms():
            if rev_info.p.ifID in [pcbm.hof().ingress_if, pcbm.hof().egress_if]:
                return True
            if not verify_all:
                # Only the first PCBM (the up/down hop) is checked.
                break
        return False

    def _build_meta(self, ia=None, host=None, path=None, port=0, reuse=False,
                    one_hop=False):
        """
        Build metadata of this element's default transport type, defaulting
        to the local ISD-AS and an empty path when not specified.
        """
        if ia is None:
            ia = self.addr.isd_as
        if path is None:
            path = SCIONPath()
        if not one_hop:
            return self._DefaultMeta.from_values(ia, host, path, port=port,
                                                 reuse=reuse)
        # One hop path extension in handled in a different way in TCP and UDP
        if self._DefaultMeta == TCPMetadata:
            return TCPMetadata.from_values(ia, host, path, port=port, reuse=reuse,
                                           flags=TCPFlags.ONEHOPPATH)
        return UDPMetadata.from_values(ia, host, path, port=port, reuse=reuse,
                                       ext_hdrs=[OneHopPathExt()])

    def _export_metrics(self, export_addr):
        """
        Starts an HTTP server endpoint for prometheus to scrape.

        :param str export_addr: "addr:port" string; IPv6 addresses may be
            enclosed in square brackets.
        """
        addr, port = export_addr.split(":")
        port = int(port)
        # Strip IPv6 brackets, if present.
        addr = addr.strip("[]")
        logging.info("Exporting metrics on %s", export_addr)
        start_http_server(port, addr=addr)

    def _init_metrics(self):
        """
        Initializes all metrics to 0. Subclasses should initialize their metrics
        here and must call the super method.
        """
        PKT_BUF_TOTAL.labels(**self._labels).set(0)
        PKT_BUF_BYTES.labels(**self._labels).set(0)
        PKTS_DROPPED_TOTAL.labels(**self._labels).inc(0)
        UNV_SEGS_TOTAL.labels(**self._labels).set(0)
        PENDING_TRC_REQS_TOTAL.labels(**self._labels).set(0)
        PENDING_CERT_REQS_TOTAL.labels(**self._labels).set(0)
        CONNECTED_TO_DISPATCHER.labels(**self._labels).set(0)

    def _get_path_via_sciond(self, isd_as, flush=False):
        """
        Request a path to isd_as from SCIOND, retrying until API_TOUT seconds
        have elapsed.

        :param isd_as: destination ISD-AS.
        :param bool flush: if set, ask SCIOND to flush its path cache first.
        :returns: The first path entry's path, or None if none was obtained.
        """
        flags = lib_sciond.PathRequestFlags(flush=flush)
        start = time.time()
        while time.time() - start < API_TOUT:
            try:
                path_entries = lib_sciond.get_paths(isd_as, flags=flags)
            except lib_sciond.SCIONDLibError as e:
                logging.error("Error during path lookup: %s" % e)
                continue
            if path_entries:
                return path_entries[0].path()
        logging.warning("Unable to get path to %s from SCIOND.", isd_as)
        return None