def _sig_toml(self, topo_id, topo):
    """Generate and write the SIG TOML configuration for one AS.

    :param topo_id: topology identifier of the AS (provides file_fmt,
        base_dir and string form).
    :param topo: topology dict; only the "ISD_AS" entry is read here.
    """
    sig_name = 'sig%s' % topo_id.file_fmt()
    # First assigned network for this SIG; its ipv4 becomes the bind IP.
    sig_net = self.args.networks[sig_name][0]
    if self.args.trace:
        level = 'trace'
    else:
        level = 'debug'
    conf = {}
    conf['sig'] = {
        'ID': sig_name,
        'SIGConfig': 'conf/cfg.json',
        'IA': str(topo_id),
        'IP': str(sig_net['ipv4']),
    }
    conf['sd_client'] = {
        'Path': get_default_sciond_path(ISD_AS(topo["ISD_AS"])),
    }
    conf['logging'] = {
        'file': {
            'Level': level,
            'Path': '/share/logs/%s.log' % sig_name,
        },
        'console': {
            'Level': 'error',
        },
    }
    conf['metrics'] = {
        'Prometheus': '0.0.0.0:%s' % SIG_PROM_PORT,
    }
    out_path = os.path.join(
        topo_id.base_dir(self.args.output_dir), sig_name, SIG_CONFIG_NAME)
    write_file(out_path, toml.dumps(conf))
def _ps_conf(self, topo_id, topo, base):
    """Register one docker-compose service per path server of this AS.

    Command-line flags are appended only for the python implementation;
    otherwise the service's 'command' list stays empty.
    """
    if self.args.path_server == 'py':
        image = 'path_py'
    else:
        image = 'path'
    template = {
        'image': docker_image(self.args, image),
        'depends_on': [
            sciond_svc_name(topo_id),
            'scion_disp_%s' % topo_id.file_fmt(),
        ],
        'environment': {
            'SU_EXEC_USERSPEC': self.user_spec,
        },
        'volumes': self._std_vol(topo_id),
        'command': [],
    }
    for elem_id, elem in topo.get("PathService", {}).items():
        svc = copy.deepcopy(template)
        svc['container_name'] = self.prefix + elem_id
        # Mount the element's generated config read-only.
        svc['volumes'].append('%s:/share/conf:ro' % os.path.join(base, elem_id))
        if self.args.path_server == 'py':
            svc['command'] += [
                '--spki_cache_dir=cache',
                '--prom=%s' % prom_addr_infra(elem_id, elem, self.args.port_gen),
                '--sciond_path=%s' % get_default_sciond_path(ISD_AS(topo["ISD_AS"])),
                elem_id,
                'conf',
            ]
        self.dc_conf['services']['scion_%s' % elem_id] = svc
def _ps_conf(self, topo_id, topo, base):
    """Register one docker-compose service per path server (legacy layout).

    All path servers share the same bind mounts; the py variant additionally
    gets a --spki_cache_dir flag.
    """
    if self.ps == 'py':
        image = 'scion_path_py'
    else:
        image = 'scion_path'
    template = {
        'image': image,
        'restart': 'always',
        'depends_on': [self._sciond_name(topo_id), 'dispatcher', 'zookeeper'],
        'environment': {
            'SU_EXEC_USERSPEC': self.user_spec,
        },
        'volumes': [
            '/etc/passwd:/etc/passwd:ro',
            '/etc/group:/etc/group:ro',
            '/run/shm/dispatcher:/run/shm/dispatcher:rw',
            '/run/shm/sciond:/run/shm/sciond:rw',
            self.output_base + '/gen-cache:/share/cache:rw',
            self.output_base + '/logs:/share/logs:rw',
        ],
        'command': [],
    }
    for elem_id, elem in topo.get("PathService", {}).items():
        svc = copy.deepcopy(template)
        svc['container_name'] = elem_id
        # Mount the element's generated config read-only.
        svc['volumes'].append('%s:/share/conf:ro' % os.path.join(base, elem_id))
        flags = svc['command']
        if self.ps == 'py':
            flags.append('--spki_cache_dir=cache')
        flags.append('--prom=%s' % _prom_addr_infra(elem))
        flags.append(
            '--sciond_path=%s' % get_default_sciond_path(ISD_AS(topo["ISD_AS"])))
        flags.append(elem_id)
        flags.append('conf')
        self.dc_conf['services'][elem_id] = svc
def _sig_toml(self, topo_id, topo, base):
    """Generate and write the SIG TOML config for this AS.

    NOTE(review): the ``base`` argument is immediately overwritten below and
    is kept only for signature compatibility with callers.
    """
    sig_name = 'sig_%s' % topo_id.file_fmt()
    # First assigned network for this SIG; its ipv4 becomes the bind IP.
    sig_net = self.args.networks[sig_name][0]
    base = topo_id.base_dir(self.args.output_dir)
    if self.args.trace:
        level = 'trace'
    else:
        level = 'debug'
    conf = {
        'sig': {
            'ID': sig_name,
            'SIGConfig': 'conf/cfg.json',
            'IA': str(topo_id),
            'IP': str(sig_net['ipv4']),
        },
        'sd_client': {
            'Path': get_default_sciond_path(ISD_AS(topo["ISD_AS"])),
        },
        'logging': {
            'file': {
                'Level': level,
                'Path': '/share/logs/%s.log' % sig_name,
            },
            'console': {
                'Level': 'error',
            },
        },
    }
    write_file(os.path.join(base, "sig/sig.toml"), toml.dumps(conf))
def _build_cs_conf(self, topo_id, ia, base, name):
    """Return the certificate-server TOML config dict for one instance.

    :param topo_id: topology identifier, passed to the sciond path helper.
    :param ia: ISD-AS (unused here; kept for a uniform builder signature).
    :param base: on-disk base directory for this AS's generated config.
    :param name: element id of the certificate server instance.
    :returns: nested dict ready for TOML serialization.
    """
    # Inside docker the config is always mounted at /share/conf.
    if self.args.docker:
        config_dir = '/share/conf'
    else:
        config_dir = os.path.join(base, name)
    conf = {}
    conf['general'] = {
        'ID': name,
        'ConfigDir': config_dir,
    }
    conf['sd_client'] = {
        'Path': get_default_sciond_path(topo_id),
    }
    conf['logging'] = {
        'file': {
            'Path': os.path.join(self.log_dir, "%s.log" % name),
            'Level': self.log_level,
        },
        'console': {
            'Level': 'crit',
        },
    }
    conf['TrustDB'] = {
        'Backend': 'sqlite',
        'Connection': os.path.join(self.db_dir, '%s.trust.db' % name),
    }
    conf['infra'] = {
        'Type': "CS",
    }
    conf['cs'] = {
        'LeafReissueTime': "6h",
        'IssuerReissueTime': "3d",
        'ReissueRate': "10s",
        'ReissueTimeout': "5s",
    }
    return conf
def _bs_conf(self, topo_id, topo, base):
    """Register one docker-compose service per beacon server of this AS.

    The beacon server shares the dispatcher container's network namespace.
    """
    template = {
        'image': docker_image(self.args, 'beacon_py'),
        'depends_on': [
            sciond_svc_name(topo_id),
            'scion_disp_%s' % topo_id.file_fmt(),
        ],
        'environment': {
            'SU_EXEC_USERSPEC': self.user_spec,
        },
        'network_mode': 'service:scion_disp_%s' % topo_id.file_fmt(),
        'volumes': self._std_vol(topo_id),
        'command': ['--spki_cache_dir=cache'],
    }
    for elem_id, elem in topo.get("BeaconService", {}).items():
        svc = copy.deepcopy(template)
        svc['container_name'] = self.prefix + elem_id
        # Mount the element's generated config read-only.
        svc['volumes'].append('%s:/share/conf:ro' % os.path.join(base, elem_id))
        svc['command'] += [
            '--prom=%s' % prom_addr_infra(self.args.docker, elem_id, elem,
                                          BS_PROM_PORT),
            '--sciond_path=%s' % get_default_sciond_path(ISD_AS(topo["ISD_AS"])),
            elem_id,
            'conf',
        ]
        self.dc_conf['services']['scion_%s' % elem_id] = svc
def _build_cs_conf(self, topo_id, ia, base, name, infra_elem):
    """Return the certificate-server TOML config dict for one instance.

    Delegates the shared sections (logging, trust DB, discovery, metrics)
    to the common entry helpers.

    :param topo_id: topology identifier, passed to the sciond path helper.
    :param ia: ISD-AS (unused here; kept for a uniform builder signature).
    :param base: on-disk base directory for this AS's generated config.
    :param name: element id of the certificate server instance.
    :param infra_elem: topology entry of this element (for the metrics addr).
    :returns: nested dict ready for TOML serialization.
    """
    # Inside docker the config is always mounted at /share/conf.
    if self.args.docker:
        config_dir = '/share/conf'
    else:
        config_dir = os.path.join(base, name)
    conf = {}
    conf['general'] = {
        'ID': name,
        'ConfigDir': config_dir,
    }
    conf['sd_client'] = {
        'Path': get_default_sciond_path(topo_id),
    }
    conf['logging'] = self._log_entry(name)
    conf['TrustDB'] = trust_db_conf_entry(self.args, name)
    conf['infra'] = {
        'Type': "CS",
    }
    conf['discovery'] = self._discovery_entry()
    conf['cs'] = {
        'LeafReissueLeadTime': "6h",
        'IssuerReissueLeadTime': "3d",
        'ReissueRate': "10s",
        'ReissueTimeout': "5s",
    }
    conf['metrics'] = self._metrics_entry(name, infra_elem, CS_PROM_PORT)
    return conf
def _build_control_service_conf(self, topo_id, ia, base, name, infra_elem):
    """Return the TOML config dict for a combined control-service instance.

    Covers beacon-, certificate- and path-server roles in one config
    (trust DB, beacon DB, CS reissuance settings and PS path DB).

    :param topo_id: topology identifier, passed to the sciond path helper.
    :param ia: ISD-AS (unused here; kept for a uniform builder signature).
    :param base: on-disk base directory for this AS's generated config.
    :param name: element id of the control service instance.
    :param infra_elem: topology entry of this element (metrics/quic addrs).
    :returns: nested dict ready for TOML serialization.
    """
    # Inside docker the config is always mounted at /share/conf.
    if self.args.docker:
        config_dir = '/share/conf'
    else:
        config_dir = os.path.join(base, name)
    conf = {}
    conf['general'] = {
        'ID': name,
        'ConfigDir': config_dir,
        'ReconnectToDispatcher': True,
    }
    conf['logging'] = self._log_entry(name)
    conf['trustDB'] = trust_db_conf_entry(self.args, name)
    conf['beaconDB'] = beacon_db_conf_entry(self.args, name)
    conf['discovery'] = self._discovery_entry()
    conf['tracing'] = self._tracing_entry()
    conf['metrics'] = self._metrics_entry(name, infra_elem, BS_PROM_PORT)
    conf['quic'] = self._quic_conf_entry(BS_QUIC_PORT, self.args.svcfrac,
                                         infra_elem)
    conf['sd_client'] = {
        'Path': get_default_sciond_path(topo_id),
    }
    conf['cs'] = {
        'LeafReissueLeadTime': "6h",
        'IssuerReissueLeadTime': "3d",
        'ReissueRate': "10s",
        'ReissueTimeout': "5s",
    }
    conf['ps'] = {
        'pathDB': {
            'Backend': 'sqlite',
            'Connection': os.path.join(self.db_dir, '%s.path.db' % name),
        },
        'SegSync': True,
    }
    return conf
def _cs_conf(self, topo_id, topo, base):
    """Register one docker-compose service per certificate server.

    Command-line flags are appended only for the python implementation;
    otherwise the service's 'command' list stays empty.
    """
    if self.args.cert_server == 'py':
        image = 'cert_py'
    else:
        image = 'cert'
    template = {
        'image': docker_image(self.args, image),
        'depends_on': [
            sciond_svc_name(topo_id),
            'scion_disp_%s' % topo_id.file_fmt(),
        ],
        'environment': {
            'SU_EXEC_USERSPEC': self.user_spec,
        },
        'network_mode': 'service:scion_disp_%s' % topo_id.file_fmt(),
        'volumes': self._std_vol(topo_id),
        'command': [],
    }
    for elem_id, elem in topo.get("CertificateService", {}).items():
        svc = copy.deepcopy(template)
        svc['container_name'] = self.prefix + elem_id
        # Mount the element's generated config read-only.
        svc['volumes'].append('%s:/share/conf:ro' % os.path.join(base, elem_id))
        if self.args.cert_server == 'py':
            sciond = get_default_sciond_path(ISD_AS(topo["ISD_AS"]))
            svc['command'] += [
                '--spki_cache_dir=cache',
                '--prom=[0.0.0.0]:%s' % CS_PROM_PORT,
                '--sciond_path=%s' % sciond,
                elem_id,
                'conf',
            ]
        self.dc_conf['services']['scion_%s' % elem_id] = svc
def _std_entries(self, topo, topo_key, cmd, base):
    """Build (elem_id, argv) pairs for every element under ``topo_key``.

    :param topo: topology dict; "ISD_AS" and the ``topo_key`` section
        are read.
    :param topo_key: section name, e.g. "BeaconService".
    :param cmd: executable to prepend to each argument list.
    :param base: base directory; each element's conf dir lives under it.
    :returns: list of (element id, command-line list) tuples.
    """
    result = []
    for elem_id, elem in topo.get(topo_key, {}).items():
        argv = [
            cmd,
            "--prom", prom_addr_infra(elem_id, elem, self.args.port_gen),
            "--sciond_path", get_default_sciond_path(ISD_AS(topo["ISD_AS"])),
            elem_id,
            os.path.join(base, elem_id),
        ]
        result.append((elem_id, argv))
    return result
def print_as_viewer_info(addr):
    '''
    Attempt sciond connection if needed, and print requested AS data.

    Reads module-level state: ``s_isd_as`` / ``d_isd_as`` (source and
    destination ISD-AS), ``args`` (parsed CLI flags) and ``connector``
    (per-AS sciond connector cache) — presumably set by the CLI entry
    point; TODO(review) confirm.
    :param addr: Optional IP Address for sciond socket binding when not
        localhost.
    '''
    try:
        # init connection to sciond
        conf_dir = "%s/%s/ISD%s/AS%s/endhost" % (
            SCION_ROOT, GEN_PATH, s_isd_as.isd_str(), s_isd_as.as_file_fmt())
        # Prefer the AS-specific socket; fall back to the default one if it
        # does not exist on disk.
        sock_file = get_default_sciond_path(s_isd_as)
        if not pathlib.Path(sock_file).exists():
            sock_file = get_default_sciond_path(None)
        connector[s_isd_as] = lib_sciond.init(sock_file)
        logging.info(connector[s_isd_as]._api_addr)
        try:  # test if sciond is already running for this AS
            logging.info("Starting sciond at %s" % sock_file)
            lib_sciond.get_as_info(connector=connector[s_isd_as])
        except (SCIONDResponseError) as err:
            # sciond answered but with an error: nothing more we can do.
            logging.error("%s: %s" % (err.__class__.__name__, err))
            return
        except (SCIONDConnectionError, FileNotFoundError) as err:
            logging.warning("%s: %s" % (err.__class__.__name__, err))
            # need to launch sciond, wait for uptime
            launch_sciond(sock_file, conf_dir, addr, s_isd_as)
        # Print each data set the user asked for on the command line.
        if args.t:  # as topology
            print_as_topology(s_isd_as, connector)
        if args.p:  # announced paths
            print_paths(s_isd_as, d_isd_as, connector)
        if args.c:  # config
            print_yml(os.path.join(conf_dir, AS_CONF_FILE))
        if args.pp:  # path policy
            print_yml(os.path.join(conf_dir, PATH_POLICY_FILE))
        if args.trc:  # TRC
            print_json_files(findCerts(conf_dir, ".trc"))
        if args.crt:  # cert chain
            print_json_files(findCerts(conf_dir, ".crt"))
        if args.s:  # segments
            print_segments_summary(s_isd_as, connector)
    except (SCIONBaseError, AttributeError) as err:
        # Catch-all boundary: log and return rather than crash the viewer.
        logging.error("%s: %s" % (err.__class__.__name__, err))
def __init__(self, conf_dir, addr, api_addr, run_local_api=False, port=None,
             spki_cache_dir=GEN_CACHE_PATH, prom_export=None,
             delete_sock=False):
    """
    Initialize an instance of the class SCIONDaemon.

    :param conf_dir: configuration directory for this daemon.
    :param addr: address used (with ``port``) as the daemon's public
        address for the base-class constructor.
    :param api_addr: client-API socket address; when falsy, the default
        sciond path is used instead.
    :param run_local_api: when True, bind and serve the client API socket.
    :param port: port paired with ``addr`` for the public address.
    :param spki_cache_dir: cache dir for TRCs and certificate chains.
    :param prom_export: prometheus export address, or None to disable.
    :param delete_sock: when True, remove a stale API socket file first.
    """
    super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
                     prom_export=prom_export, public=[(addr, port)])
    # Per-segment-type metric labels; None when the base class built no
    # label set.
    up_labels = {**self._labels, "type": "up"} if self._labels else None
    down_labels = {**self._labels, "type": "down"} if self._labels else None
    core_labels = {**self._labels, "type": "core"} if self._labels else None
    # Separate caches for up-, down- and core-segments, same TTL for all.
    self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                     labels=up_labels)
    self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                       labels=down_labels)
    self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL,
                                       labels=core_labels)
    self.rev_cache = RevCache()
    # Keep track of requested paths.
    self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
    self.req_path_lock = threading.Lock()
    self._api_sock = None
    self.daemon_thread = None
    os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
    self.api_addr = (api_addr or get_default_sciond_path())
    if delete_sock:
        # Best-effort removal of a stale socket; a missing file is fine
        # (ENOENT is deliberately ignored).
        try:
            os.remove(self.api_addr)
        except OSError as e:
            if e.errno != errno.ENOENT:
                logging.error("Could not delete socket %s: %s" %
                              (self.api_addr, e))
    # Dispatch table: control payload (class, type) -> handler method.
    self.CTRL_PLD_CLASS_MAP = {
        PayloadClass.PATH: {
            PMT.REPLY: self.handle_path_reply,
            PMT.REVOCATION: self.handle_revocation,
        },
        PayloadClass.CERT: {
            CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
            CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
            CertMgmtType.TRC_REPLY: self.process_trc_reply,
            CertMgmtType.TRC_REQ: self.process_trc_request,
        },
    }
    # Dispatch table for SCMP messages (interface revocations).
    self.SCMP_PLD_CLASS_MAP = {
        SCMPClass.PATH:
            {SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
    }
    if run_local_api:
        self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
        self._socks.add(self._api_sock, self.handle_accept)
def main_default(type_, local_type=None, trace_=False, **kwargs):
    """
    Default main() method. Parses cmdline args, setups up signal handling,
    logging, creates the appropriate object and runs it.

    :param type type_: Primary type to instantiate.
    :param type local_type:
        If not `None`, load the topology to check if this is a core or local
        AS. If it's a core AS, instantiate the primary type, otherwise the
        local type.
    :param bool trace_: Should a periodic thread stacktrace report be created?
    """
    handle_signals()
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', default="logs/",
                        help='Log dir (Default: logs/)')
    parser.add_argument(
        '--spki_cache_dir', default="gen-cache/",
        help='Cache dir for SCION TRCs and cert chains (Default: gen-cache/)')
    parser.add_argument('--prom', type=str,
                        help='Address to export prometheus metrics on')
    parser.add_argument('--sciond_path', type=str,
                        help='Sciond socket path '
                             '(Default: %s)' % get_default_sciond_path())
    parser.add_argument('server_id', help='Server identifier')
    parser.add_argument('conf_dir', nargs='?', default='.',
                        help='Configuration directory (Default: ./)')
    args = parser.parse_args()
    init_logging(os.path.join(args.log_dir, args.server_id))

    # The constructor keyword arguments are identical on every branch, so
    # build them once instead of repeating them three times.
    inst_kwargs = dict(prom_export=args.prom, sciond_path=args.sciond_path,
                       spki_cache_dir=args.spki_cache_dir, **kwargs)
    if local_type is None:
        cls = type_
    else:
        # Load the topology to check if this is a core AD or not.
        topo = Topology.from_file(os.path.join(args.conf_dir, TOPO_FILE))
        cls = type_ if topo.is_core_as else local_type
    inst = cls(args.server_id, args.conf_dir, **inst_kwargs)

    if trace_:
        trace(inst.id)
    logging.info("Started %s", args.server_id)
    inst.run()
def index(request):
    '''
    Main index handler for index.html for main visualization page.
    Validates parameters, request scion data, returns formatted response.
    :param request: HTML request object containing url parameters.
    '''
    p = {}  # return param dictionary
    p['tab'] = set_param(request, 'tab', 'tab-pathtopo')
    p['data'] = set_param(request, 'data', 'sdapi')
    p['addr'] = set_param(request, 'addr', '')
    p['src'] = set_param(request, 'src', '')
    p['dst'] = set_param(request, 'dst', '')
    p['mp'] = set_param(request, 'mp', '5')
    p['err'] = ''
    if (p['src'] == '' and p['dst'] == ''):
        # use endhost gen/ia if no host specified
        if (p['src'] == ''):
            ia_file = "%s/%s/ia" % (SCION_ROOT, GEN_PATH)
            try:
                with open(ia_file, 'r') as fin:
                    # load and reformat
                    p['src'] = str(ISD_AS(fin.read().strip()))
            except (FileNotFoundError) as err:
                logging.warning("%s: %s" % (err.__class__.__name__, err))
                return fmt_err(request, p)
    s_isd_as = ISD_AS(p['src'])
    d_isd_as = ISD_AS(p['dst'])
    p['src'], p['dst'] = str(s_isd_as), str(d_isd_as)  # reformat
    # NOTE(review): chained assignment aliases all three names to the SAME
    # empty list; harmless only because they are rebound before use.
    csegs = dsegs = usegs = []
    paths = ''
    logging.info("Requesting sciond data from %s to %s" % (s_isd_as, d_isd_as))
    conf_dir = "%s/%s/ISD%s/AS%s/endhost" % (
        SCION_ROOT, GEN_PATH, s_isd_as.isd_str(), s_isd_as.as_file_fmt())
    # Prefer the AS-specific sciond socket; fall back to the default path
    # when it does not exist on disk.
    sock_file = get_default_sciond_path(s_isd_as)
    if not pathlib.Path(sock_file).exists():
        sock_file = get_default_sciond_path(None)
    try:
        if (p['data'] == 'sdapi'):
            connector[s_isd_as] = lib_sciond.init(sock_file)
            logging.info(connector[s_isd_as]._api_addr)
            try:  # test if sciond is already running for this AS
                logging.info("Testing sciond at %s" % sock_file)
                lib_sciond.get_as_info(connector=connector[s_isd_as])
            except (SCIONDResponseError) as err:
                # sciond answered but with an error: report and bail out.
                p['err'] = "%s: %s" % (err.__class__.__name__, err)
                return fmt_err(request, p)
            except (SCIONDConnectionError, FileNotFoundError) as err:
                logging.warning("%s: %s" % (err.__class__.__name__, err))
                # need to launch sciond, wait for uptime
                launch_sciond(sock_file, conf_dir, p['addr'], s_isd_as)
            if (p['dst'] != ''):  # PATHS
                try:
                    # get paths and keep segments
                    flags = lib_sciond.PathRequestFlags(flush=False,
                                                        sibra=False)
                    paths = lib_sciond.get_paths(d_isd_as,
                                                 max_paths=int(p['mp']),
                                                 flags=flags,
                                                 connector=connector[s_isd_as])
                    csegs = lib_sciond.get_segtype_hops(
                        PST.CORE, connector=connector[s_isd_as])
                    dsegs = lib_sciond.get_segtype_hops(
                        PST.DOWN, connector=connector[s_isd_as])
                    usegs = lib_sciond.get_segtype_hops(
                        PST.UP, connector=connector[s_isd_as])
                    # refresh old segments for next call
                    flags = lib_sciond.PathRequestFlags(flush=True,
                                                        sibra=False)
                    lib_sciond.get_paths(d_isd_as, max_paths=int(p['mp']),
                                         flags=flags,
                                         connector=connector[s_isd_as])
                except (SCIONDResponseError, SCIONDConnectionError,
                        AttributeError) as err:
                    # AttributeError handles backward-compatability
                    logging.error("%s: %s" % (err.__class__.__name__, err))
                    p['err'] = str(err)
            p['json_as_topo'] = json.dumps(
                get_json_as_topology_sciond(connector[s_isd_as], paths))
            p['json_trc'] = ("TRC information for sciond not yet implemented.")
            p['json_crt'] = (
                "Certificate information for sciond not yet implemented.")
        elif (p['data'] == 'file'):
            # Static mode: read topology/certs straight from the gen dir.
            t = Topology.from_file(os.path.join(conf_dir, TOPO_FILE))
            topo = organize_topo(t)
            p['json_as_topo'] = json.dumps(get_json_as_topology(t, topo))
            p['json_trc'] = html_jsonfile(findCerts(conf_dir, ".trc"))
            p['json_crt'] = html_jsonfile(findCerts(conf_dir, ".crt"))
        # Render the path/segment views for either data source.
        p['path_info'] = get_as_view_html(paths, csegs, usegs, dsegs)
        p['json_path_topo'] = json.dumps(
            get_json_path_segs(paths, csegs, usegs, dsegs))
        p['json_seg_topo'] = json.dumps(
            get_json_all_segments(csegs, usegs, dsegs))
        p['json_paths'] = json.dumps(get_json_paths(paths))
    except (SCIONBaseError) as err:
        p['err'] = "%s: %s" % (err.__class__.__name__, err)
        return fmt_err(request, p)
    return render(request, 'asviz/index.html', p)