def _resolve_addresses(self, cfg, pub_addrinfo, priv_addrinfo):
    """Resolve public and private interface addresses.

    A statically configured address is read straight from the RDF config;
    a DHCP-configured address comes from the supplied dhcp address-info
    object.  Returns a (pub_addr, priv_addr) tuple; either element is None
    when the corresponding interface is not configured at all.
    """
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)

    def _one_address(iface, addrinfo):
        # Shared static-vs-dhcp resolution for a single interface.
        if iface is None:
            return None
        addr = iface.getS(ns.address)
        if addr.hasType(ns.StaticAddress):
            return addr.getS(ns.address, rdf.IPv4AddressSubnet)
        elif addr.hasType(ns.DhcpAddress):
            return addrinfo.address
        else:
            raise Exception('invalid address variant')

    # private resolved first, matching the original evaluation order
    priv_addr = _one_address(priv_if, priv_addrinfo)
    pub_addr = _one_address(pub_if, pub_addrinfo)
    return pub_addr, priv_addr
def _create_ppp_scripts(self, cfg):
    """Create PPP scripts (ip-pre-up, ip-up, ip-down) as strings."""
    ppp_script_fmt = textwrap.dedent("""\
    #!/usr/bin/python
    from codebay.l2tpserver import pppscripts
    try:
        s = pppscripts.PppScripts(name=%(name)r,
                                  public_interface=%(pubif)r,
                                  private_interface=%(privif)r,
                                  proxyarp_interface=%(proxyif)r)
        s.%(funcname)s()
    except:
        from codebay.common import logger
        import sys
        _log = logger.get('l2tpserver.pppscripts.%(name)s')
        _log.exception('failed')
        sys.exit(1)
    """)

    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)

    # One (dict key, script name, PppScripts method) triple per script.
    script_specs = [('ip-pre-up', 'ppp-ip-pre-up', 'ppp_ip_pre_up'),
                    ('ip-up', 'ppp-ip-up', 'ppp_ip_up'),
                    ('ip-down', 'ppp-ip-down', 'ppp_ip_down')]

    scripts = {}
    for key, script_name, funcname in script_specs:
        scripts[key] = ppp_script_fmt % {'name': script_name,
                                         'funcname': funcname,
                                         'pubif': pub_iface_name,
                                         'privif': priv_iface_name,      # may be None, %r handles it
                                         'proxyif': proxyarp_interface}  # may be None, %r handles it
    return scripts
def resolve(self, cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo):
    """Resolve full runtime network information from config plus DHCP results.

    Populates interface, DNS, route and PPP attributes on self.  Raises if
    no DNS servers could be resolved.
    """
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)

    self._pub_addrinfo = pub_dhcpaddrinfo
    self._priv_addrinfo = priv_dhcpaddrinfo

    pub_addr, priv_addr = self._resolve_addresses(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)

    def _make_resolved_iface(rdf_iface, address, device):
        # Build one ResolvedInterface value object.
        ri = ResolvedInterface()
        ri.address = address
        ri.device = device
        ri.rdf_interface = rdf_iface
        return ri

    if pub_iface is not None:
        self.public_interface = _make_resolved_iface(pub_iface, pub_addr, pub_iface_name)
    if priv_iface is not None:
        self.private_interface = _make_resolved_iface(priv_iface, priv_addr, priv_iface_name)

    self.dns_servers = self._resolve_dns_servers(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)
    self.gateway_routes = self._resolve_routes(
        cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo,
        net_cfg.getS(ns.gatewayRoutes, rdf.Seq(rdf.Type(ns.Route))))
    self.client_routes = self._resolve_routes(
        cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo,
        net_cfg.getS(ns.clientRoutes, rdf.Seq(rdf.Type(ns.Route))))
    self.ppp_dns_servers, self.ppp_wins_servers = self._resolve_ppp_dns_wins_servers(
        cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)
    self.ppp_forced_router = self._resolve_ppp_forced_router(
        cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)

    # XXX: post checks?
    if len(self.dns_servers) == 0:
        # XXX: this should be converted to a more useful exception
        # XXX: for a 'lenient' startup (e.g. for sending e-mail to fixed IP
        # address SMTP server) this check is too strict
        raise Exception('no dns servers')
def _create_ppp_scripts(self, cfg):
    """Create PPP scripts (ip-pre-up, ip-up, ip-down) as strings."""
    template = textwrap.dedent(
        """\
    #!/usr/bin/python
    from codebay.l2tpserver import pppscripts
    try:
        s = pppscripts.PppScripts(name=%(name)r,
                                  public_interface=%(pubif)r,
                                  private_interface=%(privif)r,
                                  proxyarp_interface=%(proxyif)r)
        s.%(funcname)s()
    except:
        from codebay.common import logger
        import sys
        _log = logger.get('l2tpserver.pppscripts.%(name)s')
        _log.exception('failed')
        sys.exit(1)
    """
    )

    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)

    def _render(script_name, funcname):
        # Fill the common template for one of the three ppp hook scripts.
        # priv/proxyarp interface names may be None; %r renders that safely.
        return template % {
            "name": script_name,
            "funcname": funcname,
            "pubif": pub_iface_name,
            "privif": priv_iface_name,
            "proxyif": proxyarp_interface,
        }

    return {
        "ip-pre-up": _render("ppp-ip-pre-up", "ppp_ip_pre_up"),
        "ip-up": _render("ppp-ip-up", "ppp_ip_up"),
        "ip-down": _render("ppp-ip-down", "ppp_ip_down"),
    }
def _resolve_dns_servers(self, cfg, pub, priv):
    """Determine DNS server list by resolving configured and DHCP-obtained value.

    Returns a list of ResolvedDnsServer.  For the static variant the list
    comes from configuration; for the DHCP variant it comes from the pub or
    priv dhcp address-info object (empty list if that object is None).
    Raises on an unknown config variant or unknown DHCP interface.

    Improvements over the original: the duplicated public/private DHCP
    branch bodies are folded into one code path, and the unreachable
    trailing 'internal error' raise (every branch already returns or
    raises) is removed.
    """
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))

    dns = net_cfg.getS(ns.dnsServers)

    def _make_server(address, from_dhcp, dhcp_iface):
        # Build one ResolvedDnsServer entry; rdf_server_list always points
        # at the dnsServers config node.
        srv = ResolvedDnsServer()
        srv.address = address
        srv.rdf_server_list = dns
        srv.from_dhcp = from_dhcp
        srv.from_dhcp_rdf_interface = dhcp_iface
        return srv

    if dns.hasType(ns.StaticDnsServers):
        _log.debug('dns servers from configured data')
        return [_make_server(i.getS(ns.address, rdf.IPv4Address), False, None)
                for i in dns.getS(ns.addressList, rdf.Seq(rdf.Type(ns.DnsServer)))]
    elif dns.hasType(ns.DhcpDnsServers):
        iface = dns.getS(ns.interface, rdf.Type(ns.NetworkInterface))
        if iface == pub_if:
            _log.debug('dns servers from dhcp public')
            addrinfo, rdf_iface = pub, pub_if
        elif iface == priv_if:
            _log.debug('dns servers from dhcp private')
            addrinfo, rdf_iface = priv, priv_if
        else:
            raise Exception('unknown interface in dnsServers')
        if addrinfo is None:
            # interface configured for DHCP but no lease info available
            return []
        # addrinfo.dns_servers elements are IPv4Address values
        return [_make_server(a, True, rdf_iface) for a in addrinfo.dns_servers]
    else:
        raise Exception('unknown dnsServers variant')
def _resolve_ppp_forced_router(self, cfg, pub_addrinfo, priv_addrinfo):
    """Resolve the optional PPP forced router setting.

    Returns the resolved route, or None when no pppForcedRouter is
    configured.
    """
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))

    # forced router is optional config; absence means feature disabled
    if not net_cfg.hasS(ns.pppForcedRouter):
        return None

    forced = net_cfg.getS(ns.pppForcedRouter, rdf.Type(ns.PppForcedRouter))
    return self._resolve_one_route(forced, pub_if, priv_if,
                                   pub_addrinfo, priv_addrinfo)
def prepare_interfaces(self, cfg):
    """Configure interfaces up without IP addresses."""
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (pub_mtu, priv_mtu) = helpers.get_iface_mtus(cfg)

    # disable reverse-path filtering globally before bringing interfaces up
    self.set_rp_filter('all', False)

    for iface, iface_name, mtu in [(pub_iface, pub_iface_name, pub_mtu),
                                   (priv_iface, priv_iface_name, priv_mtu)]:
        if iface is not None:
            self._prepare_interface(iface_name, mtu)
def check_interface_existences(self, cfg):
    """Check existence of interfaces.

    Returns True when every configured interface exists, False otherwise.
    """
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)

    for iface, iface_name in ((pub_iface, pub_iface_name),
                              (priv_iface, priv_iface_name)):
        # unconfigured interfaces (None) are skipped, not treated as missing
        if iface is not None and not self._check_interface_existence(iface_name):
            return False
    return True
def resolve(self, cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo):
    """Resolve full runtime network information from config plus DHCP results.

    Populates interface, DNS, route and PPP attributes on self.  Raises if
    no DNS servers could be resolved.
    """
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    (pub_rdf, pub_dev), (priv_rdf, priv_dev) = helpers.get_ifaces(cfg)

    # keep raw dhcp info around for later use
    self._pub_addrinfo = pub_dhcpaddrinfo
    self._priv_addrinfo = priv_dhcpaddrinfo

    pub_addr, priv_addr = self._resolve_addresses(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)

    if pub_rdf is not None:
        iface = ResolvedInterface()
        iface.address = pub_addr
        iface.device = pub_dev
        iface.rdf_interface = pub_rdf
        self.public_interface = iface
    if priv_rdf is not None:
        iface = ResolvedInterface()
        iface.address = priv_addr
        iface.device = priv_dev
        iface.rdf_interface = priv_rdf
        self.private_interface = iface

    self.dns_servers = self._resolve_dns_servers(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)

    gateway_seq = net_cfg.getS(ns.gatewayRoutes, rdf.Seq(rdf.Type(ns.Route)))
    client_seq = net_cfg.getS(ns.clientRoutes, rdf.Seq(rdf.Type(ns.Route)))
    self.gateway_routes = self._resolve_routes(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo, gateway_seq)
    self.client_routes = self._resolve_routes(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo, client_seq)

    self.ppp_dns_servers, self.ppp_wins_servers = \
        self._resolve_ppp_dns_wins_servers(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)
    self.ppp_forced_router = \
        self._resolve_ppp_forced_router(cfg, pub_dhcpaddrinfo, priv_dhcpaddrinfo)

    # XXX: post checks?
    if not self.dns_servers:
        # XXX: this should be converted to a more useful exception
        # XXX: for a 'lenient' startup (e.g. for sending e-mail to fixed IP
        # address SMTP server) this check is too strict
        raise Exception('no dns servers')
def _create_config(self, cfg, resinfo, timeout, retry, initial_interval, select_timeout, importpath):
    """Create dhclient configuration files.

    Builds the dhclient.conf contents, the dhclient hook script and an
    (empty) initial leases file, storing them in self.configs for later
    writing.  Raises when neither interface is configured for DHCP.

    Fixes over the original:
      * the do_import line was built from adjacent string literals
        ('...split(' ')...') which Python concatenates into ".split()" --
        the generated script now contains the intended ".split(' ')";
      * octal modes use the 0oNNN form (valid on Python 2.6+ and 3.x).
    """
    args = ''
    self.ifaces = []
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)

    # args becomes the DhcpScript(...) argument list: interface name or None
    # for each of public, private (in that order)
    if helpers.is_dhcp_interface(pub_iface):
        self.ifaces.append(pub_iface_name)
        args += '%r' % pub_iface_name
    else:
        args += '%r' % None
    if helpers.is_dhcp_interface(priv_iface):
        self.ifaces.append(priv_iface_name)
        args += ', %r' % priv_iface_name
    else:
        args += ', %r' % None

    if len(self.ifaces) == 0:
        raise Exception('Expected at least one configured DHCP interface, but found none.')

    conf = textwrap.dedent("""\
    # - automatically created file, do not modify.
    timeout %(timeout)s;
    retry %(retry)s;
    initial-interval %(initial_interval)s;
    # select-timeout %(select_timeout)s;
    send dhcp-lease-time 28800;  # default is only 3600 seconds
    request subnet-mask, routers, domain-name-servers, netbios-name-servers;
    require subnet-mask;
    """) % {'timeout': timeout,
            'retry': retry,
            'initial_interval': initial_interval,
            'select_timeout': select_timeout}

    do_import = ''
    if importpath != 'system':
        # BUGFIX: was written as adjacent literals which collapsed to
        # ".split()"; the generated line must split on single spaces.
        do_import = "sys.path = \"%s\".split(' ') + sys.path" % importpath

    script = textwrap.dedent("""\
    #!/usr/bin/python
    import sys
    %(do_import)s
    try:
        from codebay.l2tpserver import dhcpscript
        d = dhcpscript.DhcpScript(%(args)s)
        d.run()
    except:
        pass
    """) % {'do_import': do_import, 'args': args}

    # leases file starts out empty; dhclient fills it in
    leases = ''

    self.configs = [{'file': constants.DHCLIENT_CONF, 'cont': conf, 'mode': 0o644},
                    {'file': constants.DHCLIENT_SCRIPT, 'cont': script, 'mode': 0o755},
                    {'file': constants.DHCLIENT_LEASES, 'cont': leases, 'mode': 0o664}]
def _resolve_routes(self, cfg, pub_addrinfo, priv_addrinfo, route_seq):
    """Resolve a configured route sequence into concrete routes.

    Routes are grouped by subnet; subnets are sorted so the widest is
    handled first, and for each subnet the first route that yields a usable
    router (or is a blackhole) wins.  Returns the list of resolved routes.

    Fix over the original: dict.has_key() (removed in Python 3) replaced
    with the 'in' operator, which behaves identically on Python 2 as well.
    """
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)

    #
    # XXX: handle overlapping static + dhcp route (prefer dhcp)
    # XXX: at least log overlapping routes in general
    #

    ret = []

    # figure out unique subnets
    subdict = {}   # subnet string -> list of rdf route nodes
    subnets = []   # unique subnets, in first-seen order
    for r in route_seq:
        subnet = r.getS(ns.address, rdf.IPv4Subnet)
        subnet_str = subnet.toString()
        if subnet_str not in subdict:   # was: subdict.has_key(subnet_str)
            subdict[subnet_str] = []
            subnets.append(subnet)
        subdict[subnet_str].append(r)

    # sort
    _log.debug('route keys (subnets), no sort:')
    for i in subnets:
        _log.debug(' ' + i.toString())
    subnets.sort()
    _log.debug('route keys (subnets), after sort:')
    for i in subnets:
        _log.debug(' ' + i.toString())

    # resolve routes in sorted order (starting from "widest")
    got_default_route = False
    for subnet in subnets:
        _log.debug('processing subnet %s' % subnet.toString())

        # apply first working route for this subnet
        got_route_for_this_subnet = False
        for r in subdict[subnet.toString()]:
            rt = self._resolve_one_route(r, pub_iface, priv_iface,
                                         pub_addrinfo, priv_addrinfo)

            # must have router or blackhole
            if rt.router is None and not rt.blackhole:
                _log.warning('cannot figure out router for route %s, skipping' % rt.subnet.toString())
                continue

            ret.append(rt)
            if rt.subnet == datatypes.IPv4Subnet.fromString('0.0.0.0/0'):
                got_default_route = True

            # success, skip other routes with this same subnet
            got_route_for_this_subnet = True
            break

        if not got_route_for_this_subnet:
            # XXX: should this be 'raise'?
            _log.warning('could not resolve a route for subnet %s' % subnet.toString())

    if not got_default_route:
        _log.warning('did not get a default route')

    return ret
def up_qos_rules(self, cfg):
    """Configure and enable quality-of-service configuration.

    Removes any existing root qdisc from both interfaces, then installs an
    HTB root with one rate-limited default class and an SFQ leaf on each
    interface that has an uplink rate limit configured; interfaces without
    a limit keep the kernel default pfifo_fast.
    """
    #def _compute_burst(kbits, mtu):
    #    assumed_hz = 250
    #    burst = (float(kbits) / float(assumed_hz))  # example: 1024kbit/s, hz=250 => 4kbit
    #    return '%fkbit' % min(burst*2, (mtu*8.0/1000.0))

    _log.debug('up_qos_rules')

    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)

    qos_cfg = helpers.get_qos_config(cfg)
    if qos_cfg.hasS(ns.globalUplinkRateLimit):
        pub_uplink = qos_cfg.getS(ns.globalUplinkRateLimit, rdf.Integer)
    else:
        pub_uplink = None

    # XXX: add to conf?
    pub_downlink = None
    priv_uplink = None
    priv_downlink = None

    pub_mtu, priv_mtu = helpers.get_iface_mtus(cfg)
    _log.debug('qos: %s, %s, %s, %s' % (pub_uplink, pub_downlink, priv_uplink, priv_downlink))

    def_tx_limit = 100   # packets
    sfq_perturb = 30     # seconds
    sfq_quantum = None   # XXX: should we set this? defaults to iface mtu

    # clear existing root qdiscs first (no retval check: may not exist)
    for devname in (pub_iface_name, priv_iface_name):
        if devname is not None:
            run_command([constants.CMD_TC, 'qdisc', 'del', 'dev', devname, 'root'])

    def _setup_htb_sfq(devname, rate, mtu, root_handle, class_id, sfq_handle):
        # HTB root with a single default class plus an SFQ leaf for fairness.
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', devname,
                     'root', 'handle', root_handle, 'htb', 'default', '1'],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'class', 'add', 'dev', devname,
                     'parent', root_handle, 'classid', class_id,
                     'htb', 'rate', rate, 'quantum', str(mtu)],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', devname,
                     'parent', class_id, 'handle', sfq_handle,
                     'sfq', 'perturb', str(sfq_perturb)],
                    retval=runcommand.FAIL)

    if pub_iface_name is not None and pub_uplink is not None:
        # only uplink rate is relevant
        _setup_htb_sfq(pub_iface_name, '%skbit' % pub_uplink, pub_mtu, '1:', '1:1', '10:')

    if priv_iface_name is not None and priv_uplink is not None:
        # NOTE(review): 'kbps' here vs 'kbit' on the public side -- in tc these
        # differ by a factor of 8; the branch is currently dead (priv_uplink
        # is hard-coded None above), but confirm the unit before enabling.
        _setup_htb_sfq(priv_iface_name, '%skbps' % priv_uplink, priv_mtu, '2:', '2:1', '20:')

    if helpers.get_debug(cfg):
        run_command([constants.CMD_TC, '-d', 'qdisc', 'show'])
        run_command([constants.CMD_TC, '-d', 'class', 'show'])
def up_firewall_rules(self, cfg, pub_addr, priv_addr, ppp_forced_iface, ppp_forced_gw):
    """Configure and enable firewall rules.

    Builds a complete iptables-restore script (raw, filter, nat, mangle
    tables) from the firewall configuration and feeds it to
    iptables-restore in one atomic operation.
    """
    _log.debug('up_firewall_rules')

    # ROUTE support through modprobe test: if the ipt_ROUTE module loads,
    # the iptables ROUTE target (used for ppp forced routing) is available.
    retval, stdout, stderr = run_command([constants.CMD_MODPROBE, 'ipt_ROUTE'])
    route_target_supported = False
    if retval == 0:
        route_target_supported = True
        _log.info('ROUTE target support detected')
    else:
        _log.warning('ROUTE target support NOT detected')

    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    fw_cfg = net_cfg.getS(ns.firewallConfig, rdf.Type(ns.FirewallConfig))
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)
    pub_nat = helpers.is_public_nat(cfg)
    priv_nat = helpers.is_private_nat(cfg)

    # XXX: this could be in firewall rules
    cli_routing = helpers.is_client_routing(cfg)

    # stringified interface addresses; stay None when the interface
    # (or its address) is not configured
    pub_addr_str = None
    if pub_addr is not None:
        pub_addr_str = pub_addr.getAddress().toString()
    priv_addr_str = None
    if priv_addr is not None:
        priv_addr_str = priv_addr.getAddress().toString()

    # substitution dict shared by all rule templates below.
    # NOTE(review): ppp_interfaces is not defined in this function --
    # presumably a module-level constant holding the ppp interface
    # wildcard (e.g. 'ppp+'); confirm against the rest of the module.
    if_dict = {'pub_if': pub_iface_name,
               'priv_if': priv_iface_name,
               'ppp_if': ppp_interfaces,
               'pub_ip': pub_addr_str,
               'priv_ip': priv_addr_str,
               'fwmark_ipsec': constants.FWMARK_IPSEC,
               'fwmark_skipnat': constants.FWMARK_SKIPNAT,
               'fwmark_ppp': constants.FWMARK_PPP,
               'fwmark_ppp_s2s': constants.FWMARK_PPP_S2S,
               'fwmark_local_l2tp': constants.FWMARK_LOCAL_L2TP,
               'fwmark_license_restricted': constants.FWMARK_LICENSE_RESTRICTED,
               'http_fwd1': constants.WEBUI_FORWARD_PORT_UIFORCED_HTTP,
               'https_fwd1': constants.WEBUI_FORWARD_PORT_UIFORCED_HTTPS,
               'http_fwd2': constants.WEBUI_FORWARD_PORT_LICENSE_HTTP,
               'https_fwd2': constants.WEBUI_FORWARD_PORT_LICENSE_HTTPS,
               'http_fwd3': constants.WEBUI_FORWARD_PORT_OLDPSK_HTTP,
               'https_fwd3': constants.WEBUI_FORWARD_PORT_OLDPSK_HTTPS,
               }

    #
    # rules for -t raw
    #
    raw_rules = textwrap.dedent("""\
    -A raw_prerouting -i %(ppp_if)s -j raw_prerouting_ppp
    -A raw_output -o %(ppp_if)s -j raw_output_ppp
    """) % if_dict

    #
    # rules for -t nat
    #
    nat_rules = textwrap.dedent("""\
    -A nat_prerouting -i %(ppp_if)s -j nat_prerouting_ppp
    -A nat_postrouting -o %(ppp_if)s -j nat_postrouting_ppp
    -A nat_output -o %(ppp_if)s -j nat_output_ppp
    """) % if_dict

    # one DNAT rule per configured port forward
    pf_cfg = fw_cfg.getS(ns.portForward, rdf.Seq(rdf.Type(ns.PortForwardRule)))
    for i in pf_cfg:
        iface = i.getS(ns.interface, rdf.Type(ns.NetworkInterface)).getS(ns.interfaceName, rdf.String)
        proto = str(i.getS(ns.protocol, rdf.Integer))
        port = str(i.getS(ns.port, rdf.Integer))
        daddr = i.getS(ns.destinationAddress, rdf.IPv4Address).toString()
        dport = str(i.getS(ns.destinationPort, rdf.Integer))
        nat_rules += textwrap.dedent("""\
        -A nat_prerouting -i %(iface)s -p %(proto)s --dport %(port)s -j DNAT --to-destination %(daddr)s:%(dport)s-%(dport)s
        """) % {'iface': iface, 'proto': proto, 'port': port, 'daddr': daddr, 'dport': dport}

    # nat all traffic (both ppp and other), because we support routing of
    # non-client traffic
    if pub_nat:
        # These bizarre rules are used to prevent clients which use our
        # gateway as a router/NAT from accidentally getting an unmodified
        # UDP port when they are using IPsec.  This would be hazardous to
        # IKE because our IKE already uses UDP/500 and UDP/4500 (but may
        # not be running due to a startup race when the client connects).
        #
        # The ports are pretty arbitrary; Linux maps >= 1024 starting from
        # 1024; we choose to start higher to make it easier to track
        # NATted and other ports (and also so that all ports we use,
        # namely 500, 4500, 1701, 1702, etc) are below the start point).
        #
        # Mark 2 is used as a "skip NAT" marker: we can add this mark to
        # e.g. site-to-site packets to avoid NAT for them if we wish.
        #
        # We use a two-chain workaround here to implement NAT: our NAT
        # rule must have a match "not public address AND not private
        # address", but iptables does not support multiple -s matches in
        # the same rule.  So, packets are only NATted if they satisfy:
        #   1. Source address != public address
        #   2. Source address != private address
        #   3. Packet is not marked as "skip NAT"
        #
        # We need to exclude private interface address (from public NAT)
        # to avoid NATting IPsec packets when they are used through the
        # private interface.
        nat_rules += textwrap.dedent("""\
        -A nat_postrouting -o %(pub_if)s ! -s %(pub_ip)s -m mark --mark 0/%(fwmark_skipnat)s -j nat_pub1
        """) % if_dict
        if priv_iface is not None:
            nat_rules += textwrap.dedent("""\
            -A nat_pub1 ! -s %(priv_ip)s -j nat_pub2
            """) % if_dict
        else:
            nat_rules += textwrap.dedent("""\
            -A nat_pub1 -j nat_pub2
            """) % if_dict
        nat_rules += textwrap.dedent("""\
        -A nat_pub2 -p tcp -j SNAT --to-source %(pub_ip)s:16384-49151
        -A nat_pub2 -p udp -j SNAT --to-source %(pub_ip)s:16384-49151
        -A nat_pub2 -j SNAT --to-source %(pub_ip)s
        """) % if_dict

    if priv_nat:
        # mirror of the public NAT chains for the private interface
        nat_rules += textwrap.dedent("""\
        -A nat_postrouting -o %(priv_if)s ! -s %(priv_ip)s -m mark --mark 0/%(fwmark_skipnat)s -j nat_priv1
        """) % if_dict
        if pub_iface is not None:
            nat_rules += textwrap.dedent("""\
            -A nat_priv1 ! -s %(pub_ip)s -j nat_priv2
            """) % if_dict
        else:
            nat_rules += textwrap.dedent("""\
            -A nat_priv1 -j nat_priv2
            """) % if_dict
        nat_rules += textwrap.dedent("""\
        -A nat_priv2 -p tcp -j SNAT --to-source %(priv_ip)s:16384-49151
        -A nat_priv2 -p udp -j SNAT --to-source %(priv_ip)s:16384-49151
        -A nat_priv2 -j SNAT --to-source %(priv_ip)s
        """) % if_dict

    #
    # rules for -t mangle
    #
    mangle_rules = textwrap.dedent("""\
    -A mangle_prerouting -i %(ppp_if)s -j MARK --set-mark %(fwmark_ppp)s
    -A mangle_prerouting -i %(ppp_if)s -j mangle_prerouting_ppp
    -A mangle_prerouting -p esp -j MARK --set-mark %(fwmark_ipsec)s
    -A mangle_prerouting -p udp --dport 500 -j MARK --set-mark %(fwmark_ipsec)s
    -A mangle_prerouting -p udp --dport 4500 -j MARK --set-mark %(fwmark_ipsec)s
    -A mangle_input -i %(ppp_if)s -j mangle_input_ppp
    -A mangle_forward -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
    -A mangle_forward -i %(ppp_if)s -j mangle_forward_ppp
    -A mangle_forward -o %(ppp_if)s -j mangle_forward_ppp
    -A mangle_output -o %(ppp_if)s -j mangle_output_ppp
    # Not necessary, openl2tp patch
    #-A mangle_output -p udp --sport 1701 -j MARK --set-mark %(fwmark_local_l2tp)s
    #-A mangle_output -p udp --sport 1702 -j MARK --set-mark %(fwmark_local_l2tp)s
    -A mangle_postrouting -o %(ppp_if)s -j mangle_postrouting_ppp
    """) % if_dict

    if (ppp_forced_iface is not None) and (ppp_forced_gw is not None):
        if route_target_supported:
            _log.info('forced routing enabled: %s / %s' % (ppp_forced_iface, ppp_forced_gw.toString()))
            # forced routing is applied but only if packets are not license restricted
            mangle_rules += '\n'
            mangle_rules += ('-A mangle_prerouting -m mark --mark 0/%(fwmark_license_restricted)s -i %(ppp_if)s -j ROUTE' % if_dict) + \
                            (' --oif %s --gw %s\n' % (ppp_forced_iface, ppp_forced_gw.toString()))
        else:
            _log.error('forced routing enabled but route target not supported!')

    #
    # rules for -t filter (we accept esp, udp/500, udp/4500 from any interface)
    #
    filter_rules = textwrap.dedent("""\
    -A filter_input -i lo -j ACCEPT
    -A filter_input -m state --state ESTABLISHED,RELATED -j ACCEPT
    -A filter_input -i %(ppp_if)s -j filter_input_ppp
    # esp protected traffic (= l2tp) or IKE
    -A filter_input -m mark --mark %(fwmark_ipsec)s/%(fwmark_ipsec)s -j ACCEPT
    # rate limited public icmp
    -A filter_input -i %(pub_if)s -p icmp -m limit --limit 10/second --limit-burst 50 -j ACCEPT
    -A filter_input -i %(pub_if)s -p icmp -j DROP
    -A filter_input -i %(ppp_if)s -p icmp -j ACCEPT
    # all web ui ports
    -A filter_input -i %(ppp_if)s -p tcp --dport 80 -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport 443 -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd1)d -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd1)d -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd2)d -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd2)d -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd3)d -j ACCEPT
    -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd3)d -j ACCEPT
    """) % if_dict

    if priv_iface is not None:
        # Note: we assume that private interface is always different
        # from public interface if it is defined at all.
        filter_rules += textwrap.dedent("""\
        -A filter_input -i %(priv_if)s -p icmp -m limit --limit 10/second --limit-burst 50 -j ACCEPT
        -A filter_input -i %(priv_if)s -p icmp -j DROP
        """) % if_dict

    # one ACCEPT rule per configured input-accept entry.
    # NOTE(review): rdf.String() is called here but referenced bare
    # (rdf.String) in the port-forward loop above -- confirm which form the
    # rdf API expects; they may be equivalent.
    ia_cfg = fw_cfg.getS(ns.inputAccept, rdf.Seq(rdf.Type(ns.InputAcceptRule)))
    for i in ia_cfg:
        iface = i.getS(ns.interface, rdf.Type(ns.NetworkInterface)).getS(ns.interfaceName, rdf.String())
        proto = str(i.getS(ns.protocol, rdf.Integer))
        port = str(i.getS(ns.port, rdf.Integer))
        filter_rules += textwrap.dedent("""\
        -A filter_input -i %(iface)s -p %(proto)s --dport %(port)s -j ACCEPT
        """) % {'iface': iface, 'proto': proto, 'port': port}

    filter_rules += textwrap.dedent("""\
    -A filter_output -o %(ppp_if)s -j filter_output_ppp
    -A filter_output -o %(ppp_if)s -j ACCEPT
    -A filter_output -j ACCEPT
    """) % if_dict

    # per-destination allow/deny rules restricting what ppp clients may reach
    ppp_firewall_rules = ''
    if fw_cfg.hasS(ns.pppFirewallRules):
        fr_cfg = fw_cfg.getS(ns.pppFirewallRules, rdf.Seq(rdf.Type(ns.PppFirewallRule)))
    else:
        fr_cfg = []
    for i in fr_cfg:
        dest = '-d ' + i.getS(ns.subnet, rdf.IPv4Subnet).toString()
        # port match is only meaningful when a protocol is given
        if i.hasS(ns.protocol):
            proto = '-p ' + str(i.getS(ns.protocol, rdf.Integer))
            if i.hasS(ns.port):
                port = '--dport ' + str(i.getS(ns.port, rdf.Integer))
            else:
                port = ''
        else:
            proto = ''
            port = ''
        fr_action = i.getS(ns.action)
        if fr_action.hasType(ns.ActionAllow):
            action = 'ACCEPT'
        elif fr_action.hasType(ns.ActionDeny):
            action = 'REJECT --reject-with icmp-port-unreachable'
        else:
            raise Exception('invalid firewall action')
        ppp_firewall_rules += textwrap.dedent("""\
        -A filter_forward_ppp_firewall %(dest)s %(proto)s %(port)s -j %(action)s
        """) % {'dest': dest, 'proto': proto, 'port': port, 'action': action}

    # XXX: conn track? (e.g. pub->ppp ?)
    filter_rules += textwrap.dedent("""\
    -A filter_forward -m conntrack --ctstate DNAT -j ACCEPT
    -A filter_forward -m state --state ESTABLISHED,RELATED -j ACCEPT
    """) % if_dict

    # client-to-client routing: note that we need to separate between
    # true client-to-client and site-to-site related routing
    if cli_routing:
        _log.info('client-to-client routing allowed, no rule added')
    else:
        # XXX -- This doesn't work (see #828) for client-to-s2s packets.
        # Currently never used.
        # match ppp -> ppp packets with *no* s2s mark
        _log.info('client-to-client routing not allowed, adding firewall rule to prevent')
        _log.error('client-to-client routing not allowed -- but unsupported in this build')
        filter_rules += textwrap.dedent("""\
        -A filter_forward -i %(ppp_if)s -o %(ppp_if)s -m mark --mark 0/%(fwmark_ppp_s2s)s -j DROP
        """) % if_dict

    # ppp forwarding rules are only applied if traffic is not blocked
    # by client-to-client restrictions above
    filter_rules += textwrap.dedent("""\
    -A filter_forward -i %(ppp_if)s -j filter_forward_ppp
    -A filter_forward -o %(ppp_if)s -j filter_forward_ppp
    -A filter_forward -i %(ppp_if)s -o %(pub_if)s -j ACCEPT
    -A filter_forward -i %(pub_if)s -o %(ppp_if)s -j ACCEPT
    -A filter_forward -i %(ppp_if)s -o %(ppp_if)s -j ACCEPT
    """) % if_dict

    # non-client routing
    if priv_iface is not None:
        if fw_cfg.getS(ns.allowNonClientRouting, rdf.Boolean):
            filter_rules += textwrap.dedent("""\
            -A filter_forward -i %(pub_if)s -o %(pub_if)s -j ACCEPT
            -A filter_forward -i %(priv_if)s -o %(priv_if)s -j ACCEPT
            -A filter_forward -i %(priv_if)s -o %(pub_if)s -j ACCEPT
            """) % if_dict
    else:
        if fw_cfg.getS(ns.allowNonClientRouting, rdf.Boolean):
            filter_rules += textwrap.dedent("""\
            -A filter_forward -i %(pub_if)s -o %(pub_if)s -j ACCEPT
            """) % if_dict

    if priv_iface is not None:
        filter_rules += textwrap.dedent("""\
        -A filter_forward -i %(ppp_if)s -o %(priv_if)s -j ACCEPT
        -A filter_forward -i %(priv_if)s -o %(ppp_if)s -j ACCEPT
        """) % if_dict

    #
    # finally, build the tables
    #
    tables = textwrap.dedent("""\
    # Iptables restore script
    *raw
    :PREROUTING ACCEPT
    :OUTPUT ACCEPT
    :raw_prerouting -
    :raw_output -
    :raw_prerouting_ppp -
    :raw_output_ppp -
    :raw_prerouting_ppp_cust -
    :raw_output_ppp_cust -
    -A PREROUTING -j raw_prerouting
    -A OUTPUT -j raw_output
    %(raw_rules)s
    COMMIT

    *filter
    :INPUT DROP
    :FORWARD DROP
    :OUTPUT DROP
    :filter_input -
    :filter_forward -
    :filter_forward_ppp_firewall -
    :filter_output -
    :filter_input_ppp -
    :filter_forward_ppp -
    :filter_output_ppp -
    :filter_input_ppp_cust -
    :filter_forward_ppp_cust -
    :filter_output_ppp_cust -
    -A INPUT -j filter_input
    -A FORWARD -j filter_forward
    -A OUTPUT -j filter_output
    %(filter_rules)s
    %(ppp_firewall_rules)s
    COMMIT

    *nat
    :PREROUTING ACCEPT
    :POSTROUTING ACCEPT
    :OUTPUT ACCEPT
    :nat_prerouting -
    :nat_postrouting -
    :nat_output -
    :nat_prerouting_ppp -
    :nat_postrouting_ppp -
    :nat_output_ppp -
    :nat_prerouting_ppp_cust -
    :nat_postrouting_ppp_cust -
    :nat_output_ppp_cust -
    # chains for public/private natting, see above
    :nat_pub1 -
    :nat_pub2 -
    :nat_priv1 -
    :nat_priv2 -
    -A PREROUTING -j nat_prerouting
    -A POSTROUTING -j nat_postrouting
    -A OUTPUT -j nat_output
    %(nat_rules)s
    COMMIT

    *mangle
    :PREROUTING ACCEPT
    :INPUT ACCEPT
    :FORWARD ACCEPT
    :OUTPUT ACCEPT
    :POSTROUTING ACCEPT
    :mangle_prerouting -
    :mangle_input -
    :mangle_forward -
    :mangle_output -
    :mangle_postrouting -
    :mangle_prerouting_ppp -
    :mangle_input_ppp -
    :mangle_forward_ppp -
    :mangle_output_ppp -
    :mangle_postrouting_ppp -
    :mangle_prerouting_ppp_cust -
    :mangle_input_ppp_cust -
    :mangle_forward_ppp_cust -
    :mangle_output_ppp_cust -
    :mangle_postrouting_ppp_cust -
    -A PREROUTING -j mangle_prerouting
    -A INPUT -j mangle_input
    -A FORWARD -j mangle_forward
    -A OUTPUT -j mangle_output
    -A POSTROUTING -j mangle_postrouting
    %(mangle_rules)s
    COMMIT
    # end of script.
    """) % {'raw_rules': raw_rules,
            'filter_rules': filter_rules,
            'nat_rules': nat_rules,
            'mangle_rules': mangle_rules,
            'ppp_firewall_rules': ppp_firewall_rules}

    _log.debug('iptables-restore script dump:')
    for i, l in enumerate(tables.split('\n')):
        _log.debug('%d: %s' % (i + 1, l))

    # apply the whole ruleset atomically via stdin
    (retval, retout, reterr) = run_command([constants.CMD_IPTABLES_RESTORE],
                                           stdin=tables.encode('ascii'),
                                           retval=runcommand.FAIL)
    _log.debug('iptables-restore => %s\n%s\n%s' % (retval, retout, reterr))
def up_qos_rules(self, cfg):
    """Configure and enable quality-of-service configuration.

    Any existing root qdisc is deleted first; an interface with a
    configured uplink limit then gets an HTB root (single default class)
    with an SFQ leaf, while interfaces without a limit keep pfifo_fast.
    """
    #def _compute_burst(kbits, mtu):
    #    assumed_hz = 250
    #    burst = (float(kbits) / float(assumed_hz))  # example: 1024kbit/s, hz=250 => 4kbit
    #    return '%fkbit' % min(burst*2, (mtu*8.0/1000.0))

    _log.debug('up_qos_rules')

    (pub_rdf_iface, pub_dev), (priv_rdf_iface, priv_dev) = helpers.get_ifaces(cfg)
    (_, proxyarp_dev) = helpers.get_proxyarp_iface(cfg)

    qos_cfg = helpers.get_qos_config(cfg)
    pub_uplink = None
    if qos_cfg.hasS(ns.globalUplinkRateLimit):
        pub_uplink = qos_cfg.getS(ns.globalUplinkRateLimit, rdf.Integer)

    # XXX: add to conf?
    pub_downlink = None
    priv_uplink = None
    priv_downlink = None

    pub_mtu, priv_mtu = helpers.get_iface_mtus(cfg)
    _log.debug('qos: %s, %s, %s, %s' % (pub_uplink, pub_downlink, priv_uplink, priv_downlink))

    def_tx_limit = 100   # packets
    sfq_perturb = 30     # seconds
    sfq_quantum = None   # XXX: should we set this? defaults to iface mtu

    # remove any previous shaping (ignore failures: root qdisc may not exist)
    if pub_dev is not None:
        run_command([constants.CMD_TC, 'qdisc', 'del', 'dev', pub_dev, 'root'])
    if priv_dev is not None:
        run_command([constants.CMD_TC, 'qdisc', 'del', 'dev', priv_dev, 'root'])

    if pub_dev is not None and pub_uplink is not None:
        rate = '%skbit' % pub_uplink  # only uplink rate is relevant
        #pub_ceil = rate
        #pub_burst = _compute_burst(pub_uplink, pub_mtu)
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', pub_dev,
                     'root', 'handle', '1:', 'htb', 'default', '1'],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'class', 'add', 'dev', pub_dev,
                     'parent', '1:', 'classid', '1:1', 'htb',
                     'rate', rate, 'quantum', str(pub_mtu)],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', pub_dev,
                     'parent', '1:1', 'handle', '10:', 'sfq',
                     'perturb', str(sfq_perturb)],
                    retval=runcommand.FAIL)

    if priv_dev is not None and priv_uplink is not None:
        # NOTE(review): 'kbps' (bytes/s in tc) differs from the public
        # side's 'kbit'; branch is dead today since priv_uplink is None.
        rate = '%skbps' % priv_uplink
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', priv_dev,
                     'root', 'handle', '2:', 'htb', 'default', '1'],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'class', 'add', 'dev', priv_dev,
                     'parent', '2:', 'classid', '2:1', 'htb',
                     'rate', rate, 'quantum', str(priv_mtu)],
                    retval=runcommand.FAIL)
        run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', priv_dev,
                     'parent', '2:1', 'handle', '20:', 'sfq',
                     'perturb', str(sfq_perturb)],
                    retval=runcommand.FAIL)

    if helpers.get_debug(cfg):
        run_command([constants.CMD_TC, '-d', 'qdisc', 'show'])
        run_command([constants.CMD_TC, '-d', 'class', 'show'])
def up_firewall_rules(self, cfg, pub_addr, priv_addr, ppp_forced_iface, ppp_forced_gw):
    """Configure and enable firewall rules.

    Builds a complete iptables-restore script (raw, filter, nat and mangle
    tables) from the RDF firewall configuration and feeds it to
    iptables-restore in one atomic operation.

    @param cfg: RDF configuration root node.
    @param pub_addr: resolved public interface address (IPv4AddressSubnet or None).
    @param priv_addr: resolved private interface address (IPv4AddressSubnet or None).
    @param ppp_forced_iface: interface name for forced routing of PPP traffic, or None.
    @param ppp_forced_gw: gateway address for forced routing, or None.
    """
    _log.debug('up_firewall_rules')

    # ROUTE support through modprobe test
    retval, stdout, stderr = run_command([constants.CMD_MODPROBE, 'ipt_ROUTE'])
    route_target_supported = False
    if retval == 0:
        route_target_supported = True
        _log.info('ROUTE target support detected')
    else:
        _log.warning('ROUTE target support NOT detected')

    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    fw_cfg = net_cfg.getS(ns.firewallConfig, rdf.Type(ns.FirewallConfig))
    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)
    pub_nat = helpers.is_public_nat(cfg)
    priv_nat = helpers.is_private_nat(cfg)
    # XXX: this could be in firewall rules
    cli_routing = helpers.is_client_routing(cfg)

    pub_addr_str = None
    if pub_addr is not None:
        pub_addr_str = pub_addr.getAddress().toString()
    priv_addr_str = None
    if priv_addr is not None:
        priv_addr_str = priv_addr.getAddress().toString()

    # substitution dictionary shared by all rule templates below
    # NOTE(review): ppp_interfaces is not defined in this method --
    # presumably a module-level constant naming the ppp interface
    # wildcard (e.g. 'ppp+'); confirm against module scope.
    if_dict = {'pub_if':pub_iface_name,
               'priv_if':priv_iface_name,
               'ppp_if':ppp_interfaces,
               'pub_ip':pub_addr_str,
               'priv_ip':priv_addr_str,
               'fwmark_ipsec':constants.FWMARK_IPSEC,
               'fwmark_skipnat':constants.FWMARK_SKIPNAT,
               'fwmark_ppp':constants.FWMARK_PPP,
               'fwmark_ppp_s2s':constants.FWMARK_PPP_S2S,
               'fwmark_local_l2tp':constants.FWMARK_LOCAL_L2TP,
               'fwmark_license_restricted':constants.FWMARK_LICENSE_RESTRICTED,
               'http_fwd1':constants.WEBUI_FORWARD_PORT_UIFORCED_HTTP,
               'https_fwd1':constants.WEBUI_FORWARD_PORT_UIFORCED_HTTPS,
               'http_fwd2':constants.WEBUI_FORWARD_PORT_LICENSE_HTTP,
               'https_fwd2':constants.WEBUI_FORWARD_PORT_LICENSE_HTTPS,
               'http_fwd3':constants.WEBUI_FORWARD_PORT_OLDPSK_HTTP,
               'https_fwd3':constants.WEBUI_FORWARD_PORT_OLDPSK_HTTPS,
               }

    #
    # rules for -t raw
    #
    raw_rules = textwrap.dedent("""\
        -A raw_prerouting -i %(ppp_if)s -j raw_prerouting_ppp
        -A raw_output -o %(ppp_if)s -j raw_output_ppp
        """) % if_dict

    #
    # rules for -t nat
    #
    nat_rules = textwrap.dedent("""\
        -A nat_prerouting -i %(ppp_if)s -j nat_prerouting_ppp
        -A nat_postrouting -o %(ppp_if)s -j nat_postrouting_ppp
        -A nat_output -o %(ppp_if)s -j nat_output_ppp
        """) % if_dict

    # one DNAT rule per configured port forward
    pf_cfg = fw_cfg.getS(ns.portForward, rdf.Seq(rdf.Type(ns.PortForwardRule)))
    for i in pf_cfg:
        iface = i.getS(ns.interface, rdf.Type(ns.NetworkInterface)).getS(ns.interfaceName, rdf.String)
        proto = str(i.getS(ns.protocol, rdf.Integer))
        port = str(i.getS(ns.port, rdf.Integer))
        daddr = i.getS(ns.destinationAddress, rdf.IPv4Address).toString()
        dport = str(i.getS(ns.destinationPort, rdf.Integer))
        nat_rules += textwrap.dedent("""\
            -A nat_prerouting -i %(iface)s -p %(proto)s --dport %(port)s -j DNAT --to-destination %(daddr)s:%(dport)s-%(dport)s
            """) % {'iface': iface, 'proto': proto, 'port': port, 'daddr': daddr, 'dport': dport}

    # nat all traffic (both ppp and other), because we support routing of non-client traffic
    if pub_nat:
        # These bizarre rules are used to prevent clients which use our gateway as a router/NAT
        # from accidentally getting an unmodified UDP port when they are using IPsec.  This would
        # be hazardous to IKE because our IKE already uses UDP/500 and UDP/4500 (but may not be
        # running due to a startup race when the client connects).
        #
        # The ports are pretty arbitrary; Linux maps >= 1024 starting from 1024; we choose to
        # start higher to make it easier to track NATted and other ports (and also so that all
        # ports we use, namely 500, 4500, 1701, 1702, etc) are below the start point).
        #
        # Mark 2 is used as a "skip NAT" marker: we can add this mark to e.g. site-to-site packets
        # to avoid NAT for them if we wish.
        #
        # We use a two-chain workaround here to implement NAT: our NAT rule must have a match
        # "not public address AND not private address", but iptables does not support multiple
        # -s matches in the same rule.  So, packets are only NATted if they satisfy:
        #   1. Source address != public address
        #   2. Source address != private address
        #   3. Packet is not marked as "skip NAT"
        #
        # We need to exclude private interface address (from public NAT) to avoid NATting
        # IPsec packets when they are used through the private interface.
        nat_rules += textwrap.dedent("""\
            -A nat_postrouting -o %(pub_if)s ! -s %(pub_ip)s -m mark --mark 0/%(fwmark_skipnat)s -j nat_pub1
            """) % if_dict
        if priv_iface is not None:
            nat_rules += textwrap.dedent("""\
                -A nat_pub1 ! -s %(priv_ip)s -j nat_pub2
                """) % if_dict
        else:
            nat_rules += textwrap.dedent("""\
                -A nat_pub1 -j nat_pub2
                """) % if_dict
        nat_rules += textwrap.dedent("""\
            -A nat_pub2 -p tcp -j SNAT --to-source %(pub_ip)s:16384-49151
            -A nat_pub2 -p udp -j SNAT --to-source %(pub_ip)s:16384-49151
            -A nat_pub2 -j SNAT --to-source %(pub_ip)s
            """) % if_dict

    if priv_nat:
        # mirror of the public NAT logic above, for the private interface
        nat_rules += textwrap.dedent("""\
            -A nat_postrouting -o %(priv_if)s ! -s %(priv_ip)s -m mark --mark 0/%(fwmark_skipnat)s -j nat_priv1
            """) % if_dict
        if pub_iface is not None:
            nat_rules += textwrap.dedent("""\
                -A nat_priv1 ! -s %(pub_ip)s -j nat_priv2
                """) % if_dict
        else:
            nat_rules += textwrap.dedent("""\
                -A nat_priv1 -j nat_priv2
                """) % if_dict
        nat_rules += textwrap.dedent("""\
            -A nat_priv2 -p tcp -j SNAT --to-source %(priv_ip)s:16384-49151
            -A nat_priv2 -p udp -j SNAT --to-source %(priv_ip)s:16384-49151
            -A nat_priv2 -j SNAT --to-source %(priv_ip)s
            """) % if_dict

    #
    # rules for -t mangle
    #
    mangle_rules = textwrap.dedent("""\
        -A mangle_prerouting -i %(ppp_if)s -j MARK --set-mark %(fwmark_ppp)s
        -A mangle_prerouting -i %(ppp_if)s -j mangle_prerouting_ppp
        -A mangle_prerouting -p esp -j MARK --set-mark %(fwmark_ipsec)s
        -A mangle_prerouting -p udp --dport 500 -j MARK --set-mark %(fwmark_ipsec)s
        -A mangle_prerouting -p udp --dport 4500 -j MARK --set-mark %(fwmark_ipsec)s
        -A mangle_input -i %(ppp_if)s -j mangle_input_ppp
        -A mangle_forward -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
        -A mangle_forward -i %(ppp_if)s -j mangle_forward_ppp
        -A mangle_forward -o %(ppp_if)s -j mangle_forward_ppp
        -A mangle_output -o %(ppp_if)s -j mangle_output_ppp
        # Not necessary, openl2tp patch
        #-A mangle_output -p udp --sport 1701 -j MARK --set-mark %(fwmark_local_l2tp)s
        #-A mangle_output -p udp --sport 1702 -j MARK --set-mark %(fwmark_local_l2tp)s
        -A mangle_postrouting -o %(ppp_if)s -j mangle_postrouting_ppp
        """) % if_dict

    if (ppp_forced_iface is not None) and (ppp_forced_gw is not None):
        if route_target_supported:
            _log.info('forced routing enabled: %s / %s' % (ppp_forced_iface, ppp_forced_gw.toString()))
            # forced routing is applied but only if packets are not license restricted
            mangle_rules += '\n'
            mangle_rules += ('-A mangle_prerouting -m mark --mark 0/%(fwmark_license_restricted)s -i %(ppp_if)s -j ROUTE' % if_dict) + \
                            (' --oif %s --gw %s\n' % (ppp_forced_iface, ppp_forced_gw.toString()))
        else:
            _log.error('forced routing enabled but route target not supported!')

    #
    # rules for -t filter (we accept esp, udp/500, udp/4500 from any interface)
    #
    filter_rules = textwrap.dedent("""\
        -A filter_input -i lo -j ACCEPT
        -A filter_input -m state --state ESTABLISHED,RELATED -j ACCEPT
        -A filter_input -i %(ppp_if)s -j filter_input_ppp
        # esp protected traffic (= l2tp) or IKE
        -A filter_input -m mark --mark %(fwmark_ipsec)s/%(fwmark_ipsec)s -j ACCEPT
        # rate limited public icmp
        -A filter_input -i %(pub_if)s -p icmp -m limit --limit 10/second --limit-burst 50 -j ACCEPT
        -A filter_input -i %(pub_if)s -p icmp -j DROP
        -A filter_input -i %(ppp_if)s -p icmp -j ACCEPT
        # all web ui ports
        -A filter_input -i %(ppp_if)s -p tcp --dport 80 -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport 443 -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd1)d -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd1)d -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd2)d -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd2)d -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(http_fwd3)d -j ACCEPT
        -A filter_input -i %(ppp_if)s -p tcp --dport %(https_fwd3)d -j ACCEPT
        """) % if_dict

    if priv_iface is not None:
        # Note: we assume that private interface is always different
        # from public interface if it is defined at all.
        filter_rules += textwrap.dedent("""\
            -A filter_input -i %(priv_if)s -p icmp -m limit --limit 10/second --limit-burst 50 -j ACCEPT
            -A filter_input -i %(priv_if)s -p icmp -j DROP
            """) % if_dict

    # one ACCEPT rule per configured "input accept" rule
    ia_cfg = fw_cfg.getS(ns.inputAccept, rdf.Seq(rdf.Type(ns.InputAcceptRule)))
    for i in ia_cfg:
        # NOTE(review): rdf.String() is called here while the port-forward
        # loop above passes rdf.String uninstantiated -- confirm both forms
        # are accepted by the rdf API.
        iface = i.getS(ns.interface, rdf.Type(ns.NetworkInterface)).getS(ns.interfaceName, rdf.String())
        proto = str(i.getS(ns.protocol, rdf.Integer))
        port = str(i.getS(ns.port, rdf.Integer))
        filter_rules += textwrap.dedent("""\
            -A filter_input -i %(iface)s -p %(proto)s --dport %(port)s -j ACCEPT
            """) % {'iface': iface, 'proto': proto, 'port': port}

    filter_rules += textwrap.dedent("""\
        -A filter_output -o %(ppp_if)s -j filter_output_ppp
        -A filter_output -o %(ppp_if)s -j ACCEPT
        -A filter_output -j ACCEPT
        """) % if_dict

    # per-client PPP firewall rules (optional in config)
    ppp_firewall_rules = ''
    if fw_cfg.hasS(ns.pppFirewallRules):
        fr_cfg = fw_cfg.getS(ns.pppFirewallRules, rdf.Seq(rdf.Type(ns.PppFirewallRule)))
    else:
        fr_cfg = []
    for i in fr_cfg:
        dest = '-d ' + i.getS(ns.subnet, rdf.IPv4Subnet).toString()
        if i.hasS(ns.protocol):
            proto = '-p ' + str(i.getS(ns.protocol, rdf.Integer))
            if i.hasS(ns.port):
                port = '--dport ' + str(i.getS(ns.port, rdf.Integer))
            else:
                port = ''
        else:
            # port match requires a protocol match; without protocol both are empty
            proto = ''
            port = ''
        fr_action = i.getS(ns.action)
        if fr_action.hasType(ns.ActionAllow):
            action = 'ACCEPT'
        elif fr_action.hasType(ns.ActionDeny):
            action = 'REJECT --reject-with icmp-port-unreachable'
        else:
            raise Exception('invalid firewall action')
        ppp_firewall_rules += textwrap.dedent("""\
            -A filter_forward_ppp_firewall %(dest)s %(proto)s %(port)s -j %(action)s
            """) % {'dest': dest, 'proto': proto, 'port': port, 'action': action}

    # XXX: conn track? (e.g. pub->ppp ?)
    filter_rules += textwrap.dedent("""\
        -A filter_forward -m conntrack --ctstate DNAT -j ACCEPT
        -A filter_forward -m state --state ESTABLISHED,RELATED -j ACCEPT
        """) % if_dict

    # client-to-client routing: note that we need to separate between
    # true client-to-client and site-to-site related routing
    if cli_routing:
        _log.info('client-to-client routing allowed, no rule added')
    else:
        # XXX -- This doesn't work (see #828) for client-to-s2s packets.
        # Currently never used.
        # match ppp -> ppp packets with *no* s2s mark
        _log.info('client-to-client routing not allowed, adding firewall rule to prevent')
        _log.error('client-to-client routing not allowed -- but unsupported in this build')
        filter_rules += textwrap.dedent("""\
            -A filter_forward -i %(ppp_if)s -o %(ppp_if)s -m mark --mark 0/%(fwmark_ppp_s2s)s -j DROP
            """) % if_dict

    # ppp forwarding rules are only applied if traffic is not blocked
    # by client-to-client restrictions above
    filter_rules += textwrap.dedent("""\
        -A filter_forward -i %(ppp_if)s -j filter_forward_ppp
        -A filter_forward -o %(ppp_if)s -j filter_forward_ppp
        -A filter_forward -i %(ppp_if)s -o %(pub_if)s -j ACCEPT
        -A filter_forward -i %(pub_if)s -o %(ppp_if)s -j ACCEPT
        -A filter_forward -i %(ppp_if)s -o %(ppp_if)s -j ACCEPT
        """) % if_dict

    # non-client routing
    if priv_iface is not None:
        if fw_cfg.getS(ns.allowNonClientRouting, rdf.Boolean):
            filter_rules += textwrap.dedent("""\
                -A filter_forward -i %(pub_if)s -o %(pub_if)s -j ACCEPT
                -A filter_forward -i %(priv_if)s -o %(priv_if)s -j ACCEPT
                -A filter_forward -i %(priv_if)s -o %(pub_if)s -j ACCEPT
                """) % if_dict
    else:
        if fw_cfg.getS(ns.allowNonClientRouting, rdf.Boolean):
            filter_rules += textwrap.dedent("""\
                -A filter_forward -i %(pub_if)s -o %(pub_if)s -j ACCEPT
                """) % if_dict

    if priv_iface is not None:
        filter_rules += textwrap.dedent("""\
            -A filter_forward -i %(ppp_if)s -o %(priv_if)s -j ACCEPT
            -A filter_forward -i %(priv_if)s -o %(ppp_if)s -j ACCEPT
            """) % if_dict

    #
    # finally, build the tables
    #
    tables = textwrap.dedent("""\
        # Iptables restore script
        *raw
        :PREROUTING ACCEPT
        :OUTPUT ACCEPT
        :raw_prerouting -
        :raw_output -
        :raw_prerouting_ppp -
        :raw_output_ppp -
        :raw_prerouting_ppp_cust -
        :raw_output_ppp_cust -
        -A PREROUTING -j raw_prerouting
        -A OUTPUT -j raw_output
        %(raw_rules)s
        COMMIT
        *filter
        :INPUT DROP
        :FORWARD DROP
        :OUTPUT DROP
        :filter_input -
        :filter_forward -
        :filter_forward_ppp_firewall -
        :filter_output -
        :filter_input_ppp -
        :filter_forward_ppp -
        :filter_output_ppp -
        :filter_input_ppp_cust -
        :filter_forward_ppp_cust -
        :filter_output_ppp_cust -
        -A INPUT -j filter_input
        -A FORWARD -j filter_forward
        -A OUTPUT -j filter_output
        %(filter_rules)s
        %(ppp_firewall_rules)s
        COMMIT
        *nat
        :PREROUTING ACCEPT
        :POSTROUTING ACCEPT
        :OUTPUT ACCEPT
        :nat_prerouting -
        :nat_postrouting -
        :nat_output -
        :nat_prerouting_ppp -
        :nat_postrouting_ppp -
        :nat_output_ppp -
        :nat_prerouting_ppp_cust -
        :nat_postrouting_ppp_cust -
        :nat_output_ppp_cust -
        # chains for public/private natting, see above
        :nat_pub1 -
        :nat_pub2 -
        :nat_priv1 -
        :nat_priv2 -
        -A PREROUTING -j nat_prerouting
        -A POSTROUTING -j nat_postrouting
        -A OUTPUT -j nat_output
        %(nat_rules)s
        COMMIT
        *mangle
        :PREROUTING ACCEPT
        :INPUT ACCEPT
        :FORWARD ACCEPT
        :OUTPUT ACCEPT
        :POSTROUTING ACCEPT
        :mangle_prerouting -
        :mangle_input -
        :mangle_forward -
        :mangle_output -
        :mangle_postrouting -
        :mangle_prerouting_ppp -
        :mangle_input_ppp -
        :mangle_forward_ppp -
        :mangle_output_ppp -
        :mangle_postrouting_ppp -
        :mangle_prerouting_ppp_cust -
        :mangle_input_ppp_cust -
        :mangle_forward_ppp_cust -
        :mangle_output_ppp_cust -
        :mangle_postrouting_ppp_cust -
        -A PREROUTING -j mangle_prerouting
        -A INPUT -j mangle_input
        -A FORWARD -j mangle_forward
        -A OUTPUT -j mangle_output
        -A POSTROUTING -j mangle_postrouting
        %(mangle_rules)s
        COMMIT
        # end of script.
        """) % {'raw_rules':raw_rules, 'filter_rules':filter_rules, 'nat_rules':nat_rules, 'mangle_rules':mangle_rules, 'ppp_firewall_rules':ppp_firewall_rules}

    _log.debug('iptables-restore script dump:')
    for i, l in enumerate(tables.split('\n')):
        _log.debug('%d: %s' % (i+1, l))

    # apply the whole ruleset atomically via iptables-restore
    (retval, retout, reterr) = run_command([constants.CMD_IPTABLES_RESTORE], stdin=tables.encode('ascii'), retval=runcommand.FAIL)
    _log.debug('iptables-restore => %s\n%s\n%s' % (retval, retout, reterr))
def _resolve_ppp_dns_wins_servers(self, cfg, pub_addrinfo, priv_addrinfo):
    """Resolve the DNS and WINS server lists handed out to PPP clients.

    Servers are either statically configured or taken from the DHCP lease
    info (pub_addrinfo / priv_addrinfo) of the configured interface.

    @param cfg: RDF configuration root node.
    @param pub_addrinfo: DHCP address info for the public interface, or None.
    @param priv_addrinfo: DHCP address info for the private interface, or None.
    @return: (dns_list, wins_list) of ResolvedDnsServer / ResolvedWinsServer.
    """
    # NOTE(review): this method is defined twice in this module with
    # identical logic; Python keeps the later definition, so this earlier
    # copy is effectively dead code and should be removed.
    # NOTE(review): if pppDnsServers/pppWinsServers is absent from the
    # config, dns_list/wins_list is never bound and the final return raises
    # UnboundLocalError -- presumably the config always contains them; verify.
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))

    # dns servers
    if ppp_cfg.hasS(ns.pppDnsServers):
        dns_cfg = ppp_cfg.getS(ns.pppDnsServers)
        dns_list = None
        if dns_cfg.hasType(ns.StaticDnsServers):
            # statically configured server list
            dns_list = []
            for i in dns_cfg.getS(ns.addressList, rdf.Seq(rdf.Type(ns.DnsServer))):
                srv = ResolvedDnsServer()
                srv.address = i.getS(ns.address, rdf.IPv4Address)
                srv.rdf_server_list = dns_cfg
                srv.from_dhcp = False
                srv.from_dhcp_rdf_interface = None
                dns_list.append(srv)
        elif dns_cfg.hasType(ns.DhcpDnsServers):
            # servers from the DHCP lease of the selected interface
            iface = dns_cfg.getS(ns.interface, rdf.Type(ns.NetworkInterface))
            if iface == pub_if:
                if pub_addrinfo is not None:
                    dns_list = []
                    for i in pub_addrinfo.dns_servers:
                        srv = ResolvedDnsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = dns_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = pub_if
                        dns_list.append(srv)
                else:
                    # no lease info yet -> empty server list
                    dns_list = []
            elif iface == priv_if:
                if priv_addrinfo is not None:
                    dns_list = []
                    for i in priv_addrinfo.dns_servers:
                        srv = ResolvedDnsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = dns_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = priv_if
                        dns_list.append(srv)
                else:
                    dns_list = []
            else:
                raise Exception('unknown interface for dhcp-assigned dns servers for ppp')
        else:
            # XXX: better exception? InternalError or something?
            raise Exception('unknown dns servers variant for ppp')

    # wins servers
    if ppp_cfg.hasS(ns.pppWinsServers):
        wins_cfg = ppp_cfg.getS(ns.pppWinsServers)
        wins_list = None
        if wins_cfg.hasType(ns.StaticWinsServers):
            # statically configured server list
            wins_list = []
            for i in wins_cfg.getS(ns.addressList, rdf.Seq(rdf.Type(ns.WinsServer))):
                srv = ResolvedWinsServer()
                srv.address = i.getS(ns.address, rdf.IPv4Address)
                srv.rdf_server_list = wins_cfg
                srv.from_dhcp = False
                srv.from_dhcp_rdf_interface = None
                wins_list.append(srv)
        elif wins_cfg.hasType(ns.DhcpWinsServers):
            # servers from the DHCP lease of the selected interface
            iface = wins_cfg.getS(ns.interface, rdf.Type(ns.NetworkInterface))
            if iface == pub_if:
                if pub_addrinfo is not None:
                    wins_list = []
                    for i in pub_addrinfo.wins_servers:
                        srv = ResolvedWinsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = wins_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = pub_if
                        wins_list.append(srv)
                else:
                    wins_list = []
            elif iface == priv_if:
                if priv_addrinfo is not None:
                    wins_list = []
                    for i in priv_addrinfo.wins_servers:
                        srv = ResolvedWinsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = wins_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = priv_if
                        wins_list.append(srv)
                else:
                    wins_list = []
            else:
                raise Exception('unknown interface for dhcp-assigned wins servers for ppp')
        else:
            # XXX: better exception? InternalError or something?
            raise Exception('unknown wins servers variant for ppp')

    return dns_list, wins_list
def _resolve_ppp_dns_wins_servers(self, cfg, pub_addrinfo, priv_addrinfo):
    """Resolve the DNS and WINS server lists handed out to PPP clients.

    Servers are either statically configured or taken from the DHCP lease
    info (pub_addrinfo / priv_addrinfo) of the configured interface.

    @param cfg: RDF configuration root node.
    @param pub_addrinfo: DHCP address info for the public interface, or None.
    @param priv_addrinfo: DHCP address info for the private interface, or None.
    @return: (dns_list, wins_list) of ResolvedDnsServer / ResolvedWinsServer.
    """
    # NOTE(review): this is an exact duplicate of a definition earlier in
    # this module; this later copy is the one Python keeps.  The duplicate
    # should be removed.
    # NOTE(review): if pppDnsServers/pppWinsServers is absent from the
    # config, dns_list/wins_list is never bound and the final return raises
    # UnboundLocalError -- presumably the config always contains them; verify.
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))

    # dns servers
    if ppp_cfg.hasS(ns.pppDnsServers):
        dns_cfg = ppp_cfg.getS(ns.pppDnsServers)
        dns_list = None
        if dns_cfg.hasType(ns.StaticDnsServers):
            # statically configured server list
            dns_list = []
            for i in dns_cfg.getS(ns.addressList, rdf.Seq(rdf.Type(ns.DnsServer))):
                srv = ResolvedDnsServer()
                srv.address = i.getS(ns.address, rdf.IPv4Address)
                srv.rdf_server_list = dns_cfg
                srv.from_dhcp = False
                srv.from_dhcp_rdf_interface = None
                dns_list.append(srv)
        elif dns_cfg.hasType(ns.DhcpDnsServers):
            # servers from the DHCP lease of the selected interface
            iface = dns_cfg.getS(ns.interface, rdf.Type(ns.NetworkInterface))
            if iface == pub_if:
                if pub_addrinfo is not None:
                    dns_list = []
                    for i in pub_addrinfo.dns_servers:
                        srv = ResolvedDnsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = dns_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = pub_if
                        dns_list.append(srv)
                else:
                    # no lease info yet -> empty server list
                    dns_list = []
            elif iface == priv_if:
                if priv_addrinfo is not None:
                    dns_list = []
                    for i in priv_addrinfo.dns_servers:
                        srv = ResolvedDnsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = dns_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = priv_if
                        dns_list.append(srv)
                else:
                    dns_list = []
            else:
                raise Exception('unknown interface for dhcp-assigned dns servers for ppp')
        else:
            # XXX: better exception? InternalError or something?
            raise Exception('unknown dns servers variant for ppp')

    # wins servers
    if ppp_cfg.hasS(ns.pppWinsServers):
        wins_cfg = ppp_cfg.getS(ns.pppWinsServers)
        wins_list = None
        if wins_cfg.hasType(ns.StaticWinsServers):
            # statically configured server list
            wins_list = []
            for i in wins_cfg.getS(ns.addressList, rdf.Seq(rdf.Type(ns.WinsServer))):
                srv = ResolvedWinsServer()
                srv.address = i.getS(ns.address, rdf.IPv4Address)
                srv.rdf_server_list = wins_cfg
                srv.from_dhcp = False
                srv.from_dhcp_rdf_interface = None
                wins_list.append(srv)
        elif wins_cfg.hasType(ns.DhcpWinsServers):
            # servers from the DHCP lease of the selected interface
            iface = wins_cfg.getS(ns.interface, rdf.Type(ns.NetworkInterface))
            if iface == pub_if:
                if pub_addrinfo is not None:
                    wins_list = []
                    for i in pub_addrinfo.wins_servers:
                        srv = ResolvedWinsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = wins_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = pub_if
                        wins_list.append(srv)
                else:
                    wins_list = []
            elif iface == priv_if:
                if priv_addrinfo is not None:
                    wins_list = []
                    for i in priv_addrinfo.wins_servers:
                        srv = ResolvedWinsServer()
                        srv.address = i  # IPv4Address
                        srv.rdf_server_list = wins_cfg
                        srv.from_dhcp = True
                        srv.from_dhcp_rdf_interface = priv_if
                        wins_list.append(srv)
                else:
                    wins_list = []
            else:
                raise Exception('unknown interface for dhcp-assigned wins servers for ppp')
        else:
            # XXX: better exception? InternalError or something?
            raise Exception('unknown wins servers variant for ppp')

    return dns_list, wins_list
def _resolve_routes(self, cfg, pub_addrinfo, priv_addrinfo, route_seq): net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig)) (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg) # # XXX: handle overlapping static + dhcp route (prefer dhcp) # XXX: at least log overlapping routes in general # ret = [] # figure out unique subnets subdict = {} # dict of subnets -> route list subnets = [] # unique subnets for r in route_seq: subnet = r.getS(ns.address, rdf.IPv4Subnet) subnet_str = subnet.toString() if not subdict.has_key(subnet_str): subdict[subnet_str] = [] subnets.append(subnet) t = subdict[subnet_str] t.append(r) # sort _log.debug('route keys (subnets), no sort:') for i in subnets: _log.debug(' ' + i.toString()) subnets.sort() _log.debug('route keys (subnets), after sort:') for i in subnets: _log.debug(' ' + i.toString()) # resolve routes in sorted order (starting from "widest") got_default_route = False for subnet in subnets: _log.debug('processing subnet %s' % subnet.toString()) # apply first working route for this subnet got_route_for_this_subnet = False for r in subdict[subnet.toString()]: rt = self._resolve_one_route(r, pub_iface, priv_iface, pub_addrinfo, priv_addrinfo) # must have router or blackhole if rt.router is None and not rt.blackhole: _log.warning( 'cannot figure out router for route %s, skipping' % rt.subnet.toString()) continue ret.append(rt) if rt.subnet == datatypes.IPv4Subnet.fromString('0.0.0.0/0'): got_default_route = True # success, skip other routes with this same subnet got_route_for_this_subnet = True break if not got_route_for_this_subnet: # XXX: should this be 'raise'? _log.warning('could not resolve a route for subnet %s' % subnet.toString()) if not got_default_route: _log.warning('did not get a default route') return ret
def create_config(self, cfg, res_info):
    """Create OpenL2tp configuration file as string.

    Builds PPP, tunnel, session, and peer profile command blocks for the
    openl2tp configuration client and stores the result in self.configs.
    Also sets self.ip_address (for get_args()) and self.debug_on.

    @param cfg: RDF configuration root node.
    @param res_info: resolved runtime info (interface addresses, DNS/WINS
        server lists) produced earlier in startup.
    """
    # This is for get_args() to later pick up
    self.ip_address = res_info.public_interface.address.getAddress().toString()

    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
    ppp_auth = ppp_cfg.getS(ns.pppAuthentication, rdf.Type(ns.PppAuthentication))
    ppp_comp = ppp_cfg.getS(ns.pppCompression, rdf.Type(ns.PppCompression))
    l2tp_cfg = cfg.getS(ns.l2tpConfig, rdf.Type(ns.L2tpConfig))

    # XXX: (do we need a patch for these?)
    # - noipx, crtscts, lock: are not used by openl2tp
    # Note:
    # - noipdefault, nodetach, local: always passed to pppd by openl2tp

    # Note: The receive port is not changeable and no point in
    # setting the local port because of the one-udp-port -patch.

    # Note: could set the openl2tp local sending port which would
    # disable the ephemeral port use, but this is not required while
    # we use the one-socket patch.

    self.debug_on = helpers.get_debug(cfg)

    # XXX: it seems like openl2tp has *ppp* profile trace flags
    # all enabled by default and others (tunnel, session, system) not..
    # there could be other debug flags, too, which would affect f.ex
    # openl2tp and pluto

    # the server itself claims the last usable address of the PPP subnet,
    # so the subnet must have room for it plus at least one client (/30 max)
    ppp_subnet = ppp_cfg.getS(ns.pppSubnet, rdf.IPv4Subnet)
    if ppp_subnet.getCidr() > 30:
        raise Exception('PPP subnet does not contain enough usable addresses')
    local_ip = ppp_subnet.getLastUsableAddress()

    # Note: hostname is not settable, but openl2tp derives it from
    # system hostname.
    # Note: vendor_name is not settable (and not used for anything more
    # than testing code) in openl2tp
    # Note: tunnelrws option does not exist in openl2tp
    # but could set the tx/rx window sizes

    # Note: not settable through openl2tp.
    # this has effect only when connect or pty options are used
    # in pppd config and thus is not required here.
    # connect_delay = '5000'

    # Note: openl2tp always uses lenght bit, so "length bit = yes"
    # or similar is not required in config.

    # PPP profile
    params = {}
    params['prefix'] = 'ppp profile modify profile_name=default'
    params['idle_timeout'] = '0'
    if ppp_cfg.hasS(ns.pppIdleTimeout):
        # short timeouts (less than 10 seconds, say) are not sane, but we
        # assume the user interface checks for sanity
        params['idle_timeout'] = str(ppp_cfg.getS(ns.pppIdleTimeout, rdf.Timedelta).seconds)  # truncate
        self._log.warning('idle timeout specified, not robust with many clients')
    params['mtu'] = str(ppp_cfg.getS(ns.pppMtu, rdf.Integer))
    params['mru'] = params['mtu']
    params['local_ipaddr'] = local_ip.toString()

    # XXX: if no echo failure specified, then the tunnels may never die.
    #   - tunnels have hello_interval but it only controls of the
    #     frequency of the sent HELLO messages
    #   - tunnels have idle timeout, but it has meaning only when all the
    #     sessions for tunnel have died out
    #   - sessions themselves do not die unless pppd terminates because
    #     they have no timeout..

    # Note: be careful with PPP options -> delete or empty config files!
    #   - some options in the /etc/ppp/options file have priority over
    #     command-line options
    #   - openl2tp options are always command-line options
    #   - this may lead to strange behaviour if there are old config
    #     files still hanging around..
    params['lcp_echo_interval'] = '0'
    params['lcp_echo_failure'] = '0'
    if ppp_cfg.hasS(ns.pppLcpEchoInterval):
        params['lcp_echo_interval'] = str(ppp_cfg.getS(ns.pppLcpEchoInterval, rdf.Timedelta).seconds)
        params['lcp_echo_failure'] = str(ppp_cfg.getS(ns.pppLcpEchoFailure, rdf.Integer))

    # authentication options; unsupported protocols are logged and ignored
    params['auth_pap'] = 'no'
    if ppp_auth.hasS(ns.pppPap) and ppp_auth.getS(ns.pppPap, rdf.Boolean):
        params['auth_pap'] = 'yes'
    params['auth_chap'] = 'no'
    if ppp_auth.hasS(ns.pppChap) and ppp_auth.getS(ns.pppChap, rdf.Boolean):
        params['auth_chap'] = 'yes'
    # MSCHAPv1 had problems with pppd RADIUS support
    params['auth_mschapv1'] = 'no'
    if ppp_auth.hasS(ns.pppMschap) and ppp_auth.getS(ns.pppMschap, rdf.Boolean):
        self._log.warn('auth mschapv1 enabled in config but not supported, ignoring')
    params['auth_mschapv2'] = 'no'
    if ppp_auth.hasS(ns.pppMschapV2) and ppp_auth.getS(ns.pppMschapV2, rdf.Boolean):
        params['auth_mschapv2'] = 'yes'
    params['auth_eap'] = 'no'
    if ppp_auth.hasS(ns.pppEap) and ppp_auth.getS(ns.pppEap, rdf.Boolean):
        self._log.warn('eap enabled in config but not supported, ignoring')

    # compression options
    params['comp_mppc'] = 'no'
    if ppp_comp.hasS(ns.pppMppc) and ppp_comp.getS(ns.pppMppc, rdf.Boolean):
        params['comp_mppc'] = 'yes'
    params['comp_mppe'] = 'no'
    if ppp_comp.hasS(ns.pppMppe) and ppp_comp.getS(ns.pppMppe, rdf.Boolean):
        params['comp_mppe'] = 'yes'
    params['comp_accomp'] = 'no'
    if ppp_comp.hasS(ns.pppAccomp) and ppp_comp.getS(ns.pppAccomp, rdf.Boolean):
        params['comp_accomp'] = 'yes'
    params['comp_pcomp'] = 'no'
    if ppp_comp.hasS(ns.pppPcomp) and ppp_comp.getS(ns.pppPcomp, rdf.Boolean):
        params['comp_pcomp'] = 'yes'
    params['comp_bsdcomp'] = 'no'
    if ppp_comp.hasS(ns.pppBsdcomp) and ppp_comp.getS(ns.pppBsdcomp, rdf.Boolean):
        params['comp_bsdcomp'] = 'yes'
    params['comp_deflate'] = 'no'
    if ppp_comp.hasS(ns.pppDeflate) and ppp_comp.getS(ns.pppDeflate, rdf.Boolean):
        params['comp_deflate'] = 'yes'
    params['comp_predictor1'] = 'no'
    if ppp_comp.hasS(ns.pppPredictor1) and ppp_comp.getS(ns.pppPredictor1, rdf.Boolean):
        params['comp_predictor1'] = 'yes'
    params['comp_vj'] = 'no'
    if ppp_comp.hasS(ns.pppVj) and ppp_comp.getS(ns.pppVj, rdf.Boolean):
        params['comp_vj'] = 'yes'
    params['comp_ccomp_vj'] = 'no'
    if ppp_comp.hasS(ns.pppCcompVj) and ppp_comp.getS(ns.pppCcompVj, rdf.Boolean):
        params['comp_ccomp_vj'] = 'yes'

    # sanity checks
    if params['comp_pcomp'] == 'yes':
        self._log.warning('pcomp enabled - this breaks in mppc: disabling')
        params['comp_pcomp'] = 'no'
    if params['comp_mppe'] == 'yes':
        self._log.warning('mppe enabled - not handled by protocol: disabling')
        params['comp_mppe'] = 'no'

    # dns servers ('0' means unset; pppd tolerates 0.0.0.0)
    params['dns_ipaddr_pri'] = '0'
    params['dns_ipaddr_sec'] = '0'
    dns_list = res_info.ppp_dns_servers
    if len(dns_list) > 0:
        params['dns_ipaddr_pri'] = dns_list[0].address.toString()
    if len(dns_list) > 1:
        params['dns_ipaddr_sec'] = dns_list[1].address.toString()

    # wins servers
    params['wins_ipaddr_pri'] = '0'
    params['wins_ipaddr_sec'] = '0'
    wins_list = res_info.ppp_wins_servers
    if len(wins_list) > 0:
        params['wins_ipaddr_pri'] = wins_list[0].address.toString()
    if len(wins_list) > 1:
        params['wins_ipaddr_sec'] = wins_list[1].address.toString()

    # XXX: check and set sensible values, these are defaults
    params['max_connect_time'] = '0'
    params['max_failure_count'] = '10'

    # NB: This is actually not set, because it causes problems in Openl2tp
    # (boolean argument doesn't work correctly; it will actually be set!)
    params['default_route'] = 'no'
    params['multilink'] = 'no'

    # NB: always use only radius, also local users are from the local radius server
    params['use_radius'] = 'yes'
    # Force radius plugin to use proper config file of radiusclient-ng
    params['radius_hint'] = constants.RADIUSCLIENT_CONFIG

    # Note: there seems to be quite real disagreement between
    # openl2tp configration interface and actual used/set configuration
    # values in openl2tpd:
    # - dns1=0 seems to work in configuration client, but actually it
    #   sets the IP address as 0.0.0.0 in pppd config
    # - the zero IP:s do not seem to have any effect because pppd is
    #   resilient.
    # - etc..

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    # NB: the trailing backslashes inside the template are Python string
    # line-continuations: the affected openl2tp options end up on a single
    # physical line in the generated config, as openl2tp requires.
    ppp_conf = textwrap.dedent("""\
%(prefix)s ip_pool_name=clientpool
%(prefix)s default_route=%(default_route)s
%(prefix)s multilink=%(multilink)s
%(prefix)s use_radius=%(use_radius)s
%(prefix)s radius_hint=%(radius_hint)s
%(prefix)s idle_timeout=%(idle_timeout)s
%(prefix)s mtu=%(mtu)s
%(prefix)s mru=%(mru)s
%(prefix)s local_ipaddr=%(local_ipaddr)s
%(prefix)s lcp_echo_interval=%(lcp_echo_interval)s
%(prefix)s lcp_echo_failure_count=%(lcp_echo_failure)s
# Note: all auth options must be on one line
%(prefix)s \
req_none=no \
auth_pap=no \
auth_chap=no \
auth_mschapv1=no \
auth_mschapv2=no \
auth_eap=no \
req_pap=%(auth_pap)s \
req_chap=%(auth_chap)s \
req_mschapv1=%(auth_mschapv1)s \
req_mschapv2=%(auth_mschapv2)s \
req_eap=%(auth_eap)s
%(prefix)s \
mppe=%(comp_mppe)s
%(prefix)s \
comp_mppc=%(comp_mppc)s \
comp_accomp=%(comp_accomp)s \
comp_pcomp=%(comp_pcomp)s \
comp_bsdcomp=%(comp_bsdcomp)s \
comp_deflate=%(comp_deflate)s \
comp_predictor1=%(comp_predictor1)s \
comp_vj=%(comp_vj)s \
comp_ccomp_vj=%(comp_ccomp_vj)s
%(prefix)s dns_ipaddr_pri=%(dns_ipaddr_pri)s
%(prefix)s dns_ipaddr_sec=%(dns_ipaddr_sec)s
%(prefix)s wins_ipaddr_pri=%(wins_ipaddr_pri)s
%(prefix)s wins_ipaddr_sec=%(wins_ipaddr_sec)s
%(prefix)s max_connect_time=%(max_connect_time)s
%(prefix)s max_failure_count=%(max_failure_count)s
%(prefix)s trace_flags=%(trace_flags)s
""") % params

    # Tunnel profile
    params = {}
    params['prefix'] = 'tunnel profile modify profile_name=default'

    # Default responder port
    params['our_port'] = '1701'

    # XXX: better values, these are defaults.
    # NB: this works ok in practice, and no need to change if no problems seen.
    params['mtu'] = '1460'

    # This might affect socket behaviour or the pppol2tp kernel module..
    # XXX: this is default in openl2tp code
    # do we need to configure this?
    params['hello_timeout'] = '60'
    params['retry_timeout'] = '1'

    # Note: must set this to some value other than zero to prevent
    # tunnels from hanging when all connections (sessions) are dead
    params['idle_timeout'] = '1800'  # 30 minutes

    params['rx_window_size'] = '4'
    params['tx_window_size'] = '10'
    params['max_retries'] = '5'

    # XXX: better values, these are defaults
    # possible: none,digital,analog,any
    params['framing_caps'] = 'any'
    params['bearer_caps'] = 'any'

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    tunnel_conf = textwrap.dedent("""\
        %(prefix)s our_udp_port=%(our_port)s
        %(prefix)s mtu=%(mtu)s
        %(prefix)s hello_timeout=%(hello_timeout)s
        %(prefix)s retry_timeout=%(retry_timeout)s
        %(prefix)s idle_timeout=%(idle_timeout)s
        %(prefix)s rx_window_size=%(rx_window_size)s
        %(prefix)s tx_window_size=%(tx_window_size)s
        %(prefix)s max_retries=%(max_retries)s
        %(prefix)s framing_caps=%(framing_caps)s
        %(prefix)s bearer_caps=%(bearer_caps)s
        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

    # Session profile
    params = {}
    params['prefix'] = 'session profile modify profile_name=default'

    # XXX: should we use sequence numbers for data? maybe not.
    # ppp will receive the packets anyway.  reordering might matter
    # for control packets, but that should not happen anyway.
    params['sequencing_required'] = 'no'
    params['use_sequence_numbers'] = 'no'

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    session_conf = textwrap.dedent("""\
        %(prefix)s sequencing_required=%(sequencing_required)s
        %(prefix)s use_sequence_numbers=%(use_sequence_numbers)s
        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

    # Peer profile
    # Note: no trace flags available for peer profile.. duh.
    params = {}
    params['prefix'] = 'peer profile modify profile_name=default'
    peer_conf = textwrap.dedent("""\
        """) % params

    self.configs = [{'file': constants.OPENL2TP_CONF,
                     'cont': ppp_conf + tunnel_conf + session_conf + peer_conf}]
def reconcile_system_and_rdf_state(self, first_time=False):
    """Reconcile system and RDF states.

    Checks system device list and compares it against RDF device
    information.  Extra PPP devices without RDF book-keeping information
    are terminated.  Devices present in RDF but not in the system are
    also removed from RDF.  After calling this function, the system and
    RDF should be reasonably synchronized.

    This function also detects and updates the 'liveness' of each PPP
    device.  Devices which are not live are not taken into account in
    license computations, thus making their detection important.

    The function also updates global public/private interface rx/tx
    counters, rates, etc.

    This function should be called periodically, with an interval of
    1-10 minutes or so.

    The first_time flag is forwarded to _update_public_private_ifaces();
    presumably it marks the first reconcile after startup -- TODO confirm
    against the caller.
    """
    #
    # XXX: If status is incomplete for some reason, this will now
    # spout exceptions.  The code should check whether checking and
    # updating e.g. public interface status is useful at this point.
    #
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(helpers.get_config())

    # start timestamp; a single timestamp is used for all updates below
    # so counters/liveness share a consistent notion of "now"
    now = datetime.datetime.utcnow()

    # get device info from system (ip command)
    _log.debug('reconcile: getting system info')
    ifaces = interfacehelper.get_interfaces()

    # build devname->node dict
    _log.debug('reconcile: build devname->rdf dev dict')
    rdfdevs = helpers.get_ppp_devices()
    rdfdevmap = self._build_rdf_devmap(rdfdevs)

    # reconcile devices, pass 1: system vs rdf [collect sysdevs dict at the same time]
    _log.debug('reconcile: pass 1, system vs rdf')
    sysdevs, pub_di, priv_di = self._reconcile_system_vs_rdf(rdfdevmap, ifaces, pub_if_name, priv_if_name)

    # do sysnukes for devices exceeding threshold
    # NOTE: .keys() snapshot makes the in-loop del safe; the counter entry
    # is removed only when the nuke succeeds, so a failed nuke is retried
    # on the next reconcile round
    for k in self._sysnuke_failures.keys():
        if self._sysnuke_failures[k] >= SYSNUKE_FAILURE_LIMIT:
            try:
                _log.warning('sysnuke failure count for device %s too high, nuking' % k)
                self._nuke_system_device(k)
                del self._sysnuke_failures[k]
            except:
                _log.exception('sysnuke failed')

    # reconcile devices, pass 2: rdf vs system
    _log.debug('reconcile: pass 2, rdf vs system')
    self._reconcile_rdf_vs_system(rdfdevs, sysdevs)

    # do rdfnukes for devices exceeding threshold
    for k in self._rdfnuke_failures.keys():  # key = rdf node of type PppDevice
        if self._rdfnuke_failures[k] >= RDFNUKE_FAILURE_LIMIT:
            try:
                # resolve a human-readable name for logging only
                if k.hasS(ns.deviceName):
                    devname = k.getS(ns.deviceName, rdf.String)
                else:
                    devname = '<unknown devname>'
                _log.warning('rdfnuke failure count for device %s (%s) too high, nuking' % (k, devname))
                self._nuke_rdf_device(k)
                del self._rdfnuke_failures[k]
            except:
                _log.exception('failed during reconcile, skipping device')
                # NB: device will be left in _rdfnuke_failures, and be removed next time

    # update rx/tx counters, transfer rates, etc
    # (reget devs because we may have nuked something)
    _log.debug('reconcile: updating rx/tx counters')
    rdfdevs = helpers.get_ppp_devices()
    self._update_rxtx_etc(now, rdfdevs, sysdevs)

    # update device liveness status
    # (reget again: _update_rxtx_etc may have modified RDF state -- TODO confirm)
    _log.debug('reconcile: updating liveness status')
    rdfdevs = helpers.get_ppp_devices()
    self._update_liveness_status(now, rdfdevs)

    # update public/private interface status
    _log.debug('reconcile: updating public and/or private interface status')
    self._update_public_private_ifaces(now, ifaces, pub_di, priv_di, first_time)
def _create_config(self, cfg, resinfo, timeout, retry, initial_interval, select_timeout, importpath): """Create dhclient configuration files.""" args = '' self.ifaces = [] (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg) if helpers.is_dhcp_interface(pub_iface): self.ifaces.append(pub_iface_name) args += "%r" % pub_iface_name else: args += '%r' % None if helpers.is_dhcp_interface(priv_iface): self.ifaces.append(priv_iface_name) args += ", %r" % priv_iface_name else: args += ', %r' % None if len(self.ifaces) == 0: raise Exception( 'Expected at least one configured DHCP interface, but found none.' ) conf = textwrap.dedent("""\ # - automatically created file, do not modify. timeout %(timeout)s; retry %(retry)s; initial-interval %(initial_interval)s; # select-timeout %(select_timeout)s; send dhcp-lease-time 28800; # default is only 3600 seconds request subnet-mask, routers, domain-name-servers, netbios-name-servers; require subnet-mask; """) % { 'timeout': timeout, 'retry': retry, 'initial_interval': initial_interval, 'select_timeout': select_timeout } do_import = '' if importpath != 'system': do_import = 'sys.path = "%s".split(' ') + sys.path' % importpath script = textwrap.dedent("""\ #!/usr/bin/python import sys %(do_import)s try: from codebay.l2tpserver import dhcpscript d = dhcpscript.DhcpScript(%(args)s) d.run() except: pass """) % { 'do_import': do_import, 'args': args } leases = textwrap.dedent("""\ """) self.configs = [{ 'file': constants.DHCLIENT_CONF, 'cont': conf, 'mode': 0644 }, { 'file': constants.DHCLIENT_SCRIPT, 'cont': script, 'mode': 0755 }, { 'file': constants.DHCLIENT_LEASES, 'cont': leases, 'mode': 0664 }]
def reconcile_system_and_rdf_state(self, first_time=False):
    """Reconcile system and RDF states.

    Compares the system PPP device list (from the ip command) against the
    RDF book-keeping: system devices with no RDF record are terminated,
    and RDF records with no matching system device are dropped.  After a
    call the two views should be reasonably synchronized.

    Per-device 'liveness' is also refreshed here; non-live devices are
    excluded from license computations, so their detection matters.
    Global public/private interface rx/tx counters and rates are updated
    as well.

    Intended to be called periodically, roughly every 1-10 minutes.
    """
    # XXX: If status is incomplete for some reason, this will now spout
    # exceptions.  The code should check whether checking and updating
    # e.g. public interface status is useful at this point.

    cfg = helpers.get_config()
    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)

    # one timestamp for the whole reconcile round
    start_ts = datetime.datetime.utcnow()

    _log.debug('reconcile: getting system info')
    system_ifaces = interfacehelper.get_interfaces()

    _log.debug('reconcile: build devname->rdf dev dict')
    rdf_devices = helpers.get_ppp_devices()
    devmap = self._build_rdf_devmap(rdf_devices)

    # pass 1: system vs rdf (also collects the sysdevs dict)
    _log.debug('reconcile: pass 1, system vs rdf')
    sysdevs, pub_di, priv_di = self._reconcile_system_vs_rdf(devmap, system_ifaces, pub_if_name, priv_if_name)

    # terminate system devices whose sysnuke failure count crossed the
    # threshold; counter entry is removed only on successful nuke, so a
    # failed nuke is retried on the next round
    for devname_key in self._sysnuke_failures.keys():
        if self._sysnuke_failures[devname_key] < SYSNUKE_FAILURE_LIMIT:
            continue
        try:
            _log.warning('sysnuke failure count for device %s too high, nuking' % devname_key)
            self._nuke_system_device(devname_key)
            del self._sysnuke_failures[devname_key]
        except:
            _log.exception('sysnuke failed')

    # pass 2: rdf vs system
    _log.debug('reconcile: pass 2, rdf vs system')
    self._reconcile_rdf_vs_system(rdf_devices, sysdevs)

    # drop RDF devices whose rdfnuke failure count crossed the threshold
    for dev_node in self._rdfnuke_failures.keys():  # key = rdf node of type PppDevice
        if self._rdfnuke_failures[dev_node] < RDFNUKE_FAILURE_LIMIT:
            continue
        try:
            # resolve a readable name for the log message only
            if dev_node.hasS(ns.deviceName):
                nuke_name = dev_node.getS(ns.deviceName, rdf.String)
            else:
                nuke_name = '<unknown devname>'
            _log.warning('rdfnuke failure count for device %s (%s) too high, nuking' % (dev_node, nuke_name))
            self._nuke_rdf_device(dev_node)
            del self._rdfnuke_failures[dev_node]
        except:
            _log.exception('failed during reconcile, skipping device')
            # NB: device will be left in _rdfnuke_failures, and be removed next time

    # refresh counters/rates; re-fetch devices since some may have been nuked
    _log.debug('reconcile: updating rx/tx counters')
    self._update_rxtx_etc(start_ts, helpers.get_ppp_devices(), sysdevs)

    _log.debug('reconcile: updating liveness status')
    self._update_liveness_status(start_ts, helpers.get_ppp_devices())

    _log.debug('reconcile: updating public and/or private interface status')
    self._update_public_private_ifaces(start_ts, system_ifaces, pub_di, priv_di, first_time)