def create_config_pluto(self, cfg, resinfo, extra_psks=[]):
    def _psk_sanity_check(pskbin):
        for i in xrange(len(pskbin)):
            # basic sanity check - XXX: insufficient
            c = ord(pskbin[i])
            if (c <= 0x20) or (c > 0x7e):
                raise Exception('PSK contains invalid character(s)')

    ipsec_cfg = cfg.getS(ns.ipsecConfig, rdf.Type(ns.IpsecConfig))
    ike_lifetime = ipsec_cfg.getS(ns.ikeLifeTime, rdf.Timedelta).seconds
    ipsec_lifetime = ipsec_cfg.getS(ns.ipsecLifeTime, rdf.Timedelta).seconds

    self.debug_on = helpers.get_debug(cfg)
    self.debug_heavy = helpers.get_debug_heavy(cfg)
    self.ip = resinfo.public_interface.address.getAddress().toString()
    self.ike_lifetime = str(ike_lifetime)
    self.ipsec_lifetime = str(ipsec_lifetime)

    ownaddr = resinfo.public_interface.address.getAddress().toString()
    psks = ipsec_cfg.getS(ns.preSharedKeys, rdf.Seq(rdf.Type(ns.PreSharedKey)))

    # log unusual psk amounts (0, >1)
    if len(psks) == 0:
        self._log.warning('zero psks')
    elif len(psks) > 1:
        self._log.info('more than one psk (%s)' % len(psks))
    else:
        self._log.debug('one psk, good')

    pskfile = ''

    def _encode_hex(s):
        r = '0x'
        for i in s:
            r += '%02x' % ord(i)
        return r

    # start with specific "extra_psks"
    for [addr, pskbin] in extra_psks:
        # XXX: no sanity check because using hex encoding
        # _psk_sanity_check(pskbin)
        pskfile += '%s : PSK %s\n' % (addr, _encode_hex(pskbin))

    # end with generic psks
    for psk in psks:
        pskbin = psk.getS(ns.preSharedKey, rdf.Binary)
        # XXX: no sanity check because using hex encoding
        # _psk_sanity_check(pskbin)
        pskfile += ': PSK %s\n' % _encode_hex(pskbin)

    self.configs = [{'file': constants.PLUTO_CONF, 'cont': pskfile, 'mode': 0600}]
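# A minimal sketch (not part of the generator above) of the secrets file format that
# create_config_pluto() emits: one ipsec.secrets-style PSK line per key, with the key
# hex-encoded exactly as _encode_hex() does. The address and key below are
# hypothetical example values.
def _example_pluto_psk_line(addr, pskbin):
    encoded = '0x' + ''.join(['%02x' % ord(c) for c in pskbin])  # same encoding as _encode_hex()
    if addr is None:
        return ': PSK %s\n' % encoded             # generic PSK line (any peer)
    return '%s : PSK %s\n' % (addr, encoded)      # per-address PSK line (extra_psks)

# _example_pluto_psk_line('192.0.2.1', 'secret') == '192.0.2.1 : PSK 0x736563726574\n'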
def create_config(self, cfg, resinfo):
    snmp_cfg = cfg.getS(ns.snmpConfig, rdf.Type(ns.SnmpConfig))
    snmp_community = snmp_cfg.getS(ns.snmpCommunity, rdf.String)
    snmp_syslocation = 'VPNease server'
    snmp_syscontact = 'None'
    vpnease_mib = constants.SNMP_MIB_MODULE_SO

    self.debug_on = helpers.get_debug(cfg)
    self.debug_heavy = helpers.get_debug_heavy(cfg)

    # XXX: set syslocation and syscontact more intelligently?

    snmpd_conf = textwrap.dedent("""\
        # Minimal configuration example for VPNease snmpd
        com2sec readonly default %(community)s
        group rogroup v1 readonly
        group rogroup v2c readonly
        group rogroup usm readonly
        #        incl/excl subtree mask
        view all included  .1      80
        #              context sec.model sec.level match read write notif
        access rogroup ""      any       noauth    exact all  none  none
        syslocation %(syslocation)s
        syscontact %(syscontact)s
        dlmod vpneaseMIB %(mibmodule)s
        """ % {'community': snmp_community,
               'syslocation': snmp_syslocation,
               'syscontact': snmp_syscontact,
               'mibmodule': vpnease_mib})

    self.configs = [{'file': constants.SNMPD_CONF, 'cont': snmpd_conf, 'mode': 0600}]
def create_config(self, cfg, resinfo):
    """Create ippool configuration file as string."""
    self.debug_on = helpers.get_debug(cfg)

    ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
    ppp_subnet = ppp_cfg.getS(ns.pppSubnet, rdf.IPv4Subnet)
    if ppp_subnet.getCidr() > 30:
        raise Exception('PPP subnet does not contain enough usable addresses')
    ip_range = ppp_cfg.getS(ns.pppRange, rdf.IPv4AddressRange)

    conf = textwrap.dedent("""\
        pool create pool_name=clientpool
        pool address add pool_name=clientpool first_addr=%s num_addrs=%s netmask=%s
        """) % (ip_range.getFirstAddress().toString(),
                str(ip_range.size()),
                ppp_subnet.getMask().toString())

    self.configs = [{'file': constants.IPPOOL_CONF, 'cont': conf}]
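# Sketch only (hypothetical addresses, not taken from any real configuration): for a
# PPP subnet of 192.0.2.0/24 and an address range starting at 192.0.2.10 with 91
# addresses, the template above would expand to the following ippool commands.
# textwrap is already imported by this module; repeated here so the sketch stands alone.
import textwrap

_example_ippool_conf = textwrap.dedent("""\
    pool create pool_name=clientpool
    pool address add pool_name=clientpool first_addr=%s num_addrs=%s netmask=%s
    """) % ('192.0.2.10', '91', '255.255.255.0')
# -> 'pool create pool_name=clientpool\n'
#    'pool address add pool_name=clientpool first_addr=192.0.2.10 num_addrs=91 netmask=255.255.255.0\n'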
def create_config(self, cfg, resinfo):
    """Create configuration file for ez-ipupdate.

    See http://www.shakabuku.org/writing/dyndns.html.
    """
    global_st = helpers.get_global_status()
    pub_iface, pub_iface_name = helpers.get_public_iface(cfg)
    pub_dyndns_cfg = helpers.get_dyndns_config(cfg)

    conf = textwrap.dedent("""\
        # intentionally empty
        """)

    self.do_start = False

    if pub_dyndns_cfg is not None:
        self.debug_on = helpers.get_debug(cfg)
        if self.debug_on:
            debug = 'debug'
        else:
            debug = ''

        self._log.debug('Dynamic DNS configured')

        provider = pub_dyndns_cfg.getS(ns.provider, rdf.String)
        username = pub_dyndns_cfg.getS(ns.username, rdf.String)
        password = pub_dyndns_cfg.getS(ns.password, rdf.String)
        hostname = pub_dyndns_cfg.getS(ns.hostname, rdf.String)

        # address selection is complicated due to many options
        address = None
        if pub_dyndns_cfg.hasS(ns.dynDnsAddress):
            addr = pub_dyndns_cfg.getS(ns.dynDnsAddress)
            if addr.hasType(ns.DynDnsInterfaceAddress):
                address = None
            elif addr.hasType(ns.DynDnsStaticAddress):
                address = addr.getS(ns.ipAddress, rdf.IPv4Address).toString()
            elif addr.hasType(ns.DynDnsManagementConnectionAddress):
                if global_st.hasS(ns.managementConnectionOurNattedAddress):
                    address = global_st.getS(ns.managementConnectionOurNattedAddress, rdf.IPv4Address).toString()
                else:
                    address = None
            else:
                raise Exception('invalid dynDnsAddress type')

        if address == '':
            address = None

        address_str = ''
        if address is not None:
            address_str = 'address=%s' % address

        interface_str = 'interface=%s' % pub_iface_name

        self._log.debug('Dynamic DNS parameters: provider=%s, username=%s, password=%s, hostname=%s, address=%s, interface=%s' % (provider, username, password, hostname, address, pub_iface_name))

        # NB: persistent cache is required for proper dyndns operation
        conf = textwrap.dedent("""\
            #!/usr/local/bin/ez-ipupdate -c
            service-type=%(provider)s
            user=%(username)s:%(password)s
            host=%(hostname)s
            %(address)s
            %(interface)s
            max-interval=2073600
            %(debug)s
            cache-file=%(cache_file_stem)s.%(pubif)s
            daemon
            """) % {'provider': provider,
                    'username': username,
                    'password': password,
                    'hostname': hostname,
                    'address': address_str,
                    'interface': interface_str,
                    'cache_file_stem': constants.EZIPUPDATE_CACHE,
                    'pubif': pub_iface_name,
                    'debug': debug}

        self.do_start = True
    else:
        self._log.debug('No dynamic DNS configured')

    self.configs = [{'file': constants.EZIPUPDATE_CONF, 'cont': conf, 'mode': 0755}]
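# For reference, a rough example of the generated ez-ipupdate configuration (all
# values hypothetical; 'dyndns' is one of the service types ez-ipupdate supports):
# for provider 'dyndns', hostname 'vpn.example.com' and public interface eth0 with
# no explicit address and debug off, the template above expands along these lines
# (the empty %(address)s and %(debug)s substitutions leave blank lines):
#
#   #!/usr/local/bin/ez-ipupdate -c
#   service-type=dyndns
#   user=exampleuser:examplepass
#   host=vpn.example.com
#
#   interface=eth0
#   max-interval=2073600
#
#   cache-file=<EZIPUPDATE_CACHE>.eth0
#   daemon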
def up_qos_rules(self, cfg):
    """Configure and enable quality-of-service configuration."""

    #def _compute_burst(kbits, mtu):
    #    assumed_hz = 250
    #    burst = (float(kbits) / float(assumed_hz))  # example: 1024kbit/s, hz=250 => 4kbit
    #    return '%fkbit' % min(burst*2, (mtu*8.0/1000.0))

    _log.debug('up_qos_rules')

    (pub_iface, pub_iface_name), (priv_iface, priv_iface_name) = helpers.get_ifaces(cfg)
    (_, proxyarp_interface) = helpers.get_proxyarp_iface(cfg)

    qos_cfg = helpers.get_qos_config(cfg)
    if qos_cfg.hasS(ns.globalUplinkRateLimit):
        pub_uplink = qos_cfg.getS(ns.globalUplinkRateLimit, rdf.Integer)
    else:
        pub_uplink = None

    # XXX: add to conf?
    pub_downlink = None
    priv_uplink = None
    priv_downlink = None

    pub_mtu, priv_mtu = helpers.get_iface_mtus(cfg)

    _log.debug('qos: %s, %s, %s, %s' % (pub_uplink, pub_downlink, priv_uplink, priv_downlink))

    def_tx_limit = 100   # packets
    sfq_perturb = 30     # seconds
    sfq_quantum = None   # XXX: should we set this? defaults to iface mtu

    if pub_iface_name is not None:
        run_command([constants.CMD_TC, 'qdisc', 'del', 'dev', pub_iface_name, 'root'])
    if priv_iface_name is not None:
        run_command([constants.CMD_TC, 'qdisc', 'del', 'dev', priv_iface_name, 'root'])

    if pub_iface_name is not None:
        if pub_uplink is None:
            # this leaves pfifo_fast in place
            pass
        else:
            pub_rate = '%skbit' % pub_uplink   # only uplink rate is relevant
            #pub_ceil = pub_rate
            #pub_burst = _compute_burst(pub_uplink, pub_mtu)

            run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', pub_iface_name,
                         'root', 'handle', '1:', 'htb', 'default', '1'],
                        retval=runcommand.FAIL)
            run_command([constants.CMD_TC, 'class', 'add', 'dev', pub_iface_name,
                         'parent', '1:', 'classid', '1:1', 'htb',
                         'rate', pub_rate, 'quantum', str(pub_mtu)],
                        retval=runcommand.FAIL)
            run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', pub_iface_name,
                         'parent', '1:1', 'handle', '10:', 'sfq',
                         'perturb', str(sfq_perturb)],
                        retval=runcommand.FAIL)

    if priv_iface_name is not None:
        if priv_uplink is None:
            # this leaves pfifo_fast in place
            pass
        else:
            priv_rate = '%skbps' % priv_uplink
            #priv_ceil = priv_rate
            #priv_burst = _compute_burst(priv_uplink, priv_mtu)

            run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', priv_iface_name,
                         'root', 'handle', '2:', 'htb', 'default', '1'],
                        retval=runcommand.FAIL)
            run_command([constants.CMD_TC, 'class', 'add', 'dev', priv_iface_name,
                         'parent', '2:', 'classid', '2:1', 'htb',
                         'rate', priv_rate, 'quantum', str(priv_mtu)],
                        retval=runcommand.FAIL)
            run_command([constants.CMD_TC, 'qdisc', 'add', 'dev', priv_iface_name,
                         'parent', '2:1', 'handle', '20:', 'sfq',
                         'perturb', str(sfq_perturb)],
                        retval=runcommand.FAIL)

    if helpers.get_debug(cfg):
        run_command([constants.CMD_TC, '-d', 'qdisc', 'show'])
        run_command([constants.CMD_TC, '-d', 'class', 'show'])
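# For reference (hypothetical values): with globalUplinkRateLimit=1024 (kbit/s), a
# public interface named eth0 and a public MTU of 1500, the run_command() calls above
# amount to roughly this tc command sequence:
#
#   tc qdisc del dev eth0 root
#   tc qdisc add dev eth0 root handle 1: htb default 1
#   tc class add dev eth0 parent 1: classid 1:1 htb rate 1024kbit quantum 1500
#   tc qdisc add dev eth0 parent 1:1 handle 10: sfq perturb 30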
def create_config(self, cfg, res_info):
    """Create OpenL2tp configuration file as string."""

    # This is for get_args() to later pick up
    self.ip_address = res_info.public_interface.address.getAddress().toString()

    (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
    net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
    ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
    ppp_auth = ppp_cfg.getS(ns.pppAuthentication, rdf.Type(ns.PppAuthentication))
    ppp_comp = ppp_cfg.getS(ns.pppCompression, rdf.Type(ns.PppCompression))
    l2tp_cfg = cfg.getS(ns.l2tpConfig, rdf.Type(ns.L2tpConfig))

    # XXX: (do we need a patch for these?)
    #   - noipx, crtscts, lock: are not used by openl2tp
    # Note:
    #   - noipdefault, nodetach, local: always passed to pppd by openl2tp

    # Note: The receive port is not changeable and there is no point in
    # setting the local port because of the one-udp-port patch.

    # Note: could set the openl2tp local sending port, which would disable
    # the ephemeral port use, but this is not required while we use the
    # one-socket patch.

    self.debug_on = helpers.get_debug(cfg)

    # XXX: it seems like openl2tp has *ppp* profile trace flags all enabled
    # by default and others (tunnel, session, system) not.. there could be
    # other debug flags, too, which would affect e.g. openl2tp and pluto

    ppp_subnet = ppp_cfg.getS(ns.pppSubnet, rdf.IPv4Subnet)
    if ppp_subnet.getCidr() > 30:
        raise Exception('PPP subnet does not contain enough usable addresses')
    local_ip = ppp_subnet.getLastUsableAddress()

    # Note: hostname is not settable, but openl2tp derives it from the
    # system hostname.

    # Note: vendor_name is not settable (and not used for anything more
    # than testing code) in openl2tp

    # Note: tunnelrws option does not exist in openl2tp,
    # but could set the tx/rx window sizes

    # Note: connect delay is not settable through openl2tp.
    # It has effect only when connect or pty options are used
    # in pppd config and thus is not required here.
    # connect_delay = '5000'

    # Note: openl2tp always uses the length bit, so "length bit = yes"
    # or similar is not required in config.

    # PPP profile
    params = {}
    params['prefix'] = 'ppp profile modify profile_name=default'

    params['idle_timeout'] = '0'
    if ppp_cfg.hasS(ns.pppIdleTimeout):
        # short timeouts (less than 10 seconds, say) are not sane, but we
        # assume the user interface checks for sanity
        params['idle_timeout'] = str(ppp_cfg.getS(ns.pppIdleTimeout, rdf.Timedelta).seconds)  # truncate
        self._log.warning('idle timeout specified, not robust with many clients')

    params['mtu'] = str(ppp_cfg.getS(ns.pppMtu, rdf.Integer))
    params['mru'] = params['mtu']
    params['local_ipaddr'] = local_ip.toString()

    # XXX: if no echo failure is specified, then the tunnels may never die:
    #   - tunnels have hello_interval, but it only controls the frequency
    #     of the sent HELLO messages
    #   - tunnels have an idle timeout, but it has meaning only when all the
    #     sessions for the tunnel have died out
    #   - sessions themselves do not die unless pppd terminates, because
    #     they have no timeout..

    # Note: be careful with PPP options -> delete or empty config files!
    #   - some options in the /etc/ppp/options file have priority over
    #     command-line options
    #   - openl2tp options are always command-line options
    #   - this may lead to strange behaviour if there are old config
    #     files still hanging around..
    params['lcp_echo_interval'] = '0'
    params['lcp_echo_failure'] = '0'
    if ppp_cfg.hasS(ns.pppLcpEchoInterval):
        params['lcp_echo_interval'] = str(ppp_cfg.getS(ns.pppLcpEchoInterval, rdf.Timedelta).seconds)
        params['lcp_echo_failure'] = str(ppp_cfg.getS(ns.pppLcpEchoFailure, rdf.Integer))

    params['auth_pap'] = 'no'
    if ppp_auth.hasS(ns.pppPap) and ppp_auth.getS(ns.pppPap, rdf.Boolean):
        params['auth_pap'] = 'yes'
    params['auth_chap'] = 'no'
    if ppp_auth.hasS(ns.pppChap) and ppp_auth.getS(ns.pppChap, rdf.Boolean):
        params['auth_chap'] = 'yes'

    # MSCHAPv1 had problems with pppd RADIUS support
    params['auth_mschapv1'] = 'no'
    if ppp_auth.hasS(ns.pppMschap) and ppp_auth.getS(ns.pppMschap, rdf.Boolean):
        self._log.warn('auth mschapv1 enabled in config but not supported, ignoring')

    params['auth_mschapv2'] = 'no'
    if ppp_auth.hasS(ns.pppMschapV2) and ppp_auth.getS(ns.pppMschapV2, rdf.Boolean):
        params['auth_mschapv2'] = 'yes'

    params['auth_eap'] = 'no'
    if ppp_auth.hasS(ns.pppEap) and ppp_auth.getS(ns.pppEap, rdf.Boolean):
        self._log.warn('eap enabled in config but not supported, ignoring')

    # compression options
    params['comp_mppc'] = 'no'
    if ppp_comp.hasS(ns.pppMppc) and ppp_comp.getS(ns.pppMppc, rdf.Boolean):
        params['comp_mppc'] = 'yes'
    params['comp_mppe'] = 'no'
    if ppp_comp.hasS(ns.pppMppe) and ppp_comp.getS(ns.pppMppe, rdf.Boolean):
        params['comp_mppe'] = 'yes'
    params['comp_accomp'] = 'no'
    if ppp_comp.hasS(ns.pppAccomp) and ppp_comp.getS(ns.pppAccomp, rdf.Boolean):
        params['comp_accomp'] = 'yes'
    params['comp_pcomp'] = 'no'
    if ppp_comp.hasS(ns.pppPcomp) and ppp_comp.getS(ns.pppPcomp, rdf.Boolean):
        params['comp_pcomp'] = 'yes'
    params['comp_bsdcomp'] = 'no'
    if ppp_comp.hasS(ns.pppBsdcomp) and ppp_comp.getS(ns.pppBsdcomp, rdf.Boolean):
        params['comp_bsdcomp'] = 'yes'
    params['comp_deflate'] = 'no'
    if ppp_comp.hasS(ns.pppDeflate) and ppp_comp.getS(ns.pppDeflate, rdf.Boolean):
        params['comp_deflate'] = 'yes'
    params['comp_predictor1'] = 'no'
    if ppp_comp.hasS(ns.pppPredictor1) and ppp_comp.getS(ns.pppPredictor1, rdf.Boolean):
        params['comp_predictor1'] = 'yes'
    params['comp_vj'] = 'no'
    if ppp_comp.hasS(ns.pppVj) and ppp_comp.getS(ns.pppVj, rdf.Boolean):
        params['comp_vj'] = 'yes'
    params['comp_ccomp_vj'] = 'no'
    if ppp_comp.hasS(ns.pppCcompVj) and ppp_comp.getS(ns.pppCcompVj, rdf.Boolean):
        params['comp_ccomp_vj'] = 'yes'

    # sanity checks
    if params['comp_pcomp'] == 'yes':
        self._log.warning('pcomp enabled - this breaks in mppc: disabling')
        params['comp_pcomp'] = 'no'
    if params['comp_mppe'] == 'yes':
        self._log.warning('mppe enabled - not handled by protocol: disabling')
        params['comp_mppe'] = 'no'

    # dns servers
    params['dns_ipaddr_pri'] = '0'
    params['dns_ipaddr_sec'] = '0'
    dns_list = res_info.ppp_dns_servers
    if len(dns_list) > 0:
        params['dns_ipaddr_pri'] = dns_list[0].address.toString()
    if len(dns_list) > 1:
        params['dns_ipaddr_sec'] = dns_list[1].address.toString()

    # wins servers
    params['wins_ipaddr_pri'] = '0'
    params['wins_ipaddr_sec'] = '0'
    wins_list = res_info.ppp_wins_servers
    if len(wins_list) > 0:
        params['wins_ipaddr_pri'] = wins_list[0].address.toString()
    if len(wins_list) > 1:
        params['wins_ipaddr_sec'] = wins_list[1].address.toString()

    # XXX: check and set sensible values, these are defaults
    params['max_connect_time'] = '0'
    params['max_failure_count'] = '10'

    # NB: This is actually not set, because it causes problems in Openl2tp
    # (boolean argument doesn't work correctly; it will actually be set!)
    params['default_route'] = 'no'
    params['multilink'] = 'no'

    # NB: always use only radius; local users also come from the local radius server
    params['use_radius'] = 'yes'
    # Force radius plugin to use proper config file of radiusclient-ng
    params['radius_hint'] = constants.RADIUSCLIENT_CONFIG

    # Note: there seems to be quite real disagreement between the openl2tp
    # configuration interface and the actually used/set configuration
    # values in openl2tpd:
    #   - dns1=0 seems to work in the configuration client, but actually it
    #     sets the IP address as 0.0.0.0 in pppd config
    #   - the zero IPs do not seem to have any effect because pppd is
    #     resilient.
    #   - etc..

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    ppp_conf = textwrap.dedent("""\
        %(prefix)s ip_pool_name=clientpool
        %(prefix)s default_route=%(default_route)s
        %(prefix)s multilink=%(multilink)s
        %(prefix)s use_radius=%(use_radius)s
        %(prefix)s radius_hint=%(radius_hint)s
        %(prefix)s idle_timeout=%(idle_timeout)s
        %(prefix)s mtu=%(mtu)s
        %(prefix)s mru=%(mru)s
        %(prefix)s local_ipaddr=%(local_ipaddr)s
        %(prefix)s lcp_echo_interval=%(lcp_echo_interval)s
        %(prefix)s lcp_echo_failure_count=%(lcp_echo_failure)s
        # Note: all auth options must be on one line
        %(prefix)s \
        req_none=no \
        auth_pap=no \
        auth_chap=no \
        auth_mschapv1=no \
        auth_mschapv2=no \
        auth_eap=no \
        req_pap=%(auth_pap)s \
        req_chap=%(auth_chap)s \
        req_mschapv1=%(auth_mschapv1)s \
        req_mschapv2=%(auth_mschapv2)s \
        req_eap=%(auth_eap)s
        %(prefix)s \
        mppe=%(comp_mppe)s
        %(prefix)s \
        comp_mppc=%(comp_mppc)s \
        comp_accomp=%(comp_accomp)s \
        comp_pcomp=%(comp_pcomp)s \
        comp_bsdcomp=%(comp_bsdcomp)s \
        comp_deflate=%(comp_deflate)s \
        comp_predictor1=%(comp_predictor1)s \
        comp_vj=%(comp_vj)s \
        comp_ccomp_vj=%(comp_ccomp_vj)s
        %(prefix)s dns_ipaddr_pri=%(dns_ipaddr_pri)s
        %(prefix)s dns_ipaddr_sec=%(dns_ipaddr_sec)s
        %(prefix)s wins_ipaddr_pri=%(wins_ipaddr_pri)s
        %(prefix)s wins_ipaddr_sec=%(wins_ipaddr_sec)s
        %(prefix)s max_connect_time=%(max_connect_time)s
        %(prefix)s max_failure_count=%(max_failure_count)s
        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

    # Tunnel profile
    params = {}
    params['prefix'] = 'tunnel profile modify profile_name=default'

    # Default responder port
    params['our_port'] = '1701'

    # XXX: better values, these are defaults.
    # NB: this works ok in practice, and no need to change if no problems seen.
    params['mtu'] = '1460'

    # This might affect socket behaviour or the pppol2tp kernel module..
    # XXX: this is default in openl2tp code - do we need to configure this?
    params['hello_timeout'] = '60'
    params['retry_timeout'] = '1'

    # Note: must set this to some value other than zero to prevent
    # tunnels from hanging when all connections (sessions) are dead
    params['idle_timeout'] = '1800'  # 30 minutes

    params['rx_window_size'] = '4'
    params['tx_window_size'] = '10'
    params['max_retries'] = '5'

    # XXX: better values, these are defaults
    # possible: none,digital,analog,any
    params['framing_caps'] = 'any'
    params['bearer_caps'] = 'any'

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    tunnel_conf = textwrap.dedent("""\
        %(prefix)s our_udp_port=%(our_port)s
        %(prefix)s mtu=%(mtu)s
        %(prefix)s hello_timeout=%(hello_timeout)s
        %(prefix)s retry_timeout=%(retry_timeout)s
        %(prefix)s idle_timeout=%(idle_timeout)s
        %(prefix)s rx_window_size=%(rx_window_size)s
        %(prefix)s tx_window_size=%(tx_window_size)s
        %(prefix)s max_retries=%(max_retries)s
        %(prefix)s framing_caps=%(framing_caps)s
        %(prefix)s bearer_caps=%(bearer_caps)s
        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

    # Session profile
    params = {}
    params['prefix'] = 'session profile modify profile_name=default'

    # XXX: should we use sequence numbers for data? maybe not.
    # ppp will receive the packets anyway. reordering might matter
    # for control packets, but that should not happen anyway.
    params['sequencing_required'] = 'no'
    params['use_sequence_numbers'] = 'no'

    if self.debug_on:
        params['trace_flags'] = '2047'  # Full trace
    else:
        params['trace_flags'] = '0'

    session_conf = textwrap.dedent("""\
        %(prefix)s sequencing_required=%(sequencing_required)s
        %(prefix)s use_sequence_numbers=%(use_sequence_numbers)s
        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

    # Peer profile
    # Note: no trace flags available for peer profile.. duh.
    params = {}
    params['prefix'] = 'peer profile modify profile_name=default'

    peer_conf = textwrap.dedent("""\
        """) % params

    self.configs = [{'file': constants.OPENL2TP_CONF,
                     'cont': ppp_conf + tunnel_conf + session_conf + peer_conf}]
def start_client_connection(self, identifier, myip, gwip, username, password):
    l2tp_cfg = helpers.get_db_root().getS(ns.l2tpDeviceConfig, rdf.Type(ns.L2tpDeviceConfig))
    ppp_cfg = l2tp_cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
    debug = helpers.get_debug(l2tp_cfg)

    def _run_config(config, failmsg, successmsg):
        rv, out, err = 1, '', ''

        lock = helpers.acquire_openl2tpconfig_lock()
        if lock is None:
            raise Exception('failed to acquire openl2tp config lock')
        try:
            [rv, out, err] = run_command([constants.CMD_OPENL2TPCONFIG], stdin=str(config))
        except:
            pass
        helpers.release_openl2tpconfig_lock(lock)

        if rv != 0:
            self._log.error('%s: %s, %s, %s' % (str(failmsg), str(rv), str(out), str(err)))
            raise Exception(str(failmsg))
        else:
            self._log.debug('%s: %s, %s, %s' % (str(successmsg), str(rv), str(out), str(err)))
        return rv, out, err

    our_port = 1702   # NB: yes, 1702; we differentiate client and site-to-site connections based on local port
    peer_port = 1701

    ppp_profile_name = 'ppp-prof-%s' % identifier
    tunnel_profile_name = 'tunnel-prof-%s' % identifier
    session_profile_name = 'session-prof-%s' % identifier
    peer_profile_name = 'peer-prof-%s' % identifier
    tunnel_name = 'tunnel-%s' % identifier
    session_name = 'session-%s' % identifier

    # we allow openl2tp to select these and "snoop" them from stdout
    tunnel_id = None
    session_id = None

    # ppp profile
    trace_flags = '0'
    if debug:
        trace_flags = '2047'

    config = 'ppp profile create profile_name=%s\n' % ppp_profile_name

    # XXX: take MRU and MTU like normal config?
    # XXX: should we have separate lcp echo etc settings for site-to-site?
    mtu = ppp_cfg.getS(ns.pppMtu, rdf.Integer)
    mru = mtu
    lcp_echo_interval = 0
    lcp_echo_failure = 0
    if ppp_cfg.hasS(ns.pppLcpEchoInterval):
        lcp_echo_interval = ppp_cfg.getS(ns.pppLcpEchoInterval, rdf.Timedelta).seconds
        lcp_echo_failure = ppp_cfg.getS(ns.pppLcpEchoFailure, rdf.Integer)

    for i in [['default_route', 'no'],
              ['multilink', 'no'],
              ['use_radius', 'no'],
              ['idle_timeout', '0'],                            # no limit
              ['mtu', str(mtu)],
              ['mru', str(mru)],
              ['lcp_echo_interval', str(lcp_echo_interval)],
              ['lcp_echo_failure_count', str(lcp_echo_failure)],
              ['max_connect_time', '0'],                        # no limit
              ['max_failure_count', '10'],
              ['trace_flags', trace_flags]]:
        config += 'ppp profile modify profile_name=%s %s=%s\n' % (ppp_profile_name, i[0], i[1])

    # Note: all auth options must be on one line
    config += 'ppp profile modify profile_name=%s req_none=yes auth_pap=yes auth_chap=yes auth_mschapv1=no auth_mschapv2=no auth_eap=no req_pap=no req_chap=no req_mschapv1=no req_mschapv2=no req_eap=no\n' % ppp_profile_name

    # no encryption
    config += 'ppp profile modify profile_name=%s mppe=no\n' % ppp_profile_name

    # Note: all compression options must be on one line
    # Request deflate or bsdcomp compression.
    config += 'ppp profile modify profile_name=%s comp_mppc=no comp_accomp=yes comp_pcomp=no comp_bsdcomp=no comp_deflate=yes comp_predictor=no comp_vj=no comp_ccomp_vj=no comp_ask_deflate=yes comp_ask_bsdcomp=no\n' % ppp_profile_name

    # tunnel profile
    config += 'tunnel profile create profile_name=%s\n' % tunnel_profile_name

    trace_flags = '0'
    if debug:
        trace_flags = '2047'

    # XXX: 1460 is hardcoded here, like in normal l2tp connections
    for i in [['our_udp_port', str(our_port)],
              ['peer_udp_port', str(peer_port)],
              ['mtu', '1460'],
              ['hello_timeout', '60'],
              ['retry_timeout', '3'],
              ['idle_timeout', '0'],
              ['rx_window_size', '4'],
              ['tx_window_size', '10'],
              ['max_retries', '5'],
              ['framing_caps', 'any'],
              ['bearer_caps', 'any'],
              ['trace_flags', trace_flags]]:
        config += 'tunnel profile modify profile_name=%s %s=%s\n' % (tunnel_profile_name, i[0], i[1])

    # session profile
    config += 'session profile create profile_name=%s\n' % session_profile_name

    trace_flags = '0'
    if debug:
        trace_flags = '2047'

    for i in [['sequencing_required', 'no'],
              ['use_sequence_numbers', 'no'],
              ['trace_flags', trace_flags]]:
        config += 'session profile modify profile_name=%s %s=%s\n' % (session_profile_name, i[0], i[1])

    # peer profile
    config += 'peer profile create profile_name=%s\n' % peer_profile_name

    # XXX: 'lac_lns', 'netmask'
    # 'peer_port' has no effect for some reason
    for i in [['peer_ipaddr', gwip.toString()],
              ['peer_port', str(peer_port)],   # XXX: dup from above
              ['ppp_profile_name', ppp_profile_name],
              ['session_profile_name', session_profile_name],
              ['tunnel_profile_name', tunnel_profile_name]]:
        config += 'peer profile modify profile_name=%s %s=%s\n' % (peer_profile_name, i[0], i[1])

    config += '\nquit\n'

    # create profiles
    self._log.debug('openl2tp config:\n%s' % config)
    rv, stdout, stderr = _run_config(config, 'failed to create client-mode profiles', 'create client-mode profiles ok')

    # create tunnel - this triggers openl2tp
    #
    # NOTE: 'interface_name' would make life easier, but is not currently
    # supported by Openl2tp.
    #
    # XXX: 'persist', 'interface_name'

    config = 'tunnel create tunnel_name=%s' % tunnel_name  # NB: all on one line here
    for i in [['src_ipaddr', myip.toString()],
              ['our_udp_port', str(our_port)],     # XXX: dup from above
              ['peer_udp_port', str(peer_port)],   # XXX: dup from above
              ['dest_ipaddr', gwip.toString()],
              ['peer_profile_name', peer_profile_name],
              ['profile_name', tunnel_profile_name],
              ['session_profile_name', session_profile_name],
              ['tunnel_name', tunnel_name],
              ### ['tunnel_id', tunnel_id],        # XXX: for some reason can't be used, fetched below!
              ['use_udp_checksums', 'yes']]:       # XXX: probably doesn't do anything now
        config += ' %s=%s' % (i[0], i[1])
    config += '\nquit\n'

    # activate tunnel
    self._log.debug('openl2tp config for tunnel:\n%s' % config)
    rv, stdout, stderr = _run_config(config, 'failed to create client-mode tunnel', 'create client-mode tunnel ok')

    for l in stderr.split('\n'):
        m = _re_openl2tp_created_tunnel.match(l)
        if m is not None:
            if tunnel_id is not None:
                self._log.warning('second tunnel id (%s), old one was %s; ignoring' % (m.group(1), tunnel_id))
            else:
                tunnel_id = m.group(1)
                self._log.debug('figured out tunnel id %s' % tunnel_id)

    if tunnel_id is None:
        raise Exception('could not figure tunnel id of new site-to-site tunnel (username %s) [rv: %s, out: %s, err: %s]' % (username, rv, stdout, stderr))

    config = 'session create session_name=%s' % session_name
    for i in [['tunnel_name', tunnel_name],
              ['tunnel_id', tunnel_id],
              ### ['session_id', session_id],      # XXX: for some reason can't be used, fetched below!
              ['profile_name', session_profile_name],
              ['ppp_profile_name', ppp_profile_name],
              ['user_name', username],
              ['user_password', password]]:
        config += ' %s=%s' % (i[0], i[1])
    config += '\nquit\n'

    # activate session
    self._log.debug('openl2tp config for session:\n%s' % config)
    rv, stdout, stderr = _run_config(config, 'failed to create client-mode session', 'create client-mode session ok')

    for l in stderr.split('\n'):
        m = _re_openl2tp_created_session.match(l)
        if m is not None:
            if session_id is not None:
                self._log.warning('second session id (%s), old one was %s; ignoring' % (m.group(2), session_id))
            else:
                tun = m.group(1)
                if tun != tunnel_id:
                    self._log.warning('tunnel id differs from earlier (earlier %s, found %s), ignoring' % (tunnel_id, tun))
                else:
                    session_id = m.group(2)
                    self._log.debug('figured out session id %s' % session_id)

    if session_id is None:
        raise Exception('could not figure session id of new site-to-site tunnel (username %s) [rv: %s, out: %s, err: %s]' % (username, rv, stdout, stderr))

    self._log.info('created new tunnel and session (%s/%s) for site-to-site client (username %s)' % (tunnel_id, session_id, username))
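# Hypothetical usage sketch (identifier, username and password are made-up example
# values; myip and gwip must be address objects providing toString(), as the code
# above assumes). The identifier is only used to derive unique profile/tunnel/session
# names, so it must differ between concurrent site-to-site connections:
#
#   self.start_client_connection('s2s-0', myip, gwip, 'siteuser', 'sitepass')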
def create_config(self, cfg, resinfo):
    # XXX: watchdog
    #   - process status
    #   - could also: use monit to run the process
    #   - check process health (cpu usage, memory usage, etc.)

    self.debug_on = helpers.get_debug(cfg)

    params = {}
    params['webui_pid'] = constants.WEBUI_PIDFILE
    params['openl2tp_pid'] = constants.OPENL2TP_PIDFILE
    params['l2tpd_pid'] = constants.L2TPD_PIDFILE
    params['ippool_pid'] = constants.IPPOOL_PIDFILE
    params['pluto_pid'] = constants.PLUTO_PIDFILE
    params['ezipupdate_pid'] = constants.EZIPUPDATE_PIDFILE
    params['dhclient_pid'] = constants.DHCLIENT_PIDFILE

    # XXX: reboot script?
    # params['fail_action'] = '/usr/bin/l2tpgw-reboot'
    params['fail_action'] = '/bin/true'
    params['stop_action'] = '/bin/true'

    dhcp_conf = ''
    if helpers.get_public_dhcp_interface(cfg) is not None or helpers.get_private_dhcp_interface(cfg) is not None:
        dhcp_conf = textwrap.dedent("""\
            # dhclient3
            check process dhclient3 with pidfile \"%(dhclient_pid)s\"
                start program = \"%(fail_action)s\"
                stop program = \"%(stop_action)s\"
            """) % params

    # Note: not using monit httpd server:
    # set
    # set httpd port 2812 and use address localhost
    # allow localhost
    # allow admin:monit

    # Note: process health check, pluto cpu usage: if cpu > 90%, etc

    ezipupdate_conf = ''
    if helpers.get_dyndns_config(cfg) is not None:
        ezipupdate_conf = textwrap.dedent("""\
            # ez-ipupdate
            check process ez-ipupdate with pidfile \"%(ezipupdate_pid)s\"
                start program = \"%(fail_action)s\"
                stop program = \"%(stop_action)s\"
            """) % params

    conf = textwrap.dedent("""\
        set daemon 60
        set logfile syslog facility log_daemon

        check process twistd with pidfile \"%(webui_pid)s\"
            start program = \"%(fail_action)s\"
            stop program = \"%(stop_action)s\"

        check process openl2tp with pidfile \"%(openl2tp_pid)s\"
            start program = \"%(fail_action)s\"
            stop program = \"%(stop_action)s\"

        check process ippoold with pidfile \"%(ippool_pid)s\"
            start program = \"%(fail_action)s\"
            stop program = \"%(stop_action)s\"

        check process pluto with pidfile \"%(pluto_pid)s\"
            start program = \"%(fail_action)s\"
            stop program = \"%(stop_action)s\"
        """) % params

    self.configs = [{'file': constants.MONIT_CONF,
                     'cont': conf + dhcp_conf + ezipupdate_conf,
                     'mode': 0700}]