Code example #1
    def create_config(self, cfg, resinfo):
        """Create FreeRADIUS configuration files.

        FreeRADIUS is always started, so no start/nostart magic is needed.
        """

        net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
        ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
        ppp_auth = ppp_cfg.getS(ns.pppAuthentication, rdf.Type(ns.PppAuthentication))
        ppp_comp = ppp_cfg.getS(ns.pppCompression, rdf.Type(ns.PppCompression))
        l2tp_cfg = cfg.getS(ns.l2tpConfig, rdf.Type(ns.L2tpConfig))

        radius_public_ip = resinfo.public_interface.address.getAddress().toString()
        nas_ip = radius_public_ip

        radius_private_ip = None
        if resinfo.private_interface is not None:
            radius_private_ip = resinfo.private_interface.address.getAddress().toString()
            nas_ip = radius_private_ip

        # NB: this secret does no good for us and may be static
        client_config = textwrap.dedent("""\
        # autogenerated file, do not edit
        client 127.0.0.1 {
            secret      = notasecret
            shortname   = localhost
            nastype     = other
        }
        """ % {'nas_ip':nas_ip})
        
        dictionary_config = textwrap.dedent("""\
        # autogenerated file, do not edit
        $INCLUDE        /usr/share/freeradius/dictionary
        """)

        # XXX: may need changes in the future
        retry_delay = 5
        retry_count = 3
        proxy_config = textwrap.dedent("""\
        # autogenerated file, do not edit

        proxy server {
            synchronous = no
            retry_delay = %(retry_delay)d
            retry_count = %(retry_count)d
            dead_time = 120
            default_fallback = yes
            post_proxy_authorize = no
        }

        realm LOCAL {
            type     = radius
            authhost = LOCAL
            accthost = LOCAL
        }
        """ % {'retry_delay':retry_delay, 'retry_count':retry_count})

        # NB: no way to do failover with DEFAULT or NULL realms, use a
        # bogus realm instead

        using_proxy, nas_id, servers = self._get_radius_parameters(cfg)
        for addr, port, secret in servers:
            if addr is not None and port is not None and secret is not None:
                proxy_config += textwrap.dedent("""\

                realm default.realm.invalid {
                    type      = radius
                    authhost  = %(addr)s:%(port)s
                    secret    = "%(secret)s"
                    nostrip
                }
                """ % {'addr':str(addr), 'port':str(port), 'secret':str(secret)})


        # XXX: log_file below *does not work*, it causes a non-fatal startup error
        # can it be removed?  we're using syslog anyway
        
        # XXX: may need changing in the future
        max_reqtime = 30
        radiusd_config = textwrap.dedent("""\
        # autogenerated file, do not edit

        prefix = /usr
        exec_prefix = /usr
        sysconfdir = /etc
        localstatedir = /var
        sbindir = ${exec_prefix}/sbin
        logdir = syslog
        raddbdir = %(raddbdir)s
        radacctdir = ${logdir}/radacct

        confdir = ${raddbdir}
        run_dir = %(run_dir)s
        log_file = ${logdir}/radius.log
        libdir = /usr/lib/freeradius
        pidfile = ${run_dir}/freeradius.pid
        user = freerad
        group = freerad
        max_request_time = %(max_reqtime)d
        delete_blocked_requests = yes
        cleanup_delay = 5
        max_requests = 512
        """ % {'raddbdir':constants.FREERADIUS_CONFIG_DIR,
               'run_dir':constants.FREERADIUS_RUNPATH,
               'max_reqtime':max_reqtime})

        # NB: bind only to localhost
        radiusd_config += textwrap.dedent("""\
        listen {
            ipaddr = 127.0.0.1
            port = 1812
            type = auth
        }
        """)

        radiusd_config += textwrap.dedent("""\

        hostname_lookups = no
        allow_core_dumps = no

        regular_expressions     = yes
        extended_expressions    = yes
        
        log_stripped_names = no
        log_auth = no

        log_auth_badpass = no
        log_auth_goodpass = no
        usercollide = no
        lower_user = no
        lower_pass = no
        nospace_user = no
        nospace_pass = no

        checkrad = ${sbindir}/checkrad

        security {
            max_attributes = 200
            reject_delay = 1
            status_server = no
        }

        proxy_requests  = yes
        $INCLUDE  ${confdir}/proxy.conf

        $INCLUDE  ${confdir}/clients.conf

        thread pool {
            start_servers = 3
            max_servers = 16
            min_spare_servers = 2
            max_spare_servers = 3
            max_requests_per_server = 0
        }
        
        modules {
            pap {
                auto_header = yes
            }
        
            chap {
                authtype = CHAP
            }

            pam {
                pam_auth = radiusd
            }

            mschap {
            }

            realm suffix {
                format = suffix
                delimiter = "@"
                ignore_default = no
                ignore_null = no
            }
        
            preprocess {
                huntgroups = ${confdir}/huntgroups
                hints = ${confdir}/hints

                with_ascend_hack = no
                ascend_channels_per_line = 23
                with_ntdomain_hack = no
                with_specialix_jetstream_hack = no
                with_cisco_vsa_hack = no
            }

            files {
                usersfile = ${confdir}/users
                acctusersfile = ${confdir}/acct_users
                preproxy_usersfile = ${confdir}/preproxy_users
                compat = no
            }

            detail {
                detailfile = ${radacctdir}/%{Client-IP-Address}/detail-%Y%m%d
                detailperm = 0600        
                suppress {
                    User-Password
                }
            }

            acct_unique {
                key = "User-Name, Acct-Session-Id, NAS-IP-Address, Client-IP-Address, NAS-Port"
            }
        
            expr {
            }

            digest {
            }
        
            exec {
                wait = yes
                input_pairs = request
            }

        }

        instantiate {
            exec
            expr
        }
        
        authorize {
            preprocess        
            chap
            mschap
            suffix
            files
            pap
        }
        
        authenticate {
            Auth-Type PAP {
                pap
            }
            Auth-Type CHAP {
                chap
            }
            Auth-Type MS-CHAP {
                mschap
            }
        }
        
        session {
        }
        
        post-auth {
        }

        pre-proxy {
            files
        }
        
        post-proxy {
        }
        """)

        preproxy_config = textwrap.dedent("""\
        # autogenerated file, do not edit

        DEFAULT
            NAS-IP-Address := "%(nas_ip)s",
            NAS-Port-Type := 5""" % {'nas_ip':nas_ip})

        if nas_id is not None:
            preproxy_config += textwrap.dedent("""\
            ,
                NAS-Identifier := "%(nas_id)s"

            """ % {'nas_id':nas_id})
        else:
            preproxy_config += '\n\n'            

        self.configs = [
            {'file': constants.FREERADIUS_ACCT_USERS, 'cont': ''},               # Not used
            {'file': constants.FREERADIUS_ATTRS, 'cont': ''},                    # Not used
            {'file': constants.FREERADIUS_CLIENTS, 'cont': ''},                  # Deprecated
            {'file': constants.FREERADIUS_CLIENTS_CONF, 'cont': client_config},
            {'file': constants.FREERADIUS_DICTIONARY, 'cont': dictionary_config},
            {'file': constants.FREERADIUS_EAP_CONF, 'cont': ''},                 # Not used
            {'file': constants.FREERADIUS_EXPERIMENTAL_CONF, 'cont': ''},        # Not used
            {'file': constants.FREERADIUS_HINTS, 'cont': ''},                    # Not used
            {'file': constants.FREERADIUS_HUNTGROUPS, 'cont': ''},               # Not used
            {'file': constants.FREERADIUS_LDAP_ATTRMAP, 'cont': ''},             # Not used
            {'file': constants.FREERADIUS_MSSQL_CONF, 'cont': ''},               # Not used
            {'file': constants.FREERADIUS_NASLIST, 'cont': ''},                  # Deprecated
            {'file': constants.FREERADIUS_NASPASSWD, 'cont': ''},                # Not used
            {'file': constants.FREERADIUS_ORACLESQL_CONF, 'cont': ''},           # Not used
            {'file': constants.FREERADIUS_OTP_CONF, 'cont': ''},                 # Not used
            {'file': constants.FREERADIUS_POSTGRESQL_CONF, 'cont': ''},          # Not used
            {'file': constants.FREERADIUS_PREPROXY_USERS, 'cont': preproxy_config},
            {'file': constants.FREERADIUS_PROXY_CONF, 'cont': proxy_config},
            {'file': constants.FREERADIUS_RADIUSD_CONF, 'cont': radiusd_config},
            {'file': constants.FREERADIUS_REALMS, 'cont': ''},                   # Deprecated
            {'file': constants.FREERADIUS_SNMP_CONF, 'cont': ''},                # Not used
            {'file': constants.FREERADIUS_SQL_CONF, 'cont': ''},                 # Not used
            {'file': constants.FREERADIUS_SQLIPPOOL_CONF, 'cont': ''},           # Not used
            ]

        self.configs.append(self._create_users_file(cfg, resinfo, using_proxy))
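The entries in self.configs pair a target path with generated file contents; the snippet does not show how they are flushed to disk. A minimal sketch of such a flush step, assuming the dict-per-file layout above (the write_configs helper below is hypothetical, not part of the project):

import os

def write_configs(configs):
    # Hypothetical helper: write each {'file': path, 'cont': contents} entry to disk.
    for entry in configs:
        path, contents = entry['file'], entry['cont']
        dirname = os.path.dirname(path)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
        f = open(path, 'w')
        try:
            f.write(contents)
        finally:
            f.close()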
Code example #2
    def _do_mgmt_identify_args(self):
        root = self.rdf_root
        licinfo = self.rdf_root.getS(ns_ui.licenseInfo,
                                     rdf.Type(ns_ui.LicenseInfo))
        uiconfig = self.rdf_root.getS(ns_ui.uiConfig, rdf.Type(ns_ui.UiConfig))

        args = {}
        try:
            ui_root = helpers.get_ui_config()
            if ui_root.hasS(ns_ui.licenseKey) and ui_root.getS(
                    ns_ui.licenseKey, rdf.String) != '':
                args['licenseKey'] = ui_root.getS(ns_ui.licenseKey, rdf.String)
            elif ui_root.hasS(ns_ui.testLicenseKey) and ui_root.getS(
                    ns_ui.testLicenseKey, rdf.String) != '':
                args['licenseKey'] = ui_root.getS(ns_ui.testLicenseKey,
                                                  rdf.String)
            else:
                raise Exception('no configured license')
        except:
            args['licenseKey'] = ''  # anonymous

        try:
            t = helpers.get_boot_uuid()
            if t is None:
                args['bootUuid'] = ''
            else:
                args['bootUuid'] = t
        except:
            args['bootUuid'] = ''

        try:
            t = helpers.get_installation_uuid()
            if t is None:
                args['installationUuid'] = ''
            else:
                args['installationUuid'] = t
        except:
            args['installationUuid'] = ''

        try:
            t = helpers.get_cookie_uuid()
            if t is None:
                args['cookieUuid'] = ''
            else:
                args['cookieUuid'] = t
        except:
            args['cookieUuid'] = ''

        args['address'] = '0.0.0.0'  # overridden by managementconnection
        args['port'] = 0  # overridden by managementconnection

        try:
            args['softwareVersion'] = helpers.get_product_version()
        except:
            args['softwareVersion'] = ''

        args['softwareBuildInfo'] = ''  # XXX
        args['hardwareType'] = ''  # XXX
        args['hardwareInfo'] = ''  # XXX

        try:
            if self.force_update:
                args['automaticUpdates'] = True
            else:
                args['automaticUpdates'] = uiconfig.getS(
                    ns_ui.automaticUpdates, rdf.Boolean)
        except:
            args['automaticUpdates'] = True

        try:
            args['isLiveCd'] = helpers.is_live_cd()
        except:
            args['isLiveCd'] = False

        return args
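Every lookup above falls back to a default when the value is missing or an exception is raised. The repeated try/except blocks could be collapsed with a small helper; a sketch under that assumption (_safe_call is hypothetical, not an existing helpers function):

def _safe_call(func, default):
    # Hypothetical helper: return func(), or default if it raises or returns None.
    try:
        value = func()
        if value is None:
            return default
        return value
    except:
        return default

# e.g. args['bootUuid'] = _safe_call(helpers.get_boot_uuid, '')
#      args['installationUuid'] = _safe_call(helpers.get_installation_uuid, '')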
Code example #3
    def _update_public_private_ifaces(self, now, ifaces, pub_di, priv_di,
                                      first_time):
        def _update_iface(di, st):
            orxtime = st.getS(ns.rxLastChange, rdf.Datetime)
            nrxtime = now
            st.setS(ns.rxLastChange, rdf.Datetime, nrxtime)

            otxtime = st.getS(ns.txLastChange, rdf.Datetime)
            ntxtime = now
            st.setS(ns.txLastChange, rdf.Datetime, ntxtime)

            orxbytes = st.getS(ns.rxBytesCounter, rdf.Integer)
            nrxbytes = di.rxbytes
            nrxbytes = _wrap_check(orxbytes, nrxbytes)  # handle 4GiB wrap
            st.setS(ns.rxBytesCounter, rdf.Integer, nrxbytes)

            otxbytes = st.getS(ns.txBytesCounter, rdf.Integer)
            ntxbytes = di.txbytes
            ntxbytes = _wrap_check(otxbytes, ntxbytes)  # handle 4GiB wrap
            st.setS(ns.txBytesCounter, rdf.Integer, ntxbytes)

            orxpackets = st.getS(ns.rxPacketsCounter, rdf.Integer)
            nrxpackets = di.rxpackets
            st.setS(ns.rxPacketsCounter, rdf.Integer, nrxpackets)

            otxpackets = st.getS(ns.txPacketsCounter, rdf.Integer)
            ntxpackets = di.txpackets
            st.setS(ns.txPacketsCounter, rdf.Integer, ntxpackets)

            if first_time:
                st.setS(ns.rxRateCurrent, rdf.Float, 0.0)
                st.setS(ns.rxRateMaximum, rdf.Float, 0.0)
                st.setS(ns.txRateCurrent, rdf.Float, 0.0)
                st.setS(ns.txRateMaximum, rdf.Float, 0.0)
            else:
                rx_time = nrxtime - orxtime
                rx_secs = float(rx_time.seconds) + float(
                    rx_time.microseconds / 1000000.0)
                if rx_secs > 0.0:
                    rx_rate = float(nrxbytes - orxbytes) / rx_secs
                else:
                    rx_rate = 0.0
                if rx_rate > st.getS(ns.rxRateMaximum, rdf.Float):
                    st.setS(ns.rxRateMaximum, rdf.Float, rx_rate)
                st.setS(ns.rxRateCurrent, rdf.Float, rx_rate)

                tx_time = ntxtime - otxtime
                tx_secs = float(tx_time.seconds) + float(
                    tx_time.microseconds / 1000000.0)
                if tx_secs > 0.0:
                    tx_rate = float(ntxbytes - otxbytes) / tx_secs
                else:
                    tx_rate = 0.0
                if tx_rate > st.getS(ns.txRateMaximum, rdf.Float):
                    st.setS(ns.txRateMaximum, rdf.Float, tx_rate)
                st.setS(ns.txRateCurrent, rdf.Float, tx_rate)

            # NB: link and IP info are updated on every round; this benefits
            # very little but also costs very little...

            # update link info
            st.setS(ns.mtu, rdf.Integer, di.mtu)
            st.setS(ns.macAddress, rdf.String, di.mac)

            # update ip level info
            iface = ifaces.get_interface_by_name(di.devname)
            addrsub = iface.get_current_ipv4_address_info()
            if addrsub is None:
                _log.info('could not get address of interface %s, ignoring' %
                          di.devname)
            else:
                st.setS(ns.ipAddress, rdf.IPv4AddressSubnet, addrsub)

        if pub_di is not None:
            pub_if_st = helpers.get_status().getS(
                ns.publicInterface, rdf.Type(ns.NetworkInterface))
            _update_iface(pub_di, pub_if_st)

        if priv_di is not None:
            priv_if_st = helpers.get_status().getS(
                ns.privateInterface, rdf.Type(ns.NetworkInterface))
            _update_iface(priv_di, priv_if_st)
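_wrap_check is referenced above but not shown in this snippet. A minimal sketch, assuming the raw kernel counters are 32-bit and the stored counter keeps the wrap-corrected running value (the real helper may differ):

_COUNTER_WRAP = 2 ** 32  # 4 GiB; raw interface byte counters assumed to be 32-bit

def _wrap_check(old_value, new_value):
    # Sketch: if the freshly read raw counter is below the previously stored
    # value, assume the 32-bit counter wrapped and add 4 GiB spans until the
    # sequence is monotonic again.
    while new_value < old_value:
        new_value += _COUNTER_WRAP
    return new_value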
Code example #4
    def start_client_connection(self, identifier, myip, gwip, username, password):
        l2tp_cfg = helpers.get_db_root().getS(ns.l2tpDeviceConfig, rdf.Type(ns.L2tpDeviceConfig))
        ppp_cfg = l2tp_cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
        
        debug = helpers.get_debug(l2tp_cfg)

        def _run_config(config, failmsg, successmsg):
            rv, out, err = 1, '', ''

            lock = helpers.acquire_openl2tpconfig_lock()
            if lock is None:
                raise Exception('failed to acquire openl2tp config lock')
            try:
                [rv, out, err] = run_command([constants.CMD_OPENL2TPCONFIG], stdin=str(config))
            except:
                pass
            helpers.release_openl2tpconfig_lock(lock)
            if rv != 0:
                self._log.error('%s: %s, %s, %s' % (str(failmsg), str(rv), str(out), str(err)))
                raise Exception(str(failmsg))
            else:
                self._log.debug('%s: %s, %s, %s' % (str(successmsg), str(rv), str(out), str(err)))

            return rv, out, err

        our_port = 1702   # NB: yes, 1702; we differentiate client and site-to-site connections based on local port
        peer_port = 1701

        ppp_profile_name = 'ppp-prof-%s' % identifier
        tunnel_profile_name = 'tunnel-prof-%s' % identifier
        session_profile_name = 'session-prof-%s' % identifier
        peer_profile_name = 'peer-prof-%s' % identifier
        tunnel_name = 'tunnel-%s' % identifier
        session_name = 'session-%s' % identifier

        # we allow openl2tp to select these and "snoop" them from the command output (stderr below)
        tunnel_id = None
        session_id = None

        # ppp profile
        trace_flags = '0'
        if debug:
            trace_flags = '2047'
        config = 'ppp profile create profile_name=%s\n' % ppp_profile_name

        # XXX: take MRU and MTU like normal config?
        # XXX: should we have separate lcp echo etc settings for site-to-site?
        mtu = ppp_cfg.getS(ns.pppMtu, rdf.Integer)
        mru = mtu
        lcp_echo_interval = 0
        lcp_echo_failure = 0
        if ppp_cfg.hasS(ns.pppLcpEchoInterval):
            lcp_echo_interval = ppp_cfg.getS(ns.pppLcpEchoInterval, rdf.Timedelta).seconds
            lcp_echo_failure = ppp_cfg.getS(ns.pppLcpEchoFailure, rdf.Integer)

        for i in [ ['default_route', 'no'],
                   ['multilink', 'no'],
                   ['use_radius', 'no'],
                   ['idle_timeout', '0'],  # no limit
                   ['mtu', str(mtu)],
                   ['mru', str(mru)],
                   ['lcp_echo_interval', str(lcp_echo_interval)],
                   ['lcp_echo_failure_count', str(lcp_echo_failure)],
                   ['max_connect_time', '0'],  # no limit
                   ['max_failure_count', '10'],
                   ['trace_flags', trace_flags] ]:
            config += 'ppp profile modify profile_name=%s %s=%s\n' % (ppp_profile_name, i[0], i[1])

        # Note: all auth options must be on one line
        config += 'ppp profile modify profile_name=%s req_none=yes auth_pap=yes auth_chap=yes auth_mschapv1=no auth_mschapv2=no auth_eap=no req_pap=no req_chap=no req_mschapv1=no req_mschapv2=no req_eap=no\n' % ppp_profile_name

        # no encryption
        config += 'ppp profile modify profile_name=%s mppe=no\n' % ppp_profile_name

        # Note: all compression options must be on one line
        # Request deflate or bsdcomp compression.
        config += 'ppp profile modify profile_name=%s comp_mppc=no comp_accomp=yes comp_pcomp=no comp_bsdcomp=no comp_deflate=yes comp_predictor=no comp_vj=no comp_ccomp_vj=no comp_ask_deflate=yes comp_ask_bsdcomp=no\n' % ppp_profile_name

        # tunnel profile
        config += 'tunnel profile create profile_name=%s\n' % tunnel_profile_name

        trace_flags = '0'
        if debug:
            trace_flags = '2047'

        # XXX: 1460 is hardcoded here, like in normal l2tp connections
        for i in [ ['our_udp_port', str(our_port)],
                   ['peer_udp_port', str(peer_port)],
                   ['mtu', '1460'],
                   ['hello_timeout', '60'],
                   ['retry_timeout', '3'],
                   ['idle_timeout', '0'],
                   ['rx_window_size', '4'],
                   ['tx_window_size', '10'],
                   ['max_retries', '5'],
                   ['framing_caps', 'any'],
                   ['bearer_caps', 'any'],
                   ['trace_flags', trace_flags] ]:
            config += 'tunnel profile modify profile_name=%s %s=%s\n' % (tunnel_profile_name, i[0], i[1])
            
        # session profile
        config += 'session profile create profile_name=%s\n' % session_profile_name

        trace_flags = '0'
        if debug:
            trace_flags = '2047'

        for i in [ ['sequencing_required', 'no'],
                   ['use_sequence_numbers', 'no'],
                   ['trace_flags', trace_flags] ]:
            config += 'session profile modify profile_name=%s %s=%s\n' % (session_profile_name, i[0], i[1])

        # peer profile
        config += 'peer profile create profile_name=%s\n' % peer_profile_name

        # XXX: 'lac_lns', 'netmask'
        # 'peer_port' has no effect for some reason
        for i in [ ['peer_ipaddr', gwip.toString()],
                   ['peer_port', str(peer_port)],  # XXX: dup from above
                   ['ppp_profile_name', ppp_profile_name],
                   ['session_profile_name', session_profile_name],
                   ['tunnel_profile_name', tunnel_profile_name] ]:
            config += 'peer profile modify profile_name=%s %s=%s\n' % (peer_profile_name, i[0], i[1])

        config += '\nquit\n'

        # create profiles
        self._log.debug('openl2tp config:\n%s' % config)
        rv, stdout, stderr = _run_config(config, 'failed to create client-mode profiles', 'create client-mode profiles ok')

        # create tunnel - this triggers openl2tp
        #
        # NOTE: 'interface_name' would make life easier, but is not currently
        # supported by Openl2tp.
        #
        # XXX: 'persist', 'interface_name'
        config = 'tunnel create tunnel_name=%s' % tunnel_name  # NB: all on one line here
        for i in [ ['src_ipaddr', myip.toString()],
                   ['our_udp_port', str(our_port)],   # XXX: dup from above
                   ['peer_udp_port', str(peer_port)], # XXX: dup from above
                   ['dest_ipaddr', gwip.toString()],
                   ['peer_profile_name', peer_profile_name],
                   ['profile_name', tunnel_profile_name],
                   ['session_profile_name', session_profile_name],
                   ['tunnel_name', tunnel_name],
###                ['tunnel_id', tunnel_id], # XXX: for some reason can't be used, fetched below!
                   ['use_udp_checksums', 'yes'] ]: # XXX: probably doesn't do anything now
            config += ' %s=%s' % (i[0], i[1])

        config += '\nquit\n'

        # activate tunnel
        self._log.debug('openl2tp config for tunnel:\n%s' % config)
        rv, stdout, stderr = _run_config(config, 'failed to create client-mode tunnel', 'create client-mode tunnel ok')
        
        for l in stderr.split('\n'):
            m = _re_openl2tp_created_tunnel.match(l)
            if m is not None:
                if tunnel_id is not None:
                    self._log.warning('second tunnel id (%s), old one was %s; ignoring' % (m.group(1), tunnel_id))
                else:
                    tunnel_id = m.group(1)

        self._log.debug('figured out tunnel id %s' % tunnel_id)
        if tunnel_id is None:
            raise Exception('could not figure tunnel id of new site-to-site tunnel (username %s) [rv: %s, out: %s, err: %s]' % (username, rv, stdout, stderr))

        config = 'session create session_name=%s' % session_name
        for i in [ ['tunnel_name', tunnel_name],
                   ['tunnel_id', tunnel_id],
###                ['session_id', session_id], # XXX: for some reason can't be used, fetched below!
                   ['profile_name', session_profile_name],
                   ['ppp_profile_name', ppp_profile_name],
                   ['user_name', username],
                   ['user_password', password] ]:
            config += ' %s=%s' % (i[0], i[1])

        config += '\nquit\n'

        # activate session
        self._log.debug('openl2tp config for session:\n%s' % config)
        rv, stdout, stderr = _run_config(config, 'failed to create client-mode session', 'create client-mode session ok')

        for l in stderr.split('\n'):
            m = _re_openl2tp_created_session.match(l)
            if m is not None:
                if session_id is not None:
                    self._log.warning('second session id (%s), old one was %s; ignoring' % (m.group(2), session_id))
                else:
                    tun = m.group(1)
                    if tun != tunnel_id:
                        self._log.warning('tunnel id differs from earlier (earlier %s, found %s), ignoring' % (tunnel_id, tun))
                    else:
                        session_id = m.group(2)
                        
        self._log.debug('figured out session id %s' % session_id)
        if session_id is None:
            raise Exception('could not figure session id of new site-to-site tunnel (username %s) [rv: %s, out: %s, err: %s]' % (username, rv, stdout, stderr))

        self._log.info('created new tunnel and session (%s/%s) for site-to-site client (username %s)' % (tunnel_id, session_id, username))
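The two regular expressions used to snoop the tunnel and session ids are not part of this snippet. A sketch of what they might look like, assuming openl2tp reports lines such as "Created tunnel 1234" and "Created session 1234/5678" (the exact output format is an assumption):

import re

# Assumed openl2tp output formats; the real patterns in the project may differ.
_re_openl2tp_created_tunnel = re.compile(r'.*[Cc]reated tunnel (\d+)')
_re_openl2tp_created_session = re.compile(r'.*[Cc]reated session (\d+)/(\d+)')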
Code example #5
    def create_config(self, cfg, res_info):
        """Create OpenL2tp configuration file as string."""

        # This is for get_args() to later pick up
        self.ip_address = res_info.public_interface.address.getAddress().toString()

        (pub_if, pub_if_name), (priv_if, priv_if_name) = helpers.get_ifaces(cfg)
        net_cfg = cfg.getS(ns.networkConfig, rdf.Type(ns.NetworkConfig))
        ppp_cfg = cfg.getS(ns.pppConfig, rdf.Type(ns.PppConfig))
        ppp_auth = ppp_cfg.getS(ns.pppAuthentication, rdf.Type(ns.PppAuthentication))
        ppp_comp = ppp_cfg.getS(ns.pppCompression, rdf.Type(ns.PppCompression))
        l2tp_cfg = cfg.getS(ns.l2tpConfig, rdf.Type(ns.L2tpConfig))

        # XXX:  (do we need a patch for these?)
        # - noipx, crtscts, lock: are not used by openl2tp

        # Note:
        # - noipdefault, nodetach, local: always passed to pppd by openl2tp


        # Note: The receive port is not changeable, and there is no point in
        # setting the local port because of the one-udp-port patch.

        # Note: could set the openl2tp local sending port which would
        # disable the ephemeral port use, but this is not required while
        # we use the one-socket patch.

        self.debug_on = helpers.get_debug(cfg)

        # XXX: it seems like openl2tp has *ppp* profile trace flags
        # all enabled by default and others (tunnel, session, system) not.
        # There could be other debug flags, too, which would affect e.g.
        # openl2tp and pluto.

        ppp_subnet = ppp_cfg.getS(ns.pppSubnet, rdf.IPv4Subnet)
        if ppp_subnet.getCidr() > 30:
            raise Exception('PPP subnet does not contain enough usable addresses')
        local_ip = ppp_subnet.getLastUsableAddress()

        # Note: hostname is not settable, but openl2tp derives it from
        # system hostname.

        # Note: vendor_name is not settable (and not used for anything more
        # than testing code) in openl2tp

        # Note: tunnelrws option does not exist in openl2tp
        # but could set the tx/rx window sizes

        # Note: not settable through openl2tp.
        # this has effect only when connect or pty options are used
        # in pppd config and thus is not required here.
        # connect_delay = '5000'

        # Note: openl2tp always uses the length bit, so "length bit = yes"
        # or similar is not required in the config.

        # PPP profile
        params = {}
        params['prefix'] = 'ppp profile modify profile_name=default'

        params['idle_timeout'] = '0'
        if ppp_cfg.hasS(ns.pppIdleTimeout):
            # short timeouts (less than 10 seconds, say) are not sane, but we
            # assume the user interface checks for sanity
            params['idle_timeout'] = str(ppp_cfg.getS(ns.pppIdleTimeout, rdf.Timedelta).seconds)  # truncate
            self._log.warning('idle timeout specified, not robust with many clients')
            
        params['mtu'] = str(ppp_cfg.getS(ns.pppMtu, rdf.Integer))
        params['mru'] = params['mtu']

        params['local_ipaddr'] = local_ip.toString()


        # XXX: if no echo failure specified, then the tunnels may never die.
        # - tunnels have hello_interval but it only controls the
        #   frequency of the sent HELLO messages
        # - tunnels have idle timeout, but it has meaning only when all the
        #   sessions for tunnel have died out
        # - sessions themselves do not die unless pppd terminates because
        #   they have no timeout..

        # Note: be careful with PPP options -> delete or empty config files!
        # - some options in the /etc/ppp/options file have priority over
        #   command-line options
        # - openl2tp options are always command-line options
        # - this may lead to strange behaviour if there are old config
        #   files still hanging around..

        params['lcp_echo_interval'] = '0'
        params['lcp_echo_failure'] = '0'
        if ppp_cfg.hasS(ns.pppLcpEchoInterval):
            params['lcp_echo_interval'] = str(ppp_cfg.getS(ns.pppLcpEchoInterval, rdf.Timedelta).seconds)
            params['lcp_echo_failure'] = str(ppp_cfg.getS(ns.pppLcpEchoFailure, rdf.Integer))

        params['auth_pap'] = 'no'
        if ppp_auth.hasS(ns.pppPap) and ppp_auth.getS(ns.pppPap, rdf.Boolean):
            params['auth_pap'] = 'yes'

        params['auth_chap'] = 'no'
        if ppp_auth.hasS(ns.pppChap) and ppp_auth.getS(ns.pppChap, rdf.Boolean):
            params['auth_chap'] = 'yes'

        # MSCHAPv1 had problems with pppd RADIUS support
        params['auth_mschapv1'] = 'no'
        if ppp_auth.hasS(ns.pppMschap) and ppp_auth.getS(ns.pppMschap, rdf.Boolean):
            self._log.warn('auth mschapv1 enabled in config but not supported, ignoring')

        params['auth_mschapv2'] = 'no'
        if ppp_auth.hasS(ns.pppMschapV2) and ppp_auth.getS(ns.pppMschapV2, rdf.Boolean):
            params['auth_mschapv2'] = 'yes'

        params['auth_eap'] = 'no'
        if ppp_auth.hasS(ns.pppEap) and ppp_auth.getS(ns.pppEap, rdf.Boolean):
            self._log.warn('eap enabled in config but not supported, ignoring')

        # compression options
        params['comp_mppc'] = 'no'
        if ppp_comp.hasS(ns.pppMppc) and ppp_comp.getS(ns.pppMppc, rdf.Boolean):
            params['comp_mppc'] = 'yes'
        params['comp_mppe'] = 'no'
        if ppp_comp.hasS(ns.pppMppe) and ppp_comp.getS(ns.pppMppe, rdf.Boolean):
            params['comp_mppe'] = 'yes'
        params['comp_accomp'] = 'no'
        if ppp_comp.hasS(ns.pppAccomp) and ppp_comp.getS(ns.pppAccomp, rdf.Boolean):
            params['comp_accomp'] = 'yes'
        params['comp_pcomp'] = 'no'
        if ppp_comp.hasS(ns.pppPcomp) and ppp_comp.getS(ns.pppPcomp, rdf.Boolean):
            params['comp_pcomp'] = 'yes'
        params['comp_bsdcomp'] = 'no'
        if ppp_comp.hasS(ns.pppBsdcomp) and ppp_comp.getS(ns.pppBsdcomp, rdf.Boolean):
            params['comp_bsdcomp'] = 'yes'
        params['comp_deflate'] = 'no'
        if ppp_comp.hasS(ns.pppDeflate) and ppp_comp.getS(ns.pppDeflate, rdf.Boolean):
            params['comp_deflate'] = 'yes'
        params['comp_predictor1'] = 'no'
        if ppp_comp.hasS(ns.pppPredictor1) and ppp_comp.getS(ns.pppPredictor1, rdf.Boolean):
            params['comp_predictor1'] = 'yes'
        params['comp_vj'] = 'no'
        if ppp_comp.hasS(ns.pppVj) and ppp_comp.getS(ns.pppVj, rdf.Boolean):
            params['comp_vj'] = 'yes'
        params['comp_ccomp_vj'] = 'no'
        if ppp_comp.hasS(ns.pppCcompVj) and ppp_comp.getS(ns.pppCcompVj, rdf.Boolean):
            params['comp_ccomp_vj'] = 'yes'

        # sanity checks
        if params['comp_pcomp'] == 'yes':
            self._log.warning('pcomp enabled - this breaks in mppc: disabling')
            params['comp_pcomp'] = 'no'
        if params['comp_mppe'] == 'yes':
            self._log.warning('mppe enabled - not handled by protocol: disabling')
            params['comp_mppe'] = 'no'

        # dns servers
        params['dns_ipaddr_pri'] = '0'
        params['dns_ipaddr_sec'] = '0'
        dns_list = res_info.ppp_dns_servers
        if len(dns_list) > 0:
            params['dns_ipaddr_pri'] = dns_list[0].address.toString()
            if len(dns_list) > 1:
                params['dns_ipaddr_sec'] = dns_list[1].address.toString()

        # wins servers
        params['wins_ipaddr_pri'] = '0'
        params['wins_ipaddr_sec'] = '0'
        wins_list = res_info.ppp_wins_servers
        if len(wins_list) > 0:
            params['wins_ipaddr_pri'] = wins_list[0].address.toString()
            if len(wins_list) > 1:
                params['wins_ipaddr_sec'] = wins_list[1].address.toString()

        # XXX: check and set sensible values, these are defaults
        params['max_connect_time'] = '0'
        params['max_failure_count'] = '10'

        # NB: This is actually not set, because it causes problems in Openl2tp
        # (boolean argument doesn't work correctly; it will actually be set!)
        params['default_route'] = 'no'
        params['multilink'] = 'no'

        # NB: always use only radius, also local users are from the local radius server
        params['use_radius'] = 'yes'

        # Force radius plugin to use proper config file of radiusclient-ng
        params['radius_hint'] = constants.RADIUSCLIENT_CONFIG

        # Note: there seems to be a real disagreement between the
        # openl2tp configuration interface and the configuration values
        # actually used/set in openl2tpd:
        # - dns1=0 seems to work in the configuration client, but it actually
        #   sets the IP address as 0.0.0.0 in the pppd config
        # - the zero IPs do not seem to have any effect because pppd is
        #   resilient.
        # - etc.

        if self.debug_on:
            params['trace_flags'] = '2047' # Full trace
        else:
            params['trace_flags'] = '0'

        ppp_conf = textwrap.dedent("""\

        %(prefix)s ip_pool_name=clientpool

        %(prefix)s default_route=%(default_route)s
        %(prefix)s multilink=%(multilink)s
        %(prefix)s use_radius=%(use_radius)s
        %(prefix)s radius_hint=%(radius_hint)s

        %(prefix)s idle_timeout=%(idle_timeout)s

        %(prefix)s mtu=%(mtu)s
        %(prefix)s mru=%(mru)s

        %(prefix)s local_ipaddr=%(local_ipaddr)s

        %(prefix)s lcp_echo_interval=%(lcp_echo_interval)s
        %(prefix)s lcp_echo_failure_count=%(lcp_echo_failure)s

        # Note: all auth options must be on one line
        %(prefix)s \
        req_none=no \
        auth_pap=no \
        auth_chap=no \
        auth_mschapv1=no \
        auth_mschapv2=no \
        auth_eap=no \
        req_pap=%(auth_pap)s \
        req_chap=%(auth_chap)s \
        req_mschapv1=%(auth_mschapv1)s \
        req_mschapv2=%(auth_mschapv2)s \
        req_eap=%(auth_eap)s

        %(prefix)s \
        mppe=%(comp_mppe)s

        %(prefix)s \
        comp_mppc=%(comp_mppc)s \
        comp_accomp=%(comp_accomp)s \
        comp_pcomp=%(comp_pcomp)s \
        comp_bsdcomp=%(comp_bsdcomp)s \
        comp_deflate=%(comp_deflate)s \
        comp_predictor1=%(comp_predictor1)s \
        comp_vj=%(comp_vj)s \
        comp_ccomp_vj=%(comp_ccomp_vj)s

        %(prefix)s dns_ipaddr_pri=%(dns_ipaddr_pri)s
        %(prefix)s dns_ipaddr_sec=%(dns_ipaddr_sec)s
        %(prefix)s wins_ipaddr_pri=%(wins_ipaddr_pri)s
        %(prefix)s wins_ipaddr_sec=%(wins_ipaddr_sec)s

        %(prefix)s max_connect_time=%(max_connect_time)s
        %(prefix)s max_failure_count=%(max_failure_count)s

        %(prefix)s trace_flags=%(trace_flags)s
        """) % params


        # Tunnel profile
        params = {}
        params['prefix'] = 'tunnel profile modify profile_name=default'

        # Default responder port
        params['our_port'] = '1701'

        # XXX: better values, these are defaults.
        # NB: this works ok in practice, and no need to change if no problems seen.
        params['mtu'] = '1460' # This might affect socket behaviour or the pppol2tp kernel module..

        # XXX: this is default in openl2tp code
        # do we need to configure this?
        params['hello_timeout'] = '60'
        params['retry_timeout'] = '1'

        # Note: must set this to some value other than zero to prevent
        # tunnels from hanging when all connections (sessions) are dead
        params['idle_timeout'] = '1800' # 30 minutes

        params['rx_window_size'] = '4'
        params['tx_window_size'] = '10'
        params['max_retries'] = '5'
        
        # XXX: better values, these are defaults
        # possible: none,digital,analog,any
        params['framing_caps'] = 'any'
        params['bearer_caps'] = 'any'

        if self.debug_on:
            params['trace_flags'] = '2047' # Full trace
        else:
            params['trace_flags'] = '0'

        tunnel_conf = textwrap.dedent("""\
        %(prefix)s our_udp_port=%(our_port)s
        %(prefix)s mtu=%(mtu)s
        %(prefix)s hello_timeout=%(hello_timeout)s
        %(prefix)s retry_timeout=%(retry_timeout)s
        %(prefix)s idle_timeout=%(idle_timeout)s

        %(prefix)s rx_window_size=%(rx_window_size)s
        %(prefix)s tx_window_size=%(tx_window_size)s
        %(prefix)s max_retries=%(max_retries)s

        %(prefix)s framing_caps=%(framing_caps)s
        %(prefix)s bearer_caps=%(bearer_caps)s

        %(prefix)s trace_flags=%(trace_flags)s
        """) % params


        # Session profile
        params = {}
        params['prefix'] = 'session profile modify profile_name=default'

        # XXX: should we use sequence numbers for data? maybe not.
        # ppp will receive the packets anyway. reordering might matter
        # for control packets, but that should not happen anyway.
        params['sequencing_required'] = 'no'
        params['use_sequence_numbers'] = 'no'

        if self.debug_on:
            params['trace_flags'] = '2047' # Full trace
        else:
            params['trace_flags'] = '0'

        session_conf = textwrap.dedent("""\
        %(prefix)s sequencing_required=%(sequencing_required)s
        %(prefix)s use_sequence_numbers=%(use_sequence_numbers)s

        %(prefix)s trace_flags=%(trace_flags)s
        """) % params

        # Peer profile
        # Note: no trace flags available for peer profile.. duh.
        params = {}
        params['prefix'] = 'peer profile modify profile_name=default'

        peer_conf = textwrap.dedent("""\
        """) % params

        self.configs = [{'file': constants.OPENL2TP_CONF,
                         'cont': ppp_conf + tunnel_conf + session_conf + peer_conf}]
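Code example #4 above shows the project's pattern for applying such commands: pipe the script to constants.CMD_OPENL2TPCONFIG via run_command on stdin. A minimal sketch of the same idea for the default profiles generated here (whether the project applies them exactly this way is an assumption):

# Sketch: apply the generated default-profile commands by piping them to the
# openl2tp config tool, mirroring the _run_config helper in code example #4.
config = ppp_conf + tunnel_conf + session_conf + peer_conf + '\nquit\n'
rv, out, err = run_command([constants.CMD_OPENL2TPCONFIG], stdin=str(config))
if rv != 0:
    raise Exception('applying openl2tp default profiles failed: %s' % str(err))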
Code example #6
    def save_network_data(self, ctx, form, data):
        def _save_ip_address(rdf_node, fda):
            if fda['ip_address_selection'] == 'dhcp':
                rdf_node.setS(ns_ui.address, rdf.Type(ns_ui.DhcpAddress))
            elif fda['ip_address_selection'] == 'static':
                static_node = rdf_node.setS(ns_ui.address,
                                            rdf.Type(ns_ui.StaticAddress))
                static_node.setS(ns_ui.ipAddress, rdf.IPv4Address,
                                 fda['ip_address'])
                static_node.setS(ns_ui.subnetMask, rdf.IPv4Address,
                                 fda['subnet_mask'])
            else:
                raise uidatahelpers.FormDataError(
                    'ip_address_selection is neither dhcp nor static')

        def _save_client_traffic(rdf_node, fda):
            client_nat = False
            client_proxy = False
            if fda['client_traffic'] == 'nat':
                client_nat = True
            elif fda['client_traffic'] == 'proxyarp':
                client_proxy = True
            rdf_node.setS(ns_ui.clientConnectionNat, rdf.Boolean, client_nat)
            rdf_node.setS(ns_ui.clientConnectionProxyArp, rdf.Boolean,
                          client_proxy)

        fda = formalutils.FormDataAccessor(form, [], ctx)
        ui_root = helpers.get_new_ui_config()

        # Interface count
        ic_root = ui_root.setS(ns_ui.internetConnection,
                               rdf.Type(ns_ui.NetworkConnection))
        if fda['ifcount_group.interface_count'] == 'oneif':
            ui_root.removeNodes(ns_ui.privateNetworkConnection)
            pn_root = None
        elif fda['ifcount_group.interface_count'] == 'twoif':
            pn_root = ui_root.setS(ns_ui.privateNetworkConnection,
                                   rdf.Type(ns_ui.NetworkConnection))
        else:
            raise uidatahelpers.FormDataError(
                'interface_count is neither oneif nor twoif.')

        # Internet connection
        ic_fda = fda.descend('ic_group')
        ic_root.setS(ns_ui.interface, rdf.String, ic_fda['if'])
        _save_ip_address(ic_root, ic_fda)
        uidatahelpers.save_optional_field_to_rdf(ic_root, ns_ui.defaultGateway,
                                                 rdf.IPv4Address, ic_fda,
                                                 'default_gateway')
        ic_root.setS(ns_ui.mtu, rdf.Integer, int(ic_fda['mtu']))
        uidatahelpers.save_optional_field_to_rdf(ic_root, ns_ui.vpnUplink,
                                                 rdf.Float, ic_fda, 'uplink')
        _save_client_traffic(ic_root, ic_fda)

        # Private network connection, fill if exists.
        if not (pn_root is None):
            pn_fda = fda.descend('pn_group')
            pn_root.setS(ns_ui.interface, rdf.String, pn_fda['if'])
            _save_ip_address(pn_root, pn_fda)
            uidatahelpers.save_optional_field_to_rdf(pn_root,
                                                     ns_ui.defaultGateway,
                                                     rdf.IPv4Address, pn_fda,
                                                     'default_gateway')
            _save_client_traffic(pn_root, pn_fda)

        # DNS Servers
        dns_fda = fda.descend('dns_group')
        if dns_fda['dns_selection'] == 'use_dhcp_ic':
            dns_root = ui_root.setS(ns_ui.dnsServers,
                                    rdf.Type(ns_ui.InternetConnectionDhcp))
        elif dns_fda['dns_selection'] == 'use_dhcp_pn':
            dns_root = ui_root.setS(
                ns_ui.dnsServers, rdf.Type(ns_ui.PrivateNetworkConnectionDhcp))
        elif dns_fda['dns_selection'] == 'set_manually':
            dns_root = ui_root.setS(ns_ui.dnsServers,
                                    rdf.Type(ns_ui.SetDnsServers))

            # XXX: dns_1 is not really optional here; we should not save dns_2 if we don't have dns_1, it makes no sense
            uidatahelpers.save_optional_field_to_rdf(dns_root,
                                                     ns_ui.primaryDns,
                                                     rdf.IPv4Address, dns_fda,
                                                     'dns_1')
            uidatahelpers.save_optional_field_to_rdf(dns_root,
                                                     ns_ui.secondaryDns,
                                                     rdf.IPv4Address, dns_fda,
                                                     'dns_2')

        # Dyndns
        ddns_fda = fda.descend('ddns_group')
        if uidatahelpers.has_form_value(ddns_fda, 'ddns_provider') and \
           (ddns_fda['ddns_provider'] != 'none'):
            ddns_root = ui_root.setS(ns_ui.dynDnsServer,
                                     rdf.Type(ns_ui.DynDnsServer))
            ddns_root.setS(ns_ui.dynDnsProvider, rdf.String,
                           ddns_fda['ddns_provider'])
            ddns_root.setS(ns_ui.dynDnsUsername, rdf.String,
                           ddns_fda['ddns_username'])
            ddns_root.setS(ns_ui.dynDnsPassword, rdf.String,
                           ddns_fda['ddns_password'])
            ddns_root.setS(ns_ui.dynDnsHostname, rdf.String,
                           ddns_fda['ddns_hostname'])

            tmp = ddns_fda['ddns_address_type']
            if tmp == 'interface':
                ddns_root.setS(ns_ui.dynDnsAddress, rdf.String, '')
            elif tmp == 'natted':
                ddns_root.setS(ns_ui.dynDnsAddress, rdf.String, 'natted')
            elif tmp == 'static':
                if (ddns_fda.has_key('ddns_address')) and \
                       (ddns_fda['ddns_address'] is not None) and \
                       (ddns_fda['ddns_address'] != ''):
                    ddns_root.setS(ns_ui.dynDnsAddress, rdf.String,
                                   ddns_fda['ddns_address'])
                else:
                    ddns_root.setS(ns_ui.dynDnsAddress, rdf.String, '')
        else:
            ui_root.removeNodes(ns_ui.dynDnsServer)
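save_optional_field_to_rdf is called several times above but not shown in this snippet. A sketch of the assumed contract only (store the form value when present, otherwise drop the node); the real codebay uidatahelpers implementation may differ:

def save_optional_field_to_rdf(rdf_node, rdf_predicate, rdf_type, fda, field_name):
    # Sketch of the assumed contract: set the value when the form field is
    # filled in, otherwise remove any previously stored node.
    if has_form_value(fda, field_name) and fda[field_name] is not None and fda[field_name] != '':
        rdf_node.setS(rdf_predicate, rdf_type, fda[field_name])
    else:
        rdf_node.removeNodes(rdf_predicate)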
Code example #7
    def fill_network_config(self, form, ctx, fda):
        def _fill_address_to_form(rdf_node, fda):
            if (rdf_node.getS(ns_ui.address).hasType(ns_ui.DhcpAddress)):
                fda['ip_address_selection'] = 'dhcp'
            elif (rdf_node.getS(ns_ui.address).hasType(ns_ui.StaticAddress)):
                fda['ip_address_selection'] = 'static'
                fda['ip_address'] = rdf_node.getS(ns_ui.address).getS(
                    ns_ui.ipAddress, rdf.IPv4Address)
                fda['subnet_mask'] = rdf_node.getS(ns_ui.address).getS(
                    ns_ui.subnetMask, rdf.IPv4Address)
            else:
                raise uidatahelpers.RdfDataError(
                    'ns_ui.address is not dhcp or static')

        def _fill_client_traffic_to_form(rdf_node, fda):
            if rdf_node.getS(ns_ui.clientConnectionNat, rdf.Boolean):
                fda['client_traffic'] = 'nat'
            elif rdf_node.getS(ns_ui.clientConnectionProxyArp, rdf.Boolean):
                fda['client_traffic'] = 'proxyarp'
            else:
                fda['client_traffic'] = 'none'

        root = helpers.get_ui_config()

        # interface count (ifcount_group)
        ifc_fda = fda.descend('ifcount_group')
        if root.hasS(ns_ui.privateNetworkConnection):
            ifc_fda['interface_count'] = 'twoif'
            pn_root = root.getS(ns_ui.privateNetworkConnection,
                                rdf.Type(ns_ui.NetworkConnection))
        else:
            ifc_fda['interface_count'] = 'oneif'
            pn_root = None

        # internet connection (ic_group)
        ic_root = root.getS(ns_ui.internetConnection,
                            rdf.Type(ns_ui.NetworkConnection))
        ic_fda = fda.descend('ic_group')
        ic_fda['if'] = ic_root.getS(ns_ui.interface, rdf.String)
        _fill_address_to_form(ic_root, ic_fda)
        uidatahelpers.fill_optional_field_to_form(ic_root,
                                                  ns_ui.defaultGateway,
                                                  rdf.IPv4Address, ic_fda,
                                                  'default_gateway')
        ic_fda['mtu'] = ic_root.getS(ns_ui.mtu, rdf.Integer)
        uidatahelpers.fill_optional_field_to_form(ic_root, ns_ui.vpnUplink,
                                                  rdf.Float, ic_fda, 'uplink')
        _fill_client_traffic_to_form(ic_root, ic_fda)

        # private network connection (pn_group)
        if not (pn_root is None):
            pn_fda = fda.descend('pn_group')
            pn_fda['if'] = pn_root.getS(ns_ui.interface, rdf.String)
            _fill_address_to_form(pn_root, pn_fda)
            uidatahelpers.fill_optional_field_to_form(pn_root,
                                                      ns_ui.defaultGateway,
                                                      rdf.IPv4Address, pn_fda,
                                                      'default_gateway')
            _fill_client_traffic_to_form(pn_root, pn_fda)

        # dns servers (dns_group)
        dns_root = root.getS(ns_ui.dnsServers)
        dns_fda = fda.descend('dns_group')
        if dns_root.hasType(ns_ui.InternetConnectionDhcp):
            dns_fda['dns_selection'] = 'use_dhcp_ic'
        elif dns_root.hasType(ns_ui.PrivateNetworkConnectionDhcp):
            dns_fda['dns_selection'] = 'use_dhcp_pn'
        elif dns_root.hasType(ns_ui.SetDnsServers):
            dns_fda['dns_selection'] = 'set_manually'
            dns_exists = False
            if dns_root.hasS(ns_ui.primaryDns):
                dns_exists = True
                dns_fda['dns_1'] = dns_root.getS(ns_ui.primaryDns,
                                                 rdf.IPv4Address)
            if dns_root.hasS(ns_ui.secondaryDns):
                dns_exists = True
                dns_fda['dns_2'] = dns_root.getS(ns_ui.secondaryDns,
                                                 rdf.IPv4Address)
            if not (dns_exists):
                _log.warning('no dns servers when filling form data')
        else:
            raise uidatahelpers.RdfDataError(
                'Unknown DNS server information selection')

        # dynamic dns (ddns_group)
        ddns_fda = fda.descend('ddns_group')
        if root.hasS(ns_ui.dynDnsServer):
            ddns_root = root.getS(ns_ui.dynDnsServer,
                                  rdf.Type(ns_ui.DynDnsServer))
            ddns_fda['ddns_provider'] = ddns_root.getS(ns_ui.dynDnsProvider,
                                                       rdf.String)
            ddns_fda['ddns_username'] = ddns_root.getS(ns_ui.dynDnsUsername,
                                                       rdf.String)
            ddns_fda['ddns_password'] = ddns_root.getS(ns_ui.dynDnsPassword,
                                                       rdf.String)
            ddns_fda['ddns_hostname'] = ddns_root.getS(ns_ui.dynDnsHostname,
                                                       rdf.String)
            if ddns_root.hasS(ns_ui.dynDnsAddress):
                tmp = ddns_root.getS(ns_ui.dynDnsAddress, rdf.String)
                if tmp == '':
                    ddns_fda['ddns_address_type'] = 'interface'
                    ddns_fda['ddns_address'] = ''
                elif tmp == 'natted':
                    ddns_fda['ddns_address_type'] = 'natted'
                    ddns_fda['ddns_address'] = ''
                else:
                    ddns_fda['ddns_address_type'] = 'static'
                    ddns_fda['ddns_address'] = tmp
            else:
                ddns_fda['ddns_address_type'] = 'interface'
                ddns_fda['ddns_address'] = ''
        else:
            ddns_fda['ddns_provider'] = 'none'
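fill_optional_field_to_form mirrors the save helper used in code example #6. A sketch of the assumed contract (copy a value into the form data only when the node exists); the real uidatahelpers implementation may differ:

def fill_optional_field_to_form(rdf_node, rdf_predicate, rdf_type, fda, field_name):
    # Sketch of the assumed contract: expose the stored value to the form
    # only if the predicate is present on the node.
    if rdf_node.hasS(rdf_predicate):
        fda[field_name] = rdf_node.getS(rdf_predicate, rdf_type)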
Code example #8
File: autoconfigexe.py  Project: zeus911/vpnease-l2tp
    def renderHTTP(self, ctx):
        request = inevow.IRequest(ctx)

        # read unpatched exe
        f = None
        exedata = ''
        try:
            f = open(self.autoconfig_exe_filename, 'rb')
            exedata = f.read()
        finally:
            if f is not None:
                f.close()
                f = None

        # figure parameters
        server_address_in_uri = None
        try:
            server_address_in_uri = str(request.getRequestHostname())
        except:
            _log.exception('cannot figure out server_address_in_uri')

        server_ip = None
        try:
            server_ip = self._get_server_ip_for_win2k(ctx)
        except:
            _log.exception('cannot figure out server_ip')

        server_address = None
        if self.force_server_address_to_ip:
            server_address = server_ip
        else:
            server_address = server_address_in_uri

        if (server_address_in_uri is None) or (server_address_in_uri == ''):
            raise Exception('server_address_in_uri missing, failing')
        if (server_address is None) or (server_address == ''):
            raise Exception('server_address missing, failing')
        if self.include_win2k_regdata and ((server_ip is None) or (server_ip == '')):
            raise Exception('server_ip is needed and missing, failing')
        
        preshared_key = ''
        try:
            psk_seq = helpers.get_ui_config().getS(ns_ui.preSharedKeys, rdf.Seq(rdf.Type(ns_ui.PreSharedKey)))
            preshared_key = str(psk_seq[0].getS(ns_ui.preSharedKey, rdf.String))
        except:
            _log.exception('cannot figure out preshared_key')

        username = ''
        try:
            tmp = self.get_logged_in_username()
            if tmp is not None:
                username = str(tmp)
        except:
            _log.exception('cannot figure out username')
            
        # Profile name, always uses address in URI, even if server address itself forced to IP
        profile_prefix = 'VPNease'
        try:
            if os.path.exists(constants.AUTOCONFIG_PROFILE_PREFIX_FILE):
                profile_prefix = helpers.read_and_strip_file(constants.AUTOCONFIG_PROFILE_PREFIX_FILE)
        except:
            _log.exception('failed when checking for alternative profile name')
        profile_name = '%s (%s)' % (profile_prefix, server_address_in_uri)

        # Server behind port forward
        server_portfw = False
        try:
            global_st = helpers.get_global_status()
            if global_st.hasS(ns.behindNat):
                if global_st.getS(ns.behindNat, rdf.Boolean):
                    server_portfw = True
                else:
                    server_portfw = False
            else:
                # assume worst - reboot *MAY* be required
                server_portfw = True

            # markerfile for debugging
            if helpers.check_marker_file(constants.FORCE_NATTREBOOT_MARKERFILE):
                _log.warning('force nat-t reboot marker file exists, pretending server is behind port forward')
                server_portfw = True
        except:
            _log.exception('cannot determine whether server is behind port forward, may be OK')

        # Windows 2000 registry-based IPsec policy + prohibitIpsec
        win2k_ipsec_policy_registry_file = ''
        try:
            if self.include_win2k_regdata:
                # Registry data is HEX encoded UTF-16; HEX encoding is used to avoid problems
                # with the parameters.cpp mechanism (null termination).  The resulting data is
                # large, around 50 kilobytes (!).

                # Always uses server IP for IPsec policy, because that's what Windows 2000 IPsec wants
                t = self._get_win2k_reg_file(server_ip, preshared_key)
                t = self._encode_windows_reg_file(t)  # UTF-16
                win2k_ipsec_policy_registry_file = t.encode('hex')  # hex-encoded UTF-16
        except:
            _log.exception('cannot create win2k registry file')
        
        # Fill paramdict and return
        paramdict = {}
        paramdict['operation'] = 'configure_profile'
        paramdict['profile_name'] = profile_name
        paramdict['desktop_shortcut_name'] = '%s.LNK' % profile_name  # xxx: for now the same
        paramdict['server_address'] = server_address
        paramdict['preshared_key'] = preshared_key
        paramdict['username'] = username
        paramdict['ppp_compression_enabled'] = '1'
        paramdict['default_route_enabled'] = '1'
        paramdict['create_desktop_shortcut'] = '1'
        paramdict['open_profile_after_creation'] = '1'
        if server_portfw:
            paramdict['server_behind_port_forward'] = '1'
        else:
            paramdict['server_behind_port_forward'] = '0'
        if self.include_win2k_regdata:
            paramdict['win2k_registry_file'] = win2k_ipsec_policy_registry_file
        return uihelpers.RewritingBinaryResource(exedata, paramdict)
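helpers.check_marker_file and helpers.read_and_strip_file are small file helpers referenced above; a sketch of the assumed behaviour (the real helpers module may differ):

import os

def check_marker_file(path):
    # Sketch: a marker file is just an existence check.
    return os.path.exists(path)

def read_and_strip_file(path):
    # Sketch: read a short text file and strip surrounding whitespace.
    f = open(path, 'r')
    try:
        return f.read().strip()
    finally:
        f.close()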
Code example #9
File: crontab.py  Project: zeus911/vpnease-l2tp
def _update_snmp():
    """Update SNMP data."""

    from codebay.l2tpserver import licensemanager
    from codebay.l2tpserver import helpers
    from codebay.l2tpserver.webui import uihelpers

    now = datetime.datetime.utcnow()
    st = helpers.get_status()
    global_st = helpers.get_global_status()
    license_info = helpers.get_license_info()

    def _timeticks(td):
        return int(helpers.timedelta_to_seconds(td) * 100.0)

    def _timestamp(dt):
        return datatypes.encode_datetime_to_iso8601_subset(dt)

    def _get_management_conn():
        # XXX: not the best place for this
        if global_st.hasS(ns.managementServerConnection):
            if global_st.getS(ns.managementServerConnection, rdf.Boolean):
                return 1
        return 0
        
    vals = {}

    lm = licensemanager.LicenseMonitor()
    usr_count, usr_limit, usr_limit_leeway, s2s_count, s2s_limit, s2s_limit_leeway = None, None, None, None, None, None
    try:
        usr_count, usr_limit, usr_limit_leeway, s2s_count, s2s_limit, s2s_limit_leeway = lm.count_both_users()
    except:
        _log.exception('cannot get ppp counts for snmp')

    # XXX: this sharing of status code is quite unclean; see uihelpers.get_status_and_substatus() for suggestions
    health_errors = 0
    try:
        status_class, status_text, substatus_class, substatus_text, status_ok = uihelpers.get_status_and_substatus()
        if status_ok:
            health_errors = 0
        else:
            health_errors = 1
    except:
        _log.exception('cannot determine health errors')
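    # each value below is wrapped in a lambda so evaluation is deferred into the
    # per-key try/except: a missing RDF node or marker file drops only that key
    # instead of aborting the whole SNMP update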
    
    for k, l in [ ('vpneaseHealthCheckErrors',       lambda: health_errors),
                  ('vpneaseUserCount',               lambda: usr_count),
                  ('vpneaseSiteToSiteCount',         lambda: s2s_count),
                  ('vpneaseLastMaintenanceReboot',   lambda: _timestamp(helpers.read_datetime_marker_file(constants.LAST_AUTOMATIC_REBOOT_MARKER_FILE))),
                  ('vpneaseNextMaintenanceReboot',   lambda: _timestamp(uihelpers.compute_periodic_reboot_time())),
                  ('vpneaseLastSoftwareUpdate',      lambda: _timestamp(helpers.read_datetime_marker_file(constants.LAST_SUCCESSFUL_UPDATE_MARKER_FILE))),
                  ('vpneaseSoftwareVersion',         lambda: helpers.get_product_version(cache=True, filecache=True)),
                  ('vpneaseCpuUsage',                lambda: int(global_st.getS(ns.cpuUsage, rdf.Float))),
                  ('vpneaseMemoryUsage',             lambda: int(global_st.getS(ns.memoryUsage, rdf.Float))),
                  ('vpneaseVirtualMemoryUsage',      lambda: int(global_st.getS(ns.swapUsage, rdf.Float))),
                  ('vpneaseServiceUptime',           lambda: _timeticks(now - st.getS(ns.startTime, rdf.Datetime))),
                  ('vpneaseHostUptime',              lambda: _timeticks(datetime.timedelta(0, helpers.get_uptime(), 0))),
                  ('vpneasePublicAddress',           lambda: st.getS(ns.publicInterface, rdf.Type(ns.NetworkInterface)).getS(ns.ipAddress, rdf.IPv4AddressSubnet).getAddress().toString()),
                  ('vpneasePublicSubnet',            lambda: st.getS(ns.publicInterface, rdf.Type(ns.NetworkInterface)).getS(ns.ipAddress, rdf.IPv4AddressSubnet).getMask().toString()),
                  ('vpneasePublicMac',               lambda: st.getS(ns.publicInterface, rdf.Type(ns.NetworkInterface)).getS(ns.macAddress, rdf.String)),
                  ('vpneasePrivateAddress',          lambda: st.getS(ns.privateInterface, rdf.Type(ns.NetworkInterface)).getS(ns.ipAddress, rdf.IPv4AddressSubnet).getAddress().toString()),
                  ('vpneasePrivateSubnet',           lambda: st.getS(ns.privateInterface, rdf.Type(ns.NetworkInterface)).getS(ns.ipAddress, rdf.IPv4AddressSubnet).getMask().toString()),
                  ('vpneasePrivateMac',              lambda: st.getS(ns.privateInterface, rdf.Type(ns.NetworkInterface)).getS(ns.macAddress, rdf.String)),
                  ('vpneaseLicenseKey',              lambda: license_info.getS(ns_ui.licenseKey, rdf.String)),
                  ('vpneaseLicenseString',           lambda: license_info.getS(ns_ui.licenseString, rdf.String)),
                  ('vpneaseLicenseUserLimit',        lambda: usr_limit),
                  ('vpneaseLicenseSiteToSiteLimit',  lambda: s2s_limit),
                  ('vpneaseMaintenanceReboots',      lambda: global_st.getS(ns.periodicReboots, rdf.Integer)),
                  ('vpneaseWatchdogReboots',         lambda: global_st.getS(ns.watchdogReboots, rdf.Integer)),
                  ('vpneaseLicenseServerConnection', _get_management_conn),
                  ]:
        try:
            val = l()
            if val is not None:
                vals[k] = val
        except:
            # these are expected in several cases, so don't spew too much log about them
            # XXX: it would be better if the checkers would figure these out for themselves
            # (when a value is expected and when not)
            _log.info('failed to get snmp value for key %s' % k)
            #_log.exception('failed to get snmp value for key %s' % k)
                  
    keys = vals.keys()
    keys.sort()
    res = ''
    for k in keys:
        res += '%s=%s\n' % (k, vals[k])

    # to ASCII, escaping any non-ASCII chars with XML escapes
    res = res.encode('US-ASCII', 'xmlcharrefreplace')

    f = None
    try:
        f = open(constants.SNMP_DATA_FILE, 'wb')
        f.write(res)
    finally:
        if f:
            f.close()
        f = None
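_update_snmp() serializes the collected values as plain key=value lines and writes them, ASCII-encoded, to constants.SNMP_DATA_FILE. A minimal sketch of a consumer that reads that format back into a dict follows; the default path is a hypothetical placeholder, the real location comes from constants.SNMP_DATA_FILE.

# Minimal sketch: parse the key=value data written by _update_snmp().
# The default filename is a hypothetical placeholder; use
# constants.SNMP_DATA_FILE in the real environment.
def read_snmp_data(filename='/var/run/snmp-data.txt'):
    vals = {}
    f = open(filename, 'r')
    try:
        for line in f:
            line = line.strip()
            if not line or '=' not in line:
                continue
            key, value = line.split('=', 1)
            vals[key] = value
    finally:
        f.close()
    return vals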
Code example #10
    def _validate(self, ctx, form, data):
        fda = formalutils.FormDataAccessor(form, ['s2s_connections'], ctx)

        # Get some useful stuff for validation
        ui_root = helpers.get_ui_config()
        pub_iface, pub_addr_subnet = None, None
        if ui_root.hasS(ns_ui.internetConnection):
            pub_iface = ui_root.getS(ns_ui.internetConnection,
                                     rdf.Type(ns_ui.NetworkConnection))
            pub_addr = pub_iface.getS(ns_ui.address)
            if pub_addr.hasType(ns_ui.StaticAddress):
                pub_addr_subnet = datatypes.IPv4AddressSubnet.fromStrings(
                    pub_addr.getS(ns_ui.ipAddress, rdf.IPv4Address).toString(),
                    pub_addr.getS(ns_ui.subnetMask,
                                  rdf.IPv4Address).toString())
        priv_iface, priv_addr_subnet = None, None
        if ui_root.hasS(ns_ui.privateNetworkConnection):
            priv_iface = ui_root.getS(ns_ui.privateNetworkConnection,
                                      rdf.Type(ns_ui.NetworkConnection))
            priv_addr = priv_iface.getS(ns_ui.address)
            if priv_addr.hasType(ns_ui.StaticAddress):
                priv_addr_subnet = datatypes.IPv4AddressSubnet.fromStrings(
                    priv_addr.getS(ns_ui.ipAddress,
                                   rdf.IPv4Address).toString(),
                    priv_addr.getS(ns_ui.subnetMask,
                                   rdf.IPv4Address).toString())
        ppp_subnet = None
        if ui_root.hasS(ns_ui.clientSubnet):
            ppp_subnet = ui_root.getS(ns_ui.clientSubnet, rdf.IPv4Subnet)

        # Validate individual site-to-site connections
        idx = 0
        conns = []
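        # form rows are indexed 0, 1, 2, ...: descend until an index yields an
        # empty accessor, which marks the end of the submitted connection list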
        while True:
            fda_conn = fda.descend(str(idx))
            if len(fda_conn.keys()) == 0:
                break
            conns.append(fda_conn)
            idx += 1

        remote_access_usernames = []
        if ui_root.hasS(ns_ui.users):
            for user in ui_root.getS(ns_ui.users,
                                     rdf.Seq(rdf.Type(ns_ui.User))):
                if user.hasS(ns_ui.username):
                    remote_access_usernames.append(
                        user.getS(ns_ui.username, rdf.String))

        s2s_server_usernames_found = []
        for fda_conn_index, fda_conn in enumerate(conns):
            if fda_conn.has_key('s2s_username'):
                if not uihelpers.check_ppp_username_characters(fda_conn['s2s_username']):
                    fda_conn.add_error('s2s_username', 'Invalid characters')
                elif len(fda_conn['s2s_username']) > constants.MAX_USERNAME_LENGTH:
                    fda_conn.add_error('s2s_username', 'Username too long')

            if fda_conn.has_key('s2s_password'):
                if not uihelpers.check_ppp_password_characters(fda_conn['s2s_password']):
                    fda_conn.add_error('s2s_password', 'Invalid characters')
                elif len(fda_conn['s2s_password']) > constants.MAX_PASSWORD_LENGTH:
                    fda_conn.add_error('s2s_password', 'Password too long')

            if fda_conn.has_key('s2s_mode'):
                mode = fda_conn['s2s_mode']
                if mode == 'client':
                    # psk and server address are mandatory for client
                    if not fda_conn.has_key('s2s_psk') or fda_conn['s2s_psk'] == '' or fda_conn['s2s_psk'] is None:
                        fda_conn.add_error('s2s_psk', 'Required for initiator')
                    else:
                        if not uihelpers.check_preshared_key_characters(fda_conn['s2s_psk']):
                            fda_conn.add_error('s2s_psk', 'Invalid characters')
                    if not fda_conn.has_key('s2s_server') or fda_conn['s2s_server'] == '' or fda_conn['s2s_server'] is None:
                        fda_conn.add_error('s2s_server', 'Required for initiator')
                    else:
                        if not uihelpers.check_dns_name_characters(fda_conn['s2s_server']):
                            fda_conn.add_error('s2s_server', 'Invalid characters')
                else:  # server
                    # must not have duplicate server-mode names; client mode names may be duplicates
                    if fda_conn.has_key('s2s_username'):
                        username = fda_conn['s2s_username']
                        if username in s2s_server_usernames_found:
                            fda_conn.add_error('s2s_username', 'Duplicate username for server mode connection')
                        elif username in remote_access_usernames:
                            fda_conn.add_error('s2s_username', 'Duplicate username for server mode connection (already a user with that name)')
                        else:
                            s2s_server_usernames_found.append(fda_conn['s2s_username'])

            # check subnets
            if fda_conn.has_key('s2s_subnets'):
                subnets = fda_conn['s2s_subnets']

                # check that list doesn't contain overlap inside itself
                overlap_inside_list = False
                for i in xrange(len(subnets)):
                    for j in xrange(len(subnets)):
                        if i != j:
                            if subnets[i].overlapsWithSubnet(subnets[j]):
                                overlap_inside_list = True
                if overlap_inside_list:
                    fda_conn.add_warning('s2s_subnets',
                                         'Subnets in list overlap')

                # check that no element of list overlaps with any other subnet of any other site-to-site connection
                overlap_with_other = False
                for subnet in subnets:
                    for other_conn_index, other_conn in enumerate(conns):
                        if other_conn.has_key('s2s_subnets') and other_conn_index != fda_conn_index:
                            for other_subnet in other_conn['s2s_subnets']:
                                if subnet.overlapsWithSubnet(other_subnet):
                                    overlap_with_other = True
                if overlap_with_other:
                    fda_conn.add_warning(
                        's2s_subnets',
                        'Remote subnet(s) overlap with other connections')

                # check overlap against public interface (check every subnet in
                # the list, not just the one left over from the loop above)
                if pub_addr_subnet is not None:
                    for subnet in subnets:
                        if subnet.overlapsWithSubnet(pub_addr_subnet.getSubnet()):
                            fda_conn.add_warning(
                                's2s_subnets',
                                'Remote subnet(s) overlap with Internet connection subnet')
                            break

                # check overlap against private interface
                if priv_addr_subnet is not None:
                    for subnet in subnets:
                        if subnet.overlapsWithSubnet(priv_addr_subnet.getSubnet()):
                            fda_conn.add_warning(
                                's2s_subnets',
                                'Remote subnet(s) overlap with private network connection subnet')
                            break

                # check overlap against ppp subnet
                if ppp_subnet is not None:
                    for subnet in subnets:
                        if subnet.overlapsWithSubnet(ppp_subnet):
                            fda_conn.add_warning(
                                's2s_subnets',
                                'Remote subnet(s) overlap with client subnet')
                            break
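The subnet checks above reduce to pairwise overlap tests. Below is a standalone sketch of the same idea using the standard-library ipaddress module (Python 3, or the ipaddress backport on Python 2) as a stand-in for the project's datatypes.IPv4Subnet / overlapsWithSubnet API.

# Minimal sketch: pairwise subnet-overlap detection, analogous to the
# s2s_subnets checks above, but with the stdlib ipaddress module instead
# of the project's datatypes API.
import ipaddress

def any_overlap(subnet_strings):
    nets = [ipaddress.ip_network(s) for s in subnet_strings]
    for i in range(len(nets)):
        for j in range(i + 1, len(nets)):
            if nets[i].overlaps(nets[j]):
                return True
    return False

# any_overlap([u'10.0.0.0/24', u'10.0.0.128/25'])  -> True
# any_overlap([u'10.0.0.0/24', u'10.0.1.0/24'])    -> False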