Code example #1
0
 def render_doupdate(self, ctx, data):
     """Flush pending database state, then schedule a reboot that forces a product update."""
     helpers.db_flush()
     reboot_opts = dict(skip_update=False, force_update=True,
                        force_fsck=False, delay=5.0)
     uihelpers.ui_reboot(constants.WEBUI_PRODUCT_UPDATE_MESSAGE, **reboot_opts)
     return ''
Code example #2
0
 def render_doconfigimport(self, ctx, data):
     """Flush the database and reboot so an imported configuration takes effect."""
     helpers.db_flush()
     uihelpers.ui_reboot(
         constants.WEBUI_PRODUCT_IMPORT_REBOOT_MESSAGE,
         skip_update=True, force_update=False, force_fsck=False, delay=5.0)
     return ''
Code example #3
0
 def render_doshutdown(self, ctx, data):
     """Flush pending database state, then schedule a plain shutdown."""
     helpers.db_flush()
     shutdown_kwargs = {'skip_update': False, 'force_update': False,
                        'force_fsck': False, 'delay': 5.0}
     uihelpers.ui_shutdown(constants.WEBUI_PRODUCT_SHUTDOWN_MESSAGE,
                           **shutdown_kwargs)
     return ''
Code example #4
0
File: watchdog.py — Project: nakedible/vpnease-l2tp
    def _watchdog_action(self):
        """Execute watchdog recovery action (reboot), at most once.

        A second invocation only logs a warning.  Incrementing the global
        watchdog-reboot counter and flushing the db are best-effort: a
        failure there is logged but never blocks the reboot itself.
        """

        if self._watchdog_action_started:
            _log.warning('_watchdog_action: watchdog action already started, skipping')
        else:
            _log.error('_watchdog_action: too many watchdog failures, taking watchdog action: reboot')
            try:
                self._watchdog_action_started = True
                helpers.increment_global_status_counter(ns.watchdogReboots)
                helpers.db_flush()
            except Exception:
                # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt propagate
                _log.exception('failed to increment counter')
            uihelpers.ui_reboot(constants.WEBUI_WATCHDOG_SHUTDOWN_MESSAGE, skip_update=False, force_update=False, force_fsck=True, delay=10.0)
Code example #5
0
    def _watchdog_action(self):
        """Execute watchdog recovery action (reboot), at most once.

        A second invocation only logs a warning.  Counter/db bookkeeping
        is best-effort and never blocks the reboot itself.
        """

        if self._watchdog_action_started:
            _log.warning(
                '_watchdog_action: watchdog action already started, skipping')
        else:
            _log.error(
                '_watchdog_action: too many watchdog failures, taking watchdog action: reboot'
            )
            try:
                self._watchdog_action_started = True
                helpers.increment_global_status_counter(ns.watchdogReboots)
                helpers.db_flush()
            except Exception:
                # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt propagate
                _log.exception('failed to increment counter')
            uihelpers.ui_reboot(constants.WEBUI_WATCHDOG_SHUTDOWN_MESSAGE,
                                skip_update=False,
                                force_update=False,
                                force_fsck=True,
                                delay=10.0)
Code example #6
0
File: master.py — Project: nakedible/vpnease-l2tp
    def _immediate_auto_update_check(self, identify_result):
        """Reboot for an immediate update when the server asks for it, or when
        the debug marker file forces an update and a newer version exists."""
        update_now = identify_result["updateImmediately"]

        # XXX: If this debug marker exists, autoupdate is forced if newer is available.
        if os.path.exists(constants.AUTOUPDATE_MARKERFILE):
            current = helpers.get_product_version()
            latest = uihelpers.get_latest_product_version()
            versions_known = (current is not None) and (latest is not None)
            if versions_known and (helpers.compare_product_versions(latest, current) > 0):
                _log.info(
                    "detected that update is available (%s -> %s), immediate automatic update" % (current, latest)
                )
                update_now = True

        if not update_now:
            return

        # take action
        _log.info("management server has requested immediate update, forced check for updates now, rebooting")
        helpers.db_flush()
        uihelpers.ui_reboot(
            "immediate autoupdate", skip_update=False, force_update=True, force_fsck=True, delay=0.0
        )
Code example #7
0
    def _periodic_reboot_check(self):
        """Check for periodic reboot and take action if necessary.

        Tries to be clever and avoid reboot if connections are up.

        Uptime estimation is annoying: if time is changed on this reboot,
        the estimate may be grossly wrong.  To ensure that we don't reboot
        on the first boot (when time is synchronized) uncontrollably, this
        function also checks that enough watchdog rounds have been run to
        warrant a reboot.  The underlying assumption is that web UI has been
        running continuously, which is currently OK because we don't restart
        it ever (cron watchdog will just reboot if UI is down).

        Staggering of reboot is added by randomizing the "minute" of the
        reboot in the range [0,45] (not [0,60] for leeway).  The "minute"
        is randomized when watchdog is created, so it stays the same every
        time for one reboot.  Note that the stagger is effectively only
        applied to the first reboot attempt; next attempts (e.g. next day
        at designated time) will not have a stagger.

        If more staggering behavior is desired, see XXX below.
        """

        uptime = self.master.get_uptime()
        reboot_required = False
        now = datetime.datetime.utcnow()

        _log.debug('_periodic_reboot_check: uptime=%s' % uptime)

        # Check whether UI configuration requires a reboot (time & day match)
        try:
            reboot_limit = uihelpers.compute_periodic_reboot_time()
            reboot_limit += self._periodic_reboot_stagger_delta
            _log.debug(
                '_periodic_reboot_check: reboot limit after stagger: %s' %
                reboot_limit)

            lm = licensemanager.LicenseMonitor()
            count, limit, limit_leeway = lm.count_normal_users()

            # time to periodic reboot (negative = past due)
            diff = reboot_limit - now
            _log.debug(
                '_periodic_reboot_check: periodic reboot diff (limit-now, time to reboot): %s'
                % str(diff))

            if diff <= datetime.timedelta(0, 0, 0):
                overdue = -diff
                _log.debug(
                    '_periodic_reboot_check: periodic reboot is %s overdue' %
                    overdue)
                if count > 0:
                    # there are clients (without license restrictions!), give 24h leeway
                    if overdue < datetime.timedelta(1, 0, 0):  # XXX: hardcoded
                        _log.info(
                            '_periodic_reboot_check: want to do a periodic reboot, but there are active clients (%d), skipping'
                            % count)
                    else:
                        _log.warning(
                            '_periodic_reboot_check: want to a periodic reboot, active clients (%d), but leeway over, rebooting anyway'
                            % count)
                        reboot_required = True
                else:
                    _log.warning(
                        '_periodic_reboot_check: want to do a periodic reboot, and no active clients, ok'
                    )
                    reboot_required = True
        except Exception:
            # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt propagate
            _log.exception(
                '_periodic_reboot_check: failed when checking for periodic reboot policy'
            )

        # If not within periodic reboot time window (e.g. 02:00-03:00 local time),
        # skip periodic reboot.
        if reboot_required:
            # XXX: better stagger check could be applied here (checked every day)
            if not uihelpers.check_periodic_reboot_time_window(now):
                _log.warning(
                    '_periodic_reboot_check: want to do a periodic reboot, but not within periodic reboot time window'
                )
                reboot_required = False

        # If more than a maximum number of days, reboot, despite configuration
        if uptime > constants.PERIODIC_REBOOT_MAX_UPTIME:
            _log.warning(
                '_periodic_reboot_check: uptime is too large (%s), requires reboot'
                % uptime)
            reboot_required = True
        elif uptime < 0.0:
            # negative uptime: ignore it for now; if the diff is great, we'll get a periodic reboot anyway later
            _log.warning(
                '_periodic_reboot_check: uptime is negative (%s), ignoring' %
                uptime)

        # Sanity check: if we want to reboot, check that enough watchdog rounds
        # have elapsed (roughly 24h).
        if reboot_required:
            rounds = self.get_watchdog_rounds()
            if rounds < constants.PERIODIC_REBOOT_MINIMUM_WATCHDOG_ROUNDS:
                _log.warning(
                    '_periodic_reboot_check: want to do periodic reboot, but watchdog rounds too low (%d < %d)'
                    % (rounds,
                       constants.PERIODIC_REBOOT_MINIMUM_WATCHDOG_ROUNDS))
                reboot_required = False

        # Take action if necessary
        if reboot_required:
            if self._periodic_reboot_started:
                _log.info(
                    '_periodic_reboot_check: reboot required but periodic reboot already in progress, no action needed'
                )
            else:
                try:
                    _log.warning(
                        '_periodic_reboot_check: periodic reboot started')
                    self._periodic_reboot_started = True
                    self._periodic_reboot_show_warning()
                    helpers.increment_global_status_counter(ns.periodicReboots)
                    helpers.db_flush()
                except Exception:
                    # best-effort bookkeeping; never block the reboot
                    _log.exception('failed to increment counter')

                try:
                    helpers.write_datetime_marker_file(
                        constants.LAST_AUTOMATIC_REBOOT_MARKER_FILE)
                except Exception:
                    # best-effort marker write; never block the reboot
                    _log.exception(
                        'failed to write last automatic reboot marker file')

                uihelpers.ui_reboot(
                    constants.WEBUI_PRODUCT_PERIODIC_REBOOT_MESSAGE,
                    skip_update=False,
                    force_update=False,
                    force_fsck=True,
                    delay=120.0)  # XXX: constants
Code example #8
0
File: management.py — Project: nakedible/vpnease-l2tp
 def render_doconfigimport(self, ctx, data):
     """Flush the database, then reboot to apply the imported configuration."""
     helpers.db_flush()
     uihelpers.ui_reboot(constants.WEBUI_PRODUCT_IMPORT_REBOOT_MESSAGE,
                         skip_update=True,
                         force_update=False,
                         force_fsck=False,
                         delay=5.0)
     return ''
Code example #9
0
File: management.py — Project: nakedible/vpnease-l2tp
 def render_doupdate(self, ctx, data):
     """Flush the database, then reboot with a forced product update."""
     helpers.db_flush()
     uihelpers.ui_reboot(constants.WEBUI_PRODUCT_UPDATE_MESSAGE,
                         skip_update=False,
                         force_update=True,
                         force_fsck=False,
                         delay=5.0)
     return ''
Code example #10
0
File: management.py — Project: nakedible/vpnease-l2tp
 def render_doshutdown(self, ctx, data):
     """Flush the database, then schedule a shutdown."""
     helpers.db_flush()
     uihelpers.ui_shutdown(constants.WEBUI_PRODUCT_SHUTDOWN_MESSAGE,
                           skip_update=False,
                           force_update=False,
                           force_fsck=False,
                           delay=5.0)
     return ''
Code example #11
0
File: watchdog.py — Project: nakedible/vpnease-l2tp
    def _periodic_reboot_check(self):
        """Check for periodic reboot and take action if necessary.

        Tries to be clever and avoid reboot if connections are up.

        Uptime estimation is annoying: if time is changed on this reboot,
        the estimate may be grossly wrong.  To ensure that we don't reboot
        on the first boot (when time is synchronized) uncontrollably, this
        function also checks that enough watchdog rounds have been run to
        warrant a reboot.  The underlying assumption is that web UI has been
        running continuously, which is currently OK because we don't restart
        it ever (cron watchdog will just reboot if UI is down).

        Staggering of reboot is added by randomizing the "minute" of the
        reboot in the range [0,45] (not [0,60] for leeway).  The "minute"
        is randomized when watchdog is created, so it stays the same every
        time for one reboot.  Note that the stagger is effectively only
        applied to the first reboot attempt; next attempts (e.g. next day
        at designated time) will not have a stagger.

        If more staggering behavior is desired, see XXX below.
        """

        uptime = self.master.get_uptime()
        reboot_required = False
        now = datetime.datetime.utcnow()

        _log.debug('_periodic_reboot_check: uptime=%s' % uptime)

        # Check whether UI configuration requires a reboot (time & day match)
        try:
            reboot_limit = uihelpers.compute_periodic_reboot_time()
            reboot_limit += self._periodic_reboot_stagger_delta
            _log.debug('_periodic_reboot_check: reboot limit after stagger: %s' % reboot_limit)

            lm = licensemanager.LicenseMonitor()
            count, limit, limit_leeway = lm.count_normal_users()

            # time to periodic reboot (negative = past due)
            diff = reboot_limit - now
            _log.debug('_periodic_reboot_check: periodic reboot diff (limit-now, time to reboot): %s' % str(diff))

            if diff <= datetime.timedelta(0, 0, 0):
                overdue = -diff
                _log.debug('_periodic_reboot_check: periodic reboot is %s overdue' % overdue)
                if count > 0:
                    # there are clients (without license restrictions!), give 24h leeway
                    if overdue < datetime.timedelta(1, 0, 0):  # XXX: hardcoded
                        _log.info('_periodic_reboot_check: want to do a periodic reboot, but there are active clients (%d), skipping' % count)
                    else:
                        _log.warning('_periodic_reboot_check: want to a periodic reboot, active clients (%d), but leeway over, rebooting anyway' % count)
                        reboot_required = True
                else:
                    _log.warning('_periodic_reboot_check: want to do a periodic reboot, and no active clients, ok')
                    reboot_required = True
        except Exception:
            # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt propagate
            _log.exception('_periodic_reboot_check: failed when checking for periodic reboot policy')

        # If not within periodic reboot time window (e.g. 02:00-03:00 local time),
        # skip periodic reboot.
        if reboot_required:
            # XXX: better stagger check could be applied here (checked every day)
            if not uihelpers.check_periodic_reboot_time_window(now):
                _log.warning('_periodic_reboot_check: want to do a periodic reboot, but not within periodic reboot time window')
                reboot_required = False

        # If more than a maximum number of days, reboot, despite configuration
        if uptime > constants.PERIODIC_REBOOT_MAX_UPTIME:
            _log.warning('_periodic_reboot_check: uptime is too large (%s), requires reboot' % uptime)
            reboot_required = True
        elif uptime < 0.0:
            # negative uptime: ignore it for now; if the diff is great, we'll get a periodic reboot anyway later
            _log.warning('_periodic_reboot_check: uptime is negative (%s), ignoring' % uptime)

        # Sanity check: if we want to reboot, check that enough watchdog rounds
        # have elapsed (roughly 24h).
        if reboot_required:
            rounds = self.get_watchdog_rounds()
            if rounds < constants.PERIODIC_REBOOT_MINIMUM_WATCHDOG_ROUNDS:
                _log.warning('_periodic_reboot_check: want to do periodic reboot, but watchdog rounds too low (%d < %d)' % (rounds, constants.PERIODIC_REBOOT_MINIMUM_WATCHDOG_ROUNDS))
                reboot_required = False

        # Take action if necessary
        if reboot_required:
            if self._periodic_reboot_started:
                _log.info('_periodic_reboot_check: reboot required but periodic reboot already in progress, no action needed')
            else:
                try:
                    _log.warning('_periodic_reboot_check: periodic reboot started')
                    self._periodic_reboot_started = True
                    self._periodic_reboot_show_warning()
                    helpers.increment_global_status_counter(ns.periodicReboots)
                    helpers.db_flush()
                except Exception:
                    # best-effort bookkeeping; never block the reboot
                    _log.exception('failed to increment counter')

                try:
                    helpers.write_datetime_marker_file(constants.LAST_AUTOMATIC_REBOOT_MARKER_FILE)
                except Exception:
                    # best-effort marker write; never block the reboot
                    _log.exception('failed to write last automatic reboot marker file')

                uihelpers.ui_reboot(constants.WEBUI_PRODUCT_PERIODIC_REBOOT_MESSAGE, skip_update=False, force_update=False, force_fsck=True, delay=120.0)  # XXX: constants