Ejemplo n.º 1
0
    def run(self):
        """ run, endless loop, until sigterm is received
        :return: None
        """
        # check database consistency / repair
        syslog.syslog(syslog.LOG_NOTICE, 'startup, check database.')
        check_and_repair('/var/netflow/*.sqlite')

        vacuum_interval = (60 * 60 * 8)  # 8 hour vacuum cycle
        vacuum_countdown = None
        syslog.syslog(syslog.LOG_NOTICE, 'start watching flowd')
        while self.running:
            # should we perform a vacuum (always on the very first pass,
            # afterwards once every vacuum_interval seconds)
            if not vacuum_countdown or vacuum_countdown < time.time():
                vacuum_countdown = time.time() + vacuum_interval
                do_vacuum = True
            else:
                do_vacuum = False

            # run aggregate; catch Exception instead of a bare except so
            # KeyboardInterrupt / SystemExit still propagate normally
            try:
                aggregate_flowd(do_vacuum)
                if do_vacuum:
                    syslog.syslog(syslog.LOG_NOTICE, 'vacuum done')
            except Exception:
                syslog.syslog(syslog.LOG_ERR, 'flowd aggregate died with message %s' % (traceback.format_exc()))
                return
            # rotate if needed
            check_rotate()
            # wait for next pass (30 x 0.5s = max 15 seconds), exit on sigterm
            for i in range(30):
                if self.running:
                    time.sleep(0.5)
                else:
                    break
Ejemplo n.º 2
0
    def run(self):
        """ run, endless loop, until sigterm is received
        :return: None
        """
        # check database consistency / repair
        check_and_repair('/var/netflow/*.sqlite')

        vacuum_interval = (60 * 60 * 8)  # 8 hour vacuum cycle
        vacuum_countdown = None
        while self.running:
            # should we perform a vacuum (always on the very first pass,
            # afterwards once every vacuum_interval seconds)
            if not vacuum_countdown or vacuum_countdown < time.time():
                vacuum_countdown = time.time() + vacuum_interval
                do_vacuum = True
            else:
                do_vacuum = False

            # run aggregate; catch Exception instead of a bare except so
            # KeyboardInterrupt / SystemExit still propagate normally
            try:
                aggregate_flowd(do_vacuum)
            except Exception:
                syslog.syslog(
                    syslog.LOG_ERR, 'flowd aggregate died with message %s' %
                    (traceback.format_exc()))
                return
            # rotate if needed
            check_rotate()
            # wait for next pass (30 x 0.5s = max 15 seconds), exit on sigterm
            for i in range(30):
                if self.running:
                    time.sleep(0.5)
                else:
                    break
def main():
    """ Background process loop, runs as backend daemon for all zones. only one should be active at all times.
        The main job of this procedure is to sync the administration with the actual situation in the ipfw firewall.
    """
    # perform integrity check and repair database if needed
    check_and_repair('/var/captiveportal/captiveportal.sqlite')

    last_cleanup_timestamp = 0
    bgprocess = CPBackgroundProcess()
    bgprocess.initialize_fixed()

    while True:
        try:
            # open database
            bgprocess.db.open()

            # cleanup old settings, every 5 minutes
            if time.time() - last_cleanup_timestamp > 300:
                bgprocess.db.cleanup_sessions()
                last_cleanup_timestamp = time.time()

            # reload cached arp table contents
            bgprocess.arp.reload()

            # update accounting info, for all zones
            bgprocess.db.update_accounting_info(
                bgprocess.ipfw.list_accounting_info())

            # process sessions per zone
            for zoneid in bgprocess.list_zone_ids():
                bgprocess.sync_zone(zoneid)

            # close the database handle while waiting for the next poll
            bgprocess.db.close()

            # process accounting messages (uses php script, for reuse of Auth classes)
            try:
                subprocess.run([
                    '/usr/local/opnsense/scripts/OPNsense/CaptivePortal/process_accounting_messages.php'
                ],
                               capture_output=True)
            except OSError:
                # if accounting script crashes don't exit background process
                pass

            # sleep
            time.sleep(5)
        except (KeyboardInterrupt, SystemExit):
            # normal shutdown path (ctrl-c / sigterm handler)
            break
        except Exception:
            # log any unexpected error and stop the daemon loop; avoid a
            # bare except so interpreter-exit signals are not swallowed above
            syslog.syslog(syslog.LOG_ERR, traceback.format_exc())
            print(traceback.format_exc())
            break
Ejemplo n.º 4
0
def main():
    """ Background process loop, runs as backend daemon for all zones. only one should be active at all times.
        The main job of this procedure is to sync the administration with the actual situation in the ipfw firewall.
    """
    # perform integrity check and repair database if needed
    check_and_repair('/var/captiveportal/captiveportal.sqlite')

    last_cleanup_timestamp = 0
    bgprocess = CPBackgroundProcess()
    bgprocess.initialize_fixed()

    while True:
        try:
            # open database
            bgprocess.db.open()

            # cleanup old settings, every 5 minutes
            if time.time() - last_cleanup_timestamp > 300:
                bgprocess.db.cleanup_sessions()
                last_cleanup_timestamp = time.time()

            # reload cached arp table contents
            bgprocess.arp.reload()

            # update accounting info, for all zones
            bgprocess.db.update_accounting_info(bgprocess.ipfw.list_accounting_info())

            # process sessions per zone
            for zoneid in bgprocess.list_zone_ids():
                bgprocess.sync_zone(zoneid)

            # close the database handle while waiting for the next poll
            bgprocess.db.close()

            # process accounting messages (uses php script, for reuse of Auth classes)
            try:
                subprocess.call(['/usr/local/opnsense/scripts/OPNsense/CaptivePortal/process_accounting_messages.php'])
            except OSError:
                # if accounting script crashes don't exit background process
                pass

            # sleep
            time.sleep(5)
        except (KeyboardInterrupt, SystemExit):
            # normal shutdown path (ctrl-c / sigterm handler)
            break
        except Exception:
            # log any unexpected error and stop the daemon loop; avoid a
            # bare except so interpreter-exit signals are not swallowed above
            syslog.syslog(syslog.LOG_ERR, traceback.format_exc())
            print(traceback.format_exc())
            break
Ejemplo n.º 5
0
    def run(self):
        """ run, endless loop, until sigterm is received
        :return: None
        """
        # check database consistency / repair
        syslog.syslog(syslog.LOG_NOTICE, 'startup, check database.')
        check_and_repair('%s/*.sqlite' % self.config.database_dir)

        vacuum_interval = (60 * 60 * 8)  # 8 hour vacuum cycle
        vacuum_countdown = None
        syslog.syslog(syslog.LOG_NOTICE, 'start watching flowd')
        while self.running:
            loop_start = time.time()
            # should we perform a vacuum (always on the very first pass,
            # afterwards once every vacuum_interval seconds)
            if not vacuum_countdown or vacuum_countdown < time.time():
                vacuum_countdown = time.time() + vacuum_interval
                do_vacuum = True
            else:
                do_vacuum = False

            # run aggregate; log and re-raise on failure. catch Exception
            # instead of a bare except so KeyboardInterrupt / SystemExit
            # propagate without being logged as an aggregation failure
            try:
                aggregate_flowd(self.config, do_vacuum)
                if do_vacuum:
                    syslog.syslog(syslog.LOG_NOTICE, 'vacuum done')
            except Exception:
                syslog.syslog(
                    syslog.LOG_ERR, 'flowd aggregate died with message %s' %
                    (traceback.format_exc()))
                raise

            # rotate if needed
            check_rotate(self.config.flowd_source)

            # wait for next pass, exit on sigterm
            # (use self.config for consistency with the rest of this method)
            if self.config.single_pass:
                break
            else:
                # calculate time to wait in between parses. since tailing flowd.log is quite time consuming
                # its better to wait a bit longer. max 120 x 0.5 seconds.
                wait_time = max(120 - int(time.time() - loop_start), 0)
                for i in range(wait_time):
                    if self.running:
                        time.sleep(0.5)
                    else:
                        break
Ejemplo n.º 6
0
    def run(self):
        """ run, endless loop, until sigterm is received
        :return: None
        """
        # check database consistency / repair
        syslog.syslog(syslog.LOG_NOTICE, 'startup, check database.')
        check_and_repair('%s/*.sqlite' % self.config.database_dir)

        vacuum_interval = (60 * 60 * 8)  # 8 hour vacuum cycle
        vacuum_countdown = None
        syslog.syslog(syslog.LOG_NOTICE, 'start watching flowd')
        while self.running:
            # should we perform a vacuum (always on the very first pass,
            # afterwards once every vacuum_interval seconds)
            if not vacuum_countdown or vacuum_countdown < time.time():
                vacuum_countdown = time.time() + vacuum_interval
                do_vacuum = True
            else:
                do_vacuum = False

            # run aggregate; log and re-raise on failure. catch Exception
            # instead of a bare except so KeyboardInterrupt / SystemExit
            # propagate without being logged as an aggregation failure
            try:
                aggregate_flowd(self.config, do_vacuum)
                if do_vacuum:
                    syslog.syslog(syslog.LOG_NOTICE, 'vacuum done')
            except Exception:
                syslog.syslog(
                    syslog.LOG_ERR, 'flowd aggregate died with message %s' %
                    (traceback.format_exc()))
                raise

            # rotate if needed
            check_rotate(self.config.flowd_source)

            # wait for next pass, exit on sigterm
            # (use self.config for consistency with the rest of this method)
            if self.config.single_pass:
                break
            else:
                for i in range(30):
                    if self.running:
                        time.sleep(0.5)
                    else:
                        break
Ejemplo n.º 7
0
            # profiled run: collect cumulative timings for a single Main() pass
            # and print them to stdout (enclosing if/arg-parsing not visible here)
            import io
            import pstats

            pr = cProfile.Profile(builtins=False)
            pr.enable()
            Main()
            pr.disable()
            s = io.StringIO()
            sortby = 'cumulative'
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            print (s.getvalue())
        else:
            # normal (non-profiled) foreground run
            Main()
    elif cmd_args.repair:
        # force a database repair; an exclusive non-blocking lock on the pid
        # file guarantees the daemon is not running at the same time
        try:
            lck = open(Main.config.pid_filename, 'a+')
            fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
            check_and_repair(filename_mask='%s/*.sqlite' % Main.config.database_dir, force_repair=True)
            lck.close()
            os.remove(Main.config.pid_filename)
        except IOError:
            # already running, exit status 99
            sys.exit(99)
    else:
        # Daemonize flowd aggregator
        from daemonize import Daemonize
        daemon = Daemonize(app="flowd_aggregate", pid=Main.config.pid_filename, action=Main)
        daemon.start()
Ejemplo n.º 8
0
        # profiled run (Python 2 variant: StringIO module, print statement):
        # collect cumulative timings for a single Main() pass and print them
        import cProfile
        import StringIO
        import pstats

        pr = cProfile.Profile(builtins=False)
        pr.enable()
        Main()
        pr.disable()
        s = StringIO.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print s.getvalue()
    else:
        # normal (non-profiled) foreground run
        Main()
elif len(sys.argv) > 1 and 'repair' in sys.argv[1:]:
    # force a database repair; an exclusive non-blocking lock on the pid
    # file guarantees the daemon is not running at the same time
    try:
        lck = open('/var/run/flowd_aggregate.pid', 'a+')
        fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
        check_and_repair(filename_mask='/var/netflow/*.sqlite', force_repair=True)
        lck.close()
        os.remove('/var/run/flowd_aggregate.pid')
    except IOError:
        # already running, exit status 99
        sys.exit(99)
else:
    # Daemonize flowd aggregator
    daemon = Daemonize(app="flowd_aggregate", pid='/var/run/flowd_aggregate.pid', action=Main)
    daemon.start()
Ejemplo n.º 9
0
            # profiled run: collect cumulative timings for a single Main() pass
            # NOTE(review): mixes io.StringIO with a Python 2 print statement
            # below — confirm which interpreter this snippet targets
            import io
            import pstats

            pr = cProfile.Profile(builtins=False)
            pr.enable()
            Main()
            pr.disable()
            s = io.StringIO()
            sortby = 'cumulative'
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            print s.getvalue()
        else:
            # normal (non-profiled) foreground run
            Main()
    elif cmd_args.repair:
        # force a database repair; an exclusive non-blocking lock on the pid
        # file guarantees the daemon is not running at the same time
        try:
            lck = open(Main.config.pid_filename, 'a+')
            fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
            check_and_repair(filename_mask='%s/*.sqlite' % Main.config.database_dir, force_repair=True)
            lck.close()
            os.remove(Main.config.pid_filename)
        except IOError:
            # already running, exit status 99
            sys.exit(99)
    else:
        # Daemonize flowd aggregator
        from daemonize import Daemonize
        daemon = Daemonize(app="flowd_aggregate", pid=Main.config.pid_filename, action=Main)
        daemon.start()
Ejemplo n.º 10
0
        # profiled run (Python 2: StringIO, print statement): collect
        # cumulative timings for a single Main() pass and print them
        pr = cProfile.Profile(builtins=False)
        pr.enable()
        Main()
        pr.disable()
        s = StringIO.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print s.getvalue()
    else:
        # normal (non-profiled) foreground run
        Main()
elif len(sys.argv) > 1 and 'repair' in sys.argv[1:]:
    # force a database repair; an exclusive non-blocking lock on the pid
    # file guarantees the daemon is not running at the same time
    try:
        lck = open('/var/run/flowd_aggregate.pid', 'a+')
        fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
        check_and_repair(filename_mask='/var/netflow/*.sqlite',
                         force_repair=True)
        lck.close()
        os.remove('/var/run/flowd_aggregate.pid')
    except IOError:
        # already running, exit status 99
        sys.exit(99)
else:
    # Daemonize flowd aggregator
    daemon = Daemonize(app="flowd_aggregate",
                       pid='/var/run/flowd_aggregate.pid',
                       action=Main)
    daemon.start()