Code example #1
def create_watchdog(sync_item):
    try:
        url = sync_item.url
        label = sync_item.label
        localbox_client = LocalBox(url, label, sync_item.path)

        event_handler = LocalBoxEventHandler(localbox_client)
        observer = Observer()
        observer.setName('th-evt-%s' % sync_item.label)
        observer.schedule(event_handler, localbox_client.path, recursive=True)
        observer.start()
        getLogger(__name__).info('started watchdog for %s' % sync_item.path)
    except NoOptionError as error:
        getLogger(__name__).exception(error)
        string = "Skipping '%s' due to missing option '%s'" % (sync_item,
                                                               error.option)
        getLogger(__name__).info(string)
    except URLError as error:
        getLogger(__name__).exception(error)
        string = "Skipping '%s' because it cannot be reached" % sync_item
        getLogger(__name__).info(string)
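The LocalBox and LocalBoxEventHandler classes above are project-specific and their imports are not shown. As a minimal, self-contained sketch of the same Observer pattern, using only the watchdog package and a placeholder event handler (names here are illustrative, not the project's own):

# Minimal sketch of the same watchdog pattern; handler and naming are placeholders.
from logging import INFO, basicConfig, getLogger

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class LogEveryEventHandler(FileSystemEventHandler):
    """Logs every filesystem event it receives."""

    def on_any_event(self, event):
        getLogger(__name__).info('%s: %s', event.event_type, event.src_path)


def create_watchdog(path):
    basicConfig(level=INFO)
    observer = Observer()
    observer.name = 'th-evt-%s' % path  # same thread-naming idea as above
    observer.schedule(LogEveryEventHandler(), path, recursive=True)
    observer.start()
    getLogger(__name__).info('started watchdog for %s', path)
    return observer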
Code example #2
File: watcher.py  Project: MercurySeven/project-SSD
class Watcher(QObject):
    """The Watcher class is used to activate or deactivate the watchdog thread.
    This is usually done automatically with signals, or manually using the
    reboot or run methods.
    """

    signal_event = Signal()

    def __init__(self):
        """
        Constructor for the Watcher class; sets up the observer, the path
        accessor (read from the "sync_path" setting) and the logger.
        """
        super(Watcher, self).__init__()

        # This initial observer could be deleted; for now it only avoids
        # exceptions when the watchdog is turned off before it was started.
        self.observer = Observer()
        env_settings = QSettings()

        self.path = lambda: env_settings.value("sync_path")

        # DEBUG < INFO < WARNING < ERROR, so setting DEBUG would capture everything.
        # A new named logger is created because davide's logger is the root logger.
        self.logger = logging.getLogger("watchdog")
        self.logger.setLevel(logging.WARNING)
        formatter = logging.Formatter(
            '%(asctime)s:%(levelname)s:%(pathname)s:%(process)d:%(message)s')
        file_handler = logging.FileHandler('log.mer')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def run(self, watch):
        """
        method used to turn on or off the watchdog thread

        :param watch: boolean variable that is used to turn on
            the watchdog if true and off if false
        :return: True if the requested action was done and False
            if ignored (ex turning off when already off)
        """

        if watch:
            self.logger.warning("Attivato watchdog")
            path = self.path()
            path = "" if path is None else path
            self.logger.warning("Controllo cartella: " + path)
            return self.background()
        else:
            self.observer.unschedule_all()
            self.observer.stop()
            self.logger.warning("Disattivato watchdog")
            return True

    def background(self):
        """method used to initiate observer and start it"""
        event_handler = MyHandler(self, self.logger)
        # A new Observer is created every time because the same thread
        # cannot be restarted once it has been stopped.
        self.observer = Observer()
        self.observer.setName("Watchdog's thread")
        path: str = self.path()
        if path is not None and os.path.isdir(path):
            self.observer.schedule(event_handler, path, recursive=True)
            self.observer.start()
            return True
        else:
            return False

    def reboot(self):
        """Method used to reboot the observer, turns it off and then on again"""
        self.run(False)
        self.run(True)
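The MyHandler class used in background() is not included in this snippet. A plausible minimal sketch, assuming it only needs to log each filesystem event and notify the Qt side through the Watcher's signal_event (the actual project-SSD class may differ):

# Hypothetical sketch of the event handler used above; the real MyHandler
# in MercurySeven/project-SSD may behave differently.
from watchdog.events import FileSystemEventHandler


class MyHandler(FileSystemEventHandler):
    def __init__(self, watcher, logger):
        super().__init__()
        self.watcher = watcher
        self.logger = logger

    def on_any_event(self, event):
        # Log the raw event and notify the Qt side that something changed.
        self.logger.warning("%s: %s", event.event_type, event.src_path)
        self.watcher.signal_event.emit()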
Code example #3
File: netflow.py  Project: eliasgrana/msnm-sensor
class Netflow(Source):
    '''
    Netflow data source: watches for new nfcapd capture files and converts
    them to CSV with nfdump.
    '''
    def __init__(self):
        super(Netflow, self).__init__()

        # Listen for new nfcapd files
        event_handler = NetFlowFileEventHandler(self)
        try:
            # Watch the new netflow generated files
            self._observer = Observer()
            self._observer.schedule(
                event_handler,
                self.config.get_config()['DataSources'][self._type][
                    self.__class__.__name__]['captures'],
                recursive=False)
            self._observer.setName("Netflow")
            self._observer.start()
        except Exception as e:
            logging.error(
                "Please check if %s folder for netflow captures is already created.",
                self.config.get_config()['DataSources'][self._type][
                    self.__class__.__name__]['captures'])
            raise e

    def start(self):

        nfcapdAutomatize = self.config.get_config()['DataSources'][self._type][
            self.__class__.__name__]['nfcapdAutomatize']
        if nfcapdAutomatize:
            self.run_nfcapd()
        else:
            logging.warning("nfcapd must have been initiated before!")

    def stop(self):

        nfcapdAutomatize = self.config.get_config()['DataSources'][self._type][
            self.__class__.__name__]['nfcapdAutomatize']
        if nfcapdAutomatize:
            #If nfcapd is already started we kill it!
            retcode = os.system("killall -9 nfcapd")
            if retcode == 0:
                logging.debug("nfcapd was successfully killed ..")

        self._observer.stop()

    def run_nfcapd(self):
        """
        Runs the nfcapd daemon, which captures the traffic flow and generates the
        corresponding nfcapd* files at the configured scheduling interval. This interval is set up on demand.

        Raises
        ------
        DataSourceError

        """

        method_name = "run_nfcapd()"

        nfcapd_captures_folder = self.config.get_config()['DataSources'][
            self._type][self.__class__.__name__][
                'captures']  # Folder where the nfcapd will be generated
        timer = self.config.get_config()['GeneralParams'][
            'dataSourcesScheduling']  # data source captures scheduling

        # TODO: compatibility for windows machines
        # If nfcapd is already started we kill it!
        retcode = os.system("killall -9 nfcapd")
        if retcode == 0:
            logging.debug("nfcapd was successfully killed ..")

        logging.debug(
            "Running nfcapd application. Captures will be generated in %s every %s seconds.",
            nfcapd_captures_folder, timer)

        # Call to nfcapd process.
        retcode = call("nfcapd -w -D -l " + str(nfcapd_captures_folder) +
                       " -p 2055 -T +8 -t " + str(timer),
                       shell=True)

        if retcode != 0:
            raise DataSourceError(self, "Error calling nfcapd ...",
                                  method_name)

    def run_nfdump(self, nfcapd_file_path, output_file_path):
        """
        Given a new netflow capture file, this method calls nfdump to transform the binary
        netflow file into a *.csv file used as input to the flow parser.

        Raises
        ------
        MSNMError

        """

        method_name = "run_nfdump()"

        logging.info("Running nfdump ...")

        # Call to nfdump process.
        retcode = call("nfdump -r " + str(nfcapd_file_path) +
                       " -q -o csv >> " + output_file_path,
                       shell=True)

        if retcode != 0:
            raise DataSourceError(self, "Error calling nfdump", method_name)

        logging.debug("New nfdump csv generated: %s", output_file_path)

        try:
            # add new id column to merge in parser
            # TODO: build a more elaborate method to do this, e.g. in a dataframe utils package
            df = pd.read_csv(output_file_path, header=None, index_col=0)
            df.loc[:, df.shape[1]] = range(100000, 100000 + df.shape[0])
            df.to_csv(output_file_path, encoding='utf-8', header=False)

        except ValueError:
            # FIXME: sometimes nfcapd generates an empty file; the cause is unknown
            logging.warning("Nfdump file is empty, skipping ... ERROR: %s ",
                            sys.exc_info()[0])
            raise DataSourceError(self, "Nfdump file is empty ....",
                                  method_name)

        except Exception:
            raise DataSourceError(self, sys.exc_info()[0], method_name)
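NetFlowFileEventHandler, wired into the Observer in the constructor, is likewise not part of the snippet. A hypothetical minimal sketch, assuming it only has to pick up newly created nfcapd files and hand them to run_nfdump (the real msnm-sensor handler is more involved):

# Hypothetical sketch of the handler scheduled above; names, the file-name
# filter, and the output path are assumptions, not the msnm-sensor code.
import os

from watchdog.events import FileSystemEventHandler


class NetFlowFileEventHandler(FileSystemEventHandler):
    def __init__(self, netflow_source):
        super().__init__()
        self.netflow_source = netflow_source

    def on_created(self, event):
        # nfcapd names finished captures like nfcapd.YYYYMMDDhhmm and keeps an
        # in-progress "current" file; skipping the latter is assumed here.
        filename = os.path.basename(event.src_path)
        if event.is_directory or "current" in filename \
                or not filename.startswith("nfcapd."):
            return
        output_csv = event.src_path + ".csv"
        self.netflow_source.run_nfdump(event.src_path, output_csv)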