Example #1
    def __init__(self, handler_id, config, lock, log_queue):
        self.handler_id = handler_id
        self.lock = lock

        # Suppress logging messages of watchdog observer
        logging.getLogger("watchdog.observers.inotify_buffer").setLevel(
            logging.WARNING)

        self.log = utils.get_logger(
            "WatchdogEventHandler-{}".format(self.handler_id),
            log_queue
        )
        self.log.debug("init")

        self.paths = [os.path.normpath(config["monitored_dir"])]

        # Determine which event types to detect
        self.detect_all = False
        self.detect_create = False
        self.detect_modify = False
        self.detect_delete = False
        self.detect_move_from = False
        self.detect_move_to = False
        self.detect_close = False

        regexes = []
        for event, regex in iteritems(config["monitored_events"]):
            self.log.debug("event: %s, pattern: %s", event, regex)
            regex = convert_suffix_list_to_regex(regex,
                                                 compile_regex=True,
                                                 log=self.log)

            regexes.append(regex)

            if "all" in event.lower():
                self.log.info("Activate all event types")
                self.detect_all = regex
            elif "create" in event.lower():
                self.log.info("Activate on create event types")
                self.detect_create = regex
            elif "modify" in event.lower():
                self.log.info("Activate on modify event types")
                self.detect_modify = regex
            elif "delete" in event.lower():
                self.log.info("Activate on delete event types")
                self.detect_delete = regex
            elif "move_from" in event.lower():
                self.log.info("Activate on move from event types")
                self.detect_move_from = regex
            elif "move_to" in event.lower():
                self.log.info("Activate on move to event types")
                self.detect_move_to = regex
            elif "close" in event.lower():
                self.log.info("Activate on close event types")
                self.detect_close = regex

        WatchdogEventHandler.regexes = regexes

        self.log.debug("init: super")
        super().__init__()
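
All three examples are built around the helper convert_suffix_list_to_regex. Its implementation is not part of these snippets; the following is only a minimal sketch, assuming that a suffix list is turned into a single alternation pattern and that plain strings pass through unchanged (the keyword argument names are taken from the calls above, everything else is an assumption):

import re


def convert_suffix_list_to_regex(pattern, suffix=True, compile_regex=False,
                                 log=None):
    # Minimal sketch (assumed behaviour): a suffix list such as
    # [".tif", ".cbf"] becomes one alternation anchored to the end of the
    # file name; with suffix=False the parts are merged as they are;
    # a plain string is treated as an already finished regex.
    if isinstance(pattern, list):
        if suffix:
            regex = ".*({})$".format("|".join(pattern))
        else:
            regex = "({})".format("|".join(pattern))
    else:
        regex = pattern

    if log is not None:
        log.debug("converted '%s' to regex '%s'", pattern, regex)

    return re.compile(regex) if compile_regex else regex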
Example #2
    def _setup(self):
        """Initiate class variables and environment.

        Sets static configuration parameters creates ring buffer and starts
        cleanup thread.
        """

        try:
            self.timeout = self.config["event_timeout"]
        except KeyError:
            # fall back when using the old config file format
            self.timeout = 1

        self.file_descriptor = inotifyx.init()

        # TODO why is this necessary
        self.paths = [self.config["monitored_dir"]]
        self.mon_subdirs = self.config["fix_subdirs"]

        self.mon_regex_per_event = self.config["monitored_events"]
        self.log.debug("monitored_events=%s", self.config["monitored_events"])

        regexes = []
        for key, value in iteritems(self.config["monitored_events"]):
            self.mon_regex_per_event[key] = (convert_suffix_list_to_regex(
                value, compile_regex=False, log=self.log))

            regexes.append(self.mon_regex_per_event[key])

            # The regexes cannot be compiled earlier because "regexes" needs
            # to be a list of strings
            try:
                self.mon_regex_per_event[key] = (re.compile(
                    self.mon_regex_per_event[key]))
            except Exception:
                self.log.error("Could not compile regex '%s'",
                               self.mon_regex_per_event[key],
                               exc_info=True)
                raise

        self.log.debug("regexes=%s", regexes)
        self.mon_regex = convert_suffix_list_to_regex(regexes,
                                                      suffix=False,
                                                      compile_regex=True,
                                                      log=self.log)

        self.history = collections.deque(maxlen=self.config["history_size"])

        self.lock = threading.Lock()

        self._add_watch()

        if self.config["use_cleanup"]:
            self.cleanup_time = self.config["time_till_closed"]
            self.action_time = self.config["action_time"]

            self.get_remaining_events = self._get_events_from_cleanup

            self.cleanup_thread = CleanUp(paths=self.paths,
                                          mon_subdirs=self.mon_subdirs,
                                          mon_regex=self.mon_regex,
                                          cleanup_time=self.cleanup_time,
                                          action_time=self.action_time,
                                          lock=self.lock,
                                          log_queue=self.log_queue)
            self.cleanup_thread.start()
        else:
            self.get_remaining_events = self._get_no_events
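
_setup therefore converts the monitored_events config in two steps: one regex string per event type (kept uncompiled so they can be collected into a list of strings), then one combined, compiled pattern over all event types. A standalone illustration, using made-up config values and the convert_suffix_list_to_regex sketch from above:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("example")

# hypothetical stand-in for config["monitored_events"]
monitored_events = {
    "IN_CLOSE_WRITE": [".tif", ".cbf"],
    "IN_MOVED_TO": [".log"],
}

# step 1: one regex string per event type
mon_regex_per_event = {
    event: convert_suffix_list_to_regex(suffixes,
                                        compile_regex=False,
                                        log=log)
    for event, suffixes in monitored_events.items()
}

# step 2: merge all per-event regexes into one compiled pattern that
# matches any monitored file regardless of the event type
mon_regex = convert_suffix_list_to_regex(list(mon_regex_per_event.values()),
                                         suffix=False,
                                         compile_regex=True,
                                         log=log)

print(bool(mon_regex.match("scan_0001.cbf")))  # True for a monitored suffix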
Example #3
    def _start_signal(self, signal, send_type, appid, socket_ids,
                      registered_ids, vari_requests, perm_requests):
        """Register socket ids and updated related lists accordingly.

        Updated registered_ids, vari_requests and perm_requests in place and
        send confirmation back.

        Args:
            signal: Signal to send after finishing
            send_type: The type of data the socket ids should get.
            appid: The application ID to identify where the signal came from.
            socket_ids: Socket ids to be registered.
            registered_ids: Already registered socket ids.
            vari_requests: List of open requests (query mode).
            perm_requests: List of next node number to serve (stream mode).
        """

        socket_ids = utils.convert_socket_to_fqdn(socket_ids, self.log)

        # Convert suffixes to regex
        # for compatibility with API versions 3.1.2 or older
        # socket_ids is of the format [[<host>, <prio>, <suffix>], ...]
        for socket_conf in socket_ids:
            self.log.debug("suffix=%s", socket_conf[2])
            socket_conf[2] = convert_suffix_list_to_regex(socket_conf[2],
                                                          suffix=True,
                                                          compile_regex=False,
                                                          log=self.log)

        targets = copy.deepcopy(sorted([i + [send_type] for i in socket_ids]))
        # compile regex
        # This cannot be done earlier because deepcopy does not support
        # compiled regex objects for Python versions < 3.7,
        # see http://bugs.python.org/issue10076
        for socket_conf in targets:
            try:
                socket_conf[2] = re.compile(socket_conf[2])
            except Exception:
                self.log.error("Error message was:", exc_info=True)
                raise FormatError("Could not compile regex '{}'".format(
                    socket_conf[2]))

        current_time = datetime.datetime.now().isoformat()
        targetset = TargetProperties(targets=targets,
                                     appid=appid,
                                     time_registered=current_time)

        overwrite_index = None
        for i, target_properties in enumerate(registered_ids):
            if target_properties.appid != appid:
                continue

            # the registered disjoint socket ids for each node set
            # set(<host>:<port>, <host>:<port>, ...)
            targets_flatlist = set([j[0] for j in target_properties.targets])

            # the disjoint socket_ids to be registered
            # "set" is used to eliminate duplicates
            # set(<host>:<port>, <host>:<port>, ...) created from socket_ids
            socket_ids_flatlist = set(
                [socket_conf[0] for socket_conf in socket_ids])

            # If the socket_ids of the node set to be registered are either a
            # subset or a superset of an already registered node set,
            # overwrite the old one with the new registration
            # new registration  | registered    | what to do
            # (h:p, h:p2)       |  (h:p)        |  overwrite: (h:p, h:p2)
            # (h:p)             |  (h:p, h:p2)  |  overwrite: (h:p)
            # (h:p, h:p2)       |  (h:p, h:p3)  |  ?

            # Check if socket_ids is a subset of one entry of registered_ids
            # -> overwrite existing entry
            if socket_ids_flatlist.issubset(targets_flatlist):
                self.log.debug("socket_ids already contained, override")
                overwrite_index = i
            # Check if one entry of registered_ids is a subset of socket_ids
            # -> overwrite existing entry
            elif targets_flatlist.issubset(socket_ids_flatlist):
                self.log.debug("socket_ids is superset of already "
                               "contained set, override")
                overwrite_index = i
            # TODO Mixture ?
            elif not socket_ids_flatlist.isdisjoint(targets_flatlist):
                self.log.error("socket_ids is neither a subset nor "
                               "superset of already contained set")
                self.log.debug("Currently: no idea what to do with this.")
                self.log.debug("socket_ids=%s", socket_ids_flatlist)
                self.log.debug("registered_socketids=%s", targets_flatlist)

        if overwrite_index is None:
            registered_ids.append(targetset)

            if perm_requests is not None:
                perm_requests.append(0)

            if vari_requests is not None:
                vari_requests.append([])

        else:
            # Overriding is necessary because the new request may contain
            # different parameters such as monitored file suffix, priority or
            # connection type. This also means the old socket_id set should
            # be replaced completely and not only partially.
            self.log.debug("overwrite_index=%s", overwrite_index)

            registered_ids[overwrite_index] = targetset

            if perm_requests is not None:
                perm_requests[overwrite_index] = 0

            if vari_requests is not None:
                vari_requests[overwrite_index] = []

        self.log.debug("after start handling: registered_ids=%s",
                       registered_ids)

        # send signal back to receiver
        self.send_response([signal])
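
The overwrite decision in the loop above reduces each target set to its <host>:<port> strings and compares them as plain sets. A standalone illustration of the three cases from the comment table (the sample values are made up):

# an already registered node set and an incoming registration,
# reduced to their "<host>:<port>" strings
registered = {"det01:50101"}
incoming = {"det01:50101", "det01:50102"}

if incoming.issubset(registered) or registered.issubset(incoming):
    # subset or superset: the new registration replaces the old entry
    print("overwrite the registered entry")
elif not incoming.isdisjoint(registered):
    # partial overlap: not handled, only logged as an error
    print("undefined mixture, log an error")
else:
    # no overlap: nothing to overwrite, the new entry is appended
    print("append as a new entry")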