예제 #1
1
class FsRadar:
    """Recursively watch a directory tree via inotify and notify an observer.

    `dir_filter` decides which newly created subdirectories get watched;
    `observer` receives FsRadarEvent.FILE_MATCH / FILE_GONE notifications.
    """

    def __init__(self, dir_filter, observer):
        self.inotify = INotify()
        # BUG FIX: watch_flags used to be assigned three times in a row
        # (single-mask, ALL_EVENTS, then this value); only the last
        # assignment ever took effect, so the dead ones were removed.
        self.watch_flags = \
            flags.CREATE | \
            flags.DELETE | \
            flags.DELETE_SELF | \
            flags.CLOSE_WRITE | \
            flags.MOVE_SELF | \
            flags.MOVED_FROM | \
            flags.MOVED_TO | \
            flags.EXCL_UNLINK

        self.wds = {}  # watch descriptor -> watched path
        self.dir_filter = dir_filter
        self.observer = observer

    def add_watch(self, path):
        """Start watching `path` (skipped only if ONLYDIR is set and path is not a dir)."""
        if not ((self.watch_flags & flags.ONLYDIR) and not os.path.isdir(path)):
            wd = self.inotify.add_watch(path, self.watch_flags)
            self.wds[wd] = path
            logger.debug('Watch %s', important(path))

    def rm_watch(self, wd):
        """Stop watching the path behind watch descriptor `wd`."""
        logger.debug('Stop Watching %s', important(self.wds[wd]))
        self.inotify.rm_watch(wd)
        self.wds.pop(wd)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """Close the underlying inotify file descriptor."""
        logger.debug('Close inotify descriptor')
        return self.inotify.close()

    def on_watch_event(self, event):
        """Dispatch a raw inotify event to the appropriate handler."""
        MASK_NEW_DIR = flags.CREATE | flags.ISDIR

        if logging.getLogger().isEnabledFor(logging.DEBUG):
            logger.debug('New event: %r', event)
            for flag in flags.from_mask(event.mask):
                logger.debug('-> flag: %s', flag)

        if MASK_NEW_DIR == MASK_NEW_DIR & event.mask:
            new_dir_path = join(self.wds[event.wd], event.name)
            self.on_new_dir(new_dir_path)
        elif flags.CLOSE_WRITE & event.mask and event.name:
            # we are watching a directory and a file inside of it has been touched
            logger.debug('Watching dir, file touched')
            self.on_file_write(join(self.wds[event.wd], event.name))
        elif flags.CLOSE_WRITE & event.mask and not event.name:
            # we are watching a file
            logger.debug('Watching file, file touched')
            self.on_file_write(self.wds[event.wd])
        elif flags.IGNORED & event.mask:
            # inotify_rm_watch was called automatically
            # (file/directory removed/unmounted)
            path = self.wds[event.wd]
            self.wds.pop(event.wd)
            self.on_file_gone(path)

    def on_new_dir(self, path):
        """Watch a newly created directory and emit events for pre-existing files."""
        if self.dir_filter(path):
            self.add_watch(path)

            # If files have been added immediately to the directory we
            # missed the events, so we emit them artificially (with
            # the risk of having some repeated events)
            for fname in os.listdir(path):
                # BUG FIX: was `join(new_dir_path, fName)` -- `new_dir_path`
                # is undefined in this scope and raised NameError.
                self.on_file_write(join(path, fname))

    def on_file_write(self, path):
        '''A file at `path` was written and closed (CLOSE_WRITE observed)'''
        self.observer.notify(FsRadarEvent.FILE_MATCH, path)

    def on_file_gone(self, path):
        '''The file/directory at `path` was either unlinked, moved or unmounted'''
        self.observer.notify(FsRadarEvent.FILE_GONE, path)

    def run_forever(self):
        """Poll inotify forever, dispatching each event as it arrives."""
        while True:
            for event in self.inotify.read(read_delay=30, timeout=2000):
                self.on_watch_event(event)
예제 #2
0
    def loop_inotify(self, mail_delta):
        """Alternate between consuming dropped documents and fetching mail.

        Watches the consumption directory for files that are closed after
        writing or moved in, and pulls mail every `mail_delta` seconds.
        Runs forever.
        """
        # NOTE(review): `consume` appears to be an attribute holding the
        # consumption directory path, not a method -- confirm on the
        # file_consumer object.
        directory = self.file_consumer.consume
        inotify = INotify()
        inotify.add_watch(directory, flags.CLOSE_WRITE | flags.MOVED_TO)

        # Run initial mail fetch and consume all currently existing documents
        self.loop_step(mail_delta)
        next_mail_time = self.mail_fetcher.last_checked + mail_delta

        while True:
            # Consume documents until next_mail_time
            while True:
                delta = next_mail_time - time.time()
                if delta > 0:
                    # NOTE(review): inotify_simple interprets `timeout` in
                    # milliseconds while `delta` is in seconds -- confirm
                    # whether the much shorter wait here is intentional.
                    for event in inotify.read(timeout=delta):
                        file = os.path.join(directory, event.name)
                        if os.path.isfile(file):
                            self.file_consumer.try_consume_file(file)
                        else:
                            self.logger.warning(
                                "Skipping %s as it is not a file", file)
                else:
                    break

            self.mail_fetcher.pull()
            next_mail_time = self.mail_fetcher.last_checked + mail_delta
def _main():
    """Demo: watch /tmp/watch_tree recursively while a helper thread creates files."""
    watch_dir = "/tmp/watch_tree"
    create_thread = threading.Thread(target=create_test_files,
                                     args=(watch_dir,))

    try:
        os.mkdir(watch_dir)
    except OSError:
        pass  # directory already exists

    inotify = INotify()
    watch_flags = (flags.CREATE | flags.DELETE | flags.MODIFY
                   | flags.DELETE_SELF | flags.ISDIR)

    # Map watch descriptors back to the directory they cover.
    wd_to_path = {inotify.add_watch(watch_dir, watch_flags): watch_dir}
    print("wd_to_path: ", wd_to_path)

    create_thread.start()

    while True:
        for event in inotify.read():
            path = wd_to_path[event.wd]
            event_type = flags.from_mask(event.mask)

            # A new sub-directory appeared: start watching it as well.
            if flags.ISDIR in event_type and flags.CREATE in event_type:
                child = os.path.join(path, event.name)
                wd_to_path[inotify.add_watch(child, watch_flags)] = child

            print("PATH=[{}] FILENAME=[{}] EVENT_TYPES={}"
                  .format(path, event.name, event_type))
def _main():
    """Demo: watch /tmp/watch_tree and print events triggered by a test file."""
    watch_dir = "/tmp/watch_tree"

    # BUG FIX: the original did `for path in watch_dir: os.mkdir(path)`,
    # which iterates the *characters* of the path string and tries to
    # create one-character directories. Create the watch directory itself.
    try:
        os.mkdir(watch_dir)
    except OSError:
        pass  # directory already exists

    inotify = INotify()
    watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF

    wd_to_path = {}

    wd = inotify.add_watch(watch_dir, watch_flags)
    wd_to_path[wd] = watch_dir
    print("wd_to_path: ", wd_to_path)

    # Touch a file so at least one CREATE event is generated.
    with open(os.path.join(watch_dir, "test_file"), "w"):
        pass

    while True:
        for event in inotify.read():
            path = wd_to_path[event.wd]
            event_type = flags.from_mask(event.mask)

            print("PATH=[{}] FILENAME=[{}] EVENT_TYPES={}".format(
                path, event.name, event_type))
예제 #5
0
파일: layer.py 프로젝트: DrLuke/easyShader
 def __init__(self, path):
     """Watch `path` for MODIFY events and cache its current contents."""
     self.path = path
     # Only interested in in-place modifications of the file.
     fl = flags.MODIFY
     self.i = INotify()
     self.watch = self.i.add_watch(self.path, fl)
     # Read the initial content so later reloads have a baseline.
     with open(self.path, "r") as f:
         self.content = f.read()
예제 #6
0
    def run(self, inotify: INotify, store: OrderedDict, timeout: int):
        """Watch every file currently in `store` and refresh its cached stats.

        Runs until `self._running` is cleared, reading inotify events with
        the given `timeout` and updating `store` on each MODIFY event.
        """
        # NOTE(review): `watches` is not defined in this method -- presumably
        # a module-level dict mapping watch descriptors to file paths;
        # confirm where it is declared.
        for file_path in list(store):
            try:
                # for now only monitor modification of files
                logger.info("adding %s to watch list" % file_path)
                wd = inotify.add_watch(file_path, flags.MODIFY)
                watches[wd] = file_path
            except FileNotFoundError:
                # A file may disappear between listing and watching; skip it.
                pass

        # print("watch list %s " % watches)
        while self._running:
            for event in inotify.read(timeout=timeout):
                # print(str(event))
                for flag in flags.from_mask(event.mask):
                    # we only watch files for modification events
                    if flag == flags.MODIFY:
                        # only if the event is for a file that we are watching for
                        if event.wd in watches:
                            file_path = watches[event.wd]
                            logger.debug("modify event received for %s " %
                                         file_path)
                            store[
                                file_path] = FStatCache.get_file_stats_using_stat(
                                    file_path)
예제 #7
0
    def __init__(self):
        """Wire the inotify file descriptor into the Qt event loop."""
        super().__init__()

        self.inotify = INotify()
        # QSocketNotifier fires `activated` whenever the inotify fd is readable.
        self.qnotifier = QSocketNotifier(self.inotify.fd, QSocketNotifier.Read)
        self.qnotifier.activated.connect(self._on_activated)
        # Watch descriptor; populated once a watch is added.
        self.wd = None
예제 #8
0
class TickNotifierOrigin():
    """Reloads tick data from a JSON file whenever inotify reports it modified."""

    def __init__(self, symbol, binary_id, id):
        self.__set_config(symbol, binary_id, id)

    def __set_config(self, symbol, binary_id, id):
        """Resolve the tick file location and start watching its directory."""
        self.binary_id = binary_id
        self.id = id
        self.tick_dir = local_settings["inotify_config"]["default_dir"] + symbol
        self.tick_file_path = self.tick_dir + "/" + local_settings[
            "inotify_config"]["default_name"]
        self.inotify = INotify()
        watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF
        self.inotify.add_watch(self.tick_dir, watch_flags)
        # NOTE: changes the process-wide working directory as a side effect.
        os.chdir(self.tick_dir)
        self.tick_data = None
        self.symbol = symbol

    def run(self):
        """Block for the next inotify event; reload the tick file on MODIFY.

        Only the first event of each read() batch is examined (original
        behavior, preserved). Always returns True.
        """
        event = self.inotify.read()[0]
        for flag in flags.from_mask(event.mask):
            # BUG FIX: compare enum members directly instead of via str(),
            # whose format changed across Python/enum versions and silently
            # stopped matching "flags.MODIFY".
            if flag == flags.MODIFY:
                with open(self.tick_file_path) as json_file:
                    self.tick_data = json.load(json_file)
        return True

    def get_tick_data(self):
        """Return the latest tick payload together with its identifiers."""
        return {
            "id": self.id,
            "binary_id": self.binary_id,
            "symbol": self.symbol,
            "data": self.tick_data
        }
 def init_inotify(self):
     """Create the inotify instance and watch every directory in the tree."""
     self.inotify = INotify()
     self.dirs_array = self.discover_tree(self.watched_dir)
     # Map each watch descriptor back to the directory it covers.
     for directory in self.dirs_array:
         wd = self.inotify.add_watch(directory, self.watch_flags)
         self.watched_name[wd] = directory
     print(str(self.watched_name))
예제 #10
0
 def __init__(self):
     """Watch each user's ingest folder and upload arriving files forever.

     NOTE: this constructor never returns -- it enters an infinite
     read/upload loop after registering the watches.
     """
     self.inotify = INotify()
     self.watched_flags = flags.CREATE | flags.MOVED_TO | flags.ISDIR
     # watch descriptor -> ingest directory path
     self.directories = {}
     # ingest directory path -> watch descriptor (reverse map)
     self.watch_descriptors = {}
     # watch descriptor -> user name
     self.users = {}
     self.uploaders = [GCSUploader(), S3Uploader()]
     for user, _ in config.PROJECT_CONFIG["USERS"].items():
         watch_descriptor = self.inotify.add_watch(
             os.path.join(config.APP_LANDING_DIR, user, 'ingest'),
             self.watched_flags)
         self.directories[watch_descriptor] = os.path.join(
             config.APP_LANDING_DIR, user, 'ingest')
         self.users[watch_descriptor] = user
         self.watch_descriptors[
             self.directories[watch_descriptor]] = watch_descriptor
         logger.info("Watching ingestion folder")
     # Main loop: batch events (1s read delay) and upload each file
     # concurrently via a thread pool.
     while True:
         events = self.inotify.read(read_delay=1000)
         all_events = self.get_all_events(events)
         with ThreadPoolExecutor(max_workers=None) as executor:
             for event in all_events:
                 executor.submit(
                     upload_file, self.uploaders,
                     os.path.join(self.directories[event.wd], event.name))
예제 #11
0
    def __init__(self, parent: Optional[QObject] = None) -> None:
        """Wire the inotify file descriptor into the Qt event loop."""
        super().__init__(parent)

        self.inotify = INotify()
        # QSocketNotifier fires `activated` whenever the inotify fd is readable.
        self.qnotifier = QSocketNotifier(self.inotify.fd, QSocketNotifier.Read)
        self.qnotifier.activated.connect(self._on_activated)
        # Watch descriptor; populated once a watch is added.
        self.wd = None
예제 #12
0
class FileChangeWatcher(object):
    """Monitors for changes in a given file."""

    def __init__(self, filepath):
        self.__path, self.__name = filepath.rsplit("/", 1)
        self.__inotify = INotify()
        # Events that indicate the file changed on disk (edit, save, replace).
        watch_flags = (Flags.MODIFY
                       | Flags.CLOSE_WRITE
                       | Flags.MOVED_TO
                       | Flags.EXCL_UNLINK)
        self.__wd = self.__inotify.add_watch(self.__path, watch_flags)

    def close(self):
        """Stop watching the file's directory."""
        self.__inotify.rm_watch(self.__wd)

    @property
    def changed(self):
        """True once the file has been both modified and saved."""
        modified = False
        saved = False
        for event in self.__inotify.read():
            if event.name != self.__name:
                continue  # event is for some other file in the directory
            for flag in Flags.from_mask(event.mask):
                if flag == Flags.MODIFY:
                    modified = True
                elif flag == Flags.CLOSE_WRITE:
                    saved = True
                elif flag == Flags.MOVED_TO:
                    modified = saved = True
        return modified and saved
예제 #13
0
def history_watcher(shutdown_event, session_start_event, session_stop_event):
    """Journal every new shell command that appears in HISTORY_FILE.

    Waits for a session to start, snapshots the history file, then on each
    MODIFY event diffs the live file against the snapshot and writes the
    new command line to the journal. Loops until `shutdown_event` is set.
    """
    while (not (shutdown_event.isSet())):
        session_start_event.wait()

        inotify = INotify()
        watch_flags = flags.MODIFY
        # Snapshot the history so later diffs only show new entries.
        shutil.copy(HISTORY_FILE, HISTORY_FILE + "_clj")

        wd = inotify.add_watch(HISTORY_FILE, watch_flags)
        print("IDK")
        while (not (session_stop_event.isSet())):
            print("yada")
            for event in inotify.read():
                # diff old vs new history and keep only the changed 'cmd' line
                dp = subprocess.Popen(['diff', HISTORY_FILE, HISTORY_FILE + '_clj'], \
                                      stdout=subprocess.PIPE)
                gp = subprocess.Popen(['grep', 'cmd'], \
                                      stdin=dp.stdout, \
                                      stdout=subprocess.PIPE)
                dp.stdout.close()
                r_stdout, r_stderr = gp.communicate()
                # NOTE(review): raises IndexError if the diff output contains
                # no 'cmd: ' marker -- confirm the history file format.
                cmd_ln = r_stdout.decode().split('cmd: ')[1]
                print(cmd_ln)


                write_journal((("___________________________________________________\n"), \
                             (getcwd() + "> " + cmd_ln), \
                             (datetime.now().strftime("%m/%d/%Y %H:%M:%S") + '\n\n')))
                shutil.copy(HISTORY_FILE, HISTORY_FILE + "_clj")
        sleep(0.1)
예제 #14
0
 def __set_config(self, pool_name):
     """Point this instance at the queue-pool directory and watch it."""
     self.data = None
     self.queue_pool_dir = self.main_path + pool_name
     self.inotify = INotify()
     watch_flags = flags.CREATE | flags.MODIFY
     self.inotify.add_watch(self.queue_pool_dir, watch_flags)
     # NOTE: changes the process-wide working directory as a side effect.
     os.chdir(self.queue_pool_dir)
예제 #15
0
def main():
    """Read one batch of events on /tmp and print each with its decoded flags."""
    inotify = INotify()
    mask = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF
    wd = inotify.add_watch('/tmp', mask)
    for event in inotify.read():
        print(event)
        for flag in flags.from_mask(event.mask):
            print('    ' + str(flag))
예제 #16
0
파일: dl.py 프로젝트: BasementCat/autodl
def get_inotify():
    """Create an INotify instance watching every configured mount point.

    Returns the ready-to-read INotify object; watch descriptors are not
    kept because events carry them back via `event.wd`.
    """
    inotify = INotify()
    watch_flags = masks.ALL_EVENTS
    logger.debug("inotify flags: %s", watch_flags)
    for path in CONFIG['MOUNTS']:
        logger.info("Watching path %s", path)
        # Cleanup: the returned watch descriptor was bound to an unused local.
        inotify.add_watch(path, watch_flags)
    return inotify
예제 #17
0
class Watcher:  # pylint: disable=too-few-public-methods
    """Watcher class. Provides `watch` method to execute callback when event happens"""
    def __init__(self, callback):
        # Created lazily in _notify_changes; None until the first watch().
        self._inotify = None
        self._thread = None
        self._watch_descriptors = []
        self._callback = callback

    def watch(self, directories):
        """
        Creates a thread to watch for a single fs event.
        Executes callback once when any monitored event happens.
        Removes existing watchers on initialization,
        so only one watching thread exists at a time.

        Keyword arguments:
        directories -- directories to watch
        callback -- function to execute on event
        """
        if self._inotify:
            # Tear down the previous watcher: closing the fd makes the old
            # thread's blocking read() raise ValueError and exit.
            for descriptor in self._watch_descriptors:
                try:
                    self._inotify.rm_watch(descriptor)
                except OSError:
                    pass
            self._watch_descriptors = []
            self._inotify.close()
        self._thread = Thread(target=self._notify_changes,
                              args=(directories, ))
        self._thread.start()

    def get_notification_thread(self):
        """Returns notification thread"""
        return self._thread

    def _notify_changes(self, directories):
        """
        Registers notification on FS events.

        Keyword arguments:
        directory -- directory to watch
        callback -- function to execute on event
        """
        self._inotify = INotify()
        watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.MOVED_FROM | \
                      flags.MOVED_TO | flags.DELETE_SELF
        for directory in directories:
            if exists(directory):
                self._watch_descriptors.append(
                    self._inotify.add_watch(directory, watch_flags))

        # wait for event
        try:
            # Blocking read: returns on the first batch of events, then the
            # callback fires exactly once and the thread ends.
            self._inotify.read()
            self._callback()
        except ValueError:
            # do not care if the watcher has been removed / thread killed
            pass
예제 #18
0
def watcher(directory):
    """Yield the full path of every file created in or moved into `directory`."""
    inotify = INotify()
    # only watch for files created, copied or moved
    inotify.add_watch(directory, flags.CREATE | flags.MOVED_TO)
    WAIT = 1000
    while True:
        for event in inotify.read(WAIT):
            yield os.path.join(directory, event.name)
예제 #19
0
class INotifyWait(EventEmitter):
    """inotifywait-style event emitter built on inotify_simple.

    Watches one or more paths in a child process and re-emits each decoded
    inotify flag through the EventEmitter interface.
    """

    IN_ACCESS = flags.ACCESS
    IN_MODIFY = flags.MODIFY
    IN_CLOSE_WRITE = flags.CLOSE_WRITE
    IN_CLOSE_NOWRITE = flags.CLOSE_NOWRITE
    IN_OPEN = flags.OPEN
    IN_MOVED_FROM = flags.MOVED_FROM
    IN_MOVED_TO = flags.MOVED_TO
    IN_CREATE = flags.CREATE
    IN_DELETE = flags.DELETE
    IN_DELETE_SELF = flags.DELETE_SELF
    IN_MOVE_SELF = flags.MOVE_SELF
    IN_UNMOUNT = flags.UNMOUNT
    IN_CLOSE = flags.CLOSE_WRITE | flags.CLOSE_NOWRITE
    IN_MOVE = flags.MOVED_FROM | flags.MOVED_TO

    def __init__(self, path, options):
        """Watch `path` (a single string or an iterable of paths).

        `options['events']` supplies the inotify mask to watch for.
        """
        super(INotifyWait, self).__init__()
        if str(path) == path:
            self.paths = [os.path.abspath(path)]
        else:
            self.paths = []
            for p in path:
                # BUG FIX: was `self.path.append(...)` -- `self.path` does
                # not exist, so any non-string input raised AttributeError.
                self.paths.append(os.path.abspath(p))
        self.__active = True
        self.__process = None
        self.__inotify = INotify()

        # TODO: find a way to screen events emitted from unrelated paths
        for path in self.paths:
            self.__inotify.add_watch(path, options.get('events'))

    def on(self, type, callback):
        """Register `callback` for `type` and (re)start the reader process."""
        if self.__active:
            super().on(type, callback)
            if self.__process is not None:
                self.__process.terminate()
            self.__process = Process(target=self.__inotifyRead)
            self.__process.start()

    def stop(self):
        """Terminate the reader process and close the inotify descriptor."""
        if self.__active:
            self.__active = False
            self.__process.terminate()
            self.__inotify.close()

    def __inotifyRead(self):
        # Runs in the child process: fan every decoded flag out to listeners.
        while self.__active:
            for event in self.__inotify.read():
                for flag in flags.from_mask(event.mask):
                    # NOTE(review): emitted once per watched path because the
                    # event's own path is not tracked -- see TODO above.
                    for path in self.paths:
                        self.emit(flag, {
                            'type': flag,
                            'path': path,
                            'entry': event.name
                        })
예제 #20
0
 def __init__(self, config, event_queue):
     """Thread that publishes file events for `config.watch_paths`.

     All watch parameters (paths, flags, recursion) come from `config`;
     events are pushed onto `event_queue` by run().
     """
     super(FileMonitor, self).__init__()
     self.stop_request = threading.Event()
     self.inotify = INotify()
     self.event_queue = event_queue
     self.watch_paths = config.watch_paths
     self.watch_flags = config.watch_flags
     self.recursion = config.recursion
     # Bookkeeping between watch descriptors and watched paths.
     self.watch_descriptor_paths = {}
예제 #21
0
 def __init__(self, filepath):
     """Watch the directory containing `filepath` for changes to that file.

     Note: `filepath` must contain at least one "/" -- the rsplit unpack
     raises ValueError otherwise.
     """
     self.__path, self.__name = filepath.rsplit("/", 1)
     self.__inotify = INotify()
     # Events that indicate the file changed on disk (edit, save, replace).
     watch_flags = \
         Flags.MODIFY \
         | Flags.CLOSE_WRITE \
         | Flags.MOVED_TO \
         | Flags.EXCL_UNLINK
     self.__wd = self.__inotify.add_watch(self.__path, watch_flags)
예제 #22
0
class TaskRegistrator(Queue):
    """Watches a queue-pool directory and registers task files dropped there."""

    def __init__(self, pool_name):
        self.set_queue_config()
        self.__set_config(pool_name)

    def __set_config(self, pool_name):
        """Resolve the pool directory and start watching it for new tasks."""
        self.data = None
        self.queue_pool_dir = self.main_path + pool_name
        self.inotify = INotify()
        watch_flags = flags.CREATE | flags.MODIFY
        self.inotify.add_watch(self.queue_pool_dir, watch_flags)
        # NOTE: changes the process-wide working directory as a side effect.
        os.chdir(self.queue_pool_dir)

    def __extract_json(self, data):
        """Split a file name like 'cmd-key.ext' into its (cmd, key) parts."""
        arr_data_raw = data.split(".")
        print(" ** arr_data_raw = ", arr_data_raw)
        arr_data = arr_data_raw[0].split("-")
        print(" ** arr_data = ", arr_data)
        return arr_data[0], arr_data[1]

    def run(self, pool_name):
        """Block for the next inotify event and try to register the task file.

        Always returns True; a file that cannot be parsed or loaded is
        deleted as garbage (original best-effort behavior preserved).
        """
        event = self.inotify.read()[0]
        file_path = ""
        print(" >>>>> event = ", event)
        for flag in flags.from_mask(event.mask):
            # BUG FIX: compare enum members directly instead of their str()
            # form, whose format changed across Python/enum versions.
            if flag == flags.CREATE or flag == flags.MODIFY:
                try:
                    Printer().cprint(
                        " **** TaskRegistrator detected a new task to be registered ... name = %s"
                        % event.name)
                    cmd, key = self.__extract_json(event.name)
                    file_path = self.main_path + pool_name + "/" + event.name
                    print(" ** DIBACA = ", file_path)
                    with open(file_path) as json_file:
                        self.data = {
                            "cmd": cmd,
                            "key": key,
                            "pool_name": pool_name,
                            "path": file_path,
                            "value": json.load(json_file)
                        }
                    print("Data .. DISIMPAN")
                    return True
                except Exception:
                    # BUG FIX: narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit are no longer swallowed.
                    print(" damn .. masuk else ...")
                    pass
        if os.path.exists(file_path):
            print(" sampah! deleting ..")
            os.remove(file_path)
            print(" File is deleted ..")
        return True

    def get_data(self):
        """Return the most recently registered task payload (or None)."""
        return self.data
예제 #23
0
    def __init__(self, watch_dir, n_files):
        """Watch `watch_dir` for create/open/modify events.

        `watch_dir` and `n_files` are forwarded to the base class, which is
        assumed to store `self.watch_dir` -- TODO confirm.
        """
        super().__init__(watch_dir, n_files)

        # Maps each watch descriptor to the directory it covers.
        self.wd_to_path = {}

        self.inotify = INotify()
        watch_flags = flags.CREATE | flags.OPEN | flags.MODIFY

        wd = self.inotify.add_watch(self.watch_dir, watch_flags)
        self.wd_to_path[wd] = self.watch_dir
예제 #24
0
 def run(self):
     """Main plugin-watcher loop: react to FS events until RUN is cleared.

     Re-registers watches on DELETE_SELF, creates/removes an idle marker
     file around event bursts, and exits if the plugin-management lock
     cannot be acquired or handle_event() fails.
     """
     LOGGER.info("starting")
     init_signals()
     ih = INotify()
     # watch descriptor -> watched path
     wds = {}
     register_watches(ih, wds)
     # `force` skips the blocking read on the first pass so handle_event
     # runs at least once at startup.
     force = True
     events = []
     while RUN:
         if force:
             force = False
         else:
             events = ih.read(1000)
             if events is None or len(events) == 0:
                 create_idle_file()
                 continue
         delete_idle_file()
         if len(events) > 0:
             LOGGER.info("got events")
         for event in events:
             try:
                 path = wds[event.wd]
             except KeyError:
                 path = "unknown"
             LOGGER.debug("%s on %s" % (event, path))
             if not (event.mask & flags.IGNORED):
                 if event.mask & flags.DELETE_SELF:
                     # The watched file itself went away: re-register it so
                     # a recreated file keeps being monitored.
                     try:
                         path = wds[event.wd]
                         unregister_watch(ih, wds, event.wd)
                         register_watch(ih, wds, path)
                     except KeyError:
                         pass
         try:
             lock = filelock.FileLock(get_plugin_lock_path(), timeout=300)
             # NOTE: `poll_intervall` (double l) is filelock's historical
             # spelling of this keyword argument.
             with lock.acquire(poll_intervall=1):
                 # ok, there is no plugins.install/uninstall running
                 pass
         except filelock.Timeout:
             LOGGER.warning("can't acquire plugin management lock => "
                            "maybe a blocked plugins.install/uninstall ? "
                            "=> exiting")
             break
         if not is_status_running_or_error():
             LOGGER.info("The module is not RUNNING or ERROR => "
                         "ignoring...")
             time.sleep(2)
             force = True
             continue
         ret = self.handle_event()
         if not ret:
             break
         register_watches(ih, wds)
         LOGGER.info("waiting for events...")
     LOGGER.info("stopped")
예제 #25
0
 def __init__(self, timeout=None):
     """Set up the stat store and a (not yet started) monitor thread."""
     # this could potentially be an LRU Cache
     self._store = OrderedDict()
     self._inotify = INotify()
     self._monitor = MonitorThread()
     # NOTE(review): the thread is only created here, not started --
     # presumably started elsewhere; confirm.
     self._monitor_thread = Thread(target=self._monitor.run,
                                   args=(
                                       self._inotify,
                                       self._store,
                                       timeout,
                                   ))
예제 #26
0
 def __init__(self, root_dir=None, excluded_dirs=None):
     """Watch `root_dir` for create/modify/delete events via self.watch().

     BUG FIX: `excluded_dirs` used a mutable default argument (`[]`),
     which is shared between all calls; use None and build a fresh list
     per instance (backward-compatible for every existing caller).
     """
     if not root_dir:
         # TODO: create proper exception class
         raise NotADirectoryError
     self.root_dir = Directory(root_dir)
     self.excluded_dirs = [] if excluded_dirs is None else excluded_dirs
     self.inotify = INotify()
     self.watch_flags = flags.CREATE | flags.MODIFY | flags.DELETE
     # watched path bookkeeping, populated by self.watch()
     self.watched_dirs = {}
     self.wd_tracker = {}
     self.watch(self.root_dir)
예제 #27
0
def main(argv: List[str]) -> None:
    """Watch /tmp for all events and print each one with decoded flag names."""
    inotify = INotify()

    print("watching /tmp")
    inotify.add_watch('/tmp', masks.ALL_EVENTS)

    while True:
        for event in inotify.read():
            print(event)
            names = ", ".join(str(fl) for fl in flags.from_mask(event.mask))
            print("    " + names)
예제 #28
0
파일: inotify.py 프로젝트: Grumbel/dirtool
def main(argv: List[str]) -> None:
    """Demo entry point: dump every inotify event seen under /tmp."""
    watcher = INotify()
    mask = masks.ALL_EVENTS

    print("watching /tmp")
    watcher.add_watch('/tmp', mask)

    while True:
        for event in watcher.read():
            print(event)
            flag_names = [str(fl) for fl in flags.from_mask(event.mask)]
            print("    " + ", ".join(flag_names))
예제 #29
0
 def __set_config(self, symbol, binary_id, id):
     """Resolve the tick file location and start watching its directory."""
     self.binary_id = binary_id
     self.id = id
     self.tick_dir = local_settings["inotify_config"]["default_dir"] + symbol
     self.tick_file_path = self.tick_dir + "/" + local_settings[
         "inotify_config"]["default_name"]
     self.inotify = INotify()
     watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF
     self.inotify.add_watch(self.tick_dir, watch_flags)
     # NOTE: changes the process-wide working directory as a side effect.
     os.chdir(self.tick_dir)
     self.tick_data = None
     self.symbol = symbol
예제 #30
0
파일: monitor.py 프로젝트: irigon/pyLyRT
class Monitor(threading.Thread):
    """Watches a runtime-library directory and registers new Python modules."""

    def __init__(self, path_to_runtime_lib, register):
        threading.Thread.__init__(self)
        self.path = path_to_runtime_lib
        self.lastrun = 0  # get rid of this ugly hack
        self.register = register
        self.known_modules = set()

    def start_inotify(self):
        """Create the inotify watch; must be called before start()/run()."""
        self.inotify = INotify()
        watch_flags = flags.CLOSE_WRITE | flags.DELETE
        self.wd = self.inotify.add_watch(self.path + '/', watch_flags)

    def run(self):
        """Register every .py file that is written while the thread is alive."""
        self.alive = True
        try:
            # Idiom fix: truthiness instead of `== True` / `== False`.
            while self.alive:
                for event in self.inotify.read():
                    if not self.alive:
                        break
                    if event.name.endswith('.py'):
                        print(
                            'Cheguei em event.name. Name: {}, knownmodules:{}'.
                            format(event.name, self.known_modules))
                        module_name = self.path + '.' + os.path.splitext(
                            event.name)[0]
                        for flag in flags.from_mask(event.mask):
                            if flag.name == 'CLOSE_WRITE':
                                if event.name not in self.known_modules:
                                    print('Primeira vez em known modules ({})'.
                                          format(self.known_modules))
                                    self.known_modules.add((event.name))
                                    self.register.add_module(module_name)
                            if flag.name == 'DELETE':
                                # TODO: handle unload from roles / modules
                                pass
        finally:
            self.inotify.rm_watch(self.wd)

    def stop(self):
        """Stop the thread by generating one extra event in the watched dir."""
        self.alive = False
        self.add_mock_file()
        self.remove_mock_file()
        # send a mock event to inotify in order to avoid the need to kill the thread
        self.join()

    def add_mock_file(self):
        """Create a throwaway .py file inside the watched directory."""
        with open('{}/___mocking_file.py'.format(self.path), 'w') as f:
            f.write("Mock")

    def remove_mock_file(self):
        """Delete the throwaway file created by add_mock_file()."""
        os.remove('{}/___mocking_file.py'.format(self.path))
예제 #31
0
class McDirWatcher:
    """Yields inotify events for files created or modified under LOG_DIR."""

    def __init__(self):
        self.inotify = INotify()
        self.inotify.add_watch(LOG_DIR, flags.CREATE | flags.MODIFY)

    def events(self):
        """Generator over inotify events; blocks forever between batches."""
        while True:
            for event in self.inotify.read():
                yield event

    def close(self):
        """Release the inotify file descriptor."""
        self.inotify.close()
예제 #32
0
class FileMonitor(threading.Thread):
    """Thread that watches configured paths (optionally recursively) and
    publishes FileEvent objects onto `event_queue`."""

    def __init__(self, config, event_queue):
        super(FileMonitor, self).__init__()
        self.stop_request = threading.Event()
        self.inotify = INotify()
        self.event_queue = event_queue
        self.watch_paths = config.watch_paths
        self.watch_flags = config.watch_flags
        self.recursion = config.recursion
        # Bidirectional map: watch descriptor -> path AND path -> descriptor
        # (both key types live in the same dict; see __add_watch/__rm_watch).
        self.watch_descriptor_paths = {}

    def run(self):
        """Register the initial watches, then poll for events until joined."""
        for path in self.watch_paths:
            self.__add_watch(path)
            if self.recursion:
                # __add_watch already walks recursively; this extra walk only
                # catches roots that were skipped (already-registered check).
                for root, dirs, files in os.walk(path):
                    if root not in self.watch_descriptor_paths:
                        self.__add_watch(root)
        while not self.stop_request.isSet():
            for event in self.inotify.read(timeout=5):
                # event[0] is the event's watch descriptor (event.wd).
                file_event = FileEvent(event,
                                       self.watch_descriptor_paths[event[0]])
                if self.recursion and flags.ISDIR & file_event.mask and flags.CREATE & file_event.mask:
                    # A new sub-directory appeared: watch it too.
                    self.__add_watch(file_event.full_path)
                if flags.DELETE_SELF & file_event.mask:
                    self.__rm_watch(file_event.full_path)
                self.event_queue.put(file_event)

    def __add_watch(self, path):
        """Watch `path` and, recursively, every directory below it."""
        wd = self.inotify.add_watch(path, self.watch_flags)
        self.watch_descriptor_paths[wd] = path
        self.watch_descriptor_paths[path] = wd
        for root, dirs, files in os.walk(path):
            if root not in self.watch_descriptor_paths:
                self.__add_watch(root)

    def __rm_watch(self, path):
        """Drop both mapping entries for `path`, retrying with a trailing '/'."""
        try:
            wd = self.watch_descriptor_paths[path]
            del self.watch_descriptor_paths[path]
            del self.watch_descriptor_paths[wd]
        except KeyError:
            # The path may have been registered with a trailing slash.
            if not path.endswith('/'):
                path = path + "/"
                wd = self.watch_descriptor_paths[path]
                del self.watch_descriptor_paths[path]
                del self.watch_descriptor_paths[wd]

    def join(self, timeout=None):
        """Signal the monitor loop to stop, then join the thread."""
        self.stop_request.set()
        super(FileMonitor, self).join(timeout)
예제 #33
0
class INotifyQt(QObject):
    """Qt adapter: re-emits inotify events through a pyqtSignal."""

    sig_event = pyqtSignal(inotify_simple.Event)

    def __init__(self):
        super().__init__()

        self.inotify = INotify()
        # QSocketNotifier fires `activated` whenever the inotify fd is readable.
        self.qnotifier = QSocketNotifier(self.inotify.fd, QSocketNotifier.Read)
        self.qnotifier.activated.connect(self._on_activated)
        # Only one watch descriptor is kept at a time.
        self.wd = None

    def add_watch(self, path, flags=inotify_masks.ALL_EVENTS):
        """Watch `path` for the given event mask (defaults to all events)."""
        self.wd = self.inotify.add_watch(path, flags)

    def _on_activated(self, fd):
        # Drain all pending events and re-emit each through the Qt signal.
        assert fd == self.inotify.fd

        for ev in self.inotify.read():
            self.sig_event.emit(ev)

    def close(self):
        """Drop the socket notifier before closing the inotify fd."""
        del self.qnotifier
        self.inotify.close()
예제 #34
0
    def __init__(self, dir_filter, observer):
        """Set up the inotify instance, the event mask and bookkeeping maps."""
        self.inotify = INotify()
        # BUG FIX: watch_flags used to be assigned three times in a row
        # (single-mask, ALL_EVENTS, then this value); only the last
        # assignment ever took effect, so the dead ones were removed.
        self.watch_flags = \
            flags.CREATE | \
            flags.DELETE | \
            flags.DELETE_SELF | \
            flags.CLOSE_WRITE | \
            flags.MOVE_SELF | \
            flags.MOVED_FROM | \
            flags.MOVED_TO | \
            flags.EXCL_UNLINK

        self.wds = {}  # watch descriptor -> watched path
        self.dir_filter = dir_filter
        self.observer = observer
예제 #35
0
    def run(self):
        """Wait for watch_dir to exist, then forward matching files forever.

        Each read() batch is filtered to file names starting with
        `self.prefix`; only the most recent match is processed.
        """
        while not os.path.exists(self.watch_dir):
            logger.info("Waiting for %s...", self.watch_dir)
            time.sleep(30)

        inotify = INotify()
        watch_flags = flags.CLOSE_WRITE
        logger.info("Watching %s", self.watch_dir)
        try:
            inotify.add_watch(self.watch_dir, watch_flags)

            # And see the corresponding events:
            while True:
                events = inotify.read()
                matching = [f.name for f in events if f.name.startswith(self.prefix)]
                if len(matching) > 0:
                    # take only the last file
                    self.processForMovement(os.path.join(self.watch_dir, matching[-1]))
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit still propagate.
            logger.exception("Error when setting up inotify")
        finally:
            # BUG FIX: the original close() call sat after the infinite loop
            # and was unreachable; close the fd on the way out instead.
            inotify.close()
예제 #36
0
# Demonstration of inotify_simple: watch a scratch directory and print the
# events produced by creating, modifying and deleting entries inside it.
import os
from inotify_simple import INotify, flags

# Create the directory to watch (raises FileExistsError if it already exists).
os.mkdir('/tmp/inotify_test')

inotify = INotify()
watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF
wd = inotify.add_watch('/tmp/inotify_test', watch_flags)

# Now create, delete and modify some files in the directory being monitored:
os.chdir('/tmp/inotify_test')
# CREATE event for a directory:
os.system('mkdir foo')
# CREATE event for a file:
os.system('echo hello > test.txt')
# MODIFY event for the file:
os.system('echo world >> test.txt')
# DELETE event for the file
os.system('rm test.txt')
# DELETE event for the directory
os.system('rmdir foo')
os.chdir('/tmp')
# DELETE_SELF on the original directory. # Also generates an IGNORED event
# indicating the watch was removed.
os.system('rmdir inotify_test')

# And see the corresponding events:
for event in inotify.read():
    print(event)
    for flag in flags.from_mask(event.mask):
        print('    ' + str(flag))
예제 #37
0

def processForMovement(filename):
    """Analyse *filename* for motion and forward the verdict to the event
    generator."""
    eg.reportMovement(processFile(filename, False))


size = (82, 46)
md = MotionDetector(100000, 30)
eg = EventGen(30)
watch_dir = '/run/replay/fragments'
prefix = 'mv'

if len(sys.argv) == 1:
    # No arguments: watch the fragment directory and process new files
    # as they are closed after writing.
    inotify = INotify()
    watch_flags = flags.CLOSE_WRITE
    wd = inotify.add_watch(watch_dir, watch_flags)

    # Fix: inotify.close() was unreachable after the infinite loop; run it
    # in a finally block so the fd is released on any exit (e.g. Ctrl-C).
    try:
        # And see the corresponding events:
        while True:
            events = inotify.read()
            matching = [f.name for f in events if f.name.startswith(prefix)]
            if matching:
                # take only the last file
                processForMovement(os.path.join(watch_dir, matching[-1]))
    finally:
        inotify.close()
else:
    # Filenames given on the command line: process each one directly.
    for f in sys.argv[1:]:
        processFile(f, True)
예제 #38
0
class Server(object):
    """A TCP server which manages, power, partitioning and scheduling of jobs
    on SpiNNaker machines.

    Once constructed the server starts a background thread
    (:py:attr:`._server_thread`, :py:meth:`._run`) which implements the main
    server logic and handles communication with clients, monitoring of
    asynchronous board control events (e.g. board power-on completion) and
    watches the config file for changes. All members of this object are assumed
    to be accessed only from this thread while it is running. The thread is
    stopped, and its completion awaited by calling :py:meth:`.stop_and_join`,
    stopping the server.

    The server uses a :py:class:`~spalloc_server.Controller` object to
    implement scheduling, allocation and machine management functionality. This
    object is :py:mod:`pickled <pickle>` when the server shuts down in order to
    preserve the state of all managed machines (e.g. allocated jobs etc.).

    To allow the interruption of the server thread on asynchronous events from
    the Controller a :py:func:`~socket.socketpair` (:py:attr:`._notify_send`
    and :py:attr:`._notify_recv`) is used which is monitored along with client
    connections and config file changes.

    A number of callable commands are implemented by the server in the form of
    a subset of the :py:class:`.Server`'s methods indicated by the
    :py:func:`._command` decorator. These may be called by a client by sending
    a line ``{"command": "...", "args": [...], "kwargs": {...}}``. If the
    function throws an exception, the client is disconnected. If the function
    returns, it is packed as a JSON line ``{"return": ...}``.
    """

    def __init__(self, config_filename, cold_start=False):
        """
        Parameters
        ----------
        config_filename : str
            The filename of the config file for the server which describes the
            machines to be controlled.
        cold_start : bool
            If False (the default), the server will attempt to restore its
            previous state, if True, the server will start from scratch.
        """
        self._config_filename = config_filename
        self._cold_start = cold_start

        # Should the background thread terminate?
        self._stop = False

        # The background thread in which the server will run
        self._server_thread = threading.Thread(target=self._run,
                                               name="Server Thread")

        # The poll object used for listening for connections
        self._poll = select.poll()

        # This socket pair is used by background threads to interrupt the main
        # event loop.
        self._notify_send, self._notify_recv = socket.socketpair()
        self._poll.register(self._notify_recv, select.POLLIN)

        # Currently open sockets to clients. Once server started, should only
        # be accessed from the server thread.
        self._server_socket = None
        # {fd: socket, ...}
        self._client_sockets = {}

        # Buffered data received from each socket
        # {fd: buf, ...}
        self._client_buffers = {}

        # For each client, contains a set() of job IDs and machine names that
        # the client is watching for changes or None if all changes are to be
        # monitored.
        # {socket: set or None, ...}
        self._client_job_watches = {}
        self._client_machine_watches = {}

        # The current server configuration options. Once server started, should
        # only be accessed from the server thread.
        self._configuration = Configuration()

        # Infer the saved-state location
        self._state_filename = os.path.join(
            os.path.dirname(self._config_filename),
            ".{}.state.{}".format(os.path.basename(self._config_filename),
                                  __version__)
        )

        # Attempt to restore saved state if required
        self._controller = None
        if not self._cold_start:
            if os.path.isfile(self._state_filename):
                try:
                    # NOTE: the state file is written by this server itself in
                    # stop_and_join(); unpickling it assumes the location is
                    # only writable by trusted users.
                    with open(self._state_filename, "rb") as f:
                        self._controller = pickle.load(f)
                    logging.info("Server warm-starting from %s.",
                                 self._state_filename)
                except Exception:
                    # Fall back to a cold start on any I/O or unpickling
                    # error. (Was a bare ``except:`` which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    logging.exception(
                        "Server state could not be unpacked from %s.",
                        self._state_filename)

        # Perform cold-start if no saved state was loaded
        if self._controller is None:
            logging.info("Server cold-starting.")
            self._controller = Controller()

        # Notify the background thread when something changes in the background
        # of the controller (e.g. power state changes).
        self._controller.on_background_state_change = self._notify

        # Read configuration file. This must succeed when the server is first
        # being started.
        if not self._read_config_file():
            raise Exception("Config file could not be loaded.")

        # Set up inotify watcher for config file changes
        self._config_inotify = INotify()
        self._poll.register(self._config_inotify.fd, select.POLLIN)
        self._watch_config_file()

        # Start the server
        self._server_thread.start()

        # Flag for checking if the server is still alive
        self._running = True

    def _notify(self):
        """Wake the server thread so it re-runs its processing steps.

        Simply writes a byte into the notification socket pair; the payload
        is meaningless, the poll() on the receiving end only needs to see
        that data arrived.
        """
        self._notify_send.send(b"x")

    def _watch_config_file(self):
        """Create an inotify watch on the config file.

        This watch is monitored by the main server thread and if the config
        file is changed, the config file is re-read.
        """
        # A one-shot watch is used since some editors cause a delete event to
        # be produced when the file is saved, removing the watch anyway. Using
        # a one-shot watch simplifies implementation as it requires the watch
        # to *always* be recreated, rather than just 'sometimes'.
        mask = (inotify_flags.MODIFY |
                inotify_flags.ATTRIB |
                inotify_flags.CLOSE_WRITE |
                inotify_flags.MOVED_TO |
                inotify_flags.CREATE |
                inotify_flags.DELETE |
                inotify_flags.DELETE_SELF |
                inotify_flags.MOVE_SELF |
                inotify_flags.ONESHOT)
        self._config_inotify.add_watch(self._config_filename, mask)

    def _read_config_file(self):
        """(Re-)read the server configuration.

        If reading of the config file fails, the current configuration is
        retained, unchanged.

        Returns
        -------
        bool
            True if reading succeeded, False otherwise.
        """
        try:
            with open(self._config_filename, "r") as f:
                config_script = f.read()  # pragma: no branch
        except (IOError, OSError):
            logging.exception("Could not read "
                              "config file %s", self._config_filename)
            return False

        # The environment in which the configuration script is executed (and
        # where the script will store its options.)
        # SECURITY: the config file is arbitrary Python executed with the
        # server's privileges; it must only be writable by trusted admins.
        try:
            g = {}
            g.update(configuration.__dict__)
            g.update(coordinates.__dict__)
            exec(config_script, g)
        except Exception:
            # Executing the config file failed; don't update any settings.
            # (Was a bare ``except:`` which also caught SystemExit and
            # KeyboardInterrupt raised by the config script.)
            logging.exception("Error while evaluating "
                              "config file %s", self._config_filename)
            return False

        # Make sure a configuration object is specified
        new = g.get("configuration", None)
        if not isinstance(new, Configuration):
            logging.error("'configuration' must be a Configuration object "
                          "in config file %s", self._config_filename)
            return False

        # Update the configuration
        old = self._configuration
        self._configuration = new

        # Restart the server if the port or IP has changed (or if the server
        # has not yet been started...)
        if (new.port != old.port or
                new.ip != old.ip or
                self._server_socket is None):
            # Close all open connections
            self._close()

            # Create a new server socket
            self._server_socket = socket.socket(socket.AF_INET,
                                                socket.SOCK_STREAM)
            self._server_socket.setsockopt(socket.SOL_SOCKET,
                                           socket.SO_REUSEADDR, 1)
            self._server_socket.bind((new.ip, new.port))
            self._server_socket.listen(5)
            self._poll.register(self._server_socket,
                                select.POLLIN)

        # Update the controller
        self._controller.max_retired_jobs = new.max_retired_jobs
        self._controller.machines = OrderedDict((m.name, m)
                                                for m in new.machines)

        logging.info("Config file %s read successfully.",
                     self._config_filename)
        return True

    def _close(self):
        """Shut down the listening socket and drop every connected client."""
        if self._server_socket is not None:
            self._poll.unregister(self._server_socket)
            self._server_socket.close()
        # Iterate over a copy: _disconnect_client mutates _client_sockets.
        for sock in list(itervalues(self._client_sockets)):
            self._disconnect_client(sock)

    def _disconnect_client(self, client):
        """Disconnect a client and purge all per-client server state.

        Parameters
        ----------
        client : :py:class:`socket.Socket`
        """
        try:
            logging.info("Client %s disconnected.", client.getpeername())
        except Exception:
            # getpeername() raises OSError once the peer is gone; fall back
            # to logging the socket object itself. (Was a bare ``except:``.)
            logging.info("Client %s disconnected.", client)

        # Remove from the client list
        del self._client_sockets[client.fileno()]

        # Clear input buffer
        del self._client_buffers[client]

        # Clear any watches
        self._client_job_watches.pop(client, None)
        self._client_machine_watches.pop(client, None)

        # Stop watching the client's socket for data
        self._poll.unregister(client)

        # Disconnect the client
        client.close()

    def _accept_client(self):
        """Accept a new client connection and start tracking it."""
        client, addr = self._server_socket.accept()
        logging.info("New client connected from %s", addr)

        # Track the socket (keyed by fd) and give it an empty receive buffer.
        self._client_sockets[client.fileno()] = client
        self._client_buffers[client] = b""

        # Watch the client's socket for incoming data.
        self._poll.register(client, select.POLLIN)

    def _handle_commands(self, client):
        """Handle incoming commands from a client.

        Reads whatever data is available, appends it to the client's buffer
        and executes every complete (newline-terminated) JSON command found.
        A client which disconnects or sends a bad command is removed.

        Parameters
        ----------
        client : :py:class:`socket.Socket`
        """
        try:
            data = client.recv(1024)
        except (OSError, IOError):
            data = b""

        # Did the client disconnect?
        if len(data) == 0:
            self._disconnect_client(client)
            return

        self._client_buffers[client] += data

        # Process any complete commands (whole lines)
        while b"\n" in self._client_buffers[client]:
            line, _, self._client_buffers[client] = \
                self._client_buffers[client].partition(b"\n")

            try:
                cmd_obj = json.loads(line.decode("utf-8"))

                # Execute the specified command
                ret_val = _COMMANDS[cmd_obj["command"]](
                    self, client, *cmd_obj["args"], **cmd_obj["kwargs"])

                # Return the response
                client.send(json.dumps({"return": ret_val}).encode("utf-8") +
                            b"\n")
            except Exception:
                # If any of the above fails for any reason (e.g. invalid JSON,
                # unrecognised command, command crashes, etc.), just disconnect
                # the client. (Was a bare ``except:``.)
                try:
                    peer = client.getpeername()
                except (OSError, IOError):
                    # Peer already gone: getpeername() itself would raise and
                    # crash the server thread; log the socket object instead.
                    peer = client
                logging.exception("Client %s sent bad command %r, "
                                  "disconnecting",
                                  peer, line)
                self._disconnect_client(client)
                return

    def _send_change_notifications(self):
        """Send any registered change notifications to clients.

        Sends notifications of the forms ``{"jobs_changed": [job_id, ...]}``
        and ``{"machines_changed": [machine_name, ...]}`` to clients who have
        subscribed to be notified of changes to jobs or machines.

        The job and machine halves were previously duplicated code; both now
        delegate to :py:meth:`._send_watch_notifications`.
        """
        # Notify clients about jobs which have changed
        self._send_watch_notifications(self._client_job_watches,
                                       self._controller.changed_jobs,
                                       "jobs_changed")
        # Notify clients about machines which have changed
        self._send_watch_notifications(self._client_machine_watches,
                                       self._controller.changed_machines,
                                       "machines_changed")

    def _send_watch_notifications(self, watches, changed, message_key):
        """Send one kind of change notification to all interested clients.

        Parameters
        ----------
        watches : {socket: set or None, ...}
            Per-client watch sets; None means the client watches everything.
        changed : set
            The set of changed job IDs or machine names.
        message_key : str
            The key under which the changed items are reported (e.g.
            "jobs_changed").
        """
        if not changed:
            return
        # Iterate over a copy: _disconnect_client mutates the watch dict if
        # a send fails.
        for client, watched in list(iteritems(watches)):
            if watched is None or not watched.isdisjoint(changed):
                interesting = (list(changed)
                               if watched is None else
                               list(changed.intersection(watched)))
                try:
                    client.send(
                        json.dumps({message_key: interesting}
                                   ).encode("utf-8") + b"\n")
                except (OSError, IOError):
                    logging.exception(
                        "Could not send notification.")
                    self._disconnect_client(client)

    def _run(self):
        """The main server thread (event loop).

        This 'infinite' loop runs in a background thread and waits for and
        processes events such as the :py:meth:`._notify` method being called,
        the config file changing, clients sending commands or new clients
        connecting. It also periodically calls destroy_timed_out_jobs on the
        controller.
        """
        logging.info("Server running.")
        while not self._stop:
            # Wait for a connection to get opened/closed, a command to arrive,
            # the config file to change or the timeout to elapse.
            events = self._poll.poll(
                self._configuration.timeout_check_interval)

            # Cull any jobs which have timed out
            self._controller.destroy_timed_out_jobs()

            for fd, event in events:
                if fd == self._notify_recv.fileno():
                    # _notify was called; drain the wakeup byte(s).
                    self._notify_recv.recv(1024)
                elif fd == self._server_socket.fileno():
                    # New client connected
                    self._accept_client()
                elif fd in self._client_sockets:
                    # Incoming data from client
                    self._handle_commands(self._client_sockets[fd])
                elif fd == self._config_inotify.fd:
                    # Config file changed, re-read it. The short sleep
                    # presumably gives editors that save via
                    # delete-and-rename time to finish writing before the
                    # file is read back — TODO confirm.
                    time.sleep(0.1)
                    self._config_inotify.read()
                    self._watch_config_file()
                    self._read_config_file()
                else:  # pragma: no cover
                    # Should not get here...
                    assert False

            # Send any job/machine change notifications out
            self._send_change_notifications()

    def is_alive(self):
        """Whether the server is (still) running."""
        return self._running

    def join(self):
        """Block until both the server thread and the controller have
        completely shut down."""
        self._server_thread.join()
        self._controller.join()

    def stop_and_join(self):
        """Stop the server and wait for it to shut down completely.

        The shutdown order matters: the event loop must stop before the
        config watch and client sockets are torn down, and the controller
        must be fully stopped before its state is pickled.
        """
        logging.info("Server shutting down, please wait...")

        # Shut down server thread: raise the stop flag, then wake the event
        # loop so it notices the flag immediately.
        self._stop = True
        self._notify()
        self._server_thread.join()

        # Stop watching config file
        self._config_inotify.close()

        # Close all connections
        logging.info("Closing connections...")
        self._close()

        # Shut down the controller and flush all BMP commands
        logging.info("Waiting for all queued BMP commands...")
        self._controller.stop()
        self._controller.join()

        # Dump controller state to file so a later warm start can restore it
        # (see __init__).
        with open(self._state_filename, "wb") as f:
            pickle.dump(self._controller, f)

        logging.info("Server shut down.")

        self._running = False

    @_command
    def version(self, client):
        """Return the server's version number.

        Returns
        -------
        str
            The server's version number.
        """
        return __version__

    @_command
    def create_job(self, client, *args, **kwargs):
        """Create a new job (i.e. allocation of boards).

        This function should be called in one of the following styles::

            # Any single (SpiNN-5) board
            job_id = create_job(owner="me")
            job_id = create_job(1, owner="me")

            # Board x=3, y=2, z=1 on the machine named "m"
            job_id = create_job(3, 2, 1, machine="m", owner="me")

            # Any machine with at least 4 boards
            job_id = create_job(4, owner="me")

            # Any 7-or-more board machine with an aspect ratio at least as
            # square as 1:2
            job_id = create_job(7, min_ratio=0.5, owner="me")

            # Any 4x5 triad segment of a machine (may or may-not be a
            # torus/full machine)
            job_id = create_job(4, 5, owner="me")

            # Any torus-connected (full machine) 4x2 machine
            job_id = create_job(4, 2, require_torus=True, owner="me")

        The 'other parameters' enumerated below may be used to further restrict
        what machines the job may be allocated onto.

        Jobs for which no suitable machines are available are immediately
        destroyed (and the reason given).

        Once a job has been created, it must be 'kept alive' by a simple
        watchdog_ mechanism. Jobs may be kept alive by periodically calling the
        :py:meth:`.job_keepalive` command or by calling any other job-specific
        command. Jobs are culled if no keep alive message is received for
        ``keepalive`` seconds. If absolutely necessary, a job's keepalive value
        may be set to None, disabling the keepalive mechanism.

        .. _watchdog: https://en.wikipedia.org/wiki/Watchdog_timer

        Once a job has been allocated some boards, these boards will be
        automatically powered on and left unbooted ready for use.

        Parameters
        ----------
        owner : str
            **Required.** The name of the owner of this job.
        keepalive : float or None
            *Optional.* The maximum number of seconds which may elapse between
            a query on this job before it is automatically destroyed. If None,
            no timeout is used. (Default: 60.0)

        Other Parameters
        ----------------
        machine : str or None
            *Optional.* Specify the name of a machine which this job must be
            executed on. If None, the first suitable machine available will be
            used, according to the tags selected below. Must be None when tags
            are given. (Default: None)
        tags : [str, ...] or None
            *Optional.* The set of tags which any machine running this job must
            have. If None is supplied, only machines with the "default" tag
            will be used. If machine is given, this argument must be None.
            (Default: None)
        min_ratio : float
            The aspect ratio (h/w) which the allocated region must be 'at least
            as square as'. Set to 0.0 for any allowable shape, 1.0 to be
            exactly square. Ignored when allocating single boards or specific
            rectangles of triads.
        max_dead_boards : int or None
            The maximum number of broken or unreachable boards to allow in the
            allocated region. If None, any number of dead boards is permitted,
            as long as the board on the bottom-left corner is alive (Default:
            None).
        max_dead_links : int or None
            The maximum number of broken links allow in the allocated region.
            When require_torus is True this includes wrap-around links,
            otherwise peripheral links are not counted.  If None, any number of
            broken links is allowed. (Default: None).
        require_torus : bool
            If True, only allocate blocks with torus connectivity. In general
            this will only succeed for requests to allocate an entire machine
            (when the machine is otherwise not in use!). Must be False when
            allocating boards. (Default: False)

        Returns
        -------
        int
            The job ID given to the newly allocated job.
        """
        # Normalise the tags argument into a set before delegating.
        tags = kwargs.get("tags")
        if tags is not None:
            kwargs["tags"] = set(tags)
        return self._controller.create_job(*args, **kwargs)

    @_command
    def job_keepalive(self, client, job_id):
        """Reset the keepalive timer for the specified job.

        Note: every other job-specific command does this implicitly.
        """
        self._controller.job_keepalive(job_id)

    @_command
    def get_job_state(self, client, job_id):
        """Poll the state of a running job.

        Returns
        -------
        {"state": state, "power": power
         "keepalive": keepalive, "reason": reason}
            Where:

            state : :py:class:`~spalloc_server.controller.JobState`
                The current state of the queried job.
            power : bool or None
                If job is in the ready or power states, indicates whether the
                boards are power{ed,ing} on (True), or power{ed,ing} off
                (False). In other states, this value is None.
            keepalive : float or None
                The Job's keepalive value: the number of seconds between
                queries about the job before it is automatically destroyed.
                None if no timeout is active (or when the job has been
                destroyed).
            reason : str or None
                If the job has been destroyed, this may be a string describing
                the reason the job was terminated.
            start_time : float or None
                For queued and allocated jobs, gives the Unix time (UTC) at
                which the job was created (or None otherwise).
        """
        job_state = self._controller.get_job_state(job_id)._asdict()
        # Convert the JobState enum member to a plain int so the response
        # survives JSON serialisation.
        job_state["state"] = int(job_state["state"])
        return job_state

    @_command
    def get_job_machine_info(self, client, job_id):
        """Get the list of Ethernet connections to the allocated machine.

        Returns
        -------
        {"width": width, "height": height, \
         "connections": connections, "machine_name": machine_name}
            Where:

            width, height : int or None
                The dimensions of the machine in chips, e.g. for booting.

                None if no boards are allocated to the job.
            connections : [[[x, y], hostname], ...] or None
                A list giving Ethernet-connected chip coordinates in the
                machine to hostname.

                None if no boards are allocated to the job.
            machine_name : str or None
                The name of the machine the job is allocated on.

                None if no boards are allocated to the job.
            boards : [[x, y, z], ...] or None
                All the boards allocated to the job or None if no boards
                allocated.
        """
        width, height, connections, machine_name, boards = \
            self._controller.get_job_machine_info(job_id)

        # Convert the mapping / iterable fields into JSON-friendly lists,
        # preserving None for "no boards allocated".
        return {
            "width": width,
            "height": height,
            "connections": (None if connections is None
                            else list(iteritems(connections))),
            "machine_name": machine_name,
            "boards": None if boards is None else list(boards),
        }

    @_command
    def power_on_job_boards(self, client, job_id):
        """Power on (or reset if already on) boards associated with a job.

        Once called, the job will enter the 'power' state until the power state
        change is complete, this may take some time.
        """
        # Delegate entirely to the controller.
        self._controller.power_on_job_boards(job_id)

    @_command
    def power_off_job_boards(self, client, job_id):
        """Power off boards associated with a job.

        Once called, the job will enter the 'power' state until the power state
        change is complete, this may take some time.
        """
        # Delegate entirely to the controller.
        self._controller.power_off_job_boards(job_id)

    @_command
    def destroy_job(self, client, job_id, reason=None):
        """Destroy a job.

        Call when the job is finished, or to terminate it early; this releases
        any resources consumed by the job and removes it from any queues.

        Parameters
        ----------
        reason : str or None
            *Optional.* A human-readable string describing the reason for the
            job's destruction.
        """
        self._controller.destroy_job(job_id, reason)

    @_command
    def notify_job(self, client, job_id=None):
        r"""Register to be notified about changes to a specific job ID.

        Once registered, a client will be asynchronously be sent notifications
        form ``{"jobs_changed": [job_id, ...]}\n`` enumerating job IDs which
        have changed. Notifications are sent when a job changes state, for
        example when created, queued, powering on/off, powered on and
        destroyed. The specific nature of the change is not reflected in the
        notification.

        Parameters
        ----------
        job_id : int or None
            A job ID to be notified of or None if all job state changes should
            be reported.

        See Also
        --------
        no_notify_job : Stop being notified about a job.
        notify_machine : Register to be notified about changes to machines.
        """
        if job_id is None:
            # None means "watch every job".
            self._client_job_watches[client] = None
            return

        if client in self._client_job_watches:
            watched = self._client_job_watches[client]
            if watched is not None:
                watched.add(job_id)
            # else: already watching all jobs — nothing to do.
        else:
            self._client_job_watches[client] = {job_id}

    @_command
    def no_notify_job(self, client, job_id=None):
        """Stop being notified about a specific job ID.

        Once this command returns, no further notifications for the specified
        ID will be received.

        Parameters
        ----------
        job_id : id or None
            A job ID to no longer be notified of or None to not be notified of
            any jobs. Note that if all job IDs were registered for
            notification, this command only has an effect if the specified
            job_id is None.

        See Also
        --------
        notify_job : Register to be notified about changes to a specific job.
        """
        if client not in self._client_job_watches:
            return

        if job_id is None:
            # Forget this client's job watches entirely.
            del self._client_job_watches[client]
            return

        watched = self._client_job_watches[client]
        if watched is None:
            # Watching all jobs: removing one specific ID has no effect.
            return
        watched.discard(job_id)
        if not watched:
            del self._client_job_watches[client]

    @_command
    def notify_machine(self, client, machine_name=None):
        r"""Register to be notified about a specific machine name.

        Once registered, a client will be asynchronously be sent notifications
        of the form ``{"machines_changed": [machine_name, ...]}\n`` enumerating
        machine names which have changed. Notifications are sent when a machine
        changes state, for example when created, change, removed, allocated a
        job or an allocated job is destroyed.

        Parameters
        ----------
        machine_name : machine or None
            A machine name to be notified of or None if all machine state
            changes should be reported.

        See Also
        --------
        no_notify_machine : Stop being notified about a machine.
        notify_job : Register to be notified about changes to jobs.
        """
        if machine_name is None:
            # None means "watch every machine".
            self._client_machine_watches[client] = None
            return

        if client in self._client_machine_watches:
            watched = self._client_machine_watches[client]
            if watched is not None:
                watched.add(machine_name)
            # else: already watching all machines — nothing to do.
        else:
            self._client_machine_watches[client] = {machine_name}

    @_command
    def no_notify_machine(self, client, machine_name=None):
        """Unregister from notifications about a specific machine name.

        Once this command returns, no further notifications for the specified
        machine will be received.

        Parameters
        ----------
        machine_name : name or None
            A machine name to no longer be notified of, or None to stop being
            notified about any machine. Note that if the client registered for
            *all* machines, this command only has an effect when machine_name
            is None.

        See Also
        --------
        notify_machine : Register to be notified about changes to a machine.
        """
        if client not in self._client_machine_watches:
            return

        watched = self._client_machine_watches[client]
        if machine_name is None:
            # Forget every watch held by this client.
            del self._client_machine_watches[client]
        elif watched is not None:
            # Remove just this one machine; drop the whole entry once the
            # client no longer watches anything.
            watched.discard(machine_name)
            if not watched:
                del self._client_machine_watches[client]

    @_command
    def list_jobs(self, client):
        """Enumerate all non-destroyed jobs.

        Returns
        -------
        jobs : [{...}, ...]
            Allocated/queued jobs, ordered from oldest (first) to newest
            (last). Each job dictionary contains:

            "job_id": the ID of the job.

            "owner": the name of the job's owner.

            "start_time": when the job was created (Unix time, UTC).

            "keepalive": the maximum time allowed between queries for this
            job before it is automatically destroyed (or None if the job may
            remain allocated indefinitely).

            "state": the current
            :py:class:`~spalloc_server.controller.JobState` of the job.

            "power": whether the boards are power{ed,ing} on (True) or
            power{ed,ing} off (False) while the job is in the ready or power
            states; None in any other state.

            "args"/"kwargs": the arguments given to the alloc function,
            specifying the type/size of the requested allocation and the
            restrictions on dead boards, links and torus connectivity.

            "allocated_machine_name": the machine the job is allocated to
            run on (None if not allocated yet).

            "boards": a list [(x, y, z), ...] of boards allocated to the job.
        """
        jobs = []
        for record in self._controller.list_jobs():
            # Convert the namedtuple (and any lazy containers) into plain
            # JSON-serialisable types.
            info = record._asdict()
            info["state"] = int(info["state"])
            boards = info["boards"]
            if boards is not None:
                info["boards"] = list(boards)
            tags = info["kwargs"].get("tags")
            if tags is not None:
                info["kwargs"]["tags"] = list(tags)
            jobs.append(info)
        return jobs

    @_command
    def list_machines(self, client):
        """Enumerates all machines known to the system.

        Returns
        -------
        machines : [{...}, ...]
            Machines known to the system, ordered from highest priority
            (first) to lowest (last). Each machine dictionary contains:

            "name": the name of the machine.

            "tags": the list ['tag', ...] of tags the machine has.

            "width"/"height": the dimensions of the machine in triads.

            "dead_boards": a list([(x, y, z), ...]) of the coordinates of
            known-dead boards.

            "dead_links": a list([(x, y, z, link), ...]) of the locations of
            known-dead links from the perspective of the sender. Links to
            dead boards may or may not be included.
        """
        machines = []
        for record in self._controller.list_machines():
            # Convert the namedtuple (and any lazy containers) into plain
            # JSON-serialisable types.
            info = record._asdict()
            info["tags"] = list(info["tags"])
            info["dead_boards"] = list(info["dead_boards"])
            info["dead_links"] = [
                (x, y, z, int(link)) for (x, y, z, link) in info["dead_links"]
            ]
            machines.append(info)
        return machines

    @_command
    def get_board_position(self, client, machine_name, x, y, z):
        """Look up the physical location of a board given its logical
        coordinates.

        Parameters
        ----------
        machine_name : str
            The name of the machine containing the board.
        x, y, z : int
            The logical board location within the machine.

        Returns
        -------
        (cabinet, frame, board) or None
            The physical location of the specified board, or None if the
            machine/board are not recognised.
        """
        # Pure delegation: the controller owns the coordinate mapping.
        controller = self._controller
        return controller.get_board_position(machine_name, x, y, z)

    @_command
    def get_board_at_position(self, client, machine_name, x, y, z):
        """Get the logical location of a board at the specified physical
        location.

        Parameters
        ----------
        machine_name : str
            The name of the machine containing the board.
        x, y, z : int
            The physical board location within the machine. NOTE: despite
            being named x, y and z, these are the cabinet, frame and board
            numbers respectively, not logical coordinates. (The names are
            kept as-is for wire-protocol compatibility with existing
            callers.)

        Returns
        -------
        (x, y, z) or None
            The logical location of the board at the specified location, or
            None if the machine/board are not recognised.
        """
        return self._controller.get_board_at_position(machine_name, x, y, z)

    @_command
    def where_is(self, client, **kwargs):
        """Find out where a SpiNNaker board or chip is located, logically and
        physically.

        May be called in one of the following styles::

            >>> # Query by logical board coordinate within a machine.
            >>> where_is(machine=..., x=..., y=..., z=...)

            >>> # Query by physical board location within a machine.
            >>> where_is(machine=..., cabinet=..., frame=..., board=...)

            >>> # Query by chip coordinate (as if the machine were booted as
            >>> # one large machine).
            >>> where_is(machine=..., chip_x=..., chip_y=...)

            >>> # Query by chip coordinate, within the boards allocated to a
            >>> # job.
            >>> where_is(job_id=..., chip_x=..., chip_y=...)

        Returns
        -------
        {"machine": ..., "logical": ..., "physical": ..., "chip": ..., \
                "board_chip": ..., "job_chip": ..., "job_id": ...} or None
            If a board exists at the supplied location, a dictionary giving
            its location in several alternative forms; if the supplied
            coordinates do not pin down a specific chip, the chip coordinates
            reported are those of the board's Ethernet-connected chip.

            If no board exists at the supplied position, None is returned.

            ``machine``: name of the machine containing the board.

            ``logical``: the logical board coordinate, (x, y, z), within the
            machine.

            ``physical``: the physical board location, (cabinet, frame,
            board), within the machine.

            ``chip``: the coordinates of the chip, (x, y), if the whole
            machine were booted as a single machine.

            ``board_chip``: the coordinates of the chip, (x, y), within its
            board.

            ``job_id``: the job ID of the job currently allocated to the
            identified board, or None if the board is not allocated to a job.

            ``job_chip``: the coordinates of the chip, (x, y), within its
            job, if a job is allocated to the board, or None otherwise.
        """
        # All coordinate-translation logic lives in the controller; this
        # command just forwards the query arguments verbatim.
        controller = self._controller
        return controller.where_is(**kwargs)
예제 #39
0
    def __init__(self, config_filename, cold_start=False):
        """Set up the server and start its background thread.

        Parameters
        ----------
        config_filename : str
            The filename of the config file for the server which describes the
            machines to be controlled.
        cold_start : bool
            If False (the default), the server will attempt to restore its
            previous state; if True, the server will start from scratch.

        Raises
        ------
        Exception
            If the configuration file cannot be loaded on first start-up.
        """
        self._config_filename = config_filename
        self._cold_start = cold_start

        # Should the background thread terminate?
        self._stop = False

        # The background thread in which the server will run
        self._server_thread = threading.Thread(target=self._run,
                                               name="Server Thread")

        # The poll object used for listening for connections
        self._poll = select.poll()

        # This socket pair is used by background threads to interrupt the main
        # event loop.
        self._notify_send, self._notify_recv = socket.socketpair()
        self._poll.register(self._notify_recv, select.POLLIN)

        # Currently open sockets to clients. Once server started, should only
        # be accessed from the server thread.
        self._server_socket = None
        # {fd: socket, ...}
        self._client_sockets = {}

        # Buffered data received from each socket
        # {fd: buf, ...}
        self._client_buffers = {}

        # For each client, contains a set() of job IDs and machine names that
        # the client is watching for changes or None if all changes are to be
        # monitored.
        # {socket: set or None, ...}
        self._client_job_watches = {}
        self._client_machine_watches = {}

        # The current server configuration options. Once server started,
        # should only be accessed from the server thread.
        self._configuration = Configuration()

        # Infer the saved-state location: a hidden file alongside the config,
        # versioned so state from a different server version is ignored.
        self._state_filename = os.path.join(
            os.path.dirname(self._config_filename),
            ".{}.state.{}".format(os.path.basename(self._config_filename),
                                  __version__)
        )

        # Attempt to restore saved state if required
        self._controller = None
        if not self._cold_start and os.path.isfile(self._state_filename):
            try:
                # NOTE(review): unpickling can execute arbitrary code; the
                # state file is assumed trusted (written by this server).
                with open(self._state_filename, "rb") as f:
                    self._controller = pickle.load(f)
                logging.info("Server warm-starting from %s.",
                             self._state_filename)
            except Exception:
                # Catch only genuine errors: a bare ``except:`` would also
                # swallow KeyboardInterrupt/SystemExit during start-up. Any
                # failure here just falls through to a cold start below.
                logging.exception(
                    "Server state could not be unpacked from %s.",
                    self._state_filename)

        # Perform cold-start if no saved state was loaded
        if self._controller is None:
            logging.info("Server cold-starting.")
            self._controller = Controller()

        # Notify the background thread when something changes in the
        # background of the controller (e.g. power state changes).
        self._controller.on_background_state_change = self._notify

        # Read configuration file. This must succeed when the server is first
        # being started.
        if not self._read_config_file():
            raise Exception("Config file could not be loaded.")

        # Set up inotify watcher for config file changes
        self._config_inotify = INotify()
        self._poll.register(self._config_inotify.fd, select.POLLIN)
        self._watch_config_file()

        # Start the server
        self._server_thread.start()

        # Flag for checking if the server is still alive
        self._running = True
예제 #40
0
# Identify this device from the previously fetched DB row. NOTE(review):
# `row`, `s3`, `mac`, `myMQTTClient` and `deviceShadowHandler` are defined
# earlier in the file, outside this chunk — presumably a DB cursor result,
# a boto3 S3 client, the device MAC address and AWS IoT clients; verify.
device_id=row[0]
print (device_id)
# Fetch this device's parameter file from the per-device S3 prefix.
s3.download_file('littercam','device-'+str(mac)+'/parameters.txt', 'parameters.txt')


# Launch the native litter-detection binary as a background process.
subprocess.Popen(["./Litter_detect"])
# Announce connectivity over MQTT (QoS 0).
myMQTTClient.publish("topic", "connected", 0)

# Timestamp used to tag uploads (local time, second resolution).
timestr = time.strftime("%Y-%m-%d-%H:%M:%S")

#timestr = time.strftime("%Y-%m-%d-%H:%M:%S")

#s3.upload_file("/home/pi/detections/litter.jpg", "littercam", "device-"+str(mac)+"/litter"+str(timestr)+".jpg")

# inotify initialization: watch the detection-output and live-capture
# directories for files being created or modified by the detector process.
inotify = INotify()
inotify_live = INotify()
watch_flags = flags.CREATE | flags.MODIFY
wd = inotify.add_watch('detections', watch_flags)
wd_live = inotify_live.add_watch('live', watch_flags)
#time.sleep(3)   # Delays for 3 seconds. You can also use a float value.
while 1:
       timestr = time.strftime("%Y-%m-%d-%H:%M:%S")
       JSONPayload = '{"state":{"desired":{"live": \"'+timestr+'\" }}}'
       deviceShadowHandler.shadowUpdate(JSONPayload,customShadowCallback_Update, 5)
       events= inotify.read(5000,2000)
       events_live= inotify_live.read(5000,2000)

       print ("evnets "+ str(events))
       print ("evnets_live "+ str(events_live))