class XacroObserver:
    """Track a xacro file tree and keep a watchdog Observer pointed at its directories."""

    def __init__(self, root_file: str):
        self.xacro_tree = XacroTree(root_file)
        self.observer = Observer()
        self.logger = roslog.get_logger('xacro_live')

    @property
    def watched_dirs(self) -> typing.Set[str]:
        """Get the set of directories currently being watched."""
        dirs = set()
        for emitter in self.observer.emitters:
            dirs.add(emitter.watch.path)
        return dirs

    def start(self, event_handler: FileSystemEventHandler) -> None:
        """Begin observing and run an initial tree/watchlist sync."""
        self.observer.start()
        self.update(event_handler)

    def stop(self) -> None:
        """Stop observing and wait for the observer thread to exit."""
        self.observer.stop()
        self.observer.join()

    def update_watchlist(self, event_handler: FileSystemEventHandler) -> None:
        """Re-schedule watches only when the xacro directory set changed."""
        if self.watched_dirs == self.xacro_tree.dirs:
            return
        self.observer.unschedule_all()
        for xacro_dir in self.xacro_tree.dirs:
            self.observer.schedule(event_handler, path=xacro_dir, recursive=False)

    def update(self, event_handler: FileSystemEventHandler) -> None:
        """Refresh the xacro tree, then bring the watchlist in line with it."""
        self.xacro_tree.update()
        self.update_watchlist(event_handler)
class WatchFiles:
    """Watch a directory tree recursively and log changes to 'changes.log'."""

    observer = None

    def __init__(self):
        # Whether the observer thread is currently running.
        self.started = False

    def run(self, path):
        """Start watching *path* recursively; no-op if already running.

        Raises:
            ValueError: on any setup failure, chained to the original error.
        """
        if self.started:
            return
        try:
            logging.basicConfig(filename='changes.log', level=logging.INFO)
            logger = logging.getLogger('FILE')
            self.observer = Observer()
            self.observer.schedule(_MyHandler(logger), path=path, recursive=True)
            self.observer.start()
            self.started = True
        except Exception as e:
            # BUG FIX: chain with `from e` so the original traceback is not
            # lost when the error is re-raised as ValueError.
            raise ValueError(e) from e

    def stop(self):
        """Stop watching and wait (bounded) for the observer thread."""
        if self.started:
            self.observer.unschedule_all()
            self.observer.stop()
            self.observer.join(1000)
            self.started = False
class FileMonitor(object):
    """Watches directories (or a single file within one) via a shared Observer."""

    def __init__(self):
        self.watches = []
        self.observer = Observer()
        self.observer.start()

    def addWatch(self, path, filename=None, recursive=False):
        """Watch *path*; if *filename* is given, filter events to that file."""
        if filename is None:
            watch = self.observer.schedule(MyEventHandler(), path, recursive)
            self.watches.append(watch)
            return
        # The handler needs the full path of the single file to filter on.
        full_path = path + filename if path.endswith('/') else path + '/' + filename
        watch = self.observer.schedule(MyEventHandler([full_path, ]), path, recursive)
        self.watches.append(watch)

    def removeWatch(self, path):
        """Unschedule the watch registered for *path*; True if one was found."""
        if not path.endswith('/'):
            path += '/'
        for existing in self.watches:
            if existing.path == path:
                self.observer.unschedule(existing)
                return True
        return False

    def removeAllWatches(self):
        """Drop every scheduled watch at once."""
        self.observer.unschedule_all()
class PropMTimeWatcher:
    """Watches every path enabled in preferences, rescheduling on demand."""

    def __init__(self, app_data_folder):
        self._app_data_folder = app_data_folder
        self._observer = Observer()
        self.schedule()

    def schedule(self):
        """(Re)schedule a recursive watch for each enabled, existing path."""
        pref = PropMTimePreferences(self._app_data_folder)
        self._observer.unschedule_all()
        for path, watcher in pref.get_all_paths().items():
            if watcher:
                if os.path.exists(path):
                    event_handler = ModHandler(path, self._app_data_folder)
                    log.info('scheduling watcher : %s' % path)
                    self._observer.schedule(event_handler, path=path, recursive=True)
                else:
                    log.error('Error: "%s" does not exist.\n\nPlease edit the path.\n\nTo do this, click on the %s icon and select "Paths".' % (path, __application_name__))
        self._observer.start()

    def request_exit(self):
        """Stop the observer and verify its thread actually terminated."""
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join(TIMEOUT)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        if self._observer.is_alive():
            log.error('observer still alive')
class FileMonitor(object):
    """Manage filesystem watches on directories or individual files."""

    def __init__(self):
        self.watches = []
        self.observer = Observer()
        self.observer.start()

    def add_watch(self, path, filename=None, recursive=False):
        """Watch *path*; with *filename*, restrict events to that one file."""
        # No filename: just watch the whole directory.
        if filename is None:
            self.watches.append(
                self.observer.schedule(FSEventHandler(), path, recursive))
            return
        # Monitoring a specific file: the handler needs its full path.
        sep = '' if path.endswith('/') else '/'
        full_path = path + sep + filename
        self.watches.append(
            self.observer.schedule(FSEventHandler([full_path, ]), path, recursive))

    def remove_watch(self, path):
        """Unschedule the watch registered for *path*; True on success."""
        if not path.endswith('/'):
            path += '/'
        for registered in self.watches:
            if registered.path != path:
                continue
            self.observer.unschedule(registered)
            return True
        return False

    def remove_all_watches(self):
        """Unschedule every watch at once."""
        self.observer.unschedule_all()
class EntryWatcher:
    """Watch the directory of the currently-navigated entry for changes."""

    def __init__(self, deduplication_interval_millis: int = 100) -> None:
        self._observer = Observer()
        self._observer.start()
        self._observed_path: Optional[Path] = None
        self._handler = PathHandler(deduplication_interval_millis)
        self._update_lock = Lock()
        pubsub.entry_connect(self.on_navigated)

    def on_navigated(self, entry: Entry) -> None:
        """Re-point the single watch at *entry*'s directory (its parent for files)."""
        with self._update_lock:
            path = entry.path
            if not path.is_dir():
                # version 0.9.0 of watchdog crashes when trying to observe a file
                path = path.parent
            # BUG FIX: the old code compared a Path against str(entry.path),
            # which was never equal, so every navigation unscheduled and
            # rescheduled the watch. Compare the actually-observed Path.
            if self._observed_path == path:
                return
            self._observer.unschedule_all()
            self._observed_path = None
            if path.exists():
                self._observer.schedule(self._handler, str(path), recursive=False)
                self._observed_path = path
class ObservePaths(object):
    # Bridges FileBrowser (dired) view events to a watchdog Observer so open
    # directory views auto-refresh on filesystem changes.

    def __new__(cls):
        # If watchdog could not be imported, the feature is disabled entirely.
        if Observer is None:
            return None
        return object.__new__(cls)

    def __init__(self):
        self.observer = Observer()
        self.event_handler = ReportEvent()
        # Maps view -> list of watched directory paths (os.sep-stripped).
        self.paths = {}
        self.observer.start()
        package_events.listen(u'FileBrowser', self.dired_event_handler)

    def dired_event_handler(self, package, event, payload):
        '''receiving args from common.emit_event'''

        def view_closed(view):
            # Forget a view's paths once it is gone.
            self.paths.pop(view, None)

        def start_refresh(view, path):
            # Reset the view's watch list to just the root being refreshed.
            self.paths.update({view: [path.rstrip(os.sep)] if path else []})

        def finish_refresh(view, paths):
            if not paths:
                return
            old_paths = sorted(self.paths.get(view, []))
            paths = sorted(paths)
            # Nothing changed for this view -> keep current schedule.
            if paths == old_paths:
                return
            # Merge old + new, dropping paths that no longer exist on disk.
            self.paths.update({view: sorted(p for p in set(old_paths + [p.rstrip(os.sep) for p in paths]) if os.path.exists(p))})
            # Rebuild the whole schedule from the union of all views' paths.
            self.observer.unschedule_all()
            for p in reduce(lambda i, j: i + j, self.paths.values()):
                self.observer.schedule(self.event_handler, p)

        def fold(view, path):
            # Folding a directory removes it from the view's watch set.
            p = set(self.paths.get(view, [])) - set([path.rstrip(os.sep)])
            finish_refresh(view, list(p))

        def toggle_watch_all(watch):
            '''watch is boolean or None, global setting dired_autorefresh'''
            views = self.paths.keys()
            if not watch:
                self.paths = {}
            # Defer the refresh so it runs on Sublime's main loop.
            sublime.set_timeout(lambda: refresh(views, erase_settings=(not watch)), 1)

        # Dispatch table keyed on the dired event name.
        case = {
            'start_refresh': lambda: start_refresh(*payload),
            'finish_refresh': lambda: finish_refresh(*payload),
            'view_closed': lambda: view_closed(payload),
            'fold': lambda: fold(*payload),
            'stop_watch': lambda: view_closed(payload),
            'toggle_watch_all': lambda: toggle_watch_all(payload)
        }
        case[event]()
        # Broadcast the updated path map back to interested listeners.
        emit_event(u'', self.paths, plugin=u'FileBrowserWFS')
class Trieur():
    """Sorts files arriving in an origin directory into a destination tree."""

    def __init__(self, config=config_classic, origin=origin, destination=destination):
        self.logger = Logger.Logger()
        self.observer = Observer()
        self.config = Config.Config(config)
        self.origin = origin
        self.destination = destination
        self._running = False
        Event_Handler.Handler.patterns = self.config.get_patterns()
        self.handler = Event_Handler.Handler()
        self.watch = None

    @property
    def running(self):
        # BUG FIX: returning self.running recursed forever; expose the
        # private flag instead.
        return self._running

    def start(self):
        """Validate origin/destination, then start watching the origin.

        Returns False (after reporting) when either directory is invalid.
        """
        # Check destination and origin before doing anything.
        if not self.origin.exists() or not self.origin.is_dir(
        ) or not self.destination.exists() or not self.destination.is_dir():
            # BUG FIX: this message used to come after `return False`, so it
            # was unreachable (and misspelled "EROOR").
            print("ERROR")
            return False
        # Update the patterns and process.
        self.handler.set_process(config=self.config, logger=self.logger,
                                 destination=self.destination)
        # BUG FIX: watch this instance's origin, not the module-level global.
        self.watch = self.observer.schedule(self.handler, str(self.origin.resolve()))
        self.config.deploy(self.destination)
        # Start observer.
        self.observer.start()
        self._running = True
        self.logger.debug("Distribution started")

    def stop(self):
        """Stop observing, tear down the handler, and join the thread."""
        self.observer.stop()
        self.handler.unset_process()
        self.observer.join()
        self.observer.unschedule_all()
        self._running = False
        self.logger.debug("Distribution stopped")

    def move(self, event):
        """Move the file from *event* to its configured destination path."""
        old_path = pl.Path(event.src_path)
        new_path = self.destination / self.config.build_file_path(
            event, self.logger)
        try:
            assert not new_path.exists(
            ), f"New path for file ({old_path.name}) already exist"
        except AssertionError as error:
            self.logger.warning(error)
        else:
            shutil.move(old_path, new_path)
class AppRestartable(App):
    """Our main app component, which sets up the Resilient services and other components"""

    def __init__(self, *args, **kwargs):
        super(AppRestartable, self).__init__(*args, **kwargs)
        self.reloading = False      # True while a reload is in flight
        self.reload_timer = None    # timeout guard for reloads
        self.observer = None        # watchdog Observer for the config file

    def do_initialize_watchdog(self):
        """Initialize the configuration file watchdog"""
        # Monitor the configuration file, using a Watchdog observer daemon.
        LOG.info("Monitoring config file for changes.")
        ConfigFileUpdateHandler.set_patterns(self.config_file)
        event_handler = ConfigFileUpdateHandler(self)
        self.observer = Observer()
        config_dir = os.path.dirname(self.config_file)
        if not config_dir:
            # Config file given with no directory part: watch the cwd.
            config_dir = os.getcwd()
        self.observer.schedule(event_handler, path=config_dir, recursive=False)
        self.observer.daemon = True
        self.observer.start()

    def started(self, component):
        LOG.info("App Started %s", str(component))
        self.do_initialize_watchdog()

    def stopped(self, component):
        """Stopped Event Handler"""
        LOG.info("App Stopped")
        self._stop_observer()

    def reload_complete(self, event, *args, **kwargs):
        """ All components done handling reload event """
        if event.parent.success:
            LOG.info("Reload completed successfully!")
        else:
            LOG.error("Reloading failed to complete successfully!")
        if self.reload_timer:
            self.reload_timer.unregister()
            self.reload_timer = None
        self.reloading = False

    def reload_timeout(self, event):
        """Reload timed out, assume it failed"""
        LOG.error("Reload Timed Out, Assuming Failure!")
        self.reloading = False

    def _stop_observer(self):
        """ stop monitoring config file for changes """
        if self.observer:
            LOG.info("Stopping config file monitoring")
            self.observer.unschedule_all()
            self.observer.stop()
            # BUG FIX: wait for the observer thread to actually exit before
            # dropping the reference, otherwise the thread leaks.
            self.observer.join()
            self.observer = None
class ModuleChangedEventHandler(FileSystemEventHandler):
    # Watches the directories of tracked Python modules and calls
    # `on_change(path)` when a tracked module's source file is touched.

    def __init__(self, paths, on_change):
        # Maps directory -> set of module paths (import suffix stripped).
        self.dirs = defaultdict(set)
        self.on_change = on_change
        # Modules queued while deactivated; registered on activate().
        self.modules_to_add_later = []
        self.observer = Observer()
        # Snapshot of `dirs` taken by deactivate(), restored by activate().
        self.old_dirs = defaultdict(set)
        self.started = False
        for path in paths:
            self.add_module(path)

    def reset(self):
        # Drop all tracked modules and unschedule every watch.
        self.dirs = defaultdict(set)
        del self.modules_to_add_later[:]
        self.old_dirs = defaultdict(set)
        self.observer.unschedule_all()

    def add_module(self, path):
        """Add a python module to track changes to"""
        path = os.path.abspath(path)
        # Strip a known import suffix so all spellings map to one entry.
        for suff in importcompletion.SUFFIXES:
            if path.endswith(suff):
                path = path[:-len(suff)]
                break
        dirname = os.path.dirname(path)
        # First module in this directory: start watching the directory.
        if dirname not in self.dirs:
            self.observer.schedule(self, dirname, recursive=False)
        self.dirs[os.path.dirname(path)].add(path)

    def add_module_later(self, path):
        # Defer registration until activate() is called.
        self.modules_to_add_later.append(path)

    def activate(self):
        # Start the observer thread at most once (threads cannot restart).
        if not self.started:
            self.started = True
            self.observer.start()
        # Restore the watch set saved by deactivate() and re-schedule it.
        self.dirs = self.old_dirs
        for dirname in self.dirs:
            self.observer.schedule(self, dirname, recursive=False)
        for module in self.modules_to_add_later:
            self.add_module(module)
        del self.modules_to_add_later[:]

    def deactivate(self):
        # Pause watching: unschedule everything but remember what we had.
        self.observer.unschedule_all()
        self.old_dirs = self.dirs
        self.dirs = defaultdict(set)

    def on_any_event(self, event):
        # Only fire for events on files we actually track (as .py sources).
        dirpath = os.path.dirname(event.src_path)
        paths = [path + '.py' for path in self.dirs[dirpath]]
        if event.src_path in paths:
            self.on_change(event.src_path)
def main():
    """Watch <dir> (default '.') recursively until interrupted."""
    args = docopt(__doc__)
    path = args.get('<dir>') or '.'
    observer = Observer()
    event_handler = EventHandler()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    # BUG FIX: the old code called join() before stop(), so join() blocked
    # forever and the cleanup calls after it were unreachable. Wait for
    # Ctrl-C, then stop and join the observer thread.
    try:
        while True:
            observer.join(1)
    except KeyboardInterrupt:
        pass
    finally:
        observer.unschedule_all()
        observer.stop()
        observer.join()
class ProjectWatcher(object):
    '''Watches one project directory for filesystem changes, forwarding each
    change event to self._callback. Only a single directory can be observed
    at a time.
    '''

    def __init__(self, callback):
        super(ProjectWatcher, self).__init__()
        self.proj_event = None
        self._observer = None
        self._event_handler = None
        self._callback = callback
        self.allow_event_dispatch = True

    def start_watching(self, project_dir):
        '''Begin observing project_dir recursively.'''
        self._project_dir = project_dir
        self._observer = Observer()
        self._event_handler = ProjectEventHandler(self._observer, self)
        self._watch = self._observer.schedule(
            self._event_handler, self._project_dir, recursive=True)
        self._observer.start()

    def on_project_modified(self, *args):
        pass

    def dispatch_proj_event(self, event):
        '''Forward event to self._callback unless dispatch is suppressed.'''
        self.proj_event = event
        # Events touching '.designer' files are never dispatched.
        should_dispatch = self.allow_event_dispatch and '.designer' not in event.src_path
        if should_dispatch:
            self._callback(event)

    def stop(self):
        '''Stop observing the current directory and join the observer thread.'''
        if self._observer:
            self._observer.unschedule_all()
            self._observer.stop()
            self.join()
            self._observer = None

    def join(self):
        '''Wait for the observer thread after it has been unscheduled.'''
        self._observer.join()
def watch_apis():
    """Schedule a watch on every API directory listed in ../list/list.txt."""
    observer_api = Observer()
    # BUG FIX: the old code called stop()/unschedule_all() on the freshly
    # created observer; stopping an observer before start() sets its stop
    # event, so the watch thread exits immediately once started.
    api_event_handler = API_Handler()
    # Read API names from the list file, skipping '#' comment lines, and
    # close the file deterministically.
    with open("../list/list.txt", "r") as apis:
        for api in apis:
            # trim
            if not str(api).startswith('#'):
                observer_api.schedule(api_event_handler, "../apis/"+api.rstrip(), recursive=False)
                watchdog_log("Starting watchdog on apis: '" + api.rstrip()+"'.")
    observer_api.start()
class ObserverWrapper():
    """Runs an AutoSync observer in a child process, controlled via command_dict."""

    def __init__(self, key, monitoring_directory, storage_directory, share_ID,
                 user_name, machine_ID, command_port, json_response_dict,
                 min_update_interval, command_dict):
        self.key = key
        self.monitoring_directory = monitoring_directory
        self.storage_directory = storage_directory
        self.share_ID = share_ID
        self.user_name = user_name
        self.machine_ID = machine_ID
        self.command_port = command_port
        self.json_response_dict = json_response_dict
        self.min_update_interval = min_update_interval
        self.command_dict = command_dict

    def run(self):
        """Child-process body: watch the directory and poll command_dict."""
        self.a_s = AutoSync(self.key, self.monitoring_directory,
                            self.storage_directory, self.share_ID,
                            self.user_name, self.machine_ID, self.command_port,
                            self.json_response_dict, self.min_update_interval)
        # The observer objects can't cross process boundaries because they
        # are unpicklable, so the observer is created inside the child.
        self.observer = Observer()
        self.observer.schedule(self.a_s, self.monitoring_directory,
                               recursive=True)
        self.observer.start()
        while True:
            if self.monitoring_directory not in self.command_dict:
                time.sleep(0.1)
                continue
            command = self.command_dict[self.monitoring_directory]
            # BUG FIX: Thread.isAlive() was removed in Python 3.9; use
            # is_alive() throughout.
            if command == 'stop' and self.observer.is_alive():
                logging.debug('stopping observer')
                self.observer.stop()
            elif command == 'start' and not self.observer.is_alive():
                logging.debug('starting observer')
                self.observer.start()
            elif command == 'terminate':
                logging.debug('terminating observer')
                if self.observer.is_alive():
                    self.observer.stop()
                self.observer.unschedule_all()
                return

    def start(self):
        """Launch run() in a separate process."""
        proc = Process(target=self.run)
        proc.start()
        logging.info('observer started')
class watcher(object):
    """Groups watched files by parent directory and drives their watchdir workers."""

    def __init__(self):
        self.directories = {}
        from watchdog.observers import Observer
        self.observer = Observer()

    def begin(self, filelist):
        """Register every file, then schedule a handler per unique folder."""
        # first, create all the watchdir() for each unique directory
        for entry in filelist:
            self.addWatchItem(entry)
        # now, schedule an event handler(watchdog.watch) for each folder
        for folder in self.directories:
            self.directories[folder].watch()

    def refresh(self, filelist):
        """Placeholder: re-sync the list of monitored directories."""
        print("refreshing list")

    def end(self):
        """Drop all scheduled watches."""
        self.observer.unschedule_all()

    def addWatchItem(self, item):
        """Register *item* under its parent directory's watchdir."""
        filespec = Path(item).resolve()
        parent = filespec.parent
        if parent in self.directories:
            self.directories[parent].addItem(filespec)
        else:
            self.directories[parent] = watchdir(self.observer, filespec)

    def spinUpWorkers(self):
        for folder in self.directories:
            self.directories[folder].start()

    def spinDownWorkers(self):
        for folder in self.directories:
            self.directories[folder].stop()

    def dump(self):
        for folder in self.directories:
            print(f"Folder: {folder}")
            self.directories[folder].dump()
class ObserverWrapper():
    """Runs an AutoSync observer in a child process, controlled via command_dict."""

    def __init__(self, key, monitoring_directory, storage_directory, share_ID,
                 user_name, machine_ID, command_port, json_response_dict,
                 min_update_interval, command_dict):
        self.key = key
        self.monitoring_directory = monitoring_directory
        self.storage_directory = storage_directory
        self.share_ID = share_ID
        self.user_name = user_name
        self.machine_ID = machine_ID
        self.command_port = command_port
        self.json_response_dict = json_response_dict
        self.min_update_interval = min_update_interval
        self.command_dict = command_dict

    def run(self):
        """Child-process body: watch the directory and poll command_dict."""
        self.a_s = AutoSync(self.key, self.monitoring_directory,
                            self.storage_directory, self.share_ID,
                            self.user_name, self.machine_ID, self.command_port,
                            self.json_response_dict, self.min_update_interval)
        # The observer objects can't cross process boundaries because they
        # are unpicklable, so the observer is created inside the child.
        self.observer = Observer()
        self.observer.schedule(self.a_s, self.monitoring_directory,
                               recursive=True)
        self.observer.start()
        while True:
            if self.monitoring_directory not in self.command_dict:
                time.sleep(0.1)
                continue
            command = self.command_dict[self.monitoring_directory]
            # BUG FIX: Thread.isAlive() was removed in Python 3.9; use
            # is_alive() throughout.
            if command == 'stop' and self.observer.is_alive():
                logging.debug('stopping observer')
                self.observer.stop()
            elif command == 'start' and not self.observer.is_alive():
                logging.debug('starting observer')
                self.observer.start()
            elif command == 'terminate':
                logging.debug('terminating observer')
                if self.observer.is_alive():
                    self.observer.stop()
                self.observer.unschedule_all()
                return

    def start(self):
        """Launch run() in a separate process."""
        proc = Process(target=self.run)
        proc.start()
        logging.info('observer started')
class Rainmaker():
    """Top-level filesystem watcher managing one event handler per watched path."""

    def __init__(self):
        self.event_handlers = {}
        # Maps watch_path -> the ObservedWatch returned by schedule(); this
        # is what Observer.unschedule() actually expects.
        self.watches = {}
        self.observer = Observer()
        self.observer.start()

    def add_watch(self, watch_path, rec_flag=True):
        event_handler = RainmakerEventHandler(watch_path)
        self.event_handlers[watch_path] = event_handler
        # BUG FIX: keep the watch object returned by schedule().
        self.watches[watch_path] = self.observer.schedule(
            event_handler, watch_path, recursive=rec_flag)

    def remove_watch(self, k):
        self.event_handlers.pop(k)
        # BUG FIX: the old code passed the event handler to unschedule(),
        # which takes an ObservedWatch, so the watch was never removed.
        watch = self.watches.pop(k)
        self.observer.unschedule(watch)

    def shutdown(self):
        # NOTE(review): self.log is never assigned in this class — presumably
        # provided by a mixin/subclass; verify before relying on shutdown().
        self.log.info("Shutting down FSwatcher")
        self.observer.stop()
        self.observer.unschedule_all()
        self.observer.join()
class FolderWatcherThread(BaseThread):
    """Keeps a watchdog Observer in sync with the configured folder list."""

    observer = None

    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            # Hand every raw event over to the processor thread's queue.
            event_processor_thread.queue.put([event])

    def setup(self):
        super().setup()
        self.observer = Observer()
        self.observer.start()
        self.set_folders()

    def _run(self):
        # Any message arriving on our queue means "re-read the folder config".
        while True:
            self.queue.get()
            self.set_folders()

    def set_folders(self):
        """Replace all current watches with one recursive watch per folder."""
        self.observer.unschedule_all()
        for watched_folder in Config.folders:
            self.observer.schedule(self.Handler(), watched_folder, recursive=True)
def start(actual_directories):
    """Watch each directory in *actual_directories* until Ctrl-C; returns 0."""
    observer = Observer()
    p = PTmp()
    for actual_directory in actual_directories:
        # BUG FIX: Python-2 `print` statements are syntax errors on Python 3
        # (the rest of this file uses Python-3-only features like f-strings).
        print("DIRECTORY", actual_directory)
        observer.schedule(p, path=actual_directory, recursive=True)
    observer.start()
    try:
        print("Waiting for stuff to happen...")
        while True:
            sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        observer.unschedule_all()
        observer.stop()
        observer.join()
    return 0
class InboxWatcher:
    """
    Watches for changes to the inbox directory, in order to trigger message
    processing via InboxEventHandler
    """

    def __init__(self, command_listener, message_box):
        """Constructs the watchdog Observer"""
        self.observer = Observer()
        self.observer.schedule(
            InboxEventHandler(command_listener, message_box),
            message_box.dir, recursive=True)

    def start(self):
        """Start the watcher"""
        logger.debug("Starting InboxWatcher")
        self.observer.start()

    def stop(self):
        """Stop the watcher"""
        logger.debug("Stopping InboxWatcher")
        self.observer.unschedule_all()
        self.observer.stop()
        # BUG FIX: wait for the observer thread to terminate so stop() does
        # not leave a live thread behind.
        self.observer.join()
class SfmEventHandler(FileSystemEventHandler):
    """Starts a new SfM measurement whenever a directory is created.

    Each created directory begins a measurement: the result directory
    layout is created under it and an AddImageEventHandler is scheduled
    on it (replacing any previous image watch).
    """

    def __init__(self):
        ## measure counter
        self.measure_counter = 0
        # Dedicated observer for incoming images; started once here, then
        # re-scheduled per measurement in on_created().
        self.image_observer = Observer()
        self.image_observer.start()

    ##
    def on_created(self, event):
        """Handle creation events from the watched root directory."""
        super(SfmEventHandler, self).on_created(event)
        what = 'directory' if event.is_directory else 'file'
        logging.info("Created %s: %s", what, event.src_path)
        ## if creating a directory "image" means starting a new measurement
        if (what == 'directory'):
            self.measure_counter = self.measure_counter + 1
            image_event_handler = AddImageEventHandler()
            ## assign sfm directory
            # NOTE(review): these module-level globals are presumably read by
            # the SfM pipeline elsewhere — confirm no concurrent measurements
            # can race on them.
            global input_dir, output_dir, matches_dir, reconstruction_dir
            input_dir = os.path.join(event.src_path)
            output_dir = os.path.join(event.src_path, "result")
            matches_dir = os.path.join(output_dir, "matches")
            reconstruction_dir = os.path.join(output_dir, "reconstruction_sequential")
            # Create the input match reconstruction directory if not present
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            if not os.path.exists(matches_dir):
                os.mkdir(matches_dir)
            if not os.path.exists(reconstruction_dir):
                os.mkdir(reconstruction_dir)
            # Replace any previous image watch with one on the new input dir.
            self.image_observer.unschedule_all()
            self.image_observer.schedule(image_event_handler, input_dir, recursive=False)
class Settings(FileSystemEventHandler): defaultProfile = [{ "profile": "Default", "logLevel": 20, "fleetServer": "peld-fleet.com", "profileSettings": { "windowX": 0, "windowY": 0, "windowHeight": 225, "windowWidth": 350, "compactTransparency": 65, "seconds": 10, "interval": 100, "graphDisabled": 0, "dpsIn": [{ "color": "#FF0000", "transitionValue": 0, "labelOnly": 0, "showPeak": 0 }], "dpsOut": [{ "color": "#00FFFF", "transitionValue": 0, "labelOnly": 0, "showPeak": 0 }], "logiOut": [], "logiIn": [], "capTransfered": [], "capRecieved": [], "capDamageOut": [], "capDamageIn": [], "mining": [], "labels": { "dpsIn": { "row": 0, "column": 7, "inThousands": 0, "decimalPlaces": 1 }, "dpsOut": { "row": 0, "column": 0, "inThousands": 0, "decimalPlaces": 1 }, "logiOut": { "row": 1, "column": 0, "inThousands": 0, "decimalPlaces": 1 }, "logiIn": { "row": 1, "column": 7, "inThousands": 0, "decimalPlaces": 1 }, "capTransfered": { "row": 1, "column": 1, "inThousands": 0, "decimalPlaces": 1 }, "capRecieved": { "row": 1, "column": 6, "inThousands": 0, "decimalPlaces": 1 }, "capDamageOut": { "row": 0, "column": 1, "inThousands": 0, "decimalPlaces": 1 }, "capDamageIn": { "row": 0, "column": 6, "inThousands": 0, "decimalPlaces": 1 }, "mining": { "row": 2, "column": 7, "inThousands": 0, "decimalPlaces": 1 } }, "labelColumns": [4, 4], "detailsOrder": [ "dpsOut", "dpsIn", "logiOut", "logiIn", "capTransfered", "capRecieved", "capDamageOut", "capDamageIn" ], "detailsWindow": { "show": 1, "width": 200, "height": 250, "x": 0, "y": 0 }, "fleetWindow": { "show": 1, "width": 600, "height": 400, "x": 200, "y": 200, "showAggregate": 1, "showDpsOut": 1, "showDpsIn": 1, "showLogiOut": 1 } } }] def __init__(self): self.observer = Observer() if (platform.system() == "Windows"): self.path = os.environ['APPDATA'] + "\\PELD" filename = "PELD.json" else: self.path = os.environ['HOME'] filename = ".peld" if not os.path.exists(self.path): os.mkdir(self.path) self.fullPath = os.path.join(self.path, 
filename) if not os.path.exists(self.fullPath): settingsFile = open(self.fullPath, 'w') json.dump(self.defaultProfile, settingsFile, indent=4) settingsFile.close() self.observer.schedule(self, self.path, recursive=False) self.observer.start() settingsFile = open(self.fullPath, 'r') self.allSettings = json.load(settingsFile) settingsFile.close() self.currentProfile = self.allSettings[0]["profileSettings"] self.lowCPUMode = False def on_moved(self, event): if not event.dest_path.endswith('.json'): return try: currentProfileName = self.allSettings[ self.selectedIndex.get()]["profile"] except AttributeError: return settingsFile = open(self.fullPath, 'r') self.allSettings = json.load(settingsFile) settingsFile.close() self.mainWindow.profileMenu.delete(0, tk.END) self.initializeMenu(self.mainWindow) i = 0 for profile in self.allSettings: if (profile["profile"] == currentProfileName): self.currentProfile = profile["profileSettings"] self.selectedIndex.set(i) self.mainWindow.event_generate('<<ChangeSettings>>') return i += 1 self.currentProfile = self.allSettings[0]["profileSettings"] self.selectedIndex.set(0) self.mainWindow.event_generate('<<ChangeSettings>>') def initializeMenu(self, mainWindow): self.mainWindow = mainWindow self.selectedIndex = tk.IntVar() i = 0 for profile in self.allSettings: self.mainWindow.profileMenu.add_radiobutton( label=profile["profile"], variable=self.selectedIndex, value=i, command=self.switchProfile) i += 1 self.selectedIndex.set(0) self.mainWindow.profileMenu.add_separator() self.mainWindow.profileMenu.add_command( label="Add New Profile", command=lambda: self.addProfileWindow(add=True)) self.mainWindow.profileMenu.add_command( label="Duplicate Current Profile", command=lambda: self.addProfileWindow(duplicate=True)) self.mainWindow.profileMenu.add_command( label="Rename Current Profile", command=lambda: self.addProfileWindow(rename=True)) self.mainWindow.profileMenu.add_command( label="Delete Current Profile", 
command=self.deleteProfileWindow) def addProfileWindow(self, add=False, duplicate=False, rename=False): if rename and (self.allSettings[self.selectedIndex.get()]["profile"] == "Default"): tk.messagebox.showerror("Error", "You can't rename the Default profile.") return self.newProfileWindow = tk.Toplevel() self.newProfileWindow.wm_attributes("-topmost", True) if add: self.newProfileWindow.wm_title("New Profile") elif duplicate: self.newProfileWindow.wm_title("Duplicate Profile") elif rename: self.newProfileWindow.wm_title("Rename Profile") try: self.newProfileWindow.iconbitmap(sys._MEIPASS + '\\app.ico') except Exception: try: self.newProfileWindow.iconbitmap("app.ico") except Exception: pass self.newProfileWindow.geometry("320x80") self.newProfileWindow.update_idletasks() tk.Frame(self.newProfileWindow, height="10", width="1").grid(row="0", column="0") profileLabel = tk.Label(self.newProfileWindow, text=" New Profile Name:") profileLabel.grid(row="1", column="0") self.profileString = tk.StringVar() if duplicate: self.profileString.set( self.allSettings[self.selectedIndex.get()]["profile"]) if rename: self.profileString.set( self.allSettings[self.selectedIndex.get()]["profile"]) profileInput = tk.Entry(self.newProfileWindow, textvariable=self.profileString, width=30) profileInput.grid(row="1", column="1") profileInput.focus_set() profileInput.icursor(tk.END) tk.Frame(self.newProfileWindow, height="10", width="1").grid(row="2", column="0") buttonFrame = tk.Frame(self.newProfileWindow) buttonFrame.grid(row="100", column="0", columnspan="5") tk.Frame(buttonFrame, height="1", width="30").grid(row="0", column="0") if add: okButton = tk.Button(buttonFrame, text=" Add ", command=lambda: self.addProfile(add=True)) profileInput.bind("<Return>", lambda e: self.addProfile(add=True)) elif duplicate: okButton = tk.Button( buttonFrame, text=" Add ", command=lambda: self.addProfile(duplicate=True)) profileInput.bind("<Return>", lambda e: self.addProfile(duplicate=True)) elif 
rename: okButton = tk.Button(buttonFrame, text=" Rename ", command=lambda: self.addProfile(rename=True)) profileInput.bind("<Return>", lambda e: self.addProfile(rename=True)) okButton.grid(row="0", column="1") tk.Frame(buttonFrame, height="1", width="30").grid(row="0", column="2") cancelButton = tk.Button(buttonFrame, text=" Cancel ", command=self.newProfileWindow.destroy) cancelButton.grid(row="0", column="3") def addProfile(self, add=False, duplicate=False, rename=False): if (self.profileString.get() == "Default"): tk.messagebox.showerror( "Error", "There can only be one profile named 'Default'") return for profile in self.allSettings: if self.profileString.get() == profile["profile"]: tk.messagebox.showerror( "Error", "There is already a profile named '" + self.profileString.get() + "'") return if add: newProfile = copy.deepcopy(self.defaultProfile[0]) newProfile["profile"] = self.profileString.get() self.allSettings.insert(0, newProfile) elif duplicate: newProfile = copy.deepcopy( self.allSettings[self.selectedIndex.get()]) newProfile["profile"] = self.profileString.get() self.allSettings.insert(0, newProfile) elif rename: self.allSettings[self.selectedIndex.get( )]["profile"] = self.profileString.get() self.allSettings.insert( 0, self.allSettings.pop(self.selectedIndex.get())) self.mainWindow.profileMenu.delete(0, tk.END) self.initializeMenu(self.mainWindow) self.switchProfile() self.newProfileWindow.destroy() def deleteProfileWindow(self): if (self.allSettings[self.selectedIndex.get()]["profile"] == "Default" ): tk.messagebox.showerror("Error", "You can't delete the Default profile.") return okCancel = tk.messagebox.askokcancel( "Continue?", "Are you sure you want to delete the current profile?") if not okCancel: return self.allSettings.pop(self.selectedIndex.get()) self.mainWindow.profileMenu.delete(0, tk.END) self.initializeMenu(self.mainWindow) self.switchProfile() self.currentProfile = self.allSettings[0]["profileSettings"] def 
# NOTE(review): this span is the interior of a settings/profile class — the
# class header, __init__, and the attributes read here (self.lowCPUMode,
# self.currentProfile, self.defaultProfile, self.allSettings, self.path,
# self.fullPath, self.observer, self.mainWindow, self.selectedIndex) are
# defined outside this chunk. Getters fall back to
# self.defaultProfile[0]["profileSettings"] and self-heal missing keys via
# setSettings(). Dictionary keys keep their original (sometimes misspelled)
# spellings, e.g. "capRecieved"/"capTransfered" — they are persisted to disk.

def getCapDamageInSettings(self):
    """Return a deep copy of the cap-damage-in tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["capDamageIn"])

def getCapDamageOutSettings(self):
    """Return a deep copy of the cap-damage-out tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["capDamageOut"])

def getCapRecievedSettings(self):
    """Return a deep copy of the cap-received tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["capRecieved"])

def getCapTransferedSettings(self):
    """Return a deep copy of the cap-transferred tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["capTransfered"])

def getDpsInSettings(self):
    """Return a deep copy of the incoming-DPS tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["dpsIn"])

def getDpsOutSettings(self):
    """Return a deep copy of the outgoing-DPS tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["dpsOut"])

def getLogiInSettings(self):
    """Return a deep copy of the incoming-logi tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["logiIn"])

def getLogiOutSettings(self):
    """Return a deep copy of the outgoing-logi tracker settings ([] in low-CPU mode)."""
    if self.lowCPUMode:
        return []
    return copy.deepcopy(self.currentProfile["logiOut"])

def getMiningSettings(self):
    """Return a deep copy of the mining tracker settings.

    Self-heals: older profiles may lack the "mining" key, in which case the
    default is written back via setSettings() before returning.
    """
    if self.lowCPUMode:
        return []
    try:
        return copy.deepcopy(self.currentProfile["mining"])
    except KeyError:
        self.setSettings(mining=copy.deepcopy(
            self.defaultProfile[0]["profileSettings"]["mining"]))
        return copy.deepcopy(self.currentProfile["mining"])

def getMiningM3Setting(self):
    """Return the showM3 flag of the first mining entry, or False if absent.

    NOTE(review): returns [] (not False) in low-CPU mode, and an empty
    "mining" list would raise an uncaught IndexError — confirm intended.
    """
    if self.lowCPUMode:
        return []
    try:
        return self.currentProfile["mining"][0]["showM3"]
    except KeyError:
        return False

def getInterval(self):
    """Return the update interval (forced to 100 in low-CPU mode)."""
    if self.lowCPUMode:
        return 100
    return self.currentProfile["interval"]

def getSeconds(self):
    """Return the tracking window in seconds (forced to 2 in low-CPU mode)."""
    if self.lowCPUMode:
        return 2
    return self.currentProfile["seconds"]

# Main-window geometry accessors (no low-CPU special case, no fallback).
def getWindowHeight(self):
    return self.currentProfile["windowHeight"]

def getWindowWidth(self):
    return self.currentProfile["windowWidth"]

def getWindowX(self):
    return self.currentProfile["windowX"]

def getWindowY(self):
    return self.currentProfile["windowY"]

def getCompactTransparency(self):
    """Return compact-mode transparency, writing the default (65) if missing."""
    try:
        return self.currentProfile["compactTransparency"]
    except KeyError:
        self.setSettings(compactTransparency=65)
        return self.currentProfile["compactTransparency"]

def getGraphDisabled(self):
    """Return whether the graph is disabled (always True in low-CPU mode)."""
    if self.lowCPUMode:
        return True
    try:
        return self.currentProfile["graphDisabled"]
    except KeyError:
        self.setSettings(graphDisabled=0)
        return self.currentProfile["graphDisabled"]

def getLabels(self):
    """Return a deep copy of the label layout.

    If the copy lacks a "mining" label, synthesize one in the first free
    cell of an 8x8 row/column grid (cells occupied by existing labels are
    removed from the candidate list first). Missing "labels" entirely is
    self-healed from the default profile.
    """
    try:
        labelsCopy = copy.deepcopy(self.currentProfile["labels"])
        if "mining" not in labelsCopy:
            # All 8x8 grid positions; prune ones already taken by a label.
            placementArray = [[x, y] for x in range(8) for y in range(8)]
            for entry in labelsCopy:
                for place in placementArray:
                    if place[0] == labelsCopy[entry]["row"] and place[
                            1] == labelsCopy[entry]["column"]:
                        placementArray.remove(place)
            labelsCopy["mining"] = {
                "row": placementArray[0][0],
                "column": placementArray[0][1],
                "inThousands": 0,
                "decimalPlaces": 1
            }
        return labelsCopy
    except KeyError:
        self.setSettings(labels=copy.deepcopy(
            self.defaultProfile[0]["profileSettings"]["labels"]))
        return copy.deepcopy(self.currentProfile["labels"])

def getLabelColumns(self):
    """Return a copy of the label column split, defaulting to [4, 4]."""
    try:
        return copy.deepcopy(self.currentProfile["labelColumns"])
    except KeyError:
        self.setSettings(labelColumns=[4, 4])
        return copy.deepcopy(self.currentProfile["labelColumns"])

@property
def detailsWindow(self):
    # Falls back to the default profile when unset or falsy.
    return self.currentProfile.get("detailsWindow") or self.defaultProfile[
        0]["profileSettings"]["detailsWindow"]

# detailsWindow sub-key properties: each getter falls back to the default
# profile; each setter creates the "detailsWindow" dict on demand.
@property
def detailsWindowShow(self):
    if self.lowCPUMode:
        return False
    if 'detailsWindow' in self.currentProfile and 'show' in self.currentProfile[
            "detailsWindow"]:
        return self.currentProfile["detailsWindow"]["show"]
    else:
        return self.defaultProfile[0]["profileSettings"]["detailsWindow"][
            "show"]

@detailsWindowShow.setter
def detailsWindowShow(self, value):
    if 'detailsWindow' in self.currentProfile:
        self.currentProfile["detailsWindow"]["show"] = value
    else:
        self.currentProfile["detailsWindow"] = {}
        self.currentProfile["detailsWindow"]["show"] = value

@property
def detailsWindowHeight(self):
    if 'detailsWindow' in self.currentProfile and 'height' in self.currentProfile[
            "detailsWindow"]:
        return self.currentProfile["detailsWindow"]["height"]
    else:
        return self.defaultProfile[0]["profileSettings"]["detailsWindow"][
            "height"]

@detailsWindowHeight.setter
def detailsWindowHeight(self, value):
    if 'detailsWindow' in self.currentProfile:
        self.currentProfile["detailsWindow"]["height"] = value
    else:
        self.currentProfile["detailsWindow"] = {}
        self.currentProfile["detailsWindow"]["height"] = value

@property
def detailsWindowWidth(self):
    if 'detailsWindow' in self.currentProfile and 'width' in self.currentProfile[
            "detailsWindow"]:
        return self.currentProfile["detailsWindow"]["width"]
    else:
        return self.defaultProfile[0]["profileSettings"]["detailsWindow"][
            "width"]

@detailsWindowWidth.setter
def detailsWindowWidth(self, value):
    if 'detailsWindow' in self.currentProfile:
        self.currentProfile["detailsWindow"]["width"] = value
    else:
        self.currentProfile["detailsWindow"] = {}
        self.currentProfile["detailsWindow"]["width"] = value

@property
def detailsWindowX(self):
    if 'detailsWindow' in self.currentProfile and 'x' in self.currentProfile[
            "detailsWindow"]:
        return self.currentProfile["detailsWindow"]["x"]
    else:
        return self.defaultProfile[0]["profileSettings"]["detailsWindow"][
            "x"]

@detailsWindowX.setter
def detailsWindowX(self, value):
    if 'detailsWindow' in self.currentProfile:
        self.currentProfile["detailsWindow"]["x"] = value
    else:
        self.currentProfile["detailsWindow"] = {}
        self.currentProfile["detailsWindow"]["x"] = value

@property
def detailsWindowY(self):
    if 'detailsWindow' in self.currentProfile and 'y' in self.currentProfile[
            "detailsWindow"]:
        return self.currentProfile["detailsWindow"]["y"]
    else:
        return self.defaultProfile[0]["profileSettings"]["detailsWindow"][
            "y"]

@detailsWindowY.setter
def detailsWindowY(self, value):
    if 'detailsWindow' in self.currentProfile:
        self.currentProfile["detailsWindow"]["y"] = value
    else:
        self.currentProfile["detailsWindow"] = {}
        self.currentProfile["detailsWindow"]["y"] = value

# App-wide values are stored on the "Default" profile entry in allSettings.
@property
def disableUpdateReminderFor(self):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            return profile.get("disableUpdateReminderFor")

@disableUpdateReminderFor.setter
def disableUpdateReminderFor(self, value):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            profile["disableUpdateReminderFor"] = value
            self.writeSettings()

@property
def logLevel(self):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            if not profile.get("logLevel"):
                # Missing/falsy level: persist the default before returning.
                profile["logLevel"] = self.defaultProfile[0]["logLevel"]
                self.writeSettings()
            return profile.get("logLevel")

@logLevel.setter
def logLevel(self, value):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            profile["logLevel"] = value
            self.writeSettings()

@property
def detailsOrder(self):
    if 'detailsOrder' in self.currentProfile:
        return copy.deepcopy(self.currentProfile["detailsOrder"])
    else:
        return copy.deepcopy(
            self.defaultProfile[0]["profileSettings"]["detailsOrder"])

@detailsOrder.setter
def detailsOrder(self, value):
    self.currentProfile["detailsOrder"] = value
    self.writeSettings()

@property
def fleetServer(self):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            if not profile.get("fleetServer"):
                profile["fleetServer"] = self.defaultProfile[0][
                    "fleetServer"]
                self.writeSettings()
            return profile.get("fleetServer")

@fleetServer.setter
def fleetServer(self, value):
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            profile["fleetServer"] = value
            self.writeSettings()

# fleetWindow sub-key properties: same fallback/create-on-demand pattern
# as the detailsWindow properties above.
@property
def fleetWindowShow(self):
    if self.lowCPUMode:
        return False
    if 'fleetWindow' in self.currentProfile and 'show' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["show"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "show"]

@fleetWindowShow.setter
def fleetWindowShow(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["show"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["show"] = value

@property
def fleetWindowWidth(self):
    if 'fleetWindow' in self.currentProfile and 'width' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["width"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "width"]

@fleetWindowWidth.setter
def fleetWindowWidth(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["width"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["width"] = value

@property
def fleetWindowHeight(self):
    if 'fleetWindow' in self.currentProfile and 'height' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["height"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "height"]

@fleetWindowHeight.setter
def fleetWindowHeight(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["height"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["height"] = value

@property
def fleetWindowX(self):
    if 'fleetWindow' in self.currentProfile and 'x' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["x"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "x"]

@fleetWindowX.setter
def fleetWindowX(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["x"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["x"] = value

@property
def fleetWindowY(self):
    if 'fleetWindow' in self.currentProfile and 'y' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["y"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "y"]

@fleetWindowY.setter
def fleetWindowY(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["y"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["y"] = value

@property
def fleetWindowShowAggregate(self):
    if 'fleetWindow' in self.currentProfile and 'showAggregate' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["showAggregate"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "showAggregate"]

@fleetWindowShowAggregate.setter
def fleetWindowShowAggregate(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["showAggregate"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["showAggregate"] = value

@property
def fleetWindowShowDpsOut(self):
    if 'fleetWindow' in self.currentProfile and 'showDpsOut' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["showDpsOut"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "showDpsOut"]

@fleetWindowShowDpsOut.setter
def fleetWindowShowDpsOut(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["showDpsOut"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["showDpsOut"] = value

@property
def fleetWindowShowDpsIn(self):
    if 'fleetWindow' in self.currentProfile and 'showDpsIn' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["showDpsIn"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "showDpsIn"]

@fleetWindowShowDpsIn.setter
def fleetWindowShowDpsIn(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["showDpsIn"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["showDpsIn"] = value

@property
def fleetWindowShowLogiOut(self):
    if 'fleetWindow' in self.currentProfile and 'showLogiOut' in self.currentProfile[
            "fleetWindow"]:
        return self.currentProfile["fleetWindow"]["showLogiOut"]
    else:
        return self.defaultProfile[0]["profileSettings"]["fleetWindow"][
            "showLogiOut"]

@fleetWindowShowLogiOut.setter
def fleetWindowShowLogiOut(self, value):
    if 'fleetWindow' in self.currentProfile:
        self.currentProfile["fleetWindow"]["showLogiOut"] = value
    else:
        self.currentProfile["fleetWindow"] = {}
        self.currentProfile["fleetWindow"]["showLogiOut"] = value

def setOverviewFiles(self, characterDict):
    """Persist the character -> overview-file mapping on the Default profile."""
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            profile["overviewFiles"] = characterDict
            self.writeSettings()

def getOverviewFiles(self):
    """Return the character -> overview-file mapping, or {} if unset.

    On first access without settings, shows the overview notification once
    per session (guarded by the overviewNotificaitonShown attribute —
    original attribute spelling kept).
    """
    for profile in self.allSettings:
        if (profile["profile"] == "Default"):
            if 'overviewFiles' not in profile:
                if not hasattr(self, 'overviewNotificaitonShown'):
                    logging.info(
                        'No overview settings set, showing overview notification for this session...'
                    )
                    self.overviewNotificaitonShown = True
                    from settings.overviewSettings import OverviewNotification
                    OverviewNotification()
                return {}
            return profile["overviewFiles"]

def getOverviewFile(self, characterName):
    """Return the overview file for a character, the "default" entry, or None."""
    overviewFiles = self.getOverviewFiles()
    if characterName in overviewFiles:
        return overviewFiles[characterName]
    else:
        return overviewFiles[
            "default"] if 'default' in overviewFiles else None

def getOverviewSettings(self, characterName):
    """Load and parse the character's overview YAML file; None on any failure.

    Failures are logged and surfaced to the user via a tkinter error dialog.
    """
    overviewFile = self.getOverviewFile(characterName)
    if not overviewFile:
        return None
    try:
        with open(overviewFile, encoding='utf8') as overviewFileContent:
            return yaml.safe_load(overviewFileContent.read())
    except Exception as e:
        logging.exception('Exception loading overview settings file: ' +
                          overviewFile)
        logging.exception(e)
        tk.messagebox.showerror(
            "Error",
            "Error loading overview settings file:\n" + overviewFile)
        return None

def setSettings(self,
                capDamageIn=None,
                capDamageOut=None,
                capRecieved=None,
                capTransfered=None,
                dpsIn=None,
                dpsOut=None,
                logiIn=None,
                logiOut=None,
                mining=None,
                interval=None,
                seconds=None,
                windowHeight=None,
                windowWidth=None,
                windowX=None,
                windowY=None,
                compactTransparency=None,
                labels=None,
                labelColumns=None,
                graphDisabled=None,
                detailsOrder=None,
                detailsWindowShow=None):
    """Bulk-update any non-None keyword onto the current profile, then persist.

    This function is dumb, ugly, and deprecated.
    Settings should be set through properties.
    Some cleanup would be needed to remove it entirely.
    """
    if not capDamageIn == None:
        self.currentProfile["capDamageIn"] = capDamageIn
    if not capDamageOut == None:
        self.currentProfile["capDamageOut"] = capDamageOut
    if not capRecieved == None:
        self.currentProfile["capRecieved"] = capRecieved
    if not capTransfered == None:
        self.currentProfile["capTransfered"] = capTransfered
    if not dpsIn == None:
        self.currentProfile["dpsIn"] = dpsIn
    if not dpsOut == None:
        self.currentProfile["dpsOut"] = dpsOut
    if not logiIn == None:
        self.currentProfile["logiIn"] = logiIn
    if not logiOut == None:
        self.currentProfile["logiOut"] = logiOut
    if not mining == None:
        self.currentProfile["mining"] = mining
    if not interval == None:
        self.currentProfile["interval"] = interval
    if not seconds == None:
        self.currentProfile["seconds"] = seconds
    if not windowHeight == None:
        self.currentProfile["windowHeight"] = windowHeight
    if not windowWidth == None:
        self.currentProfile["windowWidth"] = windowWidth
    if not windowX == None:
        self.currentProfile["windowX"] = windowX
    if not windowY == None:
        self.currentProfile["windowY"] = windowY
    if not compactTransparency == None:
        self.currentProfile["compactTransparency"] = compactTransparency
    if not labels == None:
        self.currentProfile["labels"] = labels
    if not labelColumns == None:
        self.currentProfile["labelColumns"] = labelColumns
    if not graphDisabled == None:
        self.currentProfile["graphDisabled"] = graphDisabled
    if not detailsWindowShow == None:
        self.detailsWindowShow = detailsWindowShow
    if not detailsOrder == None:
        self.currentProfile["detailsOrder"] = detailsOrder
    self.writeSettings()

def switchProfile(self):
    """Move the selected profile to the front of allSettings and apply it.

    Rebuilds the profile menu and restores both window geometries from the
    newly active profile, then persists the reordering.
    """
    self.mainWindow.saveWindowGeometry()
    self.allSettings.insert(0,
                            self.allSettings.pop(self.selectedIndex.get()))
    self.currentProfile = self.allSettings[0]["profileSettings"]
    self.mainWindow.profileMenu.delete(0, tk.END)
    self.initializeMenu(self.mainWindow)
    self.mainWindow.geometry("%sx%s+%s+%s" % (self.getWindowWidth(),
                                              self.getWindowHeight(),
                                              self.getWindowX(),
                                              self.getWindowY()))
    self.mainWindow.detailsWindow.geometry(
        "%sx%s+%s+%s" % (self.detailsWindowWidth, self.detailsWindowHeight,
                         self.detailsWindowX, self.detailsWindowY))
    self.mainWindow.update_idletasks()
    self.mainWindow.graphFrame.readjust(0)
    self.mainWindow.animator.changeSettings()
    self.writeSettings()

def writeSettings(self):
    """Write allSettings to disk via a temp file, then swap it into place.

    The watchdog watch is unscheduled around the rename so our own write
    does not re-trigger the settings-file handler (this object is also the
    scheduled event handler).
    NOTE(review): os.remove(self.fullPath) happens BEFORE unschedule_all(),
    so the delete itself may still emit an event — confirm intended.
    """
    logging.info('New settings:')
    logging.info(str(self.currentProfile))
    tempFile = os.path.join(self.path, "PELD_temp.json")
    settingsFile = open(tempFile, 'w')
    json.dump(self.allSettings, settingsFile, indent=4)
    settingsFile.close()
    os.remove(self.fullPath)
    self.observer.unschedule_all()
    os.rename(tempFile, self.fullPath)
    self.observer.schedule(self, self.path, recursive=False)
class ModuleChangedEventHandler(FileSystemEventHandler):
    """Invoke ``on_change`` when one of a set of tracked Python module files
    is modified on disk.

    ``paths`` is an iterable of module file paths; the callback is invoked as
    ``on_change(files_modified=[path])``.
    """

    def __init__(self, paths, on_change):
        # directory -> set of tracked module paths (import suffix stripped)
        self.dirs = defaultdict(set)
        self.on_change = on_change
        # Modules queued via track_module() while not activated.
        self.modules_to_add_later = []
        self.observer = Observer()
        self.old_dirs = defaultdict(set)
        self.started = False
        self.activated = False
        for path in paths:
            self._add_module(path)

    def reset(self):
        """Forget all tracked modules and remove every scheduled watch."""
        self.dirs = defaultdict(set)
        del self.modules_to_add_later[:]
        self.old_dirs = defaultdict(set)
        self.observer.unschedule_all()

    def _add_module(self, path):
        """Add a python module to track changes"""
        path = os.path.abspath(path)
        # Strip the first recognized import suffix (e.g. ".py") so the
        # stored key is the extension-less module path.
        for suff in importcompletion.SUFFIXES:
            if path.endswith(suff):
                path = path[:-len(suff)]
                break
        dirname = os.path.dirname(path)
        if dirname not in self.dirs:
            self.observer.schedule(self, dirname, recursive=False)
        # Fix: reuse the dirname computed above instead of recomputing
        # os.path.dirname(path) (same value; matches the sibling copy of
        # this class elsewhere in the file).
        self.dirs[dirname].add(path)

    def _add_module_later(self, path):
        self.modules_to_add_later.append(path)

    def track_module(self, path):
        """
        Begins tracking this if activated, or remembers to track later.
        """
        if self.activated:
            self._add_module(path)
        else:
            self._add_module_later(path)

    def activate(self):
        """Start the observer (once), schedule watches for all known
        directories, and flush the modules queued while deactivated."""
        if self.activated:
            raise ValueError("%r is already activated." % (self, ))
        if not self.started:
            self.started = True
            self.observer.start()
        for dirname in self.dirs:
            self.observer.schedule(self, dirname, recursive=False)
        for module in self.modules_to_add_later:
            self._add_module(module)
        del self.modules_to_add_later[:]
        self.activated = True

    def deactivate(self):
        """Stop delivering change events; watches are unscheduled but the
        observer thread keeps running for a later activate()."""
        if not self.activated:
            raise ValueError("%r is not activated." % (self, ))
        self.observer.unschedule_all()
        self.activated = False

    def on_any_event(self, event):
        # Only fire the callback for files explicitly tracked in this
        # event's directory.
        dirpath = os.path.dirname(event.src_path)
        paths = [path + '.py' for path in self.dirs[dirpath]]
        if event.src_path in paths:
            self.on_change(files_modified=[event.src_path])
class ChangesObserver(FileSystemEventHandler): def __init__(self, changes_handler=None): self.observer = Observer() self.changes = [] self.black_list = [] self.path = None self.changes_handler = changes_handler self.changes_timer = None self.observer.start() def observe(self, path, black_list=None): self.path = path self.black_list = black_list or [] self.observer.unschedule_all() self.observer.schedule(self, self.path, recursive=True) def get_changes_since(self, ts): ret = [] for change in self.changes: if change[0] >= ts: ret.append(change) return ret def on_any_event(self, event): if event.is_directory: return now = time.time() rel_src_path = get_rel_path(event.src_path, self.path) if event.event_type == EVENT_TYPE_MOVED: self.add_pure_change(Change(now, rel_src_path, EVENT_TYPE_DELETED)) rel_dest_path = get_rel_path(event.dest_path, self.path) self.add_pure_change(Change(now, rel_dest_path, EVENT_TYPE_CREATED)) else: self.add_pure_change(Change(time.time(), rel_src_path, event.event_type)) try: ioloop.IOLoop.instance().add_callback(self.refresh_change_timer) except RuntimeError: print 'ioloop.add_callback failed' def refresh_change_timer(self): loop = ioloop.IOLoop.instance() if self.changes_timer: loop.remove_timeout(self.changes_timer) self.changes_timer = loop.add_timeout(time.time() + 0.1, self.change_happened) def change_happened(self): if self.changes_handler and callable(self.changes_handler): ioloop.IOLoop.instance().add_callback(self.changes_handler) def find_related_trash_changes(self, change): trash_changes = [] for old_change in self.changes[::-1]: if old_change.path != change.path or change.time - old_change.time > 0.1: continue if change.type == EVENT_TYPE_DELETED: trash_changes.append(old_change) if old_change.type == EVENT_TYPE_CREATED: return trash_changes elif change.type == EVENT_TYPE_CREATED: trash_changes.append(old_change) if old_change.type == EVENT_TYPE_DELETED: return trash_changes return [] def add_pure_change(self, change): for 
path_name in self.black_list: if '..' not in os.path.relpath(change.path, path_name): print '...', change return trash_changes = self.find_related_trash_changes(change) if trash_changes: for change in trash_changes: self.changes.remove(change) print '- ', change else: self.changes.append(change) print '+ ', change self.remove_outdated_changes(30) def remove_outdated_changes(self, seconds): for change in self.changes[:]: if change.time - time.time() > seconds: try: self.changes.remove(change) except ValueError: pass
class ModuleChangedEventHandler(FileSystemEventHandler):
    """Fires ``on_change(files_modified=[path])`` whenever one of a set of
    tracked Python module files is modified on disk."""

    def __init__(self, paths, on_change):
        # directory -> set of tracked module paths (import suffix removed)
        self.dirs = defaultdict(set)
        self.on_change = on_change
        self.modules_to_add_later = []
        self.observer = Observer()
        self.old_dirs = defaultdict(set)
        self.started = False
        self.activated = False
        for module_path in paths:
            self._add_module(module_path)

    def reset(self):
        """Drop all tracked modules and unschedule every watch."""
        self.dirs = defaultdict(set)
        del self.modules_to_add_later[:]
        self.old_dirs = defaultdict(set)
        self.observer.unschedule_all()

    def _add_module(self, path):
        """Add a python module to track changes"""
        path = os.path.abspath(path)
        # Drop the first matching import suffix (".py", ...) from the key.
        matching = [s for s in importcompletion.SUFFIXES if path.endswith(s)]
        if matching:
            path = path[:-len(matching[0])]
        parent = os.path.dirname(path)
        if parent not in self.dirs:
            self.observer.schedule(self, parent, recursive=False)
        self.dirs[parent].add(path)

    def _add_module_later(self, path):
        self.modules_to_add_later.append(path)

    def track_module(self, path):
        """
        Begins tracking this if activated, or remembers to track later.
        """
        if not self.activated:
            self._add_module_later(path)
        else:
            self._add_module(path)

    def activate(self):
        """Start the observer thread once, watch every known directory, and
        flush the modules queued while deactivated."""
        if self.activated:
            raise ValueError("%r is already activated." % (self,))
        if not self.started:
            self.started = True
            self.observer.start()
        for watched_dir in self.dirs:
            self.observer.schedule(self, watched_dir, recursive=False)
        while self.modules_to_add_later:
            self._add_module(self.modules_to_add_later.pop(0))
        self.activated = True

    def deactivate(self):
        """Unschedule all watches; the observer thread keeps running."""
        if not self.activated:
            raise ValueError("%r is not activated." % (self,))
        self.observer.unschedule_all()
        self.activated = False

    def on_any_event(self, event):
        # Only react to files explicitly tracked in this event's directory.
        folder = os.path.dirname(event.src_path)
        tracked_sources = {p + '.py' for p in self.dirs[folder]}
        if event.src_path in tracked_sources:
            self.on_change(files_modified=[event.src_path])
class Interactions(FileSystemEventHandler):
    """Monitor a game log directory for '.cmdrHistory' interaction files,
    using watchdog events where reliable and falling back to tk-timer
    polling otherwise (Python 2 code: uses print statements).

    Parsed interactions are queued on interaction_queue and announced to
    the tk main thread via a virtual '<<InteractionEvent>>' event.
    """

    _POLL = 5  # Fallback polling interval

    def __init__(self):
        FileSystemEventHandler.__init__(
            self)  # futureproofing - not need for current version of watchdog
        self.root = None  # tk root, supplied by start()
        self.currentdir = None  # The actual logdir that we're monitoring
        self.observer = None
        self.observed = None  # a watchdog ObservedWatch, or None if polling
        self.seen = []  # interactions that we've already processed
        self.interaction_queue = [
        ]  # For communicating interactions back to main thread

    def start(self, root, started):
        """Begin monitoring; ``started`` is the session start timestamp.

        Returns True on success, False if no usable log directory exists.
        """
        self.root = root
        self.session_start = started
        logdir = config.get('interactiondir') or config.default_interaction_dir
        if not logdir or not isdir(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir
        # Set up a watchdog observer.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('interactiondir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)
        if __debug__:
            print '%s interactions "%s"' % (polling and 'Polling'
                                            or 'Monitoring', self.currentdir)
        # Even if we're not intending to poll, poll at least once to process pre-existing
        # data and to check whether the watchdog thread has crashed due to events not\
        # being supported on this filesystem.
        self.root.after(self._POLL * 1000 / 2, self.poll, True)
        return True

    def stop(self):
        """Stop monitoring the current directory (observer thread kept alive)."""
        if __debug__:
            print 'Stopping monitoring interactions'
        self.currentdir = None
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()

    def close(self):
        """Stop monitoring and shut the observer thread down completely."""
        self.stop()
        if self.observer:
            self.observer.stop()
            self.observer.join()
            self.observer = None

    def poll(self, first_time=False):
        """Tk-timer poll; on the first call, stop polling if the watchdog
        emitter thread is alive (events are working)."""
        self.process()
        if first_time:
            # Watchdog thread
            emitter = self.observed and self.observer._emitter_for_watch[
                self.observed]  # Note: Uses undocumented attribute
            if emitter and emitter.is_alive():
                return  # Watchdog thread still running - stop polling
        self.root.after(self._POLL * 1000, self.poll)  # keep polling

    def on_modified(self, event):
        # watchdog callback - DirModifiedEvent on macOS, FileModifiedEvent on Windows
        if event.is_directory or stat(
                event.src_path
        ).st_size:  # Can get on_modified events when the file is emptied
            self.process(event.src_path if not event.is_directory else None)

    # Can be called either in watchdog thread or, if polling, in main thread. The code assumes not both.
    def process(self, logfile=None):
        """Parse ``logfile`` (or find one modified this session) and queue
        any interactions not already seen."""
        if not logfile:
            # for/else: bail out if no .cmdrHistory file was modified since
            # the session started.
            for logfile in [
                    x for x in listdir(self.currentdir)
                    if x.endswith('.cmdrHistory')
            ]:
                if self.session_start and getmtime(
                        join(self.currentdir, logfile)) >= self.session_start:
                    break
            else:
                return
        try:
            # cmdrHistory file is shared between beta and live. So filter out interactions not in this game session.
            start = self.session_start + 11644473600  # Game time is 369 years in the future
            with open(join(self.currentdir, logfile), 'rb') as h:
                current = [
                    x for x in json.load(h)['Interactions']
                    if x['Epoch'] >= start
                ]
                new = [x for x in current if x not in self.seen
                       ]  # O(n^2) comparison but currently limited to 10x10
                self.interaction_queue.extend(sorted(
                    new, key=itemgetter('Epoch')))  # sort by time
                self.seen = current
                if self.interaction_queue:
                    # Wake the tk main thread; when="tail" is thread-safe-ish
                    # here because event_generate queues the event.
                    self.root.event_generate('<<InteractionEvent>>',
                                             when="tail")
        except:
            # Deliberate best-effort: malformed/locked files are ignored.
            if __debug__:
                print_exc()

    def get_entry(self):
        """Pop and return the oldest queued interaction, or None."""
        if not self.interaction_queue:
            return None
        else:
            return self.interaction_queue.pop(0)
# Listen for all .msf folders - .msf4 and .msf5
# (appends to the `handlers` list built earlier in the script)
home = os.path.expanduser('~')
for entry in os.listdir(home):
    if 'msf' in entry:
        handlers.append((ResponderHandler, os.path.join(home, entry, 'loot')))

observer = Observer()
observers = []
for handler_cls, watch_path in handlers:
    # Create the loot directory if it does not exist yet.
    if not os.path.exists(watch_path):
        os.makedirs(watch_path)
    info("Watching ({}) for files with ({})".format(
        watch_path, ', '.join(handler_cls.patterns)))
    observer.schedule(handler_cls(), path=watch_path, recursive=False)
observer.start()

# Idle until interrupted, then tear the observer down cleanly.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.unschedule_all()
    observer.stop()
observer.join()
class Rainmaker():
    """Top-level watch manager: loads sync profiles from config, schedules a
    watchdog event handler per profile, and relays handler messages through
    a queue."""

    def __init__(self, config=None, auto_start=True):
        self.log = logging.getLogger('main')
        self.event_handlers = []
        self.config = config if config else RainmakerConfig()
        self.profiles = self.config.profiles
        self.msg_q = Queue()
        self.observer = Observer()
        self.observer.start()
        if not auto_start:
            return
        # Auto-start every profile flagged for it.
        for k in self.profiles:
            if self.profiles[k]['auto_start'] == True:
                self.add_watch(k)
        if not self.event_handlers:
            self.log.warn('No running profiles')

    def add_watch(self, key):
        """Start watching the profile named ``key``; no-op on unknown keys."""
        if not key in self.profiles:
            self.log.error('unknown profile %s' % key)
            # Bug fix: previously fell through after logging and raised
            # KeyError on the lookup below.
            return
        self.log.info('Starting profile: %s' % key)
        profile = self.profiles[key]
        profile['local_root'] = os.path.abspath(
            os.path.expanduser(profile['local_root']))
        profile.subst_all()
        if not os.path.isdir(profile['local_root']):
            self.log.info('creating dir: %s' % profile['local_root'])
            os.mkdir(profile['local_root'])
        event_handler = RainmakerEventHandler()
        event_handler.init2(self.config, profile, self.msg_q, key)
        self.event_handlers.append(event_handler)
        rec_flag = True
        # 'in' instead of the Python-2-only dict.has_key().
        if 'recursive' in profile:
            rec_flag = bool(profile['recursive'])
        self.observer.schedule(event_handler, profile['local_root'],
                               recursive=rec_flag)
        # Use the instance logger consistently (was the root logging module).
        self.log.info('Started profile: %s' % key)
        if profile['cmds']['startup'] != '':
            event_handler.startup_cmd()

    def remove_watch(self, k):
        """Stop the watch for profile ``k`` (first matching handler only)."""
        for eh in self.event_handlers:
            if eh.name == k:
                self.log.info('Stopping profile: %s' % k)
                # NOTE(review): watchdog's unschedule() expects the
                # ObservedWatch returned by schedule(), not the handler —
                # verify this call actually removes the watch.
                self.observer.unschedule(eh)
                break

    def messages(self):
        """Drain and return all messages queued by the event handlers."""
        messages = []
        try:
            while True:
                messages.append(self.msg_q.get_nowait())
        except Empty:
            pass
        return messages

    def shutdown(self):
        """Stop the observer and every profile's handler."""
        self.log.info("Shutting down FSwatcher")
        self.observer.stop()
        self.observer.unschedule_all()
        self.observer.join()
        self.log.info("Shutting down thread and Fork pool")
        for eh in self.event_handlers:
            self.log.info('Stopping profile: %s' % eh.name)
            eh.stop()
class InstrumentManagerPlugin(HasPreferencesPlugin): """The instrument plugin manages the instrument drivers and their use. """ #: List of the known instrument profile ids. profiles = List() #: List of instruments for which at least one driver is declared. instruments = List() #: List of registered intrument users. #: Only registered users can be granted the use of an instrument. users = List() #: List of registered instrument starters. starters = List() #: List of registered connection types. connections = List() #: List of registered settings. settings = List() #: Currently used profiles. #: This dict should be edited by user code. used_profiles = Dict() def start(self): """Start the plugin lifecycle by collecting all contributions. """ super(InstrumentManagerPlugin, self).start() core = self.workbench.get_plugin('enaml.workbench.core') core.invoke_command('exopy.app.errors.enter_error_gathering') state = core.invoke_command('exopy.app.states.get', {'state_id': 'exopy.app.directory'}) i_dir = os.path.join(state.app_directory, 'instruments') # Create instruments subfolder if it does not exist. if not os.path.isdir(i_dir): os.mkdir(i_dir) p_dir = os.path.join(i_dir, 'profiles') # Create profiles subfolder if it does not exist. 
if not os.path.isdir(p_dir): os.mkdir(p_dir) self._profiles_folders = [p_dir] self._users = ExtensionsCollector(workbench=self.workbench, point=USERS_POINT, ext_class=InstrUser, validate_ext=validate_user) self._users.start() self._starters = ExtensionsCollector(workbench=self.workbench, point=STARTERS_POINT, ext_class=Starter, validate_ext=validate_starter) self._starters.start() checker = make_extension_validator(Connection, ('new',), ('id', 'description')) self._connections = ExtensionsCollector(workbench=self.workbench, point=CONNECTIONS_POINT, ext_class=Connection, validate_ext=checker) self._connections.start() checker = make_extension_validator(Settings, ('new',), ('id', 'description')) self._settings = ExtensionsCollector(workbench=self.workbench, point=SETTINGS_POINT, ext_class=Settings, validate_ext=checker) self._settings.start() checker = make_extension_validator(ManufacturerAlias, (), ('id', 'aliases',)) self._aliases = ExtensionsCollector(workbench=self.workbench, point=ALIASES_POINT, ext_class=ManufacturerAlias, validate_ext=checker) self._aliases.start() self._drivers = DeclaratorsCollector(workbench=self.workbench, point=DRIVERS_POINT, ext_class=[Driver, Drivers]) self._drivers.start() for contrib in ('users', 'starters', 'connections', 'settings'): self._update_contribs(contrib, None) err = False details = {} for d_id, d_infos in self._drivers.contributions.items(): res, tb = d_infos.validate(self) if not res: err = True details[d_id] = tb if err: core.invoke_command('exopy.app.errors.signal', {'kind': 'exopy.driver-validation', 'details': details}) # TODO providing in app a way to have a splash screen while starting to # let the user know what is going on would be nice # TODO handle dynamic addition of drivers by observing contributions # and updating the manufacturers infos accordingly. 
# should also observe manufacturer aliases self._refresh_profiles() self._bind_observers() core.invoke_command('exopy.app.errors.exit_error_gathering') def stop(self): """Stop the plugin and remove all observers. """ self._unbind_observers() for contrib in ('drivers', 'users', 'starters', 'connections', 'settings'): getattr(self, '_'+contrib).stop() def create_connection(self, connection_id, infos, read_only=False): """Create a connection and initialize it. Parameters ---------- connection_id : unicode Id of the the connection to instantiate. infos : dict Dictionarry to use to initialize the state of the connection. read_only : bool Should the connection be created as read-only. Returns ------- connection : BaseConnection Ready to use widget. """ c_decl = self._connections.contributions[connection_id] conn = c_decl.new(self.workbench, infos, read_only) if conn.declaration is None: conn.declaration = c_decl return conn def create_settings(self, settings_id, infos, read_only=False): """Create a settings and initialize it. Parameters ---------- settings_id : unicode Id of the the settings to instantiate. infos : dict Dictionary to use to initialize the state of the settings. read_only : bool Should the settings be created as read-only. Returns ------- connection : BaseSettings Ready to use widget. """ if settings_id is None: msg = 'No id was found for the settings whose infos are %s' logger.warning(msg, infos) return None s_decl = self._settings.contributions[settings_id] sett = s_decl.new(self.workbench, infos, read_only) if sett.declaration is None: sett.declaration = s_decl return sett def get_drivers(self, drivers): """Query drivers class and the associated starters. Parameters ---------- drivers : list List of driver ids for which the matching class should be returned. Returns ------- drivers : dict Requested drivers and associated starter indexed by id. missing : list List of ids which do not correspond to any known valid driver. 
""" ds = self._drivers.contributions knowns = {d_id: ds[d_id] for d_id in drivers if d_id in ds} missing = list(set(drivers) - set(knowns)) return {d_id: (infos.cls, self._starters.contributions[infos.starter].starter) for d_id, infos in knowns.items()}, missing def get_profiles(self, user_id, profiles, try_release=True, partial=False): """Query profiles for use by a declared user. Parameters ---------- user_id : unicode Id of the user which request the authorization to use the instrument. profiles : list Ids of the instrument profiles which are requested. try_release : bool, optional Should we attempt to release currently used profiles. partial : bool, optional Should only a subset of the requested profiles be returned if some profiles are not available. Returns ------- profiles : dict Requested profiles as a dictionary. unavailable : list List of profiles that are not currently available and cannot be released. """ if user_id not in self.users: raise ValueError('Unknown instrument user tried to query profiles') used = [p for p in profiles if p in self.used_profiles] unavailable = [] if used: released = [] if not try_release: unavailable = used else: used_by_owner = defaultdict(set) for p in used: used_by_owner[self.used_profiles[p]].add(p) for o in list(used_by_owner): user = self._users.contributions[o] if user.policy == 'releasable': to_release = used_by_owner[o] r = user.release_profiles(self.workbench, to_release) unavailable.extend(set(to_release) - set(r)) released.extend(r) else: unavailable.extend(used_by_owner[o]) if unavailable and not partial: if released: used = {k: v for k, v in self.used_profiles.items() if k not in released} self.used_profiles = used return {}, unavailable available = ([p for p in profiles if p not in unavailable] if unavailable else profiles) with self.suppress_notifications(): u = self.used_profiles self.used_profiles = {} u.update({p: user_id for p in available}) self.used_profiles = u queried = {} for p in available: queried[p] 
= self._profiles[p]._config.dict() return queried, unavailable def release_profiles(self, user_id, profiles): """Release some previously acquired profiles. The user should not maintain any communication with the instruments whose profiles have been released after calling this method. Parameters ---------- user_id : unicode Id of the user releasing the profiles. profiles : iterable Profiles (ids) which are no longer needed by the user. """ self.used_profiles = {k: v for k, v in self.used_profiles.items() if k not in profiles or v != user_id} def get_aliases(self, manufacturer): """List the known aliases of a manufacturer. Parameters ---------- manufacturer : str Name of the manufacturer for which to return the aliases. Returns ------- aliases : list[unicode] Known aliases of the manufacturer. """ aliases = self._aliases.contributions.get(manufacturer, []) if aliases: aliases = aliases.aliases return aliases # ========================================================================= # --- Private API --------------------------------------------------------- # ========================================================================= #: Collector of drivers. _drivers = Typed(DeclaratorsCollector) #: Collector for the manufacturer aliases. _aliases = Typed(ExtensionsCollector) #: Declared manufacturers storing the corresponding model infos. _manufacturers = Typed(ManufacturersHolder) #: Collector of users. _users = Typed(ExtensionsCollector) #: Collector of starters. _starters = Typed(ExtensionsCollector) #: Collector of connections. _connections = Typed(ExtensionsCollector) #: Collector of settings. _settings = Typed(ExtensionsCollector) #: List of folders in which to search for profiles. # TODO make that list editable and part of the preferences _profiles_folders = List() #: Mapping of profile name to profile infos. _profiles = Dict() #: Watchdog observer tracking changes to the profiles folders. 
_observer = Typed(Observer)

def _update_contribs(self, name, change):
    """Update the list of available contributions (editors, engines,
    tools) when they change.

    """
    setattr(self, name, list(getattr(self, '_'+name).contributions))
    if name == 'starters':
        # Keep each starter aware of the id under which it was registered.
        for id_, s in getattr(self, '_'+name).contributions.items():
            s.starter.id = id_

def _refresh_profiles(self):
    """List of profiles living in the profiles folders.

    """
    profiles = {}
    logger = logging.getLogger(__name__)
    for path in self._profiles_folders:
        if os.path.isdir(path):
            # Only plain '*.instr.ini' files count as profiles.
            filenames = sorted(f for f in os.listdir(path)
                               if f.endswith('.instr.ini') and
                               (os.path.isfile(os.path.join(path, f))))

            for filename in filenames:
                profile_path = os.path.join(path, filename)
                # Beware redundant names are overwritten
                name = filename[:-len('.instr.ini')]
                # TODO should be delayed and lead to a nicer report
                i = ProfileInfos(path=profile_path, plugin=self)
                res, msg = validate_profile_infos(i)
                if res:
                    profiles[name] = i
                else:
                    logger.warning(msg)
        else:
            logger.warning('{} is not a valid directory'.format(path))

    self._profiles = profiles

def _bind_observers(self):
    """Start the observers.

    """
    for contrib in ('users', 'starters', 'connections', 'settings'):
        callback = partial(self._update_contribs, contrib)
        getattr(self, '_'+contrib).observe('contributions', callback)

    def update():
        """Run the handler on the main thread to avoid GUI issues.

        """
        deferred_call(self._refresh_profiles)

    self._observer = Observer()
    for folder in self._profiles_folders:
        handler = SystematicFileUpdater(update)
        self._observer.schedule(handler, folder, recursive=True)

    self._observer.start()

def _unbind_observers(self):
    """Stop the observers.

    """
    for contrib in ('users', 'starters', 'connections', 'settings'):
        callback = partial(self._update_contribs, contrib)
        # BUG FIX: this previously called .observe(...), a copy-paste from
        # _bind_observers which REGISTERED one more observer on teardown
        # instead of removing the one added at startup.
        getattr(self, '_'+contrib).unobserve('contributions', callback)

    self._observer.unschedule_all()
    self._observer.stop()
    try:
        self._observer.join()
    except RuntimeError:
        # The observer thread may never have been started.
        pass

def _post_setattr__profiles(self, old, new):
    """Automatically update the profiles member.

    """
    self.profiles = sorted(new)

def _default__manufacturers(self):
    """Delayed till this is first needed.

    """
    holder = ManufacturersHolder(plugin=self)
    valid_drivers = [d for d in self._drivers.contributions.values()]
    holder.update_manufacturers(valid_drivers)
    return holder
class EDLogs(FileSystemEventHandler):
    """Monitor the game's 'Journal.*' log directory.

    Combines a watchdog observer (where reliable) with a polling worker
    thread; parsed journal entries are queued and the Tk main thread is
    poked via a virtual event. Python 2 code.
    """

    _POLL = 1		# Polling is cheap, so do it often

    def __init__(self):
        FileSystemEventHandler.__init__(self)	# futureproofing - not need for current version of watchdog
        self.root = None
        self.currentdir = None		# The actual logdir that we're monitoring
        self.logfile = None
        self.observer = None
        self.observed = None		# a watchdog ObservedWatch, or None if polling
        self.thread = None
        self.event_queue = []		# For communicating journal entries back to main thread

        # Context for journal handling
        self.version = None
        self.is_beta = False
        self.mode = None
        self.cmdr = None
        self.shipid = None
        self.system = None
        self.station = None
        self.coordinates = None
        self.ranks = None
        self.credits = None

    def set_callback(self, name, callback):
        # NOTE(review): self.callbacks is never initialized in __init__ in
        # this chunk -- calling this would raise AttributeError; confirm
        # against the full file.
        if name in self.callbacks:
            self.callbacks[name] = callback

    def start(self, root):
        """Begin monitoring; returns True on success, False otherwise."""
        self.root = root
        logdir = config.get('journaldir') or config.default_journal_dir
        if not logdir or not exists(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        # Latest pre-existing logfile - e.g. if E:D is already running. Assumes logs sort alphabetically.
        # Do this before setting up the observer in case the journal directory has gone away
        try:
            logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal.')])
            self.logfile = logfiles and join(self.currentdir, logfiles[-1]) or None
        except:
            self.logfile = None
            return False

        # Set up a watchog observer. This is low overhead so is left running irrespective of whether monitoring is desired.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('journaldir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
            atexit.register(self.observer.stop)

        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        if __debug__:
            print '%s "%s"' % (polling and 'Polling' or 'Monitoring', self.currentdir)
            print 'Start logfile "%s"' % self.logfile

        if not self.running():
            self.thread = threading.Thread(target = self.worker, name = 'Journal worker')
            self.thread.daemon = True
            self.thread.start()

        return True

    def stop(self):
        """Stop monitoring and clear the journal context."""
        if __debug__:
            print 'Stopping monitoring'
        self.currentdir = None
        self.version = self.mode = self.cmdr = self.system = self.station = self.coordinates = None
        self.is_beta = False
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.thread = None	# Orphan the worker thread - will terminate at next poll

    def running(self):
        return self.thread and self.thread.is_alive()

    def on_created(self, event):
        # watchdog callback, e.g. client (re)started.
        if not event.is_directory and basename(event.src_path).startswith('Journal.'):
            self.logfile = event.src_path

    def worker(self):
        """Background thread: tail the active journal file and queue lines."""
        # Tk isn't thread-safe in general.
        # event_generate() is the only safe way to poke the main thread from this thread:
        # https://mail.python.org/pipermail/tkinter-discuss/2013-November/003522.html

        # Seek to the end of the latest log file
        logfile = self.logfile
        if logfile:
            loghandle = open(logfile, 'r')
            for line in loghandle:
                try:
                    self.parse_entry(line)	# Some events are of interest even in the past
                except:
                    if __debug__:
                        print 'Invalid journal entry "%s"' % repr(line)
            self.root.event_generate('<<JournalEvent>>', when="tail")	# Generate null event to update the display at start
        else:
            loghandle = None

        # Watchdog thread
        emitter = self.observed and self.observer._emitter_for_watch[self.observed]	# Note: Uses undocumented attribute

        while True:
            # Check whether new log file started, e.g. client (re)started.
            if emitter and emitter.is_alive():
                newlogfile = self.logfile	# updated by on_created watchdog callback
            else:
                # Poll
                try:
                    logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal.')])
                    newlogfile = logfiles and join(self.currentdir, logfiles[-1]) or None
                except:
                    if __debug__: print_exc()
                    newlogfile = None

            if logfile != newlogfile:
                logfile = newlogfile
                if loghandle:
                    loghandle.close()
                if logfile:
                    loghandle = open(logfile, 'r')
                if __debug__:
                    print 'New logfile "%s"' % logfile

            if logfile:
                loghandle.seek(0, SEEK_CUR)	# reset EOF flag
                for line in loghandle:
                    self.event_queue.append(line)
                if self.event_queue:
                    self.root.event_generate('<<JournalEvent>>', when="tail")

            sleep(self._POLL)

            # Check whether we're still supposed to be running
            if threading.current_thread() != self.thread:
                return	# Terminate

    def parse_entry(self, line):
        """Parse one journal line, update the context, return the entry dict.

        Returns {'event': None} on any parse failure.
        """
        try:
            entry = json.loads(line, object_pairs_hook=OrderedDict)	# Preserve property order because why not?
            entry['timestamp']	# we expect this to exist
            if entry['event'] == 'Fileheader':
                self.version = entry['gameversion']
                self.is_beta = 'beta' in entry['gameversion'].lower()
                self.ranks = None
            elif entry['event'] == 'LoadGame':
                self.cmdr = entry['Commander']
                self.shipid = entry['ShipID']
                self.mode = entry.get('GameMode')	# 'Open', 'Solo', 'Group', or None for CQC
                self.ranks = { "Combat": None, "Trade": None, "Explore": None, "Empire": None, "Federation": None, "CQC": None }
                self.credits = { "balance": entry['Credits'], "loan": entry['Loan'] }
            elif entry['event'] == 'NewCommander':
                self.cmdr = entry['Name']
            elif entry['event'] in ['ShipyardSwap']:
                self.shipid = entry['ShipID']
            elif entry['event'] in ['Undocked']:
                self.station = None
            elif entry['event'] in ['Location', 'FSDJump', 'Docked']:
                if 'StarPos' in entry:
                    self.coordinates = tuple(entry['StarPos'])
                elif self.system != entry['StarSystem']:
                    self.coordinates = None	# Docked event doesn't include coordinates
                self.system = entry['StarSystem'] == 'ProvingGround' and 'CQC' or entry['StarSystem']
                self.station = entry.get('StationName')	# May be None
            elif entry['event'] in ['Rank', 'Promotion'] and self.ranks:
                for k,v in entry.iteritems():
                    if k in self.ranks:
                        self.ranks[k] = (v,0)
            elif entry['event'] == 'Progress' and self.ranks:
                for k,v in entry.iteritems():
                    if self.ranks.get(k) is not None:
                        self.ranks[k] = (self.ranks[k][0], min(v, 100))	# perhaps not taken promotion mission yet
            return entry
        except:
            if __debug__:
                print 'Invalid journal entry "%s"' % repr(line)
            return { 'event': None }

    def get_entry(self):
        """Pop and parse the oldest queued journal line, or None if empty."""
        if not self.event_queue:
            return None
        else:
            return self.parse_entry(self.event_queue.pop(0))
class EDLogs(FileSystemEventHandler):
    """Monitor the game's 'Journal.*.log' directory (second variant).

    Compared to the variant above, this one also tracks group, ship type,
    paint job and body, filters on the '.log' suffix, and adds close().
    Python 2 code.
    """

    _POLL = 1		# Polling is cheap, so do it often

    def __init__(self):
        FileSystemEventHandler.__init__(self)	# futureproofing - not need for current version of watchdog
        self.root = None
        self.currentdir = None		# The actual logdir that we're monitoring
        self.logfile = None
        self.observer = None
        self.observed = None		# a watchdog ObservedWatch, or None if polling
        self.thread = None
        self.event_queue = []		# For communicating journal entries back to main thread

        # Context for journal handling
        self.version = None
        self.is_beta = False
        self.mode = None
        self.group = None
        self.cmdr = None
        self.shipid = None
        self.shiptype = None
        self.shippaint = None
        self.body = None
        self.system = None
        self.station = None
        self.coordinates = None
        self.ranks = None
        self.credits = None

    def set_callback(self, name, callback):
        # NOTE(review): self.callbacks is never initialized in __init__ in
        # this chunk -- calling this would raise AttributeError; confirm
        # against the full file.
        if name in self.callbacks:
            self.callbacks[name] = callback

    def start(self, root):
        """Begin monitoring; returns True on success, False otherwise."""
        self.root = root
        logdir = config.get('journaldir') or config.default_journal_dir
        if not logdir or not exists(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        # Latest pre-existing logfile - e.g. if E:D is already running. Assumes logs sort alphabetically.
        # Do this before setting up the observer in case the journal directory has gone away
        try:
            logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal.') and x.endswith('.log')])
            self.logfile = logfiles and join(self.currentdir, logfiles[-1]) or None
        except:
            self.logfile = None
            return False

        # Set up a watchog observer. This is low overhead so is left running irrespective of whether monitoring is desired.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('journaldir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()

        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        if __debug__:
            print '%s "%s"' % (polling and 'Polling' or 'Monitoring', self.currentdir)
            print 'Start logfile "%s"' % self.logfile

        self.event_queue.append(None)	# Generate null event to signal (re)start

        if not self.running():
            self.thread = threading.Thread(target = self.worker, name = 'Journal worker')
            self.thread.daemon = True
            self.thread.start()

        return True

    def stop(self):
        """Stop monitoring and clear the journal context."""
        if __debug__:
            print 'Stopping monitoring'
        self.currentdir = None
        self.version = self.mode = self.group = self.cmdr = self.body = self.system = self.station = self.coordinates = None
        self.is_beta = False
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.thread = None	# Orphan the worker thread - will terminate at next poll

    def close(self):
        """Full shutdown: stop monitoring and join the helper threads."""
        thread = self.thread
        self.stop()
        if self.observer:
            self.observer.stop()
        if thread:
            thread.join()
        if self.observer:
            self.observer.join()
        self.observer = None

    def running(self):
        return self.thread and self.thread.is_alive()

    def on_created(self, event):
        # watchdog callback, e.g. client (re)started.
        if not event.is_directory and basename(event.src_path).startswith('Journal.') and basename(event.src_path).endswith('.log'):
            self.logfile = event.src_path

    def worker(self):
        """Background thread: tail the active journal file and queue lines."""
        # Tk isn't thread-safe in general.
        # event_generate() is the only safe way to poke the main thread from this thread:
        # https://mail.python.org/pipermail/tkinter-discuss/2013-November/003522.html

        # Seek to the end of the latest log file
        logfile = self.logfile
        if logfile:
            loghandle = open(logfile, 'r')
            for line in loghandle:
                try:
                    self.parse_entry(line)	# Some events are of interest even in the past
                except:
                    if __debug__:
                        print 'Invalid journal entry "%s"' % repr(line)
        else:
            loghandle = None

        # Watchdog thread
        emitter = self.observed and self.observer._emitter_for_watch[self.observed]	# Note: Uses undocumented attribute

        while True:
            # Check whether new log file started, e.g. client (re)started.
            if emitter and emitter.is_alive():
                newlogfile = self.logfile	# updated by on_created watchdog callback
            else:
                # Poll
                try:
                    logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal.') and x.endswith('.log')])
                    newlogfile = logfiles and join(self.currentdir, logfiles[-1]) or None
                except:
                    if __debug__: print_exc()
                    newlogfile = None

            if logfile != newlogfile:
                logfile = newlogfile
                if loghandle:
                    loghandle.close()
                if logfile:
                    loghandle = open(logfile, 'r')
                if __debug__:
                    print 'New logfile "%s"' % logfile

            if logfile:
                loghandle.seek(0, SEEK_CUR)	# reset EOF flag
                for line in loghandle:
                    self.event_queue.append(line)
                if self.event_queue:
                    self.root.event_generate('<<JournalEvent>>', when="tail")

            sleep(self._POLL)

            # Check whether we're still supposed to be running
            if threading.current_thread() != self.thread:
                return	# Terminate

    def parse_entry(self, line):
        """Parse one journal line, update the context, return the entry dict.

        Returns {'event': None} on any parse failure.
        """
        try:
            entry = json.loads(line, object_pairs_hook=OrderedDict)	# Preserve property order because why not?
            entry['timestamp']	# we expect this to exist
            if entry['event'] == 'Fileheader':
                self.version = entry['gameversion']
                self.is_beta = 'beta' in entry['gameversion'].lower()
            elif entry['event'] == 'LoadGame':
                self.cmdr = entry['Commander']
                self.mode = entry.get('GameMode')	# 'Open', 'Solo', 'Group', or None for CQC
                self.group = entry.get('Group')
                self.shiptype = 'Ship' in entry and entry['Ship'] not in ['TestBuggy', 'Empire_Fighter', 'Federation_Fighter', 'Independent_Fighter'] and entry['Ship'].lower() or None	# None in CQC. TestBuggy or *_Fighter if game starts in SRV/fighter.
                self.shipid = self.shiptype and entry.get('ShipID') or None	# None in CQC
                self.shippaint = None
                self.body = None
                self.system = None
                self.station = None
                self.coordinates = None
                self.ranks = { "Combat": None, "Trade": None, "Explore": None, "Empire": None, "Federation": None, "CQC": None }
                self.credits = ( entry['Credits'], entry['Loan'] )
            elif entry['event'] == 'NewCommander':
                self.cmdr = entry['Name']
                self.group = None
            elif entry['event'] == 'ShipyardNew':
                self.shipid = entry['NewShipID']
                self.shiptype = entry['ShipType'].lower()
                self.shippaint = None
            elif entry['event'] == 'ShipyardSwap':
                self.shipid = entry['ShipID']
                self.shiptype = entry['ShipType'].lower()
                self.shippaint = None
            elif entry['event'] in ['ModuleBuy', 'ModuleSell'] and entry['Slot'] == 'PaintJob':
                # Extract the paint-job symbol from e.g. '$..._name;'.
                symbol = re.match('\$(.+)_name;', entry.get('BuyItem', ''))
                self.shippaint = symbol and symbol.group(1).lower() or entry.get('BuyItem', '')
            elif entry['event'] in ['Undocked']:
                self.station = None
            elif entry['event'] in ['Location', 'FSDJump', 'Docked']:
                if entry['event'] != 'Docked':
                    self.body = None
                if 'StarPos' in entry:
                    self.coordinates = tuple(entry['StarPos'])
                elif self.system != entry['StarSystem']:
                    self.coordinates = None	# Docked event doesn't include coordinates
                self.system = entry['StarSystem'] == 'ProvingGround' and 'CQC' or entry['StarSystem']
                self.station = entry.get('StationName')	# May be None
            elif entry['event'] == 'SupercruiseExit':
                self.body = entry.get('BodyType') == 'Planet' and entry.get('Body')
            elif entry['event'] == 'SupercruiseEntry':
                self.body = None
            elif entry['event'] in ['Rank', 'Promotion'] and self.ranks:
                for k,v in entry.iteritems():
                    if k in self.ranks:
                        self.ranks[k] = (v,0)
            elif entry['event'] == 'Progress' and self.ranks:
                for k,v in entry.iteritems():
                    if self.ranks.get(k) is not None:
                        self.ranks[k] = (self.ranks[k][0], min(v, 100))	# perhaps not taken promotion mission yet
            return entry
        except:
            if __debug__:
                print 'Invalid journal entry "%s"' % repr(line)
            return { 'event': None }

    def get_entry(self):
        """Pop and parse the oldest queued journal line, or None if empty."""
        if not self.event_queue:
            return None
        else:
            return self.parse_entry(self.event_queue.pop(0))
class INotify(PollMixin):
    """
    I am a prototype INotify, made to work on Mac OS X (Darwin)
    using the Watchdog python library. This is actually a simplified subset
    of the twisted Linux INotify class because we do not utilize the watch mask
    and only implement the following methods:
     - watch
     - startReading
     - stopReading
     - wait_until_stopped
     - set_pending_delay
    """
    def __init__(self):
        self._pending_delay = 1.0
        self.recursive_includes_new_subdirectories = False
        self._callbacks = {}		# watched path -> list of callbacks
        self._watches = {}		# watched path -> watchdog ObservedWatch
        self._state = NOT_STARTED	# NOT_STARTED -> STARTED -> STOPPING -> STOPPED
        self._observer = Observer(timeout=self._pending_delay)

    def set_pending_delay(self, delay):
        """Set the observer timeout; only allowed before startReading."""
        Message.log(message_type=u"watchdog:inotify:set-pending-delay", delay=delay)
        assert self._state != STARTED
        self._pending_delay = delay

    def startReading(self):
        """Start the watchdog observer thread."""
        with start_action(action_type=u"watchdog:inotify:start-reading"):
            assert self._state != STARTED
            try:
                # XXX twisted.internet.inotify doesn't require watches to
                # be set before startReading is called.
                # _assert(len(self._callbacks) != 0, "no watch set")
                self._observer.start()
                self._state = STARTED
            except:
                self._state = STOPPED
                raise

    def stopReading(self):
        """Unschedule all watches and shut the observer thread down."""
        with start_action(action_type=u"watchdog:inotify:stop-reading"):
            if self._state != STOPPED:
                self._state = STOPPING

            self._observer.unschedule_all()
            self._observer.stop()
            self._observer.join()
            self._state = STOPPED

    def wait_until_stopped(self):
        # PollMixin helper: resolves once stopReading has completed.
        return self.poll(lambda: self._state == STOPPED)

    def _isWatched(self, path_u):
        return path_u in self._callbacks.keys()

    def ignore(self, path):
        """Remove the watch previously registered for `path`."""
        path_u = path.path
        self._observer.unschedule(self._watches[path_u])
        del self._callbacks[path_u]
        del self._watches[path_u]

    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        """Register callbacks for events under `path`.

        `autoAdd` must be False; `mask` is accepted for API compatibility
        (the class docstring notes the watch mask is not utilized).
        """
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        # Registering the same path twice is a silent no-op.
        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u], self._pending_delay),
                path=path_u,
                recursive=False,
            )
class ProjDirFactory(Factory):
    """A Factory that watches a Project directory and dynamically keeps
    the set of available types up-to-date as project files change.
    """
    def __init__(self, watchdir, use_observer=True, observer=None):
        super(ProjDirFactory, self).__init__()
        self._lock = threading.RLock()
        self.observer = None
        self.watchdir = watchdir
        self._files = {}	# mapping of file pathnames to _FileInfo objects
        self._classes = {}	# mapping of class names to _FileInfo objects
        try:
            added_set = set()
            changed_set = set()
            deleted_set = set()

            modeldir = watchdir + PROJ_DIR_EXT
            if modeldir not in sys.path:
                sys.path = [modeldir] + sys.path
                logger.info("added %s to sys.path" % modeldir)

            # Prime the caches with every .py file already in the project.
            for pyfile in find_files(self.watchdir, "*.py"):
                self.on_modified(pyfile, added_set, changed_set, deleted_set)

            if use_observer:
                self._start_observer(observer)
                self.publish_updates(added_set, changed_set, deleted_set)
            else:
                # sometimes for debugging/testing it's easier to turn observer off
                self.observer = None
        except Exception as err:
            self._error(str(err))
            logger.error(str(err))

    def _start_observer(self, observer):
        """Attach a PyWatcher to `observer`, creating (and owning) one if None."""
        if observer is None:
            self.observer = Observer()
            self._ownsobserver = True
        else:
            self.observer = observer
            self._ownsobserver = False
        self.observer.schedule(PyWatcher(self), path=self.watchdir, recursive=True)
        if self._ownsobserver:
            self.observer.daemon = True
            self.observer.start()

    def create(self, typ, version=None, server=None, res_desc=None, **ctor_args):
        """Create and return an instance of the specified type, or None if
        this Factory can't satisfy the request.
        """
        # Only local, unconstrained requests can be satisfied here.
        if server is None and res_desc is None:
            klass = self._lookup(typ, version)
            if klass is not None:
                return klass(**ctor_args)
        return None

    def _lookup(self, typ, version):
        """ Return class for `typ`.
        """
        try:
            cinfo = self._classes[typ].classes[typ]
            mod = sys.modules[cinfo['modpath']]
            return getattr(mod, cinfo['ctor'])
        except KeyError:
            return None

    def get_available_types(self, groups=None):
        """Return a list of available types."""
        with self._lock:
            typset = set(self._classes.keys())
            types = []

            if groups is None:
                ifaces = set([v[0] for v in plugin_groups.values()])
            else:
                ifaces = set([v[0] for k, v in plugin_groups.items() if k in groups])

            for typ in typset:
                finfo = self._classes[typ]
                meta = finfo.classes[typ]
                # Only expose types implementing at least one requested iface.
                if ifaces.intersection(meta['ifaces']):
                    meta = {
                        'bases': meta['bases'],
                        'ifaces': meta['ifaces'],
                        'version': meta['version'],
                        '_context': 'In Project',
                    }
                    types.append((typ, meta))

            return types

    def get_signature(self, typname, version=None):
        """Return constructor argument signature for *typname,* using the
        specified package version. The return value is a dictionary."""
        cls = self._lookup(typname, version)
        if cls is None:
            return None
        else:
            return self.form_signature(cls)

    def on_modified(self, fpath, added_set, changed_set, deleted_set):
        """Import or re-scan `fpath`, accumulating class add/change/delete sets."""
        if os.path.isdir(fpath):
            return None

        with self._lock:
            finfo = self._files.get(fpath)
            if finfo is None:
                # First time we see this file: parse it for class info.
                try:
                    fileinfo = _FileInfo(fpath)
                except Exception as err:
                    if isinstance(err, SyntaxError):
                        # Point at the offending column when we know it.
                        if err.offset:
                            msg = '%s%s^\n%s' % (err.text, ' ' * err.offset, str(err))
                        else:
                            msg = str(err)
                        self._error(msg)
                    else:
                        self._error(str(err))
                    return
                self._files[fpath] = fileinfo
                added_set.update(fileinfo.classes.keys())
                for cname in fileinfo.classes.keys():
                    self._classes[cname] = fileinfo
            else:  # updating a file that's already been imported
                try:
                    finfo.update(added_set, changed_set, deleted_set)
                except Exception as err:
                    if isinstance(err, SyntaxError):
                        if err.offset:
                            msg = '%s%s^\n%s' % (err.text, ' ' * err.offset, str(err))
                        else:
                            msg = str(err)
                        self._error(msg)
                    else:
                        self._error(str(err))
                    # A broken update invalidates everything we knew about it.
                    self._remove_fileinfo(fpath)
                    return
                for cname in added_set:
                    self._classes[cname] = finfo
                for cname in deleted_set:
                    del self._classes[cname]

    def on_deleted(self, fpath, deleted_set):
        """Drop cached info for a deleted file (or recurse for a directory)."""
        with self._lock:
            if os.path.isdir(fpath):
                for pyfile in find_files(self.watchdir, "*.py"):
                    self.on_deleted(pyfile, deleted_set)
            else:
                finfo = self._files.get(fpath)
                if finfo:
                    deleted_set.update(finfo.classes.keys())
                    self._remove_fileinfo(fpath)

    def publish_updates(self, added_set, changed_set, deleted_set):
        """Broadcast the current type list plus the add/change/delete deltas."""
        # NOTE(review): calls the module-level get_available_types(), not
        # self.get_available_types() -- confirm against the full file that
        # this is intentional.
        types = get_available_types()
        try:
            publish('types', [
                packagedict(types),
                list(added_set),
                list(changed_set),
                list(deleted_set),
            ])
        except:
            logger.error("publish of types failed")

    def _error(self, msg):
        # Log and surface the error on both console and file channels.
        logger.error(msg)
        publish('console_errors', msg)
        publish('file_errors', msg)

    def _remove_fileinfo(self, fpath):
        """Clean up all data related to the given file. This typically
        occurs when there is some error during the import of the file.
        """
        finfo = self._files.get(fpath)
        if finfo:
            classes = [c for c, f in self._classes.items() if f is finfo]
            for klass in classes:
                del self._classes[klass]
            del self._files[fpath]
            return classes

    def cleanup(self):
        """If this factory is removed from the FactoryManager during execution,
        this function will stop the watchdog observer thread.
        """
        # NOTE(review): __init__ added watchdir + PROJ_DIR_EXT to sys.path,
        # but this removes self.watchdir -- verify which entry is intended.
        try:
            sys.path.remove(self.watchdir)
        except:
            pass
        if self.observer and self._ownsobserver:
            self.observer.unschedule_all()
            self.observer.stop()
            self.observer.join()
class ChangesWatcher(FileSystemEventHandler): def __init__(self, application): self.application = application self.observer = Observer() self.changes = [] self.mute_list = [] self.path = None self.changes_timer = None self.observer.start() def observe(self, path, mute_list=None): self.path = path self.mute_list = (mute_list or []) + DEFAULT_MUTE_LIST self.observer.unschedule_all() self.observer.schedule(self, self.path, recursive=True) def get_changes_since(self, timestamp): ret = [] for change in self.changes: if change.timestamp > timestamp: ret.append(change) return ret def add_pure_change(self, change): """ 监测change的类型,并添加非垃圾change和不在黑名单中的change """ # 如果是黑名单及黑名单子目录的change,则跳过 for black_path in self.mute_list: if path_is_parent(black_path, change.path): print '...', change.type, change.path return # 寻找当前change对应的垃圾change,找到后删除;未找到则添加当前change trash_changes = self.find_related_trash_changes(change) if trash_changes: for change in trash_changes: self.changes.remove(change) print '- ', change.type, change.path else: self.changes.append(change) print '+ ', change.type, change.path self.compile_if_necessary(change) ioloop.IOLoop.instance().add_callback(lambda: self.remove_outdated_changes(30)) def compile_if_necessary(self, change): if change.type == EVENT_TYPE_DELETED: return abs_path = normalize_path(os.path.join(self.path, change.path)) base_path, ext = os.path.splitext(abs_path) ext = ext.lower() if ext not in['.less', '.coffee']: return if not self.application.active_project: return active_project = self.application.active_project begin_time = time.time() os.chdir(APP_FOLDER) if ext == '.less': if active_project.compileLess: output_path = base_path + '.css' cmd = 'bundled/node.exe bundled/less/bin/lessc %s %s' % (abs_path, output_path) os.system(cmd.replace('/', '\\')) print 'compile:', change.path, time.time() - begin_time, 'seconds' else: print 'skip compile', change.path, '(setting is off)' elif ext == '.coffee': if active_project.compileCoffee: output_path = 
base_path + '.js' cmd = 'bundled/node.exe bundled/coffee/bin/coffee --compile %s' % (abs_path, ) os.system(cmd.replace('/', '\\')) print 'compile:', change.path, time.time() - begin_time, 'seconds' else: print 'skip compile', change.path, '(setting is off)' def if_folder_changed(self, folder_path): print folder_path import sys from watchdog.events import FileModifiedEvent if sys.platform.startswith('win'): return now = time.time() - 2.5 # 2.5秒内的都算修改 if not os.path.isdir(folder_path): return # ignore for filename in os.listdir(folder_path): file_path = os.path.join(folder_path, filename) if not os.path.isfile(file_path): continue modified_time = os.path.getmtime(file_path) if modified_time > now: self.on_any_event(FileModifiedEvent(file_path)) def on_any_event(self, event): if event.is_directory: self.if_folder_changed(event.src_path) return # 暂停文件变更的上报, 以免中途编译占用太长时间,而将事件提前返回 loop = ioloop.IOLoop.instance() if self.changes_timer: ioloop.IOLoop.instance().remove_timeout(self.changes_timer) now = time.time() src_relative_path = get_rel_path(event.src_path, self.path) if event.event_type == EVENT_TYPE_MOVED: self.add_pure_change(Change(dict(timestamp=now, path=src_relative_path, type=EVENT_TYPE_DELETED))) dest_relative_path = get_rel_path(event.dest_path, self.path) self.add_pure_change(Change(dict(timestamp=now, path=dest_relative_path, type=EVENT_TYPE_CREATED))) else: self.add_pure_change(Change(dict(timestamp=now, path=src_relative_path, type=event.event_type))) # 延迟0.1秒上报变更,防止有些事件连续发生时错过 self.changes_timer = loop.add_timeout(time.time() + 0.1, self.application.project_file_changed) def find_related_trash_changes(self, change): """ 寻找当前change之前短时间内的一些垃圾change 有些编辑器喜欢用 改名->写入->改回名 的方式来保存文件,所以不能直接将change上报,需要进行一定的过滤 """ trash_changes = [] for old_change in self.changes[::-1]: if old_change.path != change.path: continue if change.timestamp - old_change.timestamp > 1: break if change.type == EVENT_TYPE_DELETED: # 如果当前change类型是DELETED,那么返回所有该文件的change事件,直到CREATED事件为止 
trash_changes.append(old_change) if old_change.type == EVENT_TYPE_CREATED: return trash_changes elif change.type == EVENT_TYPE_CREATED: # 如果当前change类型是CREATED,那么返回所有该文件的change事件,直到DELETED事件为止 trash_changes.append(old_change) if old_change.type == EVENT_TYPE_DELETED: return trash_changes return [] def remove_outdated_changes(self, seconds): for change in self.changes[:]: if change.timestamp - time.time() > seconds: self.changes.remove(change)
class ModuleFileWatcher(EditorModule):
    """Editor module that watches the project's asset path with a watchdog
    Observer and triggers an asset-library rescan on any file signal."""

    def __init__(self):
        super(ModuleFileWatcher, self).__init__()
        self.watches = {}  # realpath -> watchdog watch handle

    def getName(self):
        return 'filewatcher'

    def getDependency(self):
        return []

    def onLoad(self):
        self.observer = Observer()
        self.observer.start()
        signals.connect('file.moved',    self.onFileMoved)
        signals.connect('file.added',    self.onFileCreated)
        signals.connect('file.removed',  self.onFileDeleted)
        signals.connect('file.modified', self.onFileModified)

    def onStart(self):
        # Note: startWatch already prepends the default ignore patterns, so
        # none are passed here (the original passed them twice, producing a
        # duplicated pattern list).
        self.assetWatcher = self.startWatch(self.getProject().getAssetPath())

    def startWatch(self, path, **options):
        """Schedule a recursive watch on `path`.

        Returns the existing watch if the path is already watched, otherwise
        the newly scheduled one. Options: patterns, ignorePatterns,
        ignoreDirectories, caseSensitive, recursive.
        """
        path = os.path.realpath(path)
        if self.watches.get(path):
            logging.warning('already watching: %s' % path)
            return self.watches[path]
        logging.info('start watching: %s' % path)
        ignorePatterns = ['*/.git', '*/.*', '*/_gii'] + options.get('ignorePatterns', [])
        handler = FileWatcherEventHandler(
            options.get('patterns', None),
            ignorePatterns,
            options.get('ignoreDirectories', False),
            options.get('caseSensitive', True))
        watch = self.observer.schedule(handler, path, options.get('recursive', True))
        self.watches[path] = watch
        return watch

    def onStop(self):
        self.observer.stop()
        self.observer.join(0.5)

    def stopWatch(self, path):
        """Unschedule the watch on `path`, if any."""
        path = os.path.realpath(path)
        watch = self.watches.get(path, None)
        if not watch:
            return
        self.observer.unschedule(watch)
        # Fix: drop the key entirely instead of storing None, so the dict
        # does not accumulate stale entries over repeated start/stop cycles.
        self.watches.pop(path, None)

    def stopAllWatches(self):
        self.observer.unschedule_all()
        self.watches = {}

    def onFileMoved(self, path, newpath):
        app.getAssetLibrary().scheduleScanProject()

    def onFileCreated(self, path):
        app.getAssetLibrary().scheduleScanProject()

    def onFileModified(self, path):
        app.getAssetLibrary().scheduleScanProject()

    def onFileDeleted(self, path):
        app.getAssetLibrary().scheduleScanProject()
class ScriptsManager(QtCore.QObject):
    """Watches the directories containing a package's script files and
    re-queries the scene list (via a subprocess) whenever one changes."""

    scripts_changed = QtCore.pyqtSignal(list, name='scriptsChanged')
    error = QtCore.pyqtSignal(str)

    def __init__(self, module_pkgname):
        super(ScriptsManager, self).__init__()
        self._module_pkgname = module_pkgname
        self._dirs_to_watch = set()
        self._files_to_watch = set()
        self._event_handler = FileSystemEventHandler()
        self._event_handler.on_any_event = self._on_any_event
        self._observer = Observer()
        self._observer.start()
        self._timer = None

    def start(self):
        self.reload()

    def resume(self):
        # Re-schedule every watched directory from scratch.
        self._observer.unschedule_all()
        for directory in self._dirs_to_watch:
            self._observer.schedule(self._event_handler, directory or '.')

    def pause(self):
        self._observer.unschedule_all()

    @QtCore.pyqtSlot()
    def reload(self):
        """Query the subprocess for the file/scene lists and emit results."""
        self.pause()
        odict = query_subproc(query='list', pkg=self._module_pkgname)
        if 'error' in odict:
            self.update_filelist(odict['filelist'])
            self.resume()
            self.error.emit(odict['error'])
            return
        self.error.emit(None)
        self.set_filelist(odict['filelist'])
        self.resume()
        self.scripts_changed.emit(odict['scenes'])

    def _on_any_event(self, event):
        # Ignore events for files we are not tracking.
        if event.src_path not in self._files_to_watch:
            return
        print('Change detected in %s' % event.src_path)
        if self._timer is not None:
            self._timer.cancel()

        def print_reload():
            print('Reloading scene')
            self.reload()

        # Debounce: only reload after MIN_RELOAD_INTERVAL of quiet.
        self._timer = Timer(MIN_RELOAD_INTERVAL, print_reload, ())
        self._timer.start()

    def _update_dirs_to_watch(self):
        self._dirs_to_watch = {op.dirname(f) for f in self._files_to_watch}

    @QtCore.pyqtSlot(list)
    def update_filelist(self, filelist):
        self._files_to_watch.update(filelist)
        self._update_dirs_to_watch()

    @QtCore.pyqtSlot(list)
    def set_filelist(self, filelist):
        self._files_to_watch = set(filelist)
        self._update_dirs_to_watch()
class EDLogs(FileSystemEventHandler):
    """Monitors the Elite Dangerous Journal directory, parses journal entries
    on a worker thread, and queues them for the (Tk) main thread.

    NOTE(review): this is Python 2 code (print statements); behavior left
    byte-identical, only comments/docstrings added.
    """

    _POLL = 1  # Polling is cheap, so do it often
    _RE_CANONICALISE = re.compile(r'\$(.+)_name;')
    _RE_CATEGORY = re.compile(r'\$MICRORESOURCE_CATEGORY_(.+);')

    def __init__(self):
        FileSystemEventHandler.__init__(self)  # futureproofing - not need for current version of watchdog
        self.root = None
        self.currentdir = None  # The actual logdir that we're monitoring
        self.logfile = None
        self.observer = None
        self.observed = None  # a watchdog ObservedWatch, or None if polling
        self.thread = None
        self.event_queue = []  # For communicating journal entries back to main thread

        # On startup we might be:
        # 1) Looking at an old journal file because the game isn't running or the user has exited to the main menu.
        # 2) Looking at an empty journal (only 'Fileheader') because the user is at the main menu.
        # 3) In the middle of a 'live' game.
        # If 1 or 2 a LoadGame event will happen when the game goes live.
        # If 3 we need to inject a special 'StartUp' event since consumers won't see the LoadGame event.
        self.live = False
        self.game_was_running = False  # For generation the "ShutDown" event

        # Context for journal handling
        self.version = None
        self.is_beta = False
        self.mode = None
        self.group = None
        self.cmdr = None
        self.planet = None
        self.system = None
        self.station = None
        self.stationtype = None
        self.coordinates = None
        self.systemaddress = None
        self.started = None  # Timestamp of the LoadGame event
        # NOTE(review): self.stationservices is assigned in stop()/parse_entry()
        # but never initialised here - confirm no read can happen first.

        # Cmdr state shared with EDSM and plugins
        self.state = {
            'Captain'      : None,  # On a crew
            'Cargo'        : defaultdict(int),
            'Credits'      : None,
            'FID'          : None,  # Frontier Cmdr ID
            'Horizons'     : None,  # Does this user have Horizons?
            'Loan'         : None,
            'Raw'          : defaultdict(int),
            'Manufactured' : defaultdict(int),
            'Encoded'      : defaultdict(int),
            'Engineers'    : {},
            'Rank'         : {},
            'Reputation'   : {},
            'Statistics'   : {},
            'Role'         : None,  # Crew role - None, Idle, FireCon, FighterCon
            'Friends'      : set(),  # Online friends
            'ShipID'       : None,
            'ShipIdent'    : None,
            'ShipName'     : None,
            'ShipType'     : None,
            'HullValue'    : None,
            'ModulesValue' : None,
            'Rebuy'        : None,
            'Modules'      : None,
        }

    def start(self, root):
        """Begin monitoring the journal directory. Returns False on failure."""
        self.root = root
        logdir = config.get('journaldir') or config.default_journal_dir
        if not logdir or not isdir(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        # Latest pre-existing logfile - e.g. if E:D is already running. Assumes logs sort alphabetically.
        # Do this before setting up the observer in case the journal directory has gone away
        try:
            logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal') and x.endswith('.log')],
                              key=lambda x: x.split('.')[1:])
            self.logfile = logfiles and join(self.currentdir, logfiles[-1]) or None
        except:
            self.logfile = None
            return False

        # Set up a watchdog observer.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('journaldir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
        elif polling and self.observer:
            self.observer.stop()
            self.observer = None

        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        if __debug__:
            print '%s Journal "%s"' % (polling and 'Polling' or 'Monitoring', self.currentdir)
            print 'Start logfile "%s"' % self.logfile

        if not self.running():
            self.thread = threading.Thread(target = self.worker, name = 'Journal worker')
            self.thread.daemon = True
            self.thread.start()

        return True

    def stop(self):
        """Stop watching the journal and reset all per-session context."""
        if __debug__:
            print 'Stopping monitoring Journal'
        self.currentdir = None
        self.version = self.mode = self.group = self.cmdr = self.planet = self.system = self.station = self.stationtype = self.stationservices = self.coordinates = self.systemaddress = None
        self.is_beta = False
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.thread = None  # Orphan the worker thread - will terminate at next poll

    def close(self):
        """Fully shut down, including the watchdog observer thread."""
        thread = self.thread
        self.stop()
        if self.observer:
            self.observer.stop()
        if self.observer:
            self.observer.join()
            self.observer = None

    def running(self):
        # True while the worker thread is alive.
        return self.thread and self.thread.is_alive()

    def on_created(self, event):
        # watchdog callback, e.g. client (re)started.
        if not event.is_directory and basename(event.src_path).startswith('Journal') and basename(event.src_path).endswith('.log'):
            self.logfile = event.src_path

    def worker(self):
        """Worker-thread loop: tail the active journal file and queue lines.

        Tk isn't thread-safe in general.
        event_generate() is the only safe way to poke the main thread from this thread:
        https://mail.python.org/pipermail/tkinter-discuss/2013-November/003522.html
        """
        # Seek to the end of the latest log file
        logfile = self.logfile
        if logfile:
            loghandle = open(logfile, 'rb', 0)  # unbuffered
            if platform == 'darwin':
                fcntl(loghandle, F_GLOBAL_NOCACHE, -1)  # required to avoid corruption on macOS over SMB
            for line in loghandle:
                try:
                    self.parse_entry(line)  # Some events are of interest even in the past
                except:
                    if __debug__:
                        print 'Invalid journal entry "%s"' % repr(line)
            logpos = loghandle.tell()
        else:
            loghandle = None

        self.game_was_running = self.game_running()

        if self.live:
            if self.game_was_running:
                # Game is running locally
                entry = OrderedDict([
                    ('timestamp', strftime('%Y-%m-%dT%H:%M:%SZ', gmtime())),
                    ('event', 'StartUp'),
                    ('StarSystem', self.system),
                    ('StarPos', self.coordinates),
                    ('SystemAddress', self.systemaddress),
                ])
                if self.planet:
                    entry['Body'] = self.planet
                entry['Docked'] = bool(self.station)
                if self.station:
                    entry['StationName'] = self.station
                    entry['StationType'] = self.stationtype
                self.event_queue.append(json.dumps(entry, separators=(', ', ':')))
            else:
                self.event_queue.append(None)  # Generate null event to update the display (with possibly out-of-date info)
            self.live = False

        # Watchdog thread
        emitter = self.observed and self.observer._emitter_for_watch[self.observed]  # Note: Uses undocumented attribute

        while True:
            # Check whether new log file started, e.g. client (re)started.
            if emitter and emitter.is_alive():
                newlogfile = self.logfile  # updated by on_created watchdog callback
            else:
                # Poll
                try:
                    logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal') and x.endswith('.log')],
                                      key=lambda x: x.split('.')[1:])
                    newlogfile = logfiles and join(self.currentdir, logfiles[-1]) or None
                except:
                    if __debug__:
                        print_exc()
                    newlogfile = None

            if logfile != newlogfile:
                logfile = newlogfile
                if loghandle:
                    loghandle.close()
                if logfile:
                    loghandle = open(logfile, 'rb', 0)  # unbuffered
                    if platform == 'darwin':
                        fcntl(loghandle, F_GLOBAL_NOCACHE, -1)  # required to avoid corruption on macOS over SMB
                    logpos = 0
                if __debug__:
                    print 'New logfile "%s"' % logfile

            if logfile:
                loghandle.seek(0, SEEK_END)  # required to make macOS notice log change over SMB
                loghandle.seek(logpos, SEEK_SET)  # reset EOF flag
                for line in loghandle:
                    self.event_queue.append(line)
                if self.event_queue:
                    self.root.event_generate('<<JournalEvent>>', when="tail")
                logpos = loghandle.tell()

            sleep(self._POLL)

            # Check whether we're still supposed to be running
            if threading.current_thread() != self.thread:
                return  # Terminate

            if self.game_was_running:
                if not self.game_running():
                    self.event_queue.append('{ "timestamp":"%s", "event":"ShutDown" }' % strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()))
                    self.root.event_generate('<<JournalEvent>>', when="tail")
                    self.game_was_running = False
            else:
                self.game_was_running = self.game_running()

    def parse_entry(self, line):
        """Parse one journal line (JSON), update tracked state, return the entry.

        Returns {'event': None} for a None line or on any parse failure.
        """
        if line is None:
            return { 'event': None }  # Fake startup event

        try:
            entry = json.loads(line, object_pairs_hook=OrderedDict)  # Preserve property order because why not?
            entry['timestamp']  # we expect this to exist
            if entry['event'] == 'Fileheader':
                # New journal file: reset everything to a clean slate.
                self.live = False
                self.version = entry['gameversion']
                self.is_beta = 'beta' in entry['gameversion'].lower()
                self.cmdr = None
                self.mode = None
                self.group = None
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.systemaddress = None
                self.started = None
                self.state = {
                    'Captain'      : None,
                    'Cargo'        : defaultdict(int),
                    'Credits'      : None,
                    'FID'          : None,
                    'Horizons'     : None,
                    'Loan'         : None,
                    'Raw'          : defaultdict(int),
                    'Manufactured' : defaultdict(int),
                    'Encoded'      : defaultdict(int),
                    'Engineers'    : {},
                    'Rank'         : {},
                    'Reputation'   : {},
                    'Statistics'   : {},
                    'Role'         : None,
                    'Friends'      : set(),
                    'ShipID'       : None,
                    'ShipIdent'    : None,
                    'ShipName'     : None,
                    'ShipType'     : None,
                    'HullValue'    : None,
                    'ModulesValue' : None,
                    'Rebuy'        : None,
                    'Modules'      : None,
                }
            elif entry['event'] == 'Commander':
                self.live = True  # First event in 3.0
            elif entry['event'] == 'LoadGame':
                self.cmdr = entry['Commander']
                self.mode = entry.get('GameMode')  # 'Open', 'Solo', 'Group', or None for CQC (and Training - but no LoadGame event)
                self.group = entry.get('Group')
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.systemaddress = None
                self.started = timegm(strptime(entry['timestamp'], '%Y-%m-%dT%H:%M:%SZ'))
                self.state.update({  # Don't set Ship, ShipID etc since this will reflect Fighter or SRV if starting in those
                    'Captain'    : None,
                    'Credits'    : entry['Credits'],
                    'FID'        : entry.get('FID'),  # From 3.3
                    'Horizons'   : entry['Horizons'],  # From 3.0
                    'Loan'       : entry['Loan'],
                    'Engineers'  : {},
                    'Rank'       : {},
                    'Reputation' : {},
                    'Statistics' : {},
                    'Role'       : None,
                })
            elif entry['event'] == 'NewCommander':
                self.cmdr = entry['Name']
                self.group = None
            elif entry['event'] == 'SetUserShipName':
                self.state['ShipID'] = entry['ShipID']
                if 'UserShipId' in entry:  # Only present when changing the ship's ident
                    self.state['ShipIdent'] = entry['UserShipId']
                self.state['ShipName'] = entry.get('UserShipName')
                self.state['ShipType'] = self.canonicalise(entry['Ship'])
            elif entry['event'] == 'ShipyardBuy':
                self.state['ShipID'] = None
                self.state['ShipIdent'] = None
                self.state['ShipName'] = None
                self.state['ShipType'] = self.canonicalise(entry['ShipType'])
                self.state['HullValue'] = None
                self.state['ModulesValue'] = None
                self.state['Rebuy'] = None
                self.state['Modules'] = None
            elif entry['event'] == 'ShipyardSwap':
                self.state['ShipID'] = entry['ShipID']
                self.state['ShipIdent'] = None
                self.state['ShipName'] = None
                self.state['ShipType'] = self.canonicalise(entry['ShipType'])
                self.state['HullValue'] = None
                self.state['ModulesValue'] = None
                self.state['Rebuy'] = None
                self.state['Modules'] = None
            elif (entry['event'] == 'Loadout' and
                  not 'fighter' in self.canonicalise(entry['Ship']) and
                  not 'buggy' in self.canonicalise(entry['Ship'])):
                self.state['ShipID'] = entry['ShipID']
                self.state['ShipIdent'] = entry['ShipIdent']
                self.state['ShipName'] = entry['ShipName']
                self.state['ShipType'] = self.canonicalise(entry['Ship'])
                self.state['HullValue'] = entry.get('HullValue')  # not present on exiting Outfitting
                self.state['ModulesValue'] = entry.get('ModulesValue')  # "
                self.state['Rebuy'] = entry.get('Rebuy')
                # Remove spurious differences between initial Loadout event and subsequent
                self.state['Modules'] = {}
                for module in entry['Modules']:
                    module = dict(module)
                    module['Item'] = self.canonicalise(module['Item'])
                    if ('Hardpoint' in module['Slot'] and
                        not module['Slot'].startswith('TinyHardpoint') and
                        module.get('AmmoInClip') == module.get('AmmoInHopper') == 1):  # lasers
                        module.pop('AmmoInClip')
                        module.pop('AmmoInHopper')
                    self.state['Modules'][module['Slot']] = module
            elif entry['event'] == 'ModuleBuy':
                self.state['Modules'][entry['Slot']] = {
                    'Slot'     : entry['Slot'],
                    'Item'     : self.canonicalise(entry['BuyItem']),
                    'On'       : True,
                    'Priority' : 1,
                    'Health'   : 1.0,
                    'Value'    : entry['BuyPrice'],
                }
            elif entry['event'] == 'ModuleSell':
                self.state['Modules'].pop(entry['Slot'], None)
            elif entry['event'] == 'ModuleSwap':
                toitem = self.state['Modules'].get(entry['ToSlot'])
                self.state['Modules'][entry['ToSlot']] = self.state['Modules'][entry['FromSlot']]
                if toitem:
                    self.state['Modules'][entry['FromSlot']] = toitem
                else:
                    self.state['Modules'].pop(entry['FromSlot'], None)
            elif entry['event'] in ['Undocked']:
                self.station = None
                self.stationtype = None
                self.stationservices = None
            elif entry['event'] in ['Location', 'FSDJump', 'Docked']:
                if entry['event'] == 'Location':
                    self.planet = entry.get('Body') if entry.get('BodyType') == 'Planet' else None
                elif entry['event'] == 'FSDJump':
                    self.planet = None
                if 'StarPos' in entry:
                    self.coordinates = tuple(entry['StarPos'])
                elif self.system != entry['StarSystem']:
                    self.coordinates = None  # Docked event doesn't include coordinates
                self.systemaddress = entry.get('SystemAddress')
                (self.system, self.station) = (entry['StarSystem'] == 'ProvingGround' and 'CQC' or entry['StarSystem'],
                                               entry.get('StationName'))  # May be None
                self.stationtype = entry.get('StationType')  # May be None
                self.stationservices = entry.get('StationServices')  # None under E:D < 2.4
            elif entry['event'] == 'ApproachBody':
                self.planet = entry['Body']
            elif entry['event'] in ['LeaveBody', 'SupercruiseEntry']:
                self.planet = None
            elif entry['event'] in ['Rank', 'Promotion']:
                payload = dict(entry)
                payload.pop('event')
                payload.pop('timestamp')
                for k,v in payload.iteritems():
                    self.state['Rank'][k] = (v,0)
            elif entry['event'] == 'Progress':
                for k,v in entry.iteritems():
                    if k in self.state['Rank']:
                        self.state['Rank'][k] = (self.state['Rank'][k][0], min(v, 100))  # perhaps not taken promotion mission yet
            elif entry['event'] in ['Reputation', 'Statistics']:
                payload = OrderedDict(entry)
                payload.pop('event')
                payload.pop('timestamp')
                self.state[entry['event']] = payload
            elif entry['event'] == 'EngineerProgress':
                if 'Engineers' in entry:  # Startup summary
                    self.state['Engineers'] = { e['Engineer']: (e['Rank'], e.get('RankProgress', 0)) if 'Rank' in e else e['Progress'] for e in entry['Engineers'] }
                else:  # Promotion
                    self.state['Engineers'][entry['Engineer']] = (entry['Rank'], entry.get('RankProgress', 0)) if 'Rank' in entry else entry['Progress']
            elif entry['event'] == 'Cargo' and entry.get('Vessel') == 'Ship':
                self.state['Cargo'] = defaultdict(int)
                if 'Inventory' not in entry:  # From 3.3 full Cargo event (after the first one) is written to a separate file
                    with open(join(self.currentdir, 'Cargo.json'), 'rb') as h:
                        entry = json.load(h, object_pairs_hook=OrderedDict)  # Preserve property order because why not?
                self.state['Cargo'].update({ self.canonicalise(x['Name']): x['Count'] for x in entry['Inventory'] })
            elif entry['event'] in ['CollectCargo', 'MarketBuy', 'BuyDrones', 'MiningRefined']:
                commodity = self.canonicalise(entry['Type'])
                self.state['Cargo'][commodity] += entry.get('Count', 1)
            elif entry['event'] in ['EjectCargo', 'MarketSell', 'SellDrones']:
                commodity = self.canonicalise(entry['Type'])
                self.state['Cargo'][commodity] -= entry.get('Count', 1)
                if self.state['Cargo'][commodity] <= 0:
                    self.state['Cargo'].pop(commodity)
            elif entry['event'] == 'SearchAndRescue':
                for item in entry.get('Items', []):
                    commodity = self.canonicalise(item['Name'])
                    self.state['Cargo'][commodity] -= item.get('Count', 1)
                    if self.state['Cargo'][commodity] <= 0:
                        self.state['Cargo'].pop(commodity)
            elif entry['event'] == 'Materials':
                for category in ['Raw', 'Manufactured', 'Encoded']:
                    self.state[category] = defaultdict(int)
                    self.state[category].update({ self.canonicalise(x['Name']): x['Count'] for x in entry.get(category, []) })
            elif entry['event'] == 'MaterialCollected':
                material = self.canonicalise(entry['Name'])
                self.state[entry['Category']][material] += entry['Count']
            elif entry['event'] in ['MaterialDiscarded', 'ScientificResearch']:
                material = self.canonicalise(entry['Name'])
                self.state[entry['Category']][material] -= entry['Count']
                if self.state[entry['Category']][material] <= 0:
                    self.state[entry['Category']].pop(material)
            elif entry['event'] == 'Synthesis':
                for category in ['Raw', 'Manufactured', 'Encoded']:
                    for x in entry['Materials']:
                        material = self.canonicalise(x['Name'])
                        if material in self.state[category]:
                            self.state[category][material] -= x['Count']
                            if self.state[category][material] <= 0:
                                self.state[category].pop(material)
            elif entry['event'] == 'MaterialTrade':
                category = self.category(entry['Paid']['Category'])
                self.state[category][entry['Paid']['Material']] -= entry['Paid']['Quantity']
                if self.state[category][entry['Paid']['Material']] <= 0:
                    self.state[category].pop(entry['Paid']['Material'])
                category = self.category(entry['Received']['Category'])
                self.state[category][entry['Received']['Material']] += entry['Received']['Quantity']
            elif entry['event'] == 'EngineerCraft' or (entry['event'] == 'EngineerLegacyConvert' and not entry.get('IsPreview')):
                for category in ['Raw', 'Manufactured', 'Encoded']:
                    for x in entry.get('Ingredients', []):
                        material = self.canonicalise(x['Name'])
                        if material in self.state[category]:
                            self.state[category][material] -= x['Count']
                            if self.state[category][material] <= 0:
                                self.state[category].pop(material)
                module = self.state['Modules'][entry['Slot']]
                assert(module['Item'] == self.canonicalise(entry['Module']))
                module['Engineering'] = {
                    'Engineer'      : entry['Engineer'],
                    'EngineerID'    : entry['EngineerID'],
                    'BlueprintName' : entry['BlueprintName'],
                    'BlueprintID'   : entry['BlueprintID'],
                    'Level'         : entry['Level'],
                    'Quality'       : entry['Quality'],
                    'Modifiers'     : entry['Modifiers'],
                }
                if 'ExperimentalEffect' in entry:
                    module['Engineering']['ExperimentalEffect'] = entry['ExperimentalEffect']
                    module['Engineering']['ExperimentalEffect_Localised'] = entry['ExperimentalEffect_Localised']
                else:
                    module['Engineering'].pop('ExperimentalEffect', None)
                    module['Engineering'].pop('ExperimentalEffect_Localised', None)
            elif entry['event'] == 'MissionCompleted':
                for reward in entry.get('CommodityReward', []):
                    commodity = self.canonicalise(reward['Name'])
                    self.state['Cargo'][commodity] += reward.get('Count', 1)
                for reward in entry.get('MaterialsReward', []):
                    if 'Category' in reward:  # Category not present in E:D 3.0
                        category = self.category(reward['Category'])
                        material = self.canonicalise(reward['Name'])
                        self.state[category][material] += reward.get('Count', 1)
            elif entry['event'] == 'EngineerContribution':
                commodity = self.canonicalise(entry.get('Commodity'))
                if commodity:
                    self.state['Cargo'][commodity] -= entry['Quantity']
                    if self.state['Cargo'][commodity] <= 0:
                        self.state['Cargo'].pop(commodity)
                material = self.canonicalise(entry.get('Material'))
                if material:
                    for category in ['Raw', 'Manufactured', 'Encoded']:
                        if material in self.state[category]:
                            self.state[category][material] -= entry['Quantity']
                            if self.state[category][material] <= 0:
                                self.state[category].pop(material)
            elif entry['event'] == 'TechnologyBroker':
                for thing in entry.get('Ingredients', []):  # 3.01
                    for category in ['Cargo', 'Raw', 'Manufactured', 'Encoded']:
                        item = self.canonicalise(thing['Name'])
                        if item in self.state[category]:
                            self.state[category][item] -= thing['Count']
                            if self.state[category][item] <= 0:
                                self.state[category].pop(item)
                for thing in entry.get('Commodities', []):  # 3.02
                    commodity = self.canonicalise(thing['Name'])
                    self.state['Cargo'][commodity] -= thing['Count']
                    if self.state['Cargo'][commodity] <= 0:
                        self.state['Cargo'].pop(commodity)
                for thing in entry.get('Materials', []):  # 3.02
                    material = self.canonicalise(thing['Name'])
                    category = thing['Category']
                    self.state[category][material] -= thing['Count']
                    if self.state[category][material] <= 0:
                        self.state[category].pop(material)
            elif entry['event'] == 'JoinACrew':
                self.state['Captain'] = entry['Captain']
                self.state['Role'] = 'Idle'
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.systemaddress = None
            elif entry['event'] == 'ChangeCrewRole':
                self.state['Role'] = entry['Role']
            elif entry['event'] == 'QuitACrew':
                self.state['Captain'] = None
                self.state['Role'] = None
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.systemaddress = None
            elif entry['event'] == 'Friends':
                if entry['Status'] in ['Online', 'Added']:
                    self.state['Friends'].add(entry['Name'])
                else:
                    self.state['Friends'].discard(entry['Name'])
            return entry
        except:
            if __debug__:
                print 'Invalid journal entry "%s"' % repr(line)
                print_exc()
            return { 'event': None }

    # Commodities, Modules and Ships can appear in different forms e.g. "$HNShockMount_Name;", "HNShockMount", and "hnshockmount",
    # "$int_cargorack_size6_class1_name;" and "Int_CargoRack_Size6_Class1", "python" and "Python", etc.
    # This returns a simple lowercased name e.g. 'hnshockmount', 'int_cargorack_size6_class1', 'python', etc
    def canonicalise(self, item):
        if not item: return ''
        item = item.lower()
        match = self._RE_CANONICALISE.match(item)
        return match and match.group(1) or item

    def category(self, item):
        # Strip the $MICRORESOURCE_CATEGORY_...; wrapper and capitalise.
        match = self._RE_CATEGORY.match(item)
        return (match and match.group(1) or item).capitalize()

    def get_entry(self):
        """Pop and parse the next queued journal line; may inject StartUp/ShutDown."""
        if not self.event_queue:
            return None
        else:
            entry = self.parse_entry(self.event_queue.pop(0))
            if not self.live and entry['event'] not in [None, 'Fileheader']:
                # Game not running locally, but Journal has been updated
                self.live = True
                if self.station:
                    entry = OrderedDict([
                        ('timestamp', strftime('%Y-%m-%dT%H:%M:%SZ', gmtime())),
                        ('event', 'StartUp'),
                        ('Docked', True),
                        ('StationName', self.station),
                        ('StationType', self.stationtype),
                        ('StarSystem', self.system),
                        ('StarPos', self.coordinates),
                        ('SystemAddress', self.systemaddress),
                    ])
                else:
                    entry = OrderedDict([
                        ('timestamp', strftime('%Y-%m-%dT%H:%M:%SZ', gmtime())),
                        ('event', 'StartUp'),
                        ('Docked', False),
                        ('StarSystem', self.system),
                        ('StarPos', self.coordinates),
                        ('SystemAddress', self.systemaddress),
                    ])
                self.event_queue.append(json.dumps(entry, separators=(', ', ':')))
            elif self.live and entry['event'] == 'Music' and entry.get('MusicTrack') == 'MainMenu':
                self.event_queue.append('{ "timestamp":"%s", "event":"ShutDown" }' % strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()))
            return entry

    def game_running(self):
        """Best-effort check for a locally running E:D client (macOS/Windows)."""
        if platform == 'darwin':
            for app in NSWorkspace.sharedWorkspace().runningApplications():
                if app.bundleIdentifier() == 'uk.co.frontier.EliteDangerous':
                    return True
        elif platform == 'win32':
            def WindowTitle(h):
                if h:
                    l = GetWindowTextLength(h) + 1
                    buf = ctypes.create_unicode_buffer(l)
                    if GetWindowText(h, buf, l):
                        return buf.value
                return None
            def callback(hWnd, lParam):
                name = WindowTitle(hWnd)
                if name and name.startswith('Elite - Dangerous'):
                    handle = GetProcessHandleFromHwnd(hWnd)
                    if handle:  # If GetProcessHandleFromHwnd succeeds then the app is already running as this user
                        CloseHandle(handle)
                        return False  # stop enumeration
                return True
            return not EnumWindows(EnumWindowsProc(callback), 0)
        return False

    # Return a subset of the received data describing the current ship as a Loadout event
    def ship(self, timestamped=True):
        if not self.state['Modules']:
            return None
        standard_order = ['ShipCockpit', 'CargoHatch', 'Armour', 'PowerPlant', 'MainEngines', 'FrameShiftDrive', 'LifeSupport', 'PowerDistributor', 'Radar', 'FuelTank']
        d = OrderedDict()
        if timestamped:
            d['timestamp'] = strftime('%Y-%m-%dT%H:%M:%SZ', gmtime())
        d['event'] = 'Loadout'
        d['Ship'] = self.state['ShipType']
        d['ShipID'] = self.state['ShipID']
        if self.state['ShipName']:
            d['ShipName'] = self.state['ShipName']
        if self.state['ShipIdent']:
            d['ShipIdent'] = self.state['ShipIdent']
        # sort modules by slot - hardpoints, standard, internal
        d['Modules'] = []
        for slot in sorted(self.state['Modules'], key=lambda x: ('Hardpoint' not in x, x not in standard_order and len(standard_order) or standard_order.index(x), 'Slot' not in x, x)):
            module = dict(self.state['Modules'][slot])
            module.pop('Health', None)
            module.pop('Value', None)
            d['Modules'].append(module)
        return d

    # Export ship loadout as a Loadout event
    def export_ship(self, filename=None):
        string = json.dumps(self.ship(False), ensure_ascii=False, indent=2, separators=(',', ': ')).encode('utf-8')  # pretty print
        if filename:
            with open(filename, 'wt') as h:
                h.write(string)
            return
        ship = ship_file_name(self.state['ShipName'], self.state['ShipType'])
        regexp = re.compile(re.escape(ship) + '\.\d\d\d\d\-\d\d\-\d\dT\d\d\.\d\d\.\d\d\.txt')
        oldfiles = sorted([x for x in listdir(config.get('outdir')) if regexp.match(x)])
        if oldfiles:
            with open(join(config.get('outdir'), oldfiles[-1]), 'rU') as h:
                if h.read() == string:
                    return  # same as last time - don't write
        # Write
        filename = join(config.get('outdir'), '%s.%s.txt' % (ship, strftime('%Y-%m-%dT%H.%M.%S', localtime(time()))))
        with open(filename, 'wt') as h:
            h.write(string)
class ProjectWatcher(EventDispatcher):
    '''ProjectWatcher is responsible for watching any changes
       in project directory. It will call self._callback whenever
       there are any changes. It can currently handle only one directory
       at a time.
    '''

    _active = BooleanProperty(True)
    '''Indicates if the watchdog can dispatch events
       :data:`active` is a :class:`~kivy.properties.BooleanProperty`
    '''

    _path = StringProperty('')
    '''Project folder
       :data:`path` is a :class:`~kivy.properties.StringProperty`
    '''

    # Fix: previously self._observer was only created in start_watching(), so
    # calling stop_watching()/_resume_watching() first raised AttributeError.
    # A class-level default makes those calls safe no-ops.
    _observer = None

    __events__ = ('on_project_modified',)

    def start_watching(self, path):
        '''To start watching project_dir.
        '''
        self._path = path
        self._observer = Observer()
        self._handler = ProjectEventHandler(project_watcher=self)
        self._watcher = self._observer.schedule(
            self._handler, path, recursive=True)
        self._observer.start()

    def on_project_modified(self, *args):
        pass

    def stop_watching(self):
        '''To stop watching currently watched directory. This will also call
           join() on the thread created by Observer.
        '''
        if self._observer:
            self._observer.unschedule_all()
            self._observer.stop()
            self._observer.join()
            self._observer = None

    def pause_watching(self):
        '''Pauses the watcher
        '''
        self._active = False

    def resume_watching(self, delay=1):
        '''Resume the watcher
           :param delay: seconds to start the watching
        '''
        Clock.schedule_once(self._resume_watching, delay)

    def _resume_watching(self, *args):
        if self._observer:
            # Drop events queued while paused so they are not replayed.
            self._observer.event_queue.queue.clear()
        self._active = True

    def on_any_event(self, event):
        if self._active:
            self.dispatch('on_project_modified', event)
class ProjDirFactory(Factory):
    """A Factory that watches a Project directory and dynamically keeps
    the set of available types up-to-date as project files change.
    """
    def __init__(self, watchdir, use_observer=True, observer=None):
        super(ProjDirFactory, self).__init__()
        self._lock = threading.RLock()  # guards _files/_classes across watcher thread
        self.observer = None
        self.watchdir = watchdir
        self._files = {}  # mapping of file pathnames to _FileInfo objects
        self._classes = {}  # mapping of class names to _FileInfo objects
        try:
            added_set = set()
            changed_set = set()
            deleted_set = set()
            modeldir = watchdir + PROJ_DIR_EXT
            if modeldir not in sys.path:
                sys.path = [modeldir] + sys.path
                logger.info("added %s to sys.path" % modeldir)
            # Prime the caches with every .py file already in the project
            for pyfile in find_files(self.watchdir, "*.py"):
                self.on_modified(pyfile, added_set, changed_set, deleted_set)
            if use_observer:
                self._start_observer(observer)
                self.publish_updates(added_set, changed_set, deleted_set)
            else:
                # sometimes for debugging/testing it's easier to turn observer off
                self.observer = None
        except Exception as err:
            self._error(str(err))
            logger.error(str(err))

    def _start_observer(self, observer):
        # Use the caller's observer if given; otherwise create (and own) one.
        if observer is None:
            self.observer = Observer()
            self._ownsobserver = True
        else:
            self.observer = observer
            self._ownsobserver = False
        self.observer.schedule(PyWatcher(self), path=self.watchdir, recursive=True)
        if self._ownsobserver:
            self.observer.daemon = True
            self.observer.start()

    def create(self, typ, version=None, server=None, res_desc=None, **ctor_args):
        """Create and return an instance of the specified type, or None if
        this Factory can't satisfy the request.
        """
        if server is None and res_desc is None:
            klass = self._lookup(typ, version)
            if klass is not None:
                return klass(**ctor_args)
        return None

    def _lookup(self, typ, version):
        """ Return class for `typ`.
        """
        try:
            cinfo = self._classes[typ].classes[typ]
            mod = sys.modules[cinfo['modpath']]
            return getattr(mod, cinfo['ctor'])
        except KeyError:
            return None

    def get_available_types(self, groups=None):
        """Return a list of available types."""
        with self._lock:
            typset = set(self._classes.keys())
            types = []
            if groups is None:
                ifaces = set([v[0] for v in plugin_groups.values()])
            else:
                ifaces = set(
                    [v[0] for k, v in plugin_groups.items() if k in groups])
            for typ in typset:
                finfo = self._classes[typ]
                meta = finfo.classes[typ]
                if ifaces.intersection(meta['ifaces']):
                    meta = {
                        'bases': meta['bases'],
                        'ifaces': meta['ifaces'],
                        'version': meta['version'],
                        '_context': 'In Project',
                    }
                    types.append((typ, meta))
            return types

    def get_signature(self, typname, version=None):
        """Return constructor argument signature for *typname,* using the
        specified package version. The return value is a dictionary."""
        cls = self._lookup(typname, version)
        if cls is None:
            return None
        else:
            return self.form_signature(cls)

    def on_modified(self, fpath, added_set, changed_set, deleted_set):
        # Watcher callback: (re)import fpath and record added/changed/deleted
        # class names in the caller-supplied sets.
        if os.path.isdir(fpath):
            return None
        with self._lock:
            finfo = self._files.get(fpath)
            if finfo is None:
                try:
                    fileinfo = _FileInfo(fpath)
                except Exception as err:
                    if isinstance(err, SyntaxError):
                        if err.offset:
                            msg = '%s%s^\n%s' % (err.text, ' ' * err.offset, str(err))
                        else:
                            msg = str(err)
                        self._error(msg)
                    else:
                        self._error(str(err))
                    return
                self._files[fpath] = fileinfo
                added_set.update(fileinfo.classes.keys())
                for cname in fileinfo.classes.keys():
                    self._classes[cname] = fileinfo
            else:  # updating a file that's already been imported
                try:
                    finfo.update(added_set, changed_set, deleted_set)
                except Exception as err:
                    if isinstance(err, SyntaxError):
                        if err.offset:
                            msg = '%s%s^\n%s' % (err.text, ' ' * err.offset, str(err))
                        else:
                            msg = str(err)
                        self._error(msg)
                    else:
                        self._error(str(err))
                    self._remove_fileinfo(fpath)
                    return
                for cname in added_set:
                    self._classes[cname] = finfo
                for cname in deleted_set:
                    del self._classes[cname]

    def on_deleted(self, fpath, deleted_set):
        # Watcher callback: forget classes defined in the deleted file; a
        # deleted directory rescans every remaining .py file.
        with self._lock:
            if os.path.isdir(fpath):
                for pyfile in find_files(self.watchdir, "*.py"):
                    self.on_deleted(pyfile, deleted_set)
            else:
                finfo = self._files.get(fpath)
                if finfo:
                    deleted_set.update(finfo.classes.keys())
                    self._remove_fileinfo(fpath)

    def publish_updates(self, added_set, changed_set, deleted_set):
        # NOTE(review): this calls the module-level get_available_types(), not
        # self.get_available_types() - confirm the shadowing is intentional.
        types = get_available_types()
        try:
            publish('types', [
                packagedict(types),
                list(added_set),
                list(changed_set),
                list(deleted_set),
            ])
        except:
            logger.error("publish of types failed")

    def _error(self, msg):
        # Log and broadcast an error message to both error channels.
        logger.error(msg)
        publish('console_errors', msg)
        publish('file_errors', msg)

    def _remove_fileinfo(self, fpath):
        """Clean up all data related to the given file. This typically occurs
        when there is some error during the import of the file.
        """
        finfo = self._files.get(fpath)
        if finfo:
            classes = [c for c, f in self._classes.items() if f is finfo]
            for klass in classes:
                del self._classes[klass]
            del self._files[fpath]
            return classes

    def cleanup(self):
        """If this factory is removed from the FactoryManager during execution,
        this function will stop the watchdog observer thread.
        """
        try:
            sys.path.remove(self.watchdir)
        except:
            pass
        if self.observer and self._ownsobserver:
            self.observer.unschedule_all()
            self.observer.stop()
            self.observer.join()
class ModuleFileWatcher(EditorModule):
    """Editor module that watches the project's asset tree and relays
    filesystem events to the asset library via the `signals` bus.
    """

    def __init__(self):
        super(ModuleFileWatcher, self).__init__()
        self.watches = {}  # realpath -> watchdog ObservedWatch

    def getName(self):
        return 'filewatcher'

    def getDependency(self):
        return []

    def onLoad(self):
        # One shared Observer thread for all watches.
        self.observer = Observer()
        self.observer.start()

        signals.connect('file.moved', self.onFileMoved)
        signals.connect('file.added', self.onFileCreated)
        signals.connect('file.removed', self.onFileDeleted)
        signals.connect('file.modified', self.onFileModified)

    def onStart(self):
        self.assetWatcher = self.startWatch(
            self.getProject().getAssetPath(),
            ignorePatterns=['*/.git', '*/.*', '*/_gii'])

    def startWatch(self, path, **options):
        """Schedule a watch on *path*; returns the existing watch if one
        is already active for the same (real) path.

        options: patterns, ignorePatterns, ignoreDirectories,
        caseSensitive, recursive (see FileWatcherEventHandler).
        """
        path = os.path.realpath(path)
        if self.watches.get(path):
            logging.warning('already watching: %s' % path)
            return self.watches[path]
        logging.info('start watching: %s' % path)

        # Always ignore VCS/internal folders in addition to caller patterns.
        ignorePatterns = ['*/.git', '*/.*', '*/_gii'] + options.get(
            'ignorePatterns', [])

        handler = FileWatcherEventHandler(
            options.get('patterns', None),
            ignorePatterns,
            options.get('ignoreDirectories', False),
            options.get('caseSensitive', True))

        watch = self.observer.schedule(handler,
                                       path,
                                       options.get('recursive', True))
        self.watches[path] = watch
        return watch

    def onStop(self):
        self.observer.stop()
        self.observer.join(0.5)

    def stopWatch(self, path):
        """Unschedule the watch on *path*, if any."""
        path = os.path.realpath(path)
        watch = self.watches.get(path, None)
        if not watch:
            return
        self.observer.unschedule(watch)
        # Fix: actually drop the entry instead of storing None, so the
        # watches dict does not accumulate stale keys over time.
        self.watches.pop(path, None)

    def stopAllWatches(self):
        self.observer.unschedule_all()
        self.watches = {}

    def onFileMoved(self, path, newpath):
        app.getAssetLibrary().scheduleScanProject()

    def onFileCreated(self, path):
        app.getAssetLibrary().scheduleScanProject()

    def onFileModified(self, path):
        app.getAssetLibrary().scheduleScanProject()

    def onFileDeleted(self, path):
        app.getAssetLibrary().scheduleScanProject()
class FolderWatcher(QtCore.QObject):
    """ Watches the webprint folder for changes.
        Creates the webprint folder if it does not exist.
        emits:
        addJob(path, printer)
    """
    # Re-emitted from the internal event handler: (file path, printer name).
    addJob = pyqtSignal(str, str)

    def __init__(self, parent=None):
        super(FolderWatcher, self).__init__(parent)
        settings = Settings()
        self.event_handler = _NewFileEventHandler()
        # Forward the handler's addJob signal straight through this object.
        self.event_handler.addJob.connect(self.addJob)
        # Ensure the folder layout exists before scheduling any watches.
        if not os.path.exists(settings.getWebprintFolder()):
            self.build_webprint_dir()
        self.observer = Observer()
        self.observer.start()
        self.start_watching()

    def stop_watching(self):
        """Stops folder watching. Detaches all watches.
        To actually stop the daemon thread, call self.observer.stop()
        """
        self.observer.unschedule_all()

    def start_watching(self):
        """Starts folder watching.
        Loads folders to watch from settings.
        """
        settings = Settings()
        webprint_dir = str(settings.getWebprintFolder())
        # One watch per installed printer's drop folder.
        for (name, location) in settings.getInstalledPrinters():
            path = os.path.join(webprint_dir, str(location))
            self.observer.schedule(self.event_handler, path)

    @pyqtSlot()
    def build_webprint_dir(self):
        """Create print folders in the 'webprint' directory.
        Delete files and folders not listed in settings.
        Creates the webprint directory if it does not exist.
        Folder watching is temporarily stopped during this operation.
        """
        self.stop_watching()
        settings = Settings()
        webprint_dir = settings.getWebprintFolder()
        if not os.path.exists(webprint_dir):
            os.makedirs(webprint_dir)
        folders = [location for (name, location) in settings.getInstalledPrinters()]
        ls = os.listdir(webprint_dir)
        # Create any missing printer folders; whatever remains in `ls`
        # afterwards is not in settings and gets deleted below.
        for folder in folders:
            if folder not in ls:
                os.mkdir(os.path.join(webprint_dir, folder))
            else:
                ls.remove(folder)
        for item in ls:
            shutil.rmtree(os.path.join(webprint_dir, item))
        self.start_watching()

    @pyqtSlot(str)
    def move_webprint_dir(self, dest):
        """Moves the webprint folder to a new location.
        Destination folder should not exist. If it does exist, it
        should be empty. If the webprint folder does not exist,
        creates it.
        Folder watching is temporarily stopped during this operation.
        """
        dest = str(dest)
        settings = Settings()
        webprint_dir = settings.getWebprintFolder()
        if os.path.exists(dest):
            # TODO: Handle errors on this delete
            shutil.rmtree(dest)
        os.mkdir(dest)
        if os.path.exists(webprint_dir):
            self.stop_watching()
            # Move contents file-by-file, then remove the now-empty source.
            for f in os.listdir(webprint_dir):
                # TODO: Handle errors here
                shutil.move(os.path.join(webprint_dir, f), dest)
            os.rmdir(webprint_dir)
            settings.setWebprintFolder(dest)
            self.start_watching()
        else:
            # webprint folder does not exist; build it fresh at `dest`
            settings.setWebprintFolder(dest)
            self.build_webprint_dir()
class ChangesWatcher(FileSystemEventHandler): def __init__(self, changes_handler=None): self.observer = Observer() self.changes = [] self.mute_list = [] self.path = None self.changes_handler = changes_handler self.changes_timer = None self.observer.start() def observe(self, path, mute_list=None): self.path = path self.mute_list = mute_list or [] self.observer.unschedule_all() self.observer.schedule(self, self.path, recursive=True) def get_changes_since(self, timestamp): ret = [] for change in self.changes: if change.timestamp > timestamp: ret.append(change) return ret def add_pure_change(self, change): """ 监测change的类型,并添加非垃圾change和不在黑名单中的change """ # 如果是黑名单及黑名单子目录的change,则跳过 for black_path in self.mute_list: if path_is_parent(black_path, change.path): print "...", change return # 寻找当前change对应的垃圾change,找到后删除;未找到则添加当前change trash_changes = self.find_related_trash_changes(change) if trash_changes: for change in trash_changes: self.changes.remove(change) print "- ", change else: self.changes.append(change) print "+ ", change ioloop.IOLoop.instance().add_callback(lambda: self.remove_outdated_changes(30)) def on_any_event(self, event): if event.is_directory: return now = time.time() src_relative_path = get_rel_path(event.src_path, self.path) if event.event_type == EVENT_TYPE_MOVED: self.add_pure_change(Change(timestamp=now, path=src_relative_path, type=EVENT_TYPE_DELETED)) dest_relative_path = get_rel_path(event.dest_path, self.path) self.add_pure_change(Change(timestamp=now, path=dest_relative_path, type=EVENT_TYPE_CREATED)) else: self.add_pure_change(Change(timestamp=now, path=src_relative_path, type=event.event_type)) try: ioloop.IOLoop.instance().add_callback(self.notify_changes_with_delay) except RuntimeError: print "ioloop.add_callback failed" def notify_changes_with_delay(self): loop = ioloop.IOLoop.instance() if self.changes_timer: loop.remove_timeout(self.changes_timer) def notify_changes(): if self.changes_handler and callable(self.changes_handler): 
ioloop.IOLoop.instance().add_callback(self.changes_handler) self.changes_timer = loop.add_timeout(time.time() + 0.1, notify_changes) def find_related_trash_changes(self, change): """ 寻找当前change之前短时间内的一些垃圾change 有些编辑器喜欢用 改名->写入->改回名 的方式来保存文件,所以不能直接将change上报,需要进行一定的过滤 """ trash_changes = [] for old_change in self.changes[::-1]: if old_change.path != change.path: continue if change.timestamp - old_change.timestamp > 0.1: break if change.type == EVENT_TYPE_DELETED: # 如果当前change类型是DELETED,那么返回所有该文件的change事件,直到CREATED事件为止 trash_changes.append(old_change) if old_change.type == EVENT_TYPE_CREATED: return trash_changes elif change.type == EVENT_TYPE_CREATED: # 如果当前change类型是CREATED,那么返回所有该文件的change事件,直到DELETED事件为止 trash_changes.append(old_change) if old_change.type == EVENT_TYPE_DELETED: return trash_changes return [] def remove_outdated_changes(self, seconds): for change in self.changes[:]: if change.timestamp - time.time() > seconds: self.changes.remove(change)
class ProjectWatcher(EventDispatcher): '''ProjectWatcher is responsible for watching any changes in project directory. It will call self._callback whenever there are any changes. It can currently handle only one directory at a time. ''' _active = BooleanProperty(True) '''Indicates if the watchdog can dispatch events :data:`active` is a :class:`~kivy.properties.BooleanProperty` ''' _path = StringProperty('') '''Project folder :data:`path` is a :class:`~kivy.properties.StringProperty` ''' __events__ = ('on_project_modified', ) def start_watching(self, path): '''To start watching project_dir. ''' self._path = path self._observer = Observer() self._handler = ProjectEventHandler(project_watcher=self) self._watcher = self._observer.schedule(self._handler, path, recursive=True) self._observer.start() def on_project_modified(self, *args): pass def stop_watching(self): '''To stop watching currently watched directory. This will also call join() on the thread created by Observer. ''' if self._observer: self._observer.unschedule_all() self._observer.stop() self._observer.join() self._observer = None def pause_watching(self): '''Pauses the watcher ''' self._active = False def resume_watching(self, delay=1): '''Resume the watcher :param delay: seconds to start the watching ''' Clock.schedule_once(self._resume_watching, delay) def _resume_watching(self, *args): if self._observer: self._observer.event_queue.queue.clear() self._active = True def on_any_event(self, event): if self._active: self.dispatch('on_project_modified', event)
class watcher(FileSystemEventHandler):
    """Watches a set of files (grouped by parent directory) and queues the
    paths of the watched files that get modified.
    """

    def __init__(self, message=None):
        super(watcher, self).__init__()  # Initialize the base class(es)
        self.directories = {}   # parent dir -> watchlist of names we care about
        self.watchers = []      # ObservedWatch handles returned by schedule()
        self.q = SimpleQueue()  # modified-file paths, oldest first
        self.msg = message if message else consoleMessage
        from watchdog.observers import Observer
        self.observer = Observer()
        self.msg("Starting Observer()")
        self.observer.start()

    def _addWatchItem(self, item):
        """Register *item* with the watchlist of its parent directory."""
        filespec = Path(item).resolve()
        if filespec.parent not in self.directories:
            self.msg(f"Creating watchlist() for [{filespec.parent}]")
            self.directories[filespec.parent] = watchlist(filespec)
        else:
            self.directories[filespec.parent].addItem(filespec.name)

    def _start(self, directory):
        """Schedule a non-recursive watch on *directory*."""
        self.msg(f"Scheduling watcher for [{directory}]")
        self.watchers.append(
            self.observer.schedule(self, str(directory), recursive=False))

    def _stop(self, watcher):
        """Unschedule a previously-scheduled watch handle."""
        if watcher in self.watchers:
            # Fix: the original referenced an undefined name `directory`
            # here (NameError when reached); an ObservedWatch carries its
            # own path, so report that instead.
            self.msg(f"UnScheduling watcher for [{watcher.path}]")
            self.observer.unschedule(watcher)
            # Keep the bookkeeping list in sync with the observer.
            self.watchers.remove(watcher)

    def on_modified(self, event):
        """watchdog callback: queue the path if it is one we watch."""
        if event.is_directory:
            return None
        spec = Path(event.src_path).resolve()
        if spec.parent in self.directories:
            if self.directories[spec.parent].exists(spec.name):
                self.msg(
                    f"File {event.src_path} was modified. Adding to queue...")
                self.q.put(event.src_path)
            else:
                self.msg(
                    f"File {event.src_path} was modified, but I don't care about it..."
                )
        else:
            self.msg(
                f"Got notification of change to {spec.name} in directory {spec.parent} --> {event.src_path}"
            )

    def begin(self, filelist):
        """Start watching every file in *filelist*."""
        # hang on to the filelist so .reset() can efficiently determine a course of action
        self.filelist = filelist
        # first, create all the watchdir() for each unique directory
        for item in filelist:
            self._addWatchItem(item)
        # now, schedule an event handler(watchdog.watch) for each folder
        for dir in self.directories:
            self._start(dir)

    def reset(self, filelist):
        """Re-point the watcher at a (possibly) new file list."""
        # if the filelist didn't change (true most of the time) ignore the reset
        if set(self.filelist) == set(filelist):
            self.msg("reset: no change to filelist, ignoring reset...")
            return
        self.msg("reset: shutting down directory observers...")
        self.observer.unschedule_all()
        self.directories = {}
        self.watchers = []
        self.begin(filelist)

    def end(self):
        """Shut the observer down and join its thread."""
        self.observer.unschedule_all()
        self.observer.stop()
        self.msg("\nObserver Stopped")
        self.observer.join()

    def look(self):
        """Return True if any modified paths are queued."""
        return True if self.q.qsize() else False

    def get(self):
        """Pop the next modified path, or '' if the queue is empty."""
        return self.q.get() if self.q.qsize() else ""

    def dump(self):
        """Print every directory watchlist (debug aid)."""
        for i in self.directories:
            self.msg(f"Folder: {i}")
            self.directories[i].dump()
class Dashboard(FileSystemEventHandler):
    """Monitors the game's Status.json dashboard file, using a watchdog
    observer where filesystem events are reliable and falling back to
    polling otherwise. Results are posted to the Tk main thread via a
    virtual event.
    """

    _POLL = 1  # Fallback polling interval

    def __init__(self):
        FileSystemEventHandler.__init__(self)  # futureproofing - not need for current version of watchdog
        self.root = None
        self.currentdir = None  # The actual logdir that we're monitoring
        self.observer = None
        self.observed = None  # a watchdog ObservedWatch, or None if polling
        self.status = {}  # Current status for communicating status back to main thread

    def start(self, root, started):
        # root: Tk root used for .after scheduling and event generation.
        # started: session start time; entries older than this are ignored.
        self.root = root
        self.session_start = started

        logdir = config.get('journaldir') or config.default_journal_dir
        if not logdir or not isdir(logdir):
            self.stop()
            return False

        # Switching to a different logdir: drop the old watch first.
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        # Set up a watchdog observer.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
        elif polling and self.observer:
            self.observer.stop()
            self.observer = None

        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        if __debug__:
            print '%s Dashboard "%s"' % (polling and 'Polling' or 'Monitoring', self.currentdir)

        # Even if we're not intending to poll, poll at least once to process pre-existing
        # data and to check whether the watchdog thread has crashed due to events not
        # being supported on this filesystem.
        self.root.after(self._POLL * 1000/2, self.poll, True)

        return True

    def stop(self):
        # Detach the watch but keep the observer thread for possible reuse.
        if __debug__:
            print 'Stopping monitoring Dashboard'
        self.currentdir = None
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.status = {}

    def close(self):
        # Full shutdown: stop watching, then stop and join the thread.
        self.stop()
        if self.observer:
            self.observer.stop()
        if self.observer:
            self.observer.join()
            self.observer = None

    def poll(self, first_time=False):
        if not self.currentdir:  # Stopped
            self.status = {}
        else:
            self.process()

            if first_time:
                # Watchdog thread
                emitter = self.observed and self.observer._emitter_for_watch[self.observed]  # Note: Uses undocumented attribute
                if emitter and emitter.is_alive():
                    return  # Watchdog thread still running - stop polling

            self.root.after(self._POLL * 1000, self.poll)  # keep polling

    def on_modified(self, event):
        # watchdog callback - DirModifiedEvent on macOS, FileModifiedEvent on Windows
        if event.is_directory or (isfile(event.src_path) and getsize(event.src_path)):
            # Can get on_modified events when the file is emptied
            self.process(event.src_path if not event.is_directory else None)

    # Can be called either in watchdog thread or, if polling, in main thread.
    def process(self, logfile=None):
        try:
            with open(join(self.currentdir, 'Status.json'), 'rb') as h:
                data = h.read().strip()
                if data:  # Can be empty if polling while the file is being re-written
                    entry = json.loads(data)
                    # Status file is shared between beta and live. So filter out status not in this game session.
                    if (timegm(time.strptime(entry['timestamp'], '%Y-%m-%dT%H:%M:%SZ')) >= self.session_start and self.status != entry):
                        self.status = entry
                        # Notify the Tk main thread; "tail" queues behind pending events.
                        self.root.event_generate('<<DashboardEvent>>', when="tail")
        except:
            # Best-effort: file may be mid-rewrite or malformed; only report in debug.
            if __debug__:
                print_exc()
class ScriptsManager(QtCore.QObject):
    """Watches the directories of a script package and hot-reloads its
    modules when any watched file changes, emitting the refreshed scene
    list (or an error) to Qt listeners.

    Reload and query access are serialized with a mutex plus two wait
    conditions: reloads wait for in-flight queries to drain, and queries
    block while a reload is in progress.
    """

    # Emitted with the new scene list after a successful reload.
    scriptsChanged = QtCore.Signal(list)
    # Emitted with an error string, or None on success.
    error = QtCore.Signal(str)

    def __init__(self, module_pkgname):
        super().__init__()
        self._module_pkgname = module_pkgname
        self._dirs_to_watch = set()   # directories derived from _files_to_watch
        self._files_to_watch = set()  # individual files that trigger reloads
        self._modules = set()         # module names to purge from sys.modules
        # Plain handler with on_any_event monkey-patched to our callback.
        self._event_handler = FileSystemEventHandler()
        self._event_handler.on_any_event = self._on_any_event
        self._observer = Observer()
        self._observer.start()
        self._timer = None            # debounce Timer for reloads
        self._lock = QtCore.QMutex()
        self._query_count = 0         # number of in-flight queries
        self._query_count_cond = QtCore.QWaitCondition()
        self._reloading = False       # True while reload() runs
        self._reloading_cond = QtCore.QWaitCondition()

    def start(self):
        """Perform the initial load."""
        self.reload()

    def resume(self):
        """(Re)schedule watches for every directory of interest."""
        self._observer.unschedule_all()
        for path in self._dirs_to_watch:
            if path == "":
                # dirname() of a bare filename is "" — watch the cwd instead.
                path = "."
            try:
                self._observer.schedule(self._event_handler, path)
            except Exception as e:
                print(f"Failed to schedule watch for {path}: {e}")

    def pause(self):
        """Detach all watches so a reload doesn't re-trigger itself."""
        self._observer.unschedule_all()

    def _set_reloading(self):
        # Enter the reload critical section: wait out any concurrent
        # reload, then wait until all queries have drained.
        with QtCore.QMutexLocker(self._lock):
            while self._reloading:
                self._reloading_cond.wait(self._lock)
            self._reloading = True
            while self._query_count != 0:
                self._query_count_cond.wait(self._lock)

    def _set_reloaded(self):
        # Leave the reload critical section and wake blocked queries.
        with QtCore.QMutexLocker(self._lock):
            self._reloading = False
            self._reloading_cond.wakeAll()

    @QtCore.Slot()
    def reload(self):
        """Purge tracked modules, re-query the package, and emit results."""
        self._set_reloading()
        self.pause()
        for name in self._modules:
            del sys.modules[name]
        self._modules = set()
        odict = query_list(self._module_pkgname)
        if "error" in odict:
            # Keep (augment) the previous watch lists so a fix to the
            # broken file still triggers another reload.
            self.update_filelist(odict["filelist"])
            self.update_modulelist(odict["modulelist"])
            self.resume()
            self._set_reloaded()
            self.error.emit(odict["error"])
            return
        self.error.emit(None)
        self.set_filelist(odict["filelist"])
        self.set_modulelist(odict["modulelist"])
        self.resume()
        self._set_reloaded()
        scripts = odict["scenes"]
        self.scriptsChanged.emit(scripts)

    def _on_any_event(self, event):
        # watchdog callback: debounce bursts of events into one reload.
        def print_reload():
            print("Reloading scene")
            self.reload()
        if event.src_path in self._files_to_watch:
            print("Change detected in %s" % event.src_path)
            if self._timer is not None:
                self._timer.cancel()
            self._timer = Timer(MIN_RELOAD_INTERVAL, print_reload, ())
            self._timer.start()

    def _update_dirs_to_watch(self):
        # Watch directories (not files): derive them from the file set.
        self._dirs_to_watch = {op.dirname(f) for f in self._files_to_watch}

    @QtCore.Slot(list)
    def update_filelist(self, filelist):
        """Add *filelist* to the watched files."""
        self._files_to_watch.update(filelist)
        self._update_dirs_to_watch()

    @QtCore.Slot(list)
    def set_filelist(self, filelist):
        """Replace the watched files with *filelist*."""
        self._files_to_watch = set(filelist)
        self._update_dirs_to_watch()

    def update_modulelist(self, modulelist):
        """Add *modulelist* to the tracked module names."""
        self._modules.update(modulelist)

    def set_modulelist(self, modulelist):
        """Replace the tracked module names with *modulelist*."""
        self._modules = set(modulelist)

    def inc_query_count(self):
        """Enter a query: blocks while a reload is in progress."""
        with QtCore.QMutexLocker(self._lock):
            while self._reloading:
                self._reloading_cond.wait(self._lock)
            self._query_count += 1

    def dec_query_count(self):
        """Leave a query and wake any reload waiting for the drain."""
        with QtCore.QMutexLocker(self._lock):
            self._query_count -= 1
            self._query_count_cond.wakeAll()
class CollectionScanner(object):
    """Scans configured music-collection directories (full or incremental),
    keeps watchdog watches on them, and mirrors removals into the track DB.
    """

    def __init__(self):
        self.fswatcher = CollectionEventHandler(self)
        self.observer = Observer()
        self.watches = {}  # directory -> watchdog ObservedWatch
        # Worker pool that runs the actual directory scans.
        self.scanner_pool = Pool(processes=4)
        # Intercept termination so quit() can persist state first.
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)

    def scan_directory(self, directory, full_scan):
        """Queue an asynchronous scan of *directory* on the worker pool."""
        self.scanner_pool.apply_async(start_scanrunner, [directory, full_scan, self.last_shutdown_time])

    def add_directory(self, directory, full_scan=False):
        # start a full scan if required
        # otherwise do an incremental
        # and schedule a watch.
        self.scan_directory(directory, full_scan)
        #start_scanrunner(directory, full_scan, self.last_shutdown_time)
        #TODO(nikhil) fix me
        # NOTE(review): the next assignment is immediately overwritten by the
        # schedule() result below — looks redundant; confirm before removing.
        self.watches[directory] = True
        self.watches[directory] = self.observer.schedule(self.fswatcher, path=directory, recursive=True)

    def remove_directory(self, directory):
        """Stop watching *directory* and purge its tracks from the DB."""
        try:
            self.observer.unschedule(self.watches[directory])
            del self.watches[directory]
        except KeyError:
            pass
        # also remove all tracks within that directory from DB
        tracks = Track.select(Track.q.url.startswith('file://'+directory))
        for track in tracks:
            track.destroySelf()

    def configuration_changed(self):
        # check if collection dirs have
        # been changed since we last started
        # if yes, we will do a full rescan
        # otherwise, an incremental scan.
        config = GlobalConfig()
        paths_saved_at = 0
        last_scan = 0
        self.last_shutdown_time = 0
        try:
            paths_saved_at = int(config['collection']['paths_saved_at'])
        except KeyError:
            #TODO(nikhil) test this behaviour
            pass
        try:
            last_scan = int(config['collection']['last_scan'])
        except KeyError:
            # No recorded scan: force last_scan < paths_saved_at below.
            last_scan = paths_saved_at - 1
        try:
            self.last_shutdown_time = int(config['last_shutdown_time'])
        except KeyError:
            pass
        collection_directories = set()
        try:
            collection_directories = set(config['collection']['paths'])
        except KeyError:
            pass
        full_scan = False
        if last_scan < paths_saved_at:
            # Paths changed since the last scan: rescan everything.
            full_scan = True
        if full_scan:
            try:
                # for a full scan, first wipe all tables
                Artist.deleteMany(None)
                Album.deleteMany(None)
                Genre.deleteMany(None)
                Composer.deleteMany(None)
                Track.deleteMany(None)
                TrackStatistics.deleteMany(None)
                AlbumStatistics.deleteMany(None)
                ArtistStatistics.deleteMany(None)
                GenreStatistics.deleteMany(None)
            except OperationalError:
                pass
        # first remove watches on
        # any directories that have been
        # removed from the collection directories
        existing_directories = set(self.watches.keys())
        for dir in existing_directories.difference(collection_directories):
            self.remove_directory(dir)
        for dir in collection_directories:
            if dir in self.watches:
                # directory is already being watched
                # do nothing
                pass
            else:
                self.add_directory(dir, full_scan)

    def start(self):
        """Kick off scanning/watching and start the observer thread."""
        self.configuration_changed()
        # Note: a 'first' scan is an
        # incremental scan behaving like a full
        # scan, so we don't have to explicitly
        # handle that case.
        # finally put a watch on the config file itself
        self.observer.schedule(ConfigWatcher(self), path=USERDIR)
        self.observer.start()

    def quit(self, signum, frame):
        """Signal handler: persist scan state and shut everything down."""
        # Restore default handlers so a second signal terminates immediately.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # stop watching file before we make any changes
        self.observer.unschedule_all()
        self.observer.stop()
        # close all update threads
        # save current time
        config = GlobalConfig()
        try:
            config['collection']
        except KeyError:
            config['collection'] = {}
        now = int(time.time())
        config['collection']['last_scan'] = now
        config.save()
        self.scanner_pool.close()
        self.scanner_pool.terminate()
        self.observer.join()
        self.scanner_pool.join()
        sys.exit(0)
class ProjDirFactory(Factory):
    """A Factory that watches a Project directory and dynamically keeps
    the set of available types up-to-date as project files change.
    """

    def __init__(self, watchdir, use_observer=True, observer=None):
        super(ProjDirFactory, self).__init__()
        self._lock = threading.RLock()
        self.watchdir = watchdir
        self.imported = {}  # imported files vs (module, ctor dict)
        try:
            self.analyzer = PythonSourceTreeAnalyser()

            # Seed the analyzer with every .py file already in the project.
            added_set = set()
            changed_set = set()
            deleted_set = set()
            for pyfile in find_files(self.watchdir, "*.py"):
                self.on_modified(pyfile, added_set, changed_set, deleted_set)

            if use_observer:
                self._start_observer(observer)
                self.publish_updates(added_set, changed_set, deleted_set)
            else:
                # sometimes for debugging/testing it's easier to turn observer off
                self.observer = None
        except Exception as err:
            logger.error(str(err))

    def _start_observer(self, observer):
        # Either own a new Observer (and manage its lifetime) or piggyback
        # on one supplied by the caller.
        if observer is None:
            self.observer = Observer()
            self._ownsobserver = True
        else:
            self.observer = observer
            self._ownsobserver = False
        self.observer.schedule(PyWatcher(self), path=self.watchdir, recursive=True)
        if self._ownsobserver:
            self.observer.daemon = True
            self.observer.start()

    def _get_mod_ctors(self, mod, fpath, visitor):
        # Cache a constructor for every class the analyzer found in fpath.
        self.imported[fpath] = (mod, {})
        for cname in visitor.classes.keys():
            self.imported[fpath][1][cname] = getattr(mod, cname.split('.')[-1])

    def create(self, typ, version=None, server=None, res_desc=None, **ctor_args):
        """Create and return an instance of the specified type, or None if
        this Factory can't satisfy the request.
        """
        if server is None and res_desc is None and typ in self.analyzer.class_map:
            with self._lock:
                fpath = self.analyzer.class_map[typ].fname
                modpath = self.analyzer.fileinfo[fpath][0].modpath
                # Re-import if the file changed since we last analyzed it.
                if os.path.getmtime(fpath) > self.analyzer.fileinfo[fpath][
                        1] and modpath in sys.modules:
                    reload(sys.modules[modpath])
                if fpath not in self.imported:
                    # Temporarily put the module's package root on sys.path.
                    sys.path = [
                        get_ancestor_dir(fpath, len(modpath.split('.')))
                    ] + sys.path
                    try:
                        __import__(modpath)
                    except ImportError as err:
                        return None
                    finally:
                        sys.path = sys.path[1:]
                    mod = sys.modules[modpath]
                    visitor = self.analyzer.fileinfo[fpath][0]
                    self._get_mod_ctors(mod, fpath, visitor)
                try:
                    ctor = self.imported[fpath][1][typ]
                except KeyError:
                    return None
                return ctor(**ctor_args)
        return None

    def get_available_types(self, groups=None):
        """Return a list of available types that cause
        predicate(classname, metadata) to return True.
        """
        with self._lock:
            graph = self.analyzer.graph
            typset = set(graph.nodes())
            types = []

            if groups is None:
                ifaces = set([v[0] for v in plugin_groups.values()])
            else:
                ifaces = set(
                    [v[0] for k, v in plugin_groups.items() if k in groups])

            for typ in typset:
                if typ.startswith(
                        'openmdao.'):  # don't include any standard lib types
                    continue
                if 'classinfo' in graph.node[typ]:
                    meta = graph.node[typ]['classinfo'].meta
                    if ifaces.intersection(self.analyzer.get_interfaces(typ)):
                        meta = meta.copy()
                        meta['_context'] = 'In Project'
                        types.append((typ, meta))
            return types

    def on_modified(self, fpath, added_set, changed_set, deleted_set):
        """Re-analyze *fpath* and record class additions/changes/removals
        into the given sets.
        """
        if os.path.isdir(fpath):
            return
        with self._lock:
            imported = False
            if fpath in self.analyzer.fileinfo:  # file has been previously scanned
                visitor = self.analyzer.fileinfo[fpath][0]
                pre_set = set(visitor.classes.keys())

                if fpath in self.imported:  # we imported it earlier
                    imported = True
                    sys.path = [os.path.dirname(fpath)
                                ] + sys.path  # add fpath location to sys.path
                    try:
                        reload(self.imported[fpath][0])
                    except ImportError as err:
                        return None
                    finally:
                        sys.path = sys.path[1:]  # restore original sys.path
                    #self.imported[fpath] = (m, self.imported[fpath][1])
                elif os.path.getmtime(
                        fpath) > self.analyzer.fileinfo[fpath][1]:
                    modpath = get_module_path(fpath)
                    if modpath in sys.modules:
                        reload(sys.modules[modpath])
                self.on_deleted(fpath, set())  # clean up old refs
            else:  # it's a new file
                pre_set = set()

            visitor = self.analyzer.analyze_file(fpath)
            post_set = set(visitor.classes.keys())

            deleted_set.update(pre_set - post_set)
            added_set.update(post_set - pre_set)
            if imported:
                changed_set.update(pre_set.intersection(post_set))

    def on_deleted(self, fpath, deleted_set):
        """Remove *fpath* (or, for a directory, every project .py file)
        from the analyzer, recording removed classes in *deleted_set*.
        """
        with self._lock:
            if os.path.isdir(fpath):
                for pyfile in find_files(self.watchdir, "*.py"):
                    self.on_deleted(pyfile, deleted_set)
            else:
                try:
                    del self.imported[fpath]
                except KeyError:
                    pass

                visitor = self.analyzer.fileinfo[fpath][0]
                deleted_set.update(visitor.classes.keys())
                self.analyzer.remove_file(fpath)

    def publish_updates(self, added_set, changed_set, deleted_set):
        """Publish the combined type list plus the add/change/delete deltas."""
        publisher = Publisher.get_instance()
        if publisher:
            types = get_available_types()
            types.extend(self.get_available_types())
            publisher.publish('types', [
                packagedict(types),
                list(added_set),
                list(changed_set),
                list(deleted_set),
            ])
        else:
            logger.error("no Publisher found")

    def cleanup(self):
        """If this factory is removed from the FactoryManager during
        execution, this function will stop the watchdog observer thread.
        """
        if self.observer and self._ownsobserver:
            self.observer.unschedule_all()
            self.observer.stop()
            self.observer.join()
class FileMonitor(FileSystemEventHandler): def __init__(self): self._observer = Observer() self._event_callback = { 'modified': [], 'moved': [], 'created': [], 'deleted': [], } def start(self, directory, recursive=False): self._observer.schedule(self, directory, recursive) self._observer.start() def reset(self, directory, recursive=False): if self._observer.is_alive(): self._observer.unschedule_all() self._observer.stop() self._observer.join() self._observer.schedule(self, directory, recursive) self._observer.start() def stop(self): self._observer.unschedule_all() self._observer.stop() self._observer.join() def bind(self, event, func): if event in self._event_callback: self._event_callback[event].append(func) return True return False def on_moved(self, event): """Called when a file or a directory is moved or renamed. :param event: Event representing file/directory movement. :type event: :class:`DirMovedEvent` or :class:`FileMovedEvent` """ print( 'moved', event.src_path, event.dest_path ) for cb in self._event_callback['moved']: cb(event) def on_created(self, event): """Called when a file or directory is created. :param event: Event representing file/directory creation. :type event: :class:`DirCreatedEvent` or :class:`FileCreatedEvent` """ print( 'created', event.src_path ) for cb in self._event_callback['created']: cb(event) def on_deleted(self, event): """Called when a file or directory is deleted. :param event: Event representing file/directory deletion. :type event: :class:`DirDeletedEvent` or :class:`FileDeletedEvent` """ print( 'deleted', event.src_path ) for cb in self._event_callback['deleted']: cb(event) def on_modified(self, event): """Called when a file or directory is modified. :param event: Event representing file/directory modification. :type event: :class:`DirModifiedEvent` or :class:`FileModifiedEvent` """ print( 'modified', event.src_path ) for cb in self._event_callback['modified']: cb(event)
handlers = [(ResponderHandler, args.responder_dir), (CredsHandler, ResponderHandler().outpath), (SecretsdumpHandler, args.responder_dir), (CashCredsHandler, SecretsdumpHandler().outpath), (CredsHandler, SecretsdumpHandler().outpath)] # Listen for all .msf folders - .msf4 and .msf5 for msf in [name for name in os.listdir(os.path.expanduser('~')) if 'msf' in name]: handlers.append((ResponderHandler, os.path.join(os.path.expanduser('~'), msf, 'loot'))) observer = Observer() observers = [] for handler, path in handlers: if not os.path.exists(path): os.makedirs(path) info("Watching ({}) for files with ({})".format(path, ', '.join(handler.patterns))) observer.schedule(handler(), path=path, recursive=False) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.unschedule_all() observer.stop() observer.join()
class EDLogs(FileSystemEventHandler):
    """Monitor the Elite: Dangerous Journal directory.

    Tails the newest ``Journal*.log`` file (via a watchdog observer where
    reliable, otherwise by polling), queues raw journal lines for the Tk main
    thread, and maintains parsed commander/ship/location state in ``state``.
    Note this is Python 2 code (print statements, ``iteritems``).
    """

    _POLL = 1  # Polling is cheap, so do it often

    # Matches localised names like "$HNShockMount_Name;" -> "HNShockMount"
    _RE_CANONICALISE = re.compile('\$(.+)_name;')

    # Mostly taken from http://elite-dangerous.wikia.com/wiki/List_of_Rare_Commodities
    RARES = set([
        'cetiaepyornisegg', 'aganipperush', 'alacarakmoskinart', 'albinoquechuamammoth',
        'altairianskin', 'alyabodilysoap', 'anduligafireworks', 'anynacoffee',
        'aroucaconventualsweets', 'azcancriformula42', 'bluemilk', 'baltahsinevacuumkrill',
        'bakedgreebles', 'bankiamphibiousleather', 'bastsnakegin', 'belalansrayleather',
        'borasetanipathogenetics', 'burnhambiledistillate', 'cd75catcoffee', 'centaurimegagin',
        'ceremonialheiketea', 'cetirabbits', 'chameleoncloth', 'chateaudeaegaeon',
        'cherbonesbloodcrystals', 'chieridanimarinepaste', 'coquimspongiformvictuals',
        'cromsilverfesh', 'crystallinespheres', 'damnacarapaces', 'deltaphoenicispalms',
        'deuringastruffles', 'disomacorn', 'aerialedenapple', 'eleuthermals',
        'eraninpearlwhisky', 'eshuumbrellas', 'esusekucaviar', 'ethgrezeteabuds',
        'fujintea', 'galactictravelguide', 'geawendancedust', 'gerasiangueuzebeer',
        'giantirukamasnails', 'giantverrix', 'gilyasignatureweapons', 'gomanyauponcoffee',
        'haidneblackbrew', 'havasupaidreamcatcher', 'helvetitjpearls', 'hip10175bushmeat',
        'hip118311swarm', 'hiporganophosphates', 'hip41181squid', 'holvaduellingblades',
        'honestypills', 'hr7221wheat', 'indibourbon', 'jaquesquinentianstill',
        'jaradharrepuzzlebox', 'jarouarice', 'jotunmookah', 'kachiriginleaches',
        'kamitracigars', 'kamorinhistoricweapons', 'karetiicouture', 'karsukilocusts',
        'kinagoinstruments', 'konggaale', 'korrokungpellets', 'lavianbrandy',
        'alieneggs', 'leestianeviljuice', 'livehecateseaworms', 'ltthypersweet',
        'lyraeweed', 'transgeniconionhead', 'masterchefs', 'mechucoshightea',
        'medbstarlube', 'mokojingbeastfeast', 'momusbogspaniel', 'motronaexperiencejelly',
        'mukusubiichitinos', 'mulachigiantfungus', 'neritusberries', 'ngadandarifireopals',
        'ngunamodernantiques', 'njangarisaddles', 'noneuclidianexotanks', 'ochoengchillies',
        'onionhead', 'onionheada', 'onionheadb', 'onionheadc', 'onionheadd', 'onionheade',
        'onionheadderivatives', 'onionheadsamples', 'ophiuchiexinoartefacts',
        'orrerianviciousbrew', 'pantaaprayersticks', 'pavoniseargrubs', 'personalgifts',
        'rajukrustoves', 'rapabaosnakeskins', 'rusanioldsmokey', 'sanumameat',
        'saxonwine', 'shanscharisorchid', 'soontillrelics', 'sothiscrystallinegold',
        'tanmarktranquiltea', 'tarachtorspice', 'taurichimes', 'terramaterbloodbores',
        'thehuttonmug', 'thrutiscream', 'tiegfriessynthsilk', 'tiolcewaste2pasteunits',
        'toxandjivirocide', 'advert1', 'uszaiantreegrub', 'utgaroarmillenialeggs',
        'uzumokulowgwings', 'vherculisbodyrub', 'vacuumkrill', 'vanayequirhinofur',
        'vegaslimweed', 'vidavantianlace', 'lftvoidextractcoffee', 'voidworms',
        'volkhabbeedrones', 'watersofshintara', 'wheemetewheatcakes', 'witchhaulkobebeef',
        'wolf1301fesh', 'wulpahyperboresystems', 'wuthielokufroth', 'xihecompanions',
        'yasokondileaf', 'zeesszeantglue', 'buckyballbeermats',
    ])

    def __init__(self):
        FileSystemEventHandler.__init__(self)  # futureproofing - not need for current version of watchdog
        self.root = None
        self.currentdir = None  # The actual logdir that we're monitoring
        self.logfile = None
        self.observer = None
        self.observed = None  # a watchdog ObservedWatch, or None if polling
        self.thread = None
        self.event_queue = []  # For communicating journal entries back to main thread

        # On startup we might be:
        # 1) Looking at an old journal file because the game isn't running or the user has exited to the main menu.
        # 2) Looking at an empty journal (only 'Fileheader') because the user is at the main menu.
        # 3) In the middle of a 'live' game.
        # If 1 or 2 a LoadGame event will happen when the game goes live.
        # If 3 we need to inject a special 'StartUp' event since consumers won't see the LoadGame event.
        self.live = False

        # Context for journal handling
        self.version = None
        self.is_beta = False
        self.mode = None
        self.group = None
        self.cmdr = None
        self.planet = None
        self.system = None
        self.station = None
        self.stationtype = None
        self.coordinates = None
        self.started = None  # Timestamp of the LoadGame event

        # Cmdr state shared with EDSM and plugins
        self.state = {
            'Captain': None,  # On a crew
            'Cargo': defaultdict(int),
            'Credits': None,
            'Loan': None,
            'Raw': defaultdict(int),
            'Manufactured': defaultdict(int),
            'Encoded': defaultdict(int),
            'PaintJob': None,
            'Rank': {'Combat': None, 'Trade': None, 'Explore': None, 'Empire': None, 'Federation': None, 'CQC': None},
            'Role': None,  # Crew role - None, Idle, FireCon, FighterCon
            'ShipID': None,
            'ShipIdent': None,
            'ShipName': None,
            'ShipType': None,
        }

    def start(self, root):
        """Begin monitoring; returns False if the journal dir is unusable."""
        self.root = root
        logdir = config.get('journaldir') or config.default_journal_dir
        if not logdir or not isdir(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        # Latest pre-existing logfile - e.g. if E:D is already running. Assumes logs sort alphabetically.
        # Do this before setting up the observer in case the journal directory has gone away
        try:
            logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal') and x.endswith('.log')],
                              key=lambda x: x.split('.')[1:])
            self.logfile = logfiles and join(self.currentdir, logfiles[-1]) or None
        except:
            self.logfile = None
            return False

        # Set up a watchdog observer.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('journaldir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
        elif polling and self.observer:
            self.observer.stop()
            self.observer = None

        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        if __debug__:
            print '%s Journal "%s"' % (polling and 'Polling' or 'Monitoring', self.currentdir)
            print 'Start logfile "%s"' % self.logfile

        if not self.running():
            self.thread = threading.Thread(target=self.worker, name='Journal worker')
            self.thread.daemon = True
            self.thread.start()

        return True

    def stop(self):
        """Stop monitoring and reset parsed journal context."""
        if __debug__:
            print 'Stopping monitoring Journal'
        self.currentdir = None
        self.version = self.mode = self.group = self.cmdr = self.planet = self.system = self.station = self.stationtype = self.stationservices = self.coordinates = None
        self.is_beta = False
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.thread = None  # Orphan the worker thread - will terminate at next poll

    def close(self):
        """Full shutdown: stop monitoring and join worker + observer threads."""
        thread = self.thread
        self.stop()
        if self.observer:
            self.observer.stop()
        if thread:
            thread.join()
        if self.observer:
            self.observer.join()
            self.observer = None

    def running(self):
        # True while the worker thread is alive.
        return self.thread and self.thread.is_alive()

    def on_created(self, event):
        # watchdog callback, e.g. client (re)started.
        if not event.is_directory and basename(event.src_path).startswith('Journal') and basename(event.src_path).endswith('.log'):
            self.logfile = event.src_path

    def worker(self):
        """Background thread: tail the journal and queue lines for the UI."""
        # Tk isn't thread-safe in general.
        # event_generate() is the only safe way to poke the main thread from this thread:
        # https://mail.python.org/pipermail/tkinter-discuss/2013-November/003522.html

        # Seek to the end of the latest log file
        logfile = self.logfile
        if logfile:
            loghandle = open(logfile, 'r')
            for line in loghandle:
                try:
                    self.parse_entry(line)  # Some events are of interest even in the past
                except:
                    if __debug__:
                        print 'Invalid journal entry "%s"' % repr(line)
        else:
            loghandle = None

        if self.live:
            if self.game_running():
                self.event_queue.append('{ "timestamp":"%s", "event":"StartUp" }' % strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()))
            else:
                self.event_queue.append(None)  # Generate null event to update the display (with possibly out-of-date info)
            self.live = False

        # Watchdog thread
        emitter = self.observed and self.observer._emitter_for_watch[self.observed]  # Note: Uses undocumented attribute

        while True:
            # Check whether new log file started, e.g. client (re)started.
            if emitter and emitter.is_alive():
                newlogfile = self.logfile  # updated by on_created watchdog callback
            else:
                # Poll
                try:
                    logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('Journal') and x.endswith('.log')],
                                      key=lambda x: x.split('.')[1:])
                    newlogfile = logfiles and join(self.currentdir, logfiles[-1]) or None
                except:
                    if __debug__:
                        print_exc()
                    newlogfile = None

            if logfile != newlogfile:
                logfile = newlogfile
                if loghandle:
                    loghandle.close()
                if logfile:
                    loghandle = open(logfile, 'r')
                if __debug__:
                    print 'New logfile "%s"' % logfile

            if logfile:
                loghandle.seek(0, SEEK_CUR)  # reset EOF flag
                for line in loghandle:
                    self.event_queue.append(line)
                if self.event_queue:
                    self.root.event_generate('<<JournalEvent>>', when="tail")

            sleep(self._POLL)

            # Check whether we're still supposed to be running
            if threading.current_thread() != self.thread:
                return  # Terminate

    def parse_entry(self, line):
        """Parse one journal JSON line, update monitor state, return the entry.

        Returns ``{'event': None}`` for a None line (fake startup event) or on
        any parse error.
        """
        if line is None:
            return {'event': None}  # Fake startup event

        try:
            entry = json.loads(line, object_pairs_hook=OrderedDict)  # Preserve property order because why not?
            entry['timestamp']  # we expect this to exist
            if entry['event'] == 'Fileheader':
                # New log file - reset all context to a clean slate.
                self.live = False
                self.version = entry['gameversion']
                self.is_beta = 'beta' in entry['gameversion'].lower()
                self.cmdr = None
                self.mode = None
                self.group = None
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.started = None
                self.state = {
                    'Captain': None,
                    'Cargo': defaultdict(int),
                    'Credits': None,
                    'Loan': None,
                    'Raw': defaultdict(int),
                    'Manufactured': defaultdict(int),
                    'Encoded': defaultdict(int),
                    'PaintJob': None,
                    'Rank': {'Combat': None, 'Trade': None, 'Explore': None, 'Empire': None, 'Federation': None, 'CQC': None},
                    'Role': None,
                    'ShipID': None,
                    'ShipIdent': None,
                    'ShipName': None,
                    'ShipType': None,
                }
            elif entry['event'] == 'LoadGame':
                self.live = True
                self.cmdr = entry['Commander']
                self.mode = entry.get('GameMode')  # 'Open', 'Solo', 'Group', or None for CQC (and Training - but no LoadGame event)
                self.group = entry.get('Group')
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
                self.started = timegm(strptime(entry['timestamp'], '%Y-%m-%dT%H:%M:%SZ'))
                self.state.update({
                    'Captain': None,
                    'Credits': entry['Credits'],
                    'Loan': entry['Loan'],
                    'Rank': {'Combat': None, 'Trade': None, 'Explore': None, 'Empire': None, 'Federation': None, 'CQC': None},
                    'Role': None,
                })
            elif entry['event'] == 'NewCommander':
                self.cmdr = entry['Name']
                self.group = None
            elif entry['event'] == 'SetUserShipName':
                self.state['ShipID'] = entry['ShipID']
                if 'UserShipId' in entry:  # Only present when changing the ship's ident
                    self.state['ShipIdent'] = entry['UserShipId']
                self.state['ShipName'] = entry.get('UserShipName')
                self.state['ShipType'] = self.canonicalise(entry['Ship'])
            elif entry['event'] == 'ShipyardBuy':
                self.state['ShipID'] = None
                self.state['ShipIdent'] = None
                self.state['ShipName'] = None
                self.state['ShipType'] = self.canonicalise(entry['ShipType'])
                self.state['PaintJob'] = None
            elif entry['event'] == 'ShipyardSwap':
                self.state['ShipID'] = entry['ShipID']
                self.state['ShipIdent'] = None
                self.state['ShipName'] = None
                self.state['ShipType'] = self.canonicalise(entry['ShipType'])
                self.state['PaintJob'] = None
            elif entry['event'] == 'Loadout':  # Note: Precedes LoadGame, ShipyardNew, follows ShipyardSwap, ShipyardBuy
                self.state['ShipID'] = entry['ShipID']
                self.state['ShipIdent'] = entry['ShipIdent']
                self.state['ShipName'] = entry['ShipName']
                self.state['ShipType'] = self.canonicalise(entry['Ship'])
                # Ignore other Modules since they're missing Engineer modification details
                self.state['PaintJob'] = 'paintjob_%s_default_defaultpaintjob' % self.state['ShipType']
                for module in entry['Modules']:
                    if module.get('Slot') == 'PaintJob' and module.get('Item'):
                        self.state['PaintJob'] = self.canonicalise(module['Item'])
            elif entry['event'] in ['ModuleBuy', 'ModuleSell'] and entry['Slot'] == 'PaintJob':
                self.state['PaintJob'] = self.canonicalise(entry.get('BuyItem'))
            elif entry['event'] in ['Undocked']:
                self.station = None
                self.stationtype = None
                self.stationservices = None
            elif entry['event'] in ['Location', 'FSDJump', 'Docked']:
                if entry['event'] == 'Location':
                    self.planet = entry.get('Body') if entry.get('BodyType') == 'Planet' else None
                elif entry['event'] == 'FSDJump':
                    self.planet = None
                if 'StarPos' in entry:
                    self.coordinates = tuple(entry['StarPos'])
                elif self.system != entry['StarSystem']:
                    self.coordinates = None  # Docked event doesn't include coordinates
                (self.system, self.station) = (entry['StarSystem'] == 'ProvingGround' and 'CQC' or entry['StarSystem'],
                                               entry.get('StationName'))  # May be None
                self.stationtype = entry.get('StationType')  # May be None
                self.stationservices = entry.get('StationServices')  # None under E:D < 2.4
            elif entry['event'] == 'SupercruiseExit':
                self.planet = entry.get('Body') if entry.get('BodyType') == 'Planet' else None
            elif entry['event'] == 'SupercruiseEntry':
                self.planet = None
            elif entry['event'] in ['Rank', 'Promotion']:
                for k, v in entry.iteritems():
                    if k in self.state['Rank']:
                        self.state['Rank'][k] = (v, 0)
            elif entry['event'] == 'Progress':
                for k, v in entry.iteritems():
                    if self.state['Rank'].get(k) is not None:
                        self.state['Rank'][k] = (self.state['Rank'][k][0], min(v, 100))  # perhaps not taken promotion mission yet
            elif entry['event'] == 'Cargo':
                self.live = True  # First event in 2.3
                self.state['Cargo'] = defaultdict(int)
                self.state['Cargo'].update({self.canonicalise(x['Name']): x['Count'] for x in entry['Inventory']})
            elif entry['event'] in ['CollectCargo', 'MarketBuy', 'MiningRefined']:
                commodity = self.canonicalise(entry['Type'])
                self.state['Cargo'][commodity] += entry.get('Count', 1)
            elif entry['event'] in ['EjectCargo', 'MarketSell']:
                commodity = self.canonicalise(entry['Type'])
                self.state['Cargo'][commodity] -= entry.get('Count', 1)
                if self.state['Cargo'][commodity] <= 0:
                    self.state['Cargo'].pop(commodity)
            elif entry['event'] == 'MissionCompleted':
                for reward in entry.get('CommodityReward', []):
                    commodity = self.canonicalise(reward['Name'])
                    self.state['Cargo'][commodity] += reward.get('Count', 1)
            elif entry['event'] == 'Materials':
                for category in ['Raw', 'Manufactured', 'Encoded']:
                    self.state[category] = defaultdict(int)
                    self.state[category].update({self.canonicalise(x['Name']): x['Count'] for x in entry.get(category, [])})
            elif entry['event'] == 'MaterialCollected':
                material = self.canonicalise(entry['Name'])
                self.state[entry['Category']][material] += entry['Count']
            elif entry['event'] in ['MaterialDiscarded', 'ScientificResearch']:
                material = self.canonicalise(entry['Name'])
                self.state[entry['Category']][material] -= entry['Count']
                if self.state[entry['Category']][material] <= 0:
                    self.state[entry['Category']].pop(material)
            elif entry['event'] in ['EngineerCraft', 'Synthesis']:
                for category in ['Raw', 'Manufactured', 'Encoded']:
                    for x in entry[entry['event'] == 'EngineerCraft' and 'Ingredients' or 'Materials']:
                        material = self.canonicalise(x['Name'])
                        if material in self.state[category]:
                            self.state[category][material] -= x['Count']
                            if self.state[category][material] <= 0:
                                self.state[category].pop(material)
            elif entry['event'] == 'EngineerContribution':
                commodity = self.canonicalise(entry.get('Commodity'))
                if commodity:
                    self.state['Cargo'][commodity] -= entry['Quantity']
                    if self.state['Cargo'][commodity] <= 0:
                        self.state['Cargo'].pop(commodity)
                material = self.canonicalise(entry.get('Material'))
                if material:
                    for category in ['Raw', 'Manufactured', 'Encoded']:
                        if material in self.state[category]:
                            self.state[category][material] -= entry['Quantity']
                            if self.state[category][material] <= 0:
                                self.state[category].pop(material)
            elif entry['event'] == 'JoinACrew':
                self.state['Captain'] = entry['Captain']
                self.state['Role'] = 'Idle'
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
            elif entry['event'] == 'ChangeCrewRole':
                self.state['Role'] = entry['Role']
            elif entry['event'] == 'QuitACrew':
                self.state['Captain'] = None
                self.state['Role'] = None
                self.planet = None
                self.system = None
                self.station = None
                self.stationtype = None
                self.stationservices = None
                self.coordinates = None
            return entry
        except:
            if __debug__:
                print 'Invalid journal entry "%s"' % repr(line)
                print_exc()
            return {'event': None}

    # Commodities, Modules and Ships can appear in different forms e.g. "$HNShockMount_Name;", "HNShockMount", and "hnshockmount",
    # "$int_cargorack_size6_class1_name;" and "Int_CargoRack_Size6_Class1", "python" and "Python", etc.
    # This returns a simple lowercased name e.g. 'hnshockmount', 'int_cargorack_size6_class1', 'python', etc
    def canonicalise(self, item):
        if not item:
            return ''
        item = item.lower()
        match = self._RE_CANONICALISE.match(item)
        return match and match.group(1) or item

    def get_entry(self):
        """Pop and parse the next queued line; inject 'StartUp' on going live."""
        if not self.event_queue:
            return None
        else:
            entry = self.parse_entry(self.event_queue.pop(0))
            if not self.live and entry['event'] not in [None, 'Fileheader']:
                self.live = True
                self.event_queue.append('{ "timestamp":"%s", "event":"StartUp" }' % strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()))
            return entry

    def carrying_rares(self):
        # True if any cargo commodity is a known rare.
        for commodity in self.state['Cargo']:
            if commodity in self.RARES:
                return True
        return False

    def game_running(self):
        """Best-effort check whether the E:D client process is running."""
        if platform == 'darwin':
            for app in NSWorkspace.sharedWorkspace().runningApplications():
                if app.bundleIdentifier() == 'uk.co.frontier.EliteDangerous':
                    return True
        elif platform == 'win32':

            def WindowTitle(h):
                if h:
                    l = GetWindowTextLength(h) + 1
                    buf = ctypes.create_unicode_buffer(l)
                    if GetWindowText(h, buf, l):
                        return buf.value
                return None

            def callback(hWnd, lParam):
                name = WindowTitle(hWnd)
                if name and name.startswith('Elite - Dangerous'):
                    handle = GetProcessHandleFromHwnd(hWnd)
                    if handle:  # If GetProcessHandleFromHwnd succeeds then the app is already running as this user
                        CloseHandle(handle)
                        return False  # stop enumeration
                return True

            return not EnumWindows(EnumWindowsProc(callback), 0)

        return False
class EDLogs(FileSystemEventHandler):
    """Monitor the Elite: Dangerous netLog directory (pre-Journal era).

    Tails the newest ``netLog.*`` file for jump/dock lines and fires the
    registered 'Jump'/'Dock' callbacks through Tk virtual events.  Contains
    per-platform helpers to locate the client's Logs directory and to enable
    verbose logging in AppConfig.  Python 2 code (print statements).
    """

    _POLL = 5  # New system gets posted to log file before hyperspace ends, so don't need to poll too often

    def __init__(self):
        FileSystemEventHandler.__init__(self)  # futureproofing - not need for current version of watchdog
        self.root = None
        self.logdir = self._logdir()  # E:D client's default Logs directory, or None if not found
        self.currentdir = None  # The actual logdir that we're monitoring
        self.logfile = None
        self.observer = None
        self.observed = None  # a watchdog ObservedWatch, or None if polling
        self.thread = None
        self.callbacks = {'Jump': None, 'Dock': None}  # user callbacks, set via set_callback()
        self.last_event = None  # for communicating the Jump event

    def logging_enabled_in_file(self, appconf):
        """Return True if VerboseLogging="1" is set inside <Network> in appconf."""
        if not isfile(appconf):
            return False
        with open(appconf, 'rU') as f:
            content = f.read().lower()
            start = content.find('<network')
            end = content.find('</network>')
            if start >= 0 and end >= 0:
                return bool(re.search('verboselogging\s*=\s*\"1\"', content[start+8:end]))
            else:
                return False

    def enable_logging_in_file(self, appconf):
        """Rewrite appconf (keeping a backup) to set VerboseLogging="1"."""
        try:
            if not exists(appconf):
                with open(appconf, 'wt') as f:
                    f.write('<AppConfig>\n\t<Network\n\t\tVerboseLogging="1"\n\t>\n\t</Network>\n</AppConfig>\n')
                return True

            with open(appconf, 'rU') as f:
                content = f.read()
                f.close()
            # Keep the original as *_backup.xml before rewriting in place.
            backup = appconf[:-4] + '_backup.xml'
            if exists(backup):
                unlink(backup)
            rename(appconf, backup)

            with open(appconf, 'wt') as f:
                start = content.lower().find('<network')
                if start >= 0:
                    # Inject the attribute just after the <Network tag.
                    f.write(content[:start+8] + '\n\t\tVerboseLogging="1"' + content[start+8:])
                else:
                    start = content.lower().find("</appconfig>")
                    if start >= 0:
                        # No <Network> section - add one before </AppConfig>.
                        f.write(content[:start] + '\t<Network\n\t\tVerboseLogging="1"\n\t>\n\t</Network>\n' + content[start:])
                    else:
                        f.write(content)  # eh ?
                        return False
            # Re-read to confirm the edit actually took effect.
            return self.logging_enabled_in_file(appconf)
        except:
            if __debug__:
                print_exc()
            return False

    def set_callback(self, name, callback):
        # Register a 'Jump' or 'Dock' callback; unknown names are ignored.
        if name in self.callbacks:
            self.callbacks[name] = callback

    def start(self, root):
        """Begin monitoring; returns False if no valid logdir is available."""
        self.root = root
        logdir = config.get('logdir') or self.logdir
        if not self.is_valid_logdir(logdir):
            self.stop()
            return False
        if self.currentdir and self.currentdir != logdir:
            self.stop()
        self.currentdir = logdir

        if not self._logging_enabled(self.currentdir):
            # verbose logging reduces likelihood that Docked/Undocked messages will be delayed
            self._enable_logging(self.currentdir)

        self.root.bind_all('<<MonitorJump>>', self.jump)  # user-generated
        self.root.bind_all('<<MonitorDock>>', self.dock)  # user-generated

        # Set up a watchog observer. This is low overhead so is left running irrespective of whether monitoring is desired.
        # File system events are unreliable/non-existent over network drives on Linux.
        # We can't easily tell whether a path points to a network drive, so assume
        # any non-standard logdir might be on a network drive and poll instead.
        polling = bool(config.get('logdir')) and platform != 'win32'
        if not polling and not self.observer:
            self.observer = Observer()
            self.observer.daemon = True
            self.observer.start()
            atexit.register(self.observer.stop)
        if not self.observed and not polling:
            self.observed = self.observer.schedule(self, self.currentdir)

        # Latest pre-existing logfile - e.g. if E:D is already running. Assumes logs sort alphabetically.
        try:
            logfiles = sorted([x for x in listdir(logdir) if x.startswith('netLog.')])
            self.logfile = logfiles and join(logdir, logfiles[-1]) or None
        except:
            self.logfile = None

        if __debug__:
            print '%s "%s"' % (polling and 'Polling' or 'Monitoring', logdir)
            print 'Start logfile "%s"' % self.logfile

        if not self.running():
            self.thread = threading.Thread(target = self.worker, name = 'netLog worker')
            self.thread.daemon = True
            self.thread.start()

        return True

    def stop(self):
        """Stop monitoring (observer watch removed, worker thread orphaned)."""
        if __debug__:
            print 'Stopping monitoring'
        self.currentdir = None
        if self.observed:
            self.observed = None
            self.observer.unschedule_all()
        self.thread = None  # Orphan the worker thread - will terminate at next poll
        self.last_event = None

    def running(self):
        # True while the worker thread is alive.
        return self.thread and self.thread.is_alive()

    def on_created(self, event):
        # watchdog callback, e.g. client (re)started.
        if not event.is_directory and basename(event.src_path).startswith('netLog.'):
            self.logfile = event.src_path

    def worker(self):
        """Background thread: tail the netLog and raise Tk virtual events."""
        # Tk isn't thread-safe in general.
        # event_generate() is the only safe way to poke the main thread from this thread:
        # https://mail.python.org/pipermail/tkinter-discuss/2013-November/003522.html

        # e.g.:
        # "{18:00:41} System:"Shinrarta Dezhra" StarPos:(55.719,17.594,27.156)ly NormalFlight\r\n"
        # or with verboseLogging:
        # "{17:20:18} System:"Shinrarta Dezhra" StarPos:(55.719,17.594,27.156)ly Body:69 RelPos:(0.334918,1.20754,1.23625)km NormalFlight\r\n"
        # or:
        # "... Supercruise\r\n"
        # Note that system name may contain parantheses, e.g. "Pipe (stem) Sector PI-T c3-5".
        regexp = re.compile(r'\{(.+)\} System:"(.+)" StarPos:\((.+),(.+),(.+)\)ly.* (\S+)')  # (localtime, system, x, y, z, context)

        # e.g.:
        # "{14:42:11} GetSafeUniversalAddress Station Count 1 moved 0 Docked Not Landed\r\n"
        # or:
        # "... Undocked Landed\r\n"
        # Don't use the simpler "Commander Put ..." message since its more likely to be delayed.
        dockre = re.compile(r'\{(.+)\} GetSafeUniversalAddress Station Count \d+ moved \d+ (\S+) ([^\r\n]+)')  # (localtime, docked_status, landed_status)

        docked = False  # Whether we're docked
        updated = False  # Whether we've sent an update since we docked

        # Seek to the end of the latest log file
        logfile = self.logfile
        if logfile:
            loghandle = open(logfile, 'r')
            loghandle.seek(0, SEEK_END)  # seek to EOF
        else:
            loghandle = None

        while True:
            if docked and not updated and not config.getint('output') & config.OUT_MKT_MANUAL:
                self.root.event_generate('<<MonitorDock>>', when="tail")
                updated = True
                if __debug__:
                    print "%s :\t%s %s" % ('Updated', docked and " docked" or "!docked", updated and " updated" or "!updated")

            # Check whether new log file started, e.g. client (re)started.
            if self.observed:
                newlogfile = self.logfile  # updated by on_created watchdog callback
            else:
                # Poll
                try:
                    logfiles = sorted([x for x in listdir(self.currentdir) if x.startswith('netLog.')])
                    newlogfile = logfiles and join(self.currentdir, logfiles[-1]) or None
                except:
                    if __debug__:
                        print_exc()
                    newlogfile = None

            if logfile != newlogfile:
                logfile = newlogfile
                if loghandle:
                    loghandle.close()
                if logfile:
                    loghandle = open(logfile, 'r')
                if __debug__:
                    print 'New logfile "%s"' % logfile

            if logfile:
                system = visited = coordinates = None
                loghandle.seek(0, SEEK_CUR)  # reset EOF flag

                for line in loghandle:
                    match = regexp.match(line)
                    if match:
                        (visited, system, x, y, z, context) = match.groups()
                        if system == 'ProvingGround':
                            system = 'CQC'
                        coordinates = (float(x), float(y), float(z))
                    else:
                        match = dockre.match(line)
                        if match:
                            if match.group(2) == 'Undocked':
                                docked = updated = False
                            elif match.group(2) == 'Docked':
                                docked = True
                                # do nothing now in case the API server is lagging, but update on next poll
                            if __debug__:
                                print "%s :\t%s %s" % (match.group(2), docked and " docked" or "!docked", updated and " updated" or "!updated")

                if system and not docked and config.getint('output'):
                    # Convert local time string to UTC date and time
                    visited_struct = strptime(visited, '%H:%M:%S')
                    now = localtime()
                    if now.tm_hour == 0 and visited_struct.tm_hour == 23:
                        # Crossed midnight between timestamp and poll
                        now = localtime(time()-12*60*60)  # yesterday
                    time_struct = datetime(now.tm_year, now.tm_mon, now.tm_mday, visited_struct.tm_hour, visited_struct.tm_min, visited_struct.tm_sec).timetuple()  # still local time
                    self.last_event = (mktime(time_struct), system, coordinates)
                    self.root.event_generate('<<MonitorJump>>', when="tail")

            sleep(self._POLL)

            # Check whether we're still supposed to be running
            if threading.current_thread() != self.thread:
                return  # Terminate

    def jump(self, event):
        # Called from Tkinter's main loop
        if self.callbacks['Jump'] and self.last_event:
            self.callbacks['Jump'](event, *self.last_event)

    def dock(self, event):
        # Called from Tkinter's main loop
        if self.callbacks['Dock']:
            self.callbacks['Dock'](event)

    def is_valid_logdir(self, path):
        # Public wrapper around the platform-specific check.
        return self._is_valid_logdir(path)

    # Platform-specific implementations are chosen at class-creation time.
    if platform=='darwin':

        def _logdir(self):
            # https://support.frontier.co.uk/kb/faq.php?id=97
            paths = NSSearchPathForDirectoriesInDomains(NSApplicationSupportDirectory, NSUserDomainMask, True)
            if len(paths) and self._is_valid_logdir(join(paths[0], 'Frontier Developments', 'Elite Dangerous', 'Logs')):
                return join(paths[0], 'Frontier Developments', 'Elite Dangerous', 'Logs')
            else:
                return None

        def _is_valid_logdir(self, path):
            # Apple's SMB implementation is too flaky so assume target machine is OSX
            return path and isdir(path) and isfile(join(path, pardir, 'AppNetCfg.xml'))

        def _logging_enabled(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return self.logging_enabled_in_file(join(path, pardir, 'AppConfigLocal.xml'))

        def _enable_logging(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return self.enable_logging_in_file(join(path, pardir, 'AppConfigLocal.xml'))

    elif platform=='win32':

        def _logdir(self):
            # Try locations described in https://support.elitedangerous.com/kb/faq.php?id=108, in reverse order of age
            candidates = []

            # Steam and Steam libraries
            key = HKEY()
            if not RegOpenKeyEx(HKEY_CURRENT_USER, r'Software\Valve\Steam', 0, KEY_READ, ctypes.byref(key)):
                valtype = DWORD()
                valsize = DWORD()
                if not RegQueryValueEx(key, 'SteamPath', 0, ctypes.byref(valtype), None, ctypes.byref(valsize)) and valtype.value == REG_SZ:
                    buf = ctypes.create_unicode_buffer(valsize.value / 2)
                    if not RegQueryValueEx(key, 'SteamPath', 0, ctypes.byref(valtype), buf, ctypes.byref(valsize)):
                        steampath = buf.value.replace('/', '\\')  # For some reason uses POSIX seperators
                        steamlibs = [steampath]
                        try:
                            # Simple-minded Valve VDF parser
                            with open(join(steampath, 'config', 'config.vdf'), 'rU') as h:
                                for line in h:
                                    vals = line.split()
                                    if vals and vals[0].startswith('"BaseInstallFolder_'):
                                        steamlibs.append(vals[1].strip('"').replace('\\\\', '\\'))
                        except:
                            pass
                        for lib in steamlibs:
                            candidates.append(join(lib, 'steamapps', 'common', 'Elite Dangerous', 'Products'))
                RegCloseKey(key)

            # Next try custom installation under the Launcher
            if not RegOpenKeyEx(HKEY_LOCAL_MACHINE, machine().endswith('64') and
                                r'SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall' or  # Assumes that the launcher is a 32bit process
                                r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', 0, KEY_READ, ctypes.byref(key)):
                buf = ctypes.create_unicode_buffer(MAX_PATH)
                i = 0
                while True:
                    size = DWORD(MAX_PATH)
                    if RegEnumKeyEx(key, i, buf, ctypes.byref(size), None, None, None, None):
                        break
                    subkey = HKEY()
                    if not RegOpenKeyEx(key, buf, 0, KEY_READ, ctypes.byref(subkey)):
                        valtype = DWORD()
                        valsize = DWORD((len('Frontier Developments')+1)*2)
                        valbuf = ctypes.create_unicode_buffer(valsize.value / 2)
                        if not RegQueryValueEx(subkey, 'Publisher', 0, ctypes.byref(valtype), valbuf, ctypes.byref(valsize)) and valtype.value == REG_SZ and valbuf.value == 'Frontier Developments':
                            if not RegQueryValueEx(subkey, 'InstallLocation', 0, ctypes.byref(valtype), None, ctypes.byref(valsize)) and valtype.value == REG_SZ:
                                valbuf = ctypes.create_unicode_buffer(valsize.value / 2)
                                if not RegQueryValueEx(subkey, 'InstallLocation', 0, ctypes.byref(valtype), valbuf, ctypes.byref(valsize)):
                                    candidates.append(join(valbuf.value, 'Products'))
                        RegCloseKey(subkey)
                    i += 1
                RegCloseKey(key)

            # Standard non-Steam locations
            programs = ctypes.create_unicode_buffer(MAX_PATH)
            ctypes.windll.shell32.SHGetSpecialFolderPathW(0, programs, CSIDL_PROGRAM_FILESX86, 0)
            # NOTE(review): the trailing comma below makes this statement a
            # 1-tuple expression - harmless, but presumably accidental.
            candidates.append(join(programs.value, 'Frontier', 'Products')),

            applocal = ctypes.create_unicode_buffer(MAX_PATH)
            ctypes.windll.shell32.SHGetSpecialFolderPathW(0, applocal, CSIDL_LOCAL_APPDATA, 0)
            candidates.append(join(applocal.value, 'Frontier_Developments', 'Products'))

            for game in ['elite-dangerous-64', 'FORC-FDEV-D-1']:  # Look for Horizons in all candidate places first
                for base in candidates:
                    if isdir(base):
                        for d in listdir(base):
                            if d.startswith(game) and self._is_valid_logdir(join(base, d, 'Logs')):
                                return join(base, d, 'Logs')

            return None

        def _is_valid_logdir(self, path):
            # Assume target machine is Windows
            return path and isdir(path) and isfile(join(path, pardir, 'AppConfig.xml'))

        def _logging_enabled(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return (self.logging_enabled_in_file(join(path, pardir, 'AppConfigLocal.xml')) or
                        self.logging_enabled_in_file(join(path, pardir, 'AppConfig.xml')))

        def _enable_logging(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return self.enable_logging_in_file(isfile(join(path, pardir, 'AppConfigLocal.xml')) and join(path, pardir, 'AppConfigLocal.xml') or join(path, pardir, 'AppConfig.xml'))

    elif platform=='linux2':

        def _logdir(self):
            return None

        # Assume target machine is Windows
        def _is_valid_logdir(self, path):
            return path and isdir(path) and isfile(join(path, pardir, 'AppConfig.xml'))

        def _logging_enabled(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return (self.logging_enabled_in_file(join(path, pardir, 'AppConfigLocal.xml')) or
                        self.logging_enabled_in_file(join(path, pardir, 'AppConfig.xml')))

        def _enable_logging(self, path):
            if not self._is_valid_logdir(path):
                return False
            else:
                return self.enable_logging_in_file(isfile(join(path, pardir, 'AppConfigLocal.xml')) and join(path, pardir, 'AppConfigLocal.xml') or join(path, pardir, 'AppConfig.xml'))
class FSEventHandler(FileSystemEventHandler):
    """Block until a file with a specific name is created in a folder.

    ``watch()`` starts observing ``folder`` and joins the observer thread;
    ``on_created`` stops the observer once the expected file appears, which
    unblocks ``watch()``.
    """

    def __init__(self, folder, filename):
        self.folder = folder
        self.filename = filename
        self.observer = Observer()

    def on_created(self, event):
        """watchdog callback: stop observing once the target file is created."""
        import os.path  # local import so this block is self-contained
        # BUG FIX: endswith() also matched any longer name sharing the suffix
        # (waiting for 'a.txt' would trigger on 'data.txt'); compare the exact
        # basename instead.
        if not event.is_directory and os.path.basename(event.src_path) == self.filename:
            self.observer.unschedule_all()
            self.observer.stop()

    def watch(self):
        """Start watching and block until the target file is created."""
        self.observer.schedule(self, self.folder, recursive=False)
        self.observer.start()
        self.observer.join()