DEST_FILE = os.path.join(DEST_DIR, data["dest"]["file"])


def copy_resource_to_server(event):
    """Copy SRC_FILE to DEST_FILE."""
    try:
        if event.src_path == SRC_FILE:
            shutil.copyfile(SRC_FILE, DEST_FILE)
    except Exception as ex:
        print(type(ex))
        print(ex)


event_handler = FileSystemEventHandler()
event_handler.on_created = copy_resource_to_server
event_handler.on_modified = copy_resource_to_server

observer = Observer()
observer.schedule(event_handler, SRC_DIR, recursive=False)
observer.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
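# The snippet above patches callbacks onto a bare FileSystemEventHandler
# instance. A minimal, self-contained sketch of the equivalent subclass-based
# style follows; the paths below are placeholders, not taken from the original.
import shutil
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

SRC_DIR = "/tmp/watched"            # placeholder: directory to watch
SRC_FILE = "/tmp/watched/app.cfg"   # placeholder: file to mirror
DEST_FILE = "/tmp/server/app.cfg"   # placeholder: destination


class CopyOnChangeHandler(FileSystemEventHandler):
    """Copy SRC_FILE to DEST_FILE whenever it is created or modified."""

    def on_created(self, event):
        self._copy(event)

    def on_modified(self, event):
        self._copy(event)

    def _copy(self, event):
        if event.src_path == SRC_FILE:
            shutil.copyfile(SRC_FILE, DEST_FILE)


if __name__ == "__main__":
    observer = Observer()
    observer.schedule(CopyOnChangeHandler(), SRC_DIR, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()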
def on_created(self, event):
    FileSystemEventHandler.on_created(self, event)
    src_relpath = os.path.abspath(event.src_path)
    if os.path.isdir(event.src_path):
        self._onchange(src_relpath, FsProvider.DIRECTORY, self.sender.get_uid())
    else:
        self._onchange(src_relpath, FsProvider.FILE, self.sender.get_uid())
def on_modified(self, event):
    '''
    @summary: File modification event.
              For a file, refresh its index entry.
              For a directory, this only means a file inside it changed, so ignore it.
    '''
    if not event.is_directory:
        self.refresh_file(event.src_path)
    FileSystemEventHandler.on_modified(self, event)
def on_created(self, event):
    '''
    @summary: Creation event.
              Add an index entry for the newly created file or directory.
    '''
    if event.is_directory:
        self.watcher.analyze_dir(event.src_path, True)
    else:
        self.watcher.analyze_file(event.src_path)
    FileSystemEventHandler.on_created(self, event)
def run(self) -> None:
    self._logger.info("Started running the server")
    event_handler = FileSystemEventHandler()
    event_handler.on_created = self._process_file_system_event
    event_handler.on_modified = self._process_file_system_event
    observer = Observer()
    observer.schedule(event_handler, self._configuration.upload_directory, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except Exception:
        self._logger.exception(
            "An unexpected exception occurred during watching file changes"
        )
    observer.stop()
    observer.join()
    self._logger.info("Finished running the server")
def watchdog_monitor():
    def on_created(event):
        print(type(event))
        print(event.src_path)
        if event.src_path.endswith(".xls"):
            print("true")
            time.sleep(1)
            confirm(event.src_path)

    with open("tmp/config.json") as f:
        json_f = json.load(f)

    event_handler = FileSystemEventHandler()
    event_handler.on_created = on_created
    path = json_f['defaultPath']

    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def _watch(self, target):
    queue = self._collect(target)
    observer = Observer()

    def on_created(event):
        nonlocal queue
        for matcher in self.matchers:
            matcher.process_file(event.src_path)
        queue = self._collect(target)

    def on_modified(event):
        rule = self.lookup_rule(event.src_path)
        if isinstance(rule, SourceFileRule):
            self._invoke_queue(queue)

    handler = FileSystemEventHandler()
    handler.on_created = on_created
    handler.on_modified = on_modified
    observer.schedule(handler, '.', recursive=True)
    self._invoke_queue(queue)
    observer.start()
    observer.join()
    return True
def track_directories(directories, session, bucket_name):
    global aws_session, bucket
    aws_session = session
    bucket = bucket_name
    observer = Observer()

    # Create event handler and set the function to call when an event occurs.
    event_handler = FileSystemEventHandler()
    event_handler.on_created = upload_file_to_S3

    # Schedule the observer to monitor every directory in the config file.
    for directory in directories:
        observer.schedule(event_handler, directory, recursive=True)
        write_to_log('Scheduled observer for ' + directory)

    # Start the observer.
    observer.start()
    try:
        write_to_log('Beginning to wait for events.')
        # Constantly wait for events.
        while True:
            time.sleep(1)
    # Stop when the user presses Ctrl + C.
    except KeyboardInterrupt:
        write_to_log('Stopping observers...')
        observer.stop()
        observer.join()
        write_to_log('Stopped observers.')
def zest_repro(self, project_cp, test_unit, input_file, log_dir=None,
               run_buggy_version=False):
    # Recreate the log directory if one was requested.
    if log_dir is not None:
        if exists(log_dir):
            shutil.rmtree(log_dir)
        os.mkdir(log_dir)
    if not exists(input_file):
        raise PoracleException(
            'File does not exist: {}'.format(input_file))
    args = '{} {} {}'.format(test_unit.get_cls(), test_unit.get_mthd(),
                             input_file)
    if log_dir is not None:
        args = '--logdir {} {}'.format(log_dir, args)
    if run_buggy_version:
        args = '--run-buggy-version {}'.format(args)
    cmd = 'zest-repro --cp {} {}'.format(project_cp, args)
    if not self.config['verbose']:
        cmd = cmd + ' &>/dev/null'

    if log_dir is not None:
        file_created = threading.Event()
        event_handler = FileSystemEventHandler()

        def on_created(event):
            logger.debug('created: {}'.format(event.src_path))
            if event.src_path.endswith('OUT.log'):
                file_created.set()

        event_handler.on_created = on_created

    code = self.run(cmd)

    # Wait until OUT.log becomes ready.
    if log_dir is not None:
        out_file = join(log_dir, 'OUT.log')
        if not exists(out_file):
            observer = Observer()
            observer.schedule(event_handler, log_dir)
            observer.start()
            file_created.wait(60)
            observer.stop()
        logger.debug(
            'wait for 1 second to make sure file gets ready: {}'.format(out_file))
        time.sleep(1)
        if not exists(out_file):
            logger.warning('{} does not exist'.format(out_file))
        if self.file_empty(out_file):
            logger.warning('{} is empty'.format(out_file))

    if code != 0:
        raise PoracleException(
            'failed to zest-repro with {}'.format(input_file))
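# The method above blocks on a threading.Event until OUT.log appears. A
# stripped-down sketch of that wait-for-file pattern, assuming only watchdog;
# the directory and file name arguments are placeholders.
import os
import threading

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


def wait_for_file(directory, filename, timeout=60):
    """Return True once directory/filename exists, waiting up to timeout seconds."""
    created = threading.Event()

    handler = FileSystemEventHandler()

    def on_created(event):
        if event.src_path.endswith(filename):
            created.set()

    handler.on_created = on_created

    observer = Observer()
    observer.schedule(handler, directory)
    observer.start()
    try:
        # Re-check after starting the observer to close the race where the
        # file was created before watching began.
        if os.path.exists(os.path.join(directory, filename)):
            return True
        return created.wait(timeout)
    finally:
        observer.stop()
        observer.join()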
def fileWatcher():
    observer = Observer()
    eventHandler = FileSystemEventHandler()
    eventHandler.on_created = fileSystemEventHandler
    # eventHandler.on_modified = fileSystemEventHandler
    pName = pkg.components.processComponentMap["processName"]
    observer.schedule(eventHandler,
                      pkg.components.processComponentMap["rules"][pName]["directory"],
                      recursive=True)
    observer.start()
    log.info("File watcher started")
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def main():
    event_handler = FileSystemEventHandler()
    event_handler.on_created = on_change
    event_handler.on_moved = on_change
    event_handler.on_modified = on_change

    observer = Observer()
    observer.schedule(event_handler, str(src / Path("res")), recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def __init__(self, hplayer):
    # Interface settings
    super(KeyboardInterface, self).__init__(hplayer, "Keyboard")

    # Keyboard connection watchdog
    event_handler = FileSystemEventHandler()
    event_handler.on_created = self.bind
    event_handler.on_deleted = self.unbind
    self.observer = Observer()
    self.observer.schedule(event_handler, '/dev/input/', recursive=False)
    self.observer.start()

    self.remote = None
    self.holdDebounce = 5
    self.bind(self.detect())
    print(
        f"{datetime.datetime.now()}: {event.src_path} moved to {event.dest_path}"
    )


if __name__ == "__main__":
    # This block does not run when the module is imported into another script.
    # patterns = "*"
    # ignore_patterns = ""
    # ignore_directories = False
    # case_sensitive = True
    # event_handler = PatternMatchingEventHandler

    # Create a handler that reacts to incoming events. On its own it does
    # nothing, so wire it up to the callbacks defined earlier:
    event_handler = FileSystemEventHandler()
    event_handler.on_created = on_created
    event_handler.on_deleted = on_deleted
    event_handler.on_modified = on_modified
    event_handler.on_moved = on_moved

    path = "dir_to_watch"
    go_recursively = True

    # Create the observer: a one-to-many dependency between objects, so that
    # when one object changes state, all objects depending on it are notified
    # and updated automatically.
    observer = Observer()
    observer.schedule(event_handler, path, recursive=go_recursively)

    # Observer is (indirectly) a subclass of threading.Thread, so after
    # calling start() we get a background thread watching for changes.
    # If the script exits immediately, nothing useful will happen.
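    # The commented-out settings above hint at watchdog's
    # PatternMatchingEventHandler, which filters events by glob pattern instead
    # of requiring checks inside each callback. A minimal sketch, reusing the
    # on_created/on_modified/on_deleted/on_moved callbacks defined above and
    # the same "dir_to_watch" path; the parameter values are illustrative.
    from watchdog.events import PatternMatchingEventHandler

    pattern_handler = PatternMatchingEventHandler(
        patterns=["*"],            # react to every file name
        ignore_patterns=[],        # nothing explicitly ignored
        ignore_directories=False,  # directory events are delivered too
        case_sensitive=True,
    )
    pattern_handler.on_created = on_created
    pattern_handler.on_deleted = on_deleted
    pattern_handler.on_modified = on_modified
    pattern_handler.on_moved = on_moved
    # observer.schedule(pattern_handler, "dir_to_watch", recursive=True)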
            x.bind("<Button-1>", lambda e, url=None: open_url(path))
            # x.pack()
            return True

        if not do():
            break

    tk.pack()


def handleChanges(event):
    global media_files, media_hash
    exts = ['*.png', '*.jpg', '*.jpeg']
    media_files = []
    for ext in exts:
        media_files.extend(glob(media_dir + ext))
    media_files.sort(key=os.path.getmtime)
    media_files.reverse()
    draw()


handleChanges(0)

handler = FileSystemEventHandler()
handler.on_created = handleChanges
handler.on_modified = handleChanges

observer = Observer()
observer.schedule(handler, media_dir)
observer.start()

# tk.pack()
app.mainloop()
class DashParser:
    def __init__(self, user, password, server):
        self.neo = Neo4j(user=user, password=password, server=server)
        self.tenant = None
        self.data_path = Path.home() / ".stormspotter/input"
        self.processed_path = Path.home() / ".stormspotter/processed"
        self.observer = Observer()
        self.event_handler = FileSystemEventHandler()
        self.event_handler.on_created = self.on_created
        self.observer.schedule(self.event_handler, str(self.data_path))
        self.observer.start()
        self.processExistingFiles()

    def on_created(self, event):
        src = Path(event.src_path)
        if zipfile.is_zipfile(src):
            self.parseInputFile(src)
            os.rename(
                src,
                f"{self.processed_path}\\{src.stem}_{time.strftime('%Y%m%d-%H%M%S')}.zip"
            )

    def processExistingFiles(self):
        for file in self.data_path.iterdir():
            self.event_handler.on_created(FileCreatedEvent(str(file)))

    def _parseObject(self, data, fields, objtype):
        obj = {f: data.get(f) for f in fields}
        obj["raw"] = json.dumps(data)
        obj["type"] = objtype
        obj["tags"] = str(data.get("tags"))
        return obj

    def dbSummary(self):
        return self.neo.dbSummary()

    def _parseTenants(self, tenants):
        for tenant in tenants:
            self.tenant = tenant["tenantId"]
            t_fields = [
                "tenantId", "category", "country", "countryCode", "name",
                "domains"
            ]
            t = self._parseObject(tenant, t_fields, TENANT_NODE_LABEL)
            t["subscriptionCount"] = len(tenant["subscriptions"])
            self.neo.insert_asset(t, t["type"], t["tenantId"],
                                  [GENERIC_NODE_LABEL])
            for sub in tenant["subscriptions"]:
                s_fields = ["id", "name", "spendingLimit", "state"]
                s = self._parseObject(sub, s_fields, SUBSCRIPTION_NODE_LABEL)
                s["resourceGroupCount"] = len(sub["resourceGroups"])
                self.neo.insert_asset(s, s["type"], s["id"],
                                      [GENERIC_NODE_LABEL])
                self.neo.create_relationship(t["tenantId"], TENANT_NODE_LABEL,
                                             s["id"], SUBSCRIPTION_NODE_LABEL,
                                             DEFAULT_REL)
                for rg in sub["resourceGroups"]:
                    r_fields = ["id", "name", "location", "managedBy"]
                    r = self._parseObject(rg, r_fields,
                                          RESOURCEGROUP_NODE_LABEL)
                    self.neo.insert_asset(r, r["type"], r["id"],
                                          [GENERIC_NODE_LABEL])
                    self.neo.create_relationship(sub["id"],
                                                 SUBSCRIPTION_NODE_LABEL,
                                                 r["id"],
                                                 RESOURCEGROUP_NODE_LABEL,
                                                 DEFAULT_REL)

    def _parseUsers(self, users):
        for aaduser in users:
            u_fields = [
                "objectId", "userPrincipalName",
                "onPremisesSecurityIdentifier", "lastPasswordChangeDateTime",
                "mail", "accountEnabled", "immutableId", "dirSyncEnabled"
            ]
            user = self._parseObject(aaduser, u_fields, AADUSER_NODE_LABEL)
            user["name"] = aaduser["displayName"]
            self.neo.insert_asset(user, AADOBJECT_NODE_LABEL,
                                  user["objectId"], [AADUSER_NODE_LABEL])

    def _parseGroups(self, groups):
        for aadgroup in groups:
            g_fields = [
                "objectId", "description", "mail", "dirSyncEnabled",
                "securityEnabled", "membershipRule",
                "membershipRuleProcessingState",
                "onPremisesSecurityIdentifier"
            ]
            group = self._parseObject(aadgroup, g_fields, AADGROUP_NODE_LABEL)
            group["name"] = aadgroup["displayName"]
            self.neo.insert_asset(group, AADOBJECT_NODE_LABEL,
                                  group["objectId"], [AADGROUP_NODE_LABEL])
            for member in aadgroup["members"]:
                self.neo.create_relationship(member, AADOBJECT_NODE_LABEL,
                                             group["objectId"],
                                             AADGROUP_NODE_LABEL,
                                             USER_TO_GROUP)
            for owner in aadgroup["owners"]:
                self.neo.create_relationship(owner, AADOBJECT_NODE_LABEL,
                                             group["objectId"],
                                             AADGROUP_NODE_LABEL, "Owns")

    def _parseApplications(self, apps):
        for aadapp in apps:
            a_fields = [
                "objectId", "appId", "homepage", "keyCredentials",
                "passwordCredentials", "publisherDomain"
            ]
            app = self._parseObject(aadapp, a_fields, AADAPP_NODE_LABEL)
            app["name"] = aadapp["displayName"]
            app["passwordCredentialCount"] = len(
                app.pop("passwordCredentials"))
            app["keyCredentialCount"] = len(app.pop("keyCredentials"))
            self.neo.insert_asset(app, AADOBJECT_NODE_LABEL,
                                  app["objectId"], [AADAPP_NODE_LABEL])
            for owner in aadapp["owners"]:
                self.neo.create_relationship(owner, AADOBJECT_NODE_LABEL,
                                             app["objectId"],
                                             AADAPP_NODE_LABEL, "Owns")

    def _parseSPs(self, sps):
        for aadsp in sps:
            sp_fields = [
                "appDisplayName", "objectId", "appId", "accountEnabled",
                "servicePrincipalNames", "homepage", "passwordCredentials",
                "keyCredentials", "appOwnerTenantId", "publisherName",
                "microsoftFirstParty"
            ]
            sp = self._parseObject(aadsp, sp_fields, AADSPN_NODE_LABEL)
            sp["name"] = aadsp["displayName"]
            sp["passwordCredentialCount"] = len(sp.pop("passwordCredentials"))
            sp["keyCredentialCount"] = len(sp.pop("keyCredentials"))
            self.neo.insert_asset(sp, AADOBJECT_NODE_LABEL, sp["objectId"],
                                  [AADSPN_NODE_LABEL])
            for owner in aadsp["owners"]:
                self.neo.create_relationship(owner, AADOBJECT_NODE_LABEL,
                                             sp["objectId"],
                                             AADSPN_NODE_LABEL, "Owns")

    def _parseGeneric(self, asset):
        gen_fields = ["id", "name"]
        gen = self._parseObject(asset, gen_fields, GENERIC_NODE_LABEL)
        self.neo.insert_asset(gen, GENERIC_NODE_LABEL, gen["id"])

    def _parseKeyVaults(self, vault):
        rgroup = vault["id"].split("/providers")[0]
        prop_fields = [
            "vaultUri", "enableRbacAuthorization", "enabledForDeployment",
            "enabledForDiskEncryption", "enableSoftDelete",
            "softDeleteRetentionInDays", "enabledForTemplateDeployment"
        ]
        kv = self._parseObject(vault["properties"], prop_fields,
                               KEYVAULT_NODE_LABEL)
        kv["name"] = vault["name"]
        kv["id"] = vault["id"]
        kv["vaultUri"] = kv["vaultUri"].replace("https://", "").rstrip("/")
        kv["accessPolicyCount"] = len(vault["properties"]["accessPolicies"])
        kv["raw"] = json.dumps(vault)
        self.neo.insert_asset(kv, KEYVAULT_NODE_LABEL, kv["id"],
                              [GENERIC_NODE_LABEL])
        self.neo.create_relationship(rgroup, RESOURCEGROUP_NODE_LABEL,
                                     kv["id"], KEYVAULT_NODE_LABEL,
                                     DEFAULT_REL)
        for policy in vault["properties"]["accessPolicies"]:
            self.neo.create_relationship(
                policy["objectId"], AADOBJECT_NODE_LABEL, kv["id"],
                KEYVAULT_NODE_LABEL, AAD_TO_KV, to_find_type="MATCH",
                relationship_properties=policy["permissions"])

    def _parsePublicIps(self, ip):
        rgroup = ip["id"].split("/providers")[0]
        pip = {}
        pip["name"] = ip["name"]
        pip["id"] = ip["id"]
        pip["type"] = PUBLIC_IP_NODE_LABEL
        pip["publicIPAllocationMethod"] = ip.get("properties", {}).get(
            "publicIPAllocationMethod", "---")
        pip["fqdn"] = ip.get("properties", {}).get("dnsSettings",
                                                   {}).get("fqdn", "---")
        pip["ipAddress"] = ip.get("properties", {}).get("ipAddress", "---")
        pip['raw'] = json.dumps(ip)
        self.neo.insert_asset(pip, PUBLIC_IP_NODE_LABEL, pip["id"],
                              [GENERIC_NODE_LABEL])
        self.neo.create_relationship(rgroup, RESOURCEGROUP_NODE_LABEL,
                                     pip["id"], PUBLIC_IP_NODE_LABEL,
                                     DEFAULT_REL)

    def _parseNSGs(self, nsg):
        rgroup = nsg["id"].split("/providers")[0]
        nsgroup = {}
        nsgroup["name"] = nsg["name"]
        nsgroup["id"] = nsg["id"]
        nsgroup["type"] = NSG_NODE_LABEL
        nsgroup["raw"] = json.dumps(nsg)
        nsgroup["ruleCount"] = len(nsg["properties"]["securityRules"])
        self.neo.insert_asset(nsgroup, NSG_NODE_LABEL, nsgroup["id"])
        for secrule in nsg["properties"]["securityRules"]:
            if secrule["properties"]["access"] == "Allow":
                rule = {}
                rule["name"] = secrule["name"]
                rule["id"] = secrule["id"]
                rule["description"] = secrule["properties"].get(
                    "description", "---")
                rule["direction"] = secrule["properties"].get("direction")
                rule["access"] = "Allow"
                rule["priority"] = secrule["properties"].get("priority")
                rule["protocol"] = json.dumps(
                    secrule["properties"].get("protocol"))
                rule["sourceAddressPrefix"] = secrule["properties"].get(
                    "sourceAddressPrefix")
                rule["sourcePortRange"] = secrule["properties"].get(
                    "sourcePortRange")
                rule["destinationAddressPrefix"] = secrule["properties"].get(
                    "destinationAddressPrefix")
                rule["destinationPortRange"] = secrule["properties"].get(
                    "destinationPortRange")
                rule["type"] = RULE_NODE_LABEL
                rule["raw"] = json.dumps(secrule)
                self.neo.insert_asset(rule, RULE_NODE_LABEL, rule["id"],
                                      [GENERIC_NODE_LABEL])
                self.neo.create_relationship(nsgroup["id"], NSG_NODE_LABEL,
                                             rule["id"], RULE_NODE_LABEL,
                                             DEFAULT_REL)
        if netifs := nsg["properties"].get("networkInterfaces"):
            for ni in netifs:
                self.neo.create_relationship(ni["id"],
                                             NETWORKINTERFACE_NODE_LABEL,
                                             nsg["id"], NSG_NODE_LABEL,
                                             NIC_TO_NSG)
def on_created(self, event):
    FileSystemEventHandler.on_created(self, event)
    # Creation also fires a modification event
    # self.process_event(event.src_path)
    return True
    print(f"{event.src_path} has been created")
    image = event.src_path
    key = generate_key(image)
    upload_image(image, BUCKET, key)
    response_labels = detect_labels(BUCKET, key)
    person_count = get_number_of_persons(response_labels)
    mqclient.publish(camera_count_topic,
                     payload=generate_payload({'count': person_count}))
    print("published data")


mqclient = mqtt.Client()
mqclient.on_connect = on_connect
mqclient.on_message = on_message
mqclient.connect(broker_hostname, broker_port, 60)

patterns = "*"
ignore_patterns = ""
ignore_directories = False
case_sensitive = True

my_event_handler = FileSystemEventHandler()
my_event_handler.on_created = watchdog_on_created

if not os.path.exists(path_to_watch):
    os.makedirs(path_to_watch)

my_observer = Observer()
my_observer.schedule(my_event_handler, path=path_to_watch, recursive=True)
my_observer.start()

mqclient.loop_forever()
    path = event.src_path
    change_type = event.event_type
    class_name = event.__class__.__name__
    if re.match('^Dir', class_name):
        if DEBUG:
            print(' - skipped dir |%s|' % (path))
        return
    return process_change(change_type, path)


if __name__ == '__main__':
    OBSERVER = Observer()
    EVENT_HANDLER = FileSystemEventHandler()
    EVENT_HANDLER.on_created = on_changed
    EVENT_HANDLER.on_modified = on_changed
    EVENT_HANDLER.on_deleted = on_changed
    ## The following event types are not supported
    # EVENT_HANDLER.on_moved = on_changed
    # EVENT_HANDLER.on_any_event = on_changed
    OBSERVER.schedule(EVENT_HANDLER, LOCAL_PATH, recursive=True)
    OBSERVER.start()
    try:
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        OBSERVER.stop()
    try:
        cursor.execute(stmnt, new_record)
    except IE:
        pass

    if event.is_directory:
        pass
    else:
        # Load data from the recording
        load_data()


# Events
recordings_event_handler = FileSystemEventHandler()
recordings_event_handler.on_created = on_created

# Observer
path = "./phone_recordings"
go_recursively = True
recordings_observer = Observer()
recordings_observer.schedule(recordings_event_handler, path,
                             recursive=go_recursively)
recordings_observer.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass
    def on_deleted(self, event):
        # When files are deleted
        pass

    def on_moved(self, event):
        # When files are moved
        pass


if __name__ == "__main__":
    # Initialise file system event handler
    fs_event_handler = FileSystemEventHandler()

    # Wire the handler callbacks to a single Handler instance
    handler = Handler()
    fs_event_handler.on_modified = handler.on_modified
    fs_event_handler.on_created = handler.on_created
    fs_event_handler.on_deleted = handler.on_deleted
    fs_event_handler.on_moved = handler.on_moved

    # Initialise Observer
    observer_fs = Observer()
    observer_fs.schedule(fs_event_handler, path, recursive=False)

    # Start the observer observer_fs
    observer_fs.start()
    try:
        print("")
        print("######## SMRT File Watcher: Active ########")
        print(f"Put your SMRT files here -> {path}...")
        while True:
            time.sleep(5)
    elif filename.endswith('.pdf'):
        folder = 'Books'
    elif filename.endswith('.mp4'):
        folder = 'Videos'
    else:
        folder = 'General'

    create_folder_if_not_exists(folderToTrack + "/" + folder)
    dist = folderToTrack + "/" + folder + "/" + filename
    try:
        shutil.move(src, dist)
    except Exception as ex:
        print("An error happened")
        print(ex)


myHandler = FileSystemEventHandler()
myHandler.on_created = on_created

observer = Observer()
observer.schedule(myHandler, folderToTrack, recursive=True)
observer.start()

try:
    print("Observing")
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()