def handle_delete_event(self, event):
    """Record a local delete event and forward it to the sender queue.

    Drops the event when the path was never seen, or when the latest
    non-zero revision for the path is already a DELETE (double-delete).
    """
    #make sure that all queued uploads are finished before we send a delete event
    #this ensures that all events stay in-order
    if len(self.to_upload) > 0:
        self.upload()
    #if the file doesn't exist and we're a delete event, just drop it
    res = self.database.execute(
        """SELECT * FROM events WHERE localpath=? LIMIT 1""", (event.path, ))
    exists = next(res, None)
    if exists is None:
        return
    # fetch the most recent committed revision (rev != 0) for this path
    res = self.database.execute(
        """SELECT * FROM events WHERE localpath=? AND rev != 0 ORDER BY rev DESC LIMIT 1""",
        (event.path, ))
    latest = next(res, None)
    if latest is not None:
        e = Event(0)
        e.fromseq(latest)
        if e.type & EventType.DELETE:
            #returning because it was already deleted
            return
    #add event to the database
    # NOTE(review): rev column is inserted as 0, presumably meaning
    # "uncommitted/local" -- confirm against the schema.
    self.database.execute("INSERT INTO events VALUES (0,?,?,?,?,?,?,?)",
                          event.totuple()[1:])
    self.sender_queue.put(event)
def enqueue_delete(self, filepath):
    """Queue a local DELETE event for *filepath*; directories are ignored."""
    if path.isdir(filepath):
        return  # only regular files produce delete events
    evt = Event(EventType.DELETE | EventType.LOCAL)
    evt.path = get_rel_path(filepath)
    evt.time = time() + util.time_diff()
    logging.info("WATCHER " + str(evt))
    # blocking put: wait for the uploader queue to accept the event
    self.uploader_queue.put(evt, True)
def enqueue_delete(self, filepath):
    """Build a DELETE|LOCAL event for a non-directory path and queue it."""
    if not path.isdir(filepath):
        deletion = Event(EventType.DELETE | EventType.LOCAL)
        deletion.path = get_rel_path(filepath)
        # normalize the timestamp with the server clock offset
        deletion.time = time() + util.time_diff()
        logging.info("WATCHER " + str(deletion))
        self.uploader_queue.put(deletion, True)  # block until accepted
def handle_delete_event(self, event):
    """Persist a local delete event and hand it to the sender queue.

    The event is discarded if the path has no history at all, or if its
    newest committed revision is already a DELETE.
    """
    # Flush any pending uploads first so the event stream stays ordered.
    if self.to_upload:
        self.upload()
    # Unknown path: nothing to delete, drop the event.
    known = next(
        self.database.execute(
            """SELECT * FROM events WHERE localpath=? LIMIT 1""",
            (event.path, )), None)
    if known is None:
        return
    # Inspect the newest committed (rev != 0) revision for this path.
    newest = next(
        self.database.execute(
            """SELECT * FROM events WHERE localpath=? AND rev != 0 ORDER BY rev DESC LIMIT 1""",
            (event.path, )), None)
    if newest is not None:
        prior = Event(0)
        prior.fromseq(newest)
        if prior.type & EventType.DELETE:
            return  # already deleted; avoid emitting a duplicate
    # Record the new event locally, then forward it.
    self.database.execute("INSERT INTO events VALUES (0,?,?,?,?,?,?,?)",
                          event.totuple()[1:])
    self.sender_queue.put(event)
def __init__(self, train_pwl: PointWithLabelProducer,
             test_pwl: PointWithLabelProducer, teacher: NetworkTeacher,
             batch_size: int, learning_rate: float,
             optimizer: torch.optim.Optimizer, criterion: 'Loss'):
    """Store training configuration and create the lifecycle event hooks.

    Args:
        train_pwl: producer of training points
        test_pwl: producer of test points
        teacher: drives the network through the data
        batch_size: points per training step
        learning_rate: initial learning rate
        optimizer: torch optimizer to step
        criterion: loss function
    """
    self.train_pwl = train_pwl
    self.test_pwl = test_pwl
    self.teacher = teacher
    self.batch_size = batch_size
    self.learning_rate = learning_rate
    self.optimizer = optimizer
    self.criterion = criterion

    # Plain notification hooks, fired at the matching phase of the loop.
    for hook in ('setup', 'pre_loop', 'post_points', 'pre_train',
                 'post_train', 'finished'):
        setattr(self, hook, Event(hook))

    # Hooks whose handler results are threaded into subsequent handlers.
    self.decay_scheduler = Event(
        'decay_scheduler',
        lambda args, kwargs, result: (True, (args[0], args[1], result), kwargs))
    self.decay = Event(
        'decay', lambda args, kwargs, result: (True, (result, ), kwargs))
    # A truthy handler result stops further 'stopper' handlers.
    self.stopper = Event(
        'stopper', lambda args, kwargs, result: (not result, args, kwargs))
def __init__(self, train_ssp: SeqSeqProducer, test_ssp: SeqSeqProducer,
             teacher: SeqSeqTeacher, batch_size: int, learning_rate: float,
             optimizers: typing.List[torch.optim.Optimizer],
             criterion: 'Loss'):
    """Store seq-to-seq training configuration and build lifecycle hooks.

    Args:
        train_ssp: producer of training sequences
        test_ssp: producer of test sequences
        teacher: drives the network through the sequences
        batch_size: sequences per training step
        learning_rate: initial learning rate
        optimizers: list of torch optimizers to step
        criterion: loss function
    """
    self.train_ssp = train_ssp
    self.test_ssp = test_ssp
    self.teacher = teacher
    self.batch_size = batch_size
    self.learning_rate = learning_rate
    self.optimizers = optimizers
    self.criterion = criterion

    # Simple notification hooks with no result threading.
    simple_hooks = ('setup', 'pre_loop', 'pre_train', 'post_train',
                    'finished')
    for name in simple_hooks:
        setattr(self, name, Event(name))

    # Hooks that feed each handler's result into the next invocation.
    self.decay_scheduler = Event(
        'decay_scheduler',
        lambda args, kwargs, result: (True, (args[0], args[1], result), kwargs))
    self.decay = Event(
        'decay', lambda args, kwargs, result: (True, (result, ), kwargs))
    # A truthy result short-circuits the remaining 'stopper' handlers.
    self.stopper = Event(
        'stopper', lambda args, kwargs, result: (not result, args, kwargs))
def run(self):
    """Walk the sync directory and queue an UPDATE|LOCAL event per file.

    Stops early when self.stopped is set. Files whose modification time
    cannot be read (deleted mid-walk, permission errors) are skipped with
    a warning.
    """
    # Hoist the loop-invariant config lookup: the original re-queried
    # Config() for every file visited.
    syncdir = Config().get("core", "syncdir")
    for root, dirs, files in walk(syncdir):
        if self.stopped:
            return
        for file in files:
            filepath = path.join(root, file)
            relpath = path.relpath(filepath, syncdir)
            try:
                e = Event(EventType.UPDATE | EventType.LOCAL)
                e.path = relpath
                # normalize mtime with the server clock offset
                e.time = path.getmtime(filepath) + util.time_diff()
                self.uploader_queue.put(e, True)
            except OSError:
                logging.warning("Couldn't get a modification time for "
                                + filepath + ". Ignoring file")
def handle_update_event(self, event):
    """Handle a local file-update event.

    Copies the file to a temp name in the cache dir, hashes it, then:
    drops the event if the content matches the latest recorded revision;
    queues (event, tmppath) for batched upload if the content is new; or
    reuses the existing storage key and forwards the event if the content
    was already uploaded.

    NOTE(review): `hash` here must be a project helper returning a
    subscriptable (digest, ...) tuple, not the builtin -- confirm.
    """
    filepath = os.path.join(Config().get("core", "syncdir"), event.path)
    #first, copy the file over to a temporary directory, get its hash,
    #upload it, and then move it to the filename with that hash value
    handle, tmppath = mkstemp(dir=Config().get("core", "cachedir"))
    os.close(handle)  #we don't really want it open, we just want a good name
    try:
        copy2(filepath, tmppath)
        # BUGFIX: stat moved inside the try -- the source file can vanish
        # between copy2 and stat (same race as copy2 itself).
        stats = os.stat(filepath)
    except (IOError, OSError):
        logging.warning("Dropping update event because file was deleted before we could upload it: %s" % (str(event)))
        # BUGFIX: the original leaked the mkstemp file on this path.
        os.remove(tmppath)
        return
    #get the mode of the file
    event.permissions = str(stat.S_IMODE(stats.st_mode))
    #hash the temporary file
    event.hash = hash(tmppath)[0]
    logging.debug("HASHED " + str(event))
    #make sure the most recent version of this file doesn't match this one
    #otherwise it's pointless to re-upload it
    res = self.database.execute(
        """SELECT * FROM events WHERE localpath=? AND rev != 0 ORDER BY rev DESC LIMIT 1""",
        (event.path,))
    latest = next(res, None)
    if latest is not None:
        e = Event(0)
        e.fromseq(latest)
        if e.hash == event.hash:
            #returning because hashes are equal
            #but first, remove the temporary file in the cache
            os.remove(tmppath)
            return
    res = self.database.execute(
        "SELECT * FROM events WHERE hash=? AND rev!=0", (event.hash,))
    sameHash = next(res, None)
    #if this file isn't already uploaded, add it to the list to upload, and
    #upload them in a batch when we have a chance
    if sameHash is None:
        self.to_upload.append((event, tmppath))
        return
    #content already uploaded: reuse the existing storage key
    e = Event(0)
    e.fromseq(sameHash)
    event.storagekey = e.storagekey
    #add event to the database
    self.database.execute("INSERT INTO events VALUES (0,?,?,?,?,?,?,?)",
                          event.totuple()[1:])
    #move tmp file to hash-named file in cache directory
    cachepath = os.path.join(Config().get("core", "cachedir"), event.hash)
    move(tmppath, cachepath)
    self.sender_queue.put(event)
def handle_update_event(self, event): filepath = os.path.join(Config().get("core", "syncdir"), event.path) #first, copy the file over to a temporary directory, get its hash, #upload it, and then move it to the filename with that hash value handle, tmppath = mkstemp(dir=Config().get("core", "cachedir")) os.close( handle) #we don't really want it open, we just want a good name try: copy2(filepath, tmppath) except IOError: logging.warning( "Dropping update event because file was deleted before we could upload it: %s" % (str(event))) return #get the mode of the file stats = os.stat(filepath) event.permissions = str(stat.S_IMODE(stats.st_mode)) #hash the temporary file event.hash = hash(tmppath)[0] logging.debug("HASHED " + str(event)) #make sure the most recent version of this file doesn't match this one #otherwise it's pointless to re-upload it res = self.database.execute( """SELECT * FROM events WHERE localpath=? AND rev != 0 ORDER BY rev DESC LIMIT 1""", (event.path, )) latest = next(res, None) if latest is not None: e = Event(0) e.fromseq(latest) if e.hash == event.hash: #returning because hashes are equal #but first, remove the temporary file in the cache os.remove(tmppath) return res = self.database.execute( "SELECT * FROM events WHERE hash=? AND rev!=0", (event.hash, )) sameHash = next(res, None) #if this file isn't already uploaded, add it to the list to upload, and #upload them in a batch when we have a chance if sameHash is None: self.to_upload.append((event, tmppath)) return e = Event(0) e.fromseq(sameHash) event.storagekey = e.storagekey #add event to the database self.database.execute("INSERT INTO events VALUES (0,?,?,?,?,?,?,?)", event.totuple()[1:]) #move tmp file to hash-named file in cache directory cachepath = os.path.join(Config().get("core", "cachedir"), event.hash) move(tmppath, cachepath) self.sender_queue.put(event)