class StreamingFileObj(object):

    def __init__(self, url, engine, ccancel=None):
        """Create a streaming file object that wraps around a
        transport engine.  This is only necessary if the underlying
        transport doesn't have its own streaming interface and the
        repo operation needs a streaming response."""

        self.__buf = b""
        self.__url = url
        self.__engine = engine
        self.__data_callback_invoked = False
        self.__headers_arrived = False
        self.__httpmsg = None
        self.__headers = {}
        self.__done = False
        self.__check_cancelation = ccancel
        self.__lock = DummyLock()
        self.__uuid = uuidm.uuid4().int
        # Free buffer on exception.  Set to False if caller may
        # read buffer after exception.  Caller should call close()
        # to cleanup afterwards.
        self.free_buffer = True

    def __del__(self):
        release = False
        try:
            if not self.__done:
                if not self.__lock._is_owned():
                    self.__lock.acquire()
                    release = True
                self.__engine.orphaned_request(self.__url, self.__uuid)
        except AttributeError:
            # Ignore attribute error if instance is deleted
            # before initialization completes.
            pass
        finally:
            if release:
                self.__lock.release()

    # File object methods

    def close(self):
        # Caller shouldn't hold the lock when calling this method.
        assert not self.__lock._is_owned()

        if not self.__done:
            self.__lock.acquire()
            try:
                self.__engine.remove_request(self.__url, self.__uuid)
                self.__done = True
            finally:
                self.__lock.release()
        self.__buf = b""
        self.__engine = None
        self.__url = None

    def flush(self):
        """Flush the buffer.  Since this object supports read, but
        not write, this is a no-op."""
        return

    def read(self, size=-1):
        """Read size bytes from the remote connection.  If size isn't
        specified, read all of the data from the remote side."""

        # Caller shouldn't hold the lock when calling this method.
        assert not self.__lock._is_owned()

        if size < 0:
            while self.__fill_buffer():
                # just fill the buffer
                pass

            curdata = self.__buf
            self.__buf = b""
            return curdata
        else:
            curdata = self.__buf
            datalen = len(curdata)
            if datalen >= size:
                self.__buf = curdata[size:]
                return curdata[:size]
            while self.__fill_buffer():
                datalen = len(self.__buf)
                if datalen >= size:
                    break

            curdata = self.__buf
            datalen = len(curdata)
            if datalen >= size:
                self.__buf = curdata[size:]
                return curdata[:size]

            self.__buf = b""
            return curdata

    def readline(self, size=-1):
        """Read a line from the remote host.  If size is specified,
        read to newline or size, whichever is smaller.

        We force the return value to be str here since the caller
        expects str."""

        # Caller shouldn't hold the lock when calling this method.
        assert not self.__lock._is_owned()

        if size < 0:
            curdata = self.__buf
            newline = curdata.find(b"\n")
            if newline >= 0:
                newline += 1
                self.__buf = curdata[newline:]
                return force_str(curdata[:newline])

            while self.__fill_buffer():
                newline = self.__buf.find(b"\n")
                if newline >= 0:
                    break

            curdata = self.__buf
            newline = curdata.find(b"\n")
            if newline >= 0:
                newline += 1
                self.__buf = curdata[newline:]
                return force_str(curdata[:newline])

            self.__buf = b""
            return force_str(curdata)
        else:
            curdata = self.__buf
            newline = curdata.find(b"\n", 0, size)
            datalen = len(curdata)
            if newline >= 0:
                newline += 1
                self.__buf = curdata[newline:]
                return force_str(curdata[:newline])
            if datalen >= size:
                self.__buf = curdata[size:]
                return force_str(curdata[:size])

            while self.__fill_buffer():
                newline = self.__buf.find(b"\n", 0, size)
                datalen = len(self.__buf)
                if newline >= 0:
                    break
                if datalen >= size:
                    break

            curdata = self.__buf
            newline = curdata.find(b"\n", 0, size)
            datalen = len(curdata)
            if newline >= 0:
                newline += 1
                self.__buf = curdata[newline:]
                return force_str(curdata[:newline])
            if datalen >= size:
                self.__buf = curdata[size:]
                return force_str(curdata[:size])

            self.__buf = b""
            return force_str(curdata)

    def readlines(self, sizehint=0):
        """Read lines from the remote host, returning an array of
        the lines that were read.  sizehint specifies an approximate
        size, in bytes, of the total amount of data, as lines, that
        should be returned to the caller."""

        # Caller shouldn't hold the lock when calling this method.
        assert not self.__lock._is_owned()

        read = 0
        lines = []
        while True:
            l = self.readline()
            if not l:
                break
            lines.append(l)
            read += len(l)
            if sizehint and read >= sizehint:
                break

        return lines

    def write(self, data):
        raise NotImplementedError

    def writelines(self, llist):
        raise NotImplementedError

    # Methods that access the callbacks

    def get_write_func(self):
        return self.__write_callback

    def get_header_func(self):
        return self.__header_callback

    def get_progress_func(self):
        return self.__progress_callback

    # Miscellaneous accessors

    def set_lock(self, lock):
        self.__lock = lock

    @property
    def uuid(self):
        return self.__uuid

    # Header and message methods

    @property
    def headers(self):
        if not self.__headers_arrived:
            self.__fill_headers()
        return self.__headers

    def get_http_message(self):
        """Return the status message that may be included with a
        numerical HTTP response code.  Not all HTTP implementations
        are guaranteed to return this value.  In some cases it may
        be None."""
        return self.__httpmsg

    def getheader(self, hdr, default):
        """Return the HTTP header named hdr.  If the hdr isn't
        present, return the default value instead."""
        if not self.__headers_arrived:
            self.__fill_headers()
        return self.__headers.get(hdr.lower(), default)

    def _prime(self):
        """Used by the underlying transport before handing this
        object off to other layers.  It ensures that the object's
        creator can catch errors that occur at connection time.
        All callers must still catch transport exceptions, however."""
        self.__fill_buffer(1)

    # Iterator methods

    def __iter__(self):
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    next = __next__

    # Private methods

    def __fill_buffer(self, size=-1):
        """Call engine.run() to fill the file object's buffer.
        Read until we might block.  If size is specified, stop once
        we get at least size bytes, or might block, whichever comes
        first."""

        engine = self.__engine

        if not engine:
            return False

        self.__lock.acquire()
        while 1:
            if self.__done:
                self.__lock.release()
                return False
            elif not engine.pending:
                # nothing pending means no more transfer
                self.__done = True
                s = engine.check_status([self.__url])
                if s:
                    # Cleanup prior to raising exception
                    self.__lock.release()
                    if self.free_buffer:
                        self.close()
                    raise s[0]

                self.__lock.release()
                return False

            try:
                engine.run()
            except tx.ExcessiveTransientFailure as ex:
                s = engine.check_status([self.__url])
                ex.failures = s
                self.__lock.release()
                if self.free_buffer:
                    self.close()
                raise
            except:
                # Cleanup and close, if exception raised by run.
                self.__lock.release()
                if self.free_buffer:
                    self.close()
                raise

            if size > 0 and len(self.__buf) < size:
                # loop if we need more data in the buffer
                continue
            else:
                # break out of this loop
                break

        self.__lock.release()
        return True

    def __fill_headers(self):
        """Run the transport until headers arrive.  When the data
        callback gets invoked, all headers have arrived.  The
        alternate scenario is when no data arrives, but the server
        isn't providing more input over the network.  In that case,
        the client either received just headers, or had the transfer
        close unexpectedly."""

        while not self.__data_callback_invoked:
            if not self.__fill_buffer():
                # We hit this case if we get headers
                # but no data.
                break

        self.__headers_arrived = True

    def __progress_callback(self, dltot, dlcur, ultot, ulcur):
        """Called by the pycurl/libcurl framework to update progress
        tracking."""
        if self.__check_cancelation and self.__check_cancelation():
            return -1
        return 0

    def __write_callback(self, data):
        """A callback given to the transport engine that writes data
        into a buffer in this object."""
        if not self.__data_callback_invoked:
            self.__data_callback_invoked = True

        # We don't force data to str here because data could be from a
        # gzipped file, which contains gzip magic numbers that can't be
        # decoded as 'utf-8'.
        self.__buf = self.__buf + data

    def __header_callback(self, data):
        """A callback given to the transport engine.  It reads header
        information from the transport.  This function saves the
        message from the http response, as well as a dictionary of
        headers that it can parse."""
        if data.startswith(b"HTTP/"):
            rtup = data.split(None, 2)
            try:
                self.__httpmsg = rtup[2]
            except IndexError:
                pass
        elif data.find(b":") > -1:
            k, v = data.split(b":", 1)
            if v:
                # convert to str as early as we can
                self.__headers[force_str(k.lower())] = \
                    force_str(v.strip())
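
# A minimal usage sketch (not part of the original module): it assumes a
# hypothetical transport 'engine' whose add_url() accepts the write,
# header, and progress callbacks exposed by StreamingFileObj; the real
# engine wiring may differ.
def _example_streaming_read(engine, url):
    """Hypothetical helper: stream a remote resource line by line."""
    fobj = StreamingFileObj(url, engine)
    # The engine drives the transfer through these callbacks; the file
    # object only buffers whatever the callbacks deliver.
    engine.add_url(url,
        writefunc=fobj.get_write_func(),
        header=fobj.get_header_func(),
        progress=fobj.get_progress_func())
    try:
        # Header access triggers a buffer fill until headers arrive;
        # iteration returns str lines decoded via force_str.
        ctype = fobj.getheader("content-type", "unknown")
        lines = [line.rstrip("\n") for line in fobj]
        return ctype, lines
    finally:
        fobj.close()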
class LockFile(object):
    """A class that provides generic lockfile support.  This allows
    Python processes to perform inter-process locking using the
    filesystem."""

    def __init__(self, filepath, set_lockstr=None, get_lockstr=None,
        failure_exc=None, provide_mutex=True):
        """Create a LockFile object.  The 'filepath' argument should
        be the path to the file that will be used as the lockfile.

        The caller may supply the following optional arguments:

        set_lockstr - A function that returns a string.  This is
        called when the lock operation wants to write implementation
        specific text into the lock file.

        get_lockstr - A function that takes a string and returns a
        dictionary.  This function must be able to parse the lock
        string created by set_lockstr.  The dictionary object is
        passed as **kwargs to 'failure_exc' if the lock is
        non-blocking and fails.

        failure_exc - If a non-blocking lock acquisition fails, this
        exception will be raised.  It should allow the caller to
        specify a kwargs argument, but not all invocations will
        provide kwargs.

        provide_mutex - By default, the LockFile object will use a
        mutex to synchronize access for threads in the current
        process.  If the caller is already providing mutual exclusion
        to the LockFile object, this should be set to False."""

        self._fileobj = None
        self._filepath = filepath
        self._set_lockstr = set_lockstr
        self._get_lockstr = get_lockstr
        self._provide_mutex = False
        if failure_exc:
            self._failure_exc = failure_exc
        else:
            self._failure_exc = FileLocked
        if provide_mutex:
            self._lock = nrlock.NRLock()
            self._provide_mutex = True
        else:
            self._lock = DummyLock()

    @property
    def locked(self):
        if self._provide_mutex:
            return self._lock.locked and self._fileobj
        return self._fileobj

    def lock(self, blocking=True):
        """Lock the lockfile, to prevent access from other processes.
        If blocking is False, this method raises an exception, instead
        of blocking, when the lock is already held.  If the lockfile
        cannot be opened, this method may raise an EnvironmentError."""

        #
        # The password locking in cfgfiles.py depends on the behavior
        # of this function, which imitates that of libc's lckpwdf(3C).
        # If this function is changed, it either needs to continue to
        # be compatible with lckpwdf, or changes to cfgfiles.py must
        # be made.
        #

        rval = self._lock.acquire(blocking=int(blocking))

        # Lock acquisition failed.
        if not rval:
            raise self._failure_exc()

        lock_type = fcntl.LOCK_EX
        if not blocking:
            lock_type |= fcntl.LOCK_NB

        # Attempt an initial open of the lock file.
        lf = None

        # Caller should catch EACCES and EROFS.
        try:
            # If the file is a symlink we catch an exception
            # and do not update the file.
            fd = os.open(self._filepath,
                os.O_RDWR|os.O_APPEND|os.O_CREAT|os.O_NOFOLLOW)
            lf = os.fdopen(fd, "ab+")
        except OSError as e:
            self._lock.release()
            if e.errno == errno.ELOOP:
                raise api_errors.UnexpectedLinkError(
                    os.path.dirname(self._filepath),
                    os.path.basename(self._filepath),
                    e.errno)
            raise e
        except:
            self._lock.release()
            raise

        # Attempt to lock the file.
        try:
            fcntl.lockf(lf, lock_type)
        except IOError as e:
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                self._lock.release()
                raise

            # If the lock failed (because it is likely contended),
            # then extract the information about the lock acquirer
            # and raise an exception.
            lock_data = lf.read().strip()
            self._lock.release()
            if self._get_lockstr:
                lock_dict = self._get_lockstr(lock_data)
            else:
                lock_dict = {}
            raise self._failure_exc(**lock_dict)

        # Store information about the lock acquirer and write it.
        try:
            lf.truncate(0)
            lock_str = None
            if self._set_lockstr:
                lock_str = self._set_lockstr()
            if lock_str:
                lf.write(misc.force_bytes(lock_str))
            lf.flush()
            self._fileobj = lf
        except:
            self._fileobj = None
            lf.close()
            self._lock.release()
            raise

    def unlock(self):
        """Unlocks the LockFile."""

        if self._fileobj:
            # To avoid race conditions with the next caller
            # waiting for the lock file, it is simply
            # truncated instead of removed.
            try:
                fcntl.lockf(self._fileobj, fcntl.LOCK_UN)
                self._fileobj.truncate(0)
                self._fileobj.close()
                self._lock.release()
            except EnvironmentError:
                # If fcntl, or the file operations, raised an
                # exception, drop the lock.  Do not catch the
                # exception that could escape from releasing
                # the lock.
                self._lock.release()
                raise
            finally:
                self._fileobj = None
        else:
            if self._provide_mutex:
                assert not self._lock.locked
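
# A minimal usage sketch (not part of the original module), assuming a
# JSON-encoded lock string and a blocking acquisition; the helper name
# and its arguments are hypothetical.  With the default failure_exc
# (FileLocked), the dictionary returned by get_lockstr would be passed
# as **kwargs when a non-blocking acquisition fails.
def _example_hold_lock(path, pid):
    """Hypothetical helper: hold 'path' as a lockfile while working."""
    import json

    def set_lockstr():
        # Written into the lockfile so a contending process can see
        # who currently holds the lock.
        return json.dumps({"pid": pid})

    def get_lockstr(data):
        # Must be able to parse whatever set_lockstr wrote; the result
        # becomes **kwargs for failure_exc on a failed try-lock.
        return json.loads(data) if data else {}

    lf = LockFile(path, set_lockstr=set_lockstr, get_lockstr=get_lockstr)
    lf.lock(blocking=True)
    try:
        pass  # ... perform work that needs exclusive access ...
    finally:
        lf.unlock()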