def _read(self):
    """Re-read Database from the data in the set location.

    This does no locking, with one exception: it will automatically
    migrate an index.yaml to an index.json if possible. This requires
    taking a write lock.
    """
    if os.path.isfile(self._index_path):
        # Read from JSON file if a JSON database exists
        self._read_from_file(self._index_path, format='json')

    elif os.path.isfile(self._old_yaml_index_path):
        if os.access(self._db_dir, os.R_OK | os.W_OK):
            # if we can write, then read AND write a JSON file.
            self._read_from_file(self._old_yaml_index_path, format='yaml')
            with WriteTransaction(self.lock):
                self._write(None, None, None)
        else:
            # Read from the YAML file if we can't find (or write) JSON.
            self._read_from_file(self._old_yaml_index_path, format='yaml')

    else:
        # The file doesn't exist, try to traverse the directory.
        # reindex() takes its own write lock, so no lock here.
        with WriteTransaction(self.lock):
            self._write(None, None, None)
        self.reindex(spack.store.layout)

def write_transaction(self, key):
    """Get a write transaction on a file cache item.

    Returns a WriteTransaction context manager that opens a temporary
    file for writing. Once the context manager finishes, if nothing
    went wrong, moves the file into place on top of the old file
    atomically.
    """
    class WriteContextManager(object):

        def __enter__(cm):  # noqa
            cm.orig_filename = self.cache_path(key)
            cm.orig_file = None
            if os.path.exists(cm.orig_filename):
                cm.orig_file = open(cm.orig_filename, 'r')

            cm.tmp_filename = self.cache_path(key) + '.tmp'
            cm.tmp_file = open(cm.tmp_filename, 'w')

            return cm.orig_file, cm.tmp_file

        def __exit__(cm, type, value, traceback):  # noqa
            if cm.orig_file:
                cm.orig_file.close()
            cm.tmp_file.close()

            if value:
                # remove tmp on exception & raise it
                shutil.rmtree(cm.tmp_filename, True)
            else:
                os.rename(cm.tmp_filename, cm.orig_filename)

    return WriteTransaction(self._get_lock(key), WriteContextManager)

def write_transaction(self, key):
    """Get a write transaction on a file cache item.

    Returns a WriteTransaction context manager that opens a temporary
    file for writing. Once the context manager finishes, if nothing
    went wrong, moves the file into place on top of the old file
    atomically.
    """
    filename = self.cache_path(key)
    if os.path.exists(filename) and not os.access(filename, os.W_OK):
        raise CacheError(
            "Insufficient permissions to write to file cache at {0}"
            .format(filename))

    # TODO: this nested context manager adds a lot of complexity and
    # TODO: is pretty hard to reason about in llnl.util.lock. At some
    # TODO: point we should just replace it with functions and simplify
    # TODO: the locking code.
    class WriteContextManager(object):

        def __enter__(cm):  # noqa
            cm.orig_filename = self.cache_path(key)
            cm.orig_file = None
            if os.path.exists(cm.orig_filename):
                cm.orig_file = open(cm.orig_filename, 'r')

            cm.tmp_filename = self.cache_path(key) + '.tmp'
            cm.tmp_file = open(cm.tmp_filename, 'w')

            return cm.orig_file, cm.tmp_file

        def __exit__(cm, type, value, traceback):  # noqa
            if cm.orig_file:
                cm.orig_file.close()
            cm.tmp_file.close()

            if value:
                # remove tmp on exception & raise it
                shutil.rmtree(cm.tmp_filename, True)
            else:
                rename(cm.tmp_filename, cm.orig_filename)

    return WriteTransaction(
        self._get_lock(key), acquire=WriteContextManager)

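# A minimal usage sketch, not part of the Spack source above: how a caller
# would typically use FileCache.write_transaction(). The `cache`, `key`, and
# `payload` names are hypothetical. The (old, new) pair comes from
# WriteContextManager.__enter__; `old` is None when no cached copy exists yet.
def _update_cache_example(cache, key, payload):
    with cache.write_transaction(key) as (old_file, new_file):
        # new_file is the '.tmp' file; on a clean exit it replaces the
        # original cache file atomically via rename.
        new_file.write(payload)
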
def reindex(self, directory_layout):
    """Build database index from scratch based on a directory layout.

    Locks the DB if it isn't locked already.
    """
    if self.is_upstream:
        raise UpstreamDatabaseLockingError(
            "Cannot reindex an upstream database")

    # Special transaction to avoid recursive reindex calls and to
    # ignore errors if we need to rebuild a corrupt database.
    def _read_suppress_error():
        try:
            if os.path.isfile(self._index_path):
                self._read_from_file(self._index_path)
        except CorruptDatabaseError as e:
            self._error = e
            self._data = {}

    transaction = WriteTransaction(
        self.lock, _read_suppress_error, self._write
    )

    with transaction:
        if self._error:
            tty.warn(
                "Spack database was corrupt. Will rebuild. Error was:",
                str(self._error)
            )
            self._error = None

        old_data = self._data
        try:
            self._construct_from_directory_layout(
                directory_layout, old_data)
        except BaseException:
            # If anything explodes, restore old data, skip write.
            self._data = old_data
            raise

def reindex(self, directory_layout):
    """Build database index from scratch based on a directory layout.

    Locks the DB if it isn't locked already.
    """
    # Special transaction to avoid recursive reindex calls and to
    # ignore errors if we need to rebuild a corrupt database.
    def _read_suppress_error():
        try:
            if os.path.isfile(self._index_path):
                self._read_from_file(self._index_path)
        except CorruptDatabaseError as e:
            self._error = e
            self._data = {}

    transaction = WriteTransaction(
        self.lock, _read_suppress_error, self._write
    )

    with transaction:
        if self._error:
            tty.warn(
                "Spack database was corrupt. Will rebuild. Error was:",
                str(self._error)
            )
            self._error = None

        # Read first the `spec.yaml` files in the prefixes. They should
        # be considered authoritative with respect to DB reindexing, as
        # entries in the DB may be corrupted in a way that still makes
        # them readable. If we considered DB entries authoritative
        # instead, we would perpetuate errors over a reindex.

        old_data = self._data
        try:
            # Initialize data in the reconstructed DB
            self._data = {}

            # Start inspecting the installed prefixes
            processed_specs = set()

            for spec in directory_layout.all_specs():
                # Try to recover explicit value from old DB, but
                # default it to True if DB was corrupt. This is
                # just to be conservative in case a command like
                # "autoremove" is run by the user after a reindex.
                tty.debug(
                    'RECONSTRUCTING FROM SPEC.YAML: {0}'.format(spec))
                explicit = True
                inst_time = os.stat(spec.prefix).st_ctime
                if old_data is not None:
                    old_info = old_data.get(spec.dag_hash())
                    if old_info is not None:
                        explicit = old_info.explicit
                        inst_time = old_info.installation_time

                extra_args = {
                    'explicit': explicit,
                    'installation_time': inst_time
                }
                self._add(spec, directory_layout, **extra_args)

                processed_specs.add(spec)

            for key, entry in old_data.items():
                # We already took care of this spec using
                # `spec.yaml` from its prefix.
                if entry.spec in processed_specs:
                    msg = 'SKIPPING RECONSTRUCTION FROM OLD DB: {0}'
                    msg += ' [already reconstructed from spec.yaml]'
                    tty.debug(msg.format(entry.spec))
                    continue

                # If we arrived here it very likely means that
                # we have external specs that are not dependencies
                # of other specs. This may be the case for externally
                # installed compilers or externally installed
                # applications.
                tty.debug(
                    'RECONSTRUCTING FROM OLD DB: {0}'.format(entry.spec))
                try:
                    layout = spack.store.layout
                    if entry.spec.external:
                        layout = None
                        install_check = True
                    else:
                        install_check = layout.check_installed(entry.spec)

                    if install_check:
                        kwargs = {
                            'spec': entry.spec,
                            'directory_layout': layout,
                            'explicit': entry.explicit,
                            'installation_time': entry.installation_time  # noqa: E501
                        }
                        self._add(**kwargs)
                        processed_specs.add(entry.spec)
                except Exception as e:
                    # Something went wrong, so the spec was not restored
                    # from old data
                    tty.debug(e.message)
                    pass

            self._check_ref_counts()

        except BaseException:
            # If anything explodes, restore old data, skip write.
            self._data = old_data
            raise

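# A minimal usage sketch, assuming a local (non-upstream) Database instance
# `db`: the _read() fallback earlier triggers the same call when no index
# file exists. Entries in a corrupt index.json are discarded and the index
# is rebuilt from the spec.yaml files found in the installed prefixes.
def _rebuild_index_example(db):
    db.reindex(spack.store.layout)
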
def write_transaction(self):
    """Get a write lock context manager for use in a `with` block."""
    return WriteTransaction(self.lock, self._read, self._write)

def write_transaction(self, timeout=_db_lock_timeout):
    """Get a write lock context manager for use in a `with` block."""
    return WriteTransaction(self.lock, self._read, self._write, timeout)

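# A minimal usage sketch, hypothetical rather than taken from the source
# above: Database.write_transaction() wires _read and _write around the lock,
# so state is re-read when the `with` block is entered and persisted when it
# exits cleanly. `db` and `spec` are assumed to exist; _add() is the same
# low-level method reindex() uses above.
def _record_install_example(db, spec):
    with db.write_transaction():
        db._add(spec, spack.store.layout, explicit=True)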