def _insert_cpv(self, cpv):
    """Insert (or replace) a row for *cpv* in the package table.

    *cpv* is passed through _sfilter first (presumably SQL-escaping --
    TODO confirm against the template class) before being spliced into
    the statement.
    """
    cpv = self._sfilter(cpv)
    try:
        # The schema template is an INSERT; rewriting the first "INSERT"
        # to "REPLACE" makes re-adding an existing cpv clobber the old row
        # instead of failing on a uniqueness constraint.
        self.con.execute(self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) % \
            (self.label, cpv))
    except self._BaseError, e:
        raise cache_errors.CacheCorruption(
            cpv, "tried to insert a cpv, but failed: %s" % str(e))
class database(fs_template.FsBased): complete_eclass_entries = False auxdbkey_order = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI', 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION', 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND', 'PDEPEND', 'PROVIDE') def __init__(self, label, auxdbkeys, **config): super(database, self).__init__(label, auxdbkeys, **config) self._base = os.path.join( self._base, self.label.lstrip(os.path.sep).rstrip(os.path.sep)) if len(self._known_keys) > len(self.auxdbkey_order): raise Exception("less ordered keys then auxdbkeys") if not os.path.exists(self._base): self._ensure_dirs() def __getitem__(self, cpv): d = {} try: myf = open(os.path.join(self._base, cpv), "r") for k, v in zip(self.auxdbkey_order, myf): d[k] = v.rstrip("\n") except (OSError, IOError), e: if isinstance(e, IOError) and e.errno == 2: raise KeyError(cpv) raise cache_errors.CacheCorruption(cpv, e) try: d["_mtime_"] = os.lstat(os.path.join(self._base, cpv)).st_mtime except OSError, e: raise cache_errors.CacheCorruption(cpv, e)
def _delitem(self, cpv): try: os.remove(os.path.join(self._base, cpv)) except OSError, e: if e.errno == 2: raise KeyError(cpv) else: raise cache_errors.CacheCorruption(cpv, e)
def iteritems(self): try: self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s " "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME, self.label)) except self._BaseError, e: raise cache_errors.CacheCorruption(self, cpv, e)
def _getitem(self, cpv): try: self.con.execute( "SELECT key, value FROM %s NATURAL JOIN %s " "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME, self.label, self._sfilter(cpv))) except self._BaseError, e: raise cache_errors.CacheCorruption(self, cpv, e)
def __getitem__(self, cpv): d = {} try: myf = open(os.path.join(self._base, cpv), "r") for k, v in zip(self.auxdbkey_order, myf): d[k] = v.rstrip("\n") except (OSError, IOError), e: if isinstance(e, IOError) and e.errno == 2: raise KeyError(cpv) raise cache_errors.CacheCorruption(cpv, e)
def _setitem(self, cpv, values):
    # NOTE(review): the outer try below has no matching except/finally in
    # this chunk -- its handler (presumably a rollback/commit) is not
    # visible here; confirm against the full file.
    try:
        # insert.
        try:
            pkgid = self._insert_cpv(cpv)
        except self._BaseError, e:
            raise cache_errors.CacheCorruption(cpv, e)

        # __getitem__ fills out missing values,
        # so we store only what's handed to us and is a known key
        db_values = []
        for key in self._known_keys:
            if values.has_key(key):
                db_values.append({"key": key, "value": values[key]})

        if len(db_values) > 0:
            try:
                # pkgid is inlined into the statement text; key/value go
                # through paramstyle placeholders (%(key)s / %(value)s
                # after the %%-unescaping done by the % formatting below).
                self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
                    (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
            except self._BaseError, e:
                raise cache_errors.CacheCorruption(cpv, e)
def _setitem(self, cpv, values):
    # Write to a pid-stamped temp file ".update.<pid>.<pkg>" inside the
    # category directory (everything before the last "/" of cpv).
    s = cpv.rfind("/")
    fp = os.path.join(self._base, cpv[:s],
        ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
    try:
        myf = open(fp, "w")
    except (OSError, IOError), e:
        if e.errno == 2:
            # errno 2 == ENOENT: category dir missing; create and retry once
            try:
                self._ensure_dirs(cpv)
                myf = open(fp, "w")
            except (OSError, IOError), e:
                raise cache_errors.CacheCorruption(cpv, e)
    # NOTE(review): the chunk ends here -- there is no else branch for
    # non-ENOENT open errors (control would fall through with `myf`
    # unbound) and the write/rename of the temp file is not visible.
    # Presumably truncated; confirm against the full file.
def reconstruct_eclasses(cpv, eclass_string):
    """Return a dict when handed a string generated by serialize_eclasses.

    The serialized form is tab-separated triples ``name<TAB>path<TAB>mtime``;
    the result maps ``name -> (path, mtime)``.

    Raises CacheCorruption when the field count is not a multiple of 3.
    """
    # .strip() replaces the old .rstrip().lstrip() chain
    eclasses = eclass_string.strip().split("\t")
    if eclasses == [""]:
        # occasionally this occurs in the fs backends. they suck.
        return {}
    if len(eclasses) % 3 != 0:
        raise cache_errors.CacheCorruption(
            cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
    d = {}
    for x in range(0, len(eclasses), 3):
        # int() auto-promotes to long on overflow in python2, and is the
        # only spelling that also works on python3 (old code used long()).
        d[eclasses[x]] = (eclasses[x + 1], int(eclasses[x + 2]))
    return d
def _delitem(self, cpv):
    """delete a cpv cache entry
    derived RDBM classes for this *must* either support cascaded deletes, or
    override this method"""
    try:
        try:
            # cpv is escaped via _sfilter; label is assumed sanitized at
            # construction time -- TODO confirm.
            self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
                (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
            if self.autocommits:
                self.commit()
        except self._BaseError, e:
            # NOTE(review): this three-argument CacheCorruption(self, cpv, e)
            # disagrees with the two-argument (cpv, message) form used
            # elsewhere in this file -- verify the constructor's signature.
            raise cache_errors.CacheCorruption(self, cpv, e)
        # rowcount 0 => no row matched, i.e. the entry was never cached
        if self.con.rowcount <= 0:
            raise KeyError(cpv)
    # NOTE(review): the handler/finally for the outer try is not visible in
    # this chunk (presumably a rollback or deferred commit); confirm
    # against the full file.
def _getitem(self, cpv):
    """Read cpv's flat cache file into a dict keyed per auxdbkey_order.

    Raises KeyError when the entry does not exist, CacheCorruption on any
    other I/O failure.
    """
    d = {}
    try:
        myf = open(os.path.join(self.location, cpv), "r")
        # one value per line, in the fixed auxdbkey_order layout
        for k, v in zip(self.auxdbkey_order, myf):
            d[k] = v.rstrip("\n")
    except (OSError, IOError), e:
        if isinstance(e, IOError) and e.errno == 2:
            # errno 2 == ENOENT: the entry simply is not cached
            raise KeyError(cpv)
        raise cache_errors.CacheCorruption(cpv, e)
    # NOTE(review): the chunk ends here -- the file is never closed and d
    # is never returned; the remainder (mtime stamping / return) appears
    # truncated from this view.  Confirm against the full file.
class database(fs_template.FsBased): complete_eclass_entries = False auxdbkey_order = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI', 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION', 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND', 'PDEPEND', 'PROVIDE') autocommits = True def __init__(self, *args, **config): if "unused_padding" in config: self.unused_padding = int(config["unused_padding"]) del config["unused_padding"] else: self.unused_padding = 0 super(database, self).__init__(*args, **config) location = self.location self.location = os.path.join(self.location, "metadata/cache") # self.label.lstrip(os.path.sep).rstrip(os.path.sep)) if len(self._known_keys) > len(self.auxdbkey_order): raise Exception("less ordered keys then auxdbkeys") if not os.path.exists(self.location): self._ensure_dirs() self.ec = eclass_cache.cache(location) def __getitem__(self, cpv): d = {} try: myf = open(os.path.join(self.location, cpv), "r") for k, v in zip(self.auxdbkey_order, myf): d[k] = v.rstrip("\n") except (OSError, IOError), e: if isinstance(e, IOError) and e.errno == 2: raise KeyError(cpv) raise cache_errors.CacheCorruption(cpv, e) if "_eclasses_" not in d: if "INHERITED" in d: d["_eclasses_"] = self.ec.get_eclass_data( d["INHERITED"].split(), from_master_only=True) del d["INHERITED"] else: d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"]) try: d["_mtime_"] = os.lstat(os.path.join(self.location, cpv)).st_mtime except OSError, e: raise cache_errors.CacheCorruption(cpv, e)
class database(fs_template.FsBased): autocommits = True # do not screw with this ordering. _eclasses_ needs to be last auxdbkey_order = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI', 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION', 'KEYWORDS', 'IUSE', 'CDEPEND', 'PDEPEND', 'PROVIDE', '_eclasses_') def __init__(self, *args, **config): super(database, self).__init__(*args, **config) self.location = os.path.join( self.location, self.label.lstrip(os.path.sep).rstrip(os.path.sep)) if len(self._known_keys) > len(self.auxdbkey_order) + 2: raise Exception("less ordered keys then auxdbkeys") if not os.path.exists(self.location): self._ensure_dirs() def _getitem(self, cpv): d = {} try: myf = open(os.path.join(self.location, cpv), "r") for k, v in zip(self.auxdbkey_order, myf): d[k] = v.rstrip("\n") except (OSError, IOError), e: if isinstance(e, IOError) and e.errno == 2: # print "caught for %s" % cpv, e # l=os.listdir(os.path.dirname(os.path.join(self.location,cpv))) # l.sort() # print l raise KeyError(cpv) raise cache_errors.CacheCorruption(cpv, e) try: d["_mtime_"] = os.fstat(myf.fileno()).st_mtime except OSError, e: myf.close() raise cache_errors.CacheCorruption(cpv, e)
def _setitem(self, cpv, values):
    """Write *values* for *cpv*: dump to a pid-stamped temp file, stamp its
    mtime, then (below, not visible here) move it into place."""
    # temp file ".update.<pid>.<pkg>" inside the category directory
    # (everything before the last "/" of cpv)
    s = cpv.rfind("/")
    fp = os.path.join(self._base, cpv[:s],
        ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
    try:
        myf = open(fp, "w")
    except (OSError, IOError), e:
        if e.errno == 2:
            # errno 2 == ENOENT: category dir missing; create and retry once
            try:
                self._ensure_dirs(cpv)
                myf = open(fp, "w")
            except (OSError, IOError), e:
                raise cache_errors.CacheCorruption(cpv, e)
        else:
            raise cache_errors.CacheCorruption(cpv, e)
    # one line per key in auxdbkey_order; missing keys become blank lines
    myf.writelines([values.get(x, "") + "\n" for x in self.auxdbkey_order])
    myf.close()
    # stamp the temp file with the entry's mtime before it is moved;
    # NOTE(review): values["_mtime_"] raises KeyError if the caller did not
    # supply it -- confirm callers always do.
    self._ensure_access(fp, mtime=values["_mtime_"])
    # update written. now we move it.
    # NOTE(review): the rename into place is not visible in this chunk.
self.con.execute("SELECT name FROM sqlite_master WHERE type=\"trigger\" AND name=%s" % \ self._sfilter(self.SCHEMA_DELETE_NAME)) if self.con.rowcount == 0: self.con.execute(self.SCHEMA_DELETE_TRIGGER); self.db.commit() except self._BaseError, e: raise cache_errors.InitializationError(self.__class__, e) def _table_exists(self, tbl): """return true/false dependant on a tbl existing""" try: self.con.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % self._sfilter(tbl)) except self._BaseError, e: # XXX crappy. return False return len(self.con.fetchall()) == 1 # we can do it minus a query via rowid. def _insert_cpv(self, cpv): cpv = self._sfilter(cpv) try: self.con.execute(self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) % \ (self.label, cpv)) except self._BaseError, e: raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed: %s" % str(e)) # sums the delete also if self.con.rowcount <= 0 or self.con.rowcount > 2: raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed- %i rows modified" % self.rowcount) return self.con.lastrowid