def getDB(self):
    if self._db:
        return self._db
    (driver, database) = self.cfg.repositoryDB
    self._db = dbstore.connect(database, driver)
    schema.setupTempTables(self._db)
    depSchema.setupTempDepTables(self._db)
    return self._db
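# getDB above memoizes the connection: repeated calls hand back the same
# handle, so the temp tables are set up only once per process. A minimal
# sketch of that lazy-caching pattern, using the stdlib sqlite3 module in
# place of conary's dbstore (class and table names here are illustrative):
import sqlite3

class _CachedDB(object):
    def __init__(self, path):
        self._path = path
        self._db = None

    def getDB(self):
        if self._db:
            return self._db
        self._db = sqlite3.connect(self._path)
        # one-time, per-connection setup, analogous to setupTempTables()
        self._db.execute("create temporary table tmpIds (id integer)")
        return self._db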
def migrate(self):
    logMe(1, "WARNING: this migration takes a LONG time. Do not interrupt!")
    schema.setupTempTables(self.db)
    # migrate FilePaths to a dirnames-based setup
    self._createFilePaths()
    # prefixes will be created by the migration to schema version 17.1
    self.db.analyze("Prefixes")
    return True
def initDB(self):
    from conary.server.schema import setupTempTables
    from conary.local.schema import setupTempDepTables
    db = self.connect()
    setupTempTables(db)
    setupTempDepTables(db)
    db.commit()  # force file creation
    return db
def migrate(self):
    schema.setupTempTables(self.db)
    cu = self.db.cursor()
    # needed for signature recalculation
    repos = trovestore.TroveStore(self.db)
    self.dropViews()
    self.fixRedirects(repos)
    self.fixDuplicatePaths(repos)
    self.fixPermissions()
    self.updateLatest()
    return True
def _connect(self):
    db = self.getDB()
    schema.createSchema(db)
    schema.setupTempTables(db)
    depSchema.setupTempDepTables(db)
    store = trovestore.TroveStore(db)
    auth = netauth.NetworkAuthorization(db, ['localhost'])
    auth.addUser('anonymous', 'anonymous')
    auth.addRole('anonymous')
    auth.addRoleMember('anonymous', 'anonymous')
    auth.addAcl('anonymous', None, None, write=False, remove=False)
    auth.setAdmin('anonymous', False)
    return store
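# Design note: the two None arguments to addAcl above are the trove-name and
# label patterns; passing None for both appears to grant the 'anonymous'
# role read access to every trove on every label, while write, remove, and
# admin rights all stay disabled.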
def migrate2(self):
    # fix the dirnames and basenames column types for postgresql
    cu = self.db.cursor()
    if self.db.driver == 'postgresql':
        logMe(2, "fixing column types for pathfields")
        cu.execute("create table saveDirnames as "
                   "select dirnameId, dirname from Dirnames")
        cu.execute("create table saveBasenames as "
                   "select basenameId, basename from Basenames")
        cu.execute("create table savePrefixes as "
                   "select dirnameId, prefixId from Prefixes")
        self.db.dropForeignKey("FilePaths", "dirnameId")
        self.db.dropForeignKey("FilePaths", "basenameId")
        cu.execute("drop table Prefixes")
        cu.execute("drop table Dirnames")
        cu.execute("drop table Basenames")
        self.db.loadSchema()
        schema.createTroves(self.db, createIndex=False)
        cu.execute("select dirnameId, dirname from saveDirnames")
        self.db.bulkload("Dirnames",
                         ((x[0], cu.binary(x[1])) for x in cu.fetchall()),
                         ["dirnameId", "dirname"])
        cu.execute("select basenameId, basename from saveBasenames")
        self.db.bulkload("Basenames",
                         ((x[0], cu.binary(x[1])) for x in cu.fetchall()),
                         ["basenameId", "basename"])
        cu.execute("insert into Prefixes(dirnameId, prefixId) "
                   "select dirnameId, prefixId from savePrefixes")
        schema.createTroves(self.db, createIndex=True)
        self.db.addForeignKey("FilePaths", "dirnameId", "Dirnames", "dirnameId")
        self.db.addForeignKey("FilePaths", "basenameId", "Basenames", "basenameId")
        cu.execute("drop table saveDirnames")
        cu.execute("drop table saveBasenames")
        cu.execute("drop table savePrefixes")
        self.db.analyze("Dirnames")
        self.db.analyze("Basenames")
        self.db.setAutoIncrement("Dirnames", "dirnameId")
        self.db.setAutoIncrement("Basenames", "basenameId")
    # fix the missing dirnames/prefixes links
    schema.setupTempTables(self.db)
    logMe(2, "looking for missing dirnames/prefixes links")
    cu = self.db.cursor()
    cu.execute("""
        select distinct d.dirnameId, d.dirname
        from Dirnames as d
        join ( select fp.dirnameId as dirnameId
               from FilePaths as fp
               left join Prefixes as p using(dirnameId)
               where p.dirnameId is null ) as dq using(dirnameId)
    """)
    ret = cu.fetchall()
    if ret:
        logMe(2, "fixing missing dirnames/prefixes links in %d dirnames" % (len(ret),))
        trovestore.addPrefixesFromList(self.db, ret)
    self.db.analyze("Prefixes")
    return True
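# migrate2 above fixes column types by round-tripping the data: each table is
# copied aside with "create table ... as select", dropped and recreated with
# the corrected schema, then bulk-loaded back from the copy. A minimal sketch
# of that save/recreate/restore pattern, assuming plain sqlite3 rather than
# conary's db wrapper (table and column names are illustrative):
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("create table Dirnames (dirnameId integer, dirname text)")
db.execute("insert into Dirnames values (1, '/usr/bin')")
# 1. save the rows aside
db.execute("create table saveDirnames as select dirnameId, dirname from Dirnames")
# 2. drop and recreate with the corrected column type
db.execute("drop table Dirnames")
db.execute("create table Dirnames (dirnameId integer, dirname blob)")
# 3. restore the saved rows and drop the scratch copy
db.execute("insert into Dirnames select dirnameId, dirname from saveDirnames")
db.execute("drop table saveDirnames")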
def getDb(path):
    if os.path.exists(path):
        print "Deleting database"
        os.remove(path)
    if os.listdir("/srv/rmake-repos/contents/"):
        print "Deleting contents..."
        os.system("rm -rf /srv/rmake-repos/contents/*")
    open(path, 'w')
    os.chown(path, pwd.getpwnam('apache').pw_uid, pwd.getpwnam('apache').pw_gid)
    db = dbstore.connect(path, driver='sqlite')
    schema.loadSchema(db, True)
    schema.setupTempTables(db)
    return db
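# Note that getDb above shells out to "rm -rf" on a hardcoded path. A safer
# in-process equivalent, assuming the same directory layout (illustrative):
import os
import shutil

contents = "/srv/rmake-repos/contents/"
for entry in os.listdir(contents):
    path = os.path.join(contents, entry)
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)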
def migrate(self):
    logMe(1, "WARNING: this migration takes a LONG time. Do not interrupt!")
    schema.setupTempTables(self.db)
    # migrate FilePaths to a dirnames-based setup
    self._createFilePaths()
    return True
def _testSlowActionWithStandalone(self, useSSL=False):
    # Test to make sure that slow commits still work even with the
    # proxy keepalive code added (CNY-1341)
    cfg = server.ServerConfig()
    cfg.port = testhelp.findPorts(1)[0]
    cfg.contentsDir = ('legacy', [self.workDir + '/contents'])
    cfg.repositoryDB = ('sqlite', self.workDir + '/serverdb')
    cfg.logFile = self.workDir + '/serverlog'
    cfg.tmpDir = self.workDir + '/tmp'
    cfg.serverName = 'localhost'
    util.mkdirChain(cfg.tmpDir)
    util.mkdirChain(cfg.contentsDir[1][0])
    if useSSL:
        cfg.useSSL = True
        cfg.sslCert = os.path.join(resources.get_archive(), 'ssl-cert.crt')
        cfg.sslKey = os.path.join(resources.get_archive(), 'ssl-cert.key')

    (driver, database) = cfg.repositoryDB
    db = dbstore.connect(database, driver)
    schema.loadSchema(db)
    schema.setupTempTables(db)
    auth = netauth.NetworkAuthorization(db, 'localhost')
    auth.addRole('foo')
    auth.addUser('foo', 'foo')
    auth.addRoleMember('foo', 'foo')
    auth.addAcl('foo', None, None, write=True)

    if useSSL:
        proto = "https"
    else:
        proto = "http"
    baseUrl = '%s://localhost:%s/' % (proto, cfg.port)

    pid = os.fork()
    if not pid:
        try:
            netServer = netserver.NetworkRepositoryServer(cfg, baseUrl)
            oldGetChangeSet = netServer.getChangeSet

            @netserver.accessReadOnly
            def getChangeSet(*args, **kw):
                rv = oldGetChangeSet(*args, **kw)
                # make sure the client sends its message
                self.sleep(7)
                return rv
            getChangeSet.im_func = getChangeSet
            netServer.getChangeSet = getChangeSet

            class HttpRequestsSubclass(server.HttpRequests):
                tmpDir = cfg.tmpDir
                netRepos = proxy.SimpleRepositoryFilter(cfg, baseUrl, netServer)
                restHandler = None

            HttpRequestsSubclass.cfg = cfg

            if useSSL:
                ctx = server.createSSLContext(cfg)
                httpServer = server.SecureHTTPServer(("", cfg.port),
                                                     HttpRequestsSubclass, ctx)
            else:
                httpServer = server.HTTPServer(("", cfg.port),
                                               HttpRequestsSubclass)
            self.captureOutput(server.serve, httpServer)
        finally:
            os._exit(0)

    try:
        sock_utils.tryConnect("127.0.0.1", cfg.port)
        cfg = conarycfg.ConaryConfiguration(False)
        cfg.repositoryMap = {'localhost': baseUrl}
        cfg.user.addServerGlob('localhost', 'foo', 'foo')
        client = conaryclient.ConaryClient(cfg)
        repos = client.getRepos()
        trv, cs = self.Component('foo:run', '1')
        repos.commitChangeSet(cs)
        # getTrove will fail because it takes more than 5 seconds
        assert(repos.getTrove(*trv.getNameVersionFlavor()))
    finally:
        os.kill(pid, signal.SIGTERM)
        os.waitpid(pid, 0)
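# The test above relies on a common scaffolding pattern: fork a child to run
# the blocking server loop, have the parent connect and exercise it, and
# unconditionally kill and reap the child in a finally block so a failing
# assertion never leaks the server process. A minimal sketch of just that
# scaffolding (stdlib only; the sleep stands in for the real server loop):
import os
import signal
import time

pid = os.fork()
if not pid:
    try:
        time.sleep(60)          # child: stands in for server.serve(httpServer)
    finally:
        os._exit(0)             # never fall through into the parent's code
try:
    pass                        # parent: connect to the child and run the test
finally:
    os.kill(pid, signal.SIGTERM)
    os.waitpid(pid, 0)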
def _setupDB(self):
    self.openRepository()
    db = self.servers.servers[0].reposDB.connect()
    schema.setupTempTables(db)
    return db
def _setupDB(self):
    db = self.getDB()
    schema.createSchema(db)
    schema.setupTempTables(db)
    return db