def __init__(self, passwd=None, getPerson=None, host='localhost',
             user='******', database='jira', verbose=False):
    """Connect to the jira MySQL database and set up a Conary repos client.

    Caches for source/owner/person/product lookups start empty and are
    filled lazily by other methods on this object.
    """
    self.getPerson = getPerson
    self.verbose = verbose
    self.sourceMap = {}
    self.sourceOwner = {}
    self.personIdMap = None
    self.productIdMap = None
    # Build the DSN with or without a password component.
    if passwd:
        dsn = "%s:%s@%s/%s" % (user, passwd, host, database)
    else:
        dsn = "%s@%s/%s" % (user, host, database)
    self.db = dbstore.connect(dsn, driver='mysql')
    cfg = conarycfg.ConaryConfiguration()
    client = conaryclient.ConaryClient(cfg)
    self.repos = client.getRepos()
    self.indentWrapper = textwrap.TextWrapper(subsequent_indent=' ',
                                              break_long_words=False)
def __init__(self, passwd=None, getPerson=None, host='localhost',
             user='******', database='jira', verbose=False):
    """Open the jira database connection and a repository client.

    Lookup caches are initialized empty here and populated on demand.
    """
    self.getPerson = getPerson
    self.verbose = verbose
    self.sourceMap = {}
    self.sourceOwner = {}
    self.personIdMap = None
    self.productIdMap = None
    # Include the password in the credentials only when one was given.
    credentials = "%s:%s" % (user, passwd) if passwd else user
    self.db = dbstore.connect("%s@%s/%s" % (credentials, host, database),
                              driver='mysql')
    cfg = conarycfg.ConaryConfiguration()
    client = conaryclient.ConaryClient(cfg)
    self.repos = client.getRepos()
    self.indentWrapper = textwrap.TextWrapper(subsequent_indent=' ',
                                              break_long_words=False)
def create(self, name):
    # Create the per-repository postgres database for `name`.
    # In debug mode an existing database is emptied and reused; in
    # production an existing database is a hard error.
    path = self._getTemplate()[1] % 'postgres'
    db = dbstore.connect(path, self.driver)
    dbName = self.translate(name)
    cu = db.cursor()
    # this check should never be required outside of the test suite,
    # and it could be kind of dangerous being called in production.
    cu.execute("SELECT datname FROM pg_database")
    createDb = True
    if dbName in [x[0] for x in cu.fetchall()]:
        createDb = False
        db.close()
        if self.cfg.debugMode:
            # Drop any lingering references before reconnecting to the
            # existing database (sqlite/pg driver cleanup).
            import gc
            while gc.collect():
                pass
            reposDb = dbstore.connect(
                self._getTemplate()[1] % dbName, self.driver)
            reposDb.loadSchema()
            reposCu = reposDb.cursor()
            # Empty the existing database rather than recreating it.
            for t in reposDb.tempTables:
                reposCu.execute("DROP TABLE %s" % (t,))
            for t in reposDb.tables:
                reposCu.execute("DROP TABLE %s CASCADE" % (t,))
            reposDb.close()
        else:
            raise RepositoryAlreadyExists(name)
    if createDb:
        cu.execute("CREATE DATABASE %s %s" % (dbName, self.tableOpts))
        db.close()
    RepositoryDatabase.create(self, name)
def conaryrc(req):
    """Render a plain-text conaryrc for clients of this rBuilder.

    In proxy mode (no repository database, RBM-273) emit a single
    proxyMap line; in mirror mode emit one repositoryMap line per server
    name found on present labels.
    """
    hostname = req.host
    cfg = req.cfg
    if cfg.mirrorsInGroup:
        return managedConaryrc(req)
    # Strip default ports so the emitted URLs stay canonical.
    if hostname.endswith(':80') or hostname.endswith(':443'):
        hostname = hostname.split(':')[0]
    # Lack of a repository indicates proxy mode (RBM-273)
    # FIX: compare to None with `is`, not `==`.
    if cfg.repositoryDB is None:
        body = "proxyMap * conarys://%s\n" % hostname
    else:
        # Otherwise, we're in mirror mode
        db = dbstore.connect(cfg.repositoryDB[1], driver=cfg.repositoryDB[0])
        cu = db.cursor()
        # Only labels that have at least one present trove are listed.
        cu.execute("""SELECT LABEL FROM Labels WHERE EXISTS ( SELECT labelId FROM LabelMap JOIN Nodes USING(itemId, branchId) JOIN Instances USING(itemId, versionId) WHERE isPresent=1 AND LabelMap.labelId = Labels.labelId )""")
        serverNames = set()
        for label, in cu:
            if '@' not in label:
                continue
            name = label.split('@', 1)[0]
            serverNames.add(name)
        body = ""
        for name in serverNames:
            body += "repositoryMap %s https://%s/conary/\n" % (name, hostname)
    req.response.content_type = 'text/plain'
    req.response.body = body
    return req.response
def main(args): if len(args) != 1: sys.exit("Usage: %s <psql db path>" % (sys.argv[0],)) path = args[0] db = dbstore.connect(path, 'postgresql') db.loadSchema() cu = db.cursor() tableDeps = {} for table in db.tables: if table.lower == 'databaseversion': continue tableDeps[table] = set() for table in db.tables: cu.execute("""select b.relname from pg_catalog.pg_class a join pg_catalog.pg_constraint r on a.oid = r.conrelid left join pg_catalog.pg_class b on r.confrelid = b.oid where a.relname = ? and r.contype = 'f'""", table) tableDeps[table] = set(x[0] for x in cu) ordered = [] while tableDeps: for table, deps in tableDeps.items(): if deps: continue print table ordered.append(table) for otherDeps in tableDeps.values(): otherDeps.discard(table) del tableDeps[table] print repr(ordered)
def getDB(dbfile, create=False):
    """Open the sqlite database at dbfile, optionally creating the schema.

    Asserts the stored schema version matches DB_VERSION.
    """
    conn = dbstore.connect(dbfile, driver="sqlite")
    if create:
        createSchema(conn)
    version = conn.getVersion()
    assert (version == DB_VERSION)
    return conn
def fixtureImageJob(self, cfg):
    """
    ImageJob fixture.

    Creates one basic user ("test"), a project "foo" owned by that user,
    a stub-image build called "Test Build", and a single started image
    job for that build.

    @param cfg: The current effective Mint configuration.
    @return: 2-tuple of (cfg, {'test': <id of user "test">})
    """
    db = dbstore.connect(cfg.dbPath, cfg.dbDriver)
    userId = self.createUser(cfg, db, 'test')
    client = shimclient.ShimMintClient(cfg, ('test', 'testpass'))
    projectId = client.newProject("Foo", "foo", "rpath.org")
    newBuild = client.newBuild(projectId, "Test Build")
    newBuild.setBuildType(buildtypes.STUB_IMAGE)
    stockBuildFlavor(db, newBuild.getId())
    # Kick off the image job; the handle itself is not needed.
    client.startImageJob(newBuild.getId())
    return cfg, {'test': userId}
def connect(self): if self.db: return self.db if self.verbose: print "CONNECT DATABASE", self.driver, self.path self.db = dbstore.connect(self.path, driver = self.driver) return self.db
def getMirrorAcl(self, project, username):
    """
    Given a project and a username, will determine whether or not the
    user has the ability to mirror.
    @param project: the project to check against
    @param username: the user whose credentials should be checked
    @returns: C{True} if C{username} can mirror C{project}; C{False}
    otherwise.

    NOTE(review): the code actually returns the maximum canMirror
    column value across the user's rows, or None when the user does not
    exist — confirm callers treat these as truthy/falsy only.
    """
    # Connect directly to the project's repository database.
    dbCon = project.server._server.projects.reposDB.getRepositoryDB( \
        project.getFQDN())
    db = dbstore.connect(dbCon[1], dbCon[0])
    cu = db.cursor()
    # LEFT JOINs so a user with no group memberships still produces rows.
    cu.execute("""SELECT canMirror FROM Users
        LEFT JOIN UserGroupMembers ON Users.userId = UserGroupMembers.userId
        LEFT JOIN UserGroups ON
            UserGroups.userGroupId = UserGroupMembers.userGroupId
        WHERE Users.username=?""", username)
    try:
        # nonexistent results trigger value error
        canMirror = max([x[0] for x in cu.fetchall()])
    except ValueError:
        # unknown user: no mirror permission
        canMirror = None
    db.close()
    return canMirror
def getDB(dbfile, create=False):
    """Return a sqlite DB handle for dbfile; create the schema if asked.

    The stored version must equal DB_VERSION or an AssertionError is
    raised.
    """
    database = dbstore.connect(dbfile, driver="sqlite")
    if create:
        createSchema(database)
    assert (database.getVersion() == DB_VERSION)
    return database
def testTransaction(self):
    """A transaction open on one connection must lock out a second one."""
    hdb = self.harness.getDB()
    db = hdb.connect()
    db2 = dbstore.connect(hdb.path, hdb.driver)
    db2.transaction()
    # FIX: was `self.assertTrue(sqlerrors.DatabaseLocked, db.transaction)`,
    # which only asserted that the exception class is truthy and never
    # called db.transaction at all; assertRaises actually exercises it.
    self.assertRaises(sqlerrors.DatabaseLocked, db.transaction)
    db2.commit()
    # After the lock holder commits, the first connection may proceed.
    db.transaction()
def getDB(self):
    """Lazily open the repository database and set up its temp tables."""
    if not self._db:
        driver, database = self.cfg.repositoryDB
        self._db = dbstore.connect(database, driver)
        schema.setupTempTables(self._db)
        depSchema.setupTempDepTables(self._db)
    return self._db
def delete(self, name):
    """Drop the repository's MySQL database and remove its leftovers."""
    connectPath = self._getTemplate()[1] % 'mysql'
    dbName = self.translate(name)
    db = dbstore.connect(connectPath, 'mysql')
    db.cursor().execute("DROP DATABASE %s" % dbName)
    # Best-effort cleanup of any on-disk remnants.
    util.rmtree(connectPath + dbName, ignore_errors=True)
def handle(self, *args, **options): dbVersion = models.DatabaseVersion( version=schema.RBUILDER_DB_VERSION.major, minor=schema.RBUILDER_DB_VERSION.minor) dbVersion.save() db = dbstore.connect(settings.DATABASES['default']['NAME'], 'sqlite') version = schema.loadSchema(db, should_migrate=True) print "Migrated rBuilder schema to %s" % version
def action(self):
    """Create or migrate the mint database schema; returns 0."""
    db = dbstore.connect(self.cfg.dbPath, self.cfg.dbDriver)
    if not self.options.create:
        schema.loadSchema(db, self.cfg, self.options.should_migrate)
    else:
        print >> sys.stderr, "Force-creating database schema ..."
        db.loadSchema()
        schema.createSchema(db, cfg=self.cfg)
    return 0
def testSqliteRegexp(self):
    """The sqlite REGEXP operator must be wired up by dbstore."""
    db = dbstore.connect(':memory:', 'sqlite')
    cu = db.cursor()
    cu.execute('create table foo(bar varchar)')
    for stmt in ("insert into foo values ('foo1')",
                 "insert into foo values ('foo2')",
                 "insert into foo values ('foo3')"):
        cu.execute(stmt)
    cu.execute('select * from foo where bar regexp ".*2"')
    assert (cu.fetchall() == [('foo2',)])
def testSqliteRegexp(self):
    """dbstore's sqlite driver should support the REGEXP operator."""
    db = dbstore.connect(':memory:', 'sqlite')
    cu = db.cursor()
    cu.execute('create table foo(bar varchar)')
    cu.execute("insert into foo values ('foo1')")
    cu.execute("insert into foo values ('foo2')")
    cu.execute("insert into foo values ('foo3')")
    cu.execute('select * from foo where bar regexp ".*2"')
    rows = cu.fetchall()
    expected = [('foo2', )]
    assert (rows == expected)
def startRepository(cfg, logger=None):
    # Bootstrap a throwaway conary repository for rmake: configure it,
    # create/migrate its database, add users, then launch the server.
    reposDir = cfg.getReposDir()
    util.mkdirChain(reposDir)
    # Generate and store a random password for the rmake user if not configured
    # with one.
    if not cfg.reposUser:
        passwordFile = reposDir + '/password'
        if os.path.exists(passwordFile):
            password = open(passwordFile).readline().strip()
        else:
            # NOTE(review): randrange(ord('a'), ord('z')) never yields 'z';
            # presumably acceptable for a throwaway password.
            password = ''.join([chr(random.randrange(ord('a'), ord('z')))
                                for x in range(10)])
            open(passwordFile, 'w').write(password + '\n')
            os.chmod(reposDir + '/password', 0700)
        cfg.reposUser.addServerGlob(cfg.reposName, 'rmake', password)
    serverCfg = cny_server.ServerConfig()
    serverCfg.repositoryDB = ('sqlite', cfg.getReposDbPath())
    serverCfg.contentsDir = cfg.getContentsPath()
    serverCfg.port = cfg.getReposInfo()[1]
    serverCfg.configKey('serverName', cfg.reposName)
    # Transfer SSL settings from rMake config object
    if getattr(cny_server, 'SSL', None):
        # The server supports starting in SSL mode
        serverCfg.useSSL = cfg.reposRequiresSsl()
        serverCfg.sslCert = cfg.sslCertPath
        serverCfg.sslKey = cfg.sslCertPath
    elif cfg.reposRequiresSsl():
        raise errors.RmakeError("Tried to start repository at %s, but missing "
            "ssl server library: Please install m2crypto." %
            (cfg.getRepositoryUrl(),))
    (driver, database) = serverCfg.repositoryDB
    db = dbstore.connect(database, driver)
    # Note - this will automatically migrate this repository!
    # Since this is a throwaway repos anyway, I think that's
    # acceptable.
    cny_schema.loadSchema(db, doMigrate=True)
    db.commit()
    db.close()
    user, password = cfg.reposUser.find(cfg.reposName)
    addUser(serverCfg, user, password, write=True)
    if not serverCfg.useSSL:
        # allow anonymous access if we're not securing this repos
        # by using SSL - no use securing access if it's all going to be
        # viewable via tcpdump.
        addUser(serverCfg, 'anonymous', 'anonymous')
    return _startServer(serverCfg, cfg.getReposLogPath(),
                        cfg.getReposConfigPath(), 'repository')
def action(self, fqdn=None):
    # Rebuild the package index: for every external, visible project
    # (optionally restricted to one fqdn), fetch its labels and build a
    # conary repository client for it.
    self.log.info("Updating package index")
    self.db = dbstore.connect(self.cfg.dbPath, driver=self.cfg.dbDriver)
    self.db.connect()
    self.db.loadSchema()
    cu = self.db.cursor()
    labelsTable = projects.LabelsTable(self.db, self.cfg)
    self.db.commit()
    cu = self.db.cursor()
    sql = """SELECT projectId, fqdn,
            EXISTS(SELECT * FROM InboundMirrors
                WHERE projectId=targetProjectId) AS localMirror
        FROM Projects WHERE external AND NOT hidden AND NOT disabled"""
    args = []
    if fqdn:
        sql += " AND fqdn = ?"
        args.append(fqdn)
    cu.execute(sql, args)
    labels = {}
    projectIds = {}
    netclients = {}
    hasErrors = False
    for projectId, hostname, localMirror in cu.fetchall():
        try:
            self.log.info("Retrieving labels from %s...", hostname)
            l, repMap, userMap, entMap = \
                labelsTable.getLabelsForProject(projectId)
            # The repository map key is the authoritative hostname.
            hostname = repMap.keys()[0]
            labels[hostname] = versions.Label(l.keys()[0])
            projectIds[hostname] = projectId
            ccfg = conarycfg.ConaryConfiguration(False)
            ccfg.configLine("proxyMap * conarys://localhost")
            ccfg.root = ccfg.dbPath = ":memory:"
            ccfg.repositoryMap = repMap
            # Credentials/entitlements are only needed for non-mirrored
            # external projects; local mirrors are reachable directly.
            if not localMirror:
                for host, authInfo in userMap:
                    ccfg.user.addServerGlob(host, authInfo[0], authInfo[1])
                for host, entitlement in entMap:
                    ccfg.entitlement.addEntitlement(host, entitlement[1])
            ccfg = helperfuncs.configureClientProxies(
                ccfg, self.cfg.useInternalConaryProxy, self.cfg.proxy,
                self.cfg.getInternalProxies())
            repos = conaryclient.ConaryClient(ccfg).getRepos()
            netclients[hostname] = repos
        except Exception, e:
            # Keep indexing the remaining projects on per-project failure.
            self.log.error("Exception from %s", hostname)
            self.log.error(str(e))
            hasErrors = True
    # NOTE(review): labels/projectIds/netclients/hasErrors are built but
    # not used in the visible portion — this function likely continues
    # beyond this chunk; confirm against the full file.
def newMintCfg(self, name):
    """Build a sqlite-backed mint configuration and load its schema."""
    cfg = FixtureCache.newMintCfg(self, name)
    cfg.dbDriver = 'sqlite'
    cfg.dbPath = os.path.join(cfg.dataPath, 'mintdb')
    reposTemplate = os.path.join(cfg.dataPath, 'repos', '%s', 'sqldb')
    cfg.configLine('database default sqlite ' + reposTemplate)
    from mint.db import schema
    schema.loadSchema(dbstore.connect(cfg.dbPath, cfg.dbDriver), cfg)
    return cfg
def getAdminAcl(self, project, username):
    """Return MAX(admin) across the user's groups in the project's repos DB.

    Connects directly to the project's repository database.
    """
    reposDB = project.server._server.projects.reposDB
    driver, path = reposDB.getRepositoryDB(project.getFQDN())
    cu = dbstore.connect(path, driver).cursor()
    cu.execute("""SELECT MAX(admin) FROM Users
        JOIN UserGroupMembers ON Users.userId = UserGroupMembers.userId
        JOIN UserGroups ON
            UserGroups.userGroupId = UserGroupMembers.userGroupId
        WHERE Users.username=?""", username)
    return cu.fetchone()[0]
def migrate(self):
    # Schema-version-20 migration: build a fresh sqlite database with the
    # new schema, ATTACH it, copy every table across, patch known-bad
    # rows, then swap the new file into place.
    import tempfile
    import os
    from conary import dbstore
    # figure out where the database lives currently
    assert(self.db.driver == 'sqlite')
    dbPath = self.db.database
    assert(isinstance(dbPath, str))
    # make a new database file (same directory so os.rename is atomic)
    fd, fn = tempfile.mkstemp(prefix=os.path.basename(dbPath) + '-new-',
                              dir=os.path.dirname(dbPath))
    os.close(fd)
    newdb = dbstore.connect(fn, driver='sqlite')
    # create the schema in the new db
    newdb.loadSchema()
    createSchema(newdb)
    # make sure we have a good view of the new schema
    newdb.commit()
    newdb.loadSchema()
    cu = self.cu
    # have to commit in order to attach
    self.db.commit()
    cu.execute("ATTACH '%s' AS newdb" % fn, start_transaction=False)
    for t in newdb.tables.keys():
        self.message('Converting database schema to version 20 '
                     '- current table: %s' % t)
        cu.execute('INSERT OR REPLACE INTO newdb.%s '
                   'SELECT * FROM %s' % (t, t))
    # fix up some potentially bad entries we know about
    cu.execute("""UPDATE newdb.TroveInfo SET data='1.0'
        WHERE hex(data)='31' AND infotype=3""")
    cu.execute("""UPDATE newdb.Dependencies SET flag='1.0'
        WHERE name LIKE 'conary:%' AND flag='1'""");
    self.message('Converting database schema to version 20 '
                 '- committing')
    self.db.commit()
    self.message('')
    newdb.close()
    os.chmod(fn, 0644)
    # Keep the old database around, then move the new one into place.
    os.rename(dbPath, dbPath + '-pre-schema-update')
    os.rename(fn, dbPath)
    self.db.reopen()
    self.db.loadSchema()
    return self.Version
def testMigrateAll(self):
    """Migrate a v1 jobs database all the way to the current schema."""
    dbPath = self.workDir + '/jobs.db'
    # FIX: copyfile requires a destination; it was called with only the
    # source archive path, which raises TypeError before the test runs.
    shutil.copyfile(resources.get_archive('jobs.db.v1'), dbPath)
    db = dbstore.connect(dbPath, driver="sqlite", timeout=10000)
    db.loadSchema()
    cu = db.cursor()
    # Pre-migration legacy state codes.
    assert(cu.execute('select state from Jobs where jobId=1').next()[0] == -1)
    assert(cu.execute('select state from Jobs where jobId=3').next()[0] == 99)
    mgr = schema.SchemaManager(db)
    m = schema.Migrator(db, mgr)
    m.migrate(1, schema.SCHEMA_VERSION)
    db.loadSchema()
    # The migration must remap the legacy codes to the current ones.
    assert(cu.execute('select state from Jobs where jobId=1').next()[0] == 1)
    assert(cu.execute('select state from Jobs where jobId=3').next()[0] == 5)
def _openDb(self):
    # Lazily open the mint database and verify its schema version, then
    # (re)build the table cache for this object.
    if not self._db:
        self._db = dbstore.connect(self._cfg.dbPath, self._cfg.dbDriver)
        # Mark the connection as owned by this object so it can be
        # closed automatically later.
        self._autoDb = True
        # check to make sure the schema version is correct
        try:
            schema.checkVersion(self._db)
        except sqlerrors.SchemaVersionError:
            rethrow(mint_error.DatabaseVersionMismatch, False)
    # NOTE(review): block nesting reconstructed from mangled source —
    # the table-cache setup is assumed to run on every call, not only
    # when the connection is first opened; confirm against the original.
    tables = TableCache(self._db, self._cfg)
    self._copyTables(tables)
    # Don't leave a transaction dangling from schema checks/cache setup.
    if self._db.inTransaction(True):
        self._db.commit()
def testDBInstances(self):
    """Exercise add/lookup/delete behavior of DBInstanceTable."""
    conn = dbstore.connect(":memory:", driver="sqlite")
    conn.loadSchema()
    instances = sqldb.DBInstanceTable(conn)
    instances.addId('fred', 1, 2, [1, 2])
    assert (instances[('fred', 1, 2)] == 1)
    assert (instances.getId(1) == ('fred', 1, 2, 1))
    assert (instances.get(('fred', 1, 2), None) == 1)
    instances.addId('wilma', 5, 6, [1, 2])
    assert (instances[('wilma', 5, 6)] == 2)
    instances.delId(2)
    self.assertRaises(KeyError, instances.__getitem__, ('wilma', 5, 6))
    instances.delId(1)
    assert (instances.get(('fred', 1, 2), None) == None)
def getDb(path): if os.path.exists(path): print "Deleting database" os.remove(path) if os.listdir("/srv/rmake-repos/contents/"): print "Deleting contents..." os.system("rm -rf /srv/rmake-repos/contents/*") open(path, 'w') os.chown(path, pwd.getpwnam('apache').pw_uid, pwd.getpwnam('apache').pw_gid) db = dbstore.connect(path, driver='sqlite') schema.loadSchema(db, True) schema.setupTempTables(db) return db
def getWriteAcl(self, project, username):
    """Return MAX(canWrite) for username in the project's repository DB.

    LEFT JOINs keep a row for users without group memberships.
    """
    reposDB = project.server._server.projects.reposDB
    driver, path = reposDB.getRepositoryDB(project.getFQDN())
    cu = dbstore.connect(path, driver).cursor()
    cu.execute("""SELECT MAX(canWrite) FROM Users
        LEFT JOIN UserGroupMembers ON Users.userId = UserGroupMembers.userId
        LEFT JOIN Permissions ON
            Permissions.userGroupId = UserGroupMembers.userGroupId
        WHERE Users.username=?""", username)
    return cu.fetchone()[0]
def testDBInstances(self):
    """DBInstanceTable round-trip: add, index, delete, missing-key."""
    db = dbstore.connect(":memory:", driver="sqlite")
    db.loadSchema()
    table = sqldb.DBInstanceTable(db)
    table.addId('fred', 1, 2, [1, 2])
    for check in (table[('fred', 1, 2)] == 1,
                  table.getId(1) == ('fred', 1, 2, 1),
                  table.get(('fred', 1, 2), None) == 1):
        assert (check)
    table.addId('wilma', 5, 6, [1, 2])
    assert (table[('wilma', 5, 6)] == 2)
    table.delId(2)
    self.assertRaises(KeyError, table.__getitem__, ('wilma', 5, 6))
    table.delId(1)
    assert (table.get(('fred', 1, 2), None) == None)
def testDBTroveFiles(self):
    # Exercise DBTroveFiles: insertion, per-instance lookup, tag
    # iteration, deletion, and the justPresent flag.
    cx = dbstore.connect(":memory:", driver="sqlite")
    cx.loadSchema()
    fs = sqldb.DBTroveFiles(cx)
    cu = cx.cursor()
    fs.addItem(cu, self.id1, 1, "/bin/ls", self.fid1, 11, "abc",
               ["tag1", "tag2"])
    fs.addItem(cu, self.id2, 2, "/bin/cat", self.fid2, 11, "def", ["tag1"])
    fs.addItem(cu, self.id3, 1, "/bin/dd", self.fid3, 12, "tuv", [])
    fs.addItem(cu, self.id4, 2, "/bin/bc", self.fid4, 12, "xyz", [])
    # Indexing by instance id yields (path, stream) pairs.
    assert ([x for x in fs[11]] == [("/bin/ls", "abc"),
                                    ("/bin/cat", "def")])
    assert ([x for x in fs[12]] == [("/bin/dd", "tuv"),
                                    ("/bin/bc", "xyz")])
    assert ([x for x in fs.iterFilesWithTag('tag1')
             ] == ['/bin/cat', '/bin/ls'])
    assert ([x for x in fs.iterFilesWithTag('tag2')] == ['/bin/ls'])
    fs.delInstance(11)
    assert ([x for x in fs[11]] == [])
    # make sure the tags are gone
    assert ([x for x in fs.iterFilesWithTag('tag1')] == [])
    cu = cx.cursor()
    cu.execute('select * from DBFileTags')
    assert ([x for x in cu] == [])
    assert (fs.getFileByFileId(self.fid3, 0) == ("/bin/dd", "tuv"))
    # Unknown file id raises.
    self.assertRaises(KeyError, fs.getFileByFileId, self.fid7, 0)
    assert ([x for x in fs[12]] == [("/bin/dd", "tuv"),
                                    ("/bin/bc", "xyz")])
    fs.removePath(12, "/bin/dd")
    assert ([x for x in fs.getByInstanceId(12)] == [("/bin/bc", "xyz")])
    assert (fs.getFileByFileId(self.fid4, justPresent=False) ==
            ('/bin/bc', 'xyz'))
    # justPresent=False also returns the removed path.
    assert ([x for x in fs.getByInstanceId(12, justPresent=False)
             ] == [("/bin/dd", "tuv"), ("/bin/bc", "xyz")])
    fs.delInstance(12)
    fs.addItem(cu, self.id1, 1, "/bin/ls", self.fid1, 11, "abc", [])
    fs.addItem(cu, self.id2, 2, "/bin/cat", self.fid2, 11, "def", [])
    fs.addItem(cu, self.id3, 1, "/bin/dd", self.fid3, 11, "tuv", [])
    fs.addItem(cu, self.id4, 2, "/bin/bc", self.fid4, 11, "xyz", [])
    assert ([x for x in fs[11]] == [("/bin/ls", "abc"),
                                    ("/bin/cat", "def"),
                                    ("/bin/dd", "tuv"),
                                    ("/bin/bc", "xyz")])
def testDBTroveFiles(self):
    # Duplicate of the DBTroveFiles exercise: insertion, lookup by
    # instance, tag iteration, deletion, and justPresent handling.
    cx = dbstore.connect(":memory:", driver="sqlite")
    cx.loadSchema()
    fs = sqldb.DBTroveFiles(cx)
    cu = cx.cursor()
    fs.addItem(cu, self.id1, 1, "/bin/ls", self.fid1, 11, "abc",
               ["tag1", "tag2"])
    fs.addItem(cu, self.id2, 2, "/bin/cat", self.fid2, 11, "def", ["tag1"])
    fs.addItem(cu, self.id3, 1, "/bin/dd", self.fid3, 12, "tuv", [])
    fs.addItem(cu, self.id4, 2, "/bin/bc", self.fid4, 12, "xyz", [])
    # Instance-id indexing yields (path, stream) pairs in insert order.
    assert([x for x in fs[11]] == [("/bin/ls", "abc"),
                                   ("/bin/cat", "def")])
    assert([x for x in fs[12]] == [("/bin/dd", "tuv"),
                                   ("/bin/bc", "xyz")])
    assert([x for x in fs.iterFilesWithTag('tag1')
            ] == ['/bin/cat', '/bin/ls'])
    assert([x for x in fs.iterFilesWithTag('tag2')] == ['/bin/ls'])
    fs.delInstance(11)
    assert([x for x in fs[11]] == [])
    # make sure the tags are gone
    assert([x for x in fs.iterFilesWithTag('tag1')] == [])
    cu = cx.cursor()
    cu.execute('select * from DBFileTags')
    assert([x for x in cu] == [])
    assert(fs.getFileByFileId(self.fid3, 0) == ("/bin/dd", "tuv"))
    # Unknown file id raises.
    self.assertRaises(KeyError, fs.getFileByFileId, self.fid7, 0)
    assert([x for x in fs[12]] == [("/bin/dd", "tuv"),
                                   ("/bin/bc", "xyz")])
    fs.removePath(12, "/bin/dd")
    assert([x for x in fs.getByInstanceId(12)] == [("/bin/bc", "xyz")])
    assert(fs.getFileByFileId(self.fid4, justPresent = False) ==
           ('/bin/bc', 'xyz'))
    # justPresent=False also reports the removed path.
    assert([x for x in fs.getByInstanceId(12, justPresent = False)] ==
           [("/bin/dd", "tuv"), ("/bin/bc", "xyz")])
    fs.delInstance(12)
    fs.addItem(cu, self.id1, 1, "/bin/ls", self.fid1, 11, "abc", [])
    fs.addItem(cu, self.id2, 2, "/bin/cat", self.fid2, 11, "def", [])
    fs.addItem(cu, self.id3, 1, "/bin/dd", self.fid3, 11, "tuv", [])
    fs.addItem(cu, self.id4, 2, "/bin/bc", self.fid4, 11, "xyz", [])
    assert([x for x in fs[11]] == [("/bin/ls", "abc"),
                                   ("/bin/cat", "def"),
                                   ("/bin/dd", "tuv"),
                                   ("/bin/bc", "xyz")])
def loadFixture(self, name, loadFn=None):
    """
    Load the named fixture for a unit test.

    @param name: the name of the fixture (e.g. "Full")
    @returns: a 2-tuple of (database connection, fixture data dict —
        may be empty); the effective config is stored on self.cfg.
    """
    # reset the cached db connection
    mint.db.database.dbConnection = None
    self.cfg, data = fixtureCache.load(name, loadFn=loadFn)
    conn = dbstore.connect(self.cfg.dbPath, self.cfg.dbDriver)
    # this is so fugly it makes me wanna cry --gafton
    mint.db.database.dbConnection = conn
    mint.db.database.tables = None
    return conn, data
def connect(self, dbName):
    # Connect to the managed postgres instance, retrying while the
    # server is still starting up (up to ~15 seconds).
    if not self.check():
        raise RuntimeError("postmaster is dead")
    start = time.time()
    while True:
        try:
            return dbstore.connect(
                'conary@localhost:%s/%s' % (self.port, dbName),
                'postgresql')
        except sqlerrors.DatabaseError, err:
            # Retry only the transient startup errors; anything else is
            # a real failure and propagates immediately.
            if ('the database system is starting up' in err.msg or
                    'Connection refused' in err.msg):
                if time.time() - start > 15:
                    # Gave up: kill the instance and report the timeout.
                    self.kill()
                    raise RuntimeError("Database did not start")
                time.sleep(0.1)
                continue
            raise
def testMigrateAll(self):
    """Migrating a v1 jobs DB to the current schema remaps state codes."""
    dbPath = self.workDir + '/jobs.db'
    # FIX: shutil.copyfile takes (src, dst); the destination argument was
    # missing, so the call raised TypeError before any assertion ran.
    shutil.copyfile(resources.get_archive('jobs.db.v1'), dbPath)
    db = dbstore.connect(dbPath, driver="sqlite", timeout=10000)
    db.loadSchema()
    cu = db.cursor()
    # Legacy state codes before migration.
    assert (
        cu.execute('select state from Jobs where jobId=1').next()[0] == -1)
    assert (
        cu.execute('select state from Jobs where jobId=3').next()[0] == 99)
    mgr = schema.SchemaManager(db)
    m = schema.Migrator(db, mgr)
    m.migrate(1, schema.SCHEMA_VERSION)
    db.loadSchema()
    # Current state codes after migration.
    assert (
        cu.execute('select state from Jobs where jobId=1').next()[0] == 1)
    assert (
        cu.execute('select state from Jobs where jobId=3').next()[0] == 5)
def create(self, name):
    """Create the repository's MySQL database.

    In debug mode an existing database is dropped and recreated; in
    production an existing database raises RepositoryAlreadyExists.
    """
    db = dbstore.connect(self._getTemplate()[1] % 'mysql', 'mysql')
    dbName = self.translate(name)
    cu = db.cursor()
    # this check should never be required outside of the test suite,
    # and it could be kind of dangerous being called in production.
    # audited for SQL injection
    cu.execute("SHOW DATABASES")
    existing = [row[0] for row in cu.fetchall()]
    if dbName in existing:
        if not self.cfg.debugMode:
            raise RepositoryAlreadyExists(name)
        cu.execute("DROP DATABASE %s" % dbName)
    cu.execute("CREATE DATABASE %s %s" % (dbName, self.tableOpts))
    db.close()
    RepositoryDatabase.create(self, name)
def fixtureEmpty(self, cfg):
    """
    Empty fixture: a mostly blank setup with just two users,
    "test" (no special privileges) and "admin" (admin privileges).

    @param cfg: The current effective Mint configuration.
    @return: 2-tuple of (cfg, {'test': <id>, 'admin': <id>})
    """
    db = dbstore.connect(cfg.dbPath, cfg.dbDriver)
    ids = {'test': self.createUser(cfg, db, "test"),
           'admin': self.createUser(cfg, db, "admin", isAdmin=True)}
    return cfg, ids
def _openClient(self, root): self.conarycfg.root = root # page_size has to be set before the first table is created path = util.joinPaths(root, self.conarycfg.dbPath + "/conarydb") util.mkdirChain(os.path.dirname(path)) db = dbstore.connect(path, driver="sqlite") cu = db.cursor() cu.execute("PRAGMA page_size = 4096") db.commit() cu.execute("VACUUM") db.commit() db.close() # The rest are per-session and apply only to this install job cclient = conaryclient.ConaryClient(self.conarycfg) cclient.db.opJournalPath = None db = cclient.db.db.db cu = db.cursor() cu.execute("PRAGMA cache_size = 200000") cu.execute("PRAGMA journal_mode = MEMORY") db.commit() return cclient
def __init__(self, db=None, driver=None, path=None): cmd.Cmd.__init__(self) # default to .head off self.show_headers = False # default to .mode list self.format = self.format_list # a dictionary of column number: width for manual setting self.manual_widths = {} # use a pager? self.use_pager = False # calculate column widths for column view? self.auto_width = False # display stats: N rows in set (0.00 sec) self.show_stats = True if driver and path: self.db = dbstore.connect(path, driver=driver) elif db: self.db = db else: raise RuntimeError, 'driver and path OR db must be given' self.cu = self.db.cursor()
def _testSlowActionWithStandalone(self, useSSL = False):
    # Test to make sure that slow commits still work even with the
    # proxy keepalive code added (CNY-1341)
    #
    # Forks a standalone repository server whose getChangeSet is wrapped
    # to sleep 7 seconds, then verifies a client can still commit and
    # fetch a trove through it.
    cfg = server.ServerConfig()
    cfg.port = testhelp.findPorts(1)[0]
    cfg.contentsDir = ('legacy', [self.workDir + '/contents'])
    cfg.repositoryDB = ('sqlite', self.workDir + '/serverdb')
    cfg.logFile = self.workDir + '/serverlog'
    cfg.tmpDir = self.workDir + '/tmp'
    cfg.serverName = 'localhost'
    util.mkdirChain(cfg.tmpDir)
    util.mkdirChain(cfg.contentsDir[1][0])
    if useSSL:
        cfg.useSSL = True
        cfg.sslCert = os.path.join(resources.get_archive(), 'ssl-cert.crt')
        cfg.sslKey = os.path.join(resources.get_archive(), 'ssl-cert.key')
    # Create the repository database and a writable 'foo' user.
    (driver, database) = cfg.repositoryDB
    db = dbstore.connect(database, driver)
    schema.loadSchema(db)
    schema.setupTempTables(db)
    auth = netauth.NetworkAuthorization(db, 'localhost')
    auth.addRole('foo')
    auth.addUser('foo', 'foo')
    auth.addRoleMember('foo', 'foo')
    auth.addAcl('foo', None, None, write = True)
    if useSSL:
        proto = "https"
    else:
        proto = "http"
    baseUrl = '%s://localhost:%s/' % (proto, cfg.port)
    pid = os.fork()
    if not pid:
        # Child: run the repository server until killed.
        try:
            netServer = netserver.NetworkRepositoryServer(cfg, baseUrl)
            oldGetChangeSet = netServer.getChangeSet
            @netserver.accessReadOnly
            def getChangeSet(*args, **kw):
                rv = oldGetChangeSet(*args, **kw)
                # make sure the client sends its message
                self.sleep(7)
                return rv
            getChangeSet.im_func = getChangeSet
            netServer.getChangeSet = getChangeSet
            class HttpRequestsSubclass(server.HttpRequests):
                tmpDir = cfg.tmpDir
                netRepos = proxy.SimpleRepositoryFilter(cfg, baseUrl,
                                                        netServer)
                restHandler = None
            HttpRequestsSubclass.cfg = cfg
            if useSSL:
                ctx = server.createSSLContext(cfg)
                httpServer = server.SecureHTTPServer(("", cfg.port),
                                                     HttpRequestsSubclass,
                                                     ctx)
            else:
                httpServer = server.HTTPServer(("", cfg.port),
                                               HttpRequestsSubclass)
            self.captureOutput(server.serve, httpServer)
        finally:
            # Never fall back into the parent's test harness.
            os._exit(0)
    try:
        # Parent: wait for the server, then commit and fetch through it.
        sock_utils.tryConnect("127.0.0.1", cfg.port)
        cfg = conarycfg.ConaryConfiguration(False)
        cfg.repositoryMap = {'localhost' : baseUrl}
        cfg.user.addServerGlob('localhost', 'foo', 'foo')
        client = conaryclient.ConaryClient(cfg)
        repos = client.getRepos()
        trv, cs = self.Component('foo:run', '1')
        repos.commitChangeSet(cs)
        # getTrove will fail because it takes more than 5 seconds
        assert(repos.getTrove(*trv.getNameVersionFlavor()))
    finally:
        os.kill(pid, signal.SIGTERM)
        os.waitpid(pid, 0)
argv=argv) except options.OptionError, msg: print >> sys.stderr, msg sys.exit(1) if "help" in argSet: usage(argv[0]) sys.exit(0) startLogging() if not cfg.check(): raise RuntimeError("configuration file is invalid") (driver, database) = cfg.repositoryDB db = dbstore.connect(database, driver) # if there is no schema or we're asked to migrate, loadSchema dbVersion = db.getVersion() # a more recent major is not compatible if dbVersion.major > schema.VERSION.major: log.error("code base too old for this repository database") log.error("repo=%s code=%s", dbVersion, schema.VERSION) sys.exit(-1) db.close() return (cfg, argSet, otherArgs[1:]) def main(): opts = {} opts["fix"] = options.NO_PARAM cfg, opts, args = getServer(opts)
def __init__(self, driver, db, verbose=True):
    """Open the database, remember the driver, and cache the schema."""
    self.driver = driver
    self.db = dbstore.connect(db, driver)
    self.db.loadSchema()
    self.verbose = verbose
    self._hint = ''
def reopen_fork(self, forked=False):
    """Drop the inherited DB handle and open a fresh one (post-fork)."""
    self.db.close()
    fresh = dbstore.connect(self.cfg.dbPath, self.cfg.dbDriver)
    self.db = fresh
    # The users table must share the new connection.
    self.users.db = fresh
def __init__(self, cfg, db=None):
    """Keep the config, open the mint database, and wrap its Users table.

    NOTE: the db parameter is accepted for interface compatibility but
    not used; a fresh connection is always opened from cfg.
    """
    connection = dbstore.connect(cfg.dbPath, cfg.dbDriver)
    self.cfg = cfg
    self.db = connection
    self.users = self.Users(connection)
def testSqlDataStore(self):
    """Run the generic datastore checks against a sqlite-backed store."""
    conn = dbstore.connect(':memory:', driver='sqlite')
    conn.loadSchema()
    store = SqlDataStore(conn)
    self._testDataStore(store)
def open(self):
    """Open a new connection with a long lock timeout and journal locking."""
    return dbstore.connect(self.dbpath,
                           driver=self.driver,
                           timeout=120000,
                           lockJournal=True)
def startRepository(cfg, fork=True, logger=None):
    # Configure and launch a conary repository for rmake. When fork is
    # True the parent returns the child's pid after pinging it; the
    # child (or the caller, when fork=False) execs server.py.
    global conaryDir
    baseDir = cfg.serverDir
    if logger is None:
        logger = log
    reposDir = '%s/repos' % baseDir
    util.mkdirChain(reposDir)
    # Generate and persist a random password for the rmake user when one
    # is not configured.
    if not cfg.reposUser:
        passwordFile = reposDir + '/password'
        if os.path.exists(passwordFile):
            password = open(passwordFile).readline()[:-1]
        else:
            password = ''.join(
                [chr(random.randrange(ord('a'), ord('z')))
                 for x in range(10)])
            open(passwordFile, 'w').write(password + '\n')
            os.chmod(reposDir + '/password', 0700)
        cfg.reposUser.addServerGlob(cfg.reposName, 'rmake', password)
    # Start from a clean server config file.
    serverConfig = os.path.join(cfg.getReposDir(), 'serverrc')
    if os.path.exists(serverConfig):
        os.unlink(serverConfig)
    serverCfg = server.ServerConfig(
        os.path.join(cfg.getReposDir(), 'serverrc'))
    serverCfg.repositoryDB = ('sqlite', cfg.getReposDbPath())
    serverCfg.contentsDir = cfg.getContentsPath()
    serverCfg.port = cfg.getReposInfo()[1]
    serverCfg.configKey('serverName', cfg.reposName)
    # this works with either
    # 1.0.16 or 1.0.17+
    serverCfg.logFile = cfg.getReposDir() + '/repos.log'
    serverCfg.logFile = None
    # Transfer SSL settings from rMake config object
    if hasattr(server, 'SSL') and server.SSL:
        # The server supports starting in SSL mode
        serverCfg.useSSL = cfg.reposRequiresSsl()
        serverCfg.sslCert = cfg.sslCertPath
        serverCfg.sslKey = cfg.sslCertPath
    elif cfg.reposRequiresSsl():
        raise errors.RmakeError(
            'Tried to start repository at %s, but missing ssl server library: Please install m2crypto' % (cfg.getRepositoryUrl(), ))
    (driver, database) = serverCfg.repositoryDB
    db = dbstore.connect(database, driver)
    # Note - this will automatically migrate this repository!
    # Since this is a throwaway repos anyway, I think that's
    # acceptable.
    compat.ConaryVersion().loadServerSchema(db)
    db.commit()
    db.close()
    user, password = cfg.reposUser.find(cfg.reposName)
    addUser(serverCfg, user, password, write=True)
    if not serverCfg.useSSL:
        # allow anonymous access if we're not securing this repos
        # by using SSL - no use securing access if it's all going to be
        # viewable via tcpdump.
        addUser(serverCfg, 'anonymous', 'anonymous')
    if fork:
        pid = os.fork()
        if pid:
            # Parent: make sure the child came up before returning.
            try:
                pingServer(cfg)
            except:
                killServer(pid)
                raise
            logger.info('Started repository "%s" on port %s (pid %s)' % (
                cfg.reposName, serverCfg.port, pid))
            return pid
        elif hasattr(logger, 'close'):
            logger.close()
    try:
        # Child / non-fork path: write the config and exec the server.
        os.chdir(cfg.getReposDir())
        serverrc = open(cfg.getReposConfigPath(), 'w')
        serverCfg.store(serverrc, includeDocs=False)
        util.mkdirChain(os.path.dirname(cfg.getReposLogPath()))
        logFile = logfile.LogFile(cfg.getReposLogPath())
        logFile.redirectOutput(close=True)
        serverrc.close()
        os.execv('%s/server/server.py' % conaryDir,
                 ['%s/server/server.py' % conaryDir,
                  '--config-file', cfg.getReposConfigPath()])
    except Exception, err:
        print >> sys.stderr, 'Could not start repository server: %s' % err
        os._exit(1)
def testVersion20Migration(self):
    """Migrate a version-19 conary DB to 20 and verify the result.

    Checks table row counts survive the migration, known-bad TroveInfo
    and Dependencies rows are repaired, signatures still verify, '1.0'
    values round-trip, and DatabaseAttributes is recreated if dropped.
    """
    dbfile = os.path.join(resources.get_archive(), 'conarydbs',
                          'conarydb-version-19')
    fd, fn = tempfile.mkstemp()
    os.close(fd)
    shutil.copyfile(dbfile, fn)
    # get a list of tables and their row counts before migrating
    db = dbstore.connect(fn, driver='sqlite')
    db.loadSchema()
    cu = db.cursor()
    tableCounts = dict.fromkeys(db.tables.keys())
    for table in tableCounts.keys():
        tableCounts[table] = cu.execute(
            'select count(*) from %s' % table).fetchall()[0][0]
    # DBInstances is gone...
    tableCounts.pop('DBInstances')
    # we have a VersionId 0 entry now
    tableCounts['Versions'] += 1
    # new table added
    tableCounts['DatabaseAttributes'] = 1
    # do the migration
    # FIX: captured output was bound to the name `str`, shadowing the
    # builtin for the rest of this (long) test method.
    db, output = self.captureOutput(sqldb.Database, fn)
    cu = db.db.cursor()
    # make sure we have all the tables
    db2 = dbstore.connect(fn, driver='sqlite')
    db2.loadSchema()
    cu = db2.cursor()
    tableCounts2 = dict.fromkeys(db2.tables.keys())
    for table in tableCounts2.keys():
        tableCounts2[table] = cu.execute(
            'select count(*) from %s' % table).fetchall()[0][0]
    self.assertEqual(tableCounts, tableCounts2)
    # check to make sure that we fixed our broken deps and troveinfo
    cu.execute("select count(*) from troveinfo where infoType=3 "
               "and hex(data) == '31'")
    assert (cu.next()[0] == 0)
    cu.execute("select count(*) from dependencies where "
               "name like 'conary:%' and flag='1'")
    assert (cu.next()[0] == 0)
    # verify the conary:runtime trove
    v = VersionFromString('/conary.rpath.com@rpl:devel//1/1.0-2-0.1')
    f = deps.parseFlavor('~!bootstrap is: x86')
    t = db.getTroves([('conary:runtime', v, f)])[0]
    t.verifyDigitalSignatures()
    # verify that we can insert a '1.0' into deps and troveinfo
    cu.execute("insert into Dependencies values (NULL, 4, 'test', '1.0')")
    cu.execute("select flag from Dependencies where name='test'")
    assert (cu.next()[0] == '1.0')
    cu.execute("insert into TroveInfo values (300, 3, '1.0')")
    cu.execute("select data from TroveInfo where instanceId=300 and "
               "infotype=3")
    assert (cu.next()[0] == '1.0')
    db.close()
    db2.close()
    # make sure the addition of DatabaseAttributes happens correctly
    db = dbstore.connect(fn, driver='sqlite')
    db.loadSchema()
    self.assertTrue('DatabaseAttributes' in db.tables)
    cu = db.cursor()
    cu.execute("DROP TABLE DatabaseAttributes")
    db.commit()
    db.close()
    # reopening through sqldb must recreate the dropped table
    sdb = sqldb.Database(fn)
    self.assertTrue('DatabaseAttributes' in sdb.db.tables)
    del sdb
    os.unlink(fn)