def get_all_cluster_nodes(con):
    """Return a list of all Nodes in the cluster.

    node.is_master will be None, as this boolean doesn't make sense
    in the context of a cluster rather than a single replication set.
    """
    # No Slony, no cluster.
    if not slony_installed(con):
        return []
    nodes = _get_nodes(con, """
        SELECT DISTINCT
            pa_server AS node_id,
            'node' || pa_server || '_node',
            pa_conninfo AS connection_string,
            NULL
        FROM _sl.sl_path
        ORDER BY node_id
        """)
    if nodes:
        return nodes
    # There are no subscriptions yet, so no paths. Generate the
    # master Node from the node table plus the configured master
    # connection details.
    cur = con.cursor()
    cur.execute("SELECT no_id from _sl.sl_node")
    node_ids = [row[0] for row in cur.fetchall()]
    if not node_ids:
        return []
    assert len(node_ids) == 1, "Multiple nodes but no paths."
    master_node_id = node_ids[0]
    master_connection_string = ConnectionString(
        config.database.rw_main_master)
    master_connection_string.user = '******'
    return [Node(
        master_node_id, 'node%d_node' % master_node_id,
        master_connection_string, True)]
def __init__(self, log, pgbouncer_conn_str, dbname, dbuser):
    """Connect to pgbouncer's admin console and classify backends.

    Every pgbouncer database entry matching `dbname` is probed with
    pg_is_in_recovery(); backends in recovery are recorded in
    self.slaves, the remaining one becomes self.master.

    Exits with status 98 when no master backend is found.
    """
    self.log = log
    pgbouncer_conn_str = ConnectionString(pgbouncer_conn_str)
    # Default to pgbouncer's conventional admin database name, and
    # warn when an unconventional one was configured explicitly.
    if not pgbouncer_conn_str.dbname:
        pgbouncer_conn_str.dbname = 'pgbouncer'
    if pgbouncer_conn_str.dbname != 'pgbouncer':
        log.warn("pgbouncer administrative database not named 'pgbouncer'")
    self.pgbouncer_con = pg_connect(pgbouncer_conn_str)

    self.master_name = None
    self.master = None
    self.slaves = {}

    for entry in self.pgbouncer_cmd('show databases', results=True):
        if entry.database != dbname:
            continue
        backend_conn_str = ConnectionString(
            'dbname=%s port=%s user=%s' % (dbname, entry.port, dbuser))
        if entry.host:
            backend_conn_str.host = entry.host
        backend_con = pg_connect(backend_conn_str)
        backend_cur = backend_con.cursor()
        backend_cur.execute('select pg_is_in_recovery()')
        in_recovery = backend_cur.fetchone()[0]
        if in_recovery is True:
            self.slaves[entry.name] = backend_conn_str
        else:
            self.master_name = entry.name
            self.master = backend_conn_str

    if self.master_name is None:
        log.fatal('No master detected.')
        raise SystemExit(98)
def test_str_with_changes(self):
    """str() must reflect attribute assignments made after parsing."""
    initial = 'dbname=foo host=bar'
    expected = 'dbname=foo user=baz host=blah'
    cs = ConnectionString(initial)
    cs.host = 'blah'
    # Assign the user value the expected string asserts.  The previous
    # placeholder assignment ('******') could never match 'user=baz',
    # so the assertion below was guaranteed to fail.
    cs.user = 'baz'
    self.assertEqual(expected, str(cs))
def test_equality(self):
    """Strings with identical content compare and hash equal."""
    same_a = ConnectionString('dbname=foo host=bar')
    same_b = ConnectionString('dbname=foo host=bar')
    other = ConnectionString('dbname=foo host=baz')
    self.assertEqual(same_a, same_b)
    self.assertNotEqual(same_a, other)
    self.assertNotEqual(same_b, other)
    self.assertEqual(hash(same_a), hash(same_b))
    self.assertNotEqual(hash(same_a), hash(other))
    # Equal instances collapse to a single entry in a set.
    self.assertContentEqual([same_a, other], set([same_a, same_b, other]))
def connect_string(user=None, dbname=None):
    """Return a PostgreSQL connection string.

    Allows you to pass the generated connection details to external
    programs like pg_dump or embed in slonik scripts.
    """
    # We must connect to the read-write DB here, so we use rw_main_master
    # directly.
    from lp.services.database.postgresql import ConnectionString
    details = ConnectionString(dbconfig.rw_main_master)
    # Apply only the overrides the caller actually supplied.
    for attribute, value in (('user', user), ('dbname', dbname)):
        if value is not None:
            setattr(details, attribute, value)
    return str(details)
def _init_db(self, isolation):
    """Initialize the database transaction.

    Can be overridden for testing purposes.
    """
    effective_user = self.dbuser
    if effective_user is None:
        # No explicit user: fall back to the one embedded in the main
        # master connection string, then to the configured default.
        main_master = ConnectionString(dbconfig.main_master)
        effective_user = main_master.user or dbconfig.dbuser
    dbconfig.override(dbuser=effective_user, isolation_level=isolation)
    self.txn = transaction
def raw_connect(self):
    """Build the session-database DSN, connect, and force autocommit."""
    session_config = config.launchpad_session
    if session_config.database is not None:
        details = ConnectionString(session_config.database)
        details.user = session_config.dbuser
        self._dsn = str(details)
    else:
        # This is fallback code for old config files. It can be
        # removed when all live configs have been updated to use the
        # 'database' setting instead of 'dbname' + 'dbhost' settings.
        self._dsn = 'dbname=%s user=%s' % (session_config.dbname,
            session_config.dbuser)
        if session_config.dbhost:
            self._dsn += ' host=%s' % session_config.dbhost
    commit_flags = _get_dirty_commit_flags()
    connection = super(LaunchpadSessionDatabase, self).raw_connect()
    if safe_hasattr(connection, 'auto_close'):
        connection.auto_close = False
    connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    _reset_dirty_commit_flags(*commit_flags)
    return connection
def update_db_config(**kw):
    """Push a new [database] config section with updated connection details.

    Each keyword argument names a ConnectionString attribute (dbname,
    host, port, user, ...) applied to both the rw_main_master and
    rw_main_slave connection strings.
    """
    section_lines = ["[database]"]
    for key in ('rw_main_master', 'rw_main_slave'):
        details = ConnectionString(getattr(config.database, key))
        for attribute, value in kw.items():
            setattr(details, attribute, value)
        section_lines.append("%s: %s" % (key, str(details)))
    config.push('update_db_config', '\n'.join(section_lines))
def raw_connect(self):
    """Open the raw session-database connection.

    Builds self._dsn from the launchpad_session config section,
    connects via the parent class, and switches the connection to
    autocommit isolation.
    """
    if config.launchpad_session.database is not None:
        dsn = ConnectionString(config.launchpad_session.database)
        dsn.user = config.launchpad_session.dbuser
        self._dsn = str(dsn)
    else:
        # This is fallback code for old config files. It can be
        # removed when all live configs have been updated to use the
        # 'database' setting instead of 'dbname' + 'dbhost' settings.
        self._dsn = 'dbname=%s user=%s' % (
            config.launchpad_session.dbname,
            config.launchpad_session.dbuser)
        if config.launchpad_session.dbhost:
            self._dsn += ' host=%s' % config.launchpad_session.dbhost
    # Preserve the dirty-commit flags across the connect, which may
    # itself issue statements.
    flags = _get_dirty_commit_flags()
    raw_connection = super(LaunchpadSessionDatabase, self).raw_connect()
    if safe_hasattr(raw_connection, 'auto_close'):
        # NOTE(review): presumably disables the driver's automatic
        # close-on-commit behaviour -- confirm against the backend class.
        raw_connection.auto_close = False
    raw_connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    _reset_dirty_commit_flags(*flags)
    return raw_connection
def test_relevant_fields_parsed(self):
    """Every recognised keyword becomes an attribute on the instance."""
    s = ('dbname=dbname user=user host=host port=port '
         'connect_timeout=timeout sslmode=mode')
    cs = ConnectionString(s)
    expected_fields = [
        ('dbname', 'dbname'),
        ('user', 'user'),
        ('host', 'host'),
        ('port', 'port'),
        ('connect_timeout', 'timeout'),
        ('sslmode', 'mode'),
        ]
    for attribute, value in expected_fields:
        self.assertEqual(value, getattr(cs, attribute))
    # and check that str/repr have the same keys and values.
    self.assertContentEqual(s.split(), str(cs).split())
    self.assertContentEqual(s.split(), repr(cs).split())
def remoteAddFile(self, name, size, file, contentType, expires=None):
    """See `IFileUploadClient`.

    Streams `file` (exactly `size` bytes) to the librarian over the
    upload protocol and returns the download URL for the new alias.

    :raises TypeError: if `file` is None.
    :raises UploadFailed: if `size` is not positive, or the server
        rejects the upload.
    """
    if file is None:
        raise TypeError('No data')
    if size <= 0:
        raise UploadFailed('No data')
    # Python 2: the wire protocol wants a byte string name.
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    self._connect()
    try:
        # Tell the server which database these records belong in, so it
        # can refuse uploads aimed at the wrong instance.
        database_name = ConnectionString(dbconfig.main_master).dbname
        self._sendLine('STORE %d %s' % (size, name))
        self._sendHeader('Database-Name', database_name)
        self._sendHeader('Content-Type', str(contentType))
        if expires is not None:
            # File-Expires is sent as integer seconds since the epoch.
            epoch = time.mktime(expires.utctimetuple())
            self._sendHeader('File-Expires', str(int(epoch)))
        # Blank line terminates the header section.
        self._sendLine('')
        bytesWritten = 0
        # Read in and upload the file 64kb at a time, by using the
        # two-arg form of iter (see
        # /usr/share/doc/python/html/library/functions.html#iter).
        for chunk in iter(lambda: file.read(1024 * 64), ''):
            self.state.f.write(chunk)
            bytesWritten += len(chunk)
        # The STORE line promised `size` bytes; sending fewer or more
        # would corrupt the protocol stream.
        assert bytesWritten == size, (
            'size is %d, but %d were read from the file'
            % (size, bytesWritten))
        self.state.f.flush()
        # Read the server's status line, e.g. '200 <content>/<alias>'.
        response = self.state.f.readline().strip()
        if not response.startswith('200'):
            raise UploadFailed(
                'Could not upload %s. Server said: %s' % (name, response))
        status, ids = response.split()
        contentID, aliasID = ids.split('/', 1)
        path = get_libraryfilealias_download_path(aliasID, name)
        return urljoin(self.download_url, path)
    finally:
        self._close()
def get_all_cluster_nodes(con):
    """Return a list of all Nodes in the cluster.

    node.is_master will be None, as this boolean doesn't make sense
    in the context of a cluster rather than a single replication set.
    """
    # Without Slony installed there is no cluster at all.
    if not slony_installed(con):
        return []
    nodes = _get_nodes(
        con, """
        SELECT DISTINCT
            pa_server AS node_id,
            'node' || pa_server || '_node',
            pa_conninfo AS connection_string,
            NULL
        FROM _sl.sl_path
        ORDER BY node_id
        """)
    if not nodes:
        # There are no subscriptions yet, so no paths. Generate the
        # master Node.
        cur = con.cursor()
        cur.execute("SELECT no_id from _sl.sl_node")
        node_ids = [row[0] for row in cur.fetchall()]
        if len(node_ids) == 0:
            return []
        # A path must exist between any pair of nodes, so more than one
        # node without paths is an inconsistent cluster.
        assert len(node_ids) == 1, "Multiple nodes but no paths."
        master_node_id = node_ids[0]
        # The master's conninfo is not in sl_path yet, so take it from
        # the service configuration instead.
        master_connection_string = ConnectionString(
            config.database.rw_main_master)
        master_connection_string.user = '******'
        return [
            Node(master_node_id, 'node%d_node' % master_node_id,
                master_connection_string, True)
            ]
    return nodes
def store(self):
    """Finalize an upload: verify, register in the DB, move into place.

    Returns (contentID, aliasID); aliasID is None when the client
    supplied its own contentID.

    :raises DigestMismatchError: if the received bytes do not match the
        digest the client announced.
    :raises WrongDatabaseError: if the client named a database that
        matches neither the configured nor the actual one.
    """
    self.debugLog.append('storing %r, size %r'
                         % (self.filename, self.size))
    self.tmpfile.close()

    # Verify the digest matches what the client sent us
    dstDigest = self.sha1_digester.hexdigest()
    if self.srcDigest is not None and dstDigest != self.srcDigest:
        # XXX: Andrew Bennetts 2004-09-20: Write test that checks that
        # the file really is removed or renamed, and can't possibly be
        # left in limbo
        os.remove(self.tmpfilepath)
        raise DigestMismatchError(self.srcDigest, dstDigest)

    try:
        # If the client told us the name of the database it's using,
        # check that it matches.
        if self.databaseName is not None:
            # Per Bug #840068, there are two methods of getting the
            # database name (connection string and db
            # introspection), and they can give different results
            # due to pgbouncer database aliases. Lets check both,
            # and succeed if either matches.
            config_dbname = ConnectionString(
                dbconfig.rw_main_master).dbname
            result = IStore(Product).execute("SELECT current_database()")
            real_dbname = result.get_one()[0]
            if self.databaseName not in (config_dbname, real_dbname):
                raise WrongDatabaseError(
                    self.databaseName, (config_dbname, real_dbname))
            self.debugLog.append(
                'database name %r ok' % (self.databaseName, ))
        # If we haven't got a contentID, we need to create one and return
        # it to the client.
        if self.contentID is None:
            contentID = self.storage.library.add(
                dstDigest, self.size, self.md5_digester.hexdigest(),
                self.sha256_digester.hexdigest())
            aliasID = self.storage.library.addAlias(
                contentID, self.filename, self.mimetype, self.expires)
            self.debugLog.append('created contentID: %r, aliasID: %r.'
                                 % (contentID, aliasID))
        else:
            contentID = self.contentID
            aliasID = None
            self.debugLog.append('received contentID: %r' % (contentID, ))
    except:
        # Abort transaction and re-raise.  Deliberately bare so even
        # non-Exception interrupts trigger the abort log entry.
        self.debugLog.append('failed to get contentID/aliasID, aborting')
        raise

    # Move file to final location
    try:
        self._move(contentID)
    except:
        # Abort DB transaction
        self.debugLog.append('failed to move file, aborting')
        # Remove file
        os.remove(self.tmpfilepath)
        # Re-raise
        raise

    # Commit any DB changes
    self.debugLog.append('committed')

    # Return the IDs if we created them, or None otherwise
    return contentID, aliasID
def db_options(parser):
    """Add and handle default database connection options on the command line

    Adds -d (--database), -H (--host), -p (--port) and -U (--user)

    Parsed options provide dbname, dbhost and dbuser attributes.

    Generally, scripts will not need this and should instead pull their
    connection details from launchpad.config.config. The database setup and
    maintenance tools cannot do this however.

    dbname and dbhost are also propagated to config.database.dbname and
    config.database.dbhost. This ensures that all systems will be using the
    requested connection details.

    Ensure that command line options propagate to where we say they do

    >>> from optparse import OptionParser
    >>> parser = OptionParser()
    >>> db_options(parser)
    >>> options, args = parser.parse_args(
    ...     ['--dbname=foo', '--host=bar', '--user=baz', '--port=6432'])
    >>> options.dbname
    'foo'
    >>> options.dbhost
    'bar'
    >>> options.dbuser
    'baz'
    >>> options.dbport
    6432
    >>> config.database.rw_main_master
    'dbname=foo user=baz host=bar port=6432'
    >>> config.database.rw_main_slave
    'dbname=foo user=baz host=bar port=6432'

    Make sure that the default user is None

    >>> parser = OptionParser()
    >>> db_options(parser)
    >>> options, args = parser.parse_args([])
    >>> print options.dbuser
    None
    """
    # Current connection details supply the option defaults.
    conn_string = ConnectionString(config.database.rw_main_master)

    def update_db_config(**kw):
        # Rewrite both read-write connection strings with the given
        # attribute overrides and push them as a new config layer.
        connection_string_keys = [
            'rw_main_master',
            'rw_main_slave',
            ]
        config_data = ["[database]"]
        for con_str_key in connection_string_keys:
            con_str = ConnectionString(
                getattr(config.database, con_str_key))
            for kwarg, kwval in kw.items():
                setattr(con_str, kwarg, kwval)
            config_data.append("%s: %s" % (con_str_key, str(con_str)))
        config.push('update_db_config', '\n'.join(config_data))

    # Each option uses a callback so the global config is updated the
    # moment the option is parsed, not after parsing completes.
    def dbname_callback(option, opt_str, value, parser):
        parser.values.dbname = value
        update_db_config(dbname=value)

    parser.add_option(
        "-d", "--dbname", action="callback", callback=dbname_callback,
        type="string", dest="dbname", default=conn_string.dbname,
        help="PostgreSQL database to connect to.")

    def dbhost_callback(options, opt_str, value, parser):
        parser.values.dbhost = value
        update_db_config(host=value)

    parser.add_option(
        "-H", "--host", action="callback", callback=dbhost_callback,
        type="string", dest="dbhost", default=conn_string.host,
        help="Hostname or IP address of PostgreSQL server.")

    def dbport_callback(options, opt_str, value, parser):
        value = int(value)
        parser.values.dbport = value
        update_db_config(port=value)

    parser.add_option(
        "-p", "--port", action="callback", callback=dbport_callback,
        type=int, dest="dbport", default=conn_string.port,
        help="Port PostgreSQL server is listening on.")

    def dbuser_callback(options, opt_str, value, parser):
        parser.values.dbuser = value
        update_db_config(user=value)

    parser.add_option(
        "-U", "--user", action="callback", callback=dbuser_callback,
        type="string", dest="dbuser", default=None,
        help="PostgreSQL user to connect as.")
def test_hyphens_in_values(self):
    """Hyphens and dots inside values must survive parsing."""
    parsed = ConnectionString('user=foo-bar host=foo.bar-baz.quux')
    self.assertEqual('foo-bar', parsed.user)
    self.assertEqual('foo.bar-baz.quux', parsed.host)