def init_cluster(self):
    """Initialize a PostgreSQL cluster from a pg_dumpall -g SQL script.

    Fetches the globals-only dump from the backup host, replays it with
    psql, then removes the temporary file.  Cleanup now runs even when
    sourcing the SQL fails (try/finally), so no stale dump file or
    temporary pgbouncer entry is left behind.
    """
    # not restoring into a dated database here
    self.dated_dbname = None

    basename = "%s.%s" % (self.section, os.path.basename(self.dumpall_url))
    filename = self.wget(self.backup_host, self.dumpall_url, basename)

    # the restore object hosts the source-sql-file method
    r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                          self.pgbouncer_port, self.dbowner,
                          self.maintdb, self.postgres_major)

    # the dumpall script will \connect postgres: when the maintenance
    # database is not 'postgres' we must temporarily expose a 'postgres'
    # entry in the pgbouncer setup (multi-cluster support with a single
    # pgbouncer instance)
    added_postgres = self.maintdb != 'postgres'
    if added_postgres:
        self.pgbouncer_add_database('postgres')

    try:
        # psql -f filename
        r.source_sql_file(filename)
    finally:
        # don't forget to clean up the mess, even on failure
        os.unlink(filename)
        if added_postgres:
            self.pgbouncer_del_database('postgres')
    return
def vacuumdb(self):
    """Run VACUUM VERBOSE ANALYZE on the database."""
    # build a restore helper connected through pgbouncer and delegate
    helper = restore.pgrestore(self.dated_dbname,
                               self.dbuser,
                               self.host,
                               self.pgbouncer_port,
                               self.dbowner,
                               self.maintdb,
                               self.postgres_major)
    return helper.vacuumdb(standalone=True)
def dump(self, filename, force=False): """ launch a pg_restore for the current staging configuration """ from options import VERBOSE, TERSE # first attempt to establish the connection to remote server # no need to fetch the big backup file unless this succeed # # we will restore from pgbouncer connection, first connection is # made to the maintenance database r = restore.pgrestore(self.dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore) fullname = os.path.join(self.tmpdir, filename) try: secs = r.pg_dump(fullname, force=force) except ExportFileAlreadyExistsException: print "Error: dump file '%s' already exists" % fullname return None if not TERSE: os.system('ls -lh %s' % fullname) return secs
def dump(self, filename, force = False): """ launch a pg_restore for the current staging configuration """ from options import VERBOSE, TERSE # first attempt to establish the connection to remote server # no need to fetch the big backup file unless this succeed # # we will restore from pgbouncer connection, first connection is # made to the maintenance database r = restore.pgrestore(self.dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore) fullname = os.path.join(self.tmpdir, filename) try: secs = r.pg_dump(fullname, force = force) except ExportFileAlreadyExistsException: print "Error: dump file '%s' already exists" % fullname return None if not TERSE: os.system('ls -lh %s' % fullname) return secs
def pg_size_pretty(self, size):
    """Return the given size, pretty printed by the backend."""
    helper = restore.pgrestore(self.dbname, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    return helper.pg_size_pretty(size)
def show(self, setting):
    """Return the current value of the given server setting."""
    helper = restore.pgrestore(self.dbname, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    return helper.show(setting)
def psql_connect(self):
    """Open a psql session on the configured database.

    Calling psql_source_file with no file name makes it read from
    sys.stdin, which gives an interactive session.
    """
    session = restore.pgrestore(self.dbname, self.dbuser, self.host,
                                self.pgbouncer_port, self.dbowner,
                                self.maintdb, self.postgres_major,
                                self.pg_restore)
    return session.psql_source_file()
def restore(self):
    """Launch a pg_restore for the current staging configuration.

    Sequence: connect (via pgbouncer, to the maintenance database),
    create the target database, register it in pgbouncer, replay the
    custom pre-restore SQL scripts, download the dump, pg_restore it,
    and optionally switch pgbouncer over to the new database.

    Raises PGRestoreFailedException on any restore error (unless DEBUG,
    in which case the original exception propagates).
    """
    # NOTE(review): TERSE is imported but unused in this method
    from options import VERBOSE, TERSE, DEBUG

    # first attempt to establish the connection to remote server
    # no need to fetch the big backup file unless this succeed
    #
    # we will restore from pgbouncer connection, first connection is
    # made to the maintenance database
    r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                          self.pgbouncer_port, self.dbowner,
                          self.maintdb, self.postgres_major,
                          self.pg_restore, self.pg_restore_st,
                          self.schemas, self.schemas_nodata,
                          self.relname_nodata)

    # while connected, try to create the database
    r.createdb(self.db_encoding)

    # add the new database to pgbouncer configuration now
    # it could be that the restore will be connected to pgbouncer
    self.pgbouncer_add_database()

    # source the extra SQL files: psql_source_files is a generator and
    # sources each script as a side effect of iteration; the loop body
    # only reports progress
    for sql in self.psql_source_files(utils.PRE_SQL):
        if VERBOSE:
            print "psql -f %s" % sql

    # now, download the dump we need.
    filename = self.get_dump()

    # and restore it
    mesg = None
    try:
        if VERBOSE:
            os.system("ls -l %s" % filename)
        r.restore_jobs = self.restore_jobs
        # NOTE(review): secs is computed but never returned or used —
        # confirm callers don't need the restore duration
        secs = r.pg_restore(filename, self.get_nodata_tables())

        # only switch pgbouncer configuration to new database when there
        # was no restore error
        if self.auto_switch:
            self.switch()
    except Exception, e:
        if DEBUG:
            raise
        mesg = "Error: couldn't pg_restore from '%s'" % (filename)
        mesg += "\nDetail: %s" % e
        raise PGRestoreFailedException, mesg
def set_database_search_path(self):
    """Apply the configured search_path to the target database."""
    # nothing to do without a configured search path
    if not self.search_path:
        return
    helper = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    helper.set_database_search_path(self.search_path)
def createdb(self):
    """Create the target database and register it into pgbouncer."""
    from options import VERBOSE, TERSE, DEBUG

    helper = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    # while connected, create the database itself
    helper.createdb(self.db_encoding)
    # then expose the new database through pgbouncer
    self.pgbouncer_add_database()
def dbsize(self, dbname=None):
    """Return (dbname, size, pretty) for the given or current database."""
    # fall back on the currently dated database name
    target = self.dated_dbname if dbname is None else dbname
    helper = restore.pgrestore(target, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    size, pretty = helper.dbsize()
    return target, size, pretty
def get_triggers(self, filename):
    """List the triggers found in the dump with their attached functions."""
    # no dated database involved: we only read the dump file
    self.dated_dbname = None
    reader = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                               self.postgres_port, self.dbowner,
                               self.maintdb, self.postgres_major,
                               self.pg_restore, self.pg_restore_st,
                               connect=False)
    return reader.get_trigger_funcs(filename)
def get_triggers(self, filename):
    """Return the triggers of the dump along with their functions.

    Works offline on the dump file, hence connect=False.
    """
    self.dated_dbname = None
    offline = restore.pgrestore(self.dated_dbname,
                                self.dbuser,
                                self.host,
                                self.postgres_port,
                                self.dbowner,
                                self.maintdb,
                                self.postgres_major,
                                self.pg_restore,
                                self.pg_restore_st,
                                connect=False)
    return offline.get_trigger_funcs(filename)
def dbsize(self, dbname=None):
    """Return database size as (name, size, pretty printed)."""
    if dbname is None:
        # default to the currently dated database
        dated_dbname = self.dated_dbname
    else:
        dated_dbname = dbname

    helper = restore.pgrestore(dated_dbname, self.dbuser, self.host,
                               self.pgbouncer_port, self.dbowner,
                               self.maintdb, self.postgres_major)
    size, pretty = helper.dbsize()
    return dated_dbname, size, pretty
def psql_source_files(self, phase): """ connect to the given database and run some scripts """ from options import VERBOSE, TERSE if not self.sql_path: if not TERSE: print "There's no custom SQL file to load" return if phase == utils.POST_SQL: sql_path = os.path.join(self.sql_path, 'post') elif phase == utils.PRE_SQL: sql_path = os.path.join(self.sql_path, 'pre') else: raise Exception, "INTERNAL: psql_source_files is given unknown phase" if not os.path.isdir(sql_path): if VERBOSE: print "skipping '%s' which is not a directory" % sql_path return r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore) filenames = [x for x in os.listdir(sql_path) if len(x) > 4 and x[-4:] == '.sql'] filenames.sort() for filename in filenames: yield filename out = r.psql_source_file(os.path.join(sql_path, filename)) if VERBOSE: print out return
def get_catalog(self, filename):
    """Return the dump catalog with nodata tables commented out."""
    # offline operation on the dump file, no dated database
    self.dated_dbname = None
    reader = restore.pgrestore(self.dated_dbname, self.dbuser, self.host,
                               self.postgres_port, self.dbowner,
                               self.maintdb, self.postgres_major,
                               self.pg_restore, self.pg_restore_st,
                               self.schemas, self.schemas_nodata,
                               self.relname_nodata, connect=False)
    catalog = reader.get_catalog(filename, self.get_nodata_tables())
    return catalog.getvalue()
def get_catalog(self, filename):
    """Build a cleaned-out catalog of the dump.

    Tables configured as nodata are commented out of the catalog.
    Works offline on the dump file, hence connect=False.
    """
    self.dated_dbname = None
    offline = restore.pgrestore(self.dated_dbname,
                                self.dbuser,
                                self.host,
                                self.postgres_port,
                                self.dbowner,
                                self.maintdb,
                                self.postgres_major,
                                self.pg_restore,
                                self.pg_restore_st,
                                self.schemas,
                                self.schemas_nodata,
                                self.relname_nodata,
                                connect=False)
    buf = offline.get_catalog(filename, self.get_nodata_tables())
    return buf.getvalue()
def psql_source_files(self, phase): """ connect to the given database and run some scripts """ from options import VERBOSE, TERSE if not self.sql_path: if not TERSE: print "There's no custom SQL file to load" return if phase == utils.POST_SQL: sql_path = os.path.join(self.sql_path, 'post') elif phase == utils.PRE_SQL: sql_path = os.path.join(self.sql_path, 'pre') else: raise Exception, "INTERNAL: psql_source_files is given unknown phase" if not os.path.isdir(sql_path): if VERBOSE: print "skipping '%s' which is not a directory" % sql_path return r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore) filenames = [ x for x in os.listdir(sql_path) if len(x) > 4 and x[-4:] == '.sql' ] filenames.sort() for filename in filenames: yield filename out = r.psql_source_file(os.path.join(sql_path, filename)) if VERBOSE: print out return
x = int(self.backup_date) self.dated_dbname = "%s_%s" % (self.dbname, self.backup_date) except ValueError, e: os.chdir(pushd) mesg = "load: '%s' isn't a valid dump file name" % filename raise ParseDumpFileException, mesg if filename[0] != '/': filename = os.path.join(self.tmpdir, filename) # see comments in previous self.restore() method r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore, self.pg_restore_st, self.schemas, self.schemas_nodata, self.relname_nodata) # create the target database if it does not already exists exists = self.dated_dbname in [ n for n, d, h, p in self.pgbouncer_databases() ] if not exists: r.createdb(self.db_encoding) self.pgbouncer_add_database() # now restore the dump try: if VERBOSE: os.system("ls -l %s" % filename)
except ValueError, e: os.chdir(pushd) mesg = "load: '%s' isn't a valid dump file name" % filename raise ParseDumpFileException, mesg if filename[0] != '/': filename = os.path.join(self.tmpdir, filename) # see comments in previous self.restore() method r = restore.pgrestore(self.dated_dbname, self.dbuser, self.host, self.pgbouncer_port, self.dbowner, self.maintdb, self.postgres_major, self.pg_restore, self.pg_restore_st, self.schemas, self.schemas_nodata, self.relname_nodata) # create the target database if it does not already exists exists = self.dated_dbname in [n for n,d,h,p in self.pgbouncer_databases()] if not exists: r.createdb(self.db_encoding) self.pgbouncer_add_database() # now restore the dump try: