Esempio n. 1
0
def get_copy_filter(host, port):
    """Build per-database object filters for a segment copy.

    Connects to the master at *host*:*port* in Greenplum utility mode
    (via module-level ``pg8000`` and ``ENV.USER``) and enumerates user
    databases (oid >= 16384, i.e. everything beyond built-in objects).
    Prints the error and returns None if the connection or query fails.

    NOTE(review): this excerpt is truncated -- the function body
    continues past the lines shown here.
    """
    # pg8000 connects to master in utility mode
    db = None
    databases = None
    filters = []

    try:
        db = pg8000.Connection(host=host, user=ENV.USER, database='template1', port=port, options='-c gp_session_role=utility')
        
        # get list of user databases
        # select oid, datname from pg_database where oid >= 16384;
        db.execute('select oid, datname from pg_database where oid >= 16384')
#        db.execute('select oid, datname from pg_database')
        # Each row is an (oid, datname) tuple.
        databases = [ r for r in db.iterate_tuple() ]

    except Exception, e:
        # Best-effort: report the failure and bail out with None.
        print str(e)
        return None
Esempio n. 2
0
        ]

    # Metadata directories and config files -- e.g. the commit log
    # (pg_clog), transaction state and WAL -- that must accompany the
    # data files on a copy.
    meta = [
        'global', 'pg_clog', 'pg_distributedlog', 'pg_distributedxidmap',
        'pg_multixact', 'pg_subtrans', 'pg_twophase', 'pg_utilitymodedtmredo',
        'pg_xlog', 'postgresql.conf', 'pg_hba.conf', 'pg_ident.conf',
        'PG_VERSION'
    ]
    # Two-element result: [files, meta]. `files` is assembled above
    # this excerpt -- presumably the per-database relation file filters.
    return [files, meta]


if __name__ == '__main__':
    # Connect to the local master in utility mode and enumerate every
    # connectable user database (template0 and postgres excluded).
    postgres = pg8000.Connection(user='******',
                                 database='postgres',
                                 port=12000,
                                 host='localhost',
                                 options='-c gp_session_role=utility')
    postgres.execute('''select datname, datallowconn from pg_database where
            datname not in('template0', 'postgres');''')
    # Keep one utility-mode connection per database, keyed by name.
    dbs = {}
    dbs['postgres'] = postgres
    for row in postgres.iterate_dict():
        if not row['datallowconn']:
            # NOTE(review): raising a plain string is invalid -- string
            # exceptions were removed in Python 2.6, so this line would
            # raise TypeError instead of the intended error message.
            raise "database %s has datallowconn set to false" % row['datname']
        dbname = row['datname']
        # Open a dedicated utility-mode connection for this database.
        # (Excerpt is truncated here; the loop body likely continues.)
        conn = pg8000.Connection(user='******',
                                 database=dbname,
                                 port=12000,
                                 host='localhost',
                                 options='-c gp_session_role=utility')
Esempio n. 3
0
import pg8000


def has33mods(masterdb):
    """Report which 3.3-era catalog objects *masterdb* already carries.

    Runs a single probe query over an open pg8000-style connection and
    returns one dict with three boolean flags: ``hasaocs`` (pg_aocs
    catalog present), ``aocol`` (pg_appendonly has a compresstype
    column) and ``hasgpconfhis`` (gp_configuration_history present).
    """
    probe = '''select
        (select 1 from pg_class where relnamespace = 11 and 
         relname = 'pg_aocs') is not null as hasaocs,
        (select 1 from pg_attribute where attname = 'compresstype' and
         attrelid = 'pg_appendonly'::regclass) is not null as aocol,
        (select 1 from pg_class where relnamespace = 11 and
         relname = 'gp_configuration_history') is not null as hasgpconfhis;'''
    masterdb.execute(probe)
    # Exactly one row comes back; fetch it via the Python 2 iterator API.
    rows = masterdb.iterate_dict()
    return rows.next()


if __name__ == '__main__':
    conn = pg8000.Connection(user='******',
                             host='localhost',
                             port=12000,
                             database='postgres')
    dets = has33mods(conn)
    print dets
Esempio n. 4
0
        # Each row is an (oid, datname) tuple.
        databases = [ r for r in db.iterate_tuple() ]

    except Exception, e:
        # Best-effort: report the failure and bail out with None.
        print str(e)
        return None
    finally:
        # Always release the template1 connection before per-database work.
        if db:
            db.close()
            db = None

    # foreach database
    for d in databases:
        try:
            oid = d[0]
            datname = d[1]
            db = pg8000.Connection(host=host, user=ENV.USER, database=datname, port=port,  options='-c gp_session_role=utility')

            # get user table filter (user table, TOAST table, sequences)
            # select oid from pg_class where oid >= 16384 and relkind='r';
            # relkind 'r' = ordinary (heap) tables.
            db.execute("select oid from pg_class where oid >= 16384 and relkind='r'")
            table_filter = [ r[0] for r in db.iterate_tuple() ] 
            
            # get user TOAST table filter
            # select oid from pg_class where oid >= 16384 and relkind='t';
            # relkind 't' = TOAST tables.
            db.execute("select oid from pg_class where oid >= 16384 and relkind='t'")
            toast_filter = [ r[0] for r in db.iterate_tuple() ] 

            # get sequences filter
            # select oid from pg_class where oid >= 16384 and relkind='S';
            # relkind 'S' = sequences. (Excerpt is truncated below.)
            db.execute("select oid from pg_class where oid >= 16384 and relkind='S'")
            sequence_filter = [ r[0] for r in db.iterate_tuple() ] 
Esempio n. 5
0
    def start_backup_job(self):
        """
        Make filelist in super class and tell Postgres
        that we start a backup now.

        Full level: adds the whole Postgres data directory to the file
        list and issues pg_start_backup(). Other levels: collects WAL
        files newer than the previous job and queues a restore object
        ("ROP") carrying timestamp/LSN state.

        Returns a bareosfd.bRC_* status code (bRC_OK / bRC_Error).
        """
        bareosfd.DebugMessage(100, "start_backup_job in PostgresPlugin called")
        try:
            # A leading "/" means dbHost is a unix-socket path, not a hostname.
            if self.options["dbHost"].startswith("/"):
                self.dbCon = pg8000.Connection(self.dbuser,
                                               database=self.dbname,
                                               unix_sock=self.dbHost)
            else:
                self.dbCon = pg8000.Connection(self.dbuser,
                                               database=self.dbname,
                                               host=self.dbHost)

            # server_version_num is numeric, e.g. 100012 for 10.12.
            result = self.dbCon.run(
                "SELECT current_setting('server_version_num')")
            self.pgVersion = int(result[0][0])
            # WARNING: JobMessages cause fatal errors at this stage
            bareosfd.JobMessage(
                bareosfd.M_INFO,
                "Connected to Postgres version %d\n" % self.pgVersion,
            )
        except Exception as e:
            bareosfd.JobMessage(
                bareosfd.M_FATAL,
                "Could not connect to database %s, user %s, host: %s: %s\n" %
                (self.dbname, self.dbuser, self.dbHost, e),
            )
            return bareosfd.bRC_Error
        if chr(self.level) == "F":
            # For Full we backup the Postgres data directory
            # Restore object ROP comes later, after file backup
            # is done.
            startDir = self.options["postgresDataDir"]
            self.files_to_backup.append(startDir)
            bareosfd.DebugMessage(
                100, "dataDir: %s\n" % self.options["postgresDataDir"])
            bareosfd.JobMessage(
                bareosfd.M_INFO,
                "dataDir: %s\n" % self.options["postgresDataDir"])
        else:
            # If level is not Full, we only backup WAL files
            # and create a restore object ROP with timestamp information.
            startDir = self.options["walArchive"]
            self.files_to_backup.append("ROP")
            # get current Log Sequence Number (LSN)
            # PG8: not supported
            # PG9: pg_get_current_xlog_location
            # PG10: pg_current_wal_lsn
            pgMajorVersion = self.pgVersion // 10000
            if pgMajorVersion >= 10:
                getLsnStmt = "SELECT pg_current_wal_lsn()"
                switchLsnStmt = "SELECT pg_switch_wal()"
            elif pgMajorVersion >= 9:
                getLsnStmt = "SELECT pg_current_xlog_location()"
                switchLsnStmt = "SELECT pg_switch_xlog()"
            if pgMajorVersion < 9:
                bareosfd.JobMessage(
                    bareosfd.M_INFO,
                    "WAL switching not supported on Postgres Version < 9\n",
                )
            else:
                try:
                    currentLSN = self.formatLSN(
                        self.dbCon.run(getLsnStmt)[0][0])
                    bareosfd.JobMessage(
                        bareosfd.M_INFO,
                        "Current LSN %s, last LSN: %s\n" %
                        (currentLSN, self.lastLSN),
                    )
                except Exception as e:
                    # NOTE(review): falling back to the int 0 makes the
                    # comparison below mix int and str if formatLSN
                    # returns a string -- TODO confirm formatLSN's type.
                    currentLSN = 0
                    bareosfd.JobMessage(
                        bareosfd.M_WARNING,
                        "Could not get current LSN, last LSN was: %s : %s \n" %
                        (self.lastLSN, e),
                    )
                if currentLSN > self.lastLSN and self.switchWal:
                    # Let Postgres write latest transaction into a new WAL file now
                    try:
                        result = self.dbCon.run(switchLsnStmt)
                    except Exception as e:
                        bareosfd.JobMessage(
                            bareosfd.M_WARNING,
                            "Could not switch to next WAL segment: %s\n" % e,
                        )
                    try:
                        result = self.dbCon.run(getLsnStmt)
                        currentLSNraw = result[0][0]
                        currentLSN = self.formatLSN(currentLSNraw)

                        bareosfd.DebugMessage(
                            150,
                            "after pg_switch_wal(): currentLSN: %s lastLSN: %s\n"
                            % (currentLSN, self.lastLSN),
                        )

                        self.lastLSN = currentLSN

                    except Exception as e:
                        bareosfd.JobMessage(
                            bareosfd.M_WARNING,
                            "Could not read LSN after switching to new WAL segment: %s\n"
                            % e,
                        )
                        # BUGFIX: without a fresh LSN we cannot wait for
                        # archiving; previously execution fell through to
                        # wait_for_wal_archiving(currentLSNraw) and hit a
                        # NameError on the unbound currentLSNraw.
                        return bareosfd.bRC_Error

                    # Block until the archiver has safely stored the
                    # segment containing currentLSNraw.
                    if not self.wait_for_wal_archiving(currentLSNraw):
                        return bareosfd.bRC_Error

                else:
                    # Nothing has changed since last backup - only send ROP this time
                    bareosfd.JobMessage(
                        bareosfd.M_INFO,
                        "Same LSN %s as last time - nothing to do\n" %
                        currentLSN,
                    )
                    return bareosfd.bRC_OK

        # Gather files from startDir (Postgres data dir or walArchive for incr/diff jobs)
        for fileName in os.listdir(startDir):
            fullName = os.path.join(startDir, fileName)
            # We need a trailing '/' for directories
            if os.path.isdir(fullName) and not fullName.endswith("/"):
                fullName += "/"
                bareosfd.DebugMessage(100, "fullName: %s\n" % fullName)
            # Usually Bareos takes care about timestamps when doing incremental backups
            # but here we have to compare against last BackupPostgres timestamp
            try:
                mTime = os.stat(fullName).st_mtime
            except Exception as e:
                # BUGFIX: typo in the error message ("net" -> "not").
                bareosfd.JobMessage(
                    bareosfd.M_ERROR,
                    "Could not get stat-info for file %s: %s\n" %
                    (fullName, e),
                )
                continue
            bareosfd.DebugMessage(
                150,
                "%s fullTime: %d mtime: %d\n" %
                (fullName, self.lastBackupStopTime, mTime),
            )
            if mTime > self.lastBackupStopTime + 1:
                bareosfd.DebugMessage(
                    150,
                    "file: %s, fullTime: %d mtime: %d\n" %
                    (fullName, self.lastBackupStopTime, mTime),
                )
                self.files_to_backup.append(fullName)
                if os.path.isdir(
                        fullName) and fileName not in self.ignoreSubdirs:
                    # BUGFIX: distinct loop names so the os.walk traversal
                    # no longer shadows the outer fileName loop variable.
                    for topDir, subDirNames, walkFileNames in os.walk(fullName):
                        for walkFileName in walkFileNames:
                            self.files_to_backup.append(
                                os.path.join(topDir, walkFileName))
                        for subDirName in subDirNames:
                            # Directories keep their trailing '/'.
                            self.files_to_backup.append(
                                os.path.join(topDir, subDirName) + "/")

        # If level is not Full, we are done here and set the new
        # lastBackupStopTime as reference for future jobs
        # Will be written into the Restore Object
        if not chr(self.level) == "F":
            self.lastBackupStopTime = int(time.time())
            return bareosfd.bRC_OK

        # For Full we check for a running job and tell Postgres that
        # we want to backup the DB files now.
        if os.path.exists(self.labelFileName):
            self.parseBackupLabelFile()
            bareosfd.JobMessage(
                bareosfd.M_FATAL,
                'Another Postgres Backup Operation is in progress ("{}" file exists). You may stop it using SELECT pg_stop_backup()\n'
                .format(self.labelFileName),
            )
            return bareosfd.bRC_Error

        bareosfd.DebugMessage(100,
                              "Send 'SELECT pg_start_backup' to Postgres\n")
        # We tell Postgres that we want to start to backup file now
        self.backupStartTime = datetime.datetime.now(
            tz=dateutil.tz.tzoffset(None, self.tzOffset))
        try:
            # NOTE(review): backupLabelString is interpolated into SQL.
            # It comes from plugin configuration, but passing it as a
            # query parameter would be safer than %-formatting.
            result = self.dbCon.run("SELECT pg_start_backup('%s');" %
                                    self.backupLabelString)
        except Exception as e:
            bareosfd.JobMessage(bareosfd.M_FATAL,
                                "pg_start_backup statement failed: %s" % (e))
            return bareosfd.bRC_Error

        bareosfd.DebugMessage(150, "Start response: %s\n" % str(result))
        bareosfd.DebugMessage(
            150, "Adding label file %s to fileset\n" % self.labelFileName)
        self.files_to_backup.append(self.labelFileName)
        bareosfd.DebugMessage(150, "Filelist: %s\n" % (self.files_to_backup))
        self.PostgressFullBackupRunning = True
        return bareosfd.bRC_OK