Example #1
 def execute(self):
     needed_space = 0
     dburl = dbconn.DbURL(dbname=self.dump_database, port=self.segport)
     conn = None
     try:
         conn = dbconn.connect(dburl, utility=True)
         if self.include_dump_tables:
             for dump_table in self.include_dump_tables:
                 schema, table = dump_table.split('.')
                 needed_space += execSQLForSingleton(
                     conn,
                     "select sotdsize/1024 from gp_toolkit.gp_size_of_table_disk where sotdschemaname='%s' and sotdtablename='%s';"
                     % (schema, table))
         else:
             needed_space = execSQLForSingleton(
                 conn,
                 "select sodddatsize/1024 from gp_toolkit.gp_size_of_database where sodddatname='%s';"
                 % self.dump_database)
     except UnexpectedRowsError, e:
         logger.exception(
             "Disk space queries have failed. Cannot estimate disk space needed for dump."
         )
         raise ExceptionNoStackTraceNeeded(
             "Cannot estimate disk space needed for dump. Use -b to override this check."
         )
     finally:
         if conn is not None:
             conn.close()
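These snippets lean heavily on gppylib helpers such as execSQLForSingleton: run a query, demand exactly one row, and return its single column. For readers without the library, here is a minimal sketch of the contract the examples assume (names and attributes are inferred from usage here and in Example #5; the real gppylib implementation may differ):

    class UnexpectedRowsError(Exception):
        """Sketch of gppylib's error; Example #5 reads e.actual."""
        def __init__(self, expected, actual, sql):
            Exception.__init__(self, "expected %d row(s), got %d" % (expected, actual))
            self.expected = expected
            self.actual = actual
            self.sql = sql

    def execSQLForSingleton(conn, sql):
        # Run sql on a DB-API connection and return the lone value of the
        # lone row; any other row count raises UnexpectedRowsError.
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
            rows = cursor.fetchall()
            if len(rows) != 1:
                raise UnexpectedRowsError(1, len(rows), sql)
            return rows[0][0]
        finally:
            cursor.close()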
Example #2
 def execute(self):
     schema, table = UpdateHistoryTable.HISTORY_TABLE.split('.')
     exists = CheckTableExists(database=self.dump_database,
                               schema=schema,
                               table=table,
                               master_port=self.master_port).run()
     if not exists:
         conn = None
         CREATE_HISTORY_TABLE = """ create table %s (rec_date timestamp,
             start_time char(8), end_time char(8), options text,
             dump_key varchar(20), dump_exit_status smallint,
             script_exit_status smallint, exit_text varchar(10))
             distributed by (rec_date); """ % UpdateHistoryTable.HISTORY_TABLE
         try:
             dburl = dbconn.DbURL(port=self.master_port,
                                  dbname=self.dump_database)
             conn = dbconn.connect(dburl)
             execSQL(conn, CREATE_HISTORY_TABLE)
             conn.commit()
         except Exception, e:
             logger.exception(
                 "Unable to create %s in %s database" %
                 (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
             return
         else:
             logger.info(
                 "Created %s in %s database" %
                 (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
         finally:
             if conn is not None:
                 conn.close()
Example #3
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.UPDATE_VERIFICATION_ENTRY % (self.state, self.done,
                                               self.mismatch, self.token)
     with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
         dbconn.execSQL(conn, query)
         conn.commit()
Example #4
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.INSERT_VERIFICATION_ENTRY % (
         self.token, self.type, self.content, VerificationState.RUNNING)
     with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
         dbconn.execSQL(conn, query)
         conn.commit()
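Examples #3, #4 and #18 repeat the same shape: connect with allowSystemTableMods='dml' (the verification entries live in a catalog table), execute one statement, commit. A hedged refactoring sketch; the helper name is ours, the originals inline this pattern:

    def run_catalog_dml(query):
        # Connect to the local master, permit DML on system tables, run one
        # statement, and commit before the context manager closes the session.
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
            dbconn.execSQL(conn, query)
            conn.commit()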
Example #5
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.SELECT_VERIFICATION_ENTRY % self.token
     with dbconn.connect(dburl) as conn:
         try:
             row = dbconn.execSQLForSingletonRow(conn, query)
         except UnexpectedRowsError, e:
             if e.actual == 0:
                 raise TokenNotFound(self.token)
             raise
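A usage sketch for the lookup above. Only the TokenNotFound contract comes from the example; the operation's class name here is a placeholder:

    try:
        row = VerificationEntryLookup(token).execute()  # hypothetical class name
    except TokenNotFound:
        logger.error("no verification entry found for token %s" % token)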
Example #6
    def __init__(self,
                 masterDataDir,
                 readFromMasterCatalog,
                 timeout=None,
                 retries=None):
        """
        masterDataDir: if None then we try to find it from the system environment
        readFromMasterCatalog: if True then we will connect to the master in utility mode and fetch some more
                               data from there (like collation settings)
        """
        if masterDataDir is None:
            self.__masterDataDir = gp.get_masterdatadir()
        else:
            self.__masterDataDir = masterDataDir

        logger.debug("Obtaining master's port from master data directory")
        pgconf_dict = pgconf.readfile(self.__masterDataDir +
                                      "/postgresql.conf")
        self.__masterPort = pgconf_dict.int('port')
        logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
        self.__masterMaxConnections = pgconf_dict.int('max_connections')
        logger.debug("Read from postgresql.conf max_connections=%s" %
                     self.__masterMaxConnections)

        self.__gpHome = gp.get_gphome()
        self.__gpVersion = gp.GpVersion.local(
            'local GP software version check', self.__gpHome)
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

        # read collation settings from master
        if readFromMasterCatalog:
            dbUrl = dbconn.DbURL(port=self.__masterPort,
                                 dbname='template1',
                                 timeout=timeout,
                                 retries=retries)
            conn = dbconn.connect(dbUrl, utility=True)
            (self.__lcCollate, self.__lcMonetary,
             self.__lcNumeric) = catalog.getCollationSettings(conn)

            # MPP-13807, read/show the master's database version too
            self.__pgVersion = dbconn.execSQLForSingletonRow(
                conn, "select version();")[0]
            logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
            conn.close()

            checkNotNone("lc_collate", self.__lcCollate)
            checkNotNone("lc_monetary", self.__lcMonetary)
            checkNotNone("lc_numeric", self.__lcNumeric)
        else:
            self.__lcCollate = None
            self.__lcMonetary = None
            self.__lcNumeric = None
            self.__pgVersion = None
Example #7
 def _analyze(self, restore_db, master_port):
     conn = None
     logger.info('Commencing analyze of %s database, please wait' %
                 restore_db)
     try:
         dburl = dbconn.DbURL(port=master_port, dbname=restore_db)
         conn = dbconn.connect(dburl)
         execSQL(conn, 'analyze')
         conn.commit()
     except Exception, e:
         logger.warn('Issue with analyze of %s database' % restore_db)
     finally:
         if conn is not None:
             conn.close()
Example #8
 def execute(self):
     conn = None  # defined up front so the finally block can check it safely
     try:
         dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(
             conn,
             "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'"
             % (self.table, self.schema))
         return count > 0
     finally:
         if conn is not None:
             conn.close()
Example #9
    def pauseFaultProber(self):
        assert not self.__isPaused
        assert self.__masterDbUrl is not None  # must be initialized
        assert self.__conn is None

        logger.debug("Pausing fault prober")

        self.__conn = dbconn.connect(
            self.__masterDbUrl, True
        )  # use utility mode so we don't do any segment connection stuff
        dbconn.execSQL(self.__conn, "set gp_fts_probe_pause = on")

        self.__isPaused = True
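pauseFaultProber deliberately keeps the utility-mode session open, since the session-level GUC only holds while the connection lives. The matching resume is not shown; a hedged sketch of what it presumably looks like (method name and bookkeeping assumed from the asserts above):

    def unpauseFaultProber(self):
        # Assumed counterpart: flip the GUC back off and release the session.
        assert self.__isPaused
        assert self.__conn is not None
        dbconn.execSQL(self.__conn, "set gp_fts_probe_pause = off")
        self.__conn.close()
        self.__conn = None
        self.__isPaused = False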
Example #10
 def execute(self):
     conn = None
     try:
         dburl = dbconn.DbURL(port=self.master_port)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(
             conn, "select count(*) from pg_database where datname='%s';" %
             self.database)
         if count == 0:
             raise ExceptionNoStackTraceNeeded(
                 "Database %s does not exist." % self.database)
     finally:
         if conn is not None:
             conn.close()
Example #11
 def execute(self):
     conn = None
     logger.info('Commencing vacuum of %s database, please wait' %
                 self.database)
     try:
         dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
         conn = dbconn.connect(dburl)
         cursor = conn.cursor()
         cursor.execute(
             "commit")  # hack to run the vacuum outside the implied transaction
         cursor.execute("vacuum")
         cursor.close()
     except Exception, e:
         logger.exception('Error encountered with vacuum of %s database' %
                          self.database)
     finally:
         if conn is not None:
             conn.close()
Example #12
    def execute(self):
        existing_tables = []
        table_counts = []
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.restore_db)
            conn = dbconn.connect(dburl)
            for restore_table in self.restore_tables:
                if '.' not in restore_table:
                    logger.warn(
                        "No schema name supplied for %s, removing from list of tables to restore"
                        % restore_table)
                    continue

                schema, table = restore_table.split('.')
                count = execSQLForSingleton(
                    conn,
                    "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'"
                    % (table, schema))
                if count == 0:
                    logger.warn(
                        "Table %s does not exist in database %s, removing from list of tables to restore"
                        % (table, self.restore_db))
                    continue

                count = execSQLForSingleton(
                    conn, "select count(*) from %s.%s" % (schema, table))
                if count > 0:
                    logger.warn('Table %s has %d records %s' %
                                (restore_table, count, WARN_MARK))
                existing_tables.append(restore_table)
                table_counts.append((restore_table, count))
        finally:
            if conn is not None:
                conn.close()

        if len(existing_tables) == 0:
            raise ExceptionNoStackTraceNeeded("Have no tables to restore")
        logger.info("Have %d tables to restore, will continue" %
                    len(existing_tables))

        return (existing_tables, table_counts)
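Examples #8 and #12 issue the same pg_class/pg_namespace existence query. A hedged consolidation sketch (the helper name is ours):

    def table_exists(conn, schema, table):
        # True when exactly this schema-qualified relation is in the catalog.
        count = execSQLForSingleton(
            conn,
            "select count(*) from pg_class, pg_namespace "
            "where pg_class.relname = '%s' "
            "and pg_class.relnamespace = pg_namespace.oid "
            "and pg_namespace.nspname = '%s'" % (table, schema))
        return count > 0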
Example #13
 def execute(self):
     dburl = dbconn.DbURL(dbname=self.database, port=self.master_port)
     conn = None
     try:
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(
             conn,
             "select count(*) from pg_class, pg_namespace where pg_namespace.nspname = 'gp_toolkit' and pg_class.relnamespace = pg_namespace.oid"
         )
     finally:
         if conn is not None:
             conn.close()
     if count > 0:
         logger.debug("gp_toolkit exists within database %s." %
                      self.database)
         return
     logger.info("gp_toolkit not found. Installing...")
     Psql('Installing gp_toolkit',
          filename='$GPHOME/share/postgresql/gp_toolkit.sql',
          database=self.database,
          port=self.master_port).run(validateAfter=True)
Example #14
 def _analyze(self, restore_db, restore_tables, master_port):
     conn = None
     try:
         dburl = dbconn.DbURL(port=master_port, dbname=restore_db)
         conn = dbconn.connect(dburl)
         for table in restore_tables:
             logger.info(
                 'Commencing analyze of %s in %s database, please wait...' %
                 (table, restore_db))
             try:
                 execSQL(conn, 'analyze %s' % table)
                 conn.commit()
             except Exception, e:
                 logger.warn(
                     'Issue with analyze of %s table, check log file for details'
                     % table)
             else:
                 logger.info('Analyze of %s table completed without error' %
                             table)
     finally:
         if conn is not None:
             conn.close()
Example #15
 def execute(self):
     ret = []
     dburl = dbconn.DbURL()
     with dbconn.connect(dburl) as conn:
         # TODO: improve execSQL APIs to avoid need to use cursor here for such a simple task
         cursor = conn.cursor()
         cursor.execute(self.SELECT_ALL_VERIFICATIONS)
         res = cursor.fetchall()
         cursor.close()
     for row in res:
         # TODO: execSQL or pygresql should be able to do this for us
         ret.append({
             'vertoken': row[0],
             'vertype': row[1],
             'vercontent': row[2],
             'verstarttime': row[3],
             'verstate': row[4],
             'verdone': row[5],
             'verendtime': row[6],
             'vermismatch': row[7]
         })
     return ret
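The TODO above wants the driver to hand back dicts. Pending that, a generic sketch built on the standard DB-API cursor.description, so the column list is not hard-coded:

    def rows_as_dicts(cursor):
        # cursor.description is a sequence of 7-tuples whose first field is
        # the column name; zip each row against those names.
        cols = [d[0] for d in cursor.description]
        return [dict(zip(cols, row)) for row in cursor.fetchall()]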
Example #16
    def getFaultProberInterval(self):
        probe_interval_re = re.compile(r'(?P<val>\d+)(?P<unit>[a-zA-Z]*)')
        probe_interval_secs = 60

        conn = None

        try:
            conn = dbconn.connect(self.__masterDbUrl, True)
            fts_probe_interval_value = catalog.getSessionGUC(
                conn, 'gp_fts_probe_interval')
            m = probe_interval_re.match(fts_probe_interval_value)
            if m.group('unit') == 'min':
                probe_interval_secs = int(m.group('val')) * 60
            else:
                probe_interval_secs = int(m.group('val'))
        finally:
            if conn:
                conn.close()

        return probe_interval_secs
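The GUC value is a count with an optional unit suffix, and only 'min' changes the arithmetic. A quick worked check of the two shapes the code handles:

    import re
    probe_interval_re = re.compile(r'(?P<val>\d+)(?P<unit>[a-zA-Z]*)')

    m = probe_interval_re.match('60')       # bare value: unit group is empty
    assert (m.group('val'), m.group('unit')) == ('60', '')

    m = probe_interval_re.match('5min')     # minutes are scaled to seconds
    assert int(m.group('val')) * 60 == 300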
Example #17
    def _process_createdb(self, restore_timestamp, restore_db, master_datadir,
                          master_port):
        conn = None
        try:
            dburl = dbconn.DbURL(port=master_port)
            conn = dbconn.connect(dburl)
            count = execSQLForSingleton(
                conn, "select count(*) from pg_database where datname='%s';" %
                restore_db)

            if count == 1:
                logger.info("Dropping database %s" % restore_db)
                try:
                    cursor = conn.cursor()
                    cursor.execute(
                        "commit"
                    )  # hack to move drop stmt out of implied transaction
                    cursor.execute("drop database %s" % restore_db)
                    cursor.close()
                except Exception, e:
                    logger.exception("Could not drop database %s" %
                                     restore_db)
                    raise ExceptionNoStackTraceNeeded(
                        'Failed to drop database %s' % restore_db)
                else:
                    logger.info('Dropped database %s' % restore_db)
        finally:
            if conn is not None:
                conn.close()

        createdb_file = os.path.join(
            master_datadir, DUMP_DIR, restore_timestamp[0:8],
            "%s%s" % (CREATEDB_PREFIX, restore_timestamp))
        logger.info('Invoking %s' % createdb_file)
        Psql('Invoking schema dump',
             filename=createdb_file).run(validateAfter=True)
Example #18
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.REMOVE_VERIFICATION_ENTRY % self.token
     with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
         dbconn.execSQL(conn, query)
         conn.commit()
Example #19
    def rebalance(self):
        # Get the unbalanced primary segments grouped by hostname
        # These segments are what we will shutdown.
        logger.info("Getting unbalanced segments")
        unbalanced_primary_segs = GpArray.getSegmentsByHostName(
            self.gpArray.get_unbalanced_primary_segdbs())
        pool = WorkerPool()

        count = 0

        try:
            # Disable ctrl-c
            signal.signal(signal.SIGINT, signal.SIG_IGN)

            logger.info("Stopping unbalanced primary segments...")
            for hostname in unbalanced_primary_segs.keys():
                cmd = GpSegStopCmd("stop unbalanced primary segs",
                                   self.gpEnv.getGpHome(),
                                   self.gpEnv.getGpVersion(),
                                   'fast',
                                   unbalanced_primary_segs[hostname],
                                   ctxt=REMOTE,
                                   remoteHost=hostname,
                                   timeout=600)
                pool.addCommand(cmd)
                count += 1

            pool.wait_and_printdots(count, False)

            failed_count = 0
            completed = pool.getCompletedItems()
            for res in completed:
                if not res.get_results().wasSuccessful():
                    failed_count += 1

            if failed_count > 0:
                logger.warn(
                    "%d segments failed to stop.  A full rebalance of the" %
                    failed_count)
                logger.warn(
                    "system is not possible at this time.  Please check the")
                logger.warn(
                    "log files, correct the problem, and run gprecoverseg -r")
                logger.warn("again.")
                logger.info(
                    "gprecoverseg will continue with a partial rebalance.")

            pool.empty_completed_items()
            # issue a distributed query to make sure we pick up the fault
            # that we just caused by shutting down segments
            conn = None
            try:
                logger.info("Triggering segment reconfiguration")
                dburl = dbconn.DbURL()
                conn = dbconn.connect(dburl)
                cmd = ReconfigDetectionSQLQueryCommand(conn)
                pool.addCommand(cmd)
                pool.wait_and_printdots(1, False)
            except Exception:
                # This exception is expected
                pass
            finally:
                if conn:
                    conn.close()

            # Final step is to issue a recoverseg operation to resync segments
            logger.info("Starting segment synchronization")
            cmd = GpRecoverseg("rebalance recoverseg")
            pool.addCommand(cmd)
            pool.wait_and_printdots(1, False)
        except Exception:
            raise  # bare raise preserves the original traceback
Example #20
 def setUp(self):
     self.dburl = dbconn.DbURL()
     self.conn = dbconn.connect(self.dburl)
Example #21
def skipIfDatabaseDown():
    try:
        conn = connect(DbURL())  # avoid shadowing the dbconn module
        conn.close()
    except Exception:
        return unittest.skip("database must be up")
    return lambda o: o
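Hypothetical usage of the guard above: calling skipIfDatabaseDown() yields either unittest.skip(...) or an identity decorator, so it can wrap a whole TestCase (imports follow the example; the test class is our placeholder):

    @skipIfDatabaseDown()
    class ConnectionSmokeTest(unittest.TestCase):
        def test_can_connect(self):
            conn = connect(DbURL())
            self.assertIsNotNone(conn)
            conn.close()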
Example #22
                    (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
            finally:
                if conn is not None:
                    conn.close()

        translate_rc_to_msg = {0: "COMPLETED", 1: "WARNING", 2: "FATAL"}
        exit_msg = translate_rc_to_msg[self.pseudo_exit_status]
        APPEND_HISTORY_TABLE = """ insert into %s values (now(), '%s', '%s', '%s', '%s', %d, %d, '%s'); """ % (
            UpdateHistoryTable.HISTORY_TABLE, self.time_start, self.time_end,
            self.options_list, self.timestamp, self.dump_exit_status,
            self.pseudo_exit_status, exit_msg)
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port,
                                 dbname=self.dump_database)
            conn = dbconn.connect(dburl)
            execSQL(conn, APPEND_HISTORY_TABLE)
            conn.commit()
        except Exception, e:
            logger.exception(
                "Failed to insert record into %s in %s database" %
                (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
        else:
            logger.info("Inserted dump record into %s in %s database" %
                        (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
        finally:
            if conn is not None:
                conn.close()


class DumpGlobal(Operation):