Example #1
    def __init__(self, name):
        self.name = name
        self.path = tempfile.mkdtemp()
        self.dbname = 'tablespace_db_%s' % name
        self.table_counter = 0
        self.initial_data = None

        gparray = GpArray.initFromCatalog(dbconn.DbURL())
        for host in gparray.getHostList():
            run_cmd('ssh %s mkdir -p %s' %
                    (pipes.quote(host), pipes.quote(self.path)))

        with dbconn.connect(dbconn.DbURL(), unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            db.query("CREATE TABLESPACE %s LOCATION '%s'" %
                     (self.name, self.path))
            db.query("CREATE DATABASE %s TABLESPACE %s" %
                     (self.dbname, self.name))

        with dbconn.connect(dbconn.DbURL(dbname=self.dbname),
                            unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            db.query("CREATE TABLE tbl (i int) DISTRIBUTED RANDOMLY")
            db.query("INSERT INTO tbl VALUES (GENERATE_SERIES(0, 25))")
            # save the distributed data for later verification
            self.initial_data = db.query(
                "SELECT gp_segment_id, i FROM tbl").getresult()
Example #2
    def verify_for_gpexpand(self, hostname=None, port=0):
        """
        For gpexpand, we need make sure:
          1. data is the same after redistribution finished
          2. the table's numsegments is enlarged to the new cluster size
        """
        url = dbconn.DbURL(hostname=hostname, port=port, dbname=self.dbname)
        with dbconn.connect(url, unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            data = db.query("SELECT gp_segment_id, i FROM tbl").getresult()
            tbl_numsegments = dbconn.execSQLForSingleton(conn,
                                                         "SELECT numsegments FROM gp_distribution_policy "
                                                         "WHERE localoid = 'tbl'::regclass::oid")
            num_segments = dbconn.execSQLForSingleton(conn,
                                                     "SELECT COUNT(DISTINCT(content)) - 1 FROM gp_segment_configuration")

        if tbl_numsegments != num_segments:
            raise Exception("After gpexpand the numsegments for tablespace table 'tbl' %d does not match "
                            "the number of segments in the cluster %d." % (tbl_numsegments, num_segments))

        initial_data = [i for _, i in self.initial_data]
        data_without_segid = [i for _, i in data]
        if sorted(data_without_segid) != sorted(initial_data):
            raise Exception("Tablespace data is not identically distributed after running gp_expand. "
                            "Expected pre-gpexpand data:\n%\n but found post-gpexpand data:\n%r" % (
                                sorted(self.initial_data), sorted(data)))
Example #3
    def performQuery(self, queryStatement="", operationType=1):
        '''
        Run the SQL in queryStatement.

        By default operationType = 1, which means the query returns rows and
        the result is a list of dicts (dictresult()).

        Set operationType = 0 for statements that return no rows.
        '''
        rtnVal = None
        try:
            self.db_connection = pg.DB(dbname=self.db_name,
                                       host=self.db_ip,
                                       port=self.db_port,
                                       user=self.db_user,
                                       passwd=self.db_password)
            if operationType == 1:
                rtnVal = self.db_connection.query(queryStatement).dictresult()
            else:
                rtnVal = self.db_connection.query(queryStatement)
        except Exception as e:
            print("*---------- Failed to connect to database or perform query statement. ----------*")
            print("------------Query statement is: %s" % queryStatement)
            print("------------Error message: %s" % str(e))
            print("*-----------------------------------------------------------------------------*")
        finally:
            # Close the connection whether or not the query succeeded.
            if self.db_connection is not None:
                self.db_connection.close()
        # Returning outside the finally block avoids silently swallowing any
        # exception raised inside it.
        return rtnVal
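A usage sketch for performQuery(), assuming a hypothetical DBHelper class whose constructor sets db_name, db_ip, db_port, db_user, and db_password (none of that wrapper is shown in the original):

# DBHelper and its constructor arguments are hypothetical; only performQuery()
# appears in the snippet above.
helper = DBHelper(db_name='postgres', db_ip='localhost', db_port=5432,
                  db_user='gpadmin', db_password='')
# operationType=1 (the default): the result set comes back as a list of dicts.
rows = helper.performQuery("SELECT datname FROM pg_database")
# operationType=0: for statements that return no rows.
helper.performQuery("CREATE TABLE t (i int)", operationType=0)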
Example #4
def drop_tables():
    # PGPORT is expected to be defined elsewhere in the original module
    # (typically taken from the environment).
    try:
        db = pg.DB(dbname='reuse_gptest',
                   host='localhost',
                   port=int(PGPORT))
    except Exception as e:
        errorMessage = str(e)
        print('could not connect to database: ' + errorMessage)
Example #5
    def trigger_fts_probe(self, port=0):
        self.logger.info('Triggering FTS probe')
        with dbconn.connect(dbconn.DbURL(port=port)) as conn:
            db = pg.DB(conn)

            # XXX Perform two probe scans in a row, to work around a known
            # race where gp_request_fts_probe_scan() can return early during the
            # first call. Remove this duplication once that race is fixed.
            for _ in range(2):
                db.query("SELECT gp_request_fts_probe_scan()")
Example #6
    def setUpClass(cls):
        # Connect to the database pointed to by PGHOST et al.
        with dbconn.connect(dbconn.DbURL()) as conn:
            # using the pg.DB connection so that each SQL is done as a single
            # transaction
            db = pg.DB(conn)
            test_database_name = "gpdb_test_database"
            db.query("DROP DATABASE IF EXISTS %s" % test_database_name)
            db.query("CREATE DATABASE %s" % test_database_name)
            cls.url = dbconn.DbURL(dbname=test_database_name)
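The matching tearDownClass is not part of the snippet; a sketch of what it could look like, dropping the database created above (shown with the @classmethod decorator the excerpt omits):

    @classmethod
    def tearDownClass(cls):
        # Drop the test database again; connect via the default DbURL() so the
        # connection does not point at the database being dropped.
        with dbconn.connect(dbconn.DbURL()) as conn:
            db = pg.DB(conn)
            db.query("DROP DATABASE IF EXISTS gpdb_test_database")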
Example #7
    def test_warnings_are_normally_suppressed(self):
        warning = "this is my warning message"

        with dbconn.connect(self.url) as conn:
            # Wrap our connection in pg.DB() so we can get at the underlying
            # notices. (This isn't available in the standard DB-API.)
            db = pg.DB(conn)
            self._raise_warning(db, warning)
            notices = db.notices()

        self.assertEqual(notices, [])  # we expect no notices
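_raise_warning() is a helper that is not shown here; one way it could be implemented (an assumption, not necessarily the gpdb test's own helper) is to have the server emit the warning from a plpgsql DO block:

    def _raise_warning(self, db, warning):
        # Assumed implementation: RAISE WARNING server-side so the message
        # lands in the connection's notice buffer.
        db.query("DO $$ BEGIN RAISE WARNING '%s'; END $$" % warning)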
Example #8
    def __init__(self, name):
        self.name = name
        self.path = tempfile.mkdtemp()
        self.dbname = 'tablespace_db_%s' % name
        self.table_counter = 0
        self.initial_data = None

        with dbconn.connect(dbconn.DbURL()) as conn:
            db = pg.DB(conn)
            db.query("CREATE TABLESPACE %s LOCATION '%s'" %
                     (self.name, self.path))
            db.query("CREATE DATABASE %s TABLESPACE %s" %
                     (self.dbname, self.name))

        with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:
            db = pg.DB(conn)
            db.query("CREATE TABLE tbl (i int) DISTRIBUTED RANDOMLY")
            db.query("INSERT INTO tbl VALUES (GENERATE_SERIES(0, 25))")
            # save the distributed data for later verification
            self.initial_data = db.query(
                "SELECT gp_segment_id, i FROM tbl").getresult()
Example #9
    def cleanup(self):
        with dbconn.connect(dbconn.DbURL(dbname="postgres")) as conn:
            db = pg.DB(conn)
            db.query("DROP DATABASE IF EXISTS %s" % self.dbname)
            db.query("DROP TABLESPACE IF EXISTS %s" % self.name)

            # Without synchronous_commit = 'remote_apply' introduced in 9.6, there
            # is no guarantee that the mirrors have removed their tablespace
            # directories by the time the DROP TABLESPACE command returns.
            # We need those directories to no longer be in use by the mirrors
            # before removing them below.
            _checkpoint_and_wait_for_replication_replay(db)

        shutil.rmtree(self.path)
Example #10
    def test_verbose_mode_allows_warnings_to_be_sent_to_the_client(self):
        warning = "this is my warning message"

        with dbconn.connect(self.url, verbose=True) as conn:
            db = pg.DB(conn)
            self._raise_warning(db, warning)
            notices = db.notices()

        for notice in notices:
            if warning in notice:
                return  # found it!

        self.fail("Didn't find expected notice '{}' in {!r}".format(
            warning, notices))
Example #11
    def cleanup(self):
        with dbconn.connect(dbconn.DbURL(dbname="postgres"), unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            db.query("DROP DATABASE IF EXISTS %s" % self.dbname)
            db.query("DROP TABLESPACE IF EXISTS %s" % self.name)

            # Without synchronous_commit = 'remote_apply' introduced in 9.6, there
            # is no guarantee that the mirrors have removed their tablespace
            # directories by the time the DROP TABLESPACE command returns.
            # We need those directories to no longer be in use by the mirrors
            # before removing them below.
            _checkpoint_and_wait_for_replication_replay(db)

        gparray = GpArray.initFromCatalog(dbconn.DbURL())
        for host in gparray.getHostList():
            run_cmd('ssh %s rm -rf %s' % (pipes.quote(host), pipes.quote(self.path)))
Example #12
    def verify(self, hostname=None, port=0):
        """
        Verify tablespace functionality by ensuring the tablespace can be
        written to, read from, and the initial data is still correctly
        distributed.
        """
        url = dbconn.DbURL(hostname=hostname, port=port, dbname=self.dbname)
        with dbconn.connect(url, unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            data = db.query("SELECT gp_segment_id, i FROM tbl").getresult()

            # verify that we can still write to the tablespace
            self.table_counter += 1
            db.query("CREATE TABLE tbl_%s (i int) DISTRIBUTED RANDOMLY" % self.table_counter)
            db.query("INSERT INTO tbl_%s VALUES (GENERATE_SERIES(0, 25))" % self.table_counter)

        if sorted(data) != sorted(self.initial_data):
            raise Exception("Tablespace data is not identically distributed. Expected:\n%r\n but found:\n%r" % (
                sorted(self.initial_data), sorted(data)))
Example #13
def pg_connect(start, rt_list, keeps):
    gpdb = pg.DB(host='mas01', dbname='csgbi', user='******', port=6666)
    end = time.time()
    runtime = end - start
    while runtime <= keeps:
        try:
            mystart = time.time()
            ran = random.randrange(1, 1000000000)
            query = """
            insert into test values(%s,'test1','test2','test3')
            """ % (ran)
            gpdb.query(query)
            myend = time.time()
            rt = myend - mystart
        except Exception as e:
            print(e)
            rt = 0
        end = time.time()
        runtime = end - start
        rt_list.append(rt)
    gpdb.close()
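pg_connect() keeps appending one response time per INSERT to rt_list until keeps seconds have elapsed; a sketch of how it might be driven with several concurrent sessions, assuming multiprocessing and a managed list (the driver is not part of the original):

import time
import multiprocessing

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    rt_list = manager.list()              # shared across worker processes
    start = time.time()
    keeps = 60                            # run the load for 60 seconds
    workers = [multiprocessing.Process(target=pg_connect,
                                        args=(start, rt_list, keeps))
               for _ in range(4)]         # four concurrent sessions
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print("%d inserts recorded" % len(rt_list))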
Example #14
    def __init__(self, dbname, host=None, port=None, user=None, passwd=None):
        args = {"dbname": dbname, "host": host, "user": user, "passwd": passwd}
        # PyGreSQL complains if port is included in kwargs and its value is
        # None.  It allows all other kwargs to be None.
        if port is not None:
            args["port"] = port
        self.db = pg.DB(**args)
        is_super = self.db.query("SHOW is_superuser").getresult()
        if is_super[0][0] != "on":
            user = self.db.query("SELECT CURRENT_USER").getresult()
            self.db.close()
            raise GPDBException("'%s' is not a superuser." % user[0][0])
        # Ensure we only interact with a 4.3.x GPDB instance.
        version = self.db.query("SELECT version()").getresult()
        if re.match(".*Greenplum Database[ ]*4.3", version[0][0]) is None:
            self.db.close()
            raise GPDBException("GPDB is not running version 4.3.x")
        self.__systable_mods_set = False
        self.__catdml_created = False
        self.public_schema_is_created = False
        self.public_schema_oid = self.checkPublicSchemaOid()
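GPDBException is raised here but not defined in the snippet; presumably it is a simple Exception subclass defined elsewhere in the module, along the lines of:

class GPDBException(Exception):
    # Assumed definition; the original module defines this elsewhere.
    pass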
Example #15
    def test_no_transaction_after_connect(self):
        with dbconn.connect(self.url) as conn:
            db = pg.DB(conn)
            # this would fail if we were in a transaction: DROP DATABASE cannot
            # run inside a transaction block
            db.query("DROP DATABASE IF EXISTS some_nonexistent_database")
Example #16
    def test_connect(self):
        dburl = DbURL()
        logger.info("YO")
        db = pg.DB(dbname=dburl.pgdb)
        q = db.query("SELECT 1")
        logger.info(q.getresult())
Example #17
File: utils.py, Project: satchel9/gpdb
def escape_string(string, conn):
    return pg.DB(db=conn).escape_string(string)
Example #18
def connect(dbname, host, port, user, passwd):
    global db
    db = pg.DB(dbname=dbname, host=host, port=port, user=user, passwd=passwd)
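connect() stores the connection in a module-level global named db; a usage sketch (the connection parameters are placeholders):

connect(dbname='postgres', host='localhost', port=5432,
        user='gpadmin', passwd='')
print(db.query("SELECT version()").getresult())
db.close()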