Example #1
def getPortCoordinatorOnly(host='localhost',
                           coordinator_value=None,
                           user=os.environ.get('USER'),
                           gphome=os.environ['GPHOME'],
                           cdd=get_coordinatordatadir(),
                           port=os.environ['PGPORT']):

    coordinator_pattern = r"Context:\s*-1\s*Value:\s*\d+"
    command = "gpconfig -s %s" % ("port")

    cmd = "source %s/greenplum_path.sh; export COORDINATOR_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
           % (gphome, cdd, port, command)

    (ok, out) = run(cmd)
    if not ok:
        # Fall back to invoking gpconfig via its absolute path under GPHOME.
        cmd = "python %s/bin/gpconfig -s port" % (gphome)
        (ok, out) = run(cmd)
        if not ok:
            raise Exception(
                "Unable to connect to segment server %s as user %s" %
                (host, user))

    # run() returns raw output; decode each byte chunk and flatten into lines.
    lines = []
    for chunk in out:
        lines.extend(chunk.decode().split('\n'))
    for line in lines:
        if re.search(coordinator_pattern, line):
            coordinator_value = int(line.split()[3].strip())

    if coordinator_value is None:
        error_msg = "\n".join(lines)
        raise Exception(error_msg)

    return str(coordinator_value)
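Every example on this page hinges on gppylib.commands.gp.get_coordinatordatadir(), whose implementation the listing never shows. Conceptually it is an environment-variable lookup along the following lines (a minimal sketch, assuming the COORDINATOR_DATA_DIRECTORY name used throughout these examples and a legacy MASTER_DATA_DIRECTORY fallback):

import os

def get_coordinatordatadir():
    # Sketch: prefer the current variable name, fall back to the legacy one.
    return (os.environ.get('COORDINATOR_DATA_DIRECTORY')
            or os.environ.get('MASTER_DATA_DIRECTORY'))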
Example #2
def make_data_directory_called(data_directory_name):
    # Resolve the directory three levels above the coordinator data directory.
    cdd_parent_parent = os.path.realpath(
        os.path.join(get_coordinatordatadir(), "../../.."))
    mirror_data_dir = os.path.join(cdd_parent_parent, data_directory_name)
    if not os.path.exists(mirror_data_dir):
        os.mkdir(mirror_data_dir)
    return mirror_data_dir
Example #3
def impl(context):
    context.mirror_config = _generate_input_config()
    cdd = get_coordinatordatadir()
    # Unset COORDINATOR_DATA_DIRECTORY so gpaddmirrors must rely on its -d
    # option; restore it afterwards so later steps see the original value.
    del os.environ['COORDINATOR_DATA_DIRECTORY']
    try:
        cmd = Command('gpaddmirrors', 'gpaddmirrors -a -i %s -d %s' % (context.mirror_config, cdd))
        cmd.run(validateAfter=True)
    finally:
        os.environ['COORDINATOR_DATA_DIRECTORY'] = cdd
Example #4
def get_port_from_conf():
    file = get_coordinatordatadir() + '/postgresql.conf'
    if os.path.isfile(file):
        with open(file) as f:
            # f.xreadlines() is Python 2 only; iterate over the file directly.
            for line in f:
                match = re.search(r'port=\d+', line)
                if match:
                    match1 = re.search(r'\d+', match.group())
                    if match1:
                        return match1.group()
Example #5
def check_database_is_running(context):
    if 'PGPORT' not in os.environ:
        raise Exception('PGPORT should be set')

    pgport = int(os.environ['PGPORT'])

    running_status = chk_local_db_running(get_coordinatordatadir(), pgport)
    # All four status flags must be true for the cluster to count as running.
    gpdb_running = all(running_status[:4])

    return gpdb_running
Example #6
File: TEST.py (Project: zyclove/gpdb)
def get_port():
    file = get_coordinatordatadir() + '/postgresql.conf'
    if os.path.isfile(file):
        # A with block guarantees the file is closed, even on an early
        # return or when the exception below is raised.
        with open(file) as f:
            for line in f:
                match = re.search(r'port=\d+', line)
                if match:
                    match1 = re.search(r'\d+', match.group())
                    if match1:
                        return match1.group()
        raise Exception('Could not get port from ' + file)
    else:
        raise Exception(file + ' does not exist.  Cannot get port.')
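Both get_port_from_conf() (Example #4) and get_port() above only match the setting when it is written without spaces around the equals sign (port=5432), even though postgresql.conf also accepts the spaced form (port = 5432). A more tolerant variant could capture the number in one pass (a sketch; the helper name parse_port_line is ours, not part of gpdb):

import re

PORT_RE = re.compile(r'^\s*port\s*=\s*(\d+)')

def parse_port_line(line):
    # Return the port as a string, or None if this line is not a port setting.
    match = PORT_RE.search(line)
    return match.group(1) if match else None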
Example #7
    def __init__(self,
                 coordinatorDataDir,
                 readFromCoordinatorCatalog,
                 timeout=None,
                 retries=None,
                 verbose=True):
        """
        coordinatorDataDir: if None then we try to find it from the system environment
        readFromCoordinatorCatalog: if True then we will connect to the coordinator in utility mode and fetch some more
                               data from there (like collation settings)

        """
        if coordinatorDataDir is None:
            self.__coordinatorDataDir = gp.get_coordinatordatadir()
        else:
            self.__coordinatorDataDir = coordinatorDataDir

        logger.debug(
            "Obtaining coordinator's port from coordinator data directory")
        pgconf_dict = pgconf.readfile(self.__coordinatorDataDir +
                                      "/postgresql.conf")
        self.__coordinatorPort = pgconf_dict.int('port')
        logger.debug("Read from postgresql.conf port=%s" %
                     self.__coordinatorPort)
        self.__coordinatorMaxConnections = pgconf_dict.int('max_connections')
        logger.debug("Read from postgresql.conf max_connections=%s" %
                     self.__coordinatorMaxConnections)

        self.__gpHome = gp.get_gphome()
        self.__gpVersion = gp.GpVersion.local(
            'local GP software version check', self.__gpHome)

        if verbose:
            logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

        # read collation settings from coordinator
        if readFromCoordinatorCatalog:
            dbUrl = dbconn.DbURL(port=self.__coordinatorPort,
                                 dbname='template1',
                                 timeout=timeout,
                                 retries=retries)
            conn = dbconn.connect(dbUrl, utility=True)

            # MPP-13807, read/show the coordinator's database version too
            self.__pgVersion = dbconn.queryRow(conn, "select version();")[0]
            logger.info("coordinator Greenplum Version: '%s'" %
                        self.__pgVersion)
            conn.close()
        else:
            self.__pgVersion = None
Example #8
    def __init__(self, mainOptions):
        self.pidlockpath = mainOptions.get('pidlockpath', None)  # the directory we're using for locking
        self.parentpidvar = mainOptions.get('parentpidvar', None)  # environment variable holding parent pid
        self.parentpid = None  # parent pid which already has the lock
        self.ppath = None  # complete path to the lock file
        self.pidlockfile = None  # PIDLockFile object
        self.pidfilepid = None  # pid of the process which has the lock
        self.locktorelease = None  # PIDLockFile object we should release when done

        if self.parentpidvar is not None and self.parentpidvar in os.environ:
            self.parentpid = int(os.environ[self.parentpidvar])

        if self.pidlockpath is not None:
            self.ppath = os.path.join(gp.get_coordinatordatadir(), self.pidlockpath)
            self.pidlockfile = PIDLockFile(self.ppath)
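The heavy lifting here is delegated to PIDLockFile from the lockfile package. A minimal sketch of the acquire/release pattern this class builds on (our assumption of typical usage, not code from the listing):

from lockfile.pidlockfile import PIDLockFile

lock = PIDLockFile('/tmp/demo.pid')
lock.acquire(timeout=0)  # raises AlreadyLocked if another process holds it
try:
    print('lock held; pid file contains', lock.read_pid())
finally:
    lock.release()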
Example #9
    def run(self):
        if self.build:
            if self.filename:
                BuildGppkg(self.build, self.filename).run()
            else:
                BuildGppkg(self.build, None).run()
            return

        if linux_distribution_id() == 'ubuntu':
            try:
                cmd = Command(name='Check for dpkg', cmdStr='dpkg --version')
                cmd.run(validateAfter=True)
                cmd = Command(name='Check for fakeroot',
                              cmdStr='fakeroot --version')
                cmd.run(validateAfter=True)
            except Exception:
                raise ExceptionNoStackTraceNeeded(
                    'fakeroot and dpkg are both required by gppkg')
        else:
            try:
                cmd = Command(name='Check for rpm', cmdStr='rpm --version')
                cmd.run(validateAfter=True)
                results = cmd.get_results().stdout.strip()
                rpm_version_string = results.split(' ')[-1]

                if not rpm_version_string.startswith('4.'):
                    raise ExceptionNoStackTraceNeeded(
                        'gppkg requires rpm version 4.x')

            except ExecutionError as ex:
                results = ex.cmd.get_results().stderr.strip()
                if len(results) != 0 and 'not found' in results:
                    raise ExceptionNoStackTraceNeeded(
                        'gppkg requires RPM to be available in PATH')

        if self.coordinator_datadir is None:
            self.coordinator_datadir = gp.get_coordinatordatadir()
        self.coordinator_port = self._get_coordinator_port(
            self.coordinator_datadir)

        self._get_gpdb_host_list()

        if self.migrate:
            MigratePackages(from_gphome=self.migrate[0],
                            to_gphome=self.migrate[1],
                            standby_host=self.standby_host,
                            segment_host_list=self.segment_host_list).run()
            return

        if self.install:
            pkg = Gppkg.from_package_path(self.install)
            InstallPackage(pkg, self.coordinator_host, self.standby_host,
                           self.segment_host_list).run()
        elif self.query:
            query_type, package_path = self.query
            QueryPackage(query_type, package_path).run()
        elif self.remove:
            # Check for exact match first, then use wildcard for what will be removed.
            pkg_file_list = ListFilesByPattern(
                GPPKG_ARCHIVE_PATH, self.remove + GPPKG_EXTENSION).run()
            if len(pkg_file_list) == 0:
                # now try wildcard
                pkg_file_list = ListFilesByPattern(
                    GPPKG_ARCHIVE_PATH,
                    self.remove + '*' + GPPKG_EXTENSION).run()
                if len(pkg_file_list) == 0:
                    raise ExceptionNoStackTraceNeeded(
                        'Package %s has not been installed.' % self.remove)

                # refuse to remove at all if the match is too broad, i.e., > 1
                if len(pkg_file_list) > 1:
                    err_msg = "Remove request '%s' too broad. " \
                              "Multiple packages match remove request: ( %s )." % (self.remove, ", ".join(pkg_file_list))
                    raise ExceptionNoStackTraceNeeded(err_msg)

            pkg_file = pkg_file_list[0]
            pkg = Gppkg.from_package_path(
                os.path.join(GPPKG_ARCHIVE_PATH, pkg_file))
            UninstallPackage(pkg, self.coordinator_host, self.standby_host,
                             self.segment_host_list).run()
        elif self.update:
            logger.warning(
                'WARNING: The process of updating a package includes removing all'
            )
            logger.warning(
                'previous versions of the system objects related to the package. For'
            )
            logger.warning(
                'example, previous versions of shared libraries are removed.')
            logger.warning(
                'After the update process, a database function will fail when it is'
            )
            logger.warning(
                'called if the function references a package file that has been removed.'
            )
            if self.interactive:
                if not ask_yesno(None, 'Do you still want to continue ?', 'N'):
                    logger.info('Skipping update of gppkg based on user input')
                    return
            pkg = Gppkg.from_package_path(self.update)
            UpdatePackage(pkg, self.coordinator_host, self.standby_host,
                          self.segment_host_list).run()
        elif self.clean:
            CleanGppkg(self.standby_host, self.segment_host_list).run()
Example #10
def impl(context, segment):
    if segment not in ('standby', 'mirrors'):
        raise Exception("invalid segment type")

    context.standby_hostname = 'mdw-2'
    context.execute_steps("""
    Given the segments are synchronized
     Then replication connections can be made from the acting {segment}

    Given a tablespace is created with data
    """.format(segment=segment))

    # For the 'standby' case, we set PGHOST back to its original value instead
    # of 'mdw-1'.  When the function impl() is called, PGHOST is initially unset
    # by the test framework, and we want to respect that.
    orig_PGHOST = os.environ.get('PGHOST')

    # Fail over to standby/mirrors.
    if segment == 'standby':
        coordinator_data_dir = get_coordinatordatadir()
        context.standby_port = os.environ.get('PGPORT')
        context.standby_data_dir = coordinator_data_dir
        context.new_standby_data_dir = '%s_1' % coordinator_data_dir
        context.execute_steps("""
         When the coordinator goes down
          And the user runs command "gpactivatestandby -a" from standby coordinator
         Then gpactivatestandby should return a return code of 0
         """)
        os.environ['PGHOST'] = 'mdw-2'

    else:  # mirrors
        context.execute_steps("""
        Given user stops all primary processes
          And user can start transactions
         When the user runs "gprecoverseg -a"
         Then gprecoverseg should return a return code of 0
        """)

    context.execute_steps("""
     Then the segments are synchronized
      And the tablespace is valid
      And replication connections can be made from the acting {segment}

    Given another tablespace is created with data
    """.format(segment=segment))

    # Fail over (rebalance) to original coordinator/primaries.
    if segment == 'standby':
        # Re-initialize the standby with a new directory, since
        # the previous coordinator cannot assume the role of standby
        # because it does not have the required recovery.conf file.
        context.execute_steps("""
         When the user runs command "gpinitstandby -a -s mdw-1 -S {datadir}" from standby coordinator
         Then gpinitstandby should return a return code of 0
         """.format(datadir=context.new_standby_data_dir))
        os.environ['COORDINATOR_DATA_DIRECTORY'] = context.new_standby_data_dir
        # NOTE: this must be set before gpactivatestandby is called
        if orig_PGHOST is None:
            del os.environ['PGHOST']
        else:
            os.environ['PGHOST'] = orig_PGHOST
        context.standby_hostname = 'mdw-1'
        context.execute_steps("""
         When the coordinator goes down on "mdw-2"
          And the user runs "gpactivatestandby -a"
         Then gpactivatestandby should return a return code of 0
        """)

        context.execute_steps("""
         When the user runs "gpinitstandby -a -s mdw-2 -S {datadir}"
         Then gpinitstandby should return a return code of 0
         """.format(datadir=context.new_standby_data_dir))

    else:  # mirrors
        context.execute_steps("""
         When the user runs "gprecoverseg -ra"
         Then gprecoverseg should return a return code of 0
        """)

    context.execute_steps("""
     Then the segments are synchronized
      And the tablespace is valid
      And the other tablespace is valid
      And replication connections can be made from the acting {segment}
      And all tablespaces are dropped
    """.format(segment=segment))
Example #11
class Context(object):
    # Class-level attributes: everything here, including the database
    # connection below, is evaluated once at class-definition time.
    filename = os.path.join(gp.get_coordinatordatadir(), 'gpexpand.status')
    dbname = os.getenv('PGDATABASE', 'postgres')
    dburl = dbconn.DbURL(dbname=dbname)
    conn = dbconn.connect(dburl)
    day = 0
Example #12
import difflib

import pg

from contextlib import closing
from datetime import datetime
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.commands.gp import chk_local_db_running, get_coordinatordatadir
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED


PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'

coordinator_data_dir = get_coordinatordatadir()
if coordinator_data_dir is None:
    raise Exception('COORDINATOR_DATA_DIRECTORY is not set')


# query_sql returns a cursor object, so the caller is responsible for closing
# the dbconn connection.
def query_sql(dbname, sql):
    result = None

    with dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False) as conn:
        result = dbconn.query(conn, sql)
    return result

def execute_sql(dbname, sql):
    result = None
Example #13
    def __init__(self, cdd=None):
        if not cdd:
            cdd = get_coordinatordatadir()
        # Pipe "y" answers into gpdeletesystem to auto-confirm its prompts.
        cmd_str = "export COORDINATOR_DATA_DIRECTORY=%s; echo -e \"y\\ny\\n\" | gpdeletesystem -d %s" % (
            cdd, cdd)
        Command.__init__(self, 'run gpdeletesystem', cmd_str)
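The listing omits the surrounding class name; assuming a Command subclass named, say, GpDeleteSystem (a hypothetical name), usage would mirror the other Command invocations on this page:

# GpDeleteSystem is a hypothetical name for the class whose __init__ is shown above.
cmd = GpDeleteSystem()
cmd.run(validateAfter=True)  # raises on a non-zero exit code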
Example #14
def get_analyze_dir(dbname):
    coordinator_data_dir = get_coordinatordatadir()
    analyze_dir = os.path.join(coordinator_data_dir, 'db_analyze', dbname)
    return analyze_dir
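For example, with COORDINATOR_DATA_DIRECTORY set to /data/coordinator/gpseg-1, get_analyze_dir('gptest') returns /data/coordinator/gpseg-1/db_analyze/gptest.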
Example #15
def impl(context):
    gprecoverseg_lock_dir = os.path.join(get_coordinatordatadir(), 'gprecoverseg.lock')
    os.mkdir(gprecoverseg_lock_dir)
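Note that os.mkdir raises FileExistsError if a previous run left the fake lock directory behind, so a companion cleanup step is a natural pairing (our sketch, not from the listing):

import os

def remove_gprecoverseg_lock():
    # Hypothetical companion step: drop the fake lock directory if present.
    lock_dir = os.path.join(get_coordinatordatadir(), 'gprecoverseg.lock')
    if os.path.isdir(lock_dir):
        os.rmdir(lock_dir)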