Example #1
def impl(context, tablename, dbname, poolname):
    pool = WorkerPool(numWorkers=1)
    cmd = Command(name='drop a table in a worker pool', cmdStr="""psql -c "DROP TABLE %s" -d %s""" % (tablename, dbname))
    pool.addCommand(cmd)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
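The step above only queues the DROP TABLE and stashes the pool on the behave context; nothing here waits for the command. A minimal sketch of a hypothetical companion step that drains a stored pool later in the scenario, using only WorkerPool methods that appear elsewhere in these examples:

def impl(context, poolname):
    # Hypothetical follow-up step: wait for a previously stored pool.
    pool = context.pool[poolname]
    pool.join()
    try:
        pool.check_results()  # raises ExecutionError if the DROP TABLE failed
    finally:
        pool.haltWork()
        pool.joinWorkers()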
Example #2
class CleanVerification(Operation):
    def __init__(self, token, batch_default):
        self.token = token
        self.batch_default = batch_default
        self.pool = None
    def execute(self):
        entry = ValidateVerificationEntry(token = self.token).run()
        if not entry['verdone']:   
            raise WrongStateError("Only finished verification tasks may be cleaned up.")

        path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
        Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
        #RemoveTree(path).run()

        to_clean = ValidateVerification(content = entry['vercontent'],
                                        primaries_only = False).run()
        self.pool = WorkerPool(min(len(to_clean), self.batch_default))
        for seg in to_clean:
            host = seg.getSegmentHostName()
            path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
            cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
            self.pool.addCommand(cmd)

        logger.info('Waiting for clean commands to complete...')
        self.pool.wait_and_printdots(len(to_clean))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send cleanup on %s' % cmd.remoteHost)
                logger.error('Error: %s' % res.stderr)
                raise CleanVerificationError()
        RemoveVerificationEntry(token = self.token).run()
        logger.info('Verification %s has been cleaned.' % self.token)
Example #3
def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    host_to_seg_map = defaultdict(list)
    for seg in gparr.getDbList():
        if not seg.isSegmentMaster() and not seg.isSegmentStandby():
            host_to_seg_map[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)

    try:
        for host, data_dirs_list in host_to_seg_map.items():
            pickled_data_dirs_list = base64.urlsafe_b64encode(pickle.dumps(data_dirs_list))
            cmdStr = "$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % pickled_data_dirs_list
            cmd = Command('Restore the pg_hba.conf on remote hosts', cmdStr=cmdStr , ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)

        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')

    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
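The try/finally above is the full WorkerPool lifecycle: enqueue, join, inspect results, then always halt and join the workers so no background threads leak. A hedged generic sketch of that lifecycle (run_commands_in_pool is a hypothetical helper name, not part of gppylib):

def run_commands_in_pool(cmds, num_workers):
    pool = WorkerPool(numWorkers=num_workers)
    try:
        for cmd in cmds:
            pool.addCommand(cmd)
        pool.join()
        pool.check_results()  # raises ExecutionError on the first failure
    finally:
        # Always stop and join the worker threads, even on error.
        pool.haltWork()
        pool.joinWorkers()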
Example #4
    def execute(self):
        pool = WorkerPool()
        try:
            for seg in self.segments:
                datadir = seg.getSegmentDataDirectory()
                postmaster_pid_file = '%s/postmaster.pid' % datadir
                shared_mem = None
                if os.path.isfile(postmaster_pid_file):
                    with open(postmaster_pid_file) as fp:
                        # The last field of the last line of postmaster.pid is
                        # the SysV shared memory ID used by ipcrm -m below.
                        shared_mem = fp.readlines()[-1].split()[-1].strip()
                if shared_mem:
                    cmd = Command('clean up shared memory', cmdStr="ipcrm -m %s" % shared_mem) 
                    pool.addCommand(cmd)
            # Join once, after all cleanup commands have been queued.
            pool.join()

            for item in pool.getCompletedItems():
                result = item.get_results()

                # This code is usually called after a GPDB segment has
                # been terminated.  In that case, it is possible that
                # the shared memory has already been freed by the
                # time we are called to clean up.  Due to this race
                # condition, it is possible to get an `ipcrm: invalid
                # id1` error from ipcrm.  We, therefore, ignore it.
                if result.rc != 0 and not result.stderr.startswith("ipcrm: invalid id"):
                    raise Exception('Unable to clean up shared memory for segment: (%s)' % (result.stderr))
        finally:
            pool.haltWork()
            pool.joinWorkers()
            pool = None
Example #6
    def execute(self): 
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
        from_host, from_path = self.host, self.path
        logger.info("Commencing remote database dump file recovery process, please wait...")
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True) or seg.isSegmentMaster()]
        pool = WorkerPool(numWorkers = min(len(segs), self.batch_default))
        for seg in segs:
            if seg.isSegmentMaster():
                file = '%s%s' % (MASTER_DBDUMP_PREFIX, self.restore_timestamp)
            else:
                file = '%s0_%d_%s' % (DBDUMP_PREFIX, seg.getSegmentDbId(), self.restore_timestamp)
            if self.compress:
                file += '.gz'

            to_host = seg.getSegmentHostName()
            to_path = os.path.join(seg.getSegmentDataDirectory(), DUMP_DIR, self.restore_timestamp[0:8])
            if not CheckRemoteDir(to_path, to_host).run():
                logger.info('Creating directory %s on %s' % (to_path, to_host))
                try:
                    MakeRemoteDir(to_path, to_host).run()
                except OSError as e:
                    raise ExceptionNoStackTraceNeeded("Failed to create directory %s on %s" % (to_path, to_host))
   
            logger.info("Commencing remote copy from %s to %s:%s" % (from_host, to_host, to_path))
            pool.addCommand(Scp('Copying dump for seg %d' % seg.getSegmentDbId(),
                            srcFile=os.path.join(from_path, file),
                            dstFile=os.path.join(to_path, file),
                            srcHost=from_host,
                            dstHost=to_host))
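The method above stops after queueing the Scp commands; the pool is never drained in this excerpt. A hedged sketch of the continuation a caller would typically need, using only pool methods shown in these examples:

        # Hypothetical continuation: drain the pool and validate the copies.
        pool.join()
        try:
            pool.check_results()  # raises ExecutionError if any scp failed
        finally:
            pool.haltWork()
            pool.joinWorkers()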
Example #8
def impl(context, cmd, poolname):
    # Command requires a name as its first argument.
    command = Command(name='run command in a worker pool', cmdStr=cmd)
    pool = WorkerPool(numWorkers=1)
    pool.addCommand(command)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
    context.cmd = cmd
Example #9
def impl(context, query, dbname, poolname):
    pool = WorkerPool(numWorkers=1)
    cmd = on_unlock(query, dbname)
    pool.addCommand(cmd)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
    context.cmd = cmd
Example #10
def impl(context, cmd, poolname):
    command = Command(name='run gpcrondump in a separate thread', cmdStr=cmd)
    pool = WorkerPool(numWorkers=1)
    pool.addCommand(command)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
    context.cmd = cmd
class GpMirrorListToBuildTestCase(GpTestCase):

    def setUp(self):
        self.pool = WorkerPool()

    def tearDown(self):
        # All background threads must be stopped, or else the test runner will
        # hang waiting. Join the stopped threads to make sure we're completely
        # clean for the next test.
        self.pool.haltWork()
        self.pool.joinWorkers()
        super(GpMirrorListToBuildTestCase, self).tearDown()

    def test_pg_rewind_parallel_execution(self):
        self.apply_patches([
            # Mock CHECKPOINT command in run_pg_rewind() as successful
            patch('gppylib.db.dbconn.connect', return_value=Mock()),
            patch('gppylib.db.dbconn.execSQL', return_value=Mock()),
            # Mock the command to remove postmaster.pid as successful
            patch('gppylib.commands.base.Command.run', return_value=Mock()),
            patch('gppylib.commands.base.Command.get_return_code', return_value=0),
            # Mock all pg_rewind commands to be not successful
            patch('gppylib.commands.base.Command.was_successful', return_value=False),
            patch('gppylib.commands.base.Command.get_stdout', return_value='Mocking results')
        ])
        from gppylib.operations.buildMirrorSegments import GpMirrorListToBuild
        # The WorkerPool is the only parameter that matters in this test
        # case.  The test expects the workers to get a pg_rewind
        # command to run (and the command should fail to run).
        g = GpMirrorListToBuild(1, self.pool, 1, 1)
        rewindInfo = {}
        p0 = Segment.initFromString("2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary0")
        p1 = Segment.initFromString("3|1|p|p|s|u|sdw2|sdw2|40001|/data/primary1")
        m0 = Segment.initFromString("4|0|m|m|s|u|sdw2|sdw2|50000|/data/mirror0")
        m1 = Segment.initFromString("5|1|m|m|s|u|sdw1|sdw1|50001|/data/mirror1")
        rewindInfo[p0.dbid] = GpMirrorListToBuild.RewindSegmentInfo(
            p0, p0.address, p0.port)
        rewindInfo[p1.dbid] = GpMirrorListToBuild.RewindSegmentInfo(
            p1, p1.address, p1.port)
        rewindInfo[m0.dbid] = GpMirrorListToBuild.RewindSegmentInfo(
            m0, m0.address, m0.port)
        rewindInfo[m1.dbid] = GpMirrorListToBuild.RewindSegmentInfo(
            m1, m1.address, m1.port)

        # Test1: all 4 pg_rewind commands should fail due to the "was_successful" patch
        failedSegments = g.run_pg_rewind(rewindInfo)
        self.assertEqual(len(failedSegments), 4)
        # The returned list of failed segments should contain items of
        # type gparray.Segment
        failedSegments.remove(p0)
        self.assertTrue(failedSegments[0].getSegmentDbId() > 0)

        # Test2: patch it such that no failures this time
        patch('gppylib.commands.base.Command.was_successful', return_value=True).start()
        failedSegments = g.run_pg_rewind(rewindInfo)
        self.assertEqual(len(failedSegments), 0)
Example #14
    def run_using_workerpool(self, option=''):
        if not (set(option.split()) <= set(['-F' , '-r', '--persistent-check', ' '])):
            raise GpRecoversegException('Not a valid option with gprecoverseg')

        rcvr_cmd = 'gprecoverseg -a  %s' % option
        cmd = Command(name='Run gprecoverseg', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)

        pool = WorkerPool()
        pool.addCommand(cmd)
Example #15
 def test_print_progress(self, mock1):
     w = WorkerPool(numWorkers=32)
     c1 = Command('dummy command1', '')
     c2 = Command('dummy command2', '')
     w.addCommand(c1)
     w.addCommand(c2)
     w.join()
     w.print_progress(2)
     # assert_called_with actually verifies the call; Mock's auto-created
     # called_with attribute just returns a truthy Mock, so wrapping it in
     # assertTrue would always pass.
     mock1.assert_called_with('100.00% of jobs completed')
     w.haltWork()
Example #16
    def run_using_workerpool(self, option=''):
        if not (set(option.split()) <= set(['-F' , '-r', '--persistent-check', ' '])):
            raise GpRecoversegException('Not a valid option with gprecoverseg')

        rcvr_cmd = 'gprecoverseg -a  %s' % option
        cmd = Command(name='Run gprecoverseg', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)

        pool = WorkerPool(numWorkers=1, daemonize=True)
        pool.addCommand(cmd)
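Because the pool is created with daemonize=True and never joined here, gprecoverseg keeps running in the background. A hedged sketch of how a caller might poll for completion without blocking (assumes `import time`; isDone() appears in the WorkerPool tests later in this collection):

        # Hypothetical polling loop for the daemonized pool.
        while not pool.isDone():
            time.sleep(1)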
Example #17
def get_host_status(hostlist):
    """
    Test if SSH command works on a host and return a dictionary
    Return Ex: {host1: True, host2: False}
    where True represents SSH command success and False represents failure
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    pool = WorkerPool()

    for host in hostlist:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            host_status_dict[cmd.remoteHost] = False
        else:
            host_status_dict[cmd.remoteHost] = True

    return host_status_dict
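A short hypothetical usage of get_host_status, filtering a host list down to the reachable hosts (the host names are placeholders):

hosts = ['sdw1', 'sdw2', 'sdw3']
status = get_host_status(hosts)
reachable = [host for host, ok in status.items() if ok]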
Example #18
def scp_file_to_hosts(host_list, filename, batch_default):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for hname in host_list:
        pool.addCommand(Scp('Copying table_filter_file to %s' % hname,
                            srcFile=filename,
                            dstFile=filename,
                            dstHost=hname))
    pool.join()
    pool.haltWork()
    pool.check_results()
Example #19
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for host in host_list:
        cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()
    if check_results:
        pool.check_results()
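A hypothetical call site for run_pool_command, removing a scratch file from every host and raising on the first failure (hosts, path, and batch size are placeholders):

run_pool_command(['sdw1', 'sdw2'], 'rm -f /tmp/table_filter_file', batch_default=16)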
Example #20
class TriggerFilerepVerifyMessages(Operation):
    def __init__(self, content, token, batch_default, full=None, verify_file=None, verify_dir=None,
                       abort=None, suspend=None, resume=None, ignore_dir=None, ignore_file=None,
                       results=None, results_level=None):
        self.content = content
        self.token = token
        self.full = full
        self.verify_file = verify_file
        self.verify_dir = verify_dir
        self.abort = abort
        self.suspend = suspend
        self.resume = resume
        self.ignore_dir = ignore_dir
        self.ignore_file = ignore_file
        self.results = results
        self.results_level = results_level
        self.batch_default = batch_default
        self.pool = None
    def execute(self):
        """
        Sends arbitrary gp_primarymirror requests to the backend processes defined.
        """
        to_trigger = ValidateVerification(content = self.content).run()

        logger.info('Sending gp_primarymirror requests...')
        self.pool = WorkerPool(min(len(to_trigger), self.batch_default))

        for pseg in to_trigger:
            host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()
            cmd = SendFilerepVerifyMessage(name = 'verify %s' % host, host = host, port = port,
                                           token = self.token,
                                           full = self.full,
                                           verify_file = self.verify_file,
                                           verify_dir = self.verify_dir,
                                           abort = self.abort,
                                           suspend = self.suspend,
                                           resume = self.resume,
                                           ignore_dir = self.ignore_dir,
                                           ignore_file = self.ignore_file,
                                           results = self.results,
                                           results_level = self.results_level)
            logger.debug("Sending request to %s:%d" % (host, port))
            self.pool.addCommand(cmd)

        logger.info('Waiting for gp_primarymirror commands to complete...')
        self.pool.wait_and_printdots(len(to_trigger))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send gp_primarymirror message to %s:%s' % (cmd.host, cmd.port))
                logger.error('Error: %s' % res.stderr)
                raise TriggerGpPrimaryMirrorFailure()
        logger.info('gp_primarymirror messages have been triggered successfully.')
Example #22
def scp_file_to_hosts(host_list, filename, batch_default):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for hname in host_list:
        pool.addCommand(
            Scp("Copying table_filter_file to %s" % hname, srcFile=filename, dstFile=filename, dstHost=hname)
        )
    pool.join()
    pool.haltWork()
    pool.check_results()
Example #23
 def _get_pgcontrol_data_from_segments(self, gpdb_list):
     pool = WorkerPool(numWorkers=self.workers)
     try:
         for gpdb in gpdb_list:  # iterate for all segments
             cmd = PgControlData(name='run pg_controldata', datadir=gpdb.getSegmentDataDirectory(),
                                 ctxt=REMOTE, remoteHost=gpdb.getSegmentHostName())
             cmd.gparray_gpdb = gpdb
             pool.addCommand(cmd)
         pool.join()
     finally:
         # Make sure that we halt the workers or else we'll hang
         pool.haltWork()
         pool.joinWorkers()
     return pool.getCompletedItems()
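Each completed command still carries its segment via the gparray_gpdb attribute attached above, so callers can map results back to segments. A hedged consumption sketch (the module-level logger is an assumption, as in the other snippets):

     # Hypothetical consumer of the completed pg_controldata commands.
     for cmd in self._get_pgcontrol_data_from_segments(gpdb_list):
         seg = cmd.gparray_gpdb
         if not cmd.get_results().wasSuccessful():
             logger.warning('pg_controldata failed on %s' % seg.getSegmentHostName())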
Example #24
def get_host_status(hostlist):
    """
    Test if SSH command works on a host and return a dictionary
    Return Ex: {host1: True, host2: False}
    where True represents SSH command success and False represents failure
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    pool = WorkerPool(min(len(hostlist), 16))

    for host in hostlist:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            host_status_dict[cmd.remoteHost] = False
        else:
            host_status_dict[cmd.remoteHost] = True

    return host_status_dict
Example #26
def impl(context, cmd, poolname):
    # These attributes must have been set by earlier steps; reading them
    # directly raises a clear AttributeError if any of them is missing,
    # instead of a NameError from a conditionally bound local.
    netbackup_service_host = context.netbackup_service_host
    netbackup_policy = context.netbackup_policy
    netbackup_schedule = context.netbackup_schedule
    cmd = cmd + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule
    command = Command(name='run command in a separate thread', cmdStr=cmd)
    pool = WorkerPool(numWorkers=1)
    pool.addCommand(command)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
    context.cmd = cmd
Example #27
    def main(self, file_name, get_cmd_list):
        pool = None
        logger = None
        try:
            options = self.parseargs()
            exec_name = os.path.split(file_name)[-1]
            logger = gplog.setup_tool_logging(exec_name,
                                              unix.getLocalHostname(),
                                              unix.getUserName(),
                                              logdir=options.logfileDirectory)

            if not options.confinfo:
                raise Exception('Missing --confinfo argument.')

            if options.batch_size <= 0:
                logger.warn('batch_size was less than or equal to zero.  Setting to 1.')
                options.batch_size = 1

            if options.verbose:
                gplog.enable_verbose_logging()

            # TODO: should we output the name of the exact file?
            logger.info("Starting recovery with args: %s" %
                        ' '.join(sys.argv[1:]))

            seg_recovery_info_list = recoveryinfo.deserialize_recovery_info_list(
                options.confinfo)
            if len(seg_recovery_info_list) == 0:
                raise Exception(
                    'No segment configuration values found in --confinfo argument'
                )

            cmd_list = get_cmd_list(seg_recovery_info_list,
                                    options.forceoverwrite, logger)

            pool = WorkerPool(
                numWorkers=min(options.batch_size, len(cmd_list)))
            self.run_cmd_list(cmd_list, logger, options, pool)

            sys.exit(0)
        except Exception as e:
            if logger:
                logger.error(str(e))
            print(e, file=sys.stderr)
            sys.exit(1)
        finally:
            if pool:
                pool.haltWork()
Example #28
    def main(self, cmd_list):
        pool = None
        try:
            # TODO: should we output the name of the exact file?
            self.logger.info("Starting recovery with args: %s" %
                             ' '.join(sys.argv[1:]))

            pool = WorkerPool(
                numWorkers=min(self.options.batch_size, len(cmd_list)))
            self.run_cmd_list(cmd_list, self.logger, self.options, pool)
            sys.exit(0)
        except Exception as e:
            self._write_to_stderr_and_exit(e)
        finally:
            if pool:
                pool.haltWork()
Example #29
 def execute(self):
     pool = WorkerPool()
     given = set(ListFiles(self.pid_dir).run())
     try:
         for host in self.trail:
             available = ListRemoteFiles(self.pid_dir, host).run()
             to_copy = [dir for dir in available if dir not in given]
             for dir in to_copy:
                 comp_dir = os.path.join(self.pid_dir, dir)
                 pool.addCommand(Scp('collect coverage',
                                     srcFile = comp_dir,
                                     srcHost = host,
                                     dstFile = comp_dir,
                                     recursive = True))
     finally:
         pool.join()
Example #30
    def execute(self):
        entry = ValidateVerificationEntry(token=self.token).run()
        if not entry['verdone']:
            raise WrongStateError(
                "Only finished verification tasks may be cleaned up.")

        path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
        Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
        #RemoveTree(path).run()

        to_clean = ValidateVerification(content=entry['vercontent'],
                                        primaries_only=False).run()
        pool = WorkerPool(min(len(to_clean), self.batch_default))
        for seg in to_clean:
            host = seg.getSegmentHostName()
            path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify',
                                "*%s*" % self.token)
            cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
            pool.addCommand(cmd)

        logger.info('Waiting for clean commands to complete...')
        pool.wait_and_printdots(len(to_clean))

        for cmd in pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send cleanup on %s' % cmd.remoteHost)
                logger.error('Error: %s' % res.stderr)
                raise CleanVerificationError()
        RemoveVerificationEntry(token=self.token).run()
        logger.info('Verification %s has been cleaned.' % self.token)
Example #31
    def checkSSH(self):
        '''Check that ssh to each host in the list works.'''

        pool = WorkerPool()

        for h in self.list:
            cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)

        pool.join()
        pool.haltWork()

        for cmd in pool.getCompletedItems():
            if not cmd.get_results().wasSuccessful():
                raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)

        return True
Example #32
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)

        self.__pool = WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        # verify "where to recover" options
        optionCnt = 0
        if self.__options.newRecoverHosts is not None:
            optionCnt += 1
        if self.__options.recoveryConfigFile is not None:
            optionCnt += 1
        if self.__options.rebalanceSegments:
            optionCnt += 1
        if optionCnt > 1:
            raise ProgramArgumentValidationException("Only one of -i, -p, and -r may be specified")

        faultProberInterface.getFaultProber().initializeProber(gpEnv.getMasterPort())

        confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())

        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        num_workers = min(len(gpArray.get_hostlist()), self.__options.parallelDegree)
        hosts = set(gpArray.get_hostlist(includeMaster=False))
        unreachable_hosts = get_unreachable_segment_hosts(hosts, num_workers)
        for i, segmentPair in enumerate(gpArray.segmentPairs):
            if segmentPair.primaryDB.getSegmentHostName() in unreachable_hosts:
                logger.warning("Not recovering segment %d because %s is unreachable" % (segmentPair.primaryDB.dbid, segmentPair.primaryDB.getSegmentHostName()))
                gpArray.segmentPairs[i].primaryDB.unreachable = True

            if segmentPair.mirrorDB.getSegmentHostName() in unreachable_hosts:
                logger.warning("Not recovering segment %d because %s is unreachable" % (segmentPair.mirrorDB.dbid, segmentPair.mirrorDB.getSegmentHostName()))
                gpArray.segmentPairs[i].mirrorDB.unreachable = True

        if not gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                'GPDB Mirroring replication is not configured for this Greenplum Database instance.')

        # We have phys-rep/filerep mirrors.

        if self.__options.newRecoverHosts is not None:
            try:
                uniqueHosts = []
                for h in self.__options.newRecoverHosts.split(','):
                    if h.strip() not in uniqueHosts:
                        uniqueHosts.append(h.strip())
                self.__options.newRecoverHosts = uniqueHosts
            except Exception as ex:
                raise ProgramArgumentValidationException( \
                    "Invalid value for recover hosts: %s" % ex)
Example #33
class GpStopPrintProgressTestCase(unittest.TestCase):
    def setUp(self):
        self.logger = Mock(spec=logging.Logger)
        self.pool = WorkerPool(numWorkers=1, logger=self.logger)

    def tearDown(self):
        self.pool.haltWork()

    def test_print_progress_prints_once_with_completed_pool(self):
        self.pool.addCommand(Mock(spec=Command))
        self.pool.addCommand(Mock(spec=Command))
        self.pool.join()

        gpstop.print_progress(self.pool)
        self.logger.info.assert_called_once_with('100.00% of jobs completed')

    def test_print_progress_prints_once_with_empty_pool(self):
        gpstop.print_progress(self.pool)
        self.logger.info.assert_called_once_with('0.00% of jobs completed')

    def test_print_progress_prints_intermediate_progress(self):
        duration = 0.01

        cmd = Mock(spec=Command)

        def wait_for_duration():
            time.sleep(duration)

        cmd.run.side_effect = wait_for_duration

        self.pool.addCommand(Mock(spec=Command))
        self.pool.addCommand(cmd)

        # We run a command for ten milliseconds, printing progress every
        # millisecond, so at some point we should transition from 50% to 100%.
        gpstop.print_progress(self.pool, interval=(duration / 10))
        self.logger.info.assert_has_calls([
            call('50.00% of jobs completed'),
            call('100.00% of jobs completed'),
        ])
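These tests pin down the observable contract of gpstop.print_progress: a percentage line is logged at each interval, ending with a final line once the pool is done. A hedged reimplementation sketch consistent with that contract (not the actual gpstop code; the explicit logger parameter is an assumption):

def print_progress(pool, logger, interval=1):
    while True:
        done = pool.join(interval)      # join(timeout) returns True once the pool is idle
        assigned = pool.assigned or 1   # guard the division for an empty pool
        logger.info('%.2f%% of jobs completed' % (100.0 * pool.completed / assigned))
        if done:
            return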
Example #35
 def execute(self):
     pool = WorkerPool()
     given = set(ListFiles(self.pid_dir).run())
     try:
         for host in self.trail:
             available = ListRemoteFiles(self.pid_dir, host).run()
             to_copy = [dir for dir in available if dir not in given]
             for dir in to_copy:
                 comp_dir = os.path.join(self.pid_dir, dir)
                 pool.addCommand(
                     Scp('collect coverage',
                         srcFile=comp_dir,
                         srcHost=host,
                         dstFile=comp_dir,
                         recursive=True))
         pool.join()
     finally:
         pool.haltWork()
Example #36
    def removeBadHosts(self):
        '''Update the host list to include only hosts on which SSH was successful.'''

        pool = WorkerPool()

        for h in self.list:
            cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)

        pool.join()
        pool.haltWork()

        bad_hosts = []
        working_hosts = []
        for cmd in pool.getCompletedItems():
            if not cmd.get_results().wasSuccessful():
                bad_hosts.append(cmd.remoteHost)
            else:
                working_hosts.append(cmd.remoteHost)

        self.list = working_hosts[:]
        return bad_hosts
Example #37
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" %
                self.__options.parallelDegree)

        self.__pool = WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        # verify "where to recover" options
        optionCnt = 0
        if self.__options.newRecoverHosts is not None:
            optionCnt += 1
        if self.__options.recoveryConfigFile is not None:
            optionCnt += 1
        if self.__options.rebalanceSegments:
            optionCnt += 1
        if optionCnt > 1:
            raise ProgramArgumentValidationException(
                "Only one of -i, -p, and -r may be specified")

        faultProberInterface.getFaultProber().initializeProber(
            gpEnv.getMasterPort())

        confProvider = configInterface.getConfigurationProvider(
        ).initializeProvider(gpEnv.getMasterPort())

        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        if not gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                'GPDB Mirroring replication is not configured for this Greenplum Database instance.'
            )

        # We have phys-rep/filerep mirrors.

        if self.__options.newRecoverHosts is not None:
            try:
                uniqueHosts = []
                for h in self.__options.newRecoverHosts.split(','):
                    if h.strip() not in uniqueHosts:
                        uniqueHosts.append(h.strip())
                self.__options.newRecoverHosts = uniqueHosts
            except Exception as ex:
                raise ProgramArgumentValidationException( \
                    "Invalid value for recover hosts: %s" % ex)
Example #38
    def filterMultiHomedHosts(self):
        '''When multiple hostnames refer to the same node, keep only one in the host list.'''
        unique = {}

        pool = WorkerPool()
        for h in self.list:
            cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)

        pool.join()
        pool.haltWork()

        for finished_cmd in pool.getCompletedItems():
            hostname = finished_cmd.get_hostname()
            if (not hostname):
                unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
            elif not unique.get(hostname):
                unique[hostname] = finished_cmd.remoteHost
            elif hostname == finished_cmd.remoteHost:
                unique[hostname] = finished_cmd.remoteHost

        self.list = list(unique.values())

        return self.list
Example #40
    def execute(self):
        logger.info('Gathering results of verification %s...' % self.token)
        to_gather = ValidateVerification(content=self.content,
                                         primaries_only=False).run()

        dest_base = os.path.join(self.master_datadir, 'pg_verify', self.token)
        if CheckDir(dest_base).run():
            # TODO: if end user has mucked around with artifacts on master, a regathering may
            # be needed; perhaps, a --force option to accompany --results?
            return
        MakeDir(dest_base).run()

        pool = WorkerPool(min(len(to_gather), self.batch_default))
        for seg in to_gather:
            host = seg.getSegmentHostName()
            content = seg.getSegmentContentId()
            role = seg.getSegmentRole()
            src = os.path.join(seg.getSegmentDataDirectory(), "pg_verify",
                               "*%s*" % self.token)

            dest = os.path.join(dest_base, str(content), str(role))
            MakeDir(dest).run()
            cmd = Scp('consolidate', srcFile=src, srcHost=host, dstFile=dest)
            pool.addCommand(cmd)

        logger.info('Waiting for scp commands to complete...')
        pool.wait_and_printdots(len(to_gather))
        pool.check_results()

        dest = os.path.join(dest_base, 'verification_%s.fix' % self.token)
        with open(dest, 'w') as output:
            for seg in to_gather:
                content = seg.getSegmentContentId()
                role = seg.getSegmentRole()
                src = os.path.join(dest_base, str(content), str(role),
                                   'verification_%s.fix' % self.token)
                with open(src, 'r') as input:
                    output.writelines(input.readlines())
Example #41
    def filterMultiHomedHosts(self):
        '''When multiple hostnames refer to the same node, keep only one in the host list.'''
        unique = {}

        pool = WorkerPool()
        for h in self.list:
            cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)

        pool.join()
        pool.haltWork()

        for finished_cmd in pool.getCompletedItems():
            hostname = finished_cmd.get_hostname()
            if (not hostname):
                unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
            elif not unique.get(hostname):
                unique[hostname] = finished_cmd.remoteHost
            elif hostname == finished_cmd.remoteHost:
                unique[hostname] = finished_cmd.remoteHost

        self.list = unique.values()

        return self.list
Example #42
    def execute(self):
        """
        Sends arbitrary gp_primarymirror requests to the backend processes defined.
        """
        to_trigger = ValidateVerification(content=self.content).run()

        logger.info('Sending gp_primarymirror requests...')
        pool = WorkerPool(min(len(to_trigger), self.batch_default))

        for pseg in to_trigger:
            host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()
            cmd = SendFilerepVerifyMessage(name='verify %s' % host,
                                           host=host,
                                           port=port,
                                           token=self.token,
                                           full=self.full,
                                           verify_file=self.verify_file,
                                           verify_dir=self.verify_dir,
                                           abort=self.abort,
                                           suspend=self.suspend,
                                           resume=self.resume,
                                           ignore_dir=self.ignore_dir,
                                           ignore_file=self.ignore_file,
                                           results=self.results,
                                           results_level=self.results_level)
            logger.debug("Sending request to %s:%d" % (host, port))
            pool.addCommand(cmd)

        logger.info('Waiting for gp_primarymirror commands to complete...')
        pool.wait_and_printdots(len(to_trigger))

        for cmd in pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error(
                    'Failed to send gp_primarymirror message to %s:%s' %
                    (cmd.host, cmd.port))
                logger.error('Error: %s' % res.stderr)
                raise TriggerGpPrimaryMirrorFailure()
        logger.info(
            'gp_primarymirror messages have been triggered successfully.')
Example #43
    def execute(self):
        logger.info('Gathering results of verification %s...' % self.token)
        to_gather = ValidateVerification(content = self.content,
                                         primaries_only = False).run()

        dest_base = os.path.join(self.master_datadir, 'pg_verify', self.token)
        if CheckDir(dest_base).run():
            # TODO: if end user has mucked around with artifacts on master, a regathering may
            # be needed; perhaps, a --force option to accompany --results?
            return
        MakeDir(dest_base).run()

        self.pool = WorkerPool(min(len(to_gather), self.batch_default))
        for seg in to_gather:
            host = seg.getSegmentHostName()
            content = seg.getSegmentContentId()
            role = seg.getSegmentRole()
            src = os.path.join(seg.getSegmentDataDirectory(), "pg_verify", "*%s*" % self.token)

            dest = os.path.join(dest_base, str(content), str(role))
            MakeDir(dest).run()
            cmd = Scp('consolidate', srcFile=src, srcHost=host, dstFile=dest)
            self.pool.addCommand(cmd)

        logger.info('Waiting for scp commands to complete...')
        self.pool.wait_and_printdots(len(to_gather))
        self.pool.check_results()

        dest = os.path.join(dest_base, 'verification_%s.fix' % self.token)
        with open(dest, 'w') as output:
            for seg in to_gather:
                content = seg.getSegmentContentId()
                role = seg.getSegmentRole()
                src = os.path.join(dest_base, str(content), str(role), 'verification_%s.fix' % self.token)
                with open(src, 'r') as input:
                    output.writelines(input.readlines())
Example #44
    def rebalance(self):
        # Get the unbalanced primary segments grouped by hostname
        # These segments are what we will shutdown.
        logger.info("Getting unbalanced segments")
        unbalanced_primary_segs = GpArray.getSegmentsByHostName(
            self.gpArray.get_unbalanced_primary_segdbs())
        pool = WorkerPool()

        count = 0

        try:
            # Disable ctrl-c
            signal.signal(signal.SIGINT, signal.SIG_IGN)

            logger.info("Stopping unbalanced primary segments...")
            for hostname in unbalanced_primary_segs.keys():
                cmd = GpSegStopCmd("stop unbalanced primary segs",
                                   self.gpEnv.getGpHome(),
                                   self.gpEnv.getGpVersion(),
                                   'fast',
                                   unbalanced_primary_segs[hostname],
                                   ctxt=REMOTE,
                                   remoteHost=hostname,
                                   timeout=600)
                pool.addCommand(cmd)
                count += 1

            pool.wait_and_printdots(count, False)

            failed_count = 0
            completed = pool.getCompletedItems()
            for res in completed:
                if not res.get_results().wasSuccessful():
                    failed_count += 1

            if failed_count > 0:
                logger.warn(
                    "%d segments failed to stop.  A full rebalance of the" % failed_count)
                logger.warn(
                    "system is not possible at this time.  Please check the")
                logger.warn(
                    "log files, correct the problem, and run gprecoverseg -r")
                logger.warn("again.")
                logger.info(
                    "gprecoverseg will continue with a partial rebalance.")

            pool.empty_completed_items()
            # issue a distributed query to make sure we pick up the fault
            # that we just caused by shutting down segments
            conn = None
            try:
                logger.info("Triggering segment reconfiguration")
                dburl = dbconn.DbURL()
                conn = dbconn.connect(dburl)
                cmd = ReconfigDetectionSQLQueryCommand(conn)
                pool.addCommand(cmd)
                pool.wait_and_printdots(1, False)
            except Exception:
                # This exception is expected
                pass
            finally:
                if conn:
                    conn.close()

            # Final step is to issue a recoverseg operation to resync segments
            logger.info("Starting segment synchronization")
            cmd = GpRecoverseg("rebalance recoverseg")
            pool.addCommand(cmd)
            pool.wait_and_printdots(1, False)
        except Exception as ex:
            raise ex
Example #45
 def setUp(self):
     self.pool = None
     self.pool = WorkerPool()
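The fixture above creates a pool but no teardown is shown. A hedged sketch of the matching tearDown, mirroring the halt/join pattern used by the other test fixtures in this collection:

 def tearDown(self):
     # Stop and join the worker threads so the test runner does not hang.
     self.pool.haltWork()
     self.pool.joinWorkers()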
Example #46
    def execute(self):
        pool = WorkerPool()
        try:
            for seg in self.segments:
                datadir = seg.getSegmentDataDirectory()
                postmaster_pid_file = '%s/postmaster.pid' % datadir
                shared_mem = None
                if os.path.isfile(postmaster_pid_file):
                    with open(postmaster_pid_file) as fp:
                        shared_mem = fp.readlines()[-1].split()[-1].strip()
                if shared_mem:
                    cmd = Command('clean up shared memory',
                                  cmdStr="ipcrm -m %s" % shared_mem)
                    pool.addCommand(cmd)
            # Join once, after all cleanup commands have been queued.
            pool.join()

            for item in pool.getCompletedItems():
                result = item.get_results()

                # This code is usually called after a GPDB segment has
                # been terminated.  In that case, it is possible that
                # the shared memory has already been freed by the
                # time we are called to clean up.  Due to this race
                # condition, it is possible to get an `ipcrm: invalid
                # id1` error from ipcrm.  We, therefore, ignore it.
                if result.rc != 0 and not result.stderr.startswith(
                        "ipcrm: invalid id"):
                    raise Exception(
                        'Unable to clean up shared memory for segment: (%s)' %
                        (result.stderr))
        finally:
            pool.haltWork()
            pool.joinWorkers()
            pool = None
Example #47
 def setUp(self):
     self.pool = WorkerPool()
Example #48
class WorkerPoolTest(unittest.TestCase):
    def setUp(self):
        self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())

    def tearDown(self):
        # All background threads must be stopped, or else the test runner will
        # hang waiting. Join the stopped threads to make sure we're completely
        # clean for the next test.
        self.pool.haltWork()
        self.pool.joinWorkers()

    def test_pool_must_have_some_workers(self):
        with self.assertRaises(Exception):
            WorkerPool(numWorkers=0)
        
    def test_pool_runs_added_command(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        cmd.run.assert_called_once_with()

    def test_completed_commands_are_retrievable(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd) # should quickly be completed
        self.pool.join()

        self.assertEqual(self.pool.getCompletedItems(), [cmd])

    def test_pool_is_not_marked_done_until_commands_finish(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        self.assertTrue(self.pool.isDone())

        try:
            self.pool.addCommand(cmd)
            self.assertFalse(self.pool.isDone())

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        self.assertTrue(self.pool.isDone())

    def test_pool_can_be_emptied_of_completed_commands(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        self.pool.empty_completed_items()
        self.assertEqual(self.pool.getCompletedItems(), [])

    def test_check_results_succeeds_when_no_items_fail(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)
        self.pool.join()
        self.pool.check_results()

    def test_check_results_throws_exception_at_first_failure(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return False to simulate a
        # failure.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = False

        self.pool.addCommand(cmd)
        self.pool.join()

        with self.assertRaises(ExecutionError):
            self.pool.check_results()

    def test_join_with_timeout_returns_done_immediately_if_there_is_nothing_to_do(self):
        start = time.time()
        done = self.pool.join(10)
        delta = time.time() - start

        self.assertTrue(done)

        # "Returns immediately" is a difficult thing to test. Longer than two
        # seconds seems like a reasonable failure case, even on a heavily loaded
        # test container.
        self.assertLess(delta, 2)

    def test_join_with_timeout_doesnt_return_done_until_all_commands_complete(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd)

            done = self.pool.join(0.001)
            self.assertFalse(done)

            # Test zero and negative timeouts too.
            done = self.pool.join(0)
            self.assertFalse(done)

            done = self.pool.join(-1)
            self.assertFalse(done)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        done = self.pool.join(2) # should be immediate, but there's still a race
        self.assertTrue(done)

    def test_completed_returns_number_of_completed_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.assertEqual(self.pool.completed, 3)

    def test_completed_can_be_cleared_back_to_zero(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()
        self.assertEqual(self.pool.completed, 0)

    def test_completed_is_reset_to_zero_after_getCompletedItems(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.getCompletedItems()
        self.assertEqual(self.pool.completed, 0)

    def test_assigned_returns_number_of_assigned_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.assertEqual(self.pool.assigned, 3)

    def test_assigned_is_decremented_when_completed_items_are_emptied(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_checked(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.check_results()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_popped(self):
        # The first command will finish immediately.
        cmd1 = mock.Mock(spec=Command)
        self.pool.addCommand(cmd1)

        # The other command will wait until we allow it to continue.
        cmd2 = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd2.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd2)
            self.assertEqual(self.pool.assigned, 2)

            # Avoid race flakes; make sure we actually complete the first
            # command.
            while self.pool.completed < 1:
                self.pool.join(0.001)

            # Pop the completed item.
            self.assertEqual(self.pool.getCompletedItems(), [cmd1])

            # Now we should be down to one assigned command.
            self.assertEqual(self.pool.assigned, 1)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        # Pop the other completed item.
        self.assertEqual(self.pool.getCompletedItems(), [cmd2])
        self.assertEqual(self.pool.assigned, 0)

    def test_join_and_indicate_progress_prints_nothing_if_pool_is_done(self):
        stdout = StringIO.StringIO()
        join_and_indicate_progress(self.pool, stdout)

        self.assertEqual(stdout.getvalue(), '')

    def test_join_and_indicate_progress_prints_dots_until_pool_is_done(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        # Open up a pipe and wrap each end in a file-like object.
        read_end, write_end = os.pipe()
        read_end = os.fdopen(read_end, 'r')
        write_end = os.fdopen(write_end, 'w')

        # Create a thread to perform join_and_indicate_progress().
        def tmain():
            join_and_indicate_progress(self.pool, write_end, interval=0.001)
            write_end.close()
        join_thread = threading.Thread(target=tmain)

        try:
            # Add the command, then join the WorkerPool.
            self.pool.addCommand(cmd)
            join_thread.start()

            # join_and_indicate_progress() is now writing to our pipe. Wait for
            # a few dots...
            for _ in range(3):
                byte = read_end.read(1)
                self.assertEqual(byte, '.')

            # ...then stop the command.
            event.set()

            # Make sure the rest of the output consists of dots ending in a
            # newline. (tmain() closes the write end of the pipe so that this
            # read() will complete.)
            remaining = read_end.read()
            self.assertRegexpMatches(remaining, r'^[.]*\n$')

        finally:
            # Make sure that we unblock and join all threads, even on a test
            # failure.
            event.set()
            join_thread.join()

    def test_join_and_indicate_progress_flushes_every_dot(self):
        duration = 0.005

        cmd = mock.Mock(spec=Command)
        def wait_for_duration():
            time.sleep(duration)
        cmd.run.side_effect = wait_for_duration
        self.pool.addCommand(cmd)

        stdout = mock.Mock(spec=file)
        join_and_indicate_progress(self.pool, stdout, interval=(duration / 5))

        for i, call in enumerate(stdout.mock_calls):
            # Every written dot should be followed by a flush().
            if call == mock.call.write('.'):
                self.assertEqual(stdout.mock_calls[i + 1], mock.call.flush())
Example #49
0
class WorkerPoolTest(unittest.TestCase):
    def setUp(self):
        self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())

    def tearDown(self):
        # All background threads must be stopped, or else the test runner will
        # hang waiting. Join the stopped threads to make sure we're completely
        # clean for the next test.
        self.pool.haltWork()
        self.pool.joinWorkers()

    def test_pool_must_have_some_workers(self):
        with self.assertRaises(Exception):
            WorkerPool(numWorkers=0)
        
    def test_pool_runs_added_command(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        cmd.run.assert_called_once_with()

    def test_completed_commands_are_retrievable(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd) # should quickly be completed
        self.pool.join()

        self.assertEqual(self.pool.getCompletedItems(), [cmd])

    def test_pool_is_not_marked_done_until_commands_finish(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        self.assertTrue(self.pool.isDone())

        try:
            self.pool.addCommand(cmd)
            self.assertFalse(self.pool.isDone())

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        self.assertTrue(self.pool.isDone())

    def test_pool_can_be_emptied_of_completed_commands(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        self.pool.empty_completed_items()
        self.assertEqual(self.pool.getCompletedItems(), [])

    def test_check_results_succeeds_when_no_items_fail(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)
        self.pool.join()
        self.pool.check_results()

    def test_check_results_throws_exception_at_first_failure(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return False to simulate a
        # failure.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = False

        self.pool.addCommand(cmd)
        self.pool.join()

        with self.assertRaises(ExecutionError):
            self.pool.check_results()

    def test_join_with_timeout_returns_done_immediately_if_there_is_nothing_to_do(self):
        start = time.time()
        done = self.pool.join(10)
        delta = time.time() - start

        self.assertTrue(done)

        # "Returns immediately" is a difficult thing to test. Longer than two
        # seconds seems like a reasonable failure case, even on a heavily loaded
        # test container.
        self.assertLess(delta, 2)

    def test_join_with_timeout_doesnt_return_done_until_all_commands_complete(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd)

            done = self.pool.join(0.001)
            self.assertFalse(done)

            # Test zero and negative timeouts too.
            done = self.pool.join(0)
            self.assertFalse(done)

            done = self.pool.join(-1)
            self.assertFalse(done)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        done = self.pool.join(2) # should be immediate, but there's still a race
        self.assertTrue(done)

    def test_completed_returns_number_of_completed_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.assertEqual(self.pool.completed, 3)

    def test_completed_can_be_cleared_back_to_zero(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()
        self.assertEqual(self.pool.completed, 0)

    def test_completed_is_reset_to_zero_after_getCompletedItems(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.getCompletedItems()
        self.assertEqual(self.pool.completed, 0)

    def test_assigned_returns_number_of_assigned_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.assertEqual(self.pool.assigned, 3)

    def test_assigned_is_decremented_when_completed_items_are_emptied(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_checked(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.check_results()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_popped(self):
        # The first command will finish immediately.
        cmd1 = mock.Mock(spec=Command)
        self.pool.addCommand(cmd1)

        # The other command will wait until we allow it to continue.
        cmd2 = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd2.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd2)
            self.assertEqual(self.pool.assigned, 2)

            # Avoid race flakes; make sure we actually complete the first
            # command.
            while self.pool.completed < 1:
                self.pool.join(0.001)

            # Pop the completed item.
            self.assertEqual(self.pool.getCompletedItems(), [cmd1])

            # Now we should be down to one assigned command.
            self.assertEqual(self.pool.assigned, 1)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        # Pop the other completed item.
        self.assertEqual(self.pool.getCompletedItems(), [cmd2])
        self.assertEqual(self.pool.assigned, 0)

    def test_join_and_indicate_progress_prints_nothing_if_pool_is_done(self):
        stdout = StringIO.StringIO()
        join_and_indicate_progress(self.pool, stdout)

        self.assertEqual(stdout.getvalue(), '')

    def test_join_and_indicate_progress_prints_dots_until_pool_is_done(self):
        # To avoid false negatives from the race conditions here, let's set up a
        # situation where we'll print ten dots on average, and verify that there
        # were at least five dots printed.
        duration = 0.01

        cmd = mock.Mock(spec=Command)
        def wait_for_duration():
            time.sleep(duration)
        cmd.run.side_effect = wait_for_duration
        self.pool.addCommand(cmd)

        stdout = StringIO.StringIO()
        join_and_indicate_progress(self.pool, stdout, interval=(duration / 10))

        results = stdout.getvalue()
        self.assertIn('.....', results)
        self.assertTrue(results.endswith('\n'))

    def test_join_and_indicate_progress_flushes_every_dot(self):
        # Set up a test scenario like the progress test above.
        duration = 0.005

        cmd = mock.Mock(spec=Command)
        def wait_for_duration():
            time.sleep(duration)
        cmd.run.side_effect = wait_for_duration
        self.pool.addCommand(cmd)

        stdout = mock.Mock(spec=file)
        join_and_indicate_progress(self.pool, stdout, interval=(duration / 5))

        for i, call in enumerate(stdout.mock_calls):
            # Every written dot should be followed by a flush().
            if call == mock.call.write('.'):
                self.assertEqual(stdout.mock_calls[i + 1], mock.call.flush())
Example #50
0
 def test_pool_must_have_some_workers(self):
     with self.assertRaises(Exception):
         WorkerPool(numWorkers=0)
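The unit tests above exercise the WorkerPool API piecemeal. For orientation, here is a minimal sketch of the full lifecycle those tests rely on. Every call shown (addCommand, join, check_results, getCompletedItems, haltWork, joinWorkers) appears in the examples in this collection, but the import path and constructor defaults are assumptions that may vary between gppylib versions:

from gppylib.commands.base import WorkerPool, Command  # assumed module path

pool = WorkerPool(numWorkers=4)
try:
    # Each queued Command is picked up and run by a free worker thread.
    pool.addCommand(Command(name='say hello', cmdStr='echo hello'))

    # Block until every queued command has finished.
    pool.join()

    # Raise ExecutionError if any command failed ...
    pool.check_results()

    # ... or inspect (and pop) the completed items directly.
    for cmd in pool.getCompletedItems():
        print(cmd.get_results().stdout)
finally:
    # Stop the worker threads and join them so the process can exit cleanly.
    pool.haltWork()
    pool.joinWorkers()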
Example #51
0
def findFsDetails():
    global serverFSMap
    try:
        # find the mount points in parallel
        pool = WorkerPool()

        for hname in serverFSMap.keys():
            hname = hname.strip()  # strip() returns a new string; calling it without assignment was a no-op
            subCmd = "df -P %s" % (serverFSMap[hname])
            cmdStr = 'ssh -o PasswordAuthentication=no %s "%s"' % (hname, subCmd)
            pool.addCommand(Command(hname, cmdStr, REMOTE, hname))
        pool.join()
        items = pool.getCompletedItems()
        for i in items:
            if i.results.rc == 0:
                df_with_header = i.results.stdout.strip()
                df_list = df_with_header.splitlines()
                df_list.pop(0)
                fsList = serverFSMap[i.remoteHost].split()
                if len(df_list) != len(fsList):
                    print "Mismatch"
                    continue
                for df_vals in df_list:
                    df_val = df_vals.split()
                    fsDetailsMap[fsList.pop(0).strip()] = [i.remoteHost, df_val[0], df_val[5]]
            else:
                print("Failure in talking to host %s" % (i.remoteHost))

        pool.join()
        pool.haltWork()
        pool.joinWorkers()

    except Exception, e:
        print e.__str__()
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
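findFsDetails repeats the join/haltWork/joinWorkers cleanup in both the success path and the except block. A try/finally, as used by the surrounding examples, keeps that cleanup in one place; a sketch over the same serverFSMap and imports as the example above, behavior otherwise unchanged:

pool = WorkerPool()
try:
    for hname, fsdirs in serverFSMap.items():
        cmdStr = 'ssh -o PasswordAuthentication=no %s "df -P %s"' % (hname.strip(), fsdirs)
        pool.addCommand(Command(hname, cmdStr, REMOTE, hname))
    pool.join()
    for i in pool.getCompletedItems():
        pass  # parse i.results here, as in the example above
finally:
    # Runs on success and on error alike.
    pool.haltWork()
    pool.joinWorkers()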
Example #52
0
    def validate_nic_down(self):
        """     
            Ping validation on the nics.
        """     

        pool = WorkerPool()

        try:    
            for nic, hostname in self.nic_to_address_map:
                address = self.nic_to_address_map[(nic, hostname)]
                cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')
                pool.addCommand(cmd)
            pool.join()

            for cmd in pool.getCompletedItems():
                results = cmd.get_results()
                if results.rc == 0:
                    return False
        finally:
            pool.haltWork()
            pool.joinWorkers()
            pool.join()

        tinctest.logger.info("Successfully brought down nics ...")   
        return True
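Note the inverted success condition above: Ping reports rc == 0 when the address answers, so for a NIC-down validation a zero return code is the failure case. A standalone sketch of that convention, assuming the gppylib.commands.unix module path for Ping and using a placeholder address:

from gppylib.commands.unix import Ping  # assumed module path

cmd = Ping('ping validation', '192.0.2.10')  # placeholder address
cmd.run()
if cmd.get_results().rc == 0:
    print('address answered')      # for validate_nic_down, this means validation failed
else:
    print('address unreachable')   # the nic is confirmed down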
Example #53
0
 def setUp(self):
     self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())
Example #54
0
    def rebalance(self):
        # Get the unbalanced primary segments grouped by hostname
        # These segments are what we will shutdown.
        logger.info("Getting unbalanced segments")
        unbalanced_primary_segs = GpArray.getSegmentsByHostName(self.gpArray.get_unbalanced_primary_segdbs())
        pool = WorkerPool()
        
        count = 0

        try:        
            # Disable ctrl-c
            signal.signal(signal.SIGINT,signal.SIG_IGN)
            
            logger.info("Stopping unbalanced primary segments...")
            for hostname in unbalanced_primary_segs.keys():
                cmd = GpSegStopCmd("stop unbalanced primary segs",
                                   self.gpEnv.getGpHome(),
                                   self.gpEnv.getGpVersion(),
                                   'fast',
                                   unbalanced_primary_segs[hostname],
                                   ctxt=REMOTE,
                                   remoteHost=hostname,
                                   timeout=600)
                pool.addCommand(cmd)
                count+=1
                
            pool.wait_and_printdots(count, False)
            
            failed_count = 0
            completed = pool.getCompletedItems()
            for res in completed:
                if not res.get_results().wasSuccessful():
                    failed_count+=1
                    
            if failed_count > 0:
                logger.warn("%d segments failed to stop.  A full rebalance of the")
                logger.warn("system is not possible at this time.  Please check the")
                logger.warn("log files, correct the problem, and run gprecoverseg -r")
                logger.warn("again.")
                logger.info("gprecoverseg will continue with a partial rebalance.")
            
            pool.empty_completed_items()
            # issue a distributed query to make sure we pick up the fault
            # that we just caused by shutting down segments
            conn = None
            try:
                logger.info("Triggering segment reconfiguration")
                dburl = dbconn.DbURL()
                conn = dbconn.connect(dburl)
                cmd = ReconfigDetectionSQLQueryCommand(conn)
                pool.addCommand(cmd)
                pool.wait_and_printdots(1, False)
            except Exception:
                # This exception is expected
                pass
            finally:
                if conn:
                    conn.close()

            # Final step is to issue a recoverseg operation to resync segments
            logger.info("Starting segment synchronization")
            cmd = GpRecoverseg("rebalance recoverseg")
            pool.addCommand(cmd)
            pool.wait_and_printdots(1, False)
        except Exception:
            raise  # bare raise preserves the original traceback; 'raise ex' would reset it
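wait_and_printdots(), used in the rebalance method above, combines the join with a progress display. Its signature is inferred here from the call sites in these examples (a count of commands to wait for, plus an optional flag whose exact semantics are assumed); a minimal submit-and-wait sketch with hypothetical hostnames:

pool = WorkerPool()
count = 0
for host in ('sdw1', 'sdw2'):   # hypothetical segment hosts
    pool.addCommand(Command(host, 'hostname', REMOTE, host))
    count += 1

# Wait until 'count' commands have completed, showing progress as dots.
# The second argument is inferred from other call sites in these examples.
pool.wait_and_printdots(count, False)

pool.haltWork()
pool.joinWorkers()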
Example #55
0
class ConcurrentFilespaceMoveTestCase(unittest.TestCase):
    """ This test suite tests the scenario of running gpfilespace concurrently while
        trying to move the filespace. 
        The expected behavior is that only one of the processes succeeds and the 
        rest error out."""

    ALREADY_RUNNING_MSG = 'Another instance of gpfilespace is already running!'

    def setUp(self):
        self.pool = None
        self.pool = WorkerPool()

    def tearDown(self):
        if self.pool:
            self.pool.haltWork()
            self.pool.joinWorkers()
            self.pool.join()

    def get_move_filespace_cmd(self, filespace='myfspc', file_type=FileType.TEMPORARY_FILES):
        if file_type == FileType.TEMPORARY_FILES:
            file_type = 'movetempfiles'
        elif file_type == FileType.TRANSACTION_FILES:
            file_type = 'movetransfiles'

        return Command(name='move filespace', cmdStr='gpfilespace --%s %s' % (file_type, filespace))

    def run_concurrently(self, cmd_list):

        for cmd in cmd_list:
            self.pool.addCommand(cmd)
        self.pool.join()

    def check_concurrent_execution_result(self, execution_results):

        succeeded = 0
        for cmd in execution_results:
            results = cmd.get_results().stdout.strip()
            if self.ALREADY_RUNNING_MSG in results:
                continue
            succeeded += 1

        self.assertEqual(succeeded, 1)
            
    def test00_move_temp_filespace(self):

        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())
            
    def test01_move_trans_filespace(self):

        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test02_move_temp_and_trans_filespace(self):
        
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES), self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES)]
        self.run_concurrently(cmd_list) 
        self.check_concurrent_execution_result(self.pool.getCompletedItems())
Example #56
0
class WorkerPoolTest(unittest.TestCase):
    def setUp(self):
        self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())

    def tearDown(self):
        # All background threads must be stopped, or else the test runner will
        # hang waiting. Join the stopped threads to make sure we're completely
        # clean for the next test.
        self.pool.haltWork()
        self.pool.joinWorkers()

    def test_pool_must_have_some_workers(self):
        with self.assertRaises(Exception):
            WorkerPool(numWorkers=0)

    def test_pool_runs_added_command(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        cmd.run.assert_called_once_with()

    def test_completed_commands_are_retrievable(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)  # should quickly be completed
        self.pool.join()

        self.assertEqual(self.pool.getCompletedItems(), [cmd])

    def test_pool_is_not_marked_done_until_commands_finish(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()

        def wait_for_event():
            event.wait()

        cmd.run.side_effect = wait_for_event

        self.assertTrue(self.pool.isDone())

        try:
            self.pool.addCommand(cmd)
            self.assertFalse(self.pool.isDone())

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        self.assertTrue(self.pool.isDone())

    def test_pool_can_be_emptied_of_completed_commands(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        self.pool.empty_completed_items()
        self.assertEqual(self.pool.getCompletedItems(), [])

    def test_check_results_succeeds_when_no_items_fail(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)
        self.pool.join()
        self.pool.check_results()

    def test_check_results_throws_exception_at_first_failure(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return False to simulate a
        # failure.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = False

        self.pool.addCommand(cmd)
        self.pool.join()

        with self.assertRaises(ExecutionError):
            self.pool.check_results()

    def test_join_with_timeout_returns_done_immediately_if_there_is_nothing_to_do(
            self):
        start = time.time()
        done = self.pool.join(10)
        delta = time.time() - start

        self.assertTrue(done)

        # "Returns immediately" is a difficult thing to test. Longer than two
        # seconds seems like a reasonable failure case, even on a heavily loaded
        # test container.
        self.assertLess(delta, 2)

    def test_join_with_timeout_doesnt_return_done_until_all_commands_complete(
            self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()

        def wait_for_event():
            event.wait()

        cmd.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd)

            done = self.pool.join(0.001)
            self.assertFalse(done)

            # Test zero and negative timeouts too.
            done = self.pool.join(0)
            self.assertFalse(done)

            done = self.pool.join(-1)
            self.assertFalse(done)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        done = self.pool.join(
            2)  # should be immediate, but there's still a race
        self.assertTrue(done)

    def test_completed_returns_number_of_completed_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.assertEqual(self.pool.completed, 3)

    def test_completed_can_be_cleared_back_to_zero(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()
        self.assertEqual(self.pool.completed, 0)

    def test_completed_is_reset_to_zero_after_getCompletedItems(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.getCompletedItems()
        self.assertEqual(self.pool.completed, 0)

    def test_assigned_returns_number_of_assigned_commands(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.assertEqual(self.pool.assigned, 3)

    def test_assigned_is_decremented_when_completed_items_are_emptied(self):
        for _ in range(3):
            cmd = mock.Mock(spec=Command)
            self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.empty_completed_items()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_checked(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)

        self.pool.join()
        self.pool.check_results()

        self.assertEqual(self.pool.assigned, 0)

    def test_assigned_is_decremented_when_completed_items_are_popped(self):
        # The first command will finish immediately.
        cmd1 = mock.Mock(spec=Command)
        self.pool.addCommand(cmd1)

        # The other command will wait until we allow it to continue.
        cmd2 = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()

        def wait_for_event():
            event.wait()

        cmd2.run.side_effect = wait_for_event

        try:
            self.pool.addCommand(cmd2)
            self.assertEqual(self.pool.assigned, 2)

            # Avoid race flakes; make sure we actually complete the first
            # command.
            while self.pool.completed < 1:
                self.pool.join(0.001)

            # Pop the completed item.
            self.assertEqual(self.pool.getCompletedItems(), [cmd1])

            # Now we should be down to one assigned command.
            self.assertEqual(self.pool.assigned, 1)

        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()

        # Pop the other completed item.
        self.assertEqual(self.pool.getCompletedItems(), [cmd2])
        self.assertEqual(self.pool.assigned, 0)

    def test_join_and_indicate_progress_prints_nothing_if_pool_is_done(self):
        stdout = io.StringIO()
        join_and_indicate_progress(self.pool, stdout)

        self.assertEqual(stdout.getvalue(), '')

    def test_join_and_indicate_progress_prints_dots_until_pool_is_done(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()

        def wait_for_event():
            event.wait()

        cmd.run.side_effect = wait_for_event

        # Open up a pipe and wrap each end in a file-like object.
        read_end, write_end = os.pipe()
        read_end = os.fdopen(read_end, 'r')
        write_end = os.fdopen(write_end, 'w')

        # Create a thread to perform join_and_indicate_progress().
        def tmain():
            join_and_indicate_progress(self.pool, write_end, interval=0.001)
            write_end.close()

        join_thread = threading.Thread(target=tmain)

        try:
            # Add the command, then join the WorkerPool.
            self.pool.addCommand(cmd)
            join_thread.start()

            # join_and_indicate_progress() is now writing to our pipe. Wait for
            # a few dots...
            for _ in range(3):
                byte = read_end.read(1)
                self.assertEqual(byte, '.')

            # ...then stop the command.
            event.set()

            # Make sure the rest of the output consists of dots ending in a
            # newline. (tmain() closes the write end of the pipe so that this
            # read() will complete.)
            remaining = read_end.read()
            self.assertRegex(remaining, r'^[.]*\n$')

        finally:
            # Make sure that we unblock and join all threads, even on a test
            # failure.
            event.set()
            join_thread.join()

    def test_join_and_indicate_progress_flushes_every_dot(self):
        duration = 0.005

        cmd = mock.Mock(spec=Command)

        def wait_for_duration():
            time.sleep(duration)

        cmd.run.side_effect = wait_for_duration
        self.pool.addCommand(cmd)

        stdout = mock.Mock(spec=io.StringIO())
        join_and_indicate_progress(self.pool, stdout, interval=(duration / 5))

        for i, call in enumerate(stdout.mock_calls):
            # Every written dot should be followed by a flush().
            if call == mock.call.write('.'):
                self.assertEqual(stdout.mock_calls[i + 1], mock.call.flush())
Example #57
0
    def bring_down_nic(self, nics, hostname):
        """
            Bring down nics based on the input nic names
        """ 
        if nics is None:
            return False

        pool = WorkerPool()

        try:    
            #get the ip address of the interface
            for nic in nics:
                cmd = Command(name='get the ip of the interface', cmdStr="/sbin/ifconfig %s | grep \'inet addr:\' | cut -d: -f2 | awk \'{ print $1}\'" % nic, ctxt=REMOTE, remoteHost=hostname)
                cmd.run(validateAfter=True)
                results = cmd.get_results()
                if results.rc != 0:
                    raise Exception('Unable to map interface to ipaddress') 

                self.nic_to_address_map[(nic, hostname)] = results.stdout.split()[0].strip()

            for nic in nics:
                tinctest.logger.info("Bringing down %s:%s ..." % (hostname, nic))   
                cmd = Command(name='bring NIC down', cmdStr='sudo /sbin/ifdown %s' % nic, ctxt=REMOTE, remoteHost=hostname)
                pool.addCommand(cmd)

            pool.join()
            for cmd in pool.getCompletedItems():
                results = cmd.get_results()
                if results.rc != 0:
                    return False
        finally:
            pool.haltWork()
            pool.joinWorkers()
            pool.join()

        return True
Example #58
0
 def setUp(self):
     self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())
Example #59
0
 def test_print_progress_none(self, mock1):
     w = WorkerPool(numWorkers=32)
     w.print_progress(0)
     w.join()
     # Mock auto-creates attributes, so the original
     # assertTrue(mock1.called_with(...)) was always truthy and could
     # never fail; assert_called_with() is the real assertion.
     mock1.assert_called_with('0.00% of jobs completed')
     w.haltWork()
     w.joinWorkers()
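One closing note prompted by the fix in Example #59: mock.Mock auto-creates attributes, so a misspelled assertion helper such as called_with() silently returns a child Mock, and a surrounding assertTrue() always passes. The real assertion methods raise on mismatch. A self-contained illustration:

import mock

m = mock.Mock()
m('0.00% of jobs completed')

m.assert_called_with('0.00% of jobs completed')   # passes
try:
    m.assert_called_with('50.00% of jobs completed')
except AssertionError:
    print('mismatch detected')                    # the real assertion raises

# The buggy form: called_with is just an auto-created attribute, so the
# result is a (truthy) Mock and this assert can never fail.
assert m.called_with('anything at all')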