예제 #1
0
 def test_ParallelOperation_handles_empty_operations_successfully(self):
     """An empty operation list must run without error, and the worker
     count must be clamped to the number of operations (here, zero)."""
     # No operations, default parallelism.
     ParallelOperation([]).run()
     # No operations, explicitly zero workers.
     ParallelOperation([], 0).run()
     # Even when one worker is requested, zero operations => zero workers.
     ops = ParallelOperation([], 1)
     ops.run()
     # assertEqual instead of assertTrue(a == b): same check, but the
     # failure message shows both values.
     self.assertEqual(len(ops.operations), 0)
     self.assertEqual(ops.parallelism, 0)
예제 #2
0
    def execute(self):
        """Check that the transaction/temporary-files flat file is
        consistent across segments by collecting the filespace OID
        recorded on every host.

        Returns False when any host reports an invalid (non-integer) OID.
        """
        logger.info('Checking for filespace consistency')
        # Pick the flat-file name matching the file type under check.
        # NOTE(review): if self.file_type is neither value, flat_file is
        # left unbound and the os.path.join below raises NameError.
        if self.file_type == FileType.TRANSACTION_FILES:
            flat_file = GP_TRANSACTION_FILES_FILESPACE
        elif self.file_type == FileType.TEMPORARY_FILES:
            flat_file = GP_TEMPORARY_FILES_FILESPACE

        operations = []
        # dbid -> entry for the default pg_system filespace.
        pg_system_fs_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(self.gparray,
                                PG_SYSTEM_FILESPACE).run()).run()
        # dbid -> entry for the filespace currently in use for this type.
        cur_filespace_entries = GetFilespaceEntriesDict(
            GetCurrentFilespaceEntries(self.gparray,
                                       self.file_type).run()).run()
        for seg in self.gparray.getDbList():
            # Entry index 2 is the filespace directory on that segment.
            flat_file_location = os.path.join(
                pg_system_fs_entries[seg.getSegmentDbId()][2], flat_file)
            logger.debug('flat file location = %s' % flat_file_location)
            operations.append(
                RemoteOperation(CheckFilespaceOidLocally(flat_file_location),
                                seg.getSegmentHostName()))
        ParallelOperation(operations, NUM_WORKERS).run()

        try:
            # Every host must report an integer OID; the set is presumably
            # compared for consistency further down (beyond this excerpt).
            oid_set = set([int(op.get_ret()) for op in operations])
        except Exception, e:
            # NOTE(review): 'op' is the comprehension's leaked loop
            # variable (Python 2 scoping); it is not guaranteed to be the
            # failing operation, so the logged host may be wrong.
            logger.error('Invalid OID in flat file on host %s' % op.host)
            return False
예제 #3
0
    def __ensureSharedMemCleaned(self, gpEnv, directives):
        """Clean up shared memory left behind by stopped segments.

        @param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to cleanup
        """
        if not directives:
            return

        logger.info(
            'Ensuring that shared memory is cleaned up for stopped segments')
        # Group the affected segments by host so that each host runs a
        # single CleanSharedMem covering all of its segments.
        host_to_segs = GpArray.getSegmentsByHostName(
            [directive.getSegment() for directive in directives])
        operation_list = []
        for host, segs in host_to_segs.items():
            operation_list.append(RemoteOperation(CleanSharedMem(segs),
                                                  host=host))
        ParallelOperation(operation_list).run()

        # Best effort: a failure on one host is logged, never fatal.
        for op in operation_list:
            try:
                op.get_ret()
            except Exception as e:
                logger.warning(
                    'Unable to clean up shared memory for stopped segments on host (%s)'
                    % op.host)
예제 #4
0
    def __moveFilespaces(self, gparray, target_segment):
        """
            Moves filespaces for temporary and transaction files to a particular location.
        """
        # The master's data directory holds the default (pg_system)
        # filespace flat files.
        master_seg = gparray.master
        default_filespace_dir = master_seg.getSegmentDataDirectory()

        # dbid -> filespace entry; both lookups intentionally query the
        # default PG_SYSTEM_FILESPACE here.
        cur_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(gparray, PG_SYSTEM_FILESPACE).run()).run()
        pg_system_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(gparray, PG_SYSTEM_FILESPACE).run()).run()
        # Entry [1] is presumably the master's entry; [0] the filespace oid.
        cur_filespace_name = gparray.getFileSpaceName(
            int(cur_filespace_entries[1][0]))
        # target_segment first, then every other segment sharing its
        # content id (its primary/mirror peers).
        segments = [target_segment] + [
            seg for seg in gparray.getDbList()
            if seg.getSegmentContentId() == target_segment.getSegmentContentId(
            ) and seg.getSegmentDbId() != target_segment.getSegmentDbId()
        ]

        self.__logger.info('Starting file move procedure for %s' %
                           target_segment)

        if os.path.exists(
                os.path.join(default_filespace_dir,
                             GP_TRANSACTION_FILES_FILESPACE)):
            # On the expansion segments, the current filespace used by existing nodes will be the
            # new filespace to which we want to move the transaction and temp files.
            # The filespace directories which have to be moved will be the default pg_system directories.
            new_filespace_entries = GetFilespaceEntriesDict(
                GetCurrentFilespaceEntries(
                    gparray, FileType.TRANSACTION_FILES).run()).run()
            self.__logger.info('getting filespace information')
            new_filespace_name = gparray.getFileSpaceName(
                int(new_filespace_entries[1][0]))
            self.__logger.info(
                'getting move operations list for filespace %s' %
                new_filespace_name)
            operation_list = GetMoveOperationList(
                segments, FileType.TRANSACTION_FILES, new_filespace_name,
                new_filespace_entries, cur_filespace_entries,
                pg_system_filespace_entries).run()
            self.__logger.info('Starting transaction files move')
            ParallelOperation(operation_list).run()

            self.__logger.debug('Checking transaction files move')
            try:
                # get_ret() re-raises any failure from the remote move.
                for operation in operation_list:
                    operation.get_ret()
                    pass
            except Exception, e:
                self.__logger.info(
                    'Failed to move transaction filespace. Rolling back changes ...'
                )
                # Undo the partial transaction-files move before re-raising.
                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
                                         FileType.TRANSACTION_FILES,
                                         cur_filespace_name,
                                         cur_filespace_entries,
                                         new_filespace_entries,
                                         pg_system_filespace_entries).run()
                raise
예제 #5
0
    def execute(self):
        """
        Update the transaction-files flat file on each selected segment
        (expansion vs. full set, primaries vs. mirrors, per the flags set
        on this operation instance).

        Raises MoveFilespaceError if any remote update fails.
        """
        # Obtain list of segments from gparray
        if self.expansion:
            db_list = self.gparray.getExpansionSegDbList()
        else:
            db_list = self.gparray.getDbList()

        if self.primaries:
            segments = [seg for seg in db_list if seg.isSegmentPrimary()]
        else:
            segments = [seg for seg in db_list if seg.isSegmentMirror()]

        logger.debug('segment_list = %s' % self.gparray.getDbList())
        logger.debug('segments on which flat files will be updated = %s' %
                     segments)
        # dbid -> entry for the default pg_system filespace; entry [1] is
        # presumably the master, whose directory ([2]) holds the master
        # flat file checked below.
        pg_system_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(self.gparray,
                                PG_SYSTEM_FILESPACE).run()).run()
        transaction_flat_file = os.path.join(pg_system_filespace_entries[1][2],
                                             GP_TRANSACTION_FILES_FILESPACE)
        if os.path.exists(transaction_flat_file):
            logger.debug('Updating transaction flat files')
            cur_filespace_entries = GetFilespaceEntriesDict(
                GetCurrentFilespaceEntries(
                    self.gparray, FileType.TRANSACTION_FILES).run()).run()
            operation_list = []
            for seg in segments:
                # Current entry for this segment plus the entry of its
                # peer (same content id) for cross-checking on the host.
                filespace_oid = cur_filespace_entries[seg.getSegmentDbId()][0]
                cur_filespace_entry = cur_filespace_entries[
                    seg.getSegmentDbId()]
                peer_filespace_entry = get_peer_filespace_entry(
                    cur_filespace_entries, seg.getSegmentDbId(),
                    seg.getSegmentContentId(), db_list)
                logger.debug('cur_filespace_entry = %s' %
                             str(cur_filespace_entry))
                logger.debug('peer_filespace_entry = %s' %
                             str(peer_filespace_entry))
                # Per-segment flat file lives in that segment's pg_system
                # filespace directory.
                flat_file = os.path.join(
                    pg_system_filespace_entries[seg.getSegmentDbId()][2],
                    GP_TRANSACTION_FILES_FILESPACE)
                operation_list.append(
                    RemoteOperation(
                        UpdateFlatFilesLocally(flat_file, filespace_oid,
                                               cur_filespace_entry,
                                               peer_filespace_entry),
                        seg.getSegmentHostName()))

            ParallelOperation(operation_list, NUM_WORKERS).run()

            try:
                # get_ret() re-raises any failure from the remote update.
                for operation in operation_list:
                    operation.get_ret()
            except Exception, e:
                raise MoveFilespaceError(
                    'Failed to update transaction flat file.')
예제 #6
0
 def test_ParallelOperation_handles_empty_operations_successfully(self):
     """An empty operation list must run without error, and the worker
     count must be clamped to the number of operations (here, zero)."""
     # No operations, default parallelism.
     ParallelOperation([]).run()
     # No operations, explicitly zero workers.
     ParallelOperation([], 0).run()
     # Even when one worker is requested, zero operations => zero workers.
     ops = ParallelOperation([], 1)
     ops.run()
     # assertEqual instead of assertTrue(a == b): same check, but the
     # failure message shows both values.
     self.assertEqual(len(ops.operations), 0)
     self.assertEqual(ops.parallelism, 0)
예제 #7
0
 def syncPackages(self, new_hosts):
     """Synchronize Greenplum Database extension packages onto new_hosts.

     The design decision here is to squash any exceptions resulting from
     the synchronization of packages: we should *not* disturb the user's
     attempts to recover, so failures are logged and swallowed.
     """
     try:
         self.logger.info('Syncing Greenplum Database extensions')
         operations = [SyncPackages(host) for host in new_hosts]
         ParallelOperation(operations, self.__options.parallelDegree).run()
         # introspect outcomes: get_ret() re-raises any remote failure
         for operation in operations:
             operation.get_ret()
     except Exception:
         # BUG FIX: was a bare 'except:', which also swallowed
         # KeyboardInterrupt/SystemExit; only errors should be squashed.
         self.logger.exception('Syncing of Greenplum Database extensions has failed.')
         self.logger.warning('Please run gppkg --clean after successful segment recovery.')
예제 #8
0
 def execute(self):
     """Run the per-segment result-file validations and bucket each
     segment's content id under the VerificationState it reports."""
     # One (initially empty) bucket per possible verification state.
     state_dict = dict((state, [])
                       for state in (VerificationState.RUNNING,
                                     VerificationState.SUCCEEDED,
                                     VerificationState.ABORTED,
                                     VerificationState.FAILED))
     # One remote validation per segment to be checked.
     operations = [
         RemoteOperation(
             ValidateResultFile(token=self.token,
                                datadir=pseg.getSegmentDataDirectory(),
                                content=pseg.getSegmentContentId()),
             pseg.getSegmentHostName()) for pseg in self.to_validate
     ]
     ParallelOperation(operations, self.batch_default).run()
     for remote in operations:
         state_dict[remote.get_ret()].append(remote.operation.content)
     return state_dict
예제 #9
0
    def execute(self):
        """Build per-segment table dump files remotely, copying the dump
        taken at restore_timestamp into files named with a freshly picked
        fake timestamp."""
        # Choose the timestamp under which the rebuilt dumps are stored.
        fake_timestamp = PickDumpTimestamp(restore_timestamp=self.restore_timestamp,
                                           compress=self.compress,
                                           master_datadir=self.master_datadir).run()

        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port),
                                          utility=True)
        operations = []
        for seg in gparray.getDbList():
            # Only acting primaries hold the dump files to rebuild.
            if not seg.isSegmentPrimary(current_role=True):
                continue
            datadir = seg.getSegmentDataDirectory()
            dbid = seg.getSegmentDbId()
            real_filename = os.path.join(
                datadir, DUMP_DIR, self.restore_timestamp[0:8],
                "%s0_%d_%s" % (DBDUMP_PREFIX, dbid, self.restore_timestamp))
            fake_filename = os.path.join(
                datadir, DUMP_DIR, fake_timestamp[0:8],
                "%s0_%d_%s" % (DBDUMP_PREFIX, dbid, fake_timestamp))
            operations.append(
                BuildRemoteTableDump(self.restore_tables, real_filename,
                                     fake_filename, self.compress,
                                     seg.getSegmentHostName()))

        ParallelOperation(operations, self.batch_default).run()
        for operation in operations:
            try:
                # get_ret() re-raises any failure from the remote build.
                operation.get_ret()
            except Exception as e:
                logger.exception('Parallel table dump file build failed.')
                raise ExceptionNoStackTraceNeeded('Parallel table dump file build failed, review log file for details')
예제 #10
0
    def execute(self):
        """Undo a partially completed filespace move: on every segment,
        move the transaction or temporary files from the new filespace
        back to the current one."""
        logger.info('Rolling back filespace changes ...')
        # Dispatch: which local mover matches the file type being rolled back.
        mover_by_type = {
            FileType.TRANSACTION_FILES: MoveTransFilespaceLocally,
            FileType.TEMPORARY_FILES: MoveTempFilespaceLocally,
        }
        operations = []
        for seg in self.segments:
            logger.debug('Creating RemoteOperation for segment %s' % seg)
            dbid = seg.getSegmentDbId()
            peer_filespace_entry = get_peer_filespace_entry(
                self.cur_filespace_entries, dbid,
                seg.getSegmentContentId(), self.segments)
            mover = mover_by_type.get(self.file_type)
            if mover is not None:
                # Move from new -> cur
                operations.append(
                    RemoteOperation(
                        mover(self.new_filespace_entries[dbid],
                              self.cur_filespace_name,
                              self.cur_filespace_entries[dbid],
                              peer_filespace_entry,
                              self.pg_system_filespace_entries[dbid],
                              rollback=True), seg.getSegmentHostName()))

        logger.debug('Running remote operations in parallel')
        ParallelOperation(operations, NUM_WORKERS).run()

        logger.debug('Checking results of parallel operations')
        # get_ret() re-raises any remote failure, aborting the rollback.
        for operation in operations:
            operation.get_ret()
예제 #11
0
    def execute(self):
        """
        Validate that every acting primary segment has enough free disk
        space for the dump; logs an error for each host that does not.
        """
        ValidateGpToolkit(database=self.dump_database,
                          master_port=self.master_port).run()

        operations = []
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port),
                                          utility=True)
        # Only acting primaries are dumped, so only they are checked.
        segs = [
            seg for seg in gparray.getDbList()
            if seg.isSegmentPrimary(current_role=True)
        ]
        for seg in segs:
            operations.append(
                RemoteOperation(
                    ValidateSegDiskSpace(
                        free_space_percent=self.free_space_percent,
                        compress=self.compress,
                        dump_database=self.dump_database,
                        include_dump_tables=self.include_dump_tables,
                        datadir=seg.getSegmentDataDirectory(),
                        segport=seg.getSegmentPort()),
                    seg.getSegmentHostName()))

        ParallelOperation(operations, self.batch_default).run()

        # Count hosts that passed the disk-space check; presumably
        # compared against len(operations) beyond this excerpt.
        success = 0
        for remote in operations:
            host = remote.host
            try:
                remote.get_ret()
            except NotEnoughDiskSpace, e:
                logger.error(
                    "%s has insufficient disk space. [Need: %dK, Free %dK]" %
                    (host, e.needed_space, e.free_space))
            else:
                success += 1
예제 #12
0
class GpMirrorListToBuild:
    """Holds the list of mirror segments to build, together with the
    settings and warnings collected while the list was assembled."""

    def __init__(self,
                 toBuild,
                 pool,
                 quiet,
                 parallelDegree,
                 additionalWarnings=None):
        # Entries describing the mirrors to build (see getMirrorsToBuild).
        self.__mirrorsToBuild = toBuild
        # presumably a worker/command pool used during the build -- TODO confirm
        self.__pool = pool
        self.__quiet = quiet
        self.__parallelDegree = parallelDegree
        # Warnings accumulated while building the list; never None.
        self.__additionalWarnings = additionalWarnings or []

    def getMirrorsToBuild(self):
        """
        Return a newly allocated copy of the mirrors-to-build list, so
        callers cannot mutate this object's internal state.
        """
        return list(self.__mirrorsToBuild)

    def getAdditionalWarnings(self):
        """
        Return any additional warnings generated while building the list
        of mirrors (the internal list itself, not a copy).
        """
        warnings = self.__additionalWarnings
        return warnings

    def __moveFilespaces(self, gparray, target_segment):
        """
        Move the transaction- and temporary-file filespaces for
        target_segment (and its peers sharing the same content id) from
        the default pg_system location to the filespace currently in use
        by the existing cluster.

        On failure, the partially completed move is rolled back and the
        exception re-raised.
        """
        master_seg = gparray.master
        default_filespace_dir = master_seg.getSegmentDataDirectory()

        # dbid -> filespace entry; both lookups intentionally query the
        # default PG_SYSTEM_FILESPACE here.
        cur_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(gparray, PG_SYSTEM_FILESPACE).run()).run()
        pg_system_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(gparray, PG_SYSTEM_FILESPACE).run()).run()
        cur_filespace_name = gparray.getFileSpaceName(
            int(cur_filespace_entries[1][0]))
        # target_segment first, then every other segment sharing its
        # content id (its primary/mirror peers).
        segments = [target_segment] + [
            seg for seg in gparray.getDbList()
            if seg.getSegmentContentId() == target_segment.getSegmentContentId(
            ) and seg.getSegmentDbId() != target_segment.getSegmentDbId()
        ]

        logger.info('Starting file move procedure for %s' % target_segment)

        if os.path.exists(
                os.path.join(default_filespace_dir,
                             GP_TRANSACTION_FILES_FILESPACE)):
            #On the expansion segments, the current filespace used by existing nodes will be the
            #new filespace to which we want to move the transaction and temp files.
            #The filespace directories which have to be moved will be the default pg_system directories.
            new_filespace_entries = GetFilespaceEntriesDict(
                GetCurrentFilespaceEntries(
                    gparray, FileType.TRANSACTION_FILES).run()).run()
            logger.info('getting filespace information')
            new_filespace_name = gparray.getFileSpaceName(
                int(new_filespace_entries[1][0]))
            logger.info('getting move operations list for filespace %s' %
                        new_filespace_name)
            operation_list = GetMoveOperationList(
                segments, FileType.TRANSACTION_FILES, new_filespace_name,
                new_filespace_entries, cur_filespace_entries,
                pg_system_filespace_entries).run()
            logger.info('Starting transaction files move')
            ParallelOperation(operation_list).run()

            logger.debug('Checking transaction files move')
            try:
                # get_ret() re-raises any failure from the remote move.
                for operation in operation_list:
                    operation.get_ret()
            except Exception:
                logger.info(
                    'Failed to move transaction filespace. Rolling back changes ...'
                )
                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
                                         FileType.TRANSACTION_FILES,
                                         cur_filespace_name,
                                         cur_filespace_entries,
                                         new_filespace_entries,
                                         pg_system_filespace_entries).run()
                raise

        if os.path.exists(
                os.path.join(default_filespace_dir,
                             GP_TEMPORARY_FILES_FILESPACE)):
            new_filespace_entries = GetFilespaceEntriesDict(
                GetCurrentFilespaceEntries(
                    gparray, FileType.TEMPORARY_FILES).run()).run()
            new_filespace_name = gparray.getFileSpaceName(
                int(new_filespace_entries[1][0]))
            operation_list = GetMoveOperationList(
                segments, FileType.TEMPORARY_FILES, new_filespace_name,
                new_filespace_entries, cur_filespace_entries,
                pg_system_filespace_entries).run()
            logger.info('Starting temporary files move')
            ParallelOperation(operation_list).run()

            logger.debug('Checking temporary files move')
            try:
                # get_ret() re-raises any failure from the remote move.
                for operation in operation_list:
                    operation.get_ret()
            except Exception:
                logger.info(
                    'Failed to move temporary filespace. Rolling back changes ...'
                )
                # BUG FIX: roll back the *temporary* files move (the
                # original passed FileType.TRANSACTION_FILES here), and use
                # getExpansionSegDbList() consistent with the transaction
                # rollback above (getExpansionDbList does not match the
                # accessor used everywhere else in this file).
                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
                                         FileType.TEMPORARY_FILES,
                                         cur_filespace_name,
                                         cur_filespace_entries,
                                         new_filespace_entries,
                                         pg_system_filespace_entries).run()
                raise
예제 #13
0
            )
            # Per-segment dump artifacts live under <datadir>/DUMP_DIR/DUMP_DATE.
            path = os.path.join(path, DUMP_DIR, DUMP_DATE)
            status_file = os.path.join(
                path, "%s%d_%s" %
                (SEG_STATUS_PREFIX, seg.getSegmentDbId(), timestamp))
            dump_file = os.path.join(
                path, "%s%d_%s" %
                (SEG_DBDUMP_PREFIX, seg.getSegmentDbId(), timestamp))
            # Compressed dumps carry a .gz suffix.
            if self.compress: dump_file += ".gz"
            operations.append(
                RemoteOperation(
                    PostDumpSegment(status_file=status_file,
                                    dump_file=dump_file),
                    seg.getSegmentHostName()))

        ParallelOperation(operations, self.batch_default).run()

        # Tally successes; each distinct failure mode gets its own warning.
        success = 0
        for remote in operations:
            host = remote.host
            status_file = remote.operation.status_file
            dump_file = remote.operation.dump_file
            try:
                remote.get_ret()
            except NoStatusFile, e:
                logger.warn('Status file %s not found on %s' %
                            (status_file, host))
            except StatusFileError, e:
                logger.warn('Status file %s on %s indicates errors' %
                            (status_file, host))
            # (handler body continues beyond this excerpt)
            except NoDumpFile, e:
예제 #14
0
 def test_ParallelOperation_succeeds(self):
     """One ListFiles("/tmp") operation run on a single worker must
     yield a non-empty listing."""
     ops = ParallelOperation([ListFiles("/tmp")], 1)
     ops.run()
     listing = ops.operations[0].get_ret()
     self.assertTrue(len(listing) > 0)
예제 #15
0
 def test_ParallelOperation_succeeds(self):
     """One ListFiles("/tmp") operation run on a single worker must
     yield a non-empty listing."""
     ops = ParallelOperation([ListFiles("/tmp")], 1)
     ops.run()
     listing = ops.operations[0].get_ret()
     self.assertTrue(len(listing) > 0)
예제 #16
0
 def test_ParallelOperation_with_operation_but_no_threads_raises(self):
     """Supplying work but requesting zero workers must raise."""
     # Lambda keeps construction inside the guarded call, matching the
     # scope of the original context-manager form.
     self.assertRaises(Exception,
                       lambda: ParallelOperation([ListFiles("/tmp")], 0).run())
예제 #17
0
            # Entry for this segment and for its primary/mirror peer
            # (same content id) for cross-checking on the host.
            cur_filespace_entry = cur_filespace_entries[seg.getSegmentDbId()]
            peer_filespace_entry = get_peer_filespace_entry(
                cur_filespace_entries, seg.getSegmentDbId(),
                seg.getSegmentContentId(), self.gparray.getDbList())
            logger.debug('current_filespace_entry = %s' %
                         str(cur_filespace_entry))
            logger.debug('peer_filespace_entry = %s' %
                         str(peer_filespace_entry))
            operation_list.append(
                RemoteOperation(
                    CheckFilespaceEntriesLocally(
                        cur_filespace_entry, peer_filespace_entry,
                        pg_system_fs_entries[seg.getSegmentDbId()][2],
                        self.file_type), seg.getSegmentHostName()))

        ParallelOperation(operation_list, NUM_WORKERS).run()

        # Any host reporting False (or raising) makes the check fail.
        for operation in operation_list:
            try:
                if not operation.get_ret():
                    logger.error(
                        '%s entries are inconsistent for %s filespace on host %s'
                        % (FileType.lookup[self.file_type], fs_name,
                           operation.host))
                    return False
            except Exception, e:
                logger.error(
                    '%s entries are inconsistent for %s filespace on host %s' %
                    (FileType.lookup[self.file_type], fs_name, operation.host))
                return False