コード例 #1
0
ファイル: test_unit_utils.py プロジェクト: zhuomingliang/gpdb
 def test_RemoteOperation_logger_debug(self, mock_split, mock_cmd, mock_lods, mock_debug):
     """Verify that RemoteOperation.execute() routes remote output to logger.debug
     with the msg_ctx/host prefix."""
     mock_cmd.run = MagicMock()
     mockRemoteOperation = RemoteOperation(operation=TestOperation(), host="sdw1", msg_ctx="dbid 2")
     mockRemoteOperation.execute()
     mock_debug.assert_called()
     # call_args_list entries are (args, kwargs) tuples; fixed typo "fist_call_kwargs".
     first_call_args, first_call_kwargs = mock_debug.call_args_list[0]
     self.assertTrue(first_call_args[0].startswith("Output for dbid 2 on host sdw1:"))
コード例 #2
0
 def execute(self):
     """Build one RemoteOperation per segment that moves its filespace files
     locally (transaction or temporary files, depending on self.file_type).

     Returns the list of (not yet executed) RemoteOperation objects.
     """
     logger.info('Creating RemoteOperations list')
     remote_ops = []
     for seg in self.segments:
         dbid = seg.getSegmentDbId()
         logger.debug('segment_dbid = %s' % dbid)
         logger.debug('segmenthostname = %s' % seg.getSegmentHostName())
         logger.debug(self.new_filespace_entries[dbid])
         logger.debug(self.cur_filespace_entries[dbid])
         peer_filespace_entry = get_peer_filespace_entry(
             self.new_filespace_entries, dbid,
             seg.getSegmentContentId(), self.segments)
         # Both branches build the same shape of operation; only the local
         # mover class differs.
         if self.file_type == FileType.TRANSACTION_FILES:
             mover_cls = MoveTransFilespaceLocally
         elif self.file_type == FileType.TEMPORARY_FILES:
             mover_cls = MoveTempFilespaceLocally
         else:
             continue
         remote_ops.append(
             RemoteOperation(
                 mover_cls(self.cur_filespace_entries[dbid],
                           self.new_filespace_name,
                           self.new_filespace_entries[dbid],
                           peer_filespace_entry,
                           self.pg_system_filespace_entries[dbid]),
                 seg.getSegmentHostName()))
     return remote_ops
コード例 #3
0
ファイル: test_unit_utils.py プロジェクト: zyclove/gpdb
    def test_RemoteOperation_logger_debug(self, mock_split, mock_cmd, mock_lods, mock_debug):
        """The remote command's stdout should be logged verbatim at debug level."""
        # Pin down Command's get_results().stdout so the log line is predictable.
        mock_cmd.return_value.get_results.return_value.stdout = 'output'

        remote_op = RemoteOperation(operation=TestOperation(), host="sdw1", msg_ctx="dbid 2")
        remote_op.execute()
        mock_debug.assert_has_calls([mock.call("Output for dbid 2 on host sdw1: output")])
コード例 #4
0
ファイル: test_unit_utils.py プロジェクト: adam8157/gpdb
    def test_RemoteOperation_logger_debug(self, mock_split, mock_cmd, mock_lods, mock_debug):
        """Debug logging must include the msg_ctx, host, and captured stdout."""
        # Fix the stdout that Command.get_results() will report.
        fake_cmd = mock_cmd.return_value
        fake_cmd.get_results.return_value.stdout = 'output'

        op = RemoteOperation(operation=TestOperation(), host="sdw1", msg_ctx="dbid 2")
        op.execute()

        expected = mock.call("Output for dbid 2 on host sdw1: output")
        mock_debug.assert_has_calls([expected])
コード例 #5
0
    def execute(self):
        """Check filespace flat-file OID consistency across all segments.

        Reads each segment's flat file remotely and collects the OIDs; returns
        False when any OID fails to parse. (The success path appears to be
        truncated in this excerpt.)
        """
        logger.info('Checking for filespace consistency')
        # NOTE(review): if file_type is neither TRANSACTION_FILES nor
        # TEMPORARY_FILES, flat_file is never bound and os.path.join below
        # raises NameError -- confirm callers only pass these two values.
        if self.file_type == FileType.TRANSACTION_FILES:
            flat_file = GP_TRANSACTION_FILES_FILESPACE
        elif self.file_type == FileType.TEMPORARY_FILES:
            flat_file = GP_TEMPORARY_FILES_FILESPACE

        operations = []
        # dbid-keyed dicts of filespace entries.
        pg_system_fs_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(self.gparray,
                                PG_SYSTEM_FILESPACE).run()).run()
        cur_filespace_entries = GetFilespaceEntriesDict(
            GetCurrentFilespaceEntries(self.gparray,
                                       self.file_type).run()).run()
        for seg in self.gparray.getDbList():
            # entry[2] is presumably the filespace directory path -- TODO confirm.
            flat_file_location = os.path.join(
                pg_system_fs_entries[seg.getSegmentDbId()][2], flat_file)
            logger.debug('flat file location = %s' % flat_file_location)
            operations.append(
                RemoteOperation(CheckFilespaceOidLocally(flat_file_location),
                                seg.getSegmentHostName()))
        ParallelOperation(operations, NUM_WORKERS).run()

        try:
            oid_set = set([int(op.get_ret()) for op in operations])
        except Exception, e:
            # NOTE(review): Python-2-only except syntax. Also, 'op' here is the
            # comprehension variable leaked from the set() line, so the host
            # reported may not be the one that actually failed -- verify.
            logger.error('Invalid OID in flat file on host %s' % op.host)
            return False
コード例 #6
0
class DeleteCurrentDump(Operation):
    """Delete the in-progress dump for *timestamp* on the master and on every
    primary segment.

    Deletion failures are logged as warnings and do not abort the sweep.
    """

    def __init__(self, timestamp, master_datadir, master_port):
        # Dump timestamp identifying which dump files to remove.
        self.timestamp = timestamp
        # Master data directory holding the master's copy of the dump.
        self.master_datadir = master_datadir
        # Port used to connect to the master in utility mode.
        self.master_port = master_port

    def execute(self):
        """Remove the current dump locally, then fan out deletion to each primary."""
        try:
            DeleteCurrentSegDump(self.timestamp, self.master_datadir).run()
        # Fixed Python-2-only "except OSError, e:" syntax; the binding was unused.
        except OSError:
            # Best-effort: keep going so segment copies are still removed.
            logger.warn("Error encountered during deletion of %s on master" %
                        self.timestamp)
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port),
                                          utility=True)
        segs = [
            seg for seg in gparray.getDbList()
            if seg.isSegmentPrimary(current_role=True)
        ]
        for seg in segs:
            try:
                RemoteOperation(
                    DeleteCurrentSegDump(self.timestamp,
                                         seg.getSegmentDataDirectory()),
                    seg.getSegmentHostName()).run()
            except OSError:
                logger.warn("Error encountered during deletion of %s on %s" %
                            (self.timestamp, seg.getSegmentHostName()))
コード例 #7
0
 def test_inner_exceptions(self):
     """ Verify that an object not at the global level of this file cannot be pickled properly. """
     try:
         RemoteOperation(RaiseOperation_Nested(), "localhost").run()
     # Fixed Python-2-only "except ExecutionError, e:" syntax.
     except ExecutionError as e:
         self.assertTrue(e.cmd.get_results().stderr.strip().endswith(
             "raise RaiseOperation_Nested.MyException2()"))
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("RaiseOperation_Nested should have caused an ExecutionError")
コード例 #8
0
ファイル: test_unit_utils.py プロジェクト: zyclove/gpdb
 def test_proper_exceptions_with_args(self):
     """An exception raised remotely with init args should arrive locally with
     those args intact."""
     caught = False
     try:
         RemoteOperation(RaiseOperation_Safe(), "localhost").run()
     except ExceptionWithArgs as exc:
         caught = True
         self.assertTrue(exc.x == 1 and exc.y == 2)
     if not caught:
         self.fail("RaiseOperation_Safe should have thrown ExceptionWithArgs(1, 2)")
コード例 #9
0
ファイル: test_unit_utils.py プロジェクト: zyclove/gpdb
 def test_proper_exceptions_sanity(self):
     """Exceptions that follow the picklable idiom should round-trip cleanly."""
     caught = False
     try:
         RemoteOperation(RaiseOperation_Safe(), "localhost").run()
     except ExceptionWithArgs as exc:
         caught = True
     if not caught:
         self.fail("ExceptionWithArgs should have been successfully raised + caught, because proper idiom is used.")
コード例 #10
0
    def __ensureSharedMemCleaned(self, gpEnv, directives):
        """Clean up shared memory left behind by stopped segments.

        @param directives a list of the GpStopSegmentDirectoryDirective values
                          indicating which segments to cleanup
        """
        if not directives:
            return

        logger.info(
            'Ensuring that shared memory is cleaned up for stopped segments')
        # Group the affected segments by host so one remote op runs per host.
        by_host = GpArray.getSegmentsByHostName(
            [d.getSegment() for d in directives])
        remote_ops = []
        for host_name, host_segments in by_host.items():
            remote_ops.append(
                RemoteOperation(CleanSharedMem(host_segments), host=host_name))
        ParallelOperation(remote_ops).run()

        for op in remote_ops:
            try:
                op.get_ret()
            except Exception:
                # Best-effort cleanup: failures are logged, not raised.
                logger.warning(
                    'Unable to clean up shared memory for stopped segments on host (%s)'
                    % op.host)
コード例 #11
0
 def test_Remote_harden(self):
     """ Ensure that some logging occurs in event of error. """
     # One case encountered thus far is the raising of a pygresql DatabaseError,
     # which due to the import from a shared object (I think), does not behave
     # nicely in terms of imports and namespacing.
     try:
         RemoteOperation(RaiseOperation_Unpicklable(), "localhost").run()
     # Fixed Python-2-only "except ExecutionError, e:" syntax.
     except ExecutionError as e:
         self.assertTrue(e.cmd.get_results().stderr.strip().endswith("raise pg.DatabaseError()"))
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("RaiseOperation_Unpicklable should have caused an ExecutionError")
コード例 #12
0
    def execute(self):
        """Update the per-segment transaction flat files after a filespace move.

        Selects the target segments (primaries or mirrors, optionally only
        expansion segments), then rewrites each segment's transaction flat
        file in parallel via RemoteOperation.

        Raises:
            MoveFilespaceError: if any remote flat-file update fails.
        """
        # Obtain list of segments from gparray
        if self.expansion:
            db_list = self.gparray.getExpansionSegDbList()
        else:
            db_list = self.gparray.getDbList()

        if self.primaries:
            segments = [seg for seg in db_list if seg.isSegmentPrimary()]
        else:
            segments = [seg for seg in db_list if seg.isSegmentMirror()]

        logger.debug('segment_list = %s' % self.gparray.getDbList())
        logger.debug('segments on which flat files will be updated = %s' %
                     segments)
        pg_system_filespace_entries = GetFilespaceEntriesDict(
            GetFilespaceEntries(self.gparray,
                                PG_SYSTEM_FILESPACE).run()).run()
        # NOTE(review): entry[2] appears to be the filespace directory path
        # and dbid 1 the master -- confirm against GetFilespaceEntriesDict.
        transaction_flat_file = os.path.join(pg_system_filespace_entries[1][2],
                                             GP_TRANSACTION_FILES_FILESPACE)
        if os.path.exists(transaction_flat_file):
            logger.debug('Updating transaction flat files')
            cur_filespace_entries = GetFilespaceEntriesDict(
                GetCurrentFilespaceEntries(
                    self.gparray, FileType.TRANSACTION_FILES).run()).run()
            operation_list = []
            for seg in segments:
                filespace_oid = cur_filespace_entries[seg.getSegmentDbId()][0]
                cur_filespace_entry = cur_filespace_entries[
                    seg.getSegmentDbId()]
                peer_filespace_entry = get_peer_filespace_entry(
                    cur_filespace_entries, seg.getSegmentDbId(),
                    seg.getSegmentContentId(), db_list)
                logger.debug('cur_filespace_entry = %s' %
                             str(cur_filespace_entry))
                logger.debug('peer_filespace_entry = %s' %
                             str(peer_filespace_entry))
                flat_file = os.path.join(
                    pg_system_filespace_entries[seg.getSegmentDbId()][2],
                    GP_TRANSACTION_FILES_FILESPACE)
                operation_list.append(
                    RemoteOperation(
                        UpdateFlatFilesLocally(flat_file, filespace_oid,
                                               cur_filespace_entry,
                                               peer_filespace_entry),
                        seg.getSegmentHostName()))

            ParallelOperation(operation_list, NUM_WORKERS).run()

            try:
                for operation in operation_list:
                    operation.get_ret()
            # Fixed Python-2-only "except Exception, e:" syntax.
            except Exception as e:
                raise MoveFilespaceError(
                    'Failed to update transaction flat file.')
コード例 #13
0
ファイル: test_unit_utils.py プロジェクト: zyclove/gpdb
 def test_Remote_harden(self):
     """ Ensure that some logging occurs in event of error. """
     # One case encountered thus far is the raising of a pygresql DatabaseError,
     # which due to the import from a shared object (I think), does not behave
     # nicely in terms of imports and namespacing.
     try:
         RemoteOperation(RaiseOperation_Unpicklable(), "localhost").run()
     except DatabaseError:
         # Expected: the unpicklable error type still surfaced locally.
         pass
     else:
         self.fail("""A DatabaseError should have been raised remotely, and because it cannot
                      be pickled cleanly (due to a strange import in pickle.py),
                      an ExecutionError should have ultimately been caused.""")
コード例 #14
0
    def execute(self):
        """Undo a partial filespace move by moving files back from the new
        location to the current one on every segment, in parallel."""
        logger.info('Rolling back filespace changes ...')
        remote_ops = []
        for seg in self.segments:
            logger.debug('Creating RemoteOperation for segment %s' % seg)
            dbid = seg.getSegmentDbId()
            peer_entry = get_peer_filespace_entry(
                self.cur_filespace_entries, dbid,
                seg.getSegmentContentId(), self.segments)
            # Same operation shape for both file types; only the local mover
            # class differs.
            if self.file_type == FileType.TRANSACTION_FILES:
                mover_cls = MoveTransFilespaceLocally
            elif self.file_type == FileType.TEMPORARY_FILES:
                mover_cls = MoveTempFilespaceLocally
            else:
                continue
            # Direction is reversed relative to the forward move: new -> cur,
            # with rollback=True.
            remote_ops.append(
                RemoteOperation(
                    mover_cls(self.new_filespace_entries[dbid],
                              self.cur_filespace_name,
                              self.cur_filespace_entries[dbid],
                              peer_entry,
                              self.pg_system_filespace_entries[dbid],
                              rollback=True),
                    seg.getSegmentHostName()))

        logger.debug('Running remote operations in parallel')
        ParallelOperation(remote_ops, NUM_WORKERS).run()

        logger.debug('Checking results of parallel operations')
        for op in remote_ops:
            op.get_ret()
コード例 #15
0
ファイル: verify.py プロジェクト: pkdevboxy/incubator-hawq
 def execute(self):
     """Validate each segment's verification result file remotely and group
     the segment content ids by the state each one reported.

     Returns a dict mapping VerificationState -> list of content ids.
     """
     results = {}
     for state in (VerificationState.RUNNING, VerificationState.SUCCEEDED,
                   VerificationState.ABORTED, VerificationState.FAILED):
         results[state] = []

     remote_ops = [
         RemoteOperation(
             ValidateResultFile(token=self.token,
                                datadir=pseg.getSegmentDataDirectory(),
                                content=pseg.getSegmentContentId()),
             pseg.getSegmentHostName())
         for pseg in self.to_validate
     ]
     ParallelOperation(remote_ops, self.batch_default).run()

     for op in remote_ops:
         results[op.get_ret()].append(op.operation.content)
     return results
コード例 #16
0
    def execute(self):
        """Validate gp_toolkit, then check free disk space on every primary segment.

        Each segment's space check runs remotely in parallel; hosts with too
        little space are logged as errors while the rest are counted in
        `success`.
        """
        ValidateGpToolkit(database=self.dump_database,
                          master_port=self.master_port).run()

        operations = []
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port),
                                          utility=True)
        segs = [
            seg for seg in gparray.getDbList()
            if seg.isSegmentPrimary(current_role=True)
        ]
        for seg in segs:
            operations.append(
                RemoteOperation(
                    ValidateSegDiskSpace(
                        free_space_percent=self.free_space_percent,
                        compress=self.compress,
                        dump_database=self.dump_database,
                        include_dump_tables=self.include_dump_tables,
                        datadir=seg.getSegmentDataDirectory(),
                        segport=seg.getSegmentPort()),
                    seg.getSegmentHostName()))

        ParallelOperation(operations, self.batch_default).run()

        success = 0
        for remote in operations:
            host = remote.host
            try:
                remote.get_ret()
            # Fixed Python-2-only "except NotEnoughDiskSpace, e:" syntax.
            except NotEnoughDiskSpace as e:
                logger.error(
                    "%s has insufficient disk space. [Need: %dK, Free %dK]" %
                    (host, e.needed_space, e.free_space))
            else:
                success += 1
コード例 #17
0
 def test_proper_exceptions_with_args(self):
     """A remotely raised exception should arrive locally with its init args intact."""
     try:
         RemoteOperation(RaiseOperation_Safe(), "localhost").run()
     # Fixed Python-2-only "except ExceptionWithArgs, e:" syntax.
     except ExceptionWithArgs as e:
         self.assertTrue(e.x == 1 and e.y == 2)
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("RaiseOperation_Safe should have thrown ExceptionWithArgs(1, 2)")
コード例 #18
0
 def test_proper_exceptions_sanity(self):
     """Exceptions following the picklable idiom should round-trip cleanly."""
     try:
         RemoteOperation(RaiseOperation_Safe(), "localhost").run()
     # Fixed Python-2-only "except ExceptionWithArgs, e:" syntax.
     except ExceptionWithArgs as e:
         pass
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("ExceptionWithArgs should have been raised and caught")
コード例 #19
0
 def test_unsafe_exceptions_with_args(self):
     """Exceptions that take init args but don't store them fail to unpickle."""
     try:
         RemoteOperation(RaiseOperation_Unsafe(), "localhost").run()
     # Fixed Python-2-only "except TypeError, e:" syntax.
     except TypeError as e:  # Because Exceptions don't retain init args, they are not pickle-able normally
         pass
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("RaiseOperation_Unsafe should have caused a TypeError during unpickling")
コード例 #20
0
 def test_Remote_exceptions(self):
     """ Test that an Exception returned remotely will be raised locally. """
     try:
         RemoteOperation(RaiseOperation(), "localhost").run()
     # Fixed Python-2-only "except MyException, e:" syntax.
     except MyException as e:
         pass
     else:
         # Previously the test silently passed if nothing was raised.
         self.fail("RaiseOperation should have raised MyException locally")
コード例 #21
0
 def test_Remote_basic(self):
     """ Basic RemoteOperation test: running locally and remotely must agree. """
     local_result = TestOperation().run()
     remote_result = RemoteOperation(TestOperation(), "localhost").run()
     self.assertTrue(local_result == remote_result)
コード例 #22
0
        # Now check for the filespace entries
        operation_list = []
        for seg in self.gparray.getDbList():
            cur_filespace_entry = cur_filespace_entries[seg.getSegmentDbId()]
            peer_filespace_entry = get_peer_filespace_entry(
                cur_filespace_entries, seg.getSegmentDbId(),
                seg.getSegmentContentId(), self.gparray.getDbList())
            logger.debug('current_filespace_entry = %s' %
                         str(cur_filespace_entry))
            logger.debug('peer_filespace_entry = %s' %
                         str(peer_filespace_entry))
            operation_list.append(
                RemoteOperation(
                    CheckFilespaceEntriesLocally(
                        cur_filespace_entry, peer_filespace_entry,
                        pg_system_fs_entries[seg.getSegmentDbId()][2],
                        self.file_type), seg.getSegmentHostName()))

        ParallelOperation(operation_list, NUM_WORKERS).run()

        for operation in operation_list:
            try:
                if not operation.get_ret():
                    logger.error(
                        '%s entries are inconsistent for %s filespace on host %s'
                        % (FileType.lookup[self.file_type], fs_name,
                           operation.host))
                    return False
            except Exception, e:
                logger.error(
コード例 #23
0
            if seg.isSegmentPrimary(current_role=True)
        ]
        for seg in segs:
            path = self.backup_dir if self.backup_dir is not None else seg.getSegmentDataDirectory(
            )
            path = os.path.join(path, DUMP_DIR, DUMP_DATE)
            status_file = os.path.join(
                path, "%s%d_%s" %
                (SEG_STATUS_PREFIX, seg.getSegmentDbId(), timestamp))
            dump_file = os.path.join(
                path, "%s%d_%s" %
                (SEG_DBDUMP_PREFIX, seg.getSegmentDbId(), timestamp))
            if self.compress: dump_file += ".gz"
            operations.append(
                RemoteOperation(
                    PostDumpSegment(status_file=status_file,
                                    dump_file=dump_file),
                    seg.getSegmentHostName()))

        ParallelOperation(operations, self.batch_default).run()

        success = 0
        for remote in operations:
            host = remote.host
            status_file = remote.operation.status_file
            dump_file = remote.operation.dump_file
            try:
                remote.get_ret()
            except NoStatusFile, e:
                logger.warn('Status file %s not found on %s' %
                            (status_file, host))
            except StatusFileError, e:
コード例 #24
0
ファイル: test_unit_utils.py プロジェクト: hellomokey/gp
 def test_Remote_exceptions(self):
     """ Test that an Exception returned remotely will be raised locally. """
     remote_op = RemoteOperation(RaiseOperation(), "localhost")
     with self.assertRaises(Exception):
         remote_op.run()
コード例 #25
0
ファイル: restore.py プロジェクト: xiehechong/hawq
def BuildRemoteTableDump(restore_tables, real_filename, fake_filename,
                         compress, host):
    """Wrap a local BuildTableDump in a RemoteOperation targeting *host*."""
    local_dump = BuildTableDump(restore_tables, real_filename, fake_filename,
                                compress)
    return RemoteOperation(local_dump, host)