def _run_both_failed_failover_tests(self, tests):
    """Drive buildMirrors for recovery triplets that have both a failed and a
    failover segment, then verify the stop/start/markdown expectations and the
    resulting segment mode/status of each live and failover segment."""
    to_build = []
    segs_expected_to_start = []
    segs_expected_to_stop = []
    segs_expected_to_markdown = []
    for case in tests:
        with self.subTest(msg=case["name"]):
            to_build.append(GpMirrorToBuild(case["failed"], case["live"],
                                            case["failover"], case["forceFull"]))
            segs_expected_to_stop.append(case["failed"])
            segs_expected_to_start.append(case["failover"])
            # Only failed segments still reported as up need an explicit mark-down.
            if case.get("is_failed_segment_up"):
                segs_expected_to_markdown.append(case["failed"])

    build_obj = self._run_buildMirrors(to_build)

    self._common_asserts_with_stop_and_logger(
        build_obj, "Ensuring 3 failed segment(s) are stopped",
        segs_expected_to_stop, segs_expected_to_start,
        segs_expected_to_markdown, {1: True, 5: True, 9: True}, 1)

    for case in tests:
        self.assertEqual('n', case['live'].getSegmentMode())
        self.assertEqual('d', case['failover'].getSegmentStatus())
        self.assertEqual('n', case['failover'].getSegmentMode())
def _run_no_failed_tests(self, tests):
    """Drive buildMirrors for recovery triplets that have no failed segment
    and verify that nothing is stopped while the failover segments start."""
    to_build = []
    segs_expected_to_start = []
    for case in tests:
        with self.subTest(msg=case["name"]):
            to_build.append(GpMirrorToBuild(None, case["live"],
                                            case["failover"], case["forceFull"]))
            segs_expected_to_start.append(case["failover"])

    build_obj = self._run_buildMirrors(to_build)

    self.assertEqual(3, self.mock_logger.info.call_count)
    # No segment failed, so running-postgres probing must never happen.
    self.assertEqual(0, build_obj._get_running_postgres_segments.call_count)
    self._common_asserts(build_obj, segs_expected_to_start, [],
                         {2: True, 4: True}, 1)

    for case in tests:
        self.assertEqual('n', case['live'].getSegmentMode())
        self.assertEqual('d', case['failover'].getSegmentStatus())
        self.assertEqual('n', case['failover'].getSegmentMode())
def test_clean_up_failed_segments_no_segs_to_cleanup(self):
    """Cleanup is a no-op when no recovery is both in-place and full."""
    failed2 = self.create_primary(dbid='3', status='d')
    failover2 = self.create_primary(dbid='3', status='d')
    live2 = self.create_mirror(dbid='4')
    failed4 = self.create_primary(dbid='5')
    live4 = self.create_mirror(dbid='7')

    # Full recovery to a new location, and in-place-but-incremental recovery:
    # neither qualifies for the failed-segment cleanup pass.
    not_inplace_full = GpMirrorToBuild(failed2, live2, failover2, True)
    inplace_not_full = GpMirrorToBuild(failed4, live4, None, False)

    build_obj = GpMirrorListToBuild(
        toBuild=[not_inplace_full, inplace_not_full],
        pool=None,
        quiet=True,
        parallelDegree=0,
        logger=self.mock_logger,
        forceoverwrite=True)
    build_obj._GpMirrorListToBuild__runWaitAndCheckWorkerPoolForErrorsAndClear = Mock()

    build_obj._clean_up_failed_segments()

    self.assertEqual(0, self.mock_get_segments_by_hostname.call_count)
    self.assertEqual(0, self.mock_logger.info.call_count)
def test_clean_up_failed_segments(self):
    """Only recoveries that are both in-place and full should have their
    failed segments cleaned up (here: failed1 and failed3)."""
    failed1 = self.create_primary(status='d')
    live1 = self.create_mirror()
    failed2 = self.create_primary(dbid='3', status='d')
    failover2 = self.create_primary(dbid='3', status='d')
    live2 = self.create_mirror(dbid='4')
    failed3 = self.create_primary(dbid='5')
    live3 = self.create_mirror(dbid='6')
    failed4 = self.create_primary(dbid='5')
    live4 = self.create_mirror(dbid='7')

    inplace_full1 = GpMirrorToBuild(failed1, live1, None, True)
    not_inplace_full = GpMirrorToBuild(failed2, live2, failover2, True)
    inplace_full2 = GpMirrorToBuild(failed3, live3, None, True)
    inplace_not_full = GpMirrorToBuild(failed4, live4, None, False)

    buildMirrorSegs_obj = GpMirrorListToBuild(
        toBuild=[inplace_full1, not_inplace_full, inplace_full2, inplace_not_full],
        pool=None,
        quiet=True,
        parallelDegree=0,
        logger=self.mock_logger,
        forceoverwrite=True)
    buildMirrorSegs_obj._GpMirrorListToBuild__runWaitAndCheckWorkerPoolForErrorsAndClear = Mock()

    buildMirrorSegs_obj._clean_up_failed_segments()

    self.mock_get_segments_by_hostname.assert_called_once_with([failed1, failed3])
    # BUG FIX: the original used mock_logger.info.called_once_with(...), which
    # is not an assertion at all -- attribute access on a Mock silently creates
    # a child mock, so the check always "passed".  Use a real assert helper and
    # drop the stray leading quote from the expected message.
    # NOTE(review): confirm the exact message _clean_up_failed_segments logs.
    self.mock_logger.info.assert_any_call('Cleaning files from 2 segment(s)')
def test_buildMirrors_failed_seg_in_gparray_fail(self):
    """buildMirrors must raise when a failover target is supplied but the
    failed segment is still present in the new configuration (gparray)."""
    tests = [{
        "name": "failed_seg_exists_in_gparray1",
        "failed": self.create_primary(status='d'),
        "failover": self.create_primary(status='d'),
        "live": self.create_mirror(),
        "forceFull": True,
        "forceoverwrite": False
    }, {
        "name": "failed_seg_exists_in_gparray2",
        "failed": self.create_primary(dbid='3', status='d'),
        "failover": self.create_primary(dbid='3', status='d'),
        "live": self.create_mirror(dbid='4'),
        "forceFull": False,
        "forceoverwrite": False
    }, {
        # BUG FIX: this case was also named "failed_seg_exists_in_gparray2",
        # which made subTest failures indistinguishable from the previous case.
        "name": "failed_seg_exists_in_gparray3",
        "failed": self.create_primary(dbid='3', status='d'),
        "failover": self.create_primary(dbid='3', status='d'),
        "live": self.create_mirror(dbid='4'),
        "forceFull": False,
        "forceoverwrite": True
    }]
    for test in tests:
        mirror_to_build = GpMirrorToBuild(test["failed"], test["live"],
                                          test["failover"], test["forceFull"])
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=[mirror_to_build, ],
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger,
            forceoverwrite=test['forceoverwrite'])
        self._setup_mocks(buildMirrorSegs_obj)
        # The failed segment deliberately remains in the target gparray.
        local_gp_array = GpArray([self.coordinator, test["failed"]])
        expected_error = "failed segment should not be in the new configuration if failing over to"
        with self.subTest(msg=test["name"]):
            with self.assertRaisesRegex(Exception, expected_error):
                buildMirrorSegs_obj.buildMirrors(self.action, self.gpEnv, local_gp_array)
def test_buildMirrors_forceoverwrite_true(self):
    """buildMirrors with forceoverwrite=True recovers a failed primary onto a
    failover host and brings it up in 'd'/'n' (down, not-in-sync) state."""
    failed = self.create_primary(status='d')
    live = self.create_mirror()
    failover = self.create_primary(host='sdw3')

    build_obj = GpMirrorListToBuild(
        toBuild=[GpMirrorToBuild(failed, live, failover, False)],
        pool=None,
        quiet=True,
        parallelDegree=0,
        logger=self.mock_logger,
        forceoverwrite=True)
    self._setup_mocks(build_obj)

    self.assertTrue(build_obj.buildMirrors(self.action, self.gpEnv, self.gpArray))

    self._common_asserts_with_stop_and_logger(
        build_obj, "Ensuring 1 failed segment(s) are stopped",
        [failed], [failover], [], {1: True}, 0)
    self.assertEqual('n', live.getSegmentMode())
    self.assertEqual('d', failover.getSegmentStatus())
    self.assertEqual('n', failover.getSegmentMode())
def test_build_recovery_info_passes(self):
    """Table-driven check of recovery-info construction.

    The expected dictionary within each test has target_host as the key.
    Each RecoveryInfo object holds source_host (live segment), but not the
    target_host.
    """
    tests = [
        {
            "name": "single_target_host_suggest_full_and_incr",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m3, self.p3, None, True),
                GpMirrorToBuild(self.m4, self.p4, None, False)
            ],
            "expected": {
                'sdw3': [
                    RecoveryInfo('/data/mirror3', 7000, 7, 'sdw2', 3000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid7.out'),
                    RecoveryInfo('/data/mirror4', 8000, 8, 'sdw3', 4000, False,
                                 '/tmp/logdir/pg_rewind.111.dbid8.out')
                ]
            }
        },
        {
            "name": "single_target_hosts_suggest_full_and_incr_with_failover",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, self.m5, True),
                GpMirrorToBuild(self.m2, self.p2, self.m6, False)
            ],
            "expected": {
                'sdw4': [
                    RecoveryInfo('/data/mirror5', 9000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out'),
                    RecoveryInfo('/data/mirror6', 10000, 6, 'sdw2', 2000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid6.out')
                ]
            }
        },
        {
            "name": "multiple_target_hosts_suggest_full",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, None, True),
                GpMirrorToBuild(self.m2, self.p2, None, True)
            ],
            "expected": {
                'sdw2': [
                    RecoveryInfo('/data/mirror1', 5000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out')
                ],
                'sdw1': [
                    RecoveryInfo('/data/mirror2', 6000, 6, 'sdw2', 2000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid6.out')
                ]
            }
        },
        {
            "name": "multiple_target_hosts_suggest_full_and_incr",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, None, True),
                GpMirrorToBuild(self.m3, self.p3, None, False),
                GpMirrorToBuild(self.m4, self.p4, None, True)
            ],
            "expected": {
                'sdw2': [
                    RecoveryInfo('/data/mirror1', 5000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out')
                ],
                'sdw3': [
                    RecoveryInfo('/data/mirror3', 7000, 7, 'sdw2', 3000, False,
                                 '/tmp/logdir/pg_rewind.111.dbid7.out'),
                    RecoveryInfo('/data/mirror4', 8000, 8, 'sdw3', 4000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid8.out')
                ]
            }
        },
        {
            "name": "multiple_target_hosts_suggest_incr_failover_same_as_failed",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, self.m1, False),
                GpMirrorToBuild(self.m2, self.p2, self.m2, False)
            ],
            "expected": {
                'sdw2': [
                    RecoveryInfo('/data/mirror1', 5000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out')
                ],
                'sdw1': [
                    RecoveryInfo('/data/mirror2', 6000, 6, 'sdw2', 2000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid6.out')
                ]
            }
        },
        {
            "name": "multiple_target_hosts_suggest_full_failover_same_as_failed",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, self.m1, True),
                GpMirrorToBuild(self.m3, self.p3, self.m3, True),
                GpMirrorToBuild(self.m4, self.p4, None, True)
            ],
            "expected": {
                'sdw2': [
                    RecoveryInfo('/data/mirror1', 5000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out')
                ],
                'sdw3': [
                    RecoveryInfo('/data/mirror3', 7000, 7, 'sdw2', 3000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid7.out'),
                    RecoveryInfo('/data/mirror4', 8000, 8, 'sdw3', 4000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid8.out')
                ]
            }
        },
        {
            # BUG FIX: this case was named "multiple_target_hosts_suggest_full_and_incr",
            # duplicating an earlier case and obscuring which one failed.
            "name": "multiple_target_hosts_suggest_full_and_incr_with_failover",
            "mirrors_to_build": [
                GpMirrorToBuild(self.m1, self.p1, self.m5, True),
                GpMirrorToBuild(self.m2, self.p2, None, False),
                GpMirrorToBuild(self.m3, self.p3, self.m3, False),
                GpMirrorToBuild(self.m4, self.p4, self.m8, True)
            ],
            "expected": {
                'sdw4': [
                    RecoveryInfo('/data/mirror5', 9000, 5, 'sdw1', 1000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid5.out'),
                ],
                'sdw1': [
                    RecoveryInfo('/data/mirror2', 6000, 6, 'sdw2', 2000, False,
                                 '/tmp/logdir/pg_rewind.111.dbid6.out'),
                    RecoveryInfo('/data/mirror8', 12000, 8, 'sdw3', 4000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid8.out')
                ],
                'sdw3': [
                    RecoveryInfo('/data/mirror3', 7000, 7, 'sdw2', 3000, True,
                                 '/tmp/logdir/pg_basebackup.111.dbid7.out')
                ]
            }
        },
    ]
    self.run_tests(tests)
def setUp(self):
    """Build the full mock environment for GpRecoverSegmentProgram tests:
    a temp data directory, mocked DB connection, a 2-primary/2-mirror
    gparray, and patches over gppylib collaborators."""
    # Scratch directory doubles as COORDINATOR_DATA_DIRECTORY and GPHOME.
    self.temp_dir = tempfile.mkdtemp()
    self.config_file_path = os.path.join(self.temp_dir, "foo")
    with open(self.config_file_path, "w") as config_file:
        config_file.write("")

    # Mocked DB connection usable as a context manager; queries go through
    # the patched dbconn functions below.
    self.conn = Mock()
    self.conn.__enter__ = Mock(return_value=(Mock(), None))
    self.conn.__exit__ = Mock(return_value=None)
    self.cursor = FakeCursor()
    self.db_singleton = Mock()

    # Environment seen by the program (os.environ is patched to this dict).
    self.os_env = dict(USER="******")
    self.os_env["COORDINATOR_DATA_DIRECTORY"] = self.temp_dir
    self.os_env["GPHOME"] = self.temp_dir

    self.gparray = self._create_gparray_with_2_primary_2_mirrors()
    self.pool = Mock()
    self.pool.getCompletedItems.return_value = []

    # Minimal postgresql.conf settings served by the patched pgconf.readfile.
    self.pgconf_dict = gucdict()
    self.pgconf_dict["port"] = setting("port", "123", None, None, None)
    self.pgconf_dict["max_connection"] = setting("max_connections", "1", None, None, None)

    # Configuration provider returns itself from initializeProvider and a
    # mocked GpArray from loadSystemConfig.
    self.config_provider_mock = MagicMock(spec=GpConfigurationProvider)
    self.config_provider_mock.initializeProvider.return_value = self.config_provider_mock

    self.gpArrayMock = MagicMock(spec=GpArray)
    # Three successive getDbList() calls each see only primary0.
    self.gpArrayMock.getDbList.side_effect = [[self.primary0], [self.primary0], [self.primary0]]
    self.gpArrayMock.segmentPairs = []
    self.gpArrayMock.hasMirrors = True
    self.gpArrayMock.isStandardArray.return_value = (True, None)
    self.gpArrayMock.coordinator = self.gparray.coordinator
    self.config_provider_mock.loadSystemConfig.return_value = self.gpArrayMock

    self.mirror_to_build = GpMirrorToBuild(self.mirror0, self.primary0, None, False)

    self.apply_patches([
        patch('os.environ', new=self.os_env),
        patch('gppylib.db.dbconn.connect', return_value=self.conn),
        patch('gppylib.db.dbconn.query', return_value=self.cursor),
        patch('gppylib.db.dbconn.queryRow', return_value=["foo"]),
        patch('gppylib.pgconf.readfile', return_value=self.pgconf_dict),
        patch('gppylib.commands.gp.GpVersion'),
        patch('gppylib.system.faultProberInterface.getFaultProber'),
        patch(
            'gppylib.system.configurationInterface.getConfigurationProvider',
            return_value=self.config_provider_mock),
        patch('gppylib.commands.base.WorkerPool',
              return_value=self.pool),
        patch('gppylib.gparray.GpArray.getSegmentsByHostName', return_value={}),
        patch('gppylib.gplog.get_default_logger'),
        patch.object(GpMirrorListToBuild, "__init__", return_value=None),
        patch.object(GpMirrorListToBuild, "buildMirrors"),
        patch.object(GpMirrorListToBuild, "getAdditionalWarnings"),
        patch.object(GpMirrorListToBuild, "getMirrorsToBuild"),
        patch.object(HeapChecksum, "check_segment_consistency"),
        patch.object(HeapChecksum, "get_segments_checksum_settings"),
    ])
    self.call_count = 0
    self.return_one = True

    # Handles to the mocks created by apply_patches, for assertions in tests.
    self.mock_build_mirrors = self.get_mock_from_apply_patch(
        "buildMirrors")
    self.mock_get_mirrors_to_build = self.get_mock_from_apply_patch(
        'getMirrorsToBuild')
    # NOTE(review): the only "__init__" patched above is
    # GpMirrorListToBuild.__init__, not HeapChecksum's -- the variable name
    # "mock_heap_checksum_init" may be misleading; confirm intent.
    self.mock_heap_checksum_init = self.get_mock_from_apply_patch(
        "__init__")
    self.mock_check_segment_consistency = self.get_mock_from_apply_patch(
        'check_segment_consistency')
    self.mock_get_segments_checksum_settings = self.get_mock_from_apply_patch(
        'get_segments_checksum_settings')

    sys.argv = ["gprecoverseg"]  # reset to relatively empty args list
    options = Options()
    options.coordinatorDataDirectory = self.temp_dir
    options.spareDataDirectoryFile = self.config_file_path
    options.showProgress = True
    options.showProgressInplace = True

    # import HERE so that patches are already in place!
    from gppylib.programs.clsRecoverSegment import GpRecoverSegmentProgram
    self.subject = GpRecoverSegmentProgram(options)
    self.subject.logger = Mock(
        spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal'])
    faultProberInterface.gFaultProber = Mock()