def __init__(self) -> None:
     """Set up the services and resource checkers this manager drives.

     NOTE(review): stray fragment — a mis-indented (5-space body) duplicate of
     ClusterManager.__init__ defined later in this file, with no enclosing
     class; confirm it is dead text and remove it.
     """
     # Last fully-checked resource version per cluster.
     self._cluster_versions: Dict[Tuple[str, str], str] = {}  # format: {(cluster_name, namespace): resource_version}
     self._kubernetes_service = KubernetesService()
     self._mongo_service = MongoService(self._kubernetes_service)
     self._backup_checker = BackupHelper(self._kubernetes_service)
     # One checker per managed Kubernetes resource type.
     self._resource_checkers: List[BaseResourceChecker] = [
         ServiceChecker(self._kubernetes_service),
         StatefulSetChecker(self._kubernetes_service),
         AdminSecretChecker(self._kubernetes_service),
     ]
    def __init__(self):
        """Set up the services and resource checkers this manager drives.

        NOTE(review): stray fragment — an older copy of ClusterManager.__init__
        (``# type:`` comments instead of annotations, public attribute names, and
        ``BackupChecker`` where the other copies use ``BackupHelper``). It is not
        attached to any visible class; confirm which version is current and
        delete the other.
        """
        self.kubernetes_service = KubernetesService()
        self.mongo_service = MongoService(self.kubernetes_service)

        self.resource_checkers = [
            ServiceChecker(self.kubernetes_service),
            StatefulSetChecker(self.kubernetes_service),
            AdminSecretChecker(self.kubernetes_service),
        ]  # type: List[BaseResourceChecker]

        self.backup_checker = BackupChecker(self.kubernetes_service)

        self.cluster_versions = {
        }  # type: Dict[Tuple[str, str], str]  # format: {(cluster_name, namespace): resource_version}
class ClusterManager:
    """Keeps the MongoDB objects in the cluster in sync with their desired state.

    Periodically inspects every Mongo custom object, (re-)creates the Kubernetes
    resources that belong to it, and triggers backups where required.
    """

    def __init__(self) -> None:
        # Shared service instances used by every checker below.
        self._kubernetes_service = KubernetesService()
        self._mongo_service = MongoService(self._kubernetes_service)
        self._backup_checker = BackupHelper(self._kubernetes_service)
        # One checker per managed Kubernetes resource type.
        self._resource_checkers: List[BaseResourceChecker] = [
            ServiceChecker(self._kubernetes_service),
            StatefulSetChecker(self._kubernetes_service),
            AdminSecretChecker(self._kubernetes_service),
        ]
        # Last fully-checked resource version per cluster,
        # keyed as {(cluster_name, namespace): resource_version}.
        self._cluster_versions: Dict[Tuple[str, str], str] = {}

    def checkExistingClusters(self) -> None:
        """
        Check all Mongo objects and see if the sub objects are available,
        (re-)creating them where necessary to reach the expected state.
        """
        items = self._kubernetes_service.listMongoObjects()["items"]
        logging.info("Checking %s mongo objects.", len(items))
        for item in items:
            parsed = self._parseConfiguration(item)
            if parsed:
                self._checkCluster(parsed)

    def collectGarbage(self) -> None:
        """
        Cleans up any resources that are left after a cluster has been removed.
        """
        for resource_checker in self._resource_checkers:
            resource_checker.cleanResources()

    def _checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool = False) -> None:
        """
        Checks whether the given cluster is configured and updated.
        :param cluster_object: The cluster object from the YAML file.
        :param force: If this is True, we will re-update the cluster even if it has been checked before.
        """
        key = cluster_object.metadata.name, cluster_object.metadata.namespace
        version = cluster_object.metadata.resource_version

        if force or self._cluster_versions.get(key) != version:
            # New or changed configuration: reconcile every resource, then
            # remember the version so unchanged clusters take the fast path.
            for resource_checker in self._resource_checkers:
                resource_checker.checkResource(cluster_object)
            self._mongo_service.checkOrCreateReplicaSet(cluster_object)
            self._mongo_service.createUsers(cluster_object)
            self._cluster_versions[key] = version
        else:
            logging.debug("Cluster object %s has been checked already in version %s.",
                          key, version)
            # Still verify the replica set so a broken cluster gets repaired.
            self._mongo_service.checkOrCreateReplicaSet(cluster_object)

        self._backup_checker.backupIfNeeded(cluster_object)

    @staticmethod
    def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoClusterConfiguration]:
        """
        Tries to parse the given cluster configuration.
        :param cluster_dict: The dictionary containing the configuration.
        :return: The validated cluster configuration model, or None if it cannot be parsed.
        """
        try:
            configuration = V1MongoClusterConfiguration(**cluster_dict)
            configuration.validate()
        except ValueError as err:
            meta = cluster_dict.get("metadata", {})
            logging.error("Could not validate cluster configuration for %s @ ns/%s: %s. The cluster will be ignored.",
                          meta.get("name"), meta.get("namespace"), err)
            return None
        return configuration
# Example #4
# 0
    def setUp(self):
        super().setUp()
        self.kubernetes_service: Union[MagicMock,
                                       KubernetesService] = MagicMock()
        self.kubernetes_service.getSecret.return_value = V1Secret(
            metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials",
                                  namespace="default"),
            data={
                "password": b64encode(b"random-password"),
                "username": b64encode(b"root")
            },
        )

        self.service = MongoService(self.kubernetes_service)
        self.cluster_dict = getExampleClusterDefinition()
        self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

        self.not_initialized_response = {
            "info": "run rs.initiate(...) if not yet done for the set",
            "ok": 0,
            "errmsg": "no replset config has been received",
            "code": 94,
            "codeName": "NotYetInitialized"
        }

        self.initiate_ok_response = {
            "ok": 1,
            "operationTime": 1528365094.1,
            "$clusterTime": {
                "clusterTime": 1528365094.1,
                "signature": {
                    "hash": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=",
                    "keyId": 0
                }
            }
        }

        self.initiate_not_found_response = {
            "ok":
            0,
            "errmsg":
            "replSetInitiate quorum check failed because not all proposed set members responded "
            "affirmatively: some-db-2.some-db.default.svc.cluster.local:27017 failed with Connection refused",
            "code":
            74,
            "codeName":
            "NodeNotFound"
        }

        self.expected_cluster_config = json.dumps({
            "_id":
            "mongo-cluster",
            "version":
            1,
            "members": [{
                "_id":
                0,
                "host":
                "mongo-cluster-0.mongo-cluster.default.svc.cluster.local"
            }, {
                "_id":
                1,
                "host":
                "mongo-cluster-1.mongo-cluster.default.svc.cluster.local"
            }, {
                "_id":
                2,
                "host":
                "mongo-cluster-2.mongo-cluster.default.svc.cluster.local"
            }]
        })

        self.expected_user_create = """
# Example #5
# 0
class TestMongoService(TestCase):
    """Unit tests for MongoService with the Kubernetes API replaced by a MagicMock.

    Mongo shell output is loaded from text fixture files; each test checks both
    the result (return value or raised exception) and the exact sequence of
    calls made on the mocked KubernetesService.
    """
    # Always show full diffs — the expected-call lists are long.
    maxDiff = None

    def setUp(self):
        """Create the service under test and the fixtures shared by all tests."""
        super().setUp()
        # Fully mocked Kubernetes API; the Union annotation keeps IDE completion.
        self.kubernetes_service: Union[MagicMock,
                                       KubernetesService] = MagicMock()
        self.kubernetes_service.getSecret.return_value = V1Secret(
            metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials",
                                  namespace="default"),
            data={
                "password": b64encode(b"random-password"),
                "username": b64encode(b"root")
            },
        )

        self.service = MongoService(self.kubernetes_service)
        self.cluster_dict = getExampleClusterDefinition()
        self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

        # Parsed mongo shell responses matching the fixture files.
        self.not_initialized_response = {
            "info": "run rs.initiate(...) if not yet done for the set",
            "ok": 0,
            "errmsg": "no replset config has been received",
            "code": 94,
            "codeName": "NotYetInitialized"
        }

        self.initiate_ok_response = {
            "ok": 1,
            "operationTime": 1528365094.1,
            "$clusterTime": {
                "clusterTime": 1528365094.1,
                "signature": {
                    "hash": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=",
                    "keyId": 0
                }
            }
        }

        self.initiate_not_found_response = {
            "ok": 0,
            "errmsg": "replSetInitiate quorum check failed because not all proposed set members responded "
                      "affirmatively: some-db-2.some-db.default.svc.cluster.local:27017 failed with Connection refused",
            "code": 74,
            "codeName": "NodeNotFound"
        }

        self.expected_cluster_config = json.dumps({
            "_id": "mongo-cluster",
            "version": 1,
            "members": [
                {"_id": 0, "host": "mongo-cluster-0.mongo-cluster.default.svc.cluster.local"},
                {"_id": 1, "host": "mongo-cluster-1.mongo-cluster.default.svc.cluster.local"},
                {"_id": 2, "host": "mongo-cluster-2.mongo-cluster.default.svc.cluster.local"}
            ]
        })

        # NOTE: must stay byte-identical to the script MongoService generates.
        self.expected_user_create = """
            admin = db.getSiblingDB("admin")
            admin.createUser({
                user: "******", pwd: "random-password",
                roles: [ { role: "root", db: "admin" } ]
            })
            admin.auth("root", "random-password")
        """

    def _getFixture(self, name):
        """Return the raw mongo shell output stored in the named fixture file."""
        with open("tests/fixtures/mongo_responses/{}.txt".format(name)) as f:
            return f.read()

    def test__execInPod(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "replica-status-not-initialized")
        result = self.service._execInPod(0, "cluster", "default",
                                         "rs.status()")
        self.assertEqual(self.not_initialized_response, result)
        expected_calls = [
            call.execInPod(
                'mongodb', 'cluster-0', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_NodeNotFound(self):
        # NodeNotFound responses are retried until the command succeeds.
        self.kubernetes_service.execInPod.side_effect = (
            self._getFixture("initiate-not-found"),
            self._getFixture("initiate-not-found"),
            self._getFixture("initiate-ok"))
        result = self.service._execInPod(1, "cluster", "default",
                                         "rs.initiate({})")
        self.assertEqual(self.initiate_ok_response, result)
        expected_calls = 3 * [
            call.execInPod('mongodb', 'cluster-1', 'default', [
                'mongo', 'localhost:27017/admin', '--eval', 'rs.initiate({})'
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_connect_failed(self):
        # A "connect failed" ValueError is retried.
        self.kubernetes_service.execInPod.side_effect = ValueError(
            "connect failed"), self._getFixture("initiate-ok")
        result = self.service._execInPod(1, "cluster", "default", "rs.test()")
        self.assertEqual(self.initiate_ok_response, result)
        expected_calls = 2 * [
            call.execInPod(
                'mongodb', 'cluster-1', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_handshake_status(self):
        # A handshake ApiException is retried.
        self.kubernetes_service.execInPod.side_effect = (
            ApiException(500, reason="Handshake status: Failed!"),
            self._getFixture("initiate-ok"))
        result = self.service._execInPod(1, "cluster", "default", "rs.test()")
        self.assertEqual(self.initiate_ok_response, result)
        expected_calls = 2 * [
            call.execInPod(
                'mongodb', 'cluster-1', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_ValueError(self):
        # Any other ValueError propagates immediately (no retry).
        self.kubernetes_service.execInPod.side_effect = ValueError(
            "Value error.")
        with self.assertRaises(ValueError) as context:
            self.service._execInPod(1, "cluster", "default", "rs.test()")
        self.assertEqual("Value error.", str(context.exception))
        expected_calls = [
            call.execInPod(
                'mongodb', 'cluster-1', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_ApiException(self):
        # Any other ApiException propagates immediately (no retry).
        self.kubernetes_service.execInPod.side_effect = ApiException(
            400, reason="A reason.")
        with self.assertRaises(ApiException) as context:
            self.service._execInPod(5, "mongo-cluster", "ns", "rs.test()")

        self.assertEqual("(400)\nReason: A reason.\n", str(context.exception))
        expected_calls = [
            call.execInPod(
                'mongodb', 'mongo-cluster-5', 'ns',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test__execInPod_TimeoutError(self):
        # After exhausting the retry budget a TimeoutError is raised.
        self.kubernetes_service.execInPod.side_effect = (
            ValueError("connection attempt failed"),
            ApiException(500, reason="Handshake status: Failed!"),
            self._getFixture("initiate-not-found"),
            ApiException(404, reason="Handshake status: error"))
        with self.assertRaises(TimeoutError) as context:
            self.service._execInPod(5, "mongo-cluster", "ns", "rs.test()")

        self.assertEqual("Could not check the replica set after 4 retries!",
                         str(context.exception))
        expected_calls = 4 * [
            call.execInPod(
                'mongodb', 'mongo-cluster-5', 'ns',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_initializeReplicaSet(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "initiate-ok")
        self.service.initializeReplicaSet(self.cluster_object)
        expected_calls = [
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                'rs.initiate({})'.format(self.expected_cluster_config)
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_initializeReplicaSet_ValueError(self):
        exec_result = self._getFixture("initiate-not-found").replace(
            "NodeNotFound", "Error")
        self.kubernetes_service.execInPod.return_value = exec_result
        with self.assertRaises(ValueError) as context:
            self.service.initializeReplicaSet(self.cluster_object)

        self.initiate_not_found_response["codeName"] = "Error"
        self.assertEqual(
            "Unexpected response initializing replica set mongo-cluster @ ns/default:\n"
            + str(self.initiate_not_found_response), str(context.exception))

    def test_reconfigureReplicaSet(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "initiate-ok")
        self.service.reconfigureReplicaSet(self.cluster_object)
        expected_calls = [
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                'rs.reconfig({})'.format(self.expected_cluster_config)
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_reconfigureReplicaSet_ValueError(self):
        exec_result = self._getFixture("initiate-not-found").replace(
            "NodeNotFound", "Error")
        self.kubernetes_service.execInPod.return_value = exec_result
        with self.assertRaises(ValueError) as context:
            self.service.reconfigureReplicaSet(self.cluster_object)

        self.initiate_not_found_response["codeName"] = "Error"
        self.assertEqual(
            "Unexpected response reconfiguring replica set mongo-cluster @ ns/default:\n"
            + str(self.initiate_not_found_response), str(context.exception))

    def test_checkReplicaSetOrInitialize_ok(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "replica-status-ok")
        self.service.checkReplicaSetOrInitialize(self.cluster_object)
        expected_calls = [
            call.execInPod(
                'mongodb', 'mongo-cluster-0', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_checkReplicaSetOrInitialize_initialize(self):
        # An uninitialized replica set triggers rs.initiate with the full config.
        self.kubernetes_service.execInPod.side_effect = (
            self._getFixture("replica-status-not-initialized"),
            self._getFixture("initiate-ok"))
        self.service.checkReplicaSetOrInitialize(self.cluster_object)
        expected_calls = [
            call.execInPod(
                'mongodb', 'mongo-cluster-0', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']),
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                'rs.initiate({})'.format(self.expected_cluster_config)
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_checkReplicaSetOrInitialize_reconfigure(self):
        # A replica-count change triggers rs.reconfig with the extended config.
        self.cluster_object.spec.mongodb.replicas = 4
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "replica-status-ok")
        self.service.checkReplicaSetOrInitialize(self.cluster_object)

        cluster_config = json.loads(self.expected_cluster_config)
        cluster_config["members"].append({
            "_id": 3,
            "host": "mongo-cluster-3.mongo-cluster.default.svc.cluster.local"
        })
        self.expected_cluster_config = json.dumps(cluster_config)

        expected_calls = [
            call.execInPod(
                'mongodb', 'mongo-cluster-0', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']),
            call.execInPod(
                'mongodb',
                'mongo-cluster-0',
                'default',
                [
                    'mongo', 'localhost:27017/admin', '--eval',
                    'rs.reconfig({})'.format(self.expected_cluster_config)
                ],
            )
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_checkReplicaSetOrInitialize_ValueError(self):
        response = self._getFixture("replica-status-ok").replace(
            '"ok" : 1', '"ok" : 2')
        self.kubernetes_service.execInPod.return_value = response

        with self.assertRaises(ValueError) as context:
            self.service.checkReplicaSetOrInitialize(self.cluster_object)

        expected_calls = [
            call.execInPod(
                'mongodb', 'mongo-cluster-0', 'default',
                ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
        self.assertIn("Unexpected response trying to check replicas: ",
                      str(context.exception))

    def test_createUsers_ok(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "createUser-ok")

        self.service.createUsers(self.cluster_object)
        expected_calls = [
            call.getSecret('mongo-cluster-admin-credentials', 'default'),
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                self.expected_user_create
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_createUsers_ValueError(self):
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "createUser-ok").replace('"user"', '"error"')

        with self.assertRaises(ValueError) as context:
            self.service.createUsers(self.cluster_object)
        expected_calls = [
            call.getSecret('mongo-cluster-admin-credentials', 'default'),
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                self.expected_user_create
            ])
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
        self.assertEqual(
            "Unexpected response creating users for pod mongo-cluster-0 @ ns/default:\n"
            "{'error': 'root', 'roles': [{'role': 'root', 'db': 'admin'}]}",
            str(context.exception))

    def test_createUsers_not_master_then_already_exists(self):
        # "not master" on pod 0 moves on to pod 1, where "already exists" is OK.
        self.kubernetes_service.execInPod.side_effect = (
            self._getFixture("createUser-notMaster"),
            self._getFixture("createUser-exists"))

        self.service.createUsers(self.cluster_object)
        expected_calls = [
            call.getSecret('mongo-cluster-admin-credentials', 'default'),
            call.execInPod('mongodb', 'mongo-cluster-0', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                self.expected_user_create
            ]),
            call.execInPod('mongodb', 'mongo-cluster-1', 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                self.expected_user_create
            ]),
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

    def test_createUsers_TimeoutError(self):
        # Every pod reporting "not master" exhausts 4 rounds over 3 pods.
        self.kubernetes_service.execInPod.return_value = self._getFixture(
            "createUser-notMaster")

        with self.assertRaises(TimeoutError) as context:
            self.service.createUsers(self.cluster_object)
        expected_calls = [
            call.getSecret('mongo-cluster-admin-credentials', 'default')
        ] + [
            call.execInPod('mongodb', 'mongo-cluster-' + str(pod), 'default', [
                'mongo', 'localhost:27017/admin', '--eval',
                self.expected_user_create
            ]) for _ in range(4) for pod in range(3)
        ]
        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
        self.assertEqual(
            "Could not create users in any of the 3 pods of cluster mongo-cluster @ ns/default.",
            str(context.exception))
class TestMongoService(TestCase):
    maxDiff = None

    def setUp(self):
        super().setUp()
        self.kubernetes_service = MagicMock()
        self.dummy_credentials = b64encode(
            json.dumps({
                "user": "******"
            }).encode())
        self.kubernetes_service.getSecret.return_value = V1Secret(
            metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials",
                                  namespace="default"),
            data={
                "password": b64encode(b"random-password"),
                "username": b64encode(b"root"),
                "json": self.dummy_credentials
            },
        )
        self.service = MongoService(self.kubernetes_service)
        self.cluster_dict = getExampleClusterDefinition()
        self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

        self.not_initialized_response = {
            "info": "run rs.initiate(...) if not yet done for the set",
            "ok": 0,
            "errmsg": "no replset config has been received",
            "code": 94,
            "codeName": "NotYetInitialized"
        }

        self.initiate_ok_response = loads("""
            {"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime":
            {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=",
            "$type": "00"}, "keyId": 0}}}
        """)

        self.initiate_not_found_response = loads("""
            {"ok": 2, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime":
            {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=",
            "$type": "00"}, "keyId": 0}}}
        """)

        self.expected_cluster_config = {
            "_id":
            "mongo-cluster",
            "version":
            1,
            "members": [{
                "_id":
                0,
                "host":
                "mongo-cluster-0.mongo-cluster.mongo-operator-cluster.svc.cluster.local"
            }, {
                "_id":
                1,
                "host":
                "mongo-cluster-1.mongo-cluster.mongo-operator-cluster.svc.cluster.local"
            }, {
                "_id":
                2,
                "host":
                "mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local"
            }]
        }

        self.expected_user_create = {
            "pwd": "random-password",
            "roles": [{
                "role": "root",
                "db": "admin"
            }]
        }

    @staticmethod
    def _getFixture(name):
        with open("tests/fixtures/mongo_responses/{}.json".format(name)) as f:
            return loads(f.read())

    def test_mongoAdminCommand(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.return_value = self._getFixture(
            "initiate-ok")
        result = self.service._executeAdminCommand(self.cluster_object,
                                                   "replSetInitiate")
        self.assertEqual(self.initiate_ok_response, result)

    def test__mongoAdminCommand_NodeNotFound(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.side_effect = OperationFailure(
            "replSetInitiate quorum check failed because not all proposed set members responded affirmatively:"
        )

        with self.assertRaises(OperationFailure) as ex:
            mongo_command, mongo_args = MongoResources.createReplicaInitiateCommand(
                self.cluster_object)
            self.service._executeAdminCommand(self.cluster_object,
                                              mongo_command, mongo_args)

        self.assertIn("replSetInitiate quorum check failed", str(ex.exception))

    def test__mongoAdminCommand_connect_failed(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.side_effect = (
            ConnectionFailure("connection attempt failed"),
            self._getFixture("initiate-ok"))
        result = self.service._executeAdminCommand(self.cluster_object,
                                                   "replSetGetStatus")
        self.assertEqual(self.initiate_ok_response, result)

    def test__mongoAdminCommand_TimeoutError(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.side_effect = (
            ConnectionFailure("connection attempt failed"),
            ConnectionFailure("connection attempt failed"),
            ConnectionFailure("connection attempt failed"),
            ConnectionFailure("connection attempt failed"),
            OperationFailure("no replset config has been received"))

        with self.assertRaises(TimeoutError) as context:
            self.service._executeAdminCommand(self.cluster_object,
                                              "replSetGetStatus")

        self.assertEqual("Could not execute command after 4 retries!",
                         str(context.exception))

    def test__mongoAdminCommand_NoPrimary(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.side_effect = (
            ConnectionFailure(
                "No replica set members match selector \"Primary()\""),
            self._getFixture("initiate-ok"), self._getFixture("initiate-ok"))

        self.service._executeAdminCommand(self.cluster_object,
                                          "replSetGetStatus")

    def test_initializeReplicaSet(self, mongo_client_mock):
        mongo_client_mock.return_value.admin.command.return_value = self._getFixture(
            "initiate-ok")
        self.service._initializeReplicaSet(self.cluster_object)

    def test_initializeReplicaSet_ValueError(self, mongo_client_mock):
        """A non-ok response from replSetInitiate raises a descriptive ValueError."""
        command_result = self._getFixture("initiate-ok")
        command_result["ok"] = 2
        mongo_client_mock.return_value.admin.command.return_value = command_result

        with self.assertRaises(ValueError) as context:
            self.service._initializeReplicaSet(self.cluster_object)

        expected_message = (
            "Unexpected response initializing replica set mongo-cluster @ ns/"
            + self.cluster_object.metadata.namespace + ":\n"
            + str(self.initiate_not_found_response))
        self.assertEqual(expected_message, str(context.exception))

    def test_reconfigureReplicaSet(self, mongo_client_mock):
        """Reconfiguring the replica set succeeds when the command returns the ok fixture."""
        admin_command = mongo_client_mock.return_value.admin.command
        admin_command.return_value = self._getFixture("initiate-ok")

        self.service._reconfigureReplicaSet(self.cluster_object)

    def test_reconfigureReplicaSet_ValueError(self, mongo_client_mock):
        """A non-ok response from replSetReconfig raises a descriptive ValueError."""
        command_result = self._getFixture("initiate-ok")
        command_result["ok"] = 2
        mongo_client_mock.return_value.admin.command.return_value = command_result

        with self.assertRaises(ValueError) as context:
            self.service._reconfigureReplicaSet(self.cluster_object)

        expected_message = (
            "Unexpected response reconfiguring replica set mongo-cluster @ ns/mongo-operator-cluster:\n"
            + str(self.initiate_not_found_response))
        self.assertEqual(expected_message, str(context.exception))

    def test_checkOrCreateReplicaSet_ok(self, mongo_client_mock):
        """A healthy replica set status requires no further action."""
        admin_command = mongo_client_mock.return_value.admin.command
        admin_command.return_value = self._getFixture("replica-status-ok")

        self.service.checkOrCreateReplicaSet(self.cluster_object)

    def test_checkOrCreateReplicaSet_initialize(self, mongo_client_mock):
        """An uninitialized replica set gets initialized on the first check."""
        admin_command = mongo_client_mock.return_value.admin.command
        admin_command.side_effect = (
            OperationFailure("no replset config has been received"),
            self._getFixture("initiate-ok"),
        )

        self.service.checkOrCreateReplicaSet(self.cluster_object)

    def test_checkOrCreateReplicaSet_reconfigure(self, mongo_client_mock):
        # Raising the replica count from the default should make the checker
        # reconfigure the existing replica set instead of initializing it.
        self.cluster_object.spec.mongodb.replicas = 4
        mongo_client_mock.return_value.admin.command.return_value = self._getFixture(
            "replica-status-ok")
        self.service.checkOrCreateReplicaSet(self.cluster_object)
        # NOTE(review): this append runs AFTER the exercised call and is never
        # asserted against, so it currently has no effect on the test outcome.
        # Confirm whether it was meant to precede the call or back an assertion.
        self.expected_cluster_config["members"].append({
            "_id":
            3,
            "host":
            "mongo-cluster-3.mongo-cluster.mongo-cluster.svc.cluster.local"
        })

    def test_checkOrCreateReplicaSet_ValueError(self, mongo_client_mock):
        """A malformed replica status (ok != 1) surfaces as a ValueError."""
        status_fixture = self._getFixture("replica-status-ok")
        status_fixture["ok"] = 2
        mongo_client_mock.return_value.admin.command.return_value = status_fixture

        with self.assertRaises(ValueError) as context:
            self.service.checkOrCreateReplicaSet(self.cluster_object)

        self.assertIn("Unexpected response trying to check replicas: ",
                      str(context.exception))

    def test_checkOrCreateReplicaSet_OperationalFailure(
            self, mongo_client_mock):
        """An unrecoverable OperationFailure is re-raised to the caller unchanged."""
        bad_value = (
            "BadValue: Unexpected field foo in replica set member configuration for member:"
            "{ _id: 0, foo: \"localhost:27017\" }")
        mongo_client_mock.return_value.admin.command.side_effect = (
            OperationFailure(bad_value),
            OperationFailure(bad_value),
        )

        with self.assertRaises(OperationFailure) as context:
            self.service.checkOrCreateReplicaSet(self.cluster_object)

        self.assertEqual(str(context.exception), bad_value)

    def test_createUsers_ok(self, mongo_client_mock):
        """User creation succeeds when createUser returns the ok fixture."""
        admin_command = mongo_client_mock.return_value.admin.command
        admin_command.side_effect = (None, self._getFixture("createUser-ok"))

        self.service.createUsers(self.cluster_object)

    def test_createUsers_ValueError(self, mongo_client_mock):
        """An OperationFailure raised by createUser propagates unchanged."""
        message = "\"createUser\" had the wrong type. Expected string, found object"
        mongo_client_mock.return_value.admin.command.side_effect = (
            None,
            OperationFailure(message),
        )

        with self.assertRaises(OperationFailure) as context:
            self.service.createUsers(self.cluster_object)

        self.assertEqual(message, str(context.exception))

    def test_createUsers_TimeoutError(self, mongo_client_mock):
        """Repeated connection failures during user creation exhaust the retries."""
        failures = [ConnectionFailure("connection attempt failed")] * 4
        mongo_client_mock.return_value.admin.command.side_effect = [None] + failures

        with self.assertRaises(TimeoutError) as context:
            self.service.createUsers(self.cluster_object)

        self.assertEqual("Could not execute command after 4 retries!",
                         str(context.exception))

    def test_onReplicaSetReady(self, mongo_client_mock):
        """A freshly ready replica set triggers the restore check without touching Mongo."""
        restore_mock = MagicMock()
        self.service._restore_helper.restoreIfNeeded = restore_mock

        self.service._onReplicaSetReady(self.cluster_object)

        restore_mock.assert_called()
        mongo_client_mock.assert_not_called()

    def test_onReplicaSetReady_alreadyRestored(self, mongo_client_mock):
        """A cluster already marked as restored skips the restore check entirely."""
        restore_mock = MagicMock()
        self.service._restore_helper.restoreIfNeeded = restore_mock
        self.service._restored_cluster_names.append("mongo-cluster")

        self.service._onReplicaSetReady(self.cluster_object)

        restore_mock.assert_not_called()
        mongo_client_mock.assert_not_called()

    def test_onAllHostsReady(self, mongo_client_mock):
        """All hosts becoming ready triggers the replica set check without touching Mongo."""
        replica_set_check_mock = MagicMock()
        self.service.checkOrCreateReplicaSet = replica_set_check_mock

        self.service._onAllHostsReady(self.cluster_object)

        replica_set_check_mock.assert_called()
        mongo_client_mock.assert_not_called()
class ClusterChecker:
    """
    Manager that periodically checks the status of the MongoDB objects in the cluster.
    """

    # (connect timeout, read timeout) in seconds, passed to the watch stream.
    STREAM_REQUEST_TIMEOUT = (15.0, 5.0)

    def __init__(self) -> None:
        self.kubernetes_service = KubernetesService()
        self.mongo_service = MongoService(self.kubernetes_service)

        # Checkers that keep each dependent Kubernetes resource in sync.
        self.resource_checkers: List[BaseResourceChecker] = [
            ServiceChecker(self.kubernetes_service),
            StatefulSetChecker(self.kubernetes_service),
            AdminSecretChecker(self.kubernetes_service),
        ]

        self.backup_checker = BackupChecker(self.kubernetes_service)

        # format: {(cluster_name, namespace): resource_version}
        self.cluster_versions: Dict[Tuple[str, str], str] = {}

    @staticmethod
    def _parseConfiguration(
            cluster_dict: Dict[str, object]
    ) -> Optional[V1MongoClusterConfiguration]:
        """
        Tries to parse the given cluster configuration, returning None if the object cannot be parsed.
        :param cluster_dict: The dictionary containing the configuration.
        :return: The cluster configuration model, if valid, or None.
        """
        try:
            result = V1MongoClusterConfiguration(**cluster_dict)
            result.validate()
            return result
        except ValueError as err:
            meta = cluster_dict.get("metadata", {})
            logging.error(
                "Could not validate cluster configuration for {} @ ns/{}: {}. The cluster will be ignored."
                .format(meta.get("name"), meta.get("namespace"), err))
            return None

    def checkExistingClusters(self) -> None:
        """
        Check all Mongo objects and see if the sub objects are available.
        If they are not, they should be (re-)created to ensure the cluster is in the expected state.
        """
        mongo_objects = self.kubernetes_service.listMongoObjects()
        logging.info("Checking %s mongo objects.", len(mongo_objects["items"]))
        for cluster_dict in mongo_objects["items"]:
            cluster_object = self._parseConfiguration(cluster_dict)
            if cluster_object:
                self.checkCluster(cluster_object)

    def streamEvents(self) -> None:
        """
        Watches for changes to the mongo objects in Kubernetes and processes any changes immediately.
        """
        event_watcher = Watch()

        # Start watching from the latest version that we have.
        # NOTE(review): resource versions are opaque strings, so max() compares
        # them lexicographically — confirm this reliably picks the newest one.
        if self.cluster_versions:
            event_watcher.resource_version = max(
                self.cluster_versions.values())

        for event in event_watcher.stream(
                self.kubernetes_service.listMongoObjects,
                _request_timeout=self.STREAM_REQUEST_TIMEOUT):
            logging.info("Received event %s", event)

            event_type = event["type"]
            if event_type in ("ADDED", "MODIFIED"):
                cluster_object = self._parseConfiguration(event["object"])
                if cluster_object:
                    self.checkCluster(cluster_object)
                else:
                    # An invalid object would keep coming back on every retry;
                    # stop the watcher instead of looping on it.
                    logging.warning(
                        "Could not validate cluster object, stopping event watcher."
                    )
                    event_watcher.stop = True
            elif event_type == "DELETED":
                self.collectGarbage()
            else:
                logging.warning(
                    "Could not parse event, stopping event watcher.")
                event_watcher.stop = True

            # Change the resource version manually because of a bug fixed in a later version of the K8s client:
            # https://github.com/kubernetes-client/python-base/pull/64
            event_object = event.get('object')
            if isinstance(event_object, dict) and \
                    'resourceVersion' in event_object.get('metadata', {}):
                event_watcher.resource_version = \
                    event_object['metadata']['resourceVersion']

    def collectGarbage(self) -> None:
        """
        Cleans up any resources that are left after a cluster has been removed.
        """
        for checker in self.resource_checkers:
            checker.cleanResources()

    def checkCluster(self,
                     cluster_object: V1MongoClusterConfiguration,
                     force: bool = False) -> None:
        """
        Checks whether the given cluster is configured and updated.
        :param cluster_object: The cluster object from the YAML file.
        :param force: If this is True, we will re-update the cluster even if it has been checked before.
        """
        key = (cluster_object.metadata.name, cluster_object.metadata.namespace)

        if self.cluster_versions.get(
                key) == cluster_object.metadata.resource_version and not force:
            logging.debug(
                "Cluster object %s has been checked already in version %s.",
                key, cluster_object.metadata.resource_version)
            # We still want to check the replicas to make sure everything is working.
            self.mongo_service.checkReplicaSetOrInitialize(cluster_object)
        else:
            for checker in self.resource_checkers:
                checker.checkResource(cluster_object)
            self.mongo_service.checkReplicaSetOrInitialize(cluster_object)
            self.mongo_service.createUsers(cluster_object)
            self.cluster_versions[
                key] = cluster_object.metadata.resource_version

        # Backups run on every check, whether or not the version changed.
        self.backup_checker.backupIfNeeded(cluster_object)