def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() manager = ConfigurationManager(sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec) with patch.object(manager, 'parse_configuration', return_value={ 'key1': 'v1', 'key2': 'v2' }): self.assertEqual('v1', manager.get_value('key1')) self.assertEqual(None, manager.get_value('key3')) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with(sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with(sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_options = Mock() with patch.object(manager, 'save_configuration') as save_config: manager.render_configuration(sample_options) save_config.assert_called_once_with( sample_codec.serialize.return_value) sample_codec.serialize.assert_called_once_with(sample_options) with patch('trove.guestagent.common.configuration.' 'ConfigurationOverrideStrategy') as mock_strategy: manager.set_override_strategy(mock_strategy) manager._current_revision = 3 manager.save_configuration(sample_contents) mock_strategy.remove_last.assert_called_once_with( manager._current_revision + 1) write_file.assert_called_with(sample_path, sample_contents, as_root=sample_requires_root)
def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() sample_strategy = MagicMock() sample_strategy.configure = Mock() sample_strategy.parse_updates = Mock(return_value={}) manager = ConfigurationManager(sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root, override_strategy=sample_strategy) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec, as_root=sample_requires_root) with patch.object(manager, 'parse_configuration', return_value={ 'key1': 'v1', 'key2': 'v2' }): self.assertEqual('v1', manager.get_value('key1')) self.assertIsNone(manager.get_value('key3')) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with(sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with(sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_data = {} manager.apply_system_override(sample_data) manager.apply_user_override(sample_data) manager.apply_system_override(sample_data, change_id='sys1') manager.apply_user_override(sample_data, change_id='usr1') sample_strategy.apply.has_calls([ call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.SYSTEM_GROUP, 'sys1', sample_data), call(manager.USER_GROUP, 'usr1', sample_data) ])
def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() sample_strategy = MagicMock() sample_strategy.configure = Mock() sample_strategy.parse_updates = Mock(return_value={}) manager = ConfigurationManager( sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root, override_strategy=sample_strategy) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec, as_root=sample_requires_root) with patch.object(manager, 'parse_configuration', return_value={'key1': 'v1', 'key2': 'v2'}): self.assertEqual('v1', manager.get_value('key1')) self.assertIsNone(manager.get_value('key3')) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with( sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with( sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_data = {} manager.apply_system_override(sample_data) manager.apply_user_override(sample_data) manager.apply_system_override(sample_data, change_id='sys1') manager.apply_user_override(sample_data, change_id='usr1') manager.apply_system_override(sample_data, change_id='sys2', pre_user=True) sample_strategy.apply.has_calls([ call(manager.SYSTEM_POST_USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.SYSTEM_POST_USER_GROUP, 'sys1', sample_data), call(manager.USER_GROUP, 'usr1', sample_data), call(manager.SYSTEM_PRE_USER_GROUP, 'sys2', sample_data), ])
def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() sample_strategy = MagicMock() sample_strategy.configure = Mock() sample_strategy.parse_updates = Mock(return_value={}) manager = ConfigurationManager( sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root, override_strategy=sample_strategy, ) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec) with patch.object(manager, "parse_configuration", return_value={"key1": "v1", "key2": "v2"}): self.assertEqual("v1", manager.get_value("key1")) self.assertIsNone(manager.get_value("key3")) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with(sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with(sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_data = {} manager.apply_system_override(sample_data) manager.apply_user_override(sample_data) manager.apply_system_override(sample_data, change_id="sys1") manager.apply_user_override(sample_data, change_id="usr1") sample_strategy.apply.has_calls( [ call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.SYSTEM_GROUP, "sys1", sample_data), call(manager.USER_GROUP, "usr1", sample_data), ] )
def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() manager = ConfigurationManager( sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec) with patch.object(manager, 'parse_configuration', return_value={'key1': 'v1', 'key2': 'v2'}): self.assertEqual('v1', manager.get_value('key1')) self.assertEqual(None, manager.get_value('key3')) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with( sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with( sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_options = Mock() with patch.object(manager, 'save_configuration') as save_config: manager.render_configuration(sample_options) save_config.assert_called_once_with( sample_codec.serialize.return_value) sample_codec.serialize.assert_called_once_with(sample_options) with patch('trove.guestagent.common.configuration.' 'ConfigurationOverrideStrategy') as mock_strategy: manager.set_override_strategy(mock_strategy) manager._current_revision = 3 manager.save_configuration(sample_contents) mock_strategy.remove_last.assert_called_once_with( manager._current_revision + 1) write_file.assert_called_with( sample_path, sample_contents, as_root=sample_requires_root)
class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.is_query_router = False self.is_cluster_member = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info(_("Preparing Guest as MongoDB.")) if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s." % str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info(_("Finished installing MongoDB server.")) def _get_service_candidates(self): if self.is_query_router: return system.MONGOS_SERVICE_CANDIDATES return system.MONGOD_SERVICE_CANDIDATES def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( self._get_service_candidates(), self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( self._get_service_candidates(), self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service( self._get_service_candidates(), self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info(_('Starting MongoDB with configuration changes.')) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration( None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def apply_initial_guestagent_configuration( self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # Mongodb init scripts assume the PID-file path is writable by the # database service. # See: https://jira.mongodb.org/browse/SERVER-20075 self._initialize_writable_run_dir() self.configuration_manager.apply_system_override( {'processManagement.fork': False, 'processManagement.pidFilePath': system.MONGO_PID_FILE, 'systemLog.destination': 'file', 'systemLog.path': system.MONGO_LOG_FILE, 'systemLog.logAppend': True }) if mount_point: self.configuration_manager.apply_system_override( {'storage.dbPath': mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _initialize_writable_run_dir(self): """Create a writable directory for Mongodb's runtime data (e.g. PID-file). 
""" mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir) operating_system.create_directory( mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. """ if cluster_config['instance_type'] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member( cluster_config['replica_set_name']) else: LOG.error(_("Bad cluster configuration; instance type " "given as %s.") % cluster_config['instance_type']) return ds_instance.ServiceStatuses.FAILED if 'key' in cluster_config: self._configure_cluster_security(cluster_config['key']) def _configure_as_query_router(self): LOG.info(_("Configuring instance as a cluster query router.")) self.is_query_router = True # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. # Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict( self.configuration_manager.parse_configuration()) if 'storage' in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config['storage'] self.configuration_manager.save_configuration( guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'sharding.configDB': ''}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info(_("Configuring instance as a cluster config server.")) self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override( {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info(_("Configuring instance as a cluster member.")) self.is_cluster_member = True self._configure_network(MONGODB_PORT) # we don't want these thinking they are in a replica set yet # as that would prevent us from creating the admin user, # so start mongo before updating the config. # mongo will be started by the cluster taskmanager self.start_db() self.configuration_manager.apply_system_override( {'replication.replSetName': replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. This will enabled RBAC. """ # Store the cluster member authentication key. self.store_key(key_value) self.configuration_manager.apply_system_override( {'security.clusterAuthMode': 'keyFile', 'security.keyFile': self.get_key_file()}, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ','.join([instance_ip, '127.0.0.1']) options = {'net.bindIp': bind_interfaces_string} if port is not None: guestagent_utils.update_dict({'net.port': port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s." 
% mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error clearing storage.")) def _has_config_db(self): value_string = self.configuration_manager.get_value( 'sharding', {}).get('configDB') return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT) for host in config_server_hosts]) LOG.info(_("Setting config servers: %s") % config_servers_string) self.configuration_manager.apply_system_override( {'sharding.configDB': config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s"\ % {'rs': replica_set_name, 'host': replica_set_member, 'port': MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if((status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1)): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=30, time_out=100) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=10, time_out=100) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. 
""" self.configuration_manager.apply_system_override( {'setParameter': {'enableLocalhostAuthBypass': enabled}}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())['dataSize'] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return operating_system.read_file( system.MONGO_KEY_FILE, as_root=True).rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug('Storing key for MongoDB cluster.') operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug('Storing admin password.') creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) user = models.MongoDBUser(name='admin.%s' % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES # the driver engine is already cached, but we need to change it it with MongoDBClient(None, host='localhost', port=MONGODB_PORT) as client: MongoDBAdmin().create_validated_user(user, client=client) # now revert to the normal engine self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT) LOG.debug('Created admin user.') def secure(self): """Create the Trove admin user. The service should not be running at this point. This will enable role-based access control (RBAC) by default. """ if self.status.is_running: raise RuntimeError(_("Cannot secure the instance. " "The service is still running.")) try: self.configuration_manager.apply_system_override( {'security.authorization': 'enabled'}) self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. """ return self.configuration_manager.get_value(name, default) def prep_primary(self): # Prepare the primary member of a replica set. password = utils.generate_random_password() self.create_admin_user(password) self.restart() @property def replica_set_name(self): return MongoDBAdmin().get_repl_status()['set'] @property def admin_password(self): creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) return creds.password def is_shard_active(self, replica_set_name): shards = MongoDBAdmin().list_active_shards() if replica_set_name in [shard['_id'] for shard in shards]: LOG.debug('Replica set %s is active.' % replica_set_name) return True else: LOG.debug('Replica set %s is not active.' % replica_set_name) return False
class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.is_query_router = False self.is_cluster_member = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info(_("Preparing Guest as MongoDB.")) if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s." % str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info(_("Finished installing MongoDB server.")) def _get_service_candidates(self): if self.is_query_router: return system.MONGOS_SERVICE_CANDIDATES return system.MONGOD_SERVICE_CANDIDATES def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service(self._get_service_candidates(), self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service(self._get_service_candidates(), self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service(self._get_service_candidates(), self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info(_('Starting MongoDB with configuration changes.')) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration( None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def apply_initial_guestagent_configuration(self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # Mongodb init scripts assume the PID-file path is writable by the # database service. # See: https://jira.mongodb.org/browse/SERVER-20075 self._initialize_writable_run_dir() self.configuration_manager.apply_system_override({ 'processManagement.fork': False, 'processManagement.pidFilePath': system.MONGO_PID_FILE, 'systemLog.destination': 'file', 'systemLog.path': system.MONGO_LOG_FILE, 'systemLog.logAppend': True }) if mount_point: self.configuration_manager.apply_system_override( {'storage.dbPath': mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _initialize_writable_run_dir(self): """Create a writable directory for Mongodb's runtime data (e.g. PID-file). 
""" mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir) operating_system.create_directory(mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. """ if cluster_config['instance_type'] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member( cluster_config['replica_set_name']) else: LOG.error( _("Bad cluster configuration; instance type " "given as %s.") % cluster_config['instance_type']) return ds_instance.ServiceStatuses.FAILED if 'key' in cluster_config: self._configure_cluster_security(cluster_config['key']) def _configure_as_query_router(self): LOG.info(_("Configuring instance as a cluster query router.")) self.is_query_router = True # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. # Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict( self.configuration_manager.parse_configuration()) if 'storage' in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config['storage'] self.configuration_manager.save_configuration( guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'sharding.configDB': ''}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info(_("Configuring instance as a cluster config server.")) self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override( {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info(_("Configuring instance as a cluster member.")) self.is_cluster_member = True self._configure_network(MONGODB_PORT) # we don't want these thinking they are in a replica set yet # as that would prevent us from creating the admin user, # so start mongo before updating the config. # mongo will be started by the cluster taskmanager self.start_db() self.configuration_manager.apply_system_override( {'replication.replSetName': replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. This will enabled RBAC. """ # Store the cluster member authentication key. self.store_key(key_value) self.configuration_manager.apply_system_override( { 'security.clusterAuthMode': 'keyFile', 'security.keyFile': self.get_key_file() }, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ','.join([instance_ip, '127.0.0.1']) options = {'net.bindIp': bind_interfaces_string} if port is not None: guestagent_utils.update_dict({'net.port': port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s." 
% mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error clearing storage.")) def _has_config_db(self): value_string = self.configuration_manager.get_value('sharding', {}).get('configDB') return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ','.join( ['%s:%s' % (host, CONFIGSVR_PORT) for host in config_server_hosts]) LOG.info(_("Setting config servers: %s") % config_servers_string) self.configuration_manager.apply_system_override( {'sharding.configDB': config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s"\ % {'rs': replica_set_name, 'host': replica_set_member, 'port': MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if ((status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1)): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=30, time_out=CONF.mongodb.add_members_timeout) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=10, time_out=CONF.mongodb.add_members_timeout) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. 
""" self.configuration_manager.apply_system_override( {'setParameter': { 'enableLocalhostAuthBypass': enabled }}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())['dataSize'] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return operating_system.read_file(system.MONGO_KEY_FILE, as_root=True).rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug('Storing key for MongoDB cluster.') operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug('Storing admin password.') creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) user = models.MongoDBUser(name='admin.%s' % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES # the driver engine is already cached, but we need to change it it with MongoDBClient(None, host='localhost', port=MONGODB_PORT) as client: MongoDBAdmin().create_validated_user(user, client=client) # now revert to the normal engine self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT) LOG.debug('Created admin user.') def secure(self): """Create the Trove admin user. The service should not be running at this point. This will enable role-based access control (RBAC) by default. """ if self.status.is_running: raise RuntimeError( _("Cannot secure the instance. " "The service is still running.")) try: self.configuration_manager.apply_system_override( {'security.authorization': 'enabled'}) self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. """ return self.configuration_manager.get_value(name, default) def prep_primary(self): # Prepare the primary member of a replica set. password = utils.generate_random_password() self.create_admin_user(password) self.restart() @property def replica_set_name(self): return MongoDBAdmin().get_repl_status()['set'] @property def admin_password(self): creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) return creds.password def is_shard_active(self, replica_set_name): shards = MongoDBAdmin().list_active_shards() if replica_set_name in [shard['_id'] for shard in shards]: LOG.debug('Replica set %s is active.' % replica_set_name) return True else: LOG.debug('Replica set %s is not active.' % replica_set_name) return False
class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" @classmethod def _init_overrides_dir(cls): """Initialize a directory for configuration overrides. """ revision_dir = guestagent_utils.build_file_path( os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR ) if not os.path.exists(revision_dir): operating_system.create_directory( revision_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True ) return revision_dir def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = self._init_overrides_dir() self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir), ) self.is_query_router = False self.is_cluster_member = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info(_("Preparing Guest as MongoDB.")) if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s." % str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info(_("Finished installing MongoDB server.")) def _get_service(self): if self.is_query_router: return operating_system.service_discovery(system.MONGOS_SERVICE_CANDIDATES) else: return operating_system.service_discovery(system.MONGOD_SERVICE_CANDIDATES) def _enable_db_on_boot(self): LOG.info(_("Enabling MongoDB on boot.")) try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service["cmd_enable"], shell=True) except KeyError: raise RuntimeError(_("MongoDB service is not discovered.")) def _disable_db_on_boot(self): LOG.info(_("Disabling MongoDB on boot.")) try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service["cmd_disable"], shell=True) except KeyError: raise RuntimeError("MongoDB service is not discovered.") def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.info(_("Stopping MongoDB.")) if do_not_start_on_reboot: self._disable_db_on_boot() try: mongo_service = self._get_service() # TODO(ramashri) see if hardcoded values can be removed utils.execute_with_timeout(mongo_service["cmd_stop"], shell=True, timeout=100) except KeyError: raise RuntimeError(_("MongoDB service is not discovered.")) if not self.status.wait_for_real_status_to_change_to( ds_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db ): LOG.error(_("Could not stop MongoDB.")) self.status.end_install_or_restart() raise RuntimeError(_("Could not stop MongoDB")) def restart(self): LOG.info(_("Restarting MongoDB.")) try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_install_or_restart() def start_db(self, update_db=False): LOG.info(_("Starting MongoDB.")) self._enable_db_on_boot() try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service["cmd_start"], shell=True) except ProcessExecutionError: pass except KeyError: raise RuntimeError("MongoDB service is not discovered.") self.wait_for_start(update_db=update_db) def wait_for_start(self, update_db=False): LOG.debug("Waiting for MongoDB to start.") if not self.status.wait_for_real_status_to_change_to( ds_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db ): LOG.error(_("Start up of MongoDB failed.")) # If it won't start, but won't die either, kill it by hand so we # don't let a rouge process wander 
around. try: out, err = utils.execute_with_timeout(system.FIND_PID, shell=True) pid = "".join(out.split(" ")[1:2]) utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) except exception.ProcessExecutionError: LOG.exception(_("Error killing MongoDB start command.")) # There's nothing more we can do... self.status.end_install_or_restart() raise RuntimeError("Could not start MongoDB.") LOG.debug("MongoDB started successfully.") def complete_install_or_restart(self): self.status.end_install_or_restart() def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info(_("Starting MongoDB with configuration changes.")) if self.status.is_running: format = "Cannot start_db_with_conf_changes because status is %s." LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration(None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def reset_configuration(self, configuration): LOG.info(_("Resetting configuration.")) config_contents = configuration["config_contents"] self.configuration_manager.save_configuration(config_contents) def apply_initial_guestagent_configuration(self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # Mongodb init scripts assume the PID-file path is writable by the # database service. # See: https://jira.mongodb.org/browse/SERVER-20075 self._initialize_writable_run_dir() # todo mvandijk: enable authorization. # 'security.authorization': True self.configuration_manager.apply_system_override( { "processManagement.fork": False, "processManagement.pidFilePath": system.MONGO_PID_FILE, "systemLog.destination": "file", "systemLog.path": system.MONGO_LOG_FILE, "systemLog.logAppend": True, } ) if mount_point: self.configuration_manager.apply_system_override({"storage.dbPath": mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _initialize_writable_run_dir(self): """Create a writable directory for Mongodb's runtime data (e.g. PID-file). """ mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir) operating_system.create_directory( mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True ) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. 
""" if cluster_config["instance_type"] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member(cluster_config["replica_set_name"]) else: LOG.error(_("Bad cluster configuration; instance type " "given as %s.") % cluster_config["instance_type"]) return ds_instance.ServiceStatuses.FAILED if "key" in cluster_config: self._configure_cluster_security(cluster_config["key"]) def _configure_as_query_router(self): LOG.info(_("Configuring instance as a cluster query router.")) self.is_query_router = True # Write the 'mongos' upstart script. # FIXME(pmalik): The control script should really be written in the # elements. # The guestagent will choose the right daemon ('mongod' or 'mongos') # based on the 'cluster_config' values. upstart_contents = system.MONGOS_UPSTART_CONTENTS.format(config_file_placeholder=CONFIG_FILE) operating_system.write_file(system.MONGOS_UPSTART, upstart_contents, as_root=True) # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. # Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict(self.configuration_manager.parse_configuration()) if "storage" in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config["storage"] self.configuration_manager.save_configuration(guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override({"sharding.configDB": ""}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info(_("Configuring instance as a cluster config server.")) self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override({"sharding.clusterRole": "configsvr"}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info(_("Configuring instance as a cluster member.")) self.is_cluster_member = True self._configure_network(MONGODB_PORT) # we don't want these thinking they are in a replica set yet # as that would prevent us from creating the admin user, # so start mongo before updating the config. # mongo will be started by the cluster taskmanager self.start_db() self.configuration_manager.apply_system_override({"replication.replSetName": replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. """ # Store the cluster member authentication key. self.store_key(key_value) # TODO(mvandijk): enable cluster security once Trove features are in # self.configuration_manager.apply_system_override( # {'security.clusterAuthMode': 'keyFile', # 'security.keyFile': self.get_key_file()}, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ",".join([instance_ip, "127.0.0.1"]) options = {"net.bindIp": bind_interfaces_string} if port is not None: guestagent_utils.update_dict({"net.port": port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s." 
% mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error clearing storage.")) def _has_config_db(self): value_string = self.configuration_manager.get_value("sharding", {}).get("configDB") return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ",".join(["%s:27019" % host for host in config_server_hosts]) LOG.info(_("Setting config servers: %s") % config_servers_string) self.configuration_manager.apply_system_override({"sharding.configDB": config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s" % {"rs": replica_set_name, "host": replica_set_member, "port": MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if (status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=30, time_out=100) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=10, time_out=100) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. 
""" self.configuration_manager.apply_system_override({"setParameter": {"enableLocalhostAuthBypass": enabled}}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())["dataSize"] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return operating_system.read_file(system.MONGO_KEY_FILE, as_root=True).rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug("Storing key for MongoDB cluster.") operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug("Storing admin password.") creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug("Creating the admin user.") creds = self.store_admin_password(password) user = models.MongoDBUser(name="admin.%s" % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES # the driver engine is already cached, but we need to change it it with MongoDBClient(None, host="localhost", port=MONGODB_PORT) as client: MongoDBAdmin().create_user(user, client=client) # now revert to the normal engine self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT) LOG.debug("Created admin user.") def secure(self): """Create the Trove admin user. The service should not be running at this point. """ if self.status.is_running: raise RuntimeError(_("Cannot secure the instance. " "The service is still running.")) try: self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. """ return self.configuration_manager.get_value(name, default) def prep_primary(self): # Prepare the primary member of a replica set. password = utils.generate_random_password() self.create_admin_user(password) self.restart() @property def replica_set_name(self): return MongoDBAdmin().get_repl_status()["set"] @property def admin_password(self): creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) return creds.password def is_shard_active(self, replica_set_name): shards = MongoDBAdmin().list_active_shards() if replica_set_name in [shard["_id"] for shard in shards]: LOG.debug("Replica set %s is active." % replica_set_name) return True else: LOG.debug("Replica set %s is not active." % replica_set_name) return False
class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" @classmethod def _init_overrides_dir(cls): """Initialize a directory for configuration overrides. """ revision_dir = guestagent_utils.build_file_path( os.path.dirname(system.MONGO_USER), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) if not os.path.exists(revision_dir): operating_system.create_directory(revision_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True) return revision_dir def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = self._init_overrides_dir() self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.is_query_router = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info(_("Preparing Guest as MongoDB.")) if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s." % str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info(_("Finished installing MongoDB server.")) def _get_service(self): if self.is_query_router: return (operating_system.service_discovery( system.MONGOS_SERVICE_CANDIDATES)) else: return (operating_system.service_discovery( system.MONGOD_SERVICE_CANDIDATES)) def _enable_db_on_boot(self): LOG.info(_("Enabling MongoDB on boot.")) try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service['cmd_enable'], shell=True) except KeyError: raise RuntimeError(_("MongoDB service is not discovered.")) def _disable_db_on_boot(self): LOG.info(_("Disabling MongoDB on boot.")) try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service['cmd_disable'], shell=True) except KeyError: raise RuntimeError("MongoDB service is not discovered.") def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.info(_("Stopping MongoDB.")) if do_not_start_on_reboot: self._disable_db_on_boot() try: mongo_service = self._get_service() # TODO(ramashri) see if hardcoded values can be removed utils.execute_with_timeout(mongo_service['cmd_stop'], shell=True, timeout=100) except KeyError: raise RuntimeError(_("MongoDB service is not discovered.")) if not self.status.wait_for_real_status_to_change_to( ds_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db): LOG.error(_("Could not stop MongoDB.")) self.status.end_install_or_restart() raise RuntimeError(_("Could not stop MongoDB")) def restart(self): LOG.info(_("Restarting MongoDB.")) try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_install_or_restart() def start_db(self, update_db=False): LOG.info(_("Starting MongoDB.")) self._enable_db_on_boot() try: mongo_service = self._get_service() utils.execute_with_timeout(mongo_service['cmd_start'], shell=True) except ProcessExecutionError: pass except KeyError: raise RuntimeError("MongoDB service is not discovered.") self.wait_for_start(update_db=update_db) def wait_for_start(self, update_db=False): LOG.debug('Waiting for MongoDB to start.') if not self.status.wait_for_real_status_to_change_to( ds_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db): LOG.error(_("Start up of MongoDB failed.")) # If it won't start, but won't die either, kill it by hand so we # don't let a rouge process wander around. 
try: out, err = utils.execute_with_timeout(system.FIND_PID, shell=True) pid = "".join(out.split(" ")[1:2]) utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) except exception.ProcessExecutionError: LOG.exception(_("Error killing MongoDB start command.")) # There's nothing more we can do... self.status.end_install_or_restart() raise RuntimeError("Could not start MongoDB.") LOG.debug('MongoDB started successfully.') def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info(_('Starting MongoDB with configuration changes.')) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration( None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def reset_configuration(self, configuration): LOG.info(_("Resetting configuration.")) config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) def apply_initial_guestagent_configuration(self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # todo mvandijk: enable authorization. # 'security.authorization': True self.configuration_manager.apply_system_override({ 'processManagement.fork': False, 'processManagement.pidFilePath': system.MONGO_PID_FILE, 'systemLog.destination': 'file', 'systemLog.path': system.MONGO_LOG_FILE, 'systemLog.logAppend': True }) if mount_point: self.configuration_manager.apply_system_override( {'storage.dbPath': mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. """ if cluster_config['instance_type'] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member( cluster_config['replica_set_name']) else: LOG.error( _("Bad cluster configuration; instance type " "given as %s.") % cluster_config['instance_type']) return ds_instance.ServiceStatuses.FAILED if 'key' in cluster_config: self._configure_cluster_security(cluster_config['key']) def _configure_as_query_router(self): LOG.info(_("Configuring instance as a cluster query router.")) self.is_query_router = True # Write the 'mongos' upstart script. # FIXME(pmalik): The control script should really be written in the # elements. # The guestagent will choose the right daemon ('mongod' or 'mongos') # based on the 'cluster_config' values. upstart_contents = (system.MONGOS_UPSTART_CONTENTS.format( config_file_placeholder=CONFIG_FILE)) operating_system.write_file(system.MONGOS_UPSTART, upstart_contents, as_root=True) # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. 
# Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict( self.configuration_manager.parse_configuration()) if 'storage' in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config['storage'] self.configuration_manager.save_configuration( guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'sharding.configDB': ''}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info(_("Configuring instance as a cluster config server.")) self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override( {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info(_("Configuring instance as a cluster member.")) self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'replication.replSetName': replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. """ # Store the cluster member authentication key. self.store_key(key_value) self.configuration_manager.apply_system_override( { 'security.clusterAuthMode': 'keyFile', 'security.keyFile': self.get_key_file() }, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ','.join([instance_ip, '127.0.0.1']) options = {'net.bindIp': bind_interfaces_string} if port is not None: guestagent_utils.update_dict({'net.port': port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s." % mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error clearing storage.")) def _has_config_db(self): value_string = self.configuration_manager.get_value('sharding', {}).get('configDB') return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ','.join( ['%s:27019' % host for host in config_server_hosts]) LOG.info(_("Setting config servers: %s") % config_servers_string) self.configuration_manager.apply_system_override( {'sharding.configDB': config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s"\ % {'rs': replica_set_name, 'host': replica_set_member, 'port': MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if ((status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1)): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. 
""" status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 # Create the admin user on this member. # This is only necessary for setting up the replica set. # The query router will handle requests once this set # is added as a shard. password = utils.generate_random_password() self.create_admin_user(password) # initiate replica-set MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=60, time_out=100) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=60, time_out=100) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. """ self.configuration_manager.apply_system_override( {'setParameter': { 'enableLocalhostAuthBypass': enabled }}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())['dataSize'] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return open(system.MONGO_KEY_FILE).read().rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug('Storing key for MongoDB cluster.') operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug('Storing admin password.') creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) user = models.MongoDBUser(name='admin.%s' % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES with MongoDBClient(None) as client: MongoDBAdmin().create_user(user, client=client) LOG.debug('Created admin user.') def secure(self): """Create the Trove admin user. The service should not be running at this point. """ if self.status.is_running: raise RuntimeError( _("Cannot secure the instance. " "The service is still running.")) try: self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. """ return self.configuration_manager.get_value(name, default)
class TiDbApp(object):
    """Prepares DBaaS on a Guest container."""

    def __init__(self):
        self.state_change_wait_time = CONF.state_change_wait_time

        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(CONFIG_FILE),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            CONFIG_FILE, system.MONGO_USER, system.MONGO_USER,
            SafeYamlCodec(default_flow_style=False),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))

        self.is_query_router = False
        self.is_cluster_member = False
        self.status = TiDbAppStatus()

    def install_if_needed(self, packages):
        """Prepare the guest machine with a TiDB installation."""
        LOG.info("Preparing Guest as TiDB.")
        if not system.PACKAGER.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s.", str(packages))
            system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info("Finished installing TiDB server.")

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            self._get_service_candidates(), self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            self._get_service_candidates(), self.state_change_wait_time)

    def start_db(self, update_db=False):
        self.status.start_db_service(
            self._get_service_candidates(), self.state_change_wait_time,
            enable_on_boot=True, update_db=update_db)

    def start_db_with_conf_changes(self, config_contents):
        LOG.info('Starting TiDB with configuration changes.')
        if self.status.is_running:
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)

        LOG.info("Initiating config.")
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration(
            None, mount_point=system.MONGODB_MOUNT_POINT)
        self.start_db(True)

    def apply_initial_guestagent_configuration(
            self, cluster_config, mount_point=None):
        LOG.debug("Applying initial configuration.")

        # TiDB init scripts assume the PID-file path is writable by the
        # database service.
        self._initialize_writable_run_dir()

        self.configuration_manager.apply_system_override(
            {'processManagement.fork': False,
             'systemLog.destination': 'file',
             'systemLog.logAppend': True
             })

        if mount_point:
            self.configuration_manager.apply_system_override(
                {'storage.dbPath': mount_point})

        if cluster_config is not None:
            self._configure_as_cluster_instance(cluster_config)
        else:
            self._configure_network(TIDB_PORT)

    def _configure_as_cluster_instance(self, cluster_config):
        """Configure this guest as a cluster instance and return its
        new status.
        """
        if cluster_config['instance_type'] == "tidb_server":
            self._configure_as_tidb_server()
        elif cluster_config["instance_type"] == "pd_server":
            self._configure_as_pd_server()
        elif cluster_config["instance_type"] == "tikv":
            self._configure_as_tikv_server(
                cluster_config['replica_set_name'])
        else:
            LOG.error("Bad cluster configuration; instance type "
                      "given as %s.", cluster_config['instance_type'])
            return ds_instance.ServiceStatuses.FAILED

    def _configure_as_tidb_server(self):
        LOG.info("Configuring instance as a TiDB server (query router).")
        self.is_query_router = True
        # FIXME(pmalik): We should really have a separate configuration
        # template for the 'mongos' process.
        # Remove all storage configurations from the template.
        # They apply only to 'mongod' processes.
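        # (This mirrors the query-router clean-up in the MongoDB app above:
        # expand the parsed template, drop 'storage', and save the flattened
        # result.)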
        # Already applied overrides will be integrated into the base file and
        # their current groups removed.
        config = guestagent_utils.expand_dict(
            self.configuration_manager.parse_configuration())
        if 'storage' in config:
            LOG.debug("Removing 'storage' directives from the configuration "
                      "template.")
            del config['storage']
            self.configuration_manager.save_configuration(
                guestagent_utils.flatten_dict(config))

        # Apply 'mongos' configuration.
        self._configure_network(MONGODB_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.configDB': ''}, CNF_CLUSTER)

    def _configure_as_pd_server(self):
        LOG.info("Configuring instance as a PD (placement driver) server.")
        self._configure_network(CONFIGSVR_PORT)
        self.configuration_manager.apply_system_override(
            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

    def _configure_as_tikv_server(self, replica_set_name):
        LOG.info("Configuring instance as a TiKV (storage) cluster member.")
        self.is_cluster_member = True
        self._configure_network(MONGODB_PORT)
        # We don't want these instances thinking they are in a replica set
        # yet, as that would prevent us from creating the admin user, so
        # start the database before updating the config. The database will
        # be started with the replica-set configuration by the cluster
        # taskmanager.
        self.start_db()
        self.configuration_manager.apply_system_override(
            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)

    def _configure_network(self, port=None):
        """Make the service accessible at a given (or default if not) port.
        """
        instance_ip = netutils.get_my_ipv4()
        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
        options = {'net.bindIp': bind_interfaces_string}
        if port is not None:
            guestagent_utils.update_dict({'net.port': port}, options)

        self.configuration_manager.apply_system_override(options)
        self.status.set_host(instance_ip, port=port)
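
    # Illustrative sketch only (not called by the service code): roughly the
    # option dict that _configure_network() above hands to the configuration
    # manager, shown as a hypothetical helper for clarity.
    def _example_network_options(self, instance_ip, port=None):
        """Hypothetical illustration of the override built above, e.g.
        {'net.bindIp': '<instance_ip>,127.0.0.1', 'net.port': <port>}.
        """
        options = {'net.bindIp': ','.join([instance_ip, '127.0.0.1'])}
        if port is not None:
            # Mirrors guestagent_utils.update_dict({'net.port': port},
            # options) for this flat, single-key case.
            options['net.port'] = port
        return options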