def main():
    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)
    debug_utils.setup()

    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = (_LE("Manager class not registered for datastore manager %s") %
               CONF.datastore_manager)
        raise RuntimeError(msg)
    if not CONF.guest_id:
        msg = (_LE("The guest_id parameter is not set. guest_info.conf "
                   "was not injected into the guest or not read by "
                   "guestagent"))
        raise RuntimeError(msg)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version
    server = rpc_service.RpcService(
        manager=manager, host=CONF.guest_id,
        rpc_api_version=rpc_version.RPC_API_VERSION)

    launcher = openstack_service.launch(CONF, server)
    launcher.wait()
def main():
    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)
    debug_utils.setup()

    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = (_LE("Manager class not registered for datastore manager %s") %
               CONF.datastore_manager)
        raise RuntimeError(msg)
    if not CONF.guest_id:
        msg = (_LE("The guest_id parameter is not set. guest_info.conf "
                   "was not injected into the guest or not read by "
                   "guestagent"))
        raise RuntimeError(msg)

    # BUG(1650518): Cleanup in the Pike release
    # make it fatal if CONF.instance_rpc_encr_key is None

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    server = rpc_service.RpcService(
        key=CONF.instance_rpc_encr_key,
        topic="guestagent.%s" % CONF.guest_id,
        manager=manager, host=CONF.guest_id,
        rpc_api_version=guest_api.API.API_LATEST_VERSION)

    launcher = openstack_service.launch(CONF, server)
    launcher.wait()
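The import-order comment in both main() variants above refers to eventlet monkeypatching. Below is a minimal, self-contained sketch (illustrative only, not Trove code) of why modules that capture the stdlib thread objects must be imported after the monkeypatch decision has been made.

import eventlet
eventlet.monkey_patch()  # must run before importing modules that keep
                         # references to the stdlib thread/threading objects

import threading  # after monkey_patch(), this is the green version


def work():
    print("running in a green thread")


t = threading.Thread(target=work)
t.start()
t.join()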
def get_attribute(name):
    value = updates.get(name)
    if value is None:
        raise exception.MissingKey(
            _LE("Specify all user properties."))
    return value
def apply_user_updates(self, user_model, updates):
    # When editing buckets, be sure to always specify all properties.
    # Couchbase Server may otherwise reset the property value to default.
    def get_attribute(name):
        value = updates.get(name)
        if value is None:
            raise exception.MissingKey(
                _LE("Specify all user properties."))
        return value

    if 'name' in updates:
        raise exception.BadRequest(
            _LE("Couchbase users cannot be renamed."))

    user_model.password = get_attribute('password')
    user_model.bucket_ramsize_mb = get_attribute('bucket_ramsize')
    user_model.bucket_replica_count = get_attribute('bucket_replica')
    user_model.enable_index_replica = get_attribute('enable_index_replica')
    user_model.bucket_eviction_policy = get_attribute(
        'bucket_eviction_policy')
    user_model.bucket_priority = get_attribute('bucket_priority')

    # Couchbase buckets cannot be renamed, hence the ID never changes.
    return None
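A standalone sketch of the "specify all properties" contract enforced above; the class and function names here are illustrative stand-ins, not the real Trove/Couchbase types.

class MissingKey(Exception):
    """Stand-in for trove.common.exception.MissingKey."""


def get_attribute(updates, name):
    # Mirrors the nested helper above: a property absent from the update
    # dict aborts the whole edit instead of silently falling back to defaults.
    value = updates.get(name)
    if value is None:
        raise MissingKey("Specify all user properties.")
    return value


updates = {'password': 's3cret', 'bucket_ramsize': 512}
print(get_attribute(updates, 'bucket_ramsize'))  # -> 512
try:
    get_attribute(updates, 'bucket_replica')     # missing -> MissingKey
except MissingKey as exc:
    print(exc)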
def _apply_synchronized(self):
    """Apply the current in-memory set of iptables rules.

    This will blow away any rules left over from previous runs of the
    same component of Nova, and replace them with our current set of
    rules. This happens atomically, thanks to iptables-restore.
    """
    s = [('iptables', self.ipv4)]
    if self.use_ipv6:
        s += [('ip6tables', self.ipv6)]

    for cmd, tables in s:
        args = ['%s-save' % (cmd,), '-c']
        if self.namespace:
            args = ['ip', 'netns', 'exec', self.namespace] + args
        all_tables = self.execute(args, run_as_root=True)
        all_lines = all_tables.split('\n')
        # Traverse tables in sorted order for predictable dump output
        for table_name in sorted(tables):
            table = tables[table_name]
            start, end = self._find_table(all_lines, table_name)
            all_lines[start:end] = self._modify_rules(
                all_lines[start:end], table, table_name)

        args = ['%s-restore' % (cmd,), '-c']
        if self.namespace:
            args = ['ip', 'netns', 'exec', self.namespace] + args
        try:
            self.execute(args, process_input='\n'.join(all_lines),
                         run_as_root=True)
        except RuntimeError as r_error:
            with excutils.save_and_reraise_exception():
                try:
                    line_no = int(re.search(
                        'iptables-restore: line ([0-9]+?) failed',
                        str(r_error)).group(1))
                    context = IPTABLES_ERROR_LINES_OF_CONTEXT
                    log_start = max(0, line_no - context)
                    log_end = line_no + context
                except AttributeError:
                    # line error wasn't found, print all lines instead
                    log_start = 0
                    log_end = len(all_lines)

                log_lines = ('%7d. %s' % (idx, l)
                             for idx, l in enumerate(
                                 all_lines[log_start:log_end],
                                 log_start + 1))
                LOG.error(_LE("IPTablesManager.apply failed to apply the "
                              "following set of iptables rules:\n%s"),
                          '\n'.join(log_lines))

    LOG.debug("IPTablesManager.apply completed with success")
@contextlib.contextmanager
def defer_apply(self):
    """Defer apply context."""
    self.defer_apply_on()
    try:
        yield
    finally:
        try:
            self.defer_apply_off()
        except Exception:
            msg = _LE('Failure applying iptables rules')
            LOG.exception(msg)
            raise IpTablesApplyException(msg)
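A minimal, self-contained sketch of how such a defer_apply() context is typically used; the manager class below is a stand-in, not Neutron's IptablesManager, and simply buffers rules until the with-block exits.

import contextlib


class FakeRuleManager(object):
    """Stand-in manager demonstrating the defer-apply pattern."""

    def __init__(self):
        self.pending = []

    def defer_apply_on(self):
        print("deferring apply")

    def defer_apply_off(self):
        print("applying %d buffered rules" % len(self.pending))
        del self.pending[:]

    @contextlib.contextmanager
    def defer_apply(self):
        self.defer_apply_on()
        try:
            yield
        finally:
            self.defer_apply_off()


manager = FakeRuleManager()
with manager.defer_apply():
    manager.pending.append('-A INPUT -p tcp --dport 22 -j ACCEPT')
    manager.pending.append('-A INPUT -j DROP')
# both rules are flushed once, when the with-block exits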