def _proxy_request(self, instance_id, req):
    """Forward a metadata request to the Nova metadata service.

    Signs the instance id with the shared secret, proxies the incoming
    request, and translates upstream status codes into webob responses.
    """
    fwd_headers = {
        'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
        'X-Instance-ID': instance_id,
        'X-Instance-ID-Signature': self._sign_instance_id(instance_id),
        'X-Tenant-ID': req.headers.get('X-Tenant-ID'),
    }
    netloc = '%s:%s' % (cfg.CONF.nova_metadata_ip,
                        cfg.CONF.nova_metadata_port)
    url = urlparse.urlunsplit(
        ('http', netloc, req.path_info, req.query_string, ''))

    resp, content = httplib2.Http().request(url, headers=fwd_headers)

    if resp.status == 200:
        LOG.debug(str(resp))
        return content
    if resp.status == 403:
        # Usually means the shared-secret signature did not validate.
        msg = _LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.')
        LOG.warning(msg)
        return webob.exc.HTTPForbidden()
    if resp.status == 404:
        return webob.exc.HTTPNotFound()
    if resp.status == 500:
        msg = _(
            'Remote metadata server experienced an internal server error.')
        LOG.warning(msg)
        return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
    raise Exception(_('Unexpected response code: %s') % resp.status)
def update_config(self, management_address, config):
    """Push *config* to the managed appliance at *management_address*.

    Logs how long the appliance took to accept the new configuration.
    """
    self.log.info(_('Updating config for %s'), self.name)
    started = timeutils.utcnow()
    akanda_client.update_config(management_address, self.mgt_port, config)
    elapsed = timeutils.delta_seconds(started, timeutils.utcnow())
    self.log.info(_('Config updated for %s after %s seconds'),
                  self.name, round(elapsed, 2))
def update_config(self, management_address, config):
    """Send a configuration update to the managed appliance.

    Times the round trip and logs the duration on success.
    """
    self.log.info(_('Updating config for %s'), self.name)
    t0 = timeutils.utcnow()
    akanda_client.update_config(
        management_address, self.mgt_port, config)
    seconds = timeutils.delta_seconds(t0, timeutils.utcnow())
    self.log.info(_('Config updated for %s after %s seconds'),
                  self.name, round(seconds, 2))
def execute(self, cmds, addl_env=None, check_exit_code=True):
    """Run *cmds* inside the parent device's network namespace.

    :param cmds: command and arguments to execute.
    :param addl_env: optional mapping of extra environment variables;
        each pair is prepended to the command line as ``NAME=value``.
    :param check_exit_code: raise if the command exits non-zero.
    :raises Exception: when no root helper or no namespace is configured.
    """
    # BUG FIX: the default used to be a mutable ``{}`` shared across all
    # calls; use None as the sentinel and create a fresh dict per call.
    if addl_env is None:
        addl_env = {}
    if not self._parent.root_helper:
        m = _('sudo is required to run this command')
        LOG.error(m)
        raise Exception(m)
    elif not self._parent.namespace:
        m = _('No namespace defined for parent')
        LOG.error(m)
        raise Exception(m)
    else:
        return utils.execute(
            ['%s=%s' % pair for pair in addl_env.items()] +
            ['ip', 'netns', 'exec', self._parent.namespace] +
            list(cmds),
            root_helper=self._parent.root_helper,
            check_exit_code=check_exit_code)
def __init__(self, id_, name, tenant_id, network_id, ip_version,
             cidr, gateway_ip, enable_dhcp, dns_nameservers, host_routes,
             ipv6_ra_mode):
    """Build a subnet model, validating the CIDR and gateway address.

    A malformed CIDR is fatal (ValueError); a malformed gateway is
    tolerated and recorded as None.
    """
    self.id = id_
    self.name = name
    self.tenant_id = tenant_id
    self.network_id = network_id
    self.ip_version = ip_version
    try:
        self.cidr = netaddr.IPNetwork(cidr)
    except (TypeError, netaddr.AddrFormatError) as exc:
        # Without a usable CIDR the subnet is meaningless; fail loudly.
        raise ValueError(
            _('Invalid CIDR %r for subnet %s of network %s: %s') % (
                cidr, id_, network_id, exc,
            ))
    try:
        self.gateway_ip = netaddr.IPAddress(gateway_ip)
    except (TypeError, netaddr.AddrFormatError) as exc:
        # A bad gateway is survivable; log it and continue without one.
        self.gateway_ip = None
        LOG.info(_LI('Bad gateway_ip on subnet %s: %r (%s)'),
                 id_, gateway_ip, exc)
    self.enable_dhcp = enable_dhcp
    self.dns_nameservers = dns_nameservers
    self.host_routes = host_routes
    self.ipv6_ra_mode = ipv6_ra_mode
def __init__(self, worker_factory):
    """Start the configured number of worker subprocesses.

    :param worker_factory: Callable used to create Worker instances
        inside each subprocess.
    :raises ValueError: when fewer than one worker is configured.
    """
    self.num_workers = cfg.CONF.num_worker_processes
    if self.num_workers < 1:
        raise ValueError(_('Need at least one worker process'))

    # One subprocess per worker, each fed by its own joinable queue;
    # notifications received via handle_message() are later dispatched
    # onto these queues.
    self.workers = []
    for idx in range(self.num_workers):
        queue = multiprocessing.JoinableQueue()
        proc = multiprocessing.Process(
            target=_worker,
            kwargs={'inq': queue, 'worker_factory': worker_factory},
            name='p%02d' % idx,
        )
        proc.start()
        self.workers.append({'queue': queue, 'worker': proc})
    self.dispatcher = Dispatcher(self.workers)
def run(self, ip_address, port=cfg.CONF.rug_api_port):
    """Serve the rug API over IPv6, retrying the bind a few times.

    Retries only on EADDRNOTAVAIL, sleeping with a growing backoff.
    """
    app = RugAPI()
    sock = None
    for attempt in xrange(5):
        LOG.info(
            _LI('Starting the rug-api on %s/%s'),
            ip_address, port,
        )
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
            break
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW('Could not create rug-api socket: %s'), err)
            LOG.warning(_LW('Sleeping %s before trying again'), attempt + 1)
            eventlet.sleep(attempt + 1)
    if sock is None:
        raise RuntimeError(
            _('Could not establish rug-api socket on %s/%s') %
            (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool,
                         log=loggers.WritableLogger(LOG))
def __call__(self, req):
    """Dispatch a PUT request to the embedded cliff command manager.

    Returns webob error responses for non-PUT methods, unknown or
    interactive commands, bad arguments, and unexpected failures.
    """
    try:
        if req.method != 'PUT':
            return webob.exc.HTTPMethodNotAllowed()

        args = filter(None, req.path.split('/'))
        if not args:
            return webob.exc.HTTPNotFound()

        # BUG FIX: this used to unpack as ``command, _, _ = ...``,
        # shadowing the i18n ``_`` function with a local name; the
        # generic exception handler below then crashed on ``_(...)``
        # (UnboundLocalError if the exception fired before unpacking).
        command = self.ctl.command_manager.find_command(args)[0]
        if command.interactive:
            return webob.exc.HTTPNotImplemented()

        return str(self.ctl.run(['--debug'] + args))
    except SystemExit:
        # cliff invokes -h (help) on argparse failure
        # (which in turn results in sys.exit call)
        return webob.exc.HTTPBadRequest()
    except ValueError:
        return webob.exc.HTTPNotFound()
    except Exception:
        LOG.exception(_LE("Unexpected error."))
        msg = _('An unknown error has occurred. '
                'Please try your request again.')
        return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
def get_router_detail(self, router_id):
    """Return detailed information about a router and its networks."""
    routers = self.rpc_client.get_routers(router_id=router_id)
    try:
        return Router.from_dict(routers[0])
    except IndexError:
        # Empty result: the router was deleted out from under us.
        raise RouterGone(_('the router is no longer available'))
def run(self, ip_address, port=RUG_META_PORT):
    """Serve the metadata proxy, retrying socket creation a few times.

    Retries only on EADDRNOTAVAIL, sleeping with a growing backoff.
    """
    app = MetadataProxyHandler()
    sock = None
    for attempt in xrange(5):
        LOG.info(_LI('Starting the metadata proxy on %s/%s'),
                 ip_address, port)
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
            break
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW('Could not create metadata proxy socket: %s'),
                        err)
            LOG.warning(_LW('Sleeping %s before trying again'), attempt + 1)
            eventlet.sleep(attempt + 1)
    if sock is None:
        raise RuntimeError(
            _('Could not establish metadata proxy socket on %s/%s') %
            (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool,
                         log=loggers.WritableLogger(LOG))
def notify(context, message):
    """Deprecated in Grizzly. Please use rpc_notifier instead."""
    # Emit the deprecation warning, then delegate to the replacement.
    deprecation_msg = _("The rabbit_notifier is now deprecated."
                        " Please use rpc_notifier instead.")
    LOG.deprecated(deprecation_msg)
    rpc_notifier.notify(context, message)
def _build_flow_expr_arr(self, **kwargs): flow_expr_arr = [] is_delete_expr = kwargs.get('delete', False) if not is_delete_expr: prefix = ( "hard_timeout=%s,idle_timeout=%s,priority=%s" % (kwargs.get('hard_timeout', '0'), kwargs.get('idle_timeout', '0'), kwargs.get('priority', '1'))) flow_expr_arr.append(prefix) elif 'priority' in kwargs: raise Exception(_("Cannot match priority on flow deletion")) in_port = ('in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or '') dl_type = ('dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or '') dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" % kwargs['dl_vlan'] or '') dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or '' dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or '' nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or '' nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or '' tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or '' proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or '' ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or '' match = (in_port + dl_type + dl_vlan + dl_src + dl_dst + (ip or proto) + nw_src + nw_dst + tun_id) if match: match = match[1:] # strip leading comma flow_expr_arr.append(match) return flow_expr_arr
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    """Run *cmd* in a subprocess and return its output.

    :param root_helper: prefix command (e.g. ``sudo``), shlex-split and
        prepended to *cmd*.
    :param process_input: optional data fed to the child's stdin.
    :param addl_env: extra environment variables for the child.
    :param check_exit_code: raise RuntimeError on non-zero exit.
    :param return_stderr: return ``(stdout, stderr)`` instead of stdout.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    cmd = map(str, cmd)
    LOG.debug("Running command: %s", cmd)

    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    proc = subprocess_popen(cmd, shell=False,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)
    if process_input:
        _stdout, _stderr = proc.communicate(process_input)
    else:
        _stdout, _stderr = proc.communicate()
    proc.stdin.close()

    summary = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
                "Stderr: %(stderr)r") % {'cmd': cmd,
                                         'code': proc.returncode,
                                         'stdout': _stdout,
                                         'stderr': _stderr}
    LOG.debug(summary)
    if proc.returncode and check_exit_code:
        raise RuntimeError(summary)

    if return_stderr:
        return _stdout, _stderr
    return _stdout
def __init__(self, id_, name, tenant_id, network_id, ip_version,
             cidr, gateway_ip, enable_dhcp, dns_nameservers, host_routes,
             ipv6_ra_mode):
    """Construct a subnet; a bad CIDR is fatal, a bad gateway is not."""
    self.id = id_
    self.name = name
    self.tenant_id = tenant_id
    self.network_id = network_id
    self.ip_version = ip_version
    self.enable_dhcp = enable_dhcp
    self.dns_nameservers = dns_nameservers
    self.host_routes = host_routes
    self.ipv6_ra_mode = ipv6_ra_mode

    try:
        self.cidr = netaddr.IPNetwork(cidr)
    except (TypeError, netaddr.AddrFormatError) as err:
        raise ValueError(
            _('Invalid CIDR %r for subnet %s of network %s: %s') % (
                cidr, id_, network_id, err,
            )
        )

    try:
        self.gateway_ip = netaddr.IPAddress(gateway_ip)
    except (TypeError, netaddr.AddrFormatError) as err:
        # Tolerate a malformed gateway: record None and log it.
        self.gateway_ip = None
        LOG.info(_LI('Bad gateway_ip on subnet %s: %r (%s)'),
                 id_, gateway_ip, err)
def _build_flow_expr_arr(self, **kwargs): flow_expr_arr = [] is_delete_expr = kwargs.get("delete", False) if not is_delete_expr: prefix = "hard_timeout=%s,idle_timeout=%s,priority=%s" % ( kwargs.get("hard_timeout", "0"), kwargs.get("idle_timeout", "0"), kwargs.get("priority", "1"), ) flow_expr_arr.append(prefix) elif "priority" in kwargs: raise Exception(_("Cannot match priority on flow deletion")) in_port = "in_port" in kwargs and ",in_port=%s" % kwargs["in_port"] or "" dl_type = "dl_type" in kwargs and ",dl_type=%s" % kwargs["dl_type"] or "" dl_vlan = "dl_vlan" in kwargs and ",dl_vlan=%s" % kwargs["dl_vlan"] or "" dl_src = "dl_src" in kwargs and ",dl_src=%s" % kwargs["dl_src"] or "" dl_dst = "dl_dst" in kwargs and ",dl_dst=%s" % kwargs["dl_dst"] or "" nw_src = "nw_src" in kwargs and ",nw_src=%s" % kwargs["nw_src"] or "" nw_dst = "nw_dst" in kwargs and ",nw_dst=%s" % kwargs["nw_dst"] or "" tun_id = "tun_id" in kwargs and ",tun_id=%s" % kwargs["tun_id"] or "" proto = "proto" in kwargs and ",%s" % kwargs["proto"] or "" ip = ("nw_src" in kwargs or "nw_dst" in kwargs) and ",ip" or "" match = in_port + dl_type + dl_vlan + dl_src + dl_dst + (ip or proto) + nw_src + nw_dst + tun_id if match: match = match[1:] # strip leading comma flow_expr_arr.append(match) return flow_expr_arr
def _as_root(self, options, command, args, use_root_namespace=False):
    """Execute *command* with the configured root helper.

    :param use_root_namespace: run in the root namespace instead of
        this wrapper's namespace.
    :raises Exception: when no root helper is configured.
    """
    if not self.root_helper:
        raise Exception(_('Sudo is required to run this command'))
    namespace = None if use_root_namespace else self.namespace
    return self._execute(options, command, args, self.root_helper,
                         namespace)
def add_flow(self, **kwargs):
    """Install a flow on the bridge via ``ovs-ofctl add-flow``.

    :raises Exception: when no ``actions`` keyword was supplied.
    """
    if "actions" not in kwargs:
        raise Exception(_("Must specify one or more actions"))
    kwargs.setdefault("priority", "0")
    expr = self._build_flow_expr_arr(**kwargs)
    expr.append("actions=%s" % (kwargs["actions"]))
    self.run_ofctl("add-flow", [",".join(expr)])
def notify(context, publisher_id, event_type, priority, payload):
    """Send a notification through every configured driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    The outgoing message adds a ``message_id`` (UUID) and a GMT
    ``timestamp`` to the parameters above and hands the composite
    dictionary to each driver's transport.  Driver failures are logged
    but do not stop delivery to the remaining drivers.
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }
    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_LE("Problem '%(e)s' attempting to "
                              "send to notification system. "
                              "Payload=%(payload)s"),
                          dict(e=e, payload=payload))
def notify(context, publisher_id, event_type, priority, payload):
    """Fan a notification out to all configured drivers.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    A ``message_id`` (UUID) and GMT ``timestamp`` are appended to the
    parameters above; the composite dict is handed to each driver.
    A failing driver is logged and skipped, never fatal.
    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    safe_payload = jsonutils.to_primitive(payload, convert_instances=True)

    message = dict(message_id=str(uuid.uuid4()),
                   publisher_id=publisher_id,
                   event_type=event_type,
                   priority=priority,
                   payload=safe_payload,
                   timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, message)
        except Exception as e:
            LOG.exception(
                _LE("Problem '%(e)s' attempting to "
                    "send to notification system. "
                    "Payload=%(payload)s"),
                dict(e=e, payload=safe_payload))
def _proxy_request(self, instance_id, req):
    """Relay a metadata request to Nova with a signed instance id."""
    headers = {
        "X-Forwarded-For": req.headers.get("X-Forwarded-For"),
        "X-Instance-ID": instance_id,
        "X-Instance-ID-Signature": self._sign_instance_id(instance_id),
        "X-Tenant-ID": req.headers.get("X-Tenant-ID"),
    }
    target = urlparse.urlunsplit((
        "http",
        "%s:%s" % (cfg.CONF.nova_metadata_ip, cfg.CONF.nova_metadata_port),
        req.path_info,
        req.query_string,
        "",
    ))

    http = httplib2.Http()
    resp, content = http.request(target, headers=headers)
    status = resp.status

    if status == 200:
        LOG.debug(str(resp))
        return content
    elif status == 403:
        # Usually indicates a shared-secret mismatch with Nova.
        msg = _LW(
            "The remote metadata server responded with Forbidden. This "
            "response usually occurs when shared secrets do not match."
        )
        LOG.warning(msg)
        return webob.exc.HTTPForbidden()
    elif status == 404:
        return webob.exc.HTTPNotFound()
    elif status == 500:
        msg = _("Remote metadata server experienced an internal server error.")
        LOG.warning(msg)
        return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
    else:
        raise Exception(_("Unexpected response code: %s") % status)
def _run_shell_command(cmd, throw_on_error=False): if os.name == 'nt': output = subprocess.Popen(["cmd.exe", "/C", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: output = subprocess.Popen(["/bin/sh", "-c", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out = output.communicate() if output.returncode and throw_on_error: raise Exception(_("%s returned %d") % cmd, output.returncode) if len(out) == 0: return None if len(out[0].strip()) == 0: return None return out[0].strip()
def run(self, ip_address, port=RUG_META_PORT):
    """Serve the metadata proxy over IPv6, retrying the bind five times."""
    app = MetadataProxyHandler()
    for backoff in xrange(1, 6):
        LOG.info(_LI("Starting the metadata proxy on %s/%s"),
                 ip_address, port)
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
        except socket.error as err:
            # Only retry when the address is (temporarily) unavailable.
            if err.errno != 99:
                raise
            LOG.warning(_LW("Could not create metadata proxy socket: %s"),
                        err)
            LOG.warning(_LW("Sleeping %s before trying again"), backoff)
            eventlet.sleep(backoff)
        else:
            break
    else:
        raise RuntimeError(
            _("Could not establish metadata proxy socket on %s/%s")
            % (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool,
                         log=loggers.WritableLogger(LOG))
def run(self, ip_address, port=cfg.CONF.rug_api_port):
    """Serve the rug API over IPv6, retrying the bind up to five times."""
    app = RugAPI()
    for backoff in xrange(1, 6):
        LOG.info(_LI("Starting the rug-api on %s/%s"), ip_address, port)
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW("Could not create rug-api socket: %s"), err)
            LOG.warning(_LW("Sleeping %s before trying again"), backoff)
            eventlet.sleep(backoff)
        else:
            break
    else:
        raise RuntimeError(
            _("Could not establish rug-api socket on %s/%s")
            % (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool,
                         log=loggers.WritableLogger(LOG))
def create_vrrp_port(self, object_id, network_id, label='VRRP'):
    """Create a service port for *object_id* on *network_id*.

    VRRP-labelled ports are created without fixed IPs.
    :raises ValueError: when the API returns no port data.
    """
    port_spec = {
        'admin_state_up': True,
        'network_id': network_id,
        'name': 'AKANDA:%s:%s' % (label, object_id),
        'security_groups': [],
    }
    if label == 'VRRP':
        port_spec['fixed_ips'] = []

    port_data = self.api_client.create_port({'port': port_spec}).get('port')
    if not port_data:
        raise ValueError(
            _('Unable to create %s port for %s on network %s') %
            (label, object_id, network_id))
    return Port.from_dict(port_data)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
    """Unplug the interface."""
    bridge = bridge or self.conf.ovs_integration_bridge
    tap_name = self._get_tap_name(device_name, prefix)
    self.check_bridge_exists(bridge)
    ovs = ovs_lib.OVSBridge(bridge, self.root_helper)

    try:
        ovs.delete_port(tap_name)
        if self.conf.ovs_use_veth:
            # The veth peer must be removed separately from the port.
            ip_lib.IPDevice(device_name, self.root_helper,
                            namespace).link.delete()
        LOG.debug(_("Unplugged interface '%s'"), device_name)
    except RuntimeError:
        LOG.exception(_LE("Failed unplugging interface '%s'"),
                      device_name)
def create_vrrp_port(self, object_id, network_id, label='VRRP'):
    """Create an AKANDA service port; VRRP ports get no fixed IPs.

    :raises ValueError: when the API response carries no port data.
    """
    body = dict(
        admin_state_up=True,
        network_id=network_id,
        name='AKANDA:%s:%s' % (label, object_id),
        security_groups=[],
    )
    if label == 'VRRP':
        body['fixed_ips'] = []

    reply = self.api_client.create_port(dict(port=body))
    data = reply.get('port')
    if not data:
        raise ValueError(
            _('Unable to create %s port for %s on network %s')
            % (label, object_id, network_id))
    return Port.from_dict(data)
def get_version(package_name, pre_version=None):
    """Get the version of the project.

    Resolution order: the OSLO_PACKAGE_VERSION environment variable,
    then PKG-INFO (present in a distribution tarball or after install),
    then the git repository.  We do not support setup.py version sanity
    in git archive tarballs, nor packagers directly sucking our git
    repo into theirs; a source tarball must be made from our git repo
    (or from a fork of it with additional tags, with the understood
    consequences).
    """
    version = os.environ.get("OSLO_PACKAGE_VERSION", None)
    if not version:
        version = _get_version_from_pkg_info(package_name)
    if not version:
        version = _get_version_from_git(pre_version)
    if version:
        return version
    raise Exception(_("Versioning for this project requires either an sdist"
                      " tarball, or access to an upstream git repository."))
def get_version(package_name, pre_version=None):
    """Get the version of the project.

    Tries, in order and lazily: the OSLO_PACKAGE_VERSION environment
    variable; PKG-INFO (distribution tarball / installed package); the
    git repository.  We do not support setup.py version sanity in git
    archive tarballs, nor packagers directly sucking our git repo into
    theirs; we expect a source tarball to be made from our git repo
    (or a fork with additional tags, at the forker's own risk).
    """
    version = (os.environ.get("OSLO_PACKAGE_VERSION", None)
               or _get_version_from_pkg_info(package_name)
               or _get_version_from_git(pre_version))
    if version:
        return version
    raise Exception(
        _("Versioning for this project requires either an sdist"
          " tarball, or access to an upstream git repository."))
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    """Spawn *cmd*, capture its output, and optionally enforce exit 0.

    :param root_helper: prefix (e.g. ``sudo``), shlex-split onto *cmd*.
    :param process_input: optional data written to the child's stdin.
    :param addl_env: extra environment variables merged into a copy of
        the current environment.
    :param check_exit_code: raise RuntimeError on non-zero exit status.
    :param return_stderr: return ``(stdout, stderr)`` instead of stdout.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    cmd = map(str, cmd)
    LOG.debug("Running command: %s", cmd)

    child_env = os.environ.copy()
    if addl_env:
        child_env.update(addl_env)
    child = subprocess_popen(cmd, shell=False,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=child_env)
    out, err = (child.communicate(process_input) if process_input
                else child.communicate())
    child.stdin.close()

    report = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
               "Stderr: %(stderr)r") % {'cmd': cmd,
                                        'code': child.returncode,
                                        'stdout': out,
                                        'stderr': err}
    LOG.debug(report)
    if child.returncode and check_exit_code:
        raise RuntimeError(report)
    return (out, err) if return_stderr else out
def __call__(self, req):
    """Handle a rug-api HTTP request by running a cliff command.

    Only PUT is accepted; unknown, interactive, or malformed commands
    map to the corresponding webob error responses.
    """
    try:
        if req.method != "PUT":
            return webob.exc.HTTPMethodNotAllowed()

        args = filter(None, req.path.split("/"))
        if not args:
            return webob.exc.HTTPNotFound()

        # BUG FIX: unpacking as ``command, _, _ = ...`` shadowed the
        # i18n ``_`` function with a local, so ``msg = _(...)`` in the
        # generic handler below raised instead of translating.
        command = self.ctl.command_manager.find_command(args)[0]
        if command.interactive:
            return webob.exc.HTTPNotImplemented()

        return str(self.ctl.run(["--debug"] + args))
    except SystemExit:
        # cliff invokes -h (help) on argparse failure
        # (which in turn results in sys.exit call)
        return webob.exc.HTTPBadRequest()
    except ValueError:
        return webob.exc.HTTPNotFound()
    except Exception:
        LOG.exception(_LE("Unexpected error."))
        msg = _("An unknown error has occurred. "
                "Please try your request again.")
        return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
# BUG FIX: ``cfg`` was used below (cfg.CONF, cfg.StrOpt, ...) but never
# imported, which made importing this module fail with NameError.
from oslo_config import cfg
from oslo_log import log as logging

from akanda.rug.common.i18n import _, _LE, _LW
from akanda.rug.common.linux import ip_lib
from akanda.rug.common.linux import ovs_lib
from akanda.rug.common.linux import utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Interface driver configuration options.
OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use')),
    cfg.BoolOpt('ovs_use_veth', default=False,
                help=_('Uses veth for an interface or not')),
    cfg.StrOpt('network_device_mtu',
               help=_('MTU setting for device.')),
]
CONF.register_opts(OPTS)

# NOTE(adam_g): These need a better home
AGENT_OPTIONS = [
    cfg.StrOpt('root_helper', default='sudo'),
]
CONF.register_group(cfg.OptGroup(name='AGENT'))
CONF.register_opts(AGENT_OPTIONS, 'AGENT')
def check_bridge_exists(self, bridge):
    """Raise if *bridge* is not present on the system."""
    if ip_lib.device_exists(bridge):
        return
    raise Exception(_('Bridge %s does not exist') % bridge)
import os

import six
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg

from akanda.rug.common.i18n import _

# Database connection options; 'sql_connection' is the deprecated
# legacy name for 'connection'.
_db_opts = [
    cfg.StrOpt('connection',
               deprecated_name='sql_connection',
               default='',
               secret=True,
               help=_('URL to database')),
    cfg.StrOpt('engine',
               default='',
               help=_('Database engine')),
]

CONF = cfg.CONF


def do_alembic_command(config, cmd, *args, **kwargs):
    """Invoke the named alembic command, reporting failures via err()."""
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as e:
        # Convert the exception into alembic's standard error report.
        alembic_util.err(six.text_type(e))


def add_alembic_subparser(sub, cmd):
    """Register *cmd* as a subparser, reusing its alembic docstring as help."""
    return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
def notify(context, message):
    """Deprecated in Grizzly. Please use rpc_notifier instead."""
    # Warn once per call that this notifier is superseded, then hand
    # the message to the replacement notifier.
    LOG.deprecated(_("The rabbit_notifier is now deprecated."
                     " Please use rpc_notifier instead."))
    rpc_notifier.notify(context, message)
import six
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg

from akanda.rug.common.i18n import _

# Database connection options; 'sql_connection' is the deprecated
# legacy name for 'connection'.
_db_opts = [
    cfg.StrOpt('connection',
               deprecated_name='sql_connection',
               default='',
               secret=True,
               help=_('URL to database')),
    cfg.StrOpt('engine',
               default='',
               help=_('Database engine')),
]

CONF = cfg.CONF


def do_alembic_command(config, cmd, *args, **kwargs):
    """Invoke the named alembic command, reporting failures via err()."""
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as e:
        # Convert the exception into alembic's standard error report.
        alembic_util.err(six.text_type(e))
SQLAlchemy models for baremetal data.
"""

from akanda.rug.common.i18n import _
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base

# Storage engine used when creating tables on MySQL backends.
sql_opts = [cfg.StrOpt("mysql_engine",
                       default="InnoDB",
                       help=_("MySQL engine to use."))]

# Fallback SQLite database used when no connection URL is configured.
_DEFAULT_SQL_CONNECTION = "sqlite:///akanda-ruxg.db"

cfg.CONF.register_opts(sql_opts, "database")
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, "ironic.sqlite")


def table_args():
    """Return engine-specific table kwargs, or None for non-MySQL."""
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == "mysql":
        return {"mysql_engine": cfg.CONF.database.mysql_engine,
                "mysql_charset": "utf8"}
    return None
from akanda.rug.common.i18n import _
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base

# Storage engine used when creating tables on MySQL backends.
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

# Fallback SQLite database used when no connection URL is configured.
_DEFAULT_SQL_CONNECTION = 'sqlite:///akanda-ruxg.db'

cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    """Return engine-specific table kwargs, or None for non-MySQL."""
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {'mysql_engine': cfg.CONF.database.mysql_engine,
                'mysql_charset': "utf8"}
    return None
from oslo_config import cfg
from oslo_log import log as logging

from akanda.rug.common.i18n import _, _LE, _LW
from akanda.rug.common.linux import ip_lib
from akanda.rug.common.linux import ovs_lib
from akanda.rug.common.linux import utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Interface driver configuration options.
OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use')),
    cfg.BoolOpt('ovs_use_veth', default=False,
                help=_('Uses veth for an interface or not')),
    cfg.StrOpt('network_device_mtu',
               help=_('MTU setting for device.')),
]
CONF.register_opts(OPTS)

# NOTE(adam_g): These need a better home
AGENT_OPTIONS = [
    cfg.StrOpt('root_helper', default='sudo'),
]
CONF.register_group(cfg.OptGroup(name='AGENT'))
CONF.register_opts(AGENT_OPTIONS, 'AGENT')
from akanda.rug.common.i18n import _
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base

# Storage engine used when creating tables on MySQL backends.
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

# Fallback SQLite database used when no connection URL is configured.
_DEFAULT_SQL_CONNECTION = 'sqlite:///akanda-ruxg.db'

cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    """Return engine-specific table kwargs, or None for non-MySQL."""
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {'mysql_engine': cfg.CONF.database.mysql_engine,
                'mysql_charset': "utf8"}
    # CONSISTENCY FIX: the sibling definition of table_args() ends with
    # an explicit ``return None``; make this copy match so the
    # non-MySQL contract is stated rather than implied.
    return None