def update_config(self, management_address, config):
    """Push a new configuration to the managed appliance and log timing.

    :param management_address: address of the appliance's management
        interface.
    :param config: the configuration to apply.
    """
    self.log.info(_('Updating config for %s'), self.name)
    began = timeutils.utcnow()
    astara_client.update_config(
        management_address, self.mgt_port, config)
    elapsed = timeutils.delta_seconds(began, timeutils.utcnow())
    self.log.info(_('Config updated for %s after %s seconds'),
                  self.name, round(elapsed, 2))
def execute(self, cmds, addl_env=None, check_exit_code=True):
    """Execute a command inside the parent device's network namespace.

    :param cmds: the command and its arguments, as a list.
    :param addl_env: optional mapping of extra environment variables,
        prepended to the command line as VAR=value pairs.
    :param check_exit_code: passed through to utils.execute; a non-zero
        exit status raises when True.
    :returns: the output of utils.execute.
    :raises Exception: when no root helper or no namespace is configured.
    """
    # NOTE: addl_env previously defaulted to a mutable {} shared across
    # calls (classic mutable-default pitfall); use a None sentinel.
    if addl_env is None:
        addl_env = {}
    if not self._parent.root_helper:
        m = _('sudo is required to run this command')
        LOG.error(m)
        raise Exception(m)
    elif not self._parent.namespace:
        m = _('No namespace defined for parent')
        LOG.error(m)
        raise Exception(m)
    else:
        return utils.execute(
            ['%s=%s' % pair for pair in addl_env.items()] +
            ['ip', 'netns', 'exec', self._parent.namespace] +
            list(cmds),
            root_helper=self._parent.root_helper,
            check_exit_code=check_exit_code)
def _build_flow_expr_arr(self, **kwargs): flow_expr_arr = [] is_delete_expr = kwargs.get('delete', False) if not is_delete_expr: prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" % (kwargs.get('hard_timeout', '0'), kwargs.get('idle_timeout', '0'), kwargs.get('priority', '1'))) flow_expr_arr.append(prefix) elif 'priority' in kwargs: raise Exception(_("Cannot match priority on flow deletion")) in_port = ('in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or '') dl_type = ('dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or '') dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" % kwargs['dl_vlan'] or '') dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or '' dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or '' nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or '' nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or '' tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or '' proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or '' ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or '' match = (in_port + dl_type + dl_vlan + dl_src + dl_dst + (ip or proto) + nw_src + nw_dst + tun_id) if match: match = match[1:] # strip leading comma flow_expr_arr.append(match) return flow_expr_arr
def __call__(self, req):
    """Dispatch an HTTP PUT to the matching rug-ctl command.

    :param req: the incoming webob.Request.
    :returns: the command output string, or a webob HTTP error response.
    """
    try:
        if req.method != 'PUT':
            return webob.exc.HTTPMethodNotAllowed()
        # NOTE: filter() returns a lazy, always-truthy iterator on
        # Python 3, which defeated the emptiness check and broke the
        # list concatenation below; build a real list instead.
        args = [part for part in req.path.split('/') if part]
        if not args:
            return webob.exc.HTTPNotFound()
        # NOTE: do not unpack into "_" here -- that shadows the gettext
        # alias used in the error handlers below.
        command, _cmd_name, _search_args = \
            self.ctl.command_manager.find_command(args)
        if command.interactive:
            return webob.exc.HTTPNotImplemented()
        return str(self.ctl.run(['--debug'] + args))
    except SystemExit:
        # cliff invokes -h (help) on argparse failure
        # (which in turn results in sys.exit call)
        return webob.exc.HTTPBadRequest()
    except ValueError:
        return webob.exc.HTTPNotFound()
    except Exception:
        LOG.exception(_LE("Unexpected error."))
        msg = _('An unknown error has occurred. '
                'Please try your request again.')
        return webob.exc.HTTPInternalServerError(
            explanation=six.text_type(msg))
def __init__(self, worker_factory):
    """Set up the scheduler and spawn its pool of worker processes.

    The pool size comes from the ``num_worker_processes`` config
    option, not from an argument.

    :param worker_factory: Callable each worker process uses to create
        its Worker instance.
    :raises ValueError: when num_worker_processes is less than 1.
    """
    self.num_workers = cfg.CONF.num_worker_processes
    if self.num_workers < 1:
        raise ValueError(_('Need at least one worker process'))
    self.workers = []
    # Create several worker processes, each with its own queue for
    # sending it instructions based on the notifications we get
    # when someone calls our handle_message() method.
    for i in range(self.num_workers):
        wq = multiprocessing.JoinableQueue()
        name = 'p%02d' % i
        worker = multiprocessing.Process(
            target=_worker,
            kwargs={
                'inq': wq,
                'worker_factory': worker_factory,
                'scheduler': self,
                'proc_name': name,
            },
            name=name,
        )
        self.workers.append({
            'queue': wq,
            'worker': worker,
        })
    # The dispatcher fans messages out over the per-worker queues; it is
    # created before any worker starts so it sees the complete list.
    self.dispatcher = Dispatcher(self.workers)
    for w in self.workers:
        w['worker'].start()
def __init__(self, hosts, replicas=1):
    """Create a new hash ring across the specified hosts.

    :param hosts: an iterable of hosts which will be mapped.
    :param replicas: number of hosts to map to each hash partition,
        or len(hosts), which ever is lesser.
        Default: 1
    :raises Invalid: when hosts is not a usable iterable.
    """
    try:
        self.hosts = set(hosts)
        # NOTE: measure the materialized set, not the raw argument -- a
        # one-shot iterator is already exhausted here and has no len().
        num_hosts = len(self.hosts)
        self.replicas = replicas if replicas <= num_hosts else num_hosts
    except TypeError:
        raise Invalid(
            _("Invalid hosts supplied when building HashRing."))
    self._host_hashes = {}
    # NOTE: iterate the set rather than the original argument, which
    # set() above may have consumed (previously a generator argument
    # silently produced an empty ring).
    for host in self.hosts:
        key = str(host).encode('utf8')
        key_hash = hashlib.md5(key)
        # Each host claims 2**hash_partition_exponent points on the
        # ring by repeatedly folding its key into the running digest.
        for p in range(2 ** CONF.hash_partition_exponent):
            key_hash.update(key)
            hashed_key = self._hash2int(key_hash)
            self._host_hashes[hashed_key] = host
    # Gather the (possibly colliding) resulting hashes into a bisectable
    # list.
    self._partitions = sorted(self._host_hashes.keys())
def __init__(self, id_, name, tenant_id, network_id, ip_version,
             cidr, gateway_ip, enable_dhcp, dns_nameservers,
             host_routes, ipv6_ra_mode):
    """Build a subnet model from raw attribute values.

    An unparseable CIDR is fatal (ValueError); an unparseable gateway
    is only logged and leaves gateway_ip as None.
    """
    self.id = id_
    self.name = name
    self.tenant_id = tenant_id
    self.network_id = network_id
    self.ip_version = ip_version
    try:
        self.cidr = netaddr.IPNetwork(cidr)
    except (TypeError, netaddr.AddrFormatError) as err:
        detail = _('Invalid CIDR %r for subnet %s of network %s: %s') % (
            cidr, id_, network_id, err,
        )
        raise ValueError(detail)
    try:
        self.gateway_ip = netaddr.IPAddress(gateway_ip)
    except (TypeError, netaddr.AddrFormatError) as err:
        # Tolerate a bad/missing gateway; record it and continue.
        self.gateway_ip = None
        LOG.info(_LI('Bad gateway_ip on subnet %s: %r (%s)'),
                 id_, gateway_ip, err)
    self.enable_dhcp = enable_dhcp
    self.dns_nameservers = dns_nameservers
    self.host_routes = host_routes
    self.ipv6_ra_mode = ipv6_ra_mode
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    """Run a command in a subprocess and return its output.

    :param cmd: command and arguments as a list.
    :param root_helper: optional privilege-escalation prefix; parsed
        with shlex and prepended to cmd.
    :param process_input: optional data to write to the child's stdin.
    :param addl_env: optional mapping merged over os.environ.
    :param check_exit_code: when True, a non-zero exit status raises
        RuntimeError.
    :param return_stderr: when True, return (stdout, stderr) instead of
        stdout alone.
    :raises RuntimeError: when check_exit_code is set and the command
        exits non-zero.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    # NOTE: map() is lazy on Python 3; materialize the list so it can
    # be logged and handed to Popen safely.
    cmd = list(map(str, cmd))
    LOG.debug("Running command: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           env=env)
    _stdout, _stderr = (process_input and
                        obj.communicate(process_input) or
                        obj.communicate())
    obj.stdin.close()
    m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
          "Stderr: %(stderr)r") % {'cmd': cmd,
                                   'code': obj.returncode,
                                   'stdout': _stdout,
                                   'stderr': _stderr}
    LOG.debug(m)
    if obj.returncode and check_exit_code:
        raise RuntimeError(m)
    return return_stderr and (_stdout, _stderr) or _stdout
def create_vrrp_port(self, object_id, network_id, label='VRRP'):
    """Create a Neutron port for VRRP/LB/MGT use.

    :param object_id: ID of the resource the port belongs to.
    :param network_id: network on which to create the port.
    :param label: port role; used in the port name and to decide
        whether addressing/port-security are disabled.
    :returns: a Port model for the created port.
    :raises ValueError: when Neutron returns no port data.
    """
    body = {
        'admin_state_up': True,
        'network_id': network_id,
        'name': 'ASTARA:%s:%s' % (label, object_id),
        'security_groups': [],
    }
    unaddressed_types = (
        constants.ASTARA_SERVICE_PORT_TYPES +
        constants.ASTARA_MGT_PORT_TYPES
    )
    if label in unaddressed_types:
        body['fixed_ips'] = []
        # disable port_securty on VRRP, LB, MGT
        if self.conf.neutron_port_security_extension_enabled:
            body['port_security_enabled'] = False
    port_data = self.api_client.create_port({'port': body}).get('port')
    if not port_data:
        raise ValueError(_(
            'Unable to create %s port for %s on network %s') % (
            label, object_id, network_id)
        )
    return Port.from_dict(port_data)
def __init__(self, id_, name, tenant_id, network_id, ip_version,
             cidr, gateway_ip, enable_dhcp, dns_nameservers,
             host_routes, ipv6_ra_mode):
    """Initialize the subnet model from raw attribute values.

    An unparseable CIDR raises ValueError; an unparseable gateway is
    only logged and leaves gateway_ip as None.
    """
    self.id = id_
    self.name = name
    self.tenant_id = tenant_id
    self.network_id = network_id
    self.ip_version = ip_version
    try:
        self.cidr = netaddr.IPNetwork(cidr)
    except (TypeError, netaddr.AddrFormatError) as e:
        # A bad CIDR makes the subnet unusable; refuse to build it.
        raise ValueError(
            _('Invalid CIDR %r for subnet %s of network %s: %s') % (
                cidr, id_, network_id, e,
            ))
    try:
        self.gateway_ip = netaddr.IPAddress(gateway_ip)
    except (TypeError, netaddr.AddrFormatError) as e:
        # A bad/missing gateway is tolerated; record it and continue.
        self.gateway_ip = None
        LOG.info(_LI('Bad gateway_ip on subnet %s: %r (%s)'),
                 id_, gateway_ip, e)
    self.enable_dhcp = enable_dhcp
    self.dns_nameservers = dns_nameservers
    self.host_routes = host_routes
    self.ipv6_ra_mode = ipv6_ra_mode
def run(self, ip_address, port=cfg.CONF.astara_metadata_port):
    """Run the MetadataProxy.

    :param ip_address: the ip address to bind to for incoming requests
    :param port: the port to bind to for incoming requests
    :returns: returns nothing
    """
    app = MetadataProxyHandler()
    # The bind address may not be configured on the host yet when the
    # service starts; retry up to five times with a growing back-off.
    for i in six.moves.range(5):
        LOG.info(_LI('Starting the metadata proxy on %s:%s'),
                 ip_address, port)
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
        except socket.error as err:
            # errno 99 == EADDRNOTAVAIL: address not yet available.
            if err.errno != 99:
                raise
            LOG.warning(_LW('Could not create metadata proxy socket: %s'),
                        err)
            LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
            eventlet.sleep(i + 1)
        else:
            break
    else:
        # All retries were exhausted without a successful bind.
        raise RuntimeError(
            _('Could not establish metadata proxy socket on %s:%s') %
            (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool, log=LOG)
def _build_flow_expr_arr(self, **kwargs): flow_expr_arr = [] is_delete_expr = kwargs.get('delete', False) if not is_delete_expr: prefix = ( "hard_timeout=%s,idle_timeout=%s,priority=%s" % (kwargs.get('hard_timeout', '0'), kwargs.get('idle_timeout', '0'), kwargs.get('priority', '1'))) flow_expr_arr.append(prefix) elif 'priority' in kwargs: raise Exception(_("Cannot match priority on flow deletion")) in_port = ('in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or '') dl_type = ('dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or '') dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" % kwargs['dl_vlan'] or '') dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or '' dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or '' nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or '' nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or '' tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or '' proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or '' ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or '' match = (in_port + dl_type + dl_vlan + dl_src + dl_dst + (ip or proto) + nw_src + nw_dst + tun_id) if match: match = match[1:] # strip leading comma flow_expr_arr.append(match) return flow_expr_arr
def get_router_detail(self, router_id):
    """Fetch one router, with its networks, via the L3 plugin RPC.

    :param router_id: ID of the router to look up.
    :returns: a Router model built from the plugin response.
    :raises RouterGone: when the plugin no longer knows the router.
    """
    routers = self.l3_rpc_client.get_routers(router_id=router_id)
    try:
        return Router.from_dict(routers[0])
    except IndexError:
        raise RouterGone(_('the router is no longer available'))
def __init__(self, hosts, replicas=1):
    """Create a new hash ring across the specified hosts.

    :param hosts: an iterable of hosts which will be mapped.
    :param replicas: number of hosts to map to each hash partition,
        or len(hosts), which ever is lesser.
        Default: 1
    :raises Invalid: when hosts is not a usable iterable.
    """
    try:
        self.hosts = set(hosts)
        # NOTE: use the materialized set's length -- a one-shot
        # iterator argument is already exhausted here and has no len().
        num_hosts = len(self.hosts)
        self.replicas = replicas if replicas <= num_hosts else num_hosts
    except TypeError:
        raise Invalid(_("Invalid hosts supplied when building HashRing."))
    self._host_hashes = {}
    # NOTE: iterate self.hosts, not the raw argument that set() above
    # may already have consumed (an iterator argument used to yield an
    # empty ring silently).
    for host in self.hosts:
        key = str(host).encode('utf8')
        key_hash = hashlib.md5(key)
        # Each host claims 2**hash_partition_exponent ring points by
        # repeatedly folding its key into the running digest.
        for p in range(2**CONF.hash_partition_exponent):
            key_hash.update(key)
            hashed_key = self._hash2int(key_hash)
            self._host_hashes[hashed_key] = host
    # Gather the (possibly colliding) resulting hashes into a bisectable
    # list.
    self._partitions = sorted(self._host_hashes.keys())
def update_config(self, management_address, config):
    """Push the supplied configuration to the managed appliance.

    :param management_address: appliance management address.
    :param config: the configuration to apply.
    """
    self.log.info(_('Updating config for %s'), self.name)
    astara_client.update_config(
        management_address, self.mgt_port, config)
def _get_partition(self, data):
    """Map data onto an index into the sorted partition list.

    The ring is circular: a hash past the final partition wraps to
    index 0.

    :raises Invalid: when data cannot be hashed.
    """
    try:
        if six.PY3 and data is not None:
            data = data.encode('utf-8')
        digest = self._hash2int(hashlib.md5(data))
        idx = bisect.bisect(self._partitions, digest)
        if idx < len(self._partitions):
            return idx
        return 0  # wrapped past the last partition
    except TypeError:
        raise Invalid(_("Invalid data supplied to HashRing.get_hosts."))
def add_flow(self, **kwargs):
    """Install a flow on the bridge via 'ovs-ofctl add-flow'.

    :raises Exception: when no actions are supplied.
    """
    if "actions" not in kwargs:
        raise Exception(_("Must specify one or more actions"))
    kwargs.setdefault("priority", "0")
    pieces = self._build_flow_expr_arr(**kwargs)
    pieces.append("actions=%s" % (kwargs["actions"]))
    self.run_ofctl("add-flow", [",".join(pieces)])
def _as_root(self, options, command, args, use_root_namespace=False):
    """Run an ip command with root privileges.

    :param use_root_namespace: when True, run in the root namespace
        instead of this device's namespace.
    :raises Exception: when no root helper is configured.
    """
    if not self.root_helper:
        raise Exception(_('Sudo is required to run this command'))
    ns = None if use_root_namespace else self.namespace
    return self._execute(options, command, args, self.root_helper, ns)
def _get_partition(self, data):
    """Locate the ring partition index for data (wrapping to 0).

    :raises Invalid: when data is not hashable text/bytes.
    """
    try:
        if six.PY3 and data is not None:
            data = data.encode('utf-8')
        position = bisect.bisect(self._partitions,
                                 self._hash2int(hashlib.md5(data)))
        return 0 if position >= len(self._partitions) else position
    except TypeError:
        raise Invalid(
            _("Invalid data supplied to HashRing.get_hosts."))
def _proxy_request(self, instance_id, req):
    """Proxy a signed HTTP request to an instance.

    The instance id is signed (via _sign_instance_id) so the upstream
    metadata service can trust the forwarded identity headers.

    :param instance_id: ID of the Instance being proxied to
    :param req: The webob.Request to handle
    :returns: returns a valid HTTP Response or Error
    """
    headers = {
        'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
        'X-Instance-ID': instance_id,
        'X-Instance-ID-Signature': self._sign_instance_id(instance_id),
        'X-Tenant-ID': req.headers.get('X-Tenant-ID')
    }
    # Re-target the incoming path and query string at the configured
    # nova metadata service.
    url = urlparse.urlunsplit((
        'http',
        '%s:%s' % (cfg.CONF.nova_metadata_ip,
                   cfg.CONF.nova_metadata_port),
        req.path_info,
        req.query_string,
        ''))
    h = httplib2.Http()
    resp, content = h.request(url, headers=headers)
    # Translate the upstream status into our response.
    if resp.status == 200:
        LOG.debug(str(resp))
        return content
    elif resp.status == 403:
        msg = _LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.'
        )
        LOG.warning(msg)
        return webob.exc.HTTPForbidden()
    elif resp.status == 404:
        return webob.exc.HTTPNotFound()
    elif resp.status == 500:
        msg = _LW('Remote metadata server experienced an'
                  ' internal server error.')
        LOG.warning(msg)
        return webob.exc.HTTPInternalServerError(
            explanation=six.text_type(msg))
    else:
        # Any other status is unexpected -- surface it loudly.
        raise Exception(_('Unexpected response code: %s') % resp.status)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
    """Remove the device's tap port from the OVS bridge.

    Falls back to the configured integration bridge when none is
    given; failures are logged rather than raised.
    """
    bridge = bridge or self.conf.ovs_integration_bridge
    tap_name = self._get_tap_name(device_name, prefix)
    self.check_bridge_exists(bridge)
    ovs_bridge = ovs_lib.OVSBridge(bridge, self.root_helper)
    try:
        ovs_bridge.delete_port(tap_name)
        if self.conf.ovs_use_veth:
            ip_lib.IPDevice(device_name, self.root_helper,
                            namespace).link.delete()
        LOG.debug(_("Unplugged interface '%s'"), device_name)
    except RuntimeError:
        LOG.exception(_LE("Failed unplugging interface '%s'"),
                      device_name)
def _proxy_request(self, instance_id, req):
    """Proxy a signed HTTP request to an instance.

    :param instance_id: ID of the Instance being proxied to
    :param req: The webob.Request to handle
    :returns: returns a valid HTTP Response or Error
    """
    forwarded_headers = {
        'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
        'X-Instance-ID': instance_id,
        'X-Instance-ID-Signature': self._sign_instance_id(instance_id),
        'X-Tenant-ID': req.headers.get('X-Tenant-ID')
    }
    target = urlparse.urlunsplit(
        ('http',
         '%s:%s' % (cfg.CONF.nova_metadata_ip,
                    cfg.CONF.nova_metadata_port),
         req.path_info,
         req.query_string,
         ''))
    response, body = httplib2.Http().request(
        target, headers=forwarded_headers)
    if response.status == 200:
        LOG.debug(str(response))
        return body
    if response.status == 403:
        msg = _LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.')
        LOG.warning(msg)
        return webob.exc.HTTPForbidden()
    if response.status == 404:
        return webob.exc.HTTPNotFound()
    if response.status == 500:
        msg = _LW('Remote metadata server experienced an'
                  ' internal server error.')
        LOG.warning(msg)
        return webob.exc.HTTPInternalServerError(
            explanation=six.text_type(msg))
    raise Exception(_('Unexpected response code: %s') % response.status)
def create_vrrp_port(self, object_id, network_id, label='VRRP'):
    """Create a VRRP-style Neutron port on the given network.

    :returns: a Port model for the created port.
    :raises ValueError: when Neutron returns no port data.
    """
    body = {
        'admin_state_up': True,
        'network_id': network_id,
        'name': 'ASTARA:%s:%s' % (label, object_id),
        'security_groups': [],
    }
    if label in constants.ASTARA_SERVICE_PORT_TYPES:
        body['fixed_ips'] = []
        # disable port_securty on VRRP
        if self.conf.neutron_port_security_extension_enabled:
            body['port_security_enabled'] = False
    port_data = self.api_client.create_port({'port': body}).get('port')
    if not port_data:
        raise ValueError(
            _('Unable to create %s port for %s on network %s') %
            (label, object_id, network_id))
    return Port.from_dict(port_data)
def run(self, ip_address, port=cfg.CONF.astara_metadata_port):
    """Run the MetadataProxy.

    :param ip_address: the ip address to bind to for incoming requests
    :param port: the port to bind to for incoming requests
    :returns: returns nothing
    """
    app = MetadataProxyHandler()
    # The bind address may not exist yet at service start; retry the
    # bind up to five times with a growing back-off.
    for i in six.moves.range(5):
        LOG.info(_LI(
            'Starting the metadata proxy on %s:%s'),
            ip_address, port
        )
        try:
            sock = eventlet.listen(
                (ip_address, port),
                family=socket.AF_INET6,
                backlog=128
            )
        except socket.error as err:
            # errno 99 == EADDRNOTAVAIL: address not yet configured.
            if err.errno != 99:
                raise
            LOG.warning(
                _LW('Could not create metadata proxy socket: %s'), err)
            LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
            eventlet.sleep(i + 1)
        else:
            break
    else:
        # All retries were exhausted without a successful bind.
        raise RuntimeError(
            _('Could not establish metadata proxy socket on %s:%s') %
            (ip_address, port)
        )
    eventlet.wsgi.server(
        sock,
        app,
        custom_pool=self.pool,
        log=loggers.WritableLogger(LOG))
def run(self, ip_address, port):
    """Serve the rug-api WSGI application on ip_address:port.

    Binds IPv6 when the address parses as IPv6, IPv4 otherwise, and
    retries the bind while the address is still being configured.
    """
    app = RugAPI()
    try:
        socket.inet_pton(socket.AF_INET6, ip_address)
        family = socket.AF_INET6
    except Exception:
        # Not parseable as IPv6; assume IPv4.
        family = socket.AF_INET
    for i in six.moves.range(5):
        LOG.info(_LI(
            'Starting the rug-api on %s:%s'),
            ip_address, port,
        )
        try:
            sock = eventlet.listen(
                (ip_address, port),
                family=family,
                backlog=128
            )
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW('Could not create rug-api socket: %s'), err)
            LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
            eventlet.sleep(i + 1)
        else:
            break
    else:
        # All retries were exhausted without a successful bind.
        raise RuntimeError(_(
            'Could not establish rug-api socket on %s:%s') %
            (ip_address, port)
        )
    eventlet.wsgi.server(
        sock,
        app,
        custom_pool=self.pool,
        log=LOG)
    # (continuation of sub-command parser setup begun above this chunk)
    parser.add_argument('revision', nargs='?')
    parser.add_argument('--mysql-engine',
                        default='',
                        help='Change MySQL storage engine of current '
                             'existing tables')
    parser.set_defaults(func=do_upgrade)

    # 'stamp' records a revision in the DB without running migrations.
    parser = add_alembic_subparser(subparsers, 'stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser.set_defaults(func=do_stamp)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help=_('Available commands'),
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)


def get_alembic_config():
    """Load alembic.ini from the directory containing this module."""
    config = alembic_config.Config(
        os.path.join(os.path.dirname(__file__), 'alembic.ini'))
    return config


def main():
    """CLI entry point: parse config, then attach it for alembic use."""
    CONF(project='astara-orchestrator')
    config = get_alembic_config()
    config.astara_config = CONF
from oslo_log import log
import tooz
from tooz import coordination as tz_coordination

from astara import event as ak_event
from astara.common.i18n import _, _LI

LOG = log.getLogger(__name__)
CONF = cfg.CONF

# Options for the external (tooz-based) coordination service used to
# cluster astara-orchestrator nodes.
COORD_OPTS = [
    cfg.BoolOpt('enabled', default=False,
                help=_('Whether to use an external coordination service to '
                       'a cluster of astara-orchestrator nodes. This may be '
                       'disabled for astara-orchestrator node environments.')),
    # NOTE(review): 'suppoted' typo in this user-visible help string --
    # fix in a behavior-affecting change, not here.
    cfg.StrOpt('url',
               default='memcached://localhost:11211',
               help=_('URL of suppoted coordination service')),
    cfg.StrOpt('group_id', default='astara.orchestrator',
               help=_('ID of coordination group to join.')),
    cfg.IntOpt('heartbeat_interval', default=1,
               help=_('Interval (in seconds) for cluster heartbeats')),
]
CONF.register_group(cfg.OptGroup(name='coordination'))
CONF.register_opts(COORD_OPTS, group='coordination')
from astara.common.i18n import _

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base

# Database-layer configuration for the SQLAlchemy models below.
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

_DEFAULT_SQL_CONNECTION = 'sqlite:///astara.db'

cfg.CONF.register_opts(sql_opts, 'database')
# NOTE(review): 'ironic.sqlite' looks copied from Ironic -- confirm the
# intended default sqlite filename for astara.
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    """Return per-table engine/charset kwargs (MySQL only, else None)."""
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {
            'mysql_engine': cfg.CONF.database.mysql_engine,
            'mysql_charset': "utf8"
        }
    # Implicitly returns None for non-MySQL backends.
    # (continuation of sub-command parser setup begun above this chunk)
    parser.add_argument('revision', nargs='?')
    parser.add_argument('--mysql-engine',
                        default='',
                        help='Change MySQL storage engine of current '
                             'existing tables')
    parser.set_defaults(func=do_upgrade)

    # 'stamp' records a revision in the DB without running migrations.
    parser = add_alembic_subparser(subparsers, 'stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser.set_defaults(func=do_stamp)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help=_('Available commands'),
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)


def get_alembic_config():
    """Load alembic.ini from the directory containing this module."""
    config = alembic_config.Config(os.path.join(os.path.dirname(__file__),
                                   'alembic.ini'))
    return config


def main():
    """CLI entry point: parse config, then attach it for alembic use."""
    CONF(project='astara-orchestrator')
    config = get_alembic_config()
    config.astara_config = CONF
from astara.common.i18n import _

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base

# Database-layer configuration for the SQLAlchemy models below.
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

_DEFAULT_SQL_CONNECTION = 'sqlite:///astara.db'

cfg.CONF.register_opts(sql_opts, 'database')
# NOTE(review): 'ironic.sqlite' looks copied from Ironic -- confirm the
# intended default sqlite filename for astara.
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    """Return per-table engine/charset kwargs (MySQL only, else None)."""
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {'mysql_engine': cfg.CONF.database.mysql_engine,
                'mysql_charset': "utf8"}
    return None
CONF = cfg.CONF

# Neutron-facing configuration for the orchestrator.
neutron_opts = [
    cfg.StrOpt('management_network_id'),
    cfg.StrOpt('management_subnet_id'),
    cfg.StrOpt('management_prefix', default='fdca:3ba5:a17a:acda::/64'),
    cfg.IntOpt('astara_mgt_service_port', default=5000),
    # NOTE(review): StrOpt with an int default -- confirm whether the
    # flavor is expected as a string or an ID.
    cfg.StrOpt('default_instance_flavor', default=1),
    cfg.StrOpt('interface_driver',
               default='astara.common.linux.interface.OVSInterfaceDriver'),
    cfg.BoolOpt('neutron_port_security_extension_enabled', default=True),

    # legacy_fallback option is deprecated and will be removed in the
    # N-release
    cfg.BoolOpt('legacy_fallback_mode', default=True,
                help=_('Check for resources using the Liberty naming scheme '
                       'when the modern name does not exist.'))
]
CONF.register_opts(neutron_opts)

# copied from Neutron source: device_owner values Neutron attaches to
# ports, used to recognize router/astara-owned ports.
DEVICE_OWNER_ROUTER_MGT = "network:router_management"
DEVICE_OWNER_ROUTER_INT = "network:router_interface"
DEVICE_OWNER_ROUTER_HA_INT = "network:ha_router_replicated_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_RUG = "network:astara"

PLUGIN_ROUTER_RPC_TOPIC = 'q-l3-plugin'

# Neutron resource status values.
STATUS_ACTIVE = 'ACTIVE'
STATUS_BUILD = 'BUILD'
CONF = cfg.CONF

# Neutron-facing configuration for the orchestrator.
neutron_opts = [
    cfg.StrOpt('management_network_id'),
    cfg.StrOpt('management_subnet_id'),
    cfg.StrOpt('management_prefix', default='fdca:3ba5:a17a:acda::/64'),
    cfg.IntOpt('astara_mgt_service_port', default=5000),
    # NOTE(review): StrOpt with an int default -- confirm whether the
    # flavor is expected as a string or an ID.
    cfg.StrOpt('default_instance_flavor', default=1),
    cfg.StrOpt('interface_driver',
               default='astara.common.linux.interface.OVSInterfaceDriver'),
    cfg.BoolOpt('neutron_port_security_extension_enabled', default=True),

    # legacy_fallback option is deprecated and will be removed in the
    # N-release
    cfg.BoolOpt('legacy_fallback_mode', default=True,
                help=_('Check for resources using the Liberty naming scheme '
                       'when the modern name does not exist.'))
]
CONF.register_opts(neutron_opts)

# copied from Neutron source: device_owner values Neutron attaches to
# ports, used to recognize router/astara-owned ports.
DEVICE_OWNER_ROUTER_MGT = "network:router_management"
DEVICE_OWNER_ROUTER_INT = "network:router_interface"
DEVICE_OWNER_ROUTER_HA_INT = "network:ha_router_replicated_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_RUG = "network:astara"

PLUGIN_ROUTER_RPC_TOPIC = 'q-l3-plugin'

# Neutron resource status values.
STATUS_ACTIVE = 'ACTIVE'
# License for the specific language governing permissions and limitations # under the License. import threading from oslo_config import cfg from astara.common.i18n import _ from astara.pez import pool CONF = cfg.CONF PEZ_OPTIONS = [ cfg.IntOpt('pool_size', default=1, help=_('How many pre-allocated hot standby nodes to keep ' 'in the pez pool.')), # NOTE(adam_g): We should consider how these get configured for when # we support multiple drivers. {router, lbaas}_image_uuid? cfg.StrOpt('image_uuid', help=_('Image uuid to boot.')), cfg.StrOpt('flavor', help=_('Nova flavor to boot')), cfg.StrOpt('rpc_topic', default='astara-pez'), ] CONF.register_group(cfg.OptGroup(name='pez')) CONF.register_opts(PEZ_OPTIONS, group='pez') CONF.import_opt('host', 'astara.main') CONF.import_opt('management_network_id', 'astara.api.neutron')
import tooz
from tooz import coordination as tz_coordination

from astara import event as ak_event
from astara.common.i18n import _, _LI

LOG = log.getLogger(__name__)
CONF = cfg.CONF

# Options for the external (tooz-based) coordination service used to
# cluster astara-orchestrator nodes.
COORD_OPTS = [
    cfg.BoolOpt('enabled', default=False,
                help=_('Whether to use an external coordination service to '
                       'a cluster of astara-orchestrator nodes. This may be '
                       'disabled for astara-orchestrator node environments.')),
    # NOTE(review): 'suppoted' typo in this user-visible help string --
    # fix in a behavior-affecting change, not here.
    cfg.StrOpt('url',
               default='memcached://localhost:11211',
               help=_('URL of suppoted coordination service')),
    cfg.StrOpt('group_id', default='astara.orchestrator',
               help=_('ID of coordination group to join.')),
    cfg.IntOpt('heartbeat_interval', default=1,
               help=_('Interval (in seconds) for cluster heartbeats')),
]
CONF.register_group(cfg.OptGroup(name='coordination'))
CONF.register_opts(COORD_OPTS, group='coordination')


class InvalidEventType(Exception):
    """Raised for cluster event types this module does not recognize."""
    pass
from oslo_log import log as logging

from astara.common.i18n import _, _LE, _LW
from astara.common.linux import ip_lib
from astara.common.linux import ovs_lib
from astara.common.linux import utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Options for wiring appliance ports onto the OVS integration bridge.
OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use')),
    cfg.BoolOpt('ovs_use_veth',
                default=False,
                help=_('Uses veth for an interface or not')),
    # NOTE(review): MTU declared as StrOpt rather than IntOpt -- confirm
    # consumers expect a string here.
    cfg.StrOpt('network_device_mtu',
               help=_('MTU setting for device.')),
]
CONF.register_opts(OPTS)

# Privilege-escalation command used for network operations.
AGENT_OPTIONS = [
    cfg.StrOpt('root_helper',
               default='sudo astara-rootwrap /etc/astara/rootwrap.conf'),
]
CONF.register_group(cfg.OptGroup(name='AGENT'))
CONF.register_opts(AGENT_OPTIONS, 'AGENT')
# License for the specific language governing permissions and limitations # under the License. import threading from oslo_config import cfg from astara.common.i18n import _ from astara.pez import pool CONF = cfg.CONF PEZ_OPTIONS = [ cfg.IntOpt('pool_size', default=1, help=_('How many pre-allocated hot standby nodes to keep ' 'in the pez pool.')), # NOTE(adam_g): We should consider how these get configured for when # we support multiple drivers. {router, lbaas}_image_uuid? cfg.StrOpt('image_uuid', help=_('Image uuid to boot.')), cfg.StrOpt('flavor', help=_('Nova flavor to boot')), cfg.StrOpt('rpc_topic', default='astara-pez'), ] CONF.register_group(cfg.OptGroup(name='pez')) CONF.register_opts(PEZ_OPTIONS, group='pez')
def check_bridge_exists(self, bridge):
    """Raise when the named bridge device is absent on this host."""
    if ip_lib.device_exists(bridge):
        return
    raise Exception(_('Bridge %s does not exist') % bridge)