class AddVN(VNAction):
    description = 'Add virtual-network'
    external = Option(default=False, action="store_true")
    shared = Option(default=False, action="store_true")

    def __call__(self, project_fqname=None, virtual_network_name=None,
                 external=False, shared=False):
        vn_fqname = '%s:%s' % (project_fqname, virtual_network_name)
        # fetch project to sync it from keystone if not already there
        project = Resource('project', fq_name=project_fqname, fetch=True)
        vn = Resource('virtual-network', fq_name=vn_fqname, parent=project,
                      router_external=external, is_shared=shared)
        vn.save()
        if external:
            fip_pool = Resource('floating-ip-pool',
                                fq_name='%s:%s' % (vn_fqname, 'floating-ip-pool'),
                                parent_type='virtual-network',
                                parent_uuid=vn.uuid)
            fip_pool.save()
class TestCmd(Command):
    paths = Arg(help="%(default)s", default="bar")
    long = Option('-l', action="store_true")
    foo = Option(help="foo")
    bar = Option(nargs="*")

    def __call__(self, *args, **kwargs):
        pass
class CheckCommand(Command):
    """Inherit from this class to add `--check` and `--dry-run` options.

    Option values are stored in `self.check` and `self.dry_run`
    (True or False).
    """
    check = Option('-c', default=False, action="store_true")
    dry_run = Option('-n', default=False, action="store_true",
                     help='run this command in dry-run mode')

    def __call__(self, dry_run=None, check=None, **kwargs):
        self.dry_run = dry_run
        self.check = check
        super(CheckCommand, self).__call__(**kwargs)
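# A minimal sketch of how a concrete command typically consumes the
# check/dry-run flags provided by CheckCommand: report in --check mode,
# announce in --dry-run mode, and only mutate otherwise. The CleanWidget
# command and its _find_stale_widgets/_delete helpers are hypothetical and
# only illustrate the intended control flow.
class CleanWidget(CheckCommand):
    description = "Clean stale widgets (illustrative only)"

    def __call__(self, **kwargs):
        super(CleanWidget, self).__call__(**kwargs)
        for widget in self._find_stale_widgets():  # hypothetical helper
            if self.check:
                printo('[check] stale widget %s' % widget)
            elif self.dry_run:
                printo('[dry-run] would delete %s' % widget)
            else:
                self._delete(widget)  # hypothetical helper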
class GetSubnets(Subnet):
    description = 'Get virtual-network subnets'
    virtual_network_fqnames = Option(
        nargs='+',
        required=True,
        help='List of VN fqnames (eg: default-domain:admin:net)')

    def __call__(self, virtual_network_fqnames=None):
        res = []
        for virtual_network_fqname in virtual_network_fqnames:
            try:
                vn = Resource('virtual-network',
                              fq_name=virtual_network_fqname,
                              fetch=True)
                ipam_subnets = self._get_network_ipam_subnets(vn)
                if ipam_subnets:
                    res.append({
                        'virtual_network_fqname': virtual_network_fqname,
                        'cidrs': ['%s/%s' % (s['subnet']['ip_prefix'],
                                             s['subnet']['ip_prefix_len'])
                                  for s in ipam_subnets]
                    })
            except ResourceNotFound:
                pass
        return json.dumps(res, indent=2)
class TestCmd2(Command):
    long = Option('-l', action="store_true")
    arg1 = Arg(help="%(default)s", default="bar")
    arg2 = Arg(nargs="*")

    def __call__(self, *args, **kwargs):
        pass
class Dot(Command):
    """Command to create a dot file from a list of paths.

    The command will automatically add to the graph the parent, refs
    and back_refs of the given resources::

        contrail-api-cli dot path/to/res1 path/to/res2 -f /tmp/output.dot

    The `-e` option can be used to exclude resources from the graph.
    """
    description = "Create a dot file representing provided resources"
    filename_output = Option('-f', help="Output Dot filename", required=True)
    exclude_resource_type = Option('-e', help="Exclude resource types",
                                   action="append", default=[],
                                   dest='excludes')
    paths = Arg(help="Resource URL", metavar='path', nargs="*")

    def __call__(self, paths=None, filename_output=None, excludes=[]):
        resources = expand_paths(paths,
                                 predicate=lambda r: isinstance(r, Resource))
        graph = nx.Graph()

        # For each provided resource, an edge is created from the
        # resource to each of its refs, back_refs and parent.
        for r in resources:
            print "%s %s" % (short_name(r.path.name), r.path)
            r.fetch()
            graph.add_node(r.path, _node_rendering(r.path, r))

            paths = [t.path for t in itertools.chain(r.refs, r.back_refs)]
            try:
                paths.append(r.parent.path)
            except ResourceMissing:
                pass
            for p in paths:
                if p.base in excludes:
                    continue
                print "%s %s" % (short_name(p.name), p)
                graph.add_node(p, _node_rendering(p))
                graph.add_edge(r.path, p)

        print "Dot file written to %s" % filename_output
        write_dot(graph, filename_output)
class FixFIPLocks(ZKCommand, CheckCommand, PathCommand):
    description = "Add missing locks for FIPs"
    public_fqname = Option(help="Public network fqname",
                           required=True,
                           complete="resources:virtual-network:fq_name")

    @property
    def resource_type(self):
        return "floating-ip"

    def log(self, message, fip):
        printo('[%s] %s' % (fip.uuid, message))

    def _get_subnet_for_ip(self, ip, subnets):
        for subnet in subnets:
            if ip in subnet:
                return subnet

    def _zk_node_for_ip(self, ip, subnet):
        return '/api-server/subnets/%s:%s/%i' % (self.public_fqname, subnet, ip)

    def _check_fip(self, fip, subnets):
        try:
            fip.fetch()
        except ResourceNotFound:
            return
        ip = IPAddress(fip.get('floating_ip_address'))
        subnet = self._get_subnet_for_ip(ip, subnets)
        if subnet is None:
            self.log('No subnet found for FIP %s' % ip, fip)
            return
        zk_node = self._zk_node_for_ip(ip, subnet)
        if not self.zk_client.exists(zk_node):
            self.log('Lock not found', fip)
            # only create the missing lock outside of check and dry-run modes
            if self.check is not True and self.dry_run is not True:
                self.zk_client.create(zk_node)

    def __call__(self, public_fqname=None, **kwargs):
        super(FixFIPLocks, self).__call__(**kwargs)
        self.public_fqname = public_fqname
        public_vn = Resource('virtual-network', fq_name=public_fqname,
                             fetch=True)
        subnets = []
        for s in public_vn['network_ipam_refs'][0]['attr']['ipam_subnets']:
            subnets.append(IPNetwork('%s/%s' % (s['subnet']['ip_prefix'],
                                                s['subnet']['ip_prefix_len'])))
        parallel_map(self._check_fip, self.resources, args=(subnets,),
                     workers=50)
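# Standalone illustration of the Zookeeper lock path _zk_node_for_ip()
# builds for a floating IP; the fqname and addresses below are made up.
from netaddr import IPAddress, IPNetwork

example_public_fqname = 'default-domain:admin:public'
example_subnet = IPNetwork('203.0.113.0/24')
example_fip = IPAddress('203.0.113.10')
example_zk_node = '/api-server/subnets/%s:%s/%i' % (
    example_public_fqname, example_subnet, int(example_fip))
# example_zk_node == '/api-server/subnets/default-domain:admin:public/203.0.113.0/24/3405803786'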
class AddVRouter(VRouter):
    description = 'Add vrouter'
    vrouter_ip = Option(help='IP of compute node',
                        type=ip_type,
                        required=True)
    vrouter_type = Option(help='vrouter type',
                          choices=['tor-service-mode', 'embedded'],
                          default=None)

    def __call__(self, vrouter_ip=None, vrouter_name=None, vrouter_type=None):
        global_config = Resource('global-system-config',
                                 fq_name='default-global-system-config')
        vrouter = Resource('virtual-router',
                           fq_name='default-global-system-config:%s' % vrouter_name,
                           parent=global_config,
                           virtual_router_ip_address=vrouter_ip)
        if vrouter_type:
            vrouter['virtual_router_type'] = [vrouter_type]
        vrouter.save()
class AddSAS(SAS):
    description = 'Add service appliance set'
    driver = Option(required=True,
                    help='driver python module path')

    def __call__(self, appliance_set_name=None, driver=None):
        global_config = Resource('global-system-config',
                                 fq_name='default-global-system-config')
        sas = Resource('service-appliance-set',
                       fq_name='default-global-system-config:%s' % appliance_set_name,
                       parent=global_config,
                       service_appliance_driver=driver)
        sas.save()
class RPF(PathCommand):
    """Simple command to enable or disable RPF (Reverse Path Forwarding)
    on a VN.

    To check whether RPF is enabled run::

        contrail-api-cli rpf virtual-network/uuid

    To enable or disable RPF add the ``--on`` or ``--off`` option.
    """
    description = 'enable/disable RPF on network'
    on = Option(action='store_true', help='Enable RPF')
    off = Option(action='store_true', help='Disable RPF')

    @property
    def resource_type(self):
        return 'virtual-network'

    def __call__(self, on=False, off=False, **kwargs):
        super(RPF, self).__call__(**kwargs)
        for vn in self.resources:
            vn.fetch()
            if 'virtual_network_properties' not in vn:
                vn['virtual_network_properties'] = {
                    "allow_transit": None,
                    "forwarding_mode": None,
                    "network_id": vn.get('virtual_network_id', None),
                    "vxlan_network_identifier": None
                }
            if off:
                vn['virtual_network_properties']['rpf'] = 'disable'
                vn.save()
            elif on:
                vn['virtual_network_properties']['rpf'] = None
                vn.save()
            else:
                status = 'on' if vn['virtual_network_properties'].get('rpf') is None else 'off'
                printo("%s : %s" % (self.current_path(vn), status))
class AddConfig(Config):
    description = 'Add config node'
    config_ip = Option(help='IP of config node',
                       type=ip_type,
                       required=True)

    def __call__(self, config_name=None, config_ip=None):
        global_config = Resource('global-system-config',
                                 fq_name='default-global-system-config')
        config = Resource('config-node',
                          fq_name='default-global-system-config:%s' % config_name,
                          parent=global_config,
                          config_node_ip_address=config_ip)
        config.save()
class AddAnalytics(Analytics):
    description = 'Add analytics node'
    analytics_ip = Option(help='IP of analytics node',
                          type=ip_type,
                          required=True)

    def __call__(self, analytics_name=None, analytics_ip=None):
        global_config = Resource('global-system-config',
                                 fq_name='default-global-system-config')
        analytics = Resource('analytics-node',
                             fq_name='default-global-system-config:%s' % analytics_name,
                             parent=global_config,
                             analytics_node_ip_address=analytics_ip)
        analytics.save()
class Linklocal(Command):
    service_name = Option(help='Linklocal service name', required=True)
    service_ip = Option(help='Linklocal service IP',
                        type=ip_type,
                        required=True)
    service_port = Option(help='Linklocal service port',
                          type=int,
                          required=True)
    fabric_dns_service_name = Option(help='DNS service name in the fabric')
    fabric_service_ip = Option(help='Service IP in the fabric', type=ip_type)
    fabric_service_port = Option(help='Service port in the fabric',
                                 type=int,
                                 required=True)

    def __call__(self, service_name=None, service_ip=None, service_port=None,
                 fabric_dns_service_name=None, fabric_service_ip=None,
                 fabric_service_port=None):
        if not fabric_dns_service_name and not fabric_service_ip:
            raise CommandError(
                '--fabric_dns_service_name or --fabric_service_ip required')
        self.linklocal_entry = {
            'ip_fabric_DNS_service_name': fabric_dns_service_name,
            'ip_fabric_service_ip': [],
            'ip_fabric_service_port': fabric_service_port,
            'linklocal_service_ip': service_ip,
            'linklocal_service_port': service_port,
            'linklocal_service_name': service_name
        }
        if fabric_service_ip:
            self.linklocal_entry['ip_fabric_service_ip'].append(
                fabric_service_ip)
        try:
            self.vrouter_config = Resource(
                'global-vrouter-config',
                fq_name='default-global-system-config:default-global-vrouter-config',
                fetch=True)
        except ResourceNotFound:
            global_config = Resource(
                'global-system-config',
                fq_name='default-global-system-config')
            self.vrouter_config = Resource(
                'global-vrouter-config',
                fq_name='default-global-system-config:default-global-vrouter-config',
                parent=global_config)
            self.vrouter_config.save()
class DNSNameserver(Command):
    network_ipam_fqname = Option(
        metavar='fqname',
        help='Network IPAM fqname (default: %(default)s)',
        default='default-domain:default-project:default-network-ipam')

    def __call__(self, network_ipam_fqname=None):
        self.ipam = Resource('network-ipam',
                             fq_name=network_ipam_fqname,
                             fetch=True)
        if 'network_ipam_mgmt' not in self.ipam:
            self.ipam['network_ipam_mgmt'] = {
                'ipam_dns_method': 'tenant-dns-server',
                'ipam_dns_server': {
                    'tenant_dns_server_address': {
                        'ip_address': []
                    }
                }
            }
class SetSubnets(Subnet):
    description = 'Set subnets to virtual-network'
    virtual_network_fqname = Option(
        required=True,
        help='VN fqname (eg: default-domain:admin:net)')
    cidrs = Arg(nargs="+", metavar='CIDR',
                help='subnet CIDR',
                type=network_type,
                default=[])

    def __call__(self, virtual_network_fqname=None, cidrs=None):
        vn = Resource('virtual-network',
                      fq_name=virtual_network_fqname,
                      fetch=True)
        cidrs = [netaddr.IPNetwork(cidr) for cidr in cidrs]
        ipam_subnets = self._get_network_ipam_subnets(vn)
        ipam_subnets_current = [{
            'subnet': {
                'ip_prefix': s['subnet']['ip_prefix'],
                'ip_prefix_len': s['subnet']['ip_prefix_len']
            }
        } for s in ipam_subnets]
        ipam_subnets_wanted = [{
            'subnet': {
                'ip_prefix': text_type(c.network),
                'ip_prefix_len': c.prefixlen
            }
        } for c in cidrs]

        modified = False
        # iterate in reverse so that deleting by index doesn't shift the
        # positions of the remaining subnets
        for idx, subnet in reversed(list(enumerate(ipam_subnets_current))):
            if subnet not in ipam_subnets_wanted:
                del ipam_subnets[idx]
                modified = True
        for subnet in ipam_subnets_wanted:
            if subnet not in ipam_subnets_current:
                ipam_subnets.append(subnet)
                modified = True
        if modified:
            vn.save()
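# Standalone sketch of the diff SetSubnets computes between the subnets
# currently attached to the VN and the CIDRs passed on the command line
# (the sample data is made up).
example_current = [{'subnet': {'ip_prefix': '10.0.0.0', 'ip_prefix_len': 24}},
                   {'subnet': {'ip_prefix': '10.0.1.0', 'ip_prefix_len': 24}}]
example_wanted = [{'subnet': {'ip_prefix': '10.0.1.0', 'ip_prefix_len': 24}},
                  {'subnet': {'ip_prefix': '192.168.0.0', 'ip_prefix_len': 16}}]

example_to_remove = [s for s in example_current if s not in example_wanted]
example_to_add = [s for s in example_wanted if s not in example_current]
# example_to_remove -> the 10.0.0.0/24 entry, example_to_add -> the 192.168.0.0/16 entry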
class ZKCommand(Command):
    """Inherit from this class when a connection to the Zookeeper cluster
    is needed.

    This will add a `--zk-server` option to the command. The ZK client is
    available in `self.zk_client`.
    """
    zk_server = Option(help="zookeeper server (default: %(default)s)",
                       type=server_type,
                       default='localhost:2181')

    def __call__(self, zk_server=None, **kwargs):
        handler = SequentialGeventHandler()
        self.zk_client = KazooClient(hosts=zk_server, timeout=1.0,
                                     handler=handler)
        try:
            self.zk_client.start()
        except handler.timeout_exception:
            raise CommandError("Can't connect to Zookeeper at %s" % zk_server)
        super(ZKCommand, self).__call__(**kwargs)
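# Minimal standalone sketch of the kazoo calls the ZK-based commands rely
# on (exists/create/get_children); the server address, path and lock
# payload below are illustrative only.
from kazoo.client import KazooClient

def example_ensure_lock(hosts='localhost:2181'):
    zk = KazooClient(hosts=hosts)
    zk.start()
    node = '/api-server/subnets/default-domain:admin:net/10.0.0.0/24/167772165'
    if not zk.exists(node):
        # makepath creates the intermediate nodes, as the fix commands expect
        zk.create(node, value=b'lock-payload', makepath=True)
    children = zk.get_children('/api-server/subnets')
    zk.stop()
    return children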
class OrphanedACL(Command): """Removes stale ACLs. ACL is considered as stale if it has no parent:: contrail-api-cli --ns contrail_api_cli.ns clean-orphaned-acl --cassandra-servers <ip1> <ip2> .. note:: Because of an API server limitation the ACLs are removed directly from the cassandra cluster. Thus, the cassandra cluster nodes IPs must be provided. """ description = "Clean all ACLs that don't have any parent" force = Option('-f', help="Delete orphan ACL (default: %(default)s)", default=False, action="store_true") parent_type = Option( help="Parent type the ACL should have (default: %(default)s)", choices=['security-group', 'virtual-network'], default='security-group') cassandra_servers = Option( help="Cassandra server list' (default: %(default)s)", nargs='+', type=server_type, default=['localhost:9160']) def __call__(self, force=False, parent_type=None, cassandra_servers=None): valid_acl = [] parents = Collection(parent_type, fetch=True, recursive=2) for parent in parents: if 'access_control_lists' in parent.keys(): valid_acl += [ acl['uuid'] for acl in parent['access_control_lists'] ] valid_acl = list(set(valid_acl)) orphaned_acls = set([]) # Due to a bug in contrail API, we cannot list more than 10000 elements # on a resource and there is no way to list ACL by tenant. # So that ugly hack directly fetch all ACL UUIDs from the cassandra database :( pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers) fqname_cf = ColumnFamily(pool, 'obj_fq_name_table') for key, value in fqname_cf.xget('access_control_list'): acl_uuid = decode_string(key).split(':')[-1] if acl_uuid in valid_acl: continue acl = Resource('access-control-list', uuid=acl_uuid, fetch=True) if ('parent_uuid' in acl.keys() and 'parent_type' in acl.keys() and acl['parent_type'] == parent_type and acl.uuid not in valid_acl): try: parent_acl = acl.parent except ResourceNotFound: msg = ("The %s parent ACL %s was not found." % (parent_type.replace('-', ' '), acl['parent_uuid'])) if force: msg = msg + " Delete orphan ACL %s." % acl.uuid acl.delete() logger.debug(msg) orphaned_acls.add(acl['uuid']) else: logger.debug( "The ACL %(acl)s have a %(parent_type)s %(parent_acl)s which exists but \ was not found in the precedent %(parent_type)s list. Not delete it." % { 'acl': acl, 'parent_type': parent_type.replace('-', ' '), 'parent_acl': parent_acl }) if force: logger.debug("%d orphaned ACL were deleted" % len(orphaned_acls)) else: logger.debug("Found %d orphaned ACL to delete" % len(orphaned_acls))
class FixVnId(ZKCommand, CheckCommand): """Compare and fix virtual network IDs in Zookeeper and the API server. Checks that the ZK lock for a VN has the correct index. Checks also that the VN has a lock in ZK. To check all VNs run:: contrail-api-cli fix-vn-id --check To fix all VNs or a particular VN run:: contrail-api-cli fix-vn-id [--dry-run] [vn_uuid] """ description = "Fix the virtual network Zookeeper locks" yes = Option('-y', action='store_true', help='Assume Yes to all queries and do not prompt') vn_paths = Arg(nargs='*', metavar='vn_paths', help='List of VN. If no path is provided, all VNs are considered') def fix(self, vn, dry_run=True): if vn['reason'] == "nolock": lock = vn['resource']["virtual_network_network_id"] if vn['reason'] == "badlock": lock = self.indexes.get_available_index() self.indexes.create(lock, vn['resource'], dry_run) if vn['reason'] == "badlock": resource = vn["resource"] resource["virtual_network_network_id"] = lock try: resource["virtual_network_properties"]["network_id"] = lock except KeyError: pass if dry_run: print "[dry_run] ", print "%s Set VN ID to %s" % (resource.path, lock) if not dry_run: resource.save() def generate(self, vn_paths): result = [] if vn_paths == []: vns = Collection("virtual-network", fetch=True, detail=True) else: vns = expand_paths(vn_paths) for vn in vns: vn.fetch() for r in vns: nid = r["virtual_network_network_id"] try: zk_data, _ = self.zk_client.get(ZK_BASEPATH + "/" + to_zk_index(nid)) except kazoo.exceptions.NoNodeError: result.append({"reason": "nolock", "nid": nid, "path": r.path, "api-fqname": text_type(r.fq_name), "resource": r}) continue if "%s" % zk_data.decode('utf-8') != "%s" % r.fq_name: result.append({"reason": "badlock", "nid": nid, "path": r.path, "api-fqname": text_type(r.fq_name), "zk-fqname": zk_data, "resource": r}) return result def __call__(self, vn_paths=None, yes=False, **kwargs): super(FixVnId, self).__call__(**kwargs) if (not yes and not self.dry_run and not self.check and not continue_prompt("Do you really want to repair virtual networks?")): print "Exiting." exit() self.indexes = None result = self.generate(vn_paths) self.indexes = Indexes(self.zk_client) for r in result: if r['reason'] == "nolock": print "No lock for %(path)s with VN Id %(nid)6s" % r if r['reason'] == "badlock": print "Bad lock for %(path)s with VN Id %(nid)6s zk-fqname: %(zk-fqname)s ; api-fqname: %(api-fqname)s" % r if not self.check: self.fix(r, dry_run=self.dry_run)
class Fsck(Command): description = 'Checks and optionally clean API inconsistencies' gremlin_server = Option( default=os.environ.get('GREMLIN_FSCK_SERVER', 'localhost:8182'), help='host:port of gremlin serveri (default: %(default)s)') checks = Option(help='Name of checks to run', nargs='*', choices=[n[6:] for n, o in avail_checks], default=[n[6:] for n, o in avail_checks], metavar='check') tests = Option(help='Name of tests to run', nargs='*', choices=[n[5:] for n, o in avail_tests] + ['all'], default=[], metavar='test') clean = Option(help='Run cleans (default: %(default)s)', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_CLEAN', 0)))) loop = Option(help='Run in loop (default: %(default)s)', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_LOOP', 0)))) loop_interval = Option( help='Interval between loops in seconds (default: %(default)s)', default=os.environ.get('GREMLIN_FSCK_LOOP_INTERVAL', 60 * 5), type=float) json = Option(help='Output logs in json', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_JSON', 0)))) zk_server = Option(help="Zookeeper server (default: %(default)s)", default=os.environ.get('GREMLIN_FSCK_ZK_SERVER', 'localhost:2181')) def _check_by_name(self, name): c = None for n, check in avail_checks: if not name == n[6:]: continue else: c = check if c is None: raise CommandError("Can't find %s check method" % name) return c def _test_by_name(self, name): for n, test in avail_tests: if name == n[5:]: return test def _clean_by_name(self, name): c = None for n, clean in avail_cleans: if not name == n[6:]: continue else: c = clean break if c is None: raise CommandError("Can't find %s clean method" % name) return c def __call__(self, gremlin_server=None, checks=None, tests=None, clean=False, loop=False, loop_interval=None, json=False, zk_server=False): if clean: CommandManager().load_namespace('contrail_api_cli.clean') utils.JSON_OUTPUT = json utils.ZK_SERVER = zk_server self.gremlin_server = gremlin_server if tests: self.run_tests(tests) else: if loop is True: self.run_loop(checks, clean, loop_interval) else: self.run(checks, clean) def get_traversal(self): graph = Graph() try: # take only non deleted resources return graph.traversal().withRemote( DriverRemoteConnection( 'ws://%s/gremlin' % self.gremlin_server, 'g')).withStrategies( SubgraphStrategy(vertices=__.has('deleted', 0))) except (HTTPError, socket.error) as e: raise CommandError('Failed to connect to Gremlin server: %s' % e) def run_loop(self, checks, clean, loop_interval): while True: self.run(checks, clean) gevent.sleep(loop_interval) def run_tests(self, tests): utils.log('Running tests...') graph = Graph() g = graph.traversal().withRemote( DriverRemoteConnection('ws://%s/gremlin' % self.gremlin_server, 'g')) if 'all' in tests: tests = [n[5:] for n, _ in avail_tests] for test_name in tests: test_func = self._test_by_name(test_name) try: g.V().drop().iterate() test_func(g) except AssertionError as e: utils.log("Test %s failed: %s" % (test_name, e)) sys.exit(1) def run(self, checks, clean): g = self.get_traversal() utils.log('Running checks...') start = time.time() for check_name in checks: check_func = self._check_by_name(check_name) r = check_func(g) if len(r) > 0: if clean is False: continue try: clean_func = self._clean_by_name(check_name) except CommandError: continue utils.log('Cleaning...') try: clean_func(r) except (Exception, NotFound) as e: utils.log('Clean failed: %s' % text_type(e)) else: utils.log('Clean done') end = time.time() - start 
utils.log('Checks done in %ss' % end)
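# The check/test/clean routines used by Fsck are looked up by naming
# convention: avail_checks, avail_tests and avail_cleans are assumed to be
# lists of (function_name, function) pairs whose "check_"/"test_"/"clean_"
# prefixes are stripped (n[6:], n[5:]) to form the names exposed on the
# command line. A hedged sketch of a check/clean pair that would fit this
# convention; the duplicate-ip rule and its traversal are made up.
def check_duplicate_ip(g):
    # illustrative traversal; a real check returns the offending resources
    return g.V().hasLabel('instance_ip').limit(10).toList()

def clean_duplicate_ip(duplicates):
    # a real clean would resolve whatever the matching check reported
    for duplicate in duplicates:
        pass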
class CleanRT(CheckCommand, ZKCommand, PathCommand): """Removes stale route-targets. RTs that are not linked to a logical-router or a routing-instance are considered as staled and will be removed. If a ZK lock exists for the RT it will be removed:: contrail-api-cli --ns contrail_api_cli.clean clean-route-target --zk-server <ip> [route-target/uuid] If no route-target path is provided all RTs are considered. ``--check`` and ``--dry-run`` options are available. You can exclude RT from the cleaning process:: contrail-api-cli --ns contrail_api_cli.clean clean-route-target --exclude <RT_FQNAME> --exclude <RT_FQNAME> [...] """ exclude = Option('-e', action="append", default=[], help="Exclude RT from the clean procedure") description = "Clean stale route targets" @property def resource_type(self): return "route-target" def log(self, message, rt): printo('[%s] %s' % (rt.uuid, message)) def _get_rt_id(self, rt): return int(rt['name'].split(':')[-1]) def _get_zk_node(self, rt_id): return '/id/bgp/route-targets/%010d' % rt_id def _ensure_lock(self, rt): rt_id = self._get_rt_id(rt) # No locks created for rt_id < 8000000 if rt_id < 8000000: return zk_node = '/id/bgp/route-targets/%010d' % rt_id if not self.zk_client.exists(zk_node): if rt.get('logical_router_back_refs'): fq_name = rt['logical_router_back_refs'][0].fq_name else: # FIXME: can't determine routing-instance for route-target # don't create any lock return if not self.dry_run: self.zk_client.create(zk_node, text_type(fq_name).encode('utf-8')) self.log("Added missing ZK lock %s" % zk_node, rt) def _clean_rt(self, rt): try: if not self.dry_run: rt.delete() self.log("Removed RT %s" % rt.path, rt) rt_id = self._get_rt_id(rt) zk_node = self._get_zk_node(rt_id) if self.zk_client.exists(zk_node): if not self.dry_run: self.zk_client.delete(zk_node) self.log("Removed ZK lock %s" % zk_node, rt) except ResourceNotFound: pass def _check_rt(self, rt): try: rt.fetch() except ResourceNotFound: return if not rt.get('routing_instance_back_refs') and not rt.get( 'logical_router_back_refs'): if text_type(rt.fq_name) in self.exclude: self.log('RT %s staled [excluded]' % rt.fq_name, rt) else: self.log('RT %s staled' % rt.fq_name, rt) if self.check is not True: self._clean_rt(rt) else: self._ensure_lock(rt) def __call__(self, exclude=None, **kwargs): super(CleanRT, self).__call__(**kwargs) self.exclude = exclude parallel_map(self._check_rt, self.resources, workers=50)
class ManageRT(Command): """Command to manage VN custom RTs """ description = "Manage VN custom RTs" path = Arg(help="path", metavar='path', complete="resources:virtual-network:path") action = Option('-a', choices=['add', 'delete', 'show'], default='show', help="Type of action (default: %(default)s)") mode = Option('-m', choices=['import', 'export', 'import_export'], default='import_export', help="Specify type of RT (default: %(default)s)") name = Option( help="Name of the RT to create/delete (eg: target:219.0.0.1:1)") @property def resource_type(self): return 'virtual-network' def show(self, vn): vns = [vn.uuid] import_export_list = vn.get('route_target_list', {}).get('route_target', []) import_list = vn.get('import_route_target_list', {}).get('route_target', []) export_list = vn.get('export_route_target_list', {}).get('route_target', []) table = [ ["VN", "import/export", "import", "export"], ] def add_row(vn, import_export_rt, import_rt, export_rt): table.append([ vn if vn else "", import_export_rt if import_export_rt else "", import_rt if import_rt else "", export_rt if export_rt else "" ]) map(add_row, vns, import_export_list, import_list, export_list) printo(format_table(table)) def add(self, vn, mode, name): prop = mode_map[mode] if prop not in vn: vn[prop] = {'route_target': []} if name in vn[prop]['route_target']: raise CommandError('RT %s already added' % name) vn[prop]['route_target'].append(name) vn.save() printo('RT %s added to VN' % name) def delete(self, vn, mode, name): prop = mode_map[mode] rt_list = vn.get(prop, {}).get('route_target', []) try: rt_list.remove(name) except ValueError: printo('RT %s not found on VN' % name) return vn[prop]['route_target'] = rt_list vn.save() printo('RT %s deleted from VN' % name) @require_schema(version='>= 3') def __call__(self, path=None, mode=None, action=None, name=None): if not action == 'show' and not name: raise CommandError("--name is required") vn = expand_paths([path], predicate=lambda r: r.type == 'virtual-network')[0] vn.fetch() if action == 'show': self.show(vn) elif action == 'add': self.add(vn, mode, name) elif action == 'delete': self.delete(vn, mode, name)
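# The mode_map used by ManageRT is not shown in this excerpt; judging from
# the properties read in show(), it presumably maps each --mode value to
# the corresponding VN property (assumed reconstruction, kept commented out
# because the real definition lives elsewhere in the module):
#
#     mode_map = {
#         'import_export': 'route_target_list',
#         'import': 'import_route_target_list',
#         'export': 'export_route_target_list',
#     }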
class AddBGPRouter(BGPRouter): description = "Add BgpRouter to the API server" router_ip = Option(help="BGP router IP", type=ip_type) router_port = Option(help="BGP port (default: %(default)s)", type=port_type, default=179) router_asn = Option(type=RouteTargetAction.asn_type, default=64512, help="Autonomous System Number (default: %(default)s)") router_type = Option( default='contrail', help="BGP router type ('contrail' for Contrail control \ nodes and '<WHATEVER>' for non Contrail BGP \ routers) (default: %(default)s)") router_address_families = Option(nargs='+', help="Address family list \ (default: %(default)s)", choices=ADDRESS_FAMILIES, default=ADDRESS_FAMILIES) router_md5 = Option(default=None, type=md5_type, help="MD5 authentication (default: %(default)s)") def __call__(self, router_name=None, router_ip=None, router_port=None, router_asn=None, router_address_families=[], router_type=None, router_md5=None): default_ri = Resource('routing-instance', fq_name=DEFAULT_RI_FQ_NAME, check=True) router_fq_name = DEFAULT_RI_FQ_NAME + [router_name] bgp_router = Resource('bgp-router', fq_name=router_fq_name) if bgp_router.exists: raise CommandError("The BGP router %s already exists" % FQName(router_fq_name)) if router_type != 'contrail' and 'erm-vpn' in router_address_families: router_address_families.remove('erm-vpn') auth_data = None if router_md5: auth_data = { 'key_items': [{ 'key': router_md5, 'key_id': 0, }], 'key_type': 'md5', } router_parameters = { 'address': router_ip, 'address_families': { 'family': router_address_families, }, 'autonomous_system': router_asn, 'identifier': router_ip, 'port': router_port, 'vendor': router_type, 'auth_data': auth_data, } # full-mesh with existing BGP routers bgp_router_refs = [] for bgp_router_neighbor in Collection('bgp-router', parent_uuid=default_ri.uuid, fetch=True): bgp_router_refs.append({ 'to': bgp_router_neighbor.fq_name, 'attr': { 'session': [{ 'attributes': [{ 'address_families': { 'family': router_address_families, }, 'auth_data': auth_data, }], }], }, }) bgp_router = Resource('bgp-router', fq_name=router_fq_name, parent=default_ri, bgp_router_parameters=router_parameters, bgp_router_refs=bgp_router_refs) bgp_router.save()
class FixSg(CheckCommand, PathCommand):
    """Fix multiple default security groups on projects.

    Sometimes several default security groups are created for a project,
    while only one should exist. When that happens, some of the extra
    groups don't have the right project name in their fq_name; those
    security groups are not legitimate.

    The check mode of this script detects these security groups and marks
    each default security group of a tenant as good (legitimate) or bad
    (illegitimate). If any bad security group exists the script returns 1,
    otherwise it returns 0.

    In normal and dry-run modes the script tries to delete unused bad
    default security groups. "Unused" means no VMIs are attached to them.

    To run the command::

        contrail-api-cli fix-sg [project/uuid]

    If no project path is provided all projects are considered.
    """
    description = "Fix default security group that shouldn't belong to a project"
    yes = Option('-y', action='store_true',
                 help='Assume Yes to all queries and do not prompt')

    @property
    def resource_type(self):
        return "project"

    def _handle_sg(self, status, sg, delete=True):
        print " %s SG: %s %s" % (status, sg.uuid, sg.fq_name)
        if not self.check:
            used = False
            sg.fetch()
            for vmi in sg.back_refs.virtual_machine_interface:
                used = True
                print " Used by VMI %s" % vmi.uuid
            if not used and delete:
                if not self.dry_run:
                    print " Deleting SG %s ..." % sg.uuid
                    sg.delete()
                else:
                    print " [dry-run] Deleting SG %s ..." % sg.uuid

    def __call__(self, paths=None, yes=False, **kwargs):
        super(FixSg, self).__call__(**kwargs)
        if (not yes and not self.dry_run and not self.check and
                not continue_prompt(
                    "Some SGs will be deleted. Are you sure to continue?")):
            raise CommandError("Exiting.")
        bad_sg_exists = False
        for r in self.resources:
            r.fetch()
            fq_name = r.fq_name
            if r.children.security_group:
                bad_sg = []
                good_sg = []
                for sg in r.children.security_group:
                    if sg.fq_name[2] == 'default':
                        if not FQName(sg.fq_name[0:2]) == fq_name:
                            bad_sg.append(sg)
                        else:
                            good_sg.append(sg)
                if bad_sg != []:
                    bad_sg_exists = True
                    print "Tenant %s %s" % (r.uuid, r.fq_name)
                    for sg in bad_sg:
                        self._handle_sg("Bad ", sg)
                    for sg in good_sg:
                        self._handle_sg("Good", sg, delete=False)
        if self.check and bad_sg_exists:
            raise CommandError()
class CheckBadRefs(CheckCommand): """Check for broken references. The command will read all objects from the cassandra DB then for each object check that references exists in the API. References includes refs, back refs, children, parents. To run the command: contrail-api-cli check-bad-refs --cassandra-servers db:9160 [uuids...] """ description = "Check for broken references" uuids = Arg(help="check specific uuids", nargs="*", default=[]) cassandra_servers = Option( help="cassandra server list' (default: %(default)s)", nargs='+', type=server_type, default=['localhost:9160']) force = Option('-f', help="force deletion of incomplete resources", action="store_true", default=False) def _props_to_json(self, values): if self.is_piped: return json.dumps(values, cls=PropertiesEncoder, indent=4) return highlight_json( json.dumps(values, cls=PropertiesEncoder, indent=4)) def _get_current_resource(self, uuid, values): try: r_type = json.loads(values['type']).replace('_', '-') return Resource(r_type, uuid=uuid) except KeyError: printo("[%s] incomplete, no type" % uuid) return False def _check_ref(self, ref, uuid): _, ref_type, ref_uuid = ref.split(':') try: Resource(ref_type.replace('_', '-'), uuid=ref_uuid, check=True) return False except ResourceNotFound: printo("[%s] broken ref to missing %s" % (uuid, ref)) return True def _check_resource_refs(self, uuid, values): ref_attrs = ('ref:', 'backref:', 'children:', 'parent:') to_check = [] for key, _ in values.items(): if key.startswith(ref_attrs): to_check.append(key) results = parallel_map(self._check_ref, to_check, args=(uuid, ), workers=20) return any(results) def _delete(self, uuid_cf, uuid): if not self.dry_run: uuid_cf.remove(uuid) printo("[%s] deleted" % uuid) def __call__(self, uuids=None, cassandra_servers=None, force=False, **kwargs): super(CheckBadRefs, self).__call__(**kwargs) self.force = force pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers) uuid_cf = ColumnFamily(pool, 'obj_uuid_table') if uuids: def uuids_g(): for uuid in uuids: yield uuid else: def uuids_g(): for k, v in uuid_cf.get_range(column_count=1, filter_empty=True): yield k for uuid in uuids_g(): values = dict(uuid_cf.xget(uuid)) res = self._get_current_resource(uuid, values) bad_refs = self._check_resource_refs(uuid, values) if not res or bad_refs: printo(self._props_to_json(values)) if not res and not self.check: if self.force or continue_prompt(message="Delete ?"): self._delete(uuid_cf, uuid)
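# The cassandra column names inspected by _check_resource_refs are expected
# to look like "<kind>:<type>:<uuid>"; _check_ref simply splits on ":".
# Standalone illustration with a made-up uuid:
example_ref = 'ref:virtual_network:0c6f4276-24a5-4a54-9a44-61bd2e86b0a4'
example_kind, example_type, example_uuid = example_ref.split(':')
# example_type.replace('_', '-') -> 'virtual-network', which is then checked
# against the API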
class Fsck(Command): description = 'Checks and optionally clean API inconsistencies' gremlin_server = Option(default=os.environ.get('GREMLIN_FSCK_SERVER', 'localhost:8182'), help='host:port of gremlin serveri (default: %(default)s)') checks = Option(help='Name of checks to run', nargs='*', choices=[n[6:] for n, o in avail_checks], default=[n[6:] for n, o in avail_checks], metavar='check') clean = Option(help='Run cleans (default: %(default)s)', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_CLEAN', 0)))) loop = Option(help='Run in loop (default: %(default)s)', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_LOOP', 0)))) loop_interval = Option(help='Interval between loops in seconds (default: %(default)s)', default=os.environ.get('GREMLIN_FSCK_LOOP_INTERVAL', 60 * 5), type=float) json = Option(help='Output logs in json', action='store_true', default=bool(int(os.environ.get('GREMLIN_FSCK_JSON', 0)))) zk_server = Option(help="Zookeeper server (default: %(default)s)", default=os.environ.get('GREMLIN_FSCK_ZK_SERVER', 'localhost:2181')) def _check_by_name(self, name): c = None for n, check in avail_checks: if not name == n[6:]: continue else: c = check if c is None: raise CommandError("Can't find %s check method" % name) return c def _clean_by_name(self, name): c = None for n, clean in avail_cleans: if not name == n[6:]: continue else: c = clean if c is None: raise CommandError("Can't find %s clean method" % name) return c def __call__(self, gremlin_server=None, checks=None, clean=False, loop=False, loop_interval=None, json=False, zk_server=False): utils.JSON_OUTPUT = json utils.ZK_SERVER = zk_server self.gremlin_server = gremlin_server if loop is True: self.run_loop(checks, clean, loop_interval) else: self.run(checks, clean) def get_graph(self): time_point = int(time.time()) - 5 * 60 graph = Graph() try: # take only resources updated at least 5min ago and not deleted return graph.traversal().withRemote( DriverRemoteConnection('ws://%s/gremlin' % self.gremlin_server, 'g') ).withStrategies( SubgraphStrategy(vertices=__.has('updated', lt(time_point)).has('deleted', 0)) ) except (HTTPError, socket.error) as e: raise CommandError('Failed to connect to Gremlin server: %s' % e) def run_loop(self, checks, clean, loop_interval): while True: self.run(checks, clean) gevent.sleep(loop_interval) def run(self, checks, clean): g = self.get_graph() utils.log('Running checks...') start = time.time() for check_name in checks: check = self._check_by_name(check_name) r = check(g) if len(r) > 0: if clean is False: continue try: clean = self._clean_by_name(check_name) except CommandError: continue utils.log('Cleaning...') try: clean(r) except CommandError as e: utils.log('Clean failed: %s' % text_type(e)) else: utils.log('Clean done') end = time.time() - start utils.log('Checks done in %ss' % end)
class PurgeProject(Command): """Command to purge a project. All related resources are deleted. .. warning:: This command is experimental and not fully tested. This command works recursively by first trying to remove the project. If other resources are linked to the project the API will return a 409 response with all linked resources. The command will then try to delete these resources and so on until the project resource can be deleted. Because of this no dry-run mode is possible. To run the command:: contrail-api-cli --ns contrail_api_cli.clean purge-project project/uuid """ description = "Purge contrail projects" nova_api_version = Option('-v', default="2.1") paths = Arg(nargs="+", help="path(s)", metavar='path', complete="resources:project:path") def _handle_si_vm(self, iip, vmi): # to cleanup SI we need to remove manually the VMs # since the si -> vm ref is derived it won't trigger # a BackRefsExists error in the backend iip.fetch() for vmi in iip.refs.virtual_machine_interface: vmi.fetch() if 'virtual_machine_interface_properties' not in vmi: continue for vm in vmi.refs.virtual_machine_refs: vm.fetch() for vm_vmi in vm.back_refs.virtual_machine_interface: vm_vmi.remove_ref(vm) self._delete(vm) self._delete(iip) def _remove_vm(self, vmi, parent): logger.debug("trying to remove vmi vms") # VMs are not linked to the project vmi.fetch() for vm in vmi.refs.virtual_machine: if continue_prompt("Nova VM %s will be deleted" % vm.uuid): printo("deleting nova VM %s" % vm.uuid) self.nclient.servers.delete(vm.uuid) else: raise CommandError("Exiting.") self._delete(vmi) def _remove_back_ref(self, resource, parent): printo("remove back ref from %s to %s" % (self.current_path(parent), self.current_path(resource))) parent.remove_back_ref(resource) def _delete(self, resource, parent=None): try: logger.debug("trying to delete %s" % self.current_path(resource)) resource.delete() printo("%s deleted" % self.current_path(resource)) except (ChildrenExists, BackRefsExists) as e: logger.debug("failed: %s" % e) action = self.actions[e.__class__].get( (resource.type, e.resources[0].type), self._delete) for r in e.resources: action(r, resource) self._delete(resource) except ResourceNotFound: pass def __call__(self, paths=None, nova_api_version=None): self.nclient = nclient.Client(nova_api_version, session=Context().session) self.actions = { BackRefsExists: { # we don't want to delete virtual-router objects ('virtual-machine', 'virtual-router'): self._remove_back_ref, ('virtual-machine-interface', 'instance-ip'): self._handle_si_vm, ('virtual-network', 'virtual-machine-interface'): self._remove_vm, ('security-group', 'virtual-machine-interface'): self._remove_vm, }, ChildrenExists: { ('project', 'virtual-machine-interface'): self._remove_vm, } } resources = expand_paths(paths, predicate=lambda r: r.type == 'project') if not continue_prompt( "Do you really want to purge theses projects ? All resources will be destroyed !" ): return for project in resources: self._delete(project)
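# Plain-Python sketch of the purge strategy used by PurgeProject: try to
# delete a resource and, when the backend reports dependants, handle or
# delete those first, then retry. The toy exception and dependency graph
# below are entirely made up; they only mirror the ChildrenExists /
# BackRefsExists flow.
class ExampleDependencyError(Exception):
    def __init__(self, resources):
        self.resources = resources

example_deps = {'project': ['vn'], 'vn': ['vmi'], 'vmi': []}
example_deleted = set()

def example_api_delete(name):
    remaining = [d for d in example_deps[name] if d not in example_deleted]
    if remaining:
        raise ExampleDependencyError(remaining)
    example_deleted.add(name)

def example_purge(name):
    try:
        example_api_delete(name)
    except ExampleDependencyError as exc:
        for dep in exc.resources:
            example_purge(dep)   # the real command may instead detach a back-ref
        example_purge(name)      # retry once the dependants are gone

# example_purge('project') deletes vmi, then vn, then project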
class FixZkIP(ZKCommand, CheckCommand, PathCommand): """Remove or add ZK locks based on the IPAM configuration. Sometimes, when an instance-ip or a floating-ip is created or deleted, its associated zookeeper node isn't managed properly. This led to situation where, no IPs are reserved from the Contrail API standpoint and nevertheless, you are not able to get one, or the same floating IP address is reserved for several users/tenants. This command list all zookeeper nodes for a given network (which may contain one or several subnet) and compare the nodes with the IPs found with the contrail API. For each IP found in zookeeper and not in the contrail API (abusive lock scenario), the command delete the associated zookeeper node. Then, for each IP found in API, and not in Zookeeper, the command creates the appropriate lock. Usage:: contrail-api-cli fix-zk-ip --zk-server <IP> [path/to/virtual-network] [--dry-run] If no virtual-network is given, all virtual-networks are considered. """ description = "Remove or add ZK locks based on the IPAM configuration" yes = Option('-y', action='store_true', help='Assume Yes to all queries and do not prompt') @property def resource_type(self): return "virtual-network" def _zk_node_for_subnet(self, fqname, subnet): return ('/api-server/subnets/%s:%s/%s' % (fqname, subnet.cidr.network, subnet.cidr.prefixlen)) def _zk_node_for_ip(self, vn, ip): logger.debug("check IP {0} in virtual network {1}".format(ip, vn)) subnets = self._get_vn_subnets(vn) ip_subnet = self._get_ip_subnet(subnets, ip) return ('/api-server/subnets/{0}:{1}/{2}/{3}'.format( vn.fq_name, ip_subnet.cidr.network, ip_subnet.cidr.prefixlen, int(ip))) def _get_vn_subnets(self, vn): if not vn.refs.network_ipam: raise SubnetNotFound() subnets = [] for ipam in vn.refs.network_ipam: ipam_subnets = ipam['attr']['ipam_subnets'] for s in ipam_subnets: prefix = s['subnet']['ip_prefix'] prefix_len = s['subnet']['ip_prefix_len'] cidr = IPNetwork('%s/%s' % (prefix, prefix_len)) gateway = dns = None if 'default_gateway' in s and s['default_gateway'] != '0.0.0.0': gateway = IPAddress(s['default_gateway']) if 'dns_server_address' in s: dns = IPAddress(s['dns_server_address']) subnets.append(Subnet(cidr, gateway, dns)) if not subnets: raise SubnetNotFound() return subnets def _get_ip_subnet(self, subnets, ip): for s in subnets: if ip in s.cidr: return s raise SubnetNotFound('No subnet found for IP %s' % ip) def get_zk_ip(self, vn): zk_ips = {} subnets = self._get_vn_subnets(vn) for s in subnets: zk_subnet_req = self._zk_node_for_subnet(vn.fq_name, s) for ip in self.zk_client.get_children(zk_subnet_req): try: zk_ip = IPAddress(int(ip)) except AddrFormatError: msg = ('{0} INVALID IP : zk_path={1}/{2}'.format( text_type(ip), zk_subnet_req, text_type(ip))) logger.warning(msg) print(msg) continue try: self._get_ip_subnet([s], zk_ip) except SubnetNotFound: msg = ('{0} OUT OF RANGE {1} : zk_path={2}/{3}'.format( text_type(zk_ip), text_type(s.cidr), zk_subnet_req, int(zk_ip))) logger.warning(msg) print(msg) continue zk_ips[zk_ip] = zk_subnet_req + '/' + text_type( int(zk_ip)).zfill(10) return zk_ips def get_api_ip(self, vn): api_ips = {} subnets = self._get_vn_subnets(vn) for subnet in subnets: api_ips[subnet.cidr.network] = vn api_ips[subnet.cidr.broadcast] = vn # Some networks do have dns and gateway IPs if subnet.gateway is not None: api_ips[subnet.gateway] = vn if subnet.dns is not None: api_ips[subnet.dns] = vn for iip in vn.back_refs.instance_ip: try: iip.fetch() except ResourceNotFound: continue try: iip_ip = 
IPAddress(iip['instance_ip_address']) api_ips[iip_ip] = iip except AddrFormatError: msg = ('{0} for virtual network {1} is not a ' 'valid IP address. unable to check it.'.format( iip_ip, vn.fq_name)) logger.info(msg) print(msg) for pool in vn.children.floating_ip_pool: try: pool.fetch() logger.debug(text_type(vn.fq_name) + " is a public network") except ResourceNotFound: continue for fip in pool.children.floating_ip: try: fip.fetch() except ResourceNotFound: continue try: fip_ip = IPAddress(fip['floating_ip_address']) api_ips[fip_ip] = fip except AddrFormatError: msg = ('{0} for virtual network {1} is not a ' 'valid IP address. unable to check it.'.format( fip_ip, vn.fq_name)) logger.info(msg) print(msg) continue return api_ips def create_znode(self, zk_req, data): if self.zk_client.exists(text_type(zk_req)): msg = ('{0} already exists'.format(zk_req)) logger.info(msg) print(msg) self.stats['miss_lock_fixed'] += 1 return if not self.dry_run: try: # FIXME: python3 self.zk_client.create(zk_req, value=str(data), makepath=True) self.stats['miss_lock_fixed'] += 1 except: self.stats['miss_lock_fix_failed'] += 1 msg = ('Unable to create zookeeper znode {0}'.format(zk_req, )) logger.exception(msg) raise CommandError(msg) def del_znode_ip(self, ip, zk_path): msg = ("Deleting zookeeper node %d for IP %s" % (int(ip), ip)) print(msg) if not self.dry_run: try: self.zk_client.delete(zk_path) self.stats['abusive_lock_fixed'] += 1 except: self.stats['abusive_lock_fix_failed'] += 1 raise CommandError('Unable to delete zookeeper znode for ip ' '%s with path %s' % (ip, zk_path)) def add_ip_lock(self, vn, ip, data_lock): try: zk_req = self._zk_node_for_ip(vn, ip) except SubnetNotFound: self.stats['miss_lock_fix_failed'] += 1 return msg = ('Creating zookeeper node %s for IP %s' % (zk_req, ip)) print(msg) if not self.dry_run: self.create_znode(zk_req, data_lock) def add_znode_ip(self, ip, resource): resource.fetch() vn = None if resource.type == 'floating-ip': fip_pool = resource.parent fip_pool.fetch() vn = fip_pool.parent vn.fetch() elif resource.type == 'instance-ip': vn = resource['virtual_network_refs'][0] vn.fetch() elif resource.type == 'virtual-network': vn = resource else: raise UnhandledResourceType('Unknow type for IP %s' % ip) assert vn is not None data_lock = vn.uuid self.add_ip_lock(vn, ip, data_lock) def check_tuple(self, api_ips, zk_ips): ips = {} ips_index = set(api_ips.keys()) | set(zk_ips.keys()) for ip in ips_index: zk_ok = False api_ok = False ips[ip] = {} if ip in zk_ips: ips[ip].update({'zk_path': zk_ips[ip]}) msg = (text_type(ip) + ' : ZOOKEEPER OK') logger.info(msg) zk_ok = True else: logger.info(text_type(ip) + ' : NOT FOUND IN ZOOKEEPER') self.stats['miss_lock'] += 1 if not self.check: try: self.add_znode_ip(ip, api_ips[ip]) except (ResourceNotFound, UnhandledResourceType) as e: msg = e.msg logger.warning(e) print(msg) if ip in api_ips: ips[ip].update({'resource': api_ips[ip]}) logger.info(text_type(ip) + ' : API OK') api_ok = True else: logger.info(text_type(ip) + ' : NOT FOUND IN API') self.stats['abusive_lock'] += 1 if not self.check: self.del_znode_ip(ip, zk_ips[ip]) if zk_ok and api_ok: self.stats['healthy_lock'] += 1 return def print_stats(self, resource): if (self.stats['miss_lock'] == 0 and self.stats['abusive_lock'] == 0): status = 'OK' else: status = 'KO' print('Status : %s' % status) if status == 'KO': print('Healthy locks : %d ' % self.stats['healthy_lock']) print('Missing locks : %d ' % self.stats['miss_lock']) if not self.dry_run: print('Fixed missing locks : %d' % 
self.stats['miss_lock_fixed']) print('Failed missing locks fix: %d' % self.stats['miss_lock_fix_failed']) print('Abusive locks : %d' % self.stats['abusive_lock']) if not self.dry_run: print('Fixed abusive locks : %d' % self.stats['abusive_lock_fixed']) print('Failed abusive locks fix: %d' % self.stats['abusive_lock_fix_failed']) print() def __call__(self, yes=False, **kwargs): super(FixZkIP, self).__call__(**kwargs) for r in self.resources: self.stats = { 'miss_lock': 0, 'miss_lock_fixed': 0, 'miss_lock_fix_failed': 0, 'abusive_lock': 0, 'abusive_lock_fixed': 0, 'abusive_lock_fix_failed': 0, 'healthy_lock': 0 } if (not yes and not self.dry_run and not self.check and not continue_prompt( "Do you really want to repair %s network?" % r.fq_name)): raise CommandError("Exiting.") try: r.fetch() except ResourceNotFound: continue print("Checking VN %s" % r.fq_name) try: api_ips = self.get_api_ip(r) zk_ips = self.get_zk_ip(r) self.check_tuple(api_ips, zk_ips) self.print_stats(r) except SubnetNotFound: print("No subnets found") print()
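# Minimal sketch of the reconciliation check_tuple() performs: IPs known to
# the API but missing in Zookeeper need a lock created, IPs locked in
# Zookeeper but unknown to the API need their znode removed (sample data is
# made up).
example_api_ips = {'10.0.0.3', '10.0.0.4'}
example_zk_ips = {'10.0.0.4', '10.0.0.9'}

example_missing_locks = example_api_ips - example_zk_ips   # created unless --check
example_abusive_locks = example_zk_ips - example_api_ips   # deleted unless --check
example_healthy_locks = example_api_ips & example_zk_ips
# -> {'10.0.0.3'}, {'10.0.0.9'}, {'10.0.0.4'} respectively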
class Provision(Command): description = 'Provision contrail environment' env_file = Arg(help='JSON file of environment to provision', type=argparse.FileType('r')) force = Option('-f', help="Don't ask for confirmation", default=False, action="store_true") @property def __doc__(self): # Hack to include rst doc with sections # not included by sphinx but can be view # with the man command. return sys.modules[__name__].__doc__ def _get_current_env(self, keys): """Build the current environment of the given resources (keys). For each resource we run "get-resource" or "list-resource" and populate the env. """ env = OrderedDict() for key in keys: if self._is_property(key): cmd = self._get_command(key, Actions.GET) else: cmd = self._get_command(key, Actions.LIST) try: env[key] = json.loads( self._call_command(cmd, defaults=self._provision_defaults.get( key, {}))) except CommandError as e: raise CommandError('Failed to get current values for %s: %s' % (key, e)) return env def _is_property(self, string): try: self.mgr.get('set-%s' % string) return True except CommandNotFound: return False def _get_add_command(self, key): if self._is_property(key): action = Actions.SET else: action = Actions.ADD return self._get_command(key, action) def _get_command(self, key, action): try: return self.mgr.get('%s-%s' % (action, key)) except CommandNotFound: raise CommandError("No command to %s %s" % (action, key)) def _get_command_args(self, cmd): argspec = inspect.getargspec(cmd.__call__) if len(argspec.args) > 1: return [arg for arg in argspec.args[1:]] return [] def _call_command(self, cmd, defaults={}): """Call a command. If the command needs arguments they have to be passed as a dict in the defaults kwarg. """ kwargs = {} for arg in self._get_command_args(cmd): kwargs[arg] = defaults.get(arg) logger.debug('Calling %s with %s' % (cmd, kwargs)) return cmd(**kwargs) def _normalize_env(self, env): """Normalize an input environement. Replace '-' by '_' in provisionning arg names. Make sure calls definitions are in lists. """ for key, value in env.items(): if type(value) == OrderedDict: value = dict(value.items()) if type(value) == dict: env[key] = [value] elif type(value) != list: raise CommandError( 'Unsupported provisioning data type in %s: %s' % (key, value)) for idx, call in enumerate(env[key]): env[key][idx] = self._normalize_keys(call) return env def _setup_defaults_values(self, env): for key, values in env.items(): for idx, kwargs in enumerate(values): defaults = copy.deepcopy(self._provision_defaults.get(key, {})) defaults.update(kwargs) env[key][idx] = defaults return env def _normalize_keys(self, values): new_values = {} for key, value in values.items(): new_values[key.replace('-', '_')] = value return new_values def _validate_env(self, env): """Given an env, validate that all arguments are consistent. """ for key, values in env.items(): for idx, call in enumerate(values): env[key][idx] = self._validate_call(key, call) return env def _validate_call(self, key, values): """Validate call parameters. The wanted env is globally structured as follow: { "provision": { "resource": [ { "arg": "value", "arg2": 34 } ], ... } } We try to get the provisioning method for "resource", which is "add-resource" or "set-resource". Then, given the arguments we validate them using the command parser and set default values where needed. 
""" cmd = self._get_add_command(key) # default args values from argparse parser for action in cmd.parser._actions: if action.dest == 'help': continue if isinstance(action, argparse._StoreConstAction): values[action.dest] = values.get(action.dest, action.default) else: arg_strings = values.get(action.dest, []) if type(arg_strings) != list: arg_strings = [arg_strings] try: values[action.dest] = cmd.parser._get_values( action, arg_strings) if not values[action.dest] and action.default: values[action.dest] = action.default except argparse.ArgumentError as e: raise CommandError('Error in %s: %s' % (key, text_type(e))) # remove unknown args from call for arg, value in copy.deepcopy(values.items()): if arg not in self._get_command_args(cmd): logger.debug('Unknown arg %s for %s. Ignored' % (arg, cmd.__call__)) del values[arg] return values def _diff_envs(self, current, wanted): """Make a diff of the current env and the wanted env. Compare only common resources between the envs. This allows to partially provision the env. If the wanted env has a bgp-router list we try to converge the current env bgp-router list. Removing bgp-routers not in the wanted list, adding bgp-routers not in the current list. """ diff_env = { Actions.SET: OrderedDict(), Actions.ADD: OrderedDict(), Actions.DEL: OrderedDict(), } for key, values in wanted.items(): add_values = [v for v in values if v not in current.get(key, [])] if add_values: if self._is_property(key): diff_env[Actions.SET][key] = add_values else: diff_env[Actions.ADD][key] = add_values for key, values in current.items(): if key not in wanted: continue if self._is_property(key): continue del_values = [v for v in values if v not in wanted[key]] if del_values: diff_env[Actions.DEL][key] = del_values return diff_env def _confirm_diff(self, diff, force=False): """Show actions to be made and ask for confirmation unless force is True. """ if not any( [True if diff[action] else False for action in Actions.APPLY]): printo('Nothing to do') return False for action in Actions.APPLY: if not diff[action]: continue printo("\n%s the resources :\n" % action.capitalize()) for key, values in diff[action].items(): printo('%s : %s\n' % (key, json.dumps(values, indent=2))) if force or continue_prompt(): return True else: return False def _apply_diff(self, diff): """Takes the generated diff and call methods to converge to the wanted env. First delete unwanted resources, then set wanted properties, finally add wanted resources. 
""" for action in Actions.APPLY: for key, values in diff.get(action, {}).items(): cmd = self._get_command(key, action) for kwargs in values: try: self._call_command(cmd, defaults=kwargs) except CommandError as e: raise CommandError('Call to %s %s failed: %s' % (action, key, e)) def __call__(self, env_file=None, force=False): env = json.load(env_file, object_pairs_hook=OrderedDict) self._provision_defaults = {} for key, defaults in env.get('defaults', {}).items(): self._provision_defaults[key] = self._normalize_keys(defaults) self.mgr = CommandManager() if env.get('namespace'): self.mgr.load_namespace(env.get('namespace')) logger.debug('Namespace %s loaded' % env['namespace']) wanted_env = env['provision'] wanted_env = self._normalize_env(wanted_env) wanted_env = self._setup_defaults_values(wanted_env) wanted_env = self._validate_env(wanted_env) current_env = self._get_current_env(wanted_env.keys()) current_env = self._normalize_env(current_env) current_env = self._setup_defaults_values(current_env) current_env = self._validate_env(current_env) diff_env = self._diff_envs(current_env, wanted_env) if self._confirm_diff(diff_env, force=force): self._apply_diff(diff_env)
class VN(Command):
    project_fqname = Option(required=True,
                            dest='project_fqname',
                            help='Project fqname (eg: default-domain:admin)')