class TestCmd2(Command):
    """Command declaring one flag and two positional args; calling it
    is a no-op.
    """

    long = Option('-l', action="store_true")
    arg1 = Arg(help="%(default)s", default="bar")
    arg2 = Arg(nargs="*")

    def __call__(self, *args, **kwargs):
        pass
class SetEncapsulation(Command):
    """Set the vrouter encapsulation priority list on the API server.

    Creates the ``global-vrouter-config`` resource under the global
    system config if it does not exist yet, then stores the modes in
    priority order.
    """
    description = 'Set vrouters encapsulation modes'
    modes = Arg(nargs='+',
                help='List of encapsulations modes by priority',
                choices=['MPLSoUDP', 'MPLSoGRE', 'VXLAN'])

    def __call__(self, modes=None):
        # Only three encapsulation types exist, so more than three
        # priorities cannot be meaningful.
        if len(modes) > 3:
            # Fixed grammar of the error message ("much" -> "many").
            raise CommandError('Too many modes provided')
        try:
            vrouter_config = Resource(
                'global-vrouter-config',
                fq_name=
                'default-global-system-config:default-global-vrouter-config',
                fetch=True)
        except ResourceNotFound:
            # First provisioning: create the vrouter config under the
            # global-system-config parent.
            global_config = Resource('global-system-config',
                                     fq_name='default-global-system-config')
            vrouter_config = Resource(
                'global-vrouter-config',
                fq_name=
                'default-global-system-config:default-global-vrouter-config',
                parent=global_config)
        vrouter_config['encapsulation_priorities'] = {'encapsulation': modes}
        vrouter_config.save()
class TestCmd(Command):
    """Command declaring a positional arg, a flag and two options;
    calling it is a no-op.
    """

    paths = Arg(help="%(default)s", default="bar")
    long = Option('-l', action="store_true")
    foo = Option(help="foo")
    bar = Option(nargs="*")

    def __call__(self, *args, **kwargs):
        pass
def __new__(cls, *args):
    """Create the command and attach a generated ``paths`` Arg.

    The Arg's help text and completion spec are derived from the
    command's ``resource_type``.

    NOTE(review): the Arg is assigned on the *class* (``cls.paths``),
    not the instance — presumably each command subclass is only
    instantiated once by the framework; confirm, since a later
    instantiation of a sibling subclass sharing the class would
    overwrite it.
    """
    cmd = super(PathCommand, cls).__new__(cls, *args)
    cls.paths = Arg(
        nargs="*",
        help="{type} path(s). "
        "When no path is provided "
        "all {type}s are considered.".format(type=cmd.resource_type),
        metavar='path',
        complete="resources:%s:path" % cmd.resource_type)
    return cmd
class DNSNameserverAction(DNSNameserver):
    """Base class for actions taking a list of DNS nameserver IPs."""

    # IPs are validated by ip_type at parse time.
    ips = Arg(nargs="+",
              metavar='nameserver',
              help='IPs of DNS servers',
              type=ip_type,
              default=[])

    def __call__(self, ips=None, network_ipam_fqname=None):
        # NOTE(review): ``ips`` is accepted here but NOT forwarded to the
        # parent __call__ — presumably subclasses use it after calling
        # super(); confirm against DNSNameserver and its subclasses.
        super(DNSNameserverAction,
              self).__call__(network_ipam_fqname=network_ipam_fqname)
class SetGlobalASN(Command):
    """Write a new autonomous system number into the global
    system config resource.
    """
    description = "Set the global ASN to the API server"
    asn = Arg(nargs='?',
              help="Autonomous System Number (default: %(default)s)",
              type=RouteTargetAction.asn_type,
              default=64512)

    def __call__(self, asn=None):
        # Fetch the singleton global-system-config, update, persist.
        config = Resource('global-system-config',
                          fq_name='default-global-system-config',
                          check=True)
        config['autonomous_system'] = asn
        config.save()
class Dot(Command):
    """Command to create a dot file from a list of paths.

    The command will automatically add to the graph the parent, refs
    and back_refs of the given resources::

        contrail-api-cli dot path/to/res1 path/to/res2 -f /tmp/output.dot

    `-e` option can be used to exclude resources from the graph.
    """
    description = "Create a dot file representing provided resources"
    filename_output = Option('-f', help="Output Dot filename", required=True)
    exclude_resource_type = Option('-e',
                                   help="Exclude resource types",
                                   action="append",
                                   default=[],
                                   dest='excludes')
    paths = Arg(help="Resource URL", metavar='path', nargs="*")

    def __call__(self, paths=None, filename_output=None, excludes=[]):
        # NOTE: mutable default ``excludes=[]`` is only read here, never
        # mutated, so it is harmless.
        resources = expand_paths(paths,
                                 predicate=lambda r: isinstance(r, Resource))
        graph = nx.Graph()
        # For each provided resources, a edge is created from this
        # resource to all of its refs, back_refs and parent.
        for r in resources:
            print "%s %s" % (short_name(r.path.name), r.path)
            r.fetch()
            graph.add_node(r.path, _node_rendering(r.path, r))
            # Neighbours of r: all refs and back_refs, plus the parent
            # when the resource has one.
            paths = [t.path for t in itertools.chain(r.refs, r.back_refs)]
            try:
                paths.append(r.parent.path)
            except ResourceMissing:
                pass
            for p in paths:
                # Skip excluded resource types.
                if p.base in excludes:
                    continue
                print "%s %s" % (short_name(p.name), p)
                graph.add_node(p, _node_rendering(p))
                graph.add_edge(r.path, p)
        print "Dot file written to %s" % filename_output
        write_dot(graph, filename_output)
class Graph(Command):
    """Dump several API collections and their ref edges as a graphml file."""

    description = "Create a graph file (graphml) by listing several collections"
    filename = Arg(help="Output filename", metavar='filename', type=str)

    def _to_graphml(self, filename):
        """Write self.graph as graphml, post-processing edge elements.

        networkx output is first written to a temp file, then rewritten
        line by line to inject edge ids.
        """
        tmp_filename = "%s.tmp" % filename
        nx.write_graphml(self.graph, tmp_filename)
        # This is a hack since gremlin/thinkerpop requires a id on edges to be
        # able to load a graphml structure
        counter = 0
        with open(tmp_filename, 'r') as f1:
            with open(filename, 'w') as f2:
                for l in f1.readlines():
                    # Python 2 unicode round-trip: decode, patch, re-encode.
                    e = unicode(l, "utf8").replace(
                        u'edge ', u'edge id="e%d" ' % counter)
                    f2.write(e.encode("utf8"))
                    counter += 1
        os.remove(tmp_filename)

    def __call__(self, filename):
        self.graph = nx.DiGraph()
        # NOTE(review): "instance-ip" appears twice in this list —
        # presumably an oversight; harmless since nodes are keyed by uuid.
        cols = [
            Collection(r, fetch=True, detail=True) for r in [
                "virtual-machine-interface", "virtual-network", "instance-ip",
                "loadbalancer-pool", "virtual-ip", "instance-ip",
                "logical-router", "floating-ip", "service-template",
                "service-instance"
            ]
        ]

        def add_node(g, r):
            # ``g`` is unused; the closure writes to self.graph directly.
            self.graph.add_node(r.uuid,
                                type=r.type,
                                fq_name=":".join(r.fq_name),
                                name=r.fq_name[-1])

        # One node per resource and per referenced resource; one edge
        # per ref.
        for e in chain(*cols):
            add_node(self.graph, e)
            for r in e.refs:
                add_node(self.graph, r)
                self.graph.add_edge(e.uuid, r.uuid)
        self._to_graphml(filename)
class SetSubnets(Subnet):
    """Converge the subnet list of a virtual-network to the given CIDRs.

    Subnets present on the VN but not in ``cidrs`` are removed; subnets
    in ``cidrs`` but not on the VN are added. The VN is saved only when
    something changed.
    """
    description = 'Set subnets to virtual-network'
    virtual_network_fqname = Option(
        required=True, help='VN fqname (eg: default-domain:admin:net)')
    cidrs = Arg(nargs="+",
                metavar='CIDR',
                help='subnet CIDR',
                type=network_type,
                default=[])

    def __call__(self, virtual_network_fqname=None, cidrs=None):
        vn = Resource('virtual-network',
                      fq_name=virtual_network_fqname,
                      fetch=True)
        cidrs = [netaddr.IPNetwork(cidr) for cidr in cidrs]
        ipam_subnets = self._get_network_ipam_subnets(vn)
        # Projections of current/wanted subnets down to prefix + length
        # so they can be compared structurally.
        ipam_subnets_current = [{
            'subnet': {
                'ip_prefix': s['subnet']['ip_prefix'],
                'ip_prefix_len': s['subnet']['ip_prefix_len']
            }
        } for s in ipam_subnets]
        ipam_subnets_wanted = [{
            'subnet': {
                'ip_prefix': text_type(c.network),
                'ip_prefix_len': c.prefixlen
            }
        } for c in cidrs]
        modified = False
        # BUGFIX: iterate indices in reverse. The original forward
        # ``enumerate`` loop called ``del ipam_subnets[idx]`` while
        # walking the parallel ``ipam_subnets_current`` list, so every
        # deletion shifted later indices and subsequent removals hit
        # the wrong elements. Deleting from the end keeps the
        # index correspondence intact.
        for idx in reversed(range(len(ipam_subnets_current))):
            if ipam_subnets_current[idx] not in ipam_subnets_wanted:
                del ipam_subnets[idx]
                modified = True
        for subnet in ipam_subnets_wanted:
            if subnet not in ipam_subnets_current:
                ipam_subnets.append(subnet)
                modified = True
        if modified:
            vn.save()
class MigrateSI110221(CheckCommand):
    """Migration command for SI from 1.10 version to 2.21 version

    Renames service-instance VMs, VMIs and instance-ips to the 2.21
    naming scheme, moving refs/back_refs from the old resources to the
    new ones. All mutating steps go through the ``_*`` helpers so that
    ``--dry-run`` only prints what would be done.
    """
    description = 'Migrate SIs from 1.10 to 2.21'
    paths = Arg(nargs="*", help="SI path(s)", metavar='path')

    def _remove_back_ref(self, r1, r2):
        # Log, and actually remove the back_ref unless dry-run.
        printo('Remove back_ref from %s to %s' % (r1.path, r2.path))
        if not self.dry_run:
            r1.remove_back_ref(r2)

    def _add_back_ref(self, r1, r2):
        # Log, and actually add the back_ref unless dry-run.
        printo('Add back_ref from %s to %s' % (r1.path, r2.path))
        if not self.dry_run:
            r1.add_back_ref(r2)

    def _delete_res(self, r):
        # Log, and actually delete unless dry-run.
        printo('Delete %s' % r.path)
        if not self.dry_run:
            r.delete()

    def _create_res(self, r):
        # Log, and actually save unless dry-run.
        printo('Create %s/%s' % (r.type, r.fq_name))
        if not self.dry_run:
            r.save()
            printo('Created %s' % r.path)

    def _migrate_iip(self, old_iip, si, itf_type):
        """Recreate an instance-ip under the 2.21 fq_name and return it.

        Idempotent: if the iip already carries the new name it is
        returned unchanged.
        """
        new_fq_name = '%s__%s-%s' % (str(si.parent.fq_name).replace(
            ':', '__'), si.fq_name[-1], itf_type)
        # there is 2 left itf so migrate the iip only once
        if str(old_iip.fq_name) == new_fq_name:
            printo('Already migrated %s. Skip' % old_iip.fq_name)
            return old_iip
        # Delete-then-recreate under the new name; uuid is dropped so
        # the API allocates a fresh one.
        self._delete_res(old_iip)
        iip = copy.deepcopy(old_iip)
        del iip['uuid']
        iip['fq_name'] = FQName([new_fq_name])
        iip['display_name'] = new_fq_name
        iip['name'] = new_fq_name
        self._create_res(iip)
        return iip

    def _migrate_vmi(self, old_vmi, new_vm, si):
        """Recreate a VMI under the 2.21 naming and move its iips/fips."""
        itf_type = old_vmi['virtual_machine_interface_properties'][
            'service_interface_type']
        # Index 1 for the 'right' interface, 2 otherwise (left).
        if itf_type == 'right':
            vmi_index = str(1)
        else:
            vmi_index = str(2)
        new_fq_name = list(si.parent.fq_name) + [
            str(new_vm.fq_name) + '__' + itf_type + '__' + vmi_index
        ]
        vmi = copy.deepcopy(old_vmi)
        del vmi['uuid']
        vmi['fq_name'] = FQName(new_fq_name)
        vmi['display_name'] = new_fq_name[-1]
        vmi['name'] = new_fq_name[-1]
        self._create_res(vmi)
        # Re-home instance-ips (migrating them) and floating-ips
        # (moved as-is) from the old VMI to the new one.
        for old_iip in old_vmi.get('instance_ip_back_refs', []):
            self._remove_back_ref(old_vmi, old_iip)
            new_iip = self._migrate_iip(old_iip, si, itf_type)
            self._add_back_ref(vmi, new_iip)
        for fip in old_vmi.get('floating_ip_back_refs', []):
            self._remove_back_ref(old_vmi, fip)
            self._add_back_ref(vmi, fip)
        self._delete_res(old_vmi)
        return vmi

    def _migrate_vm(self, old_vm, old_vm_index, si):
        """Recreate a VM under the 2.21 naming and migrate its VRs/VMIs.

        Returns the old VM untouched when the new name already exists
        (Conflict), which makes the migration re-runnable.
        """
        new_fq_name = '%s__%s__%s' % (str(si.parent.fq_name).replace(
            ':', '__'), si.fq_name[-1], old_vm_index)
        new_vm = Resource('virtual-machine',
                          fq_name=[new_fq_name],
                          display_name=new_fq_name + '__network-namespace',
                          name=new_fq_name)
        try:
            self._create_res(new_vm)
        except Conflict:
            return old_vm
        # Move virtual-router back_refs, then migrate each VMI.
        for vr in old_vm.get('virtual_router_back_refs', []):
            self._remove_back_ref(old_vm, vr)
            self._add_back_ref(new_vm, vr)
        for old_vmi in old_vm.get('virtual_machine_interface_back_refs', []):
            old_vmi.fetch()
            self._remove_back_ref(old_vm, old_vmi)
            new_vmi = self._migrate_vmi(old_vmi, new_vm, si)
            self._add_back_ref(new_vm, new_vmi)
        self._delete_res(old_vm)
        return new_vm

    def _migrate_si(self, old_si):
        """Migrate one SI: recreate it named after its pool/router target.

        SNAT SIs keep their fq_name, so the create Conflicts and the
        old SI object is reused in place (re_use flag).
        """
        # Target is the lbaas pool if any, else the logical-router.
        si_target = old_si.get('loadbalancer_pool_back_refs', [None])[0]
        if si_target is not None:
            si_target_uuid = si_target.uuid
        elif si_target is None:
            si_target = old_si.get('logical_router_back_refs', [None])[0]
            si_target_uuid = old_si.fq_name[-1]
        if si_target is None:
            printo('No pool or router attached to SI')
            return
        new_si = Resource(
            'service-instance',
            fq_name=list(old_si.parent.fq_name) + [si_target_uuid],
            display_name=si_target_uuid,
            service_instance_properties=old_si['service_instance_properties'],
            parent=old_si.parent)
        new_si['service_template_refs'] = old_si['service_template_refs']
        # SNAT si fq_name doesn't change so we create the same
        # resource and it will conflict
        re_use = False
        try:
            self._create_res(new_si)
        except Conflict:
            new_si = old_si
            re_use = True
        for old_vm in old_si.get('virtual_machine_back_refs', []):
            old_vm.fetch()
            self._remove_back_ref(old_si, old_vm)
            # VM index is the last char of the fq_name
            new_vm = self._migrate_vm(old_vm, str(old_vm.fq_name)[-1], new_si)
            self._add_back_ref(new_si, new_vm)
        if not re_use:
            self._remove_back_ref(old_si, si_target)
            self._delete_res(old_si)
            self._add_back_ref(new_si, si_target)

    def __call__(self, paths=None, **kwargs):
        super(MigrateSI110221, self).__call__(**kwargs)
        # Either all SIs, or the ones designated by paths.
        if not paths:
            resources = Collection('service-instance', fetch=True)
        else:
            resources = expand_paths(
                paths, predicate=lambda r: r.type == 'service-instance')
        for si in resources:
            si.fetch()
            # Skip SIs with no service-template ref.
            try:
                si['service_template_refs'][0]
            except (KeyError, IndexError):
                printo('SI %s has no template, skipping.' % si.uuid)
                continue
            vms = si.get('virtual_machine_back_refs', [])
            if not vms:
                printo('SI %s has no VM, skipping.' % si.uuid)
            # Already-migrated SIs have their name embedded in every VM
            # fq_name — nothing to do.
            if all([si.fq_name[-1] in str(vm.fq_name) for vm in vms]):
                continue
            printo('Found lbaas SI to migrate %s (%s)' % (si.path, si.fq_name))
            if not self.check:
                self._migrate_si(si)
        printo('Done')
class Config(Command):
    """Command parameterized by the hostname of a config node."""

    config_name = Arg(help='Hostname of config node')
class FixVnId(ZKCommand, CheckCommand):
    """Compare and fix virtual network IDs in Zookeeper and the API server.

    Checks that the ZK lock for a VN has the correct index. Checks also
    that the VN has a lock in ZK. To check all VNs run::

        contrail-api-cli fix-vn-id --check

    To fix all VNs or a particular VN run::

        contrail-api-cli fix-vn-id [--dry-run] [vn_uuid]
    """
    description = "Fix the virtual network Zookeeper locks"
    yes = Option('-y', action='store_true',
                 help='Assume Yes to all queries and do not prompt')
    vn_paths = Arg(
        nargs='*', metavar='vn_paths',
        help='List of VN. If no path is provided, all VNs are considered')

    def fix(self, vn, dry_run=True):
        """Repair one VN entry produced by :meth:`generate`.

        "nolock": recreate the ZK lock at the VN's current id.
        "badlock": allocate a fresh id, lock it, and rewrite the VN.
        """
        if vn['reason'] == "nolock":
            lock = vn['resource']["virtual_network_network_id"]
        if vn['reason'] == "badlock":
            lock = self.indexes.get_available_index()
        self.indexes.create(lock, vn['resource'], dry_run)
        if vn['reason'] == "badlock":
            resource = vn["resource"]
            resource["virtual_network_network_id"] = lock
            # network_id may be absent from the properties dict.
            try:
                resource["virtual_network_properties"]["network_id"] = lock
            except KeyError:
                pass
            if dry_run:
                print "[dry_run] ",
            print "%s Set VN ID to %s" % (resource.path, lock)
            if not dry_run:
                resource.save()

    def generate(self, vn_paths):
        """Scan VNs and return a list of dicts describing broken locks.

        Each entry carries a "reason" of "nolock" (no ZK node for the
        VN id) or "badlock" (ZK node holds a different fq_name).
        """
        result = []
        if vn_paths == []:
            # detail=True means the VNs are already fetched.
            vns = Collection("virtual-network", fetch=True, detail=True)
        else:
            vns = expand_paths(vn_paths)
            for vn in vns:
                vn.fetch()
        for r in vns:
            nid = r["virtual_network_network_id"]
            try:
                zk_data, _ = self.zk_client.get(
                    ZK_BASEPATH + "/" + to_zk_index(nid))
            except kazoo.exceptions.NoNodeError:
                result.append({"reason": "nolock",
                               "nid": nid,
                               "path": r.path,
                               "api-fqname": text_type(r.fq_name),
                               "resource": r})
                continue
            # Lock exists but names a different VN.
            if "%s" % zk_data.decode('utf-8') != "%s" % r.fq_name:
                result.append({"reason": "badlock",
                               "nid": nid,
                               "path": r.path,
                               "api-fqname": text_type(r.fq_name),
                               "zk-fqname": zk_data,
                               "resource": r})
        return result

    def __call__(self, vn_paths=None, yes=False, **kwargs):
        super(FixVnId, self).__call__(**kwargs)
        # Only prompt when actually about to modify something.
        if (not yes and not self.dry_run and not self.check and not
                continue_prompt(
                    "Do you really want to repair virtual networks?")):
            print "Exiting."
            exit()
        self.indexes = None
        result = self.generate(vn_paths)
        self.indexes = Indexes(self.zk_client)
        for r in result:
            if r['reason'] == "nolock":
                print "No lock for %(path)s with VN Id %(nid)6s" % r
            if r['reason'] == "badlock":
                print "Bad lock for %(path)s with VN Id %(nid)6s zk-fqname: %(zk-fqname)s ; api-fqname: %(api-fqname)s" % r
            if not self.check:
                self.fix(r, dry_run=self.dry_run)
class BGPRouter(Command):
    """Command parameterized by a BGP router name."""

    router_name = Arg(help="BGP router name")
class VNAction(VN):
    """Action on a virtual network designated by name."""

    virtual_network_name = Arg(help='Virtual network name')
class FixSubnets(CheckCommand):
    """Fix subnet/vn association in kv store.

    When the API server is not properly started the hooks that
    populates the kv store on subnet creation are not properly run. As
    a result doing neutron net-list will lead to the following error::

        404-{u'NeutronError': {u'message': u'Subnet 646f986a-67c9-4e1b-bf13-59f18f787068 could not be found', u'type': u'SubnetNotFound', u'detail': u''}}

    This command check all the IPAM subnet informations and verifies
    that proper kv store keys exists for each subnet.

    This command assumes there is only one IPAM in the contrail
    installation (``default-domain:default-project:default-network-ipam``).

    To check all subnets run::

        contrail-api-cli fix-subnets --check

    To fix all subnets run::

        contrail-api-cli fix-subnets [--dry-run]

    Or to fix a particular subnet run::

        contrail-api-cli fix-subnets <subnet_uuid> [--dry-run]
    """
    description = "Fix subnets key-value store entries"
    subnet_uuid = Arg(nargs="?", help="subnet uuid to fix")

    def _subnet_key(self, vn_uuid, subnet):
        # Key format expected by the neutron plugin: "<vn_uuid> <cidr>".
        return "%s %s/%s" % (vn_uuid, subnet['subnet']['ip_prefix'],
                             subnet['subnet']['ip_prefix_len'])

    def chk(self, item):
        """Check one (vn_uuid, subnet_uuid, subnet) tuple.

        Returns the list of (key, value) pairs missing from the kv
        store (empty when everything is present, or when a specific
        subnet_uuid was requested and this is not it).
        """
        vn_uuid, subnet_uuid, subnet = item
        to_add = []
        if self.subnet_uuid is not None and self.subnet_uuid != subnet_uuid:
            return to_add
        subnet_key = self._subnet_key(vn_uuid, subnet)
        # Both directions must exist: uuid -> key and key -> uuid.
        try:
            self.session.search_kv_store(subnet_uuid)
        except HttpError:
            printo('Missing key %s for subnet %s' % (subnet_uuid,
                                                     subnet_uuid))
            to_add.append((subnet_uuid, subnet_key))
        try:
            self.session.search_kv_store(subnet_key)
        except HttpError:
            printo('Missing key %s for subnet %s' % (subnet_key, subnet_uuid))
            to_add.append((subnet_key, subnet_uuid))
        return to_add

    def fix(self, to_add):
        # Write back each missing kv pair (honouring dry-run).
        for (key, value) in to_add:
            printo('Adding kv %s:%s' % (key, value))
            if not self.dry_run:
                self.session.add_kv_store(key, value)

    def __call__(self, subnet_uuid=None, **kwargs):
        super(FixSubnets, self).__call__(**kwargs)
        self.session = Context().session
        self.subnet_uuid = subnet_uuid
        # Single-IPAM assumption (see class docstring).
        ipam = Resource(
            'network-ipam',
            fq_name='default-domain:default-project:default-network-ipam',
            fetch=True)
        to_check = [(vn.uuid, subnet.get('subnet_uuid'), subnet)
                    for vn in ipam.back_refs.virtual_network
                    for subnet in vn.get('attr', {}).get('ipam_subnets', [])]
        to_fix = parallel_map(self.chk, to_check, workers=50)
        if not self.dry_run and not self.check:
            parallel_map(self.fix, to_fix, workers=50)
class ManageRT(Command):
    """Command to manage VN custom RTs
    """
    description = "Manage VN custom RTs"
    path = Arg(help="path", metavar='path',
               complete="resources:virtual-network:path")
    action = Option('-a',
                    choices=['add', 'delete', 'show'],
                    default='show',
                    help="Type of action (default: %(default)s)")
    mode = Option('-m',
                  choices=['import', 'export', 'import_export'],
                  default='import_export',
                  help="Specify type of RT (default: %(default)s)")
    name = Option(
        help="Name of the RT to create/delete (eg: target:219.0.0.1:1)")

    @property
    def resource_type(self):
        return 'virtual-network'

    def show(self, vn):
        """Print the three RT lists of the VN as a table."""
        vns = [vn.uuid]
        import_export_list = vn.get('route_target_list',
                                    {}).get('route_target', [])
        import_list = vn.get('import_route_target_list',
                             {}).get('route_target', [])
        export_list = vn.get('export_route_target_list',
                             {}).get('route_target', [])
        table = [
            ["VN", "import/export", "import", "export"],
        ]

        def add_row(vn, import_export_rt, import_rt, export_rt):
            table.append([
                vn if vn else "",
                import_export_rt if import_export_rt else "",
                import_rt if import_rt else "",
                export_rt if export_rt else ""
            ])

        # NOTE(review): Python-2 map() with unequal-length iterables
        # pads the shorter ones with None (which add_row renders as "").
        # Under Python 3 map() is lazy and truncates, so this line would
        # both stop at the shortest list and never run — revisit on any
        # py3 migration.
        map(add_row, vns, import_export_list, import_list, export_list)
        printo(format_table(table))

    def add(self, vn, mode, name):
        """Append RT ``name`` to the VN list selected by ``mode``."""
        prop = mode_map[mode]
        if prop not in vn:
            vn[prop] = {'route_target': []}
        if name in vn[prop]['route_target']:
            raise CommandError('RT %s already added' % name)
        vn[prop]['route_target'].append(name)
        vn.save()
        printo('RT %s added to VN' % name)

    def delete(self, vn, mode, name):
        """Remove RT ``name`` from the VN list selected by ``mode``."""
        prop = mode_map[mode]
        rt_list = vn.get(prop, {}).get('route_target', [])
        try:
            rt_list.remove(name)
        except ValueError:
            printo('RT %s not found on VN' % name)
            return
        vn[prop]['route_target'] = rt_list
        vn.save()
        printo('RT %s deleted from VN' % name)

    @require_schema(version='>= 3')
    def __call__(self, path=None, mode=None, action=None, name=None):
        # add/delete need an RT name; show does not.
        if not action == 'show' and not name:
            raise CommandError("--name is required")
        vn = expand_paths([path],
                          predicate=lambda r: r.type ==
                          'virtual-network')[0]
        vn.fetch()
        if action == 'show':
            self.show(vn)
        elif action == 'add':
            self.add(vn, mode, name)
        elif action == 'delete':
            self.delete(vn, mode, name)
class PurgeProject(Command):
    """Command to purge a project. All related resources are deleted.

    .. warning::

        This command is experimental and not fully tested.

    This command works recursively by first trying to remove the
    project. If other resources are linked to the project the API will
    return a 409 response with all linked resources.

    The command will then try to delete these resources and so on until
    the project resource can be deleted.

    Because of this no dry-run mode is possible.

    To run the command::

        contrail-api-cli --ns contrail_api_cli.clean purge-project project/uuid
    """
    description = "Purge contrail projects"
    nova_api_version = Option('-v', default="2.1")
    paths = Arg(nargs="+", help="path(s)", metavar='path',
                complete="resources:project:path")

    def _handle_si_vm(self, iip, vmi):
        """Delete an instance-ip together with the SI VMs behind it."""
        # to cleanup SI we need to remove manually the VMs
        # since the si -> vm ref is derived it won't trigger
        # a BackRefsExists error in the backend
        iip.fetch()
        for vmi in iip.refs.virtual_machine_interface:
            vmi.fetch()
            if 'virtual_machine_interface_properties' not in vmi:
                continue
            for vm in vmi.refs.virtual_machine_refs:
                vm.fetch()
                # Detach the VM from all its VMIs before deleting it.
                for vm_vmi in vm.back_refs.virtual_machine_interface:
                    vm_vmi.remove_ref(vm)
                self._delete(vm)
        self._delete(iip)

    def _remove_vm(self, vmi, parent):
        """Delete a VMI, first deleting its nova VMs (with confirmation)."""
        logger.debug("trying to remove vmi vms")
        # VMs are not linked to the project
        vmi.fetch()
        for vm in vmi.refs.virtual_machine:
            if continue_prompt("Nova VM %s will be deleted" % vm.uuid):
                printo("deleting nova VM %s" % vm.uuid)
                self.nclient.servers.delete(vm.uuid)
            else:
                raise CommandError("Exiting.")
        self._delete(vmi)

    def _remove_back_ref(self, resource, parent):
        # Just unlink; used for resources we must not delete.
        printo("remove back ref from %s to %s" % (self.current_path(parent),
                                                  self.current_path(resource)))
        parent.remove_back_ref(resource)

    def _delete(self, resource, parent=None):
        """Delete a resource, recursing through 409 conflicts.

        On ChildrenExists/BackRefsExists the per-(type, type) handler
        from self.actions (default: this method) is applied to each
        blocking resource, then the delete is retried.
        """
        try:
            logger.debug("trying to delete %s" % self.current_path(resource))
            resource.delete()
            printo("%s deleted" % self.current_path(resource))
        except (ChildrenExists, BackRefsExists) as e:
            logger.debug("failed: %s" % e)
            action = self.actions[e.__class__].get(
                (resource.type, e.resources[0].type), self._delete)
            for r in e.resources:
                action(r, resource)
            self._delete(resource)
        except ResourceNotFound:
            # Already gone (e.g. deleted via another branch of the
            # recursion) — nothing to do.
            pass

    def __call__(self, paths=None, nova_api_version=None):
        self.nclient = nclient.Client(nova_api_version,
                                      session=Context().session)
        # Special-case handlers keyed by (resource type, blocking type).
        self.actions = {
            BackRefsExists: {
                # we don't want to delete virtual-router objects
                ('virtual-machine', 'virtual-router'): self._remove_back_ref,
                ('virtual-machine-interface', 'instance-ip'):
                self._handle_si_vm,
                ('virtual-network', 'virtual-machine-interface'):
                self._remove_vm,
                ('security-group', 'virtual-machine-interface'):
                self._remove_vm,
            },
            ChildrenExists: {
                ('project', 'virtual-machine-interface'): self._remove_vm,
            }
        }
        resources = expand_paths(paths,
                                 predicate=lambda r: r.type == 'project')
        if not continue_prompt(
                "Do you really want to purge theses projects ? All resources will be destroyed !"
        ):
            return
        for project in resources:
            self._delete(project)
class CheckBadRefs(CheckCommand):
    """Check for broken references.

    The command will read all objects from the cassandra DB then for
    each object check that references exists in the API. References
    includes refs, back refs, children, parents.

    To run the command::

        contrail-api-cli check-bad-refs --cassandra-servers db:9160 [uuids...]
    """
    description = "Check for broken references"
    uuids = Arg(help="check specific uuids", nargs="*", default=[])
    cassandra_servers = Option(
        help="cassandra server list' (default: %(default)s)",
        nargs='+',
        type=server_type,
        default=['localhost:9160'])
    force = Option('-f',
                   help="force deletion of incomplete resources",
                   action="store_true",
                   default=False)

    def _props_to_json(self, values):
        # Plain JSON when piped, syntax-highlighted otherwise.
        if self.is_piped:
            return json.dumps(values, cls=PropertiesEncoder, indent=4)
        return highlight_json(
            json.dumps(values, cls=PropertiesEncoder, indent=4))

    def _get_current_resource(self, uuid, values):
        """Build the Resource for a cassandra row, or False if the row
        has no 'type' column (incomplete object)."""
        try:
            r_type = json.loads(values['type']).replace('_', '-')
            return Resource(r_type, uuid=uuid)
        except KeyError:
            printo("[%s] incomplete, no type" % uuid)
            return False

    def _check_ref(self, ref, uuid):
        """Return True when the referenced resource is missing from the API.

        ``ref`` is a cassandra column name of the form
        "<kind>:<type>:<uuid>".
        """
        _, ref_type, ref_uuid = ref.split(':')
        try:
            Resource(ref_type.replace('_', '-'), uuid=ref_uuid, check=True)
            return False
        except ResourceNotFound:
            printo("[%s] broken ref to missing %s" % (uuid, ref))
            return True

    def _check_resource_refs(self, uuid, values):
        """Check every ref-like column of the row; True if any is broken."""
        ref_attrs = ('ref:', 'backref:', 'children:', 'parent:')
        to_check = []
        for key, _ in values.items():
            # str.startswith with a tuple matches any of the prefixes.
            if key.startswith(ref_attrs):
                to_check.append(key)
        results = parallel_map(self._check_ref,
                               to_check,
                               args=(uuid, ),
                               workers=20)
        return any(results)

    def _delete(self, uuid_cf, uuid):
        # Remove the whole row from obj_uuid_table (honouring dry-run).
        if not self.dry_run:
            uuid_cf.remove(uuid)
        printo("[%s] deleted" % uuid)

    def __call__(self, uuids=None, cassandra_servers=None, force=False,
                 **kwargs):
        super(CheckBadRefs, self).__call__(**kwargs)
        self.force = force
        pool = ConnectionPool('config_db_uuid',
                              server_list=cassandra_servers)
        uuid_cf = ColumnFamily(pool, 'obj_uuid_table')
        # Iterate either the uuids given on the command line, or every
        # row key of the column family.
        if uuids:
            def uuids_g():
                for uuid in uuids:
                    yield uuid
        else:
            def uuids_g():
                for k, v in uuid_cf.get_range(column_count=1,
                                              filter_empty=True):
                    yield k
        for uuid in uuids_g():
            values = dict(uuid_cf.xget(uuid))
            res = self._get_current_resource(uuid, values)
            bad_refs = self._check_resource_refs(uuid, values)
            if not res or bad_refs:
                printo(self._props_to_json(values))
            # Only incomplete rows (no type) are deleted, never rows
            # that merely have broken refs.
            if not res and not self.check:
                if self.force or continue_prompt(message="Delete ?"):
                    self._delete(uuid_cf, uuid)
class VRouter(Command):
    """Command parameterized by the hostname of a compute node."""

    vrouter_name = Arg(help='Hostname of compute node')
class Analytics(Command):
    """Command parameterized by the hostname of an analytics node."""

    analytics_name = Arg(help='Hostname of analytics node')
class RescheduleVM(CheckCommand):
    """Command to move a SI VMs from one virtual-router to other
    virtual-routers.

    This command will move all SI VMs found on the provided VR to a
    list of VRs. The command will make sure two VMs of the same SI
    will not end up on the same VR. The command can be run in dry-run
    mode to see how the VMs will be moved.

    To reschedule VMs run::

        contrail-api-cli reschedule-vm virtual-router/uuid1 virtual-router/uuid2 virtual-router/uuid3

    This will move all VR/uuid1 SI VMs to VR/uuid2 and VR/uuid3 evenly.
    """
    description = "Move SI VMs from one virtual-router to other virtual-router(s)"
    src = Arg(help='source vrouter path',
              complete='resources:virtual-router:path')
    dst = Arg(help='destination vrouters paths',
              nargs='+',
              complete='resources:virtual-router:path')

    def __call__(self, src=None, dst=None, **kwargs):
        super(RescheduleVM, self).__call__(**kwargs)
        src = expand_paths([src],
                           predicate=lambda r: r.type == 'virtual-router')[0]
        dst = expand_paths(dst,
                           predicate=lambda r: r.type == 'virtual-router')
        # get list of VMs to move; each entry is (failure_count, vm)
        # where failure_count tracks how many dst VRs already rejected
        # the VM.
        vms = []
        src.fetch()
        for vm in src.refs.virtual_machine:
            vm.fetch()
            # Only SI VMs are rescheduled.
            if vm.refs.service_instance:
                vms.append((0, vm))
        if not vms:
            return "No VMs on this virtual-router"
        if self.check:
            return "\n".join([text_type(vm.path) for (_, vm) in vms])
        # get all the resources we need
        for vr in dst:
            vr.fetch()
            for vm in vr.refs.virtual_machine:
                vm.fetch()
        printo("Moving %d VMs from %s to %s" %
               (len(vms), src['name'], ", ".join([vr['name'] for vr in dst])))
        # Round-robin over destinations until the queue is drained.
        for vr in itertools.cycle(dst):
            # no more VMs to process
            if not vms:
                break
            failures, vm = vms.pop(0)
            vm_si = vm.refs.service_instance[0]
            # SIs already hosted on this destination VR.
            vr_sis = [
                si for vr_vm in vr.refs.virtual_machine
                for si in vr_vm.refs.service_instance
            ]
            logger.debug("Checking VM %s of SI %s" % (vm, vm_si))
            if vm_si in vr_sis:
                logger.debug("%s already has a VM for SI %s" %
                             (vr['name'], vm_si))
                # Give up only after every destination rejected the VM;
                # otherwise requeue it for the next VR in the cycle.
                if failures == len(dst):
                    printo(
                        "Unable to move VM %s. Dest vrouter already has a VM of the same SI."
                        % vm)
                else:
                    vms.insert(0, (failures + 1, vm))
            else:
                printo("Moving VM %s on %s" % (vm, vr['name']))
                if not self.dry_run:
                    src.remove_ref(vm)
                    vr.add_ref(vm)
class Provision(Command):
    """Converge a contrail environment towards a JSON description.

    Reads the wanted environment from ``env_file``, queries the current
    state through get-/list- commands, diffs the two, and applies the
    diff through set-/add-/del- commands.
    """
    description = 'Provision contrail environment'
    env_file = Arg(help='JSON file of environment to provision',
                   type=argparse.FileType('r'))
    force = Option('-f',
                   help="Don't ask for confirmation",
                   default=False,
                   action="store_true")

    @property
    def __doc__(self):
        # Hack to include rst doc with sections
        # not included by sphinx but can be view
        # with the man command.
        return sys.modules[__name__].__doc__

    def _get_current_env(self, keys):
        """Build the current environment of the given resources (keys).

        For each resource we run "get-resource" or "list-resource" and
        populate the env.
        """
        env = OrderedDict()
        for key in keys:
            # Properties are read with GET, collections with LIST.
            if self._is_property(key):
                cmd = self._get_command(key, Actions.GET)
            else:
                cmd = self._get_command(key, Actions.LIST)
            try:
                env[key] = json.loads(
                    self._call_command(cmd,
                                       defaults=self._provision_defaults.get(
                                           key, {})))
            except CommandError as e:
                raise CommandError('Failed to get current values for %s: %s' %
                                   (key, e))
        return env

    def _is_property(self, string):
        # A key is a "property" when a set-<key> command exists.
        try:
            self.mgr.get('set-%s' % string)
            return True
        except CommandNotFound:
            return False

    def _get_add_command(self, key):
        # Properties are provisioned with SET, collections with ADD.
        if self._is_property(key):
            action = Actions.SET
        else:
            action = Actions.ADD
        return self._get_command(key, action)

    def _get_command(self, key, action):
        # Resolve "<action>-<key>" to a command object.
        try:
            return self.mgr.get('%s-%s' % (action, key))
        except CommandNotFound:
            raise CommandError("No command to %s %s" % (action, key))

    def _get_command_args(self, cmd):
        # Names of cmd.__call__'s parameters, minus self.
        argspec = inspect.getargspec(cmd.__call__)
        if len(argspec.args) > 1:
            return [arg for arg in argspec.args[1:]]
        return []

    def _call_command(self, cmd, defaults={}):
        """Call a command.

        If the command needs arguments they have to be passed as a dict
        in the defaults kwarg.
        """
        kwargs = {}
        for arg in self._get_command_args(cmd):
            kwargs[arg] = defaults.get(arg)
        logger.debug('Calling %s with %s' % (cmd, kwargs))
        return cmd(**kwargs)

    def _normalize_env(self, env):
        """Normalize an input environement.

        Replace '-' by '_' in provisionning arg names.
        Make sure calls definitions are in lists.
        """
        for key, value in env.items():
            if type(value) == OrderedDict:
                value = dict(value.items())
            if type(value) == dict:
                # A single call dict becomes a one-element list.
                env[key] = [value]
            elif type(value) != list:
                raise CommandError(
                    'Unsupported provisioning data type in %s: %s' %
                    (key, value))
            for idx, call in enumerate(env[key]):
                env[key][idx] = self._normalize_keys(call)
        return env

    def _setup_defaults_values(self, env):
        # Merge per-key defaults under each call's explicit values.
        for key, values in env.items():
            for idx, kwargs in enumerate(values):
                defaults = copy.deepcopy(self._provision_defaults.get(key, {}))
                defaults.update(kwargs)
                env[key][idx] = defaults
        return env

    def _normalize_keys(self, values):
        # '-' -> '_' so JSON keys match python kwarg names.
        new_values = {}
        for key, value in values.items():
            new_values[key.replace('-', '_')] = value
        return new_values

    def _validate_env(self, env):
        """Given an env, validate that all arguments are consistent.
        """
        for key, values in env.items():
            for idx, call in enumerate(values):
                env[key][idx] = self._validate_call(key, call)
        return env

    def _validate_call(self, key, values):
        """Validate call parameters.

        The wanted env is globally structured as follow:

        {
            "provision": {
                "resource": [
                    {
                        "arg": "value",
                        "arg2": 34
                    }
                ],
                ...
            }
        }

        We try to get the provisioning method for "resource", which is
        "add-resource" or "set-resource". Then, given the arguments we
        validate them using the command parser and set default values
        where needed.
        """
        cmd = self._get_add_command(key)
        # default args values from argparse parser
        # NOTE(review): this walks argparse private internals
        # (parser._actions, parser._get_values) — tied to the argparse
        # implementation; revisit on python upgrades.
        for action in cmd.parser._actions:
            if action.dest == 'help':
                continue
            if isinstance(action, argparse._StoreConstAction):
                values[action.dest] = values.get(action.dest, action.default)
            else:
                arg_strings = values.get(action.dest, [])
                if type(arg_strings) != list:
                    arg_strings = [arg_strings]
                try:
                    values[action.dest] = cmd.parser._get_values(
                        action, arg_strings)
                    if not values[action.dest] and action.default:
                        values[action.dest] = action.default
                except argparse.ArgumentError as e:
                    raise CommandError('Error in %s: %s' %
                                       (key, text_type(e)))
        # remove unknown args from call
        for arg, value in copy.deepcopy(values.items()):
            if arg not in self._get_command_args(cmd):
                logger.debug('Unknown arg %s for %s. Ignored' %
                             (arg, cmd.__call__))
                del values[arg]
        return values

    def _diff_envs(self, current, wanted):
        """Make a diff of the current env and the wanted env.

        Compare only common resources between the envs. This allows to
        partially provision the env.

        If the wanted env has a bgp-router list we try to converge the
        current env bgp-router list. Removing bgp-routers not in the
        wanted list, adding bgp-routers not in the current list.
        """
        diff_env = {
            Actions.SET: OrderedDict(),
            Actions.ADD: OrderedDict(),
            Actions.DEL: OrderedDict(),
        }
        for key, values in wanted.items():
            add_values = [v for v in values if v not in current.get(key, [])]
            if add_values:
                if self._is_property(key):
                    diff_env[Actions.SET][key] = add_values
                else:
                    diff_env[Actions.ADD][key] = add_values
        for key, values in current.items():
            # Only delete within keys the wanted env mentions, and
            # never "delete" a property (it is SET instead).
            if key not in wanted:
                continue
            if self._is_property(key):
                continue
            del_values = [v for v in values if v not in wanted[key]]
            if del_values:
                diff_env[Actions.DEL][key] = del_values
        return diff_env

    def _confirm_diff(self, diff, force=False):
        """Show actions to be made and ask for confirmation unless
        force is True.
        """
        if not any(
            [True if diff[action] else False for action in Actions.APPLY]):
            printo('Nothing to do')
            return False
        for action in Actions.APPLY:
            if not diff[action]:
                continue
            printo("\n%s the resources :\n" % action.capitalize())
            for key, values in diff[action].items():
                printo('%s : %s\n' % (key, json.dumps(values, indent=2)))
        if force or continue_prompt():
            return True
        else:
            return False

    def _apply_diff(self, diff):
        """Takes the generated diff and call methods to converge to
        the wanted env.

        First delete unwanted resources, then set wanted properties,
        finally add wanted resources.
        """
        for action in Actions.APPLY:
            for key, values in diff.get(action, {}).items():
                cmd = self._get_command(key, action)
                for kwargs in values:
                    try:
                        self._call_command(cmd, defaults=kwargs)
                    except CommandError as e:
                        raise CommandError('Call to %s %s failed: %s' %
                                           (action, key, e))

    def __call__(self, env_file=None, force=False):
        env = json.load(env_file, object_pairs_hook=OrderedDict)
        # Per-key defaults from the "defaults" section of the file.
        self._provision_defaults = {}
        for key, defaults in env.get('defaults', {}).items():
            self._provision_defaults[key] = self._normalize_keys(defaults)
        self.mgr = CommandManager()
        if env.get('namespace'):
            self.mgr.load_namespace(env.get('namespace'))
            logger.debug('Namespace %s loaded' % env['namespace'])
        # Normalize/default/validate both sides so the diff compares
        # like with like.
        wanted_env = env['provision']
        wanted_env = self._normalize_env(wanted_env)
        wanted_env = self._setup_defaults_values(wanted_env)
        wanted_env = self._validate_env(wanted_env)
        current_env = self._get_current_env(wanted_env.keys())
        current_env = self._normalize_env(current_env)
        current_env = self._setup_defaults_values(current_env)
        current_env = self._validate_env(current_env)
        diff_env = self._diff_envs(current_env, wanted_env)
        if self._confirm_diff(diff_env, force=force):
            self._apply_diff(diff_env)
class SAS(Command):
    """Command parameterized by a service appliance set name."""

    appliance_set_name = Arg(help='name')