def main():
    """Replicate the switch CLI command 'show interface fex' across the fabric.

    :return: None
    """
    # Command line options: standard APIC credentials plus an optional switch id
    cli = Credentials(['apic', 'nosnapshotfiles'],
                      description=("This application replicates the switch "
                                   "CLI command 'show interface fex'"))
    cli.add_argument('-s', '--switch', type=str, default=None,
                     help='Specify a particular switch id, e.g. "101"')
    cli_args = cli.get()

    # Establish the APIC session; bail out if authentication fails
    session = Session(cli_args.url, cli_args.login, cli_args.password)
    login_resp = session.login()
    if not login_resp.ok:
        print('%% Could not login to APIC')
        return

    # Resolve the node set and render the FEX interface report
    show_interface_fex(session, get_node_ids(session, cli_args))
def main():
    """ Main execution routine

    Logs on to the APIC and, for a given DN, lists the objects holding a
    relation to it (children of subtree class ``relnFrom``).

    :return: None
    """
    description = ('Simple application that logs on to the APIC'
                   ' and displays usage information for a given DN')
    creds = Credentials('apic', description)
    creds.add_argument("-d", "--dn_name",
                       help="DN to query for usage information")
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return  # Fixed: previously fell through and queried with a dead session

    url = '/api/mo/{}.json?query-target=children&target-subtree-class=relnFrom'
    url = url.format(args.dn_name)
    resp = session.get(url)
    if resp.ok:
        data = []  # Fixed: rows list was never initialized in this scope
        used_by = resp.json()['imdata']
        for item in used_by:
            # Each item is {class_name: {'attributes': {...}}}; grab the class
            kls = next(iter(item))
            attributes = item[kls]['attributes']
            data.append((attributes['tDn'], kls))
        print(tabulate(data, headers=["Used by", "Class"]))
def main():
    """ Main execution routine

    Creates a Tenant/AppProfile/BridgeDomain/EPG and either prints the
    configuration JSON or pushes it to the APIC.

    :return: None
    """
    creds = Credentials('apic')
    creds.add_argument('--tenant', help='The name of Tenant')
    creds.add_argument('--app', help='The name of ApplicationProfile')
    creds.add_argument('--bd', help='The name of BridgeDomain')
    creds.add_argument('--epg', help='The name of EPG')
    creds.add_argument('--json', const='false', nargs='?',
                       help='Json output only')
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        # Fixed: the login result was previously ignored, so a failed login
        # went unnoticed until the push failed
        print('%% Could not login to APIC')
        return

    # Build the object tree: tenant -> app profile / BD, EPG attached to BD
    tenant = Tenant(args.tenant)
    app = AppProfile(args.app, tenant)
    bd = BridgeDomain(args.bd, tenant)
    epg = EPG(args.epg, app)
    epg.add_bd(bd)

    if args.json:
        print(tenant.get_json())
    else:
        resp = session.push_to_apic(tenant.get_url(), tenant.get_json())
        if not resp.ok:
            print('%% Error: Could not push configuration to APIC')
            print(resp.text)
def main():
    """Replicate the switch CLI command 'show interface description'.

    :return: None
    """
    # Command line options: credentials plus optional switch id and interface
    cli = Credentials(['apic', 'nosnapshotfiles'],
                      description=("This application replicates the switch "
                                   "CLI command 'show interface description'"))
    cli.add_argument('-s', '--switch', type=str, default=None,
                     help='Specify a particular switch id, e.g. "101"')
    cli.add_argument('-i', '--interface', type=str, default=None,
                     help='Specify a specific interface, e.g. "eth1/1"')
    cli_args = cli.get()

    # Establish the APIC session; bail out if authentication fails
    session = Session(cli_args.url, cli_args.login, cli_args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')
        return

    # Render descriptions once per APIC interface class
    node_ids = get_node_ids(session, cli_args)
    for intf_class in ('l1PhysIf', 'pcAggrIf', 'l3EncRtdIf', 'sviIf',
                       'tunnelIf', 'mgmtMgmtIf', 'l3LbRtdIf'):
        show_interface_description(session, node_ids,
                                   apic_intf_class=intf_class,
                                   specific_interface=cli_args.interface)
def __init__(self, url, login, password):
    """Create a session to the APIC and define per-class report headers.

    :param url: String containing the URL of the APIC
    :param login: String containing the APIC login name
    :param password: String containing the APIC password
    """
    # Login to APIC
    self._apic = Session(url, login, password)
    # Column headers for the 'brief' output, keyed by APIC interface class
    self._if_brief_headers = {
        'l1PhysIf': [
            'Ethernet Interface', 'VLAN', 'Type', 'Mode', 'Status',
            'Reason', 'Speed', 'Port Ch #'
        ],
        'pcAggrIf': [
            'Port-channel Interface', 'VLAN', 'Type', 'Mode', 'Status',
            'Reason', 'Speed', 'Protocol'
        ],
        'l3LbRtdIf': ['Interface', 'Status', 'Description'],
        'tunnelIf': ['Interface', 'Status', 'IP Address', 'Encap type', 'MTU'],
        'sviIf': ['Interface', 'Secondary VLAN(Type)', 'Status', 'Reason'],
        'l3EncRtdIf': [],
        'mgmtMgmtIf': ['Port', 'VRF', 'Status', 'IP Address', 'Speed', 'MTU'],
        'l2ExtIf': [],
        'l2VfcIf': [
            'Interface', 'Vsan', 'Admin\nMode', 'Admin Trunk Mode', 'Status',
            'Bind Info', 'Oper Mode', 'Oper Speed (Gbps)'
        ]
    }
    self._if_types = self._if_brief_headers.keys()
    if not self._apic.login().ok:
        self._logged_in = False
        # Fixed: converted Python-2 print statement to the print() function
        # for consistency with the rest of the file
        print('%% Could not login to APIC')
    else:
        self._logged_in = True
    self._interfaces = []
def main():
    """ Main show Process routine

    Checks that the configured APIC cluster size matches the actual cluster
    size and prints the cluster membership details when they differ.

    :return: None
    """
    description = 'Simple application that logs on to the APIC and check cluster information for a fabric'
    creds = Credentials('apic', description)
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    cluster = Cluster.get(session)
    if cluster.config_size != cluster.cluster_size:
        # Fixed: values were previously printed as discarded tuple expressions
        # (e.g. `print("...") , value`) mixed with Python-2 print statements
        print("*******************************************************")
        print("WARNING, configured cluster size ", cluster.config_size)
        print(": not equal to the actual size ", cluster.cluster_size)
        print("WARNING, desired stats collection might be lost")
        print("*******************************************************")
        print("APICs in the cluster", cluster.name, ":")
        for apic in cluster.apics:
            print(json.dumps(apic, indent=4, sort_keys=True))
    else:
        print("PASS")
def main():
    """Display all physical nodes belonging to or connected to the fabric.

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = (
        'Simple application that logs on to the APIC and displays all'
        ' of the physical nodes; both belonging to and connected to the fabric.'
    )
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC; exit quietly on authentication failure
    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    # Dump each class of physical node under an underlined heading
    for phy_class in (Node, ExternalSwitch):
        heading = phy_class.__name__
        print(heading)
        print('=' * len(heading))
        for item in phy_class.get(session):
            print(item.info())
def __init__(self, url, login, password):
    """Create a session to the APIC and record whether login succeeded.

    :param url: String containing the URL of the APIC
    :param login: String containing the APIC login name
    :param password: String containing the APIC password
    """
    # Login to APIC
    self._apic = Session(url, login, password)
    if not self._apic.login().ok:
        self._logged_in = False
        # Fixed: converted Python-2 print statement to the print() function
        # for consistency with the rest of the file
        print('%% Could not login to APIC')
    else:
        self._logged_in = True
def main():
    """ Main execution routine

    Pushes a tenant configuration stored as raw JSON in a file to the APIC,
    or pulls a tenant configuration from the APIC into a file.

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = (
        'Application dealing with tenant configuration. '
        'It can download a tenant configuration from the APIC and store it as raw JSON in a file. '
        'It can also push a tenant configuration stored as raw JSON in a file to the APIC.'
    )
    creds = Credentials(('apic', 'nosnapshotfiles'), description)
    creds.add_argument(
        '--config',
        default=None,
        help='Configuration file to push/pull tenant configuration')
    creds.add_argument('--tenant', default=None, help='Tenant name')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--push-to-apic', action='store_true',
                       help='Push the tenant configuration file to the APIC')
    group.add_argument('--pull-from-apic', action='store_true',
                       # Fixed: missing space between 'and' and 'store' in the
                       # implicitly-concatenated help string
                       help=('Pull the tenant configuration from the APIC and '
                             'store in the specified configuration file'))

    # Get the command line arguments
    args = creds.get()

    # Sanity check the command line arguments
    # (Fixed: Python-2 print statements converted to the print() function)
    if args.config is None:
        print('%% No configuration file given.')
        creds.print_help()
        return
    if args.tenant is None:
        print('%% No Tenant name given.')
        creds.print_help()
        return
    if not args.push_to_apic and not args.pull_from_apic:
        print('%% No direction (push-to-apic/pull-from-apic) given.')
        creds.print_help()
        return

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Do the work
    if args.pull_from_apic:
        pull_config_from_apic(session, args.tenant, args.config)
    if args.push_to_apic:
        push_config_to_apic(session, args.tenant, args.config)
def main():
    """Monitor and display APIC faults, optionally filtered by domain/tenant.

    :return: None
    """
    description = 'Simple application that logs on to the APIC and displays all of the Tenants.'
    creds = Credentials('apic', description)
    creds.add_argument("-d", "--domain-name", type=str,
                       help="list of domains. usage -d tennat.infra")
    creds.add_argument(
        "-t", "--tenant-name", type=str,
        help="name of the tenant of which faults are to be displayed. If not given faults of all the tenants are shown")
    creds.add_argument('--continuous', action='store_true',
                       help='Continuously monitor for faults')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')
        return

    monitor = Faults()
    # Optional filters built from the command line arguments
    domain_filter = {'domain': args.domain_name.split(',')} if args.domain_name is not None else None
    tenant_filter = args.tenant_name if args.tenant_name is not None else None

    monitor.subscribe_faults(session, domain_filter)
    while monitor.has_faults(session, domain_filter) or args.continuous:
        if not monitor.has_faults(session, domain_filter):
            continue
        pending = monitor.get_faults(session, fault_filter=domain_filter,
                                     tenant_name=tenant_filter)
        if pending is None:
            continue
        for fault in pending:
            if fault is None:
                continue
            print("---------------")
            # descr may be absent; substitute a single space as the original did
            print(" descr : " + (fault.descr if fault.descr is not None else " "))
            print(" dn : " + fault.dn)
            print(" rule : " + fault.rule)
            print(" severity : " + fault.severity)
            print(" type : " + fault.type)
            print(" domain : " + fault.domain)
def __init__(self, url, login, password):
    """Open a session to the APIC and initialize the interface caches.

    :param url: String containing the URL of the APIC
    :param login: String containing the APIC login name
    :param password: String containing the APIC password
    """
    # Login to APIC; remember whether authentication succeeded
    self._apic = Session(url, login, password)
    self._logged_in = self._apic.login().ok
    if not self._logged_in:
        print('%% Could not login to APIC')
    # Caches populated later by the collection routines
    self._interfaces = []
    self._port_channels = []
def main():
    """ Main execution routine

    Displays, for each imported contract (ContractInterface), the tenant,
    application profile, and EPG information related to it.

    :return: None
    """
    description = (
        'Simple application that logs on to the APIC'
        ' and displays all the tenant info of the contract_interface related to the imported contract.'
    )
    creds = Credentials('apic', description)
    creds.add_argument("-t", "--tenant_name",
                       help="Tenant Name of where the contract is created")
    creds.add_argument("-i", "--contract_name", help="Imported Contract Name")
    args = creds.get()
    if (args.tenant_name is not None) and (args.contract_name is None):
        # Fixed: raw_input() only exists on Python 2; fall back to input()
        try:
            args.contract_name = raw_input("Contract Name: ")
        except NameError:
            args.contract_name = input("Contract Name: ")

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return  # Fixed: previously continued with a dead session

    data = []  # Fixed: rows list was never initialized in this scope
    tenants = Tenant.get_deep(session)
    for tenant in tenants:
        contracts_interfaces = tenant.get_children(only_class=ContractInterface)
        for contract_interface in contracts_interfaces:
            imported_contract = contract_interface.get_import_contract()
            if imported_contract is None:
                continue
            if args.tenant_name is not None:
                # Only report when both the contract name and its owning
                # tenant match the command line arguments
                if not ((imported_contract.name == args.contract_name) and
                        (imported_contract.get_parent().name == args.tenant_name)):
                    continue
            # The original duplicated this walk in both branches; merged here
            for app in AppProfile.get(session, tenant):
                for epg in EPG.get(session, app, tenant):
                    data.append((imported_contract.name, tenant.name,
                                 app.name, epg.name))
    # Fixed: converted Python-2 print statement to the print() function
    print(tabulate(data,
                   headers=["IMPORTED_CONTRACT", "TENANT", "APP_PROFILE", "EPG"]))
def main():
    """Generate (or delete) randomized tenant configuration on the APIC.

    Supports a one-shot run, a continual test loop, and deletion of all
    previously generated randomized tenants.

    :return: None
    """
    # Set up the Command Line options
    creds = Credentials(('apic', 'nosnapshotfiles'), description='')
    creds.add_argument('--printonly', action='store_true',
                       help='Only print the JSON but do not push to APIC.')
    creds.add_argument('--testloop', action='store_true',
                       help='Run in a continual testing loop.')
    group = creds.add_mutually_exclusive_group()
    group.add_argument(
        '--config',
        default=None,
        help='Optional .ini file providing failure scenario configuration')
    group.add_argument(
        '--delete',
        action='store_true',
        help='Delete ALL of the randomized configuration from the APIC')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        # Fixed: converted Python-2 print statements to the print() function
        print('%% Could not login to APIC')
        print(resp.status_code, resp.text)
        return

    # Handle the delete case
    if args.delete:
        delete_all_randomized_tenants(session)
        return

    # Ensure that a config file has been given
    if args.config is None:
        print('%% Expected --config or --delete option')
        return

    if args.testloop:
        # Loop forever: build config, wait, tear it down, wait, repeat
        while True:
            generate_config(session, args)
            time.sleep(random_number(5, 30))
            delete_all_randomized_tenants(session)
            time.sleep(random_number(5, 30))
    else:
        generate_config(session, args)
def main():
    """ Main show Subnets routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the Subnets.')
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')

    # Collect subnet rows (into the module-level `data`) for the selected
    # tenant(s), tracking the widest value seen per column as we go
    for tenant in Tenant.get(session):
        check_longest_name(tenant.name, "Tenant")
        if args.tenant is None or tenant.name == args.tenant:
            get_subnet(session, tenant)

    # Build a column template sized from the longest observed values
    columns = ("Tenant", "Application Profile", "Bridge Domain",
               "Subnet", "Scope")
    template = ' '.join('{%d:%d}' % (idx, longest_names[col])
                        for idx, col in enumerate(columns))
    print(template.format(*columns))
    print(template.format(*('-' * longest_names[col] for col in columns)))
    for rec in sorted(data):
        print(template.format(*rec))
def send_to_apic(tenant):
    """ Login to APIC and push the config

    :param tenant: Tenant class instance
    :return: request response object
    """
    creds = Credentials('apic', 'Basic Connectivity Example')
    args = creds.get()
    # Login to APIC (certificate verification disabled) and push the tenant
    session = Session(args.url, args.login, args.password, False)
    session.login()
    resp = tenant.push_to_apic(session)
    if resp.ok:
        print('Success')
    return resp
def main():
    """ Main routine

    Creates a tenant containing an application profile with a base EPG plus an
    attribute-based (micro) EPG keyed on an IP address, and pushes it to the
    APIC.

    :return: None
    """
    # Get all the arguments
    description = 'Creates a tenant with a micro-EPG.'
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to the APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return  # Fixed: previously continued and pushed with a dead session

    # Create the Tenant and AppProfile
    tenant = Tenant('acitoolkit-microepg-example')
    app_profile = AppProfile('myapp', tenant)

    # Create a Base EPG that will provide networking for the microEPGs
    base_epg = EPG('base', app_profile)
    base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
    vrf = Context('myvrf', tenant)
    bd = BridgeDomain('mybd', tenant)
    bd.add_context(vrf)
    base_epg.add_bd(bd)

    # Create a microEPG
    microepg = EPG('microepg', app_profile)
    microepg.is_attributed_based = True
    microepg.set_base_epg(base_epg)
    # Add an IP address to this microepg
    criterion = AttributeCriterion('criterion', microepg)
    criterion.add_ip_address('1.2.3.4')
    # Contracts can be provided/consumed from the microepg as desired (not shown)

    # Push the tenant to the APIC
    resp = tenant.push_to_apic(session)
    if not resp.ok:
        print('%% Error: Could not push configuration to APIC')
        print(resp.text)
def main():
    """ Main Show VM Names Routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the virtual machine names.')
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')
        return

    # Direct REST query: all compVm objects with their compVNic children,
    # which carry each NIC's MAC address. Session.get() handles retries and
    # pagination for large responses.
    class_url = '/api/node/class/compVm.json?rsp-subtree=children&rsp-subtree-class=compVNic'
    response = session.get(class_url)

    # A VM appears once per vNIC, so names may repeat with different MACs
    rows = []
    for vm in response.json()['imdata']:
        vm_name = vm['compVm']['attributes']['name']
        rows.extend((vm_name, nic['compVNic']['attributes']['mac'])
                    for nic in vm['compVm']['children'])

    print(tabulate(rows, headers=["VMNAME", "MACADDRESS"]))
def main():
    """Track all Endpoint interface stats into a MySQL database.

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Application that logs on to the APIC and tracks'
                   ' all of the Endpoint stats in a MySQL database.')
    creds = Credentials(qualifier=('apic', 'mysql'), description=description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')
        return

    # Create (if needed) and select the MySQL database
    cnx = mysql.connector.connect(user=args.mysqllogin,
                                  password=args.mysqlpassword,
                                  host=args.mysqlip)
    cursor = cnx.cursor()
    cursor.execute('CREATE DATABASE IF NOT EXISTS acitoolkit_interface_stats;')
    cnx.commit()
    cursor.execute('USE acitoolkit_interface_stats;')

    # Walk every port's stats families; store each new 5-minute interval row
    all_stats = InterfaceStats.get_all_ports(session, 1)
    for intf, stats in all_stats.items():
        for family in stats:
            if '5min' not in stats[family]:
                continue
            for epoch, row in stats[family]['5min'].items():
                if epoch == 0:
                    # Epoch 0 is skipped, as in the original collection logic
                    continue
                if family not in valid_tables:
                    create_table(cursor, cnx, family, list(row.keys()))
                if not interval_end_exists(cursor, family, intf,
                                           row['intervalEnd']):
                    insert_stats_row(cursor, cnx, family, intf, row)
def main():
    """ Main show EPGs routine

    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    if not session.login().ok:
        print('%% Could not login to APIC')

    # Collect EPG rows (into the module-level `data`) for the selected
    # tenant(s), tracking the widest value seen per column as we go
    for tenant in Tenant.get(session):
        check_longest_name(tenant.name, "Tenant")
        if args.tenant is None or tenant.name == args.tenant:
            get_epg(session, tenant)

    # Build a column template sized from the longest observed values
    columns = ("Tenant", "Application Profile", "EPG")
    template = ' '.join('{%d:%d}' % (idx, longest_names[col])
                        for idx, col in enumerate(columns))
    print(template.format(*columns))
    print(template.format(*('-' * longest_names[col] for col in columns)))
    for rec in sorted(data):
        print(template.format(*rec))
tabs[k]['class'], tabs[k]['properties'], headers=tabs.get(k).get('headers')) tenants = class_query(session, 'fvTenant') for t in tenants: createTenantSheet(session, workbook, t) workbook.close() if __name__ == "__main__": description = 'aci-doc' # Gather credentials for ACI creds = Credentials('apic', description) args = creds.get() # Establish an API session to the APIC apic = Session(args.url, args.login, args.password) if apic.login().ok: print("Connected to ACI") print("depending on your configuration, this could take a little while...") with open('config.yaml', 'r') as config: config = yaml.safe_load(config) CreateWorkBook(apic, config['filename'], config['tabs'])
def main():
    """ Main routine

    Searches both private (bridge domain) subnets and external (L3Out)
    subnets for networks containing a given IP address and prints the
    matches.

    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the External Subnets.')
    creds = Credentials('apic', description)
    creds.add_argument('-f', '--find_ip', help='IP address to search for')
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(1)  # Fixed: previously continued with a dead session

    if not args.find_ip:
        print("Error: -f|--find_ip <ip_address> argument required")
        sys.exit(1)
    print("searching for " + args.find_ip)

    # Download all of the tenants, app profiles, and Subnets
    # and store the names as tuples in two lists
    priv = []
    publ = []
    ip = args.find_ip
    tenants = Tenant.get_deep(session, limit_to=[
        'fvTenant', 'fvSubnet', 'l3extOut', 'l3extInstP', 'l3extSubnet'
    ])

    # Private (bridge domain) subnets
    for tenant in tenants:
        apps = AppProfile.get(session, tenant)
        # Hoisted out of the apps loop: the BD list is per-tenant, so it was
        # being re-queried identically once per application profile
        bds = BridgeDomain.get(session, tenant)
        for app in apps:
            for bd in bds:
                subnets = Subnet.get(session, bd, tenant)
                for subnet in subnets:
                    net = IPNetwork(subnet.addr)
                    if net.Contains(IPNetwork(ip)):
                        priv.append((tenant.name, app.name, bd.name,
                                     subnet.addr, subnet.get_scope()))

    # External (L3Out) subnets
    for tenant in tenants:
        for outside_l3 in tenant.get_children(only_class=OutsideL3):
            for outside_epg in outside_l3.get_children(only_class=OutsideEPG):
                outside_networks = outside_epg.get_children(
                    only_class=OutsideNetwork)
                for outside_network in outside_networks:
                    net = IPNetwork(outside_network.addr)
                    if net.Contains(IPNetwork(ip)):
                        publ.append((tenant.name, outside_l3.name,
                                     outside_epg.name, outside_network.addr,
                                     outside_network.get_scope()))

    # Display
    template = "{0:20} {1:20} {2:20} {3:18} {4:15}"
    if len(priv):
        print("")
        print(template.format("Tenant", "App", "Bridge Domain",
                              "Subnet", "Scope"))
        print(template.format("-" * 20, "-" * 20, "-" * 20, "-" * 18, "-" * 15))
        for rec in priv:
            print(template.format(*rec))
    if len(publ):
        print("")
        print(template.format("Tenant", "OutsideL3", "OutsideEPG",
                              "Subnet", "Scope"))
        print(template.format("-" * 20, "-" * 20, "-" * 20, "-" * 18, "-" * 15))
        for rec in publ:
            print(template.format(*rec))
def main():
    """ Main execution routine

    Displays the IPv4 interface status of every interface, grouped per VRF,
    optionally restricted to the VRFs belonging to a single tenant.

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = 'Simple application that logs on to the APIC and displays all of the Interfaces.'
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    resp = session.get('/api/class/ipv4Addr.json')
    intfs = json.loads(resp.text)['imdata']
    for i in intfs:
        ip = i['ipv4Addr']['attributes']['addr']
        op = i['ipv4Addr']['attributes']['operSt']
        cfg = i['ipv4Addr']['attributes']['operStQual']
        dn = i['ipv4Addr']['attributes']['dn']
        # Parse node id, interface name, and VRF out of the DN string
        node = dn.split('/')[2]
        intf = re.split(r'\[|\]', dn)[1]
        vrf = re.split(r'/|dom-', dn)[7]
        # VRFs are named "<tenant>:<ctx>"; extract the tenant portion
        tn = vrf
        if vrf.find(":") != -1:
            tn = re.search("(.*):(.*)", vrf).group(1)
        check_longest_name(node, "Node")
        check_longest_name(intf, "Interface")
        check_longest_name(ip, "IP Address")
        check_longest_name(cfg, "Admin Status")
        check_longest_name(op, "Status")
        if args.tenant is None or tn == args.tenant:
            # Fixed: the first record seen for each VRF was previously
            # dropped (the list was created on one branch of the
            # if/else and the append only happened on the other)
            data.setdefault(vrf, []).append((node, intf, ip, cfg, op))

    for k in data.keys():
        header = 'IP Interface Status for VRF "{}"'.format(k)
        print(header)
        template = '{0:' + str(longest_names["Node"]) + '} ' \
                   '{1:' + str(longest_names["Interface"]) + '} ' \
                   '{2:' + str(longest_names["IP Address"]) + '} ' \
                   '{3:' + str(longest_names["Admin Status"]) + '} ' \
                   '{4:' + str(longest_names["Status"]) + '}'
        print(template.format("Node", "Interface", "IP Address",
                              "Admin Status", "Status"))
        print(template.format('-' * longest_names["Node"],
                              '-' * longest_names["Interface"],
                              '-' * longest_names["IP Address"],
                              '-' * longest_names["Admin Status"],
                              '-' * longest_names["Status"]))
        for rec in sorted(data[k]):
            print(template.format(*rec))
        print('')
def push_config_to_apic(self):
    """ Push the configuration to the APIC

    Deduplicates contract policies, then pushes contracts followed by EPGs,
    throttling into multiple pushes when the generated JSON grows too large.
    When ``self.displayonly`` is set, the JSON is printed instead of pushed.
    NOTE(review): uses Python-2 print statements and ``unicode`` — this
    method appears to target Python 2.

    :return: Requests Response instance indicating success or not
    """
    # Push in chunks of at most ~62KB of JSON text to avoid huge requests
    THROTTLE_SIZE = 500000 / 8
    # Set the tenant name correctly
    if self._tenant_name == '' and self.cdb.has_context_config():
        self.set_tenant_name(self.cdb.get_context_config().tenant_name)
    elif self._tenant_name == '':
        self.set_tenant_name('acitoolkit')

    # Find all the unique contract providers
    logging.debug('Finding the unique contract providers')
    unique_providers = {}
    for provided_policy in self.cdb.get_contract_policies():
        if provided_policy.dst_id not in unique_providers:
            unique_providers[provided_policy.dst_id] = 0
        else:
            unique_providers[provided_policy.dst_id] += 1
    logging.debug('Found %s unique contract providers',
                  len(unique_providers))

    # Find any duplicate contracts that this provider is providing (remove)
    # Two policies are duplicates when they share destination ids and
    # permissions; the survivor absorbs the duplicate's source ids.
    logging.debug('Finding any duplicate contracts')
    duplicate_policies = []
    for provider in unique_providers:
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy in duplicate_policies:
                continue
            if provider in provided_policy.dst_ids:
                for other_policy in self.cdb.get_contract_policies():
                    if other_policy == provided_policy or other_policy in duplicate_policies:
                        continue
                    if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(
                            provided_policy):
                        provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                        duplicate_policies.append(other_policy)
                        logging.debug(
                            'duplicate_policies now has %s entries',
                            len(duplicate_policies))

    logging.debug('Removing duplicate contracts')
    for duplicate_policy in duplicate_policies:
        self.cdb.remove_contract_policy(duplicate_policy)

    if not self.displayonly:
        # Log on to the APIC
        apic_cfg = self.cdb.get_apic_config()
        apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
        resp = apic.login()
        if not resp.ok:
            return resp

    logging.debug('Generating JSON....')
    # Push all of the Contracts
    logging.debug('Pushing contracts. # of Contract policies: %s',
                  len(self.cdb.get_contract_policies()))
    tenant = Tenant(self._tenant_name)
    for contract_policy in self.cdb.get_contract_policies():
        name = contract_policy.src_name + '::' + contract_policy.dst_name
        contract = Contract(name, tenant)
        # Truncate the description, reserving room for characters that the
        # APIC escapes (quotes and slashes) within the 128-char limit
        contract.descr = contract_policy.descr[0:127 - (
            contract_policy.descr.count('"') +
            contract_policy.descr.count("'") +
            contract_policy.descr.count('/'))]
        for whitelist_policy in contract_policy.get_whitelist_policies():
            entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
            # Protocols 6 (TCP) and 17 (UDP) carry port ranges; others do not
            if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                entry = FilterEntry(entry_name,
                                    applyToFrag='no',
                                    arpOpc='unspecified',
                                    dFromPort=whitelist_policy.port_min,
                                    dToPort=whitelist_policy.port_max,
                                    etherT='ip',
                                    prot=whitelist_policy.proto,
                                    sFromPort='1',
                                    sToPort='65535',
                                    tcpRules='unspecified',
                                    parent=contract)
            else:
                entry = FilterEntry(entry_name,
                                    applyToFrag='no',
                                    arpOpc='unspecified',
                                    etherT='ip',
                                    prot=whitelist_policy.proto,
                                    parent=contract)
        if not self.displayonly:
            # Throttle: push what has accumulated and start a fresh tenant
            if len(str(tenant.get_json())) > THROTTLE_SIZE:
                logging.debug('Throttling contracts. Pushing config...')
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp
                tenant = Tenant(self._tenant_name)

    if self.displayonly:
        print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
    else:
        logging.debug('Pushing remaining contracts')
        resp = tenant.push_to_apic(apic)
        if not resp.ok:
            return resp

    # Push all of the EPGs
    logging.debug('Pushing EPGs')
    if not self.displayonly:
        tenant = Tenant(self._tenant_name)
        app = AppProfile(self._app_name, tenant)
    if self._use_ip_epgs:
        # Create a Base EPG
        base_epg = EPG('base', app)
        if self.cdb.has_context_config():
            context_name = self.cdb.get_context_config().name
        else:
            context_name = 'vrf1'
        context = Context(context_name, tenant)
        bd = BridgeDomain('bd', tenant)
        bd.add_context(context)
        base_epg.add_bd(bd)
        if self.displayonly:
            # If display only, just deploy the EPG to leaf 101
            base_epg.add_static_leaf_binding('101', 'vlan', '1',
                                             encap_mode='untagged')
        else:
            # Deploy the EPG to all of the leaf switches
            nodes = Node.get(apic)
            for node in nodes:
                if node.role == 'leaf':
                    base_epg.add_static_leaf_binding(node.node, 'vlan', '1',
                                                     encap_mode='untagged')

        # Create the Attribute based EPGs
        logging.debug('Creating Attribute Based EPGs')
        for epg_policy in self.cdb.get_epg_policies():
            if not self.displayonly:
                # Check if we need to throttle very large configs
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    # Rebuild the scaffolding objects in the fresh tenant
                    tenant = Tenant(self._tenant_name)
                    app = AppProfile(self._app_name, tenant)
                    context = Context(context_name, tenant)
                    bd = BridgeDomain('bd', tenant)
                    bd.add_context(context)
                    if self._use_ip_epgs:
                        base_epg = EPG('base', app)
                        base_epg.add_bd(bd)
            epg = EPG(epg_policy.name, app)

            # Check if the policy has the default 0.0.0.0 IP address
            no_default_endpoint = True
            for node_policy in epg_policy.get_node_policies():
                if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                    no_default_endpoint = False
                    epg.add_bd(bd)

            # Add all of the IP addresses
            if no_default_endpoint:
                epg.is_attributed_based = True
                epg.set_base_epg(base_epg)
                criterion = AttributeCriterion('criterion', epg)
                ipaddrs = []
                for node_policy in epg_policy.get_node_policies():
                    ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                    if not ipaddr.is_multicast:
                        # Skip multicast addresses. They cannot be IP based EPGs
                        ipaddrs.append(ipaddr)
                # Collapse adjacent/overlapping addresses into networks
                nets = ipaddress.collapse_addresses(ipaddrs)
                for net in nets:
                    criterion.add_ip_address(str(net))
            epg.descr = epg_policy.descr[0:127]
            # Consume and provide all of the necessary contracts
            for contract_policy in self.cdb.get_contract_policies():
                contract = None
                if epg_policy.id in contract_policy.src_ids:
                    name = contract_policy.src_name + '::' + contract_policy.dst_name
                    contract = Contract(name, tenant)
                    epg.consume(contract)
                if epg_policy.id in contract_policy.dst_ids:
                    name = contract_policy.src_name + '::' + contract_policy.dst_name
                    if contract is None:
                        contract = Contract(name, tenant)
                    epg.provide(contract)
    else:
        # Simple (non IP-based) EPGs
        logging.debug('Creating EPGs')
        for epg_policy in self.cdb.get_epg_policies():
            epg = EPG(epg_policy.name, app)
            epg.descr = epg_policy.descr[0:127]
            # Consume and provide all of the necessary contracts
            for contract_policy in self.cdb.get_contract_policies():
                contract = None
                if epg_policy.id in contract_policy.src_ids:
                    name = contract_policy.src_name + '::' + contract_policy.dst_name
                    contract = Contract(name, tenant)
                    epg.consume(contract)
                if epg_policy.id in contract_policy.dst_ids:
                    name = contract_policy.src_name + '::' + contract_policy.dst_name
                    if contract is None:
                        contract = Contract(name, tenant)
                    epg.provide(contract)
    if self.displayonly:
        print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
    else:
        resp = tenant.push_to_apic(apic)
    return resp
def main():
    """
    Delete tenants whose names match a pattern given on the command line.

    Matching is controlled by exactly one of the mutually exclusive options
    --startswith / --endswith / --exactmatch / --contains.  Unless --force
    is supplied, each deletion is confirmed interactively.

    :return: None
    """
    # Get all the arguments
    description = 'It logs in to the APIC and will delete tenants named with the specified string.'
    creds = Credentials(['apic', 'nosnapshotfiles'], description)
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--startswith', default=None,
                       help='String to match that starts the tenant name')
    group.add_argument('--endswith', default=None,
                       help='String to match that ends the tenant name')
    group.add_argument('--exactmatch', default=None,
                       help='String that exactly matches the tenant name')
    group.add_argument('--contains', default=None,
                       help='String that is contained in the tenant name')
    creds.add_argument(
        '--force', action='store_true',
        help='Attempt to remove the tenants without prompting for confirmation'
    )
    args = creds.get()

    # Login to the APIC
    apic = Session(args.url, args.login, args.password)
    resp = apic.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        # BUG FIX: the original fell through and kept running (and would
        # query/delete) after a failed login; abort instead.
        return

    # Get all of the Tenants
    tenants = Tenant.get(apic)

    # Find the list of Tenants to delete according to command line options
    tenants_to_delete = []
    for tenant in tenants:
        if args.startswith is not None:
            if tenant.name.startswith(args.startswith):
                tenants_to_delete.append(tenant)
        elif args.endswith is not None:
            if tenant.name.endswith(args.endswith):
                tenants_to_delete.append(tenant)
        elif args.exactmatch is not None:
            if args.exactmatch == tenant.name:
                tenants_to_delete.append(tenant)
        elif args.contains is not None:
            if args.contains in tenant.name:
                tenants_to_delete.append(tenant)

    # Query the user to be sure of deletion
    if not args.force:
        # BUG FIX: the original called tenants_to_delete.remove() while
        # iterating over the same list, which skips the element following
        # every removal.  Build a new list of confirmed tenants instead.
        confirmed = []
        for tenant in tenants_to_delete:
            prompt = 'Delete tenant %s ? [y/N]' % tenant.name
            try:
                resp = raw_input(prompt)   # Python 2
            except NameError:
                resp = input(prompt)       # Python 3
            if resp.lower().startswith('y'):
                confirmed.append(tenant)
            else:
                print('Skipping tenant', tenant.name)
        tenants_to_delete = confirmed

    # Delete the tenants
    for tenant in tenants_to_delete:
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(apic)
        if resp.ok:
            print('Deleted tenant', tenant.name)
        else:
            print('Could not delete tenant', tenant.name)
            # BUG FIX: was the Python-2-only statement `print resp.text`
            print(resp.text)
# NOTE(review): this chunk begins with the tail of a function whose `def`
# line is outside the visible region; `domains`, `epg`, `list_domains` and
# `counter` are presumably defined in that missing header -- verify upstream.
for domain in domains:
    if domain.domain_type == 'phys':
        # Collect the DNs of physical domains that are attached to the EPG
        if domain.epg_name == epg.name:
            print(domain.dn)
            list_domains.append(domain.dn)
            counter = counter + 1
return list_domains


# ---- module-level script code ----
dirname = os.path.dirname(__file__)

# Login to APIC
description = ('simple python script.')
creds = Credentials('apic', description)
creds.add_argument('--tenant', help='The name of Tenant')
# NOTE(review): `credentials` is never defined here -- this line raises
# NameError at runtime.  It most likely should be `args = creds.get()`
# followed by `Session(args.url, args.login, args.password)`, matching the
# other scripts in this file -- confirm before fixing.
session = Session(credentials.url, credentials.username, credentials.password)
resp = session.login()
if resp.ok:
    # NOTE(review): typos in the user-facing strings ("sucessful", "APC")
    # are runtime text and are deliberately left untouched here.
    print("Login to APIC sucessful")
else:
    (print(
        "Error logging into APC, please check the ip address and credentials"))
port_channels = aci.PortChannel.get(session)
tenants = aci.Tenant.get(session)
print(len(tenants), " amount of tenants")
print("----------")
# NOTE(review): the body of this loop is truncated at the chunk boundary.
for tenant in tenants:
def main():
    """
    Main show EPGs routine.

    Logs in to the APIC, walks every tenant / application profile / EPG /
    endpoint, and prints the hierarchy to stdout as a YAML document
    (reverse-DNS hostnames are added for endpoints where resolvable).

    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    args = creds.get()
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Download all of the tenants, app profiles, and EPGs
    # and store the names as tuples in a list
    tenants = Tenant.get_deep(session)
    tenants_list = []
    for tenant in tenants:
        tenants_dict = {}
        tenants_dict['name'] = tenant.name
        if tenant.descr:
            tenants_dict['description'] = tenant.descr
        tenants_dict['app-profiles'] = []
        for app in tenant.get_children(AppProfile):
            app_profiles = {'name': app.name}
            if app.descr:
                app_profiles['description'] = app.descr
            app_profiles['epgs'] = []
            for epg in app.get_children(EPG):
                epgs_info = {'name': epg.name}
                if epg.descr:
                    epgs_info['description'] = epg.descr
                epgs_info['endpoints'] = []
                for endpoint in epg.get_children(Endpoint):
                    endpoint_info = {'name': endpoint.name}
                    # 0.0.0.0 means the endpoint has no learned IP address
                    if endpoint.ip != '0.0.0.0':
                        endpoint_info['ip'] = endpoint.ip
                        try:
                            hostname = socket.gethostbyaddr(endpoint.ip)[0]
                        except socket.error:
                            hostname = None
                        if hostname:
                            endpoint_info['hostname'] = hostname
                    if endpoint.descr:
                        endpoint_info['description'] = endpoint.descr
                    epgs_info['endpoints'].append(endpoint_info)
                app_profiles['epgs'].append(epgs_info)
            tenants_dict['app-profiles'].append(app_profiles)
        tenants_list.append(tenants_dict)

    tenants_info = {'tenants': tenants_list}
    # BUG FIX: safe_dump() writes to the stream argument and returns None,
    # so wrapping the call in print() appended a spurious "None" line to
    # the YAML output.  Dump straight to stdout instead.
    yaml.safe_dump(tenants_info, sys.stdout, indent=4,
                   default_flow_style=False)
def main():
    ''' Main Function

    Command-line tool to track ACI endpoint movement across a maintenance
    window: capture endpoints before (--pre) and after (--post) a change,
    then diff the two JSON captures (--compare).  --list shows what can be
    filtered on; --summary adds per-Tenant/App/EPG/MAC/Encap totals.
    '''
    # Setup Arguments utilizing the ACIToolkit Credentials Method
    description = ('Help to determine EP movement during Maintenance Windows')
    creds = Credentials('apic', description)
    creds.add_argument('-v', '--version', action='version',
                       version='%(prog)s == {}'.format(__version__))
    creds.add_argument("--debug", dest="debug",
                       choices=["debug", "info", "warn", "critical"],
                       default="info",
                       help='Enable debugging output to screen')
    creds.add_argument(
        '--log', action='store_true',
        help=
        'Write the output to a log file: {}.log. Automatically adds timestamp to filename'
        .format(__file__.split(".py")[0]))
    creds.add_argument(
        '--list', action='store_true',
        help=
        'Print out the list of Tenants / App Profiles / EPGs available to work with'
    )
    creds.add_argument(
        '--filter',
        help=
        'Specify what to filter on. Eg: "tn-mipetrin" or "ap-mipetrin-AppProfile". Use --list to identify what can be used for filtering. Default = None'
    )
    creds.add_argument(
        '--pre',
        help=
        'Write the data to a file of your choosing. Specify your prefix. Format will be JSON and this extension is automatically added'
    )
    creds.add_argument(
        '--post',
        help=
        'Write the data to a file of your choosing. Specify your prefix. Format will be JSON and this extension is automatically added'
    )
    creds.add_argument(
        '--compare', nargs=2,
        help=
        'Compare the 2 files you specify. Be sure to pick a PRE and POST file')
    creds.add_argument(
        '--summary', type=int,
        help=
        'Optionally, print out detailed summary of identified Endpoints greater than x (provide totals per Tenant/App/EPG/MAC/Encap)'
    )
    args = creds.get()

    # Set up custom logger
    setup_logger(logger, args.debug, args.log)

    # If --suumary enabled, set up globals to then utlize the additonal calculations throughout code
    if args.summary:
        global detailed_summary
        global detailed_summary_number
        detailed_summary = True
        detailed_summary_number = args.summary

    # Due to creds / argparse above, will always need to provide APIC / User / Pass even if wanting to do local comparison of PRE/POST JSON files
    # However, below check will ensure we actually only perform login if NOT doing a comparison. That is, if doing --compare, you can type ANY password even simply hitting enter
    if not args.compare:
        # Login to APIC only if NOT doing a comparison - as already have the data we need in the local JSON files
        session = Session(args.url, args.login, args.password)
        resp = session.login()
        # Check if the login was successful
        if not resp.ok:
            logger.critical('Could not login to APIC')
            my_error = resp.json()
            logger.critical("Specific Error: {}".format(
                my_error["imdata"][0]["error"]["attributes"]["text"]))
            exit(0)

    # Start time count at this point, otherwise takes into consideration the amount of time taken to input the password by the user
    start_time = time.time()
    logger.debug("Begin Execution of script")

    # Order of precedence is to execute list of tenants, pre capture, post capture, compare
    if args.list:
        print_header("Gathering available information from APIC...")
        get_raw_tenant_info(session)
    elif args.pre:
        print_header("Gathering 'PRE' Endpoints...")
        # Setup Filename for PRE file (using user input) and global pre_suffix
        my_filename_pre = args.pre + pre_suffix
        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_pre, args.filter)
        else:
            get_fvCEp(session, my_filename_pre, "None")
    elif args.post:
        print_header("Gathering 'POST' Endpoints...")
        # Setup Filename for POST file (using user input) and global post_suffix
        my_filename_post = args.post + post_suffix
        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_post, args.filter)
        else:
            get_fvCEp(session, my_filename_post, "None")
    elif args.compare:
        # Ensure *BOTH* the specified PRE and POST files exist. If not, throw error and explain which ones currently exist
        # Look for the suffix that I auto append during the --pre and --post file generation
        # NOTE(review): if both supplied names match pre_suffix (or both match
        # post_suffix), one of my_filename_pre / my_filename_post stays unbound
        # and the os.path.isfile() checks below raise NameError -- confirm.
        for file in args.compare:
            if pre_suffix in file:
                my_filename_pre = file
            elif post_suffix in file:
                my_filename_post = file
            else:
                logger.critical(
                    "Issue with file names supplied as don't contain the suffix defined. Are they the files generated by this script via the --pre / --post options?"
                )
                exit(0)

        # Check that the files do in fact exist and are readable
        if not os.path.isfile(my_filename_pre):
            logger.critical(
                "Need to ensure the PRE capture has been completed and readable"
            )
            exit(0)

        # Check that the files do in fact exist and are readable
        if not os.path.isfile(my_filename_post):
            logger.critical(
                "Need to ensure the POST capture has been completed and readable"
            )
            exit(0)

        print_header("Analyzing 'PRE' Endpoints...")
        analyze_file(my_filename_pre, "pre")

        print_header("Analyzing 'POST' Endpoints...")
        analyze_file(my_filename_post, "post")

        print_header("Comparing 'PRE' and 'POST' Endpoints...")
        compare_eps()

        print_header("Endpoints with Movements...")
        logger.info("\n" + tabulate(ep_tracker_diff,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        print_header("Endpoints only in PRE capture")
        logger.info("\n" + tabulate(ep_only_in_pre_capture,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        print_header("Endpoints only in POST capture")
        logger.info("\n" + tabulate(ep_only_in_post_capture,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        # Check if the --summary option is enabled
        if detailed_summary:
            print_header(
                "(Moved/PRE/POST) Category entries that have a total greater than: {}"
                .format(detailed_summary_number))
            logger.debug(ep_category_summary)
            ep_summary_data = ""  # String object to print out detailed summary that will be built using code below
            # Loop through EP Categories to then be stored in the string object "ep_summary_data"
            # NOTE(review): dict.iteritems() is Python-2-only; this whole
            # script appears to target Python 2 -- verify before porting.
            for category, entries in ep_category_summary.iteritems():
                ep_summary_data += "\n" + category.upper() + "\n"
                # Then loop through each item within each category to highlight the particular Tenant/App/EPG/MAC/Encap
                for item, number in entries.iteritems():
                    # Check if the current entry has a value greater than or equal to the value specified on the CLI
                    if number >= detailed_summary_number:
                        ep_summary_data += "{:6} == {}\n".format(number, item)
            # Also provide a tally of the total amount of EPs that are in BOTH / PRE / POST - as identified
            grand_total_eps = ep_summary["both"] + ep_summary[
                "pre"] + ep_summary["post"]
            ep_summary_data += "\nGRAND TOTAL\n"
            ep_summary_data += "{:6} EPs across all captures\n".format(
                grand_total_eps)
            logger.info(ep_summary_data)

        # Print out the data
        print_header("Summary")
        # Structure of ep_summary{'pre': 11, 'post': 15, 'compare_ep_move_PRE.json': 11, 'compare_ep_move_POST.json': 15}
        for key, value in sorted(ep_summary.iteritems(), reverse=True):
            # Loop through dictionary and find if they are the .JSON filenames
            if "json" in key:
                if "pre" in key:  # Check for _PRE
                    logger.info("PRE Filename: {}".format(key))
                    logger.info(" Endpoints read: {}".format(value))
                    logger.info(" Captured on: {}\n".format(
                        ep_analysis_time["pre"]))
                elif "post" in key:  # Check for _POST
                    logger.info("POST Filename: {}".format(key))
                    logger.info(" Endpoints read: {}".format(value))
                    logger.info(" Captured on: {}\n".format(
                        ep_analysis_time["post"]))
                else:
                    # NOTE(review): "determiniation" typo is runtime text,
                    # left untouched here.
                    logger.warning(
                        "ERROR with determiniation of PRE/POST filename in ep_summary"
                    )

        # Print out analysis
        logger.info("Endpoints with movement: {}".format(ep_summary["both"]))
        logger.info("Endpoints only in PRE: {}".format(ep_summary["pre"]))
        logger.info("Endpoints only in POST: {}\n".format(ep_summary["post"]))

        if args.log:
            logger.info("Log file written: {}\n".format(logging_filename))
    else:
        logger.critical(
            "\nSomething wrong with your selections. Please try again or use the --help option\n"
        )
        creds.print_help()

    finish_time = time.time()  # Calculate finish time
    logger.info("#" * 80)
    logger.info("Started analysis @ {}".format(
        time.asctime(time.localtime(start_time))))
    logger.info("Ended analysis @ {}".format(
        time.asctime(time.localtime(finish_time))))
    logger.info("--- Total Execution Time: %s seconds ---" %
                (finish_time - start_time))
    logger.info("#" * 80)
def get_data_from_apic(url, username, password):
    """
    Gets the Endpoint data from the APIC and writes a per-EPG endpoint
    count to 'static/epgs.json' as a nested name/children/size hierarchy.

    :param url: String containing the URL of the APIC
    :param username: String containing the username to login to the APIC
    :param password: String containing the password to login to the APIC
    :return: None
    """
    ep_db = {}

    # Login to the APIC
    # BUG FIX: the original used Python-2-only print statements, which are
    # a SyntaxError under Python 3.  The parenthesised print() form below
    # is valid (and prints identically) on both Python 2 and 3.
    print('Logging in to APIC...')
    session = Session(url, username, password, subscription_enabled=False)
    resp = session.login()
    if not resp.ok:
        print('Could not login to APIC')
        sys.exit(0)

    # Get the endpoint from the APIC
    print('Getting endpoints from the APIC....')
    endpoints = Endpoint.get(session)

    # Loop through the endpoints and count them on a per EPG basis
    print('Counting the endpoints....')
    for endpoint in endpoints:
        epg = endpoint.get_parent()
        app = epg.get_parent()
        tenant = app.get_parent()
        # setdefault preserves the original create-on-first-sight behavior
        tenant_db = ep_db.setdefault(tenant.name, {})
        app_db = tenant_db.setdefault(app.name, {})
        app_db[epg.name] = app_db.get(epg.name, 0) + 1

    # Write the results to a JSON formatted dictionary
    print('Translating results to JSON...')
    epgs = {'name': 'epgs', 'children': []}
    for tenant in ep_db:
        tenant_json = {'name': tenant, 'children': []}
        for app in ep_db[tenant]:
            app_json = {'name': app, 'children': []}
            for epg in ep_db[tenant][app]:
                epg_json = {'name': epg, 'size': ep_db[tenant][app][epg]}
                app_json['children'].append(epg_json)
            tenant_json['children'].append(app_json)
        epgs['children'].append(tenant_json)

    # Write the formatted JSON to a file
    print('Writing results to a file....')
    try:
        with open('static/epgs.json', 'w') as epg_file:
            epg_file.write(json.dumps(epgs))
    except IOError:
        # Single string so output matches the old two-argument py2 print
        print('%% Unable to open configuration file static/epgs.json')
        sys.exit(0)
    except ValueError:
        print('%% File could not be decoded as JSON.')
        sys.exit(0)
def run(self):
    """
    Spin a separate thread off to sit in the background monitoring APIC
    MOs.  The heavy lifting is done via the ACI Toolkit that implements a
    Websocket connection to listen for events pushed by the APIC.

    Each received event is inserted as a row into the local sqlite
    'events' table.  Runs until KeyboardInterrupt (or exits on a fatal
    DB/login error).
    """
    evnt_logger = logging.getLogger('monitor')
    stdout = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stdout.setFormatter(formatter)
    evnt_logger.addHandler(stdout)
    evnt_logger.info('Starting Thread')

    evnt_logger.info('Getting DB Connection')
    try:
        conn = _get_db()
    except sqlite3.Error as e:
        # BUG FIX: Exception.message was removed in Python 3 (deprecated
        # since 2.6) -- log the exception object itself instead.
        evnt_logger.critical('Could not get handle to DB: %s', e)
        sys.exit(1)

    # Create table and index on first run; IF NOT EXISTS makes re-runs safe
    evnt_cursor = conn.cursor()
    try:
        evnt_cursor.execute(
            '''CREATE TABLE IF NOT EXISTS events (cls TEXT, name TEXT, timestamp timestamp, json TEXT, url TEXT)''')
        evnt_cursor.execute(
            '''CREATE INDEX IF NOT EXISTS datetime_index ON events (timestamp DESC)''')
        conn.commit()
    except sqlite3.OperationalError:
        evnt_logger.info('No need to create Table')

    # Login to APIC
    session = Session(self.url, self.login, self.password)
    resp = session.login()
    if not resp.ok:
        evnt_logger.critical('Could not login to APIC')
        return

    # Subscribe to every MO class selected in the feed configuration
    selected_classes = feed.selected_classes
    for cls in selected_classes:
        evnt_logger.info('Subscribing to %s', cls.__name__)
        cls.subscribe(session)
        evnt_logger.info('Subscribed to %s', cls.__name__)

    TableRow = namedtuple('TableRow', ('cls', 'name', 'timestamp', 'json', 'url'))
    # Poll the subscriptions forever; each pending event becomes one DB row
    while True:
        try:
            for cls in selected_classes:
                if cls.has_events(session):
                    event_object = cls.get_event(session)
                    row = TableRow(
                        cls=event_object.__class__.__name__,
                        name=event_object.__str__(),
                        timestamp=datetime.datetime.now(),
                        json=json.dumps(event_object.get_json()),
                        url='Not Implemented')
                    evnt_cursor.execute('INSERT INTO events VALUES (?, ?, ?, ?, ?)', row)
                    conn.commit()
                    evnt_logger.info('[%s] Update to %s',
                                     event_object.__class__.__name__,
                                     event_object)
        except KeyboardInterrupt:
            evnt_logger.info('Closing Down')
            return