def test_inherit_contract_and_delete(self):
        self.delete_tenant()
        config_json = self.get_config_json()
        args = TestArgs()
        apic = Session(APIC_URL, APIC_USERNAME, APIC_PASSWORD)
        apic.login()
        self.setup_tenant(apic)
        tool = execute_tool(args, cli_mode=False)
        tool.add_config(config_json)
        time.sleep(2)

        # Verify that the contract is not inherited by the child EPG
        self.verify_not_inherited(apic)
        time.sleep(2)

        # Add the contract
        self.add_contract(apic)
        time.sleep(2)

        # Verify that the contract is now inherited by the child EPG
        self.verify_inherited(apic)

        # Remove the contract from the parent EPG
        self.remove_contract(apic)
        time.sleep(2)

        # Verify that the contract is not inherited by the child EPG
        self.verify_not_inherited(apic)

        self.delete_tenant()
Example #2
def main():
    """
    Main execution routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC and displays all'
                   ' of the physical nodes, both those belonging to and those connected to the fabric.')
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    # List of classes to get and print
    phy_classes = (Node, ExternalSwitch)

    for phy_class in phy_classes:
        # Print the class name
        class_name = phy_class.__name__
        print(class_name)
        print('=' * len(class_name))

        # Get and print all of the items from the APIC
        items = phy_class.get(session)
        for item in items:
            print(item.info())
def main():
    """
    Main common routine for show interface description
    :return: None
    """
    # Set up the command line options
    creds = Credentials(['apic', 'nosnapshotfiles'],
                        description=("This application replicates the switch "
                                     "CLI command 'show interface description'"))
    creds.add_argument('-s', '--switch',
                       type=str,
                       default=None,
                       help='Specify a particular switch id, e.g. "101"')
    creds.add_argument('-i', '--interface',
                       type=str,
                       default=None,
                       help='Specify a specific interface, e.g. "eth1/1"')
    args = creds.get()

    # Login to APIC
    apic = Session(args.url, args.login, args.password)
    if not apic.login().ok:
        print('%% Could not login to APIC')
        return

    # Show interface description
    node_ids = get_node_ids(apic, args)
    apic_intf_classes = ['l1PhysIf', 'pcAggrIf', 'l3EncRtdIf', 'sviIf',
                         'tunnelIf', 'mgmtMgmtIf', 'l3LbRtdIf']
    for apic_intf_class in apic_intf_classes:
        show_interface_description(apic, node_ids, apic_intf_class=apic_intf_class,
                                   specific_interface=args.interface)
def main():
    """
    Main common routine for show interface description
    :return: None
    """
    # Set up the command line options
    creds = Credentials(['apic', 'nosnapshotfiles'],
                        description=("This application replicates the switch "
                                     "CLI command 'show interface fex'"))
    creds.add_argument('-s',
                       '--switch',
                       type=str,
                       default=None,
                       help='Specify a particular switch id, e.g. "101"')
    args = creds.get()

    # Login to APIC
    apic = Session(args.url, args.login, args.password)
    if not apic.login().ok:
        print('%% Could not login to APIC')
        return

    # Show interface description
    node_ids = get_node_ids(apic, args)
    show_interface_fex(apic, node_ids)
Example #5
 def __init__(self, url, login, password):
     # Login to APIC
     self._apic = Session(url, login, password)
     self._if_brief_headers = {
         'l1PhysIf': [
             'Ethernet Interface', 'VLAN', 'Type', 'Mode', 'Status',
             'Reason', 'Speed', 'Port Ch #'
         ],
         'pcAggrIf': [
             'Port-channel Interface', 'VLAN', 'Type', 'Mode', 'Status',
             'Reason', 'Speed', 'Protocol'
         ],
         'l3LbRtdIf': ['Interface', 'Status', 'Description'],
         'tunnelIf':
         ['Interface', 'Status', 'IP Address', 'Encap type', 'MTU'],
         'sviIf': ['Interface', 'Secondary VLAN(Type)', 'Status', 'Reason'],
         'l3EncRtdIf': [],
         'mgmtMgmtIf':
         ['Port', 'VRF', 'Status', 'IP Address', 'Speed', 'MTU'],
         'l2ExtIf': [],
         'l2VfcIf': [
             'Interface', 'Vsan', 'Admin\nMode', 'Admin Trunk Mode',
             'Status', 'Bind Info', 'Oper Mode', 'Oper Speed (Gbps)'
         ]
     }
     self._if_types = self._if_brief_headers.keys()
     if not self._apic.login().ok:
         self._logged_in = False
         print('%% Could not login to APIC')
     else:
         self._logged_in = True
     self._interfaces = []
Example #6
def main():
    """
    Main execution routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = (
        'Simple application that logs on to the APIC and displays all'
        ' of the physical nodes, both those belonging to and those connected to the fabric.'
    )
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    # List of classes to get and print
    phy_classes = (Node, ExternalSwitch)

    for phy_class in phy_classes:
        # Print the class name
        class_name = phy_class.__name__
        print(class_name)
        print('=' * len(class_name))

        # Get and print all of the items from the APIC
        items = phy_class.get(session)
        for item in items:
            print(item.info())
Example #7
def main():
    """
    Main execution routine

    :return: None
    """
    creds = Credentials('apic')
    creds.add_argument('--tenant', help='The name of Tenant')
    creds.add_argument('--app', help='The name of ApplicationProfile')
    creds.add_argument('--bd', help='The name of BridgeDomain')
    creds.add_argument('--epg', help='The name of EPG')
    creds.add_argument('--json', const='false', nargs='?', help='Json output only')

    args = creds.get()
    session = Session(args.url, args.login, args.password)
    session.login()

    tenant = Tenant(args.tenant)
    app = AppProfile(args.app, tenant)
    bd = BridgeDomain(args.bd, tenant)
    epg = EPG(args.epg, app)
    epg.add_bd(bd)

    if args.json:
        print(tenant.get_json())
    else:
        resp = session.push_to_apic(tenant.get_url(),
                                    tenant.get_json())

        if not resp.ok:
            print('%% Error: Could not push configuration to APIC')
            print(resp.text)
Example #8
def main():
    """
    Main execution routine
    """
    description = ('Simple application that logs on to the APIC'
                   ' and displays usage information for a given DN')
    creds = Credentials('apic', description)
    creds.add_argument("-d", "--dn_name",
                       help="DN to query for usage information")

    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
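    # Ask the APIC for the DN's relation children (relnFrom), i.e. the objects that use this DN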
    url = '/api/mo/{}.json?query-target=children&target-subtree-class=relnFrom'
    url = url.format(args.dn_name)

    resp = session.get(url)

    data = []  # (tDn, class) rows collected for display
    if resp.ok:
        used_by = resp.json()['imdata']
        for item in used_by:
            kls = next(iter(item))
            attributes = item[kls]['attributes']
            data.append((attributes['tDn'], kls))
    print(tabulate(data, headers=["Used by", "Class"]))
Example #9
def main():
    """
    Main show Process routine
    :return: None
    """
    description = 'Simple application that logs on to the APIC and check cluster information for a fabric'
    creds = Credentials('apic', description)

    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    cluster = Cluster.get(session)

    if cluster.config_size != cluster.cluster_size:
        print("*******************************************************")
        print("WARNING, configured cluster size ", cluster.config_size)
        print(":   not equal to the actual size ", cluster.cluster_size)
        print("WARNING, desired stats collection might be lost")
        print("*******************************************************")
        print("APICs in the cluster", cluster.name, ":")
        for apic in cluster.apics:
            print(json.dumps(apic, indent=4, sort_keys=True))
    else:
        print("PASS")
Example #10
def main():
    """
    Main execution routine

    :return: None
    """
    creds = Credentials('apic')
    creds.add_argument('--tenant', help='The name of Tenant')
    creds.add_argument('--app', help='The name of ApplicationProfile')
    creds.add_argument('--bd', help='The name of BridgeDomain')
    creds.add_argument('--epg', help='The name of EPG')
    creds.add_argument('--json',
                       const='false',
                       nargs='?',
                       help='Json output only')

    args = creds.get()
    session = Session(args.url, args.login, args.password)
    session.login()

    tenant = Tenant(args.tenant)
    app = AppProfile(args.app, tenant)
    bd = BridgeDomain(args.bd, tenant)
    epg = EPG(args.epg, app)
    epg.add_bd(bd)

    if args.json:
        print(tenant.get_json())
    else:
        resp = session.push_to_apic(tenant.get_url(), tenant.get_json())

        if not resp.ok:
            print('%% Error: Could not push configuration to APIC')
            print(resp.text)
def main():
    """
    Main common routine for show interface description
    :return: None
    """
    # Set up the command line options
    creds = Credentials(['apic', 'nosnapshotfiles'],
                        description=("This application replicates the switch "
                                     "CLI command 'show interface description'"))
    creds.add_argument('-s', '--switch',
                       type=str,
                       default=None,
                       help='Specify a particular switch id, e.g. "101"')
    creds.add_argument('-i', '--interface',
                       type=str,
                       default=None,
                       help='Specify a specific interface, e.g. "eth1/1"')
    args = creds.get()

    # Login to APIC
    apic = Session(args.url, args.login, args.password)
    if not apic.login().ok:
        print('%% Could not login to APIC')
        return

    # Show interface description
    node_ids = get_node_ids(apic, args)
    apic_intf_classes = ['l1PhysIf', 'pcAggrIf', 'l3EncRtdIf', 'sviIf',
                         'tunnelIf', 'mgmtMgmtIf', 'l3LbRtdIf']
    for apic_intf_class in apic_intf_classes:
        show_interface_description(apic, node_ids, apic_intf_class=apic_intf_class,
                                   specific_interface=args.interface)
Example #12
def main():
    """
    Main show Process routine
    :return: None
    """
    description = 'Simple application that logs on to the APIC and check cluster information for a fabric'
    creds = Credentials('apic', description)

    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    cluster = Cluster.get(session)

    if cluster.config_size != cluster.cluster_size:
        print("*******************************************************")
        print("WARNING, configured cluster size ", cluster.config_size)
        print(":   not equal to the actual size ", cluster.cluster_size)
        print("WARNING, desired stats collection might be lost")
        print("*******************************************************")
        print("APICs in the cluster", cluster.name, ":")
        for apic in cluster.apics:
            print(json.dumps(apic, indent=4, sort_keys=True))
    else:
        print("PASS")
Example #13
def main():
    """
    Main execution routine
    """
    description = ('Simple application that logs on to the APIC'
                   ' and displays usage information for a given DN')
    creds = Credentials('apic', description)
    creds.add_argument("-d",
                       "--dn_name",
                       help="DN to query for usage information")

    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
    url = '/api/mo/{}.json?query-target=children&target-subtree-class=relnFrom'
    url = url.format(args.dn_name)

    resp = session.get(url)

    data = []  # (tDn, class) rows collected for display
    if resp.ok:
        used_by = resp.json()['imdata']
        for item in used_by:
            kls = next(iter(item))
            attributes = item[kls]['attributes']
            data.append((attributes['tDn'], kls))
    print(tabulate(data, headers=["Used by", "Class"]))
Example #14
 def __init__(self, url, login, password):
     # Login to APIC
     self._apic = Session(url, login, password)
     if not self._apic.login().ok:
         self._logged_in = False
         print('%% Could not login to APIC')
     else:
         self._logged_in = True
def main():
    """
    Main execution routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = (
        'Application dealing with tenant configuration. '
        'It can download a tenant configuration from the APIC and store it as raw JSON in a file. '
        'It can also push a tenant configuration stored as raw JSON in a file to the APIC.'
    )
    creds = Credentials(('apic', 'nosnapshotfiles'), description)
    creds.add_argument(
        '--config',
        default=None,
        help='Configuration file to push/pull tenant configuration')
    creds.add_argument('--tenant', default=None, help='Tenant name')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--push-to-apic',
                       action='store_true',
                       help='Push the tenant configuration file to the APIC')
    group.add_argument('--pull-from-apic',
                       action='store_true',
                       help=('Pull the tenant configuration from the APIC and '
                             'store it in the specified configuration file'))

    # Get the command line arguments
    args = creds.get()

    # Sanity check the command line arguments
    if args.config is None:
        print('%% No configuration file given.')
        creds.print_help()
        return
    if args.tenant is None:
        print('%% No Tenant name given.')
        creds.print_help()
        return
    if not args.push_to_apic and not args.pull_from_apic:
        print('%% No direction (push-to-apic/pull-from-apic) given.')
        creds.print_help()
        return

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Do the work
    if args.pull_from_apic:
        pull_config_from_apic(session, args.tenant, args.config)

    if args.push_to_apic:
        push_config_to_apic(session, args.tenant, args.config)
Example #16
def main():
    """
    Main execution routine
    """
    description = 'Simple application that logs on to the APIC and displays the faults.'
    creds = Credentials('apic', description)
    creds.add_argument("-d",
                       "--domain-name",
                       type=str,
                       help="list of domains. usage -d tennat.infra")
    creds.add_argument(
        "-t",
        "--tenant-name",
        type=str,
        help=
        "name of the tenant of which faults are to be displayed. If not given faults of all the tenants are shown"
    )
    creds.add_argument('--continuous',
                       action='store_true',
                       help='Continuously monitor for faults')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    faults_obj = Faults()
    fault_filter = None
    if args.domain_name is not None:
        fault_filter = {'domain': args.domain_name.split(',')}
    tenant_name = None
    if args.tenant_name is not None:
        tenant_name = args.tenant_name

    faults_obj.subscribe_faults(session, fault_filter)
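    # Keep polling the fault subscription and print each fault as it arrives
    # (loop forever if --continuous was given)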
    while faults_obj.has_faults(session, fault_filter) or args.continuous:
        if faults_obj.has_faults(session, fault_filter):
            faults = faults_obj.get_faults(session,
                                           fault_filter=fault_filter,
                                           tenant_name=tenant_name)
            if faults is not None:
                for fault in faults:
                    if fault is not None:
                        print("---------------")
                        if fault.descr is not None:
                            print("     descr     : " + fault.descr)
                        else:
                            print("     descr     : " + "  ")
                        print("     dn        : " + fault.dn)
                        print("     rule      : " + fault.rule)
                        print("     severity  : " + fault.severity)
                        print("     type      : " + fault.type)
                        print("     domain    : " + fault.domain)
Example #17
 def __init__(self, url, login, password):
     # Login to APIC
     self._apic = Session(url, login, password)
     if not self._apic.login().ok:
         self._logged_in = False
         print('%% Could not login to APIC')
     else:
         self._logged_in = True
     self._interfaces = []
     self._port_channels = []
Example #18
    def test_basic_inherit_add_subnet(self):
        config_json = {
            "apic": {
                "user_name": APIC_USERNAME,
                "password": APIC_PASSWORD,
                "ip_address": APIC_IP,
                "use_https": False
            },
            "inheritance_policies": [
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "childepg"
                    },
                    "allowed": True,
                    "enabled": True
                },
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "parentepg"
                    },
                    "allowed": True,
                    "enabled": False
                }
            ]
        }
        args = TestArgs()
        apic = Session(APIC_URL, APIC_USERNAME, APIC_PASSWORD)
        apic.login()
        self.setup_tenant(apic)
        tool = execute_tool(args, cli_mode=False)
        tool.add_config(config_json)
        time.sleep(2)

        # Verify that the contract is not inherited by the child EPG
        self.verify_not_inherited(apic)

        # Add the child subnet
        self.add_child_subnet(apic)
        time.sleep(2)

        # Verify that the contract is now inherited by the child EPG
        self.verify_inherited(apic)

        self.delete_tenant()
def main():
    # Set up the Command Line options
    creds = Credentials(('apic', 'nosnapshotfiles'), description='')
    creds.add_argument('--printonly', action='store_true',
                       help='Only print the JSON but do not push to APIC.')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--config', default=None,
                       help='Optional .ini file providing failure scenario configuration')
    group.add_argument('--delete', action='store_true',
                       help='Delete ALL of the randomized configuration from the APIC')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        print(resp.status_code, resp.text)
        return

    # Handle the delete case
    if args.delete:
        delete_all_randomized_tenants(session)
        return

    # Ensure that a config file has been given
    if args.config is None:
        print('%% Expected --config or --delete option')
        return

    config = ConfigParser.ConfigParser()
    config.read(args.config)
    randomizer = ConfigRandomizer(config)
    interfaces = ['eth 1/101/1/17', 'eth 1/102/1/17']
    randomizer.create_random_config(interfaces)
    flows = randomizer.get_flows(1)
    flow_json = []
    for flow in flows:
        flow_json.append(flow.get_json())
    flow_json = json.dumps({'flows': flow_json})

    for tenant in randomizer.tenants:
        print('TENANT CONFIG')
        print('-------------')
        print(tenant.get_json())
        print()
        print()
        if not args.printonly:
            resp = tenant.push_to_apic(session)
            if not resp.ok:
                print(resp.status_code, resp.text)
            assert resp.ok
    print('Total number of tenants pushed:', len(randomizer.tenants))
def main():
    """
    Main execution routine
    """
    description = 'Simple application that logs on to the APIC and displays the faults.'
    creds = Credentials('apic', description)
    creds.add_argument(
        "-d",
        "--domain-name",
        type=str,
        help="Comma-separated list of domains, e.g. -d tenant,infra")
    creds.add_argument(
        "-t",
        "--tenant-name",
        type=str,
        help="Name of the tenant whose faults are to be displayed. If not given, faults of all tenants are shown")
    creds.add_argument('--continuous', action='store_true',
                       help='Continuously monitor for faults')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    faults_obj = Faults()
    fault_filter = None
    if args.domain_name is not None:
        fault_filter = {'domain': args.domain_name.split(',')}
    tenant_name = None
    if args.tenant_name is not None:
        tenant_name = args.tenant_name

    faults_obj.subscribe_faults(session, fault_filter)
    while faults_obj.has_faults(session, fault_filter) or args.continuous:
        if faults_obj.has_faults(session, fault_filter):
            faults = faults_obj.get_faults(
                session, fault_filter=fault_filter, tenant_name=tenant_name)
            if faults is not None:
                for fault in faults:
                    if fault is not None:
                        print "---------------"
                        if fault.descr is not None:
                            print "     descr     : " + fault.descr
                        else:
                            print "     descr     : " + "  "
                        print "     dn        : " + fault.dn
                        print "     rule      : " + fault.rule
                        print "     severity  : " + fault.severity
                        print "     type      : " + fault.type
                        print "     domain    : " + fault.domain
def main():
    """
    Main execution routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Application dealing with tenant configuration. '
                   'It can download a tenant configuration from the APIC and store it as raw JSON in a file. '
                   'It can also push a tenant configuration stored as raw JSON in a file to the APIC.')
    creds = Credentials(('apic', 'nosnapshotfiles'), description)
    creds.add_argument('--config', default=None, help='Configuration file to push/pull tenant configuration')
    creds.add_argument('--tenant', default=None, help='Tenant name')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--push-to-apic', action='store_true',
                       help='Push the tenant configuration file to the APIC')
    group.add_argument('--pull-from-apic', action='store_true',
                       help=('Pull the tenant configuration from the APIC and '
                             'store it in the specified configuration file'))

    # Get the command line arguments
    args = creds.get()

    # Sanity check the command line arguments
    if args.config is None:
        print('%% No configuration file given.')
        creds.print_help()
        return
    if args.tenant is None:
        print('%% No Tenant name given.')
        creds.print_help()
        return
    if not args.push_to_apic and not args.pull_from_apic:
        print('%% No direction (push-to-apic/pull-from-apic) given.')
        creds.print_help()
        return

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Do the work
    if args.pull_from_apic:
        pull_config_from_apic(session, args.tenant, args.config)

    if args.push_to_apic:
        push_config_to_apic(session, args.tenant, args.config)
Example #22
 def delete_tenant(self):
     tenant = Tenant('inheritanceautomatedtest')
     tenant.mark_as_deleted()
     apic = Session(APIC_URL, APIC_USERNAME, APIC_PASSWORD)
     apic.login()
     resp = tenant.push_to_apic(apic)
     self.assertTrue(resp.ok)
     time.sleep(4)
     resp = tenant.push_to_apic(apic)
     self.assertTrue(resp.ok)
     time.sleep(2)
     tenants = Tenant.get(apic)
     for tenant in tenants:
         self.assertTrue(tenant.name != 'inheritanceautomatedtest')
Example #23
def main():
    """
    Main execution routine
    """
    description = (
        'Simple application that logs on to the APIC'
        ' and displays all the tenant info of the contract_interface related to the imported contract.'
    )
    creds = Credentials('apic', description)
    creds.add_argument("-t",
                       "--tenant_name",
                       help="Tenant Name of where the contract is created")
    creds.add_argument("-i", "--contract_name", help="Imported Contract Name")
    args = creds.get()

    if (args.tenant_name is not None) and (args.contract_name is None):
        args.contract_name = raw_input("Contract Name: ")

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    data = []  # (imported contract, tenant, app profile, EPG) rows for display
    tenants = Tenant.get_deep(session)
    for tenant in tenants:
        contracts_interfaces = tenant.get_children(
            only_class=ContractInterface)
        for contract_interface in contracts_interfaces:
            imported_contract = contract_interface.get_import_contract()
            if imported_contract is not None:
                if args.tenant_name is not None:
                    if (imported_contract.name == args.contract_name) and (
                            imported_contract.get_parent().name
                            == args.tenant_name):
                        apps = AppProfile.get(session, tenant)
                        for app in apps:
                            epgs = EPG.get(session, app, tenant)
                            for epg in epgs:
                                data.append((imported_contract.name,
                                             tenant.name, app.name, epg.name))
                else:
                    apps = AppProfile.get(session, tenant)
                    for app in apps:
                        epgs = EPG.get(session, app, tenant)
                        for epg in epgs:
                            data.append((imported_contract.name, tenant.name,
                                         app.name, epg.name))
    print(tabulate(
        data, headers=["IMPORTED_CONTRACT", "TENANT", "APP_PROFILE", "EPG"]))
def main():
    # Set up the Command Line options
    creds = Credentials(('apic', 'nosnapshotfiles'), description='')
    creds.add_argument('--printonly',
                       action='store_true',
                       help='Only print the JSON but do not push to APIC.')
    creds.add_argument('--testloop',
                       action='store_true',
                       help='Run in a continual testing loop.')
    group = creds.add_mutually_exclusive_group()
    group.add_argument(
        '--config',
        default=None,
        help='Optional .ini file providing failure scenario configuration')
    group.add_argument(
        '--delete',
        action='store_true',
        help='Delete ALL of the randomized configuration from the APIC')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        print(resp.status_code, resp.text)
        return

    # Handle the delete case
    if args.delete:
        delete_all_randomized_tenants(session)
        return

    # Ensure that a config file has been given
    if args.config is None:
        print('%% Expected --config or --delete option')
        return

    if args.testloop:
        while True:
            generate_config(session, args)
            time.sleep(random_number(5, 30))
            delete_all_randomized_tenants(session)
            time.sleep(random_number(5, 30))
    else:
        generate_config(session, args)
Example #25
 def __init__(self, url, login, password):
     # Login to APIC
     self._apic = Session(url, login, password)
     if not self._apic.login().ok:
         self._logged_in = False
         print('%% Could not login to APIC')
     else:
         self._logged_in = True
def main():
    # Set up the Command Line options
    creds = Credentials(('apic', 'nosnapshotfiles'), description='')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--config', default=None,
                       help='Optional .ini file providing failure scenario configuration')
    group.add_argument('--delete', action='store_true',
                       help='Delete ALL of the randomized configuration from the APIC')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        print(resp.status_code, resp.text)
        return

    # Handle the delete case
    if args.delete:
        delete_all_randomized_tenants(session)
        return

    # Ensure that a config file has been given
    if args.config is None:
        print('%% Expected --config or --delete option')
        return

    config = ConfigParser.ConfigParser()
    config.read(args.config)

    # Handle the random creation
    num_tenants = random_number(int(config.get('Tenants', 'Minimum')),
                                int(config.get('Tenants', 'Maximum')))
    for i in range(0, num_tenants):
        tenant = create_random_tenant_config(config)
        print('TENANT CONFIG')
        print('-------------')
        print(tenant.get_json())
        print()
        print()
        resp = tenant.push_to_apic(session)
        if not resp.ok:
            print(resp.status_code, resp.text)
        assert resp.ok
    print('Total number of tenants pushed:', num_tenants)
Example #27
def main():
    """
    Main show Subnets routine
    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the Subnets.')
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    # Download all of the tenants, app profiles, and Subnets
    # and store the names as tuples in a list
    tenants = Tenant.get(session)
    for tenant in tenants:
        check_longest_name(tenant.name, "Tenant")
        if args.tenant is None:
            get_subnet(session, tenant)
        else:
            if tenant.name == args.tenant:
                get_subnet(session, tenant)

    # Display the data downloaded
    template = '{0:' + str(longest_names["Tenant"]) + '} ' \
               '{1:' + str(longest_names["Application Profile"]) + '} ' \
               '{2:' + str(longest_names["Bridge Domain"]) + '} ' \
               '{3:' + str(longest_names["Subnet"]) + '} ' \
               '{4:' + str(longest_names["Scope"]) + '}'
    print(template.format("Tenant", "Application Profile",
                          "Bridge Domain", "Subnet", "Scope"))
    print(template.format('-' * longest_names["Tenant"],
                          '-' * longest_names["Application Profile"],
                          '-' * longest_names["Bridge Domain"],
                          '-' * longest_names["Subnet"],
                          '-' * longest_names["Scope"]))
    for rec in sorted(data):
        print(template.format(*rec))
Example #28
def send_to_apic(tenant):
    """
    Login to APIC and push the config

    :param tenant: Tenant class instance
    :return: request response object
    """
    description = 'Basic Connectivity Example'
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password, False)
    session.login()
    resp = tenant.push_to_apic(session)
    if resp.ok:
        print('Success')
    return resp
Example #29
def main():
    """
    Main routine
    """
    # Get all the arguments
    description = 'Creates a tenant with a micro-EPG.'
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to the APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    # Create the Tenant and AppProfile
    tenant = Tenant('acitoolkit-microepg-example')
    app_profile = AppProfile('myapp', tenant)

    # Create a Base EPG that will provide networking for the microEPGs
    base_epg = EPG('base', app_profile)
    base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
    vrf = Context('myvrf', tenant)
    bd = BridgeDomain('mybd', tenant)
    bd.add_context(vrf)
    base_epg.add_bd(bd)

    # Create a microEPG
    microepg = EPG('microepg', app_profile)
    microepg.is_attributed_based = True
    microepg.set_base_epg(base_epg)
    # Add an IP address to this microepg
    criterion = AttributeCriterion('criterion', microepg)
    criterion.add_ip_address('1.2.3.4')

    # Contracts can be provided/consumed from the microepg as desired (not shown)

    # Push the tenant to the APIC
    resp = tenant.push_to_apic(session)
    if not resp.ok:
        print('%% Error: Could not push configuration to APIC')
        print(resp.text)
Example #30
def main():
    """
    Main Show VM Names Routine
    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the virtual machine names.')
    creds = Credentials('apic', description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Make a direct call to the APIC REST API
    # Get all of the VMs (all objects of compVM class) and include the compVNic children
    # which contain the MAC address of the NIC.
    # The advantage of using acitoolkit Session.get() instead of direct Requests.get() calls
    # is that acitoolkit will automatically handle retries and pagination for queries with
    # large response data
    class_url = '/api/node/class/compVm.json?rsp-subtree=children&rsp-subtree-class=compVNic'
    ret = session.get(class_url)
    vm_list = ret.json()['imdata']

    # Process the response. We're looking for the VM name and the associated vNIC MAC addresses.
    data = []
    for vm in vm_list:
        vm_name = vm['compVm']['attributes']['name']
        for vnic in vm['compVm']['children']:
            vm_mac = vnic['compVNic']['attributes']['mac']
            # Store the VM name and MAC address. Note that VM names may be associated with
            # multiple MAC addresses if they have multiple vNICs.
            data.append((vm_name, vm_mac))

    # Display the data downloaded
    print(tabulate(data, headers=["VMNAME", "MACADDRESS"]))
def main():
    # Set up the Command Line options
    creds = Credentials(('apic', 'nosnapshotfiles'), description='')
    creds.add_argument('--printonly', action='store_true',
                       help='Only print the JSON but do not push to APIC.')
    creds.add_argument('--testloop', action='store_true',
                       help='Run in a continual testing loop.')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--config', default=None,
                       help='Optional .ini file providing failure scenario configuration')
    group.add_argument('--delete', action='store_true',
                       help='Delete ALL of the randomized configuration from the APIC')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        print(resp.status_code, resp.text)
        return

    # Handle the delete case
    if args.delete:
        delete_all_randomized_tenants(session)
        return

    # Ensure that a config file has been given
    if args.config is None:
        print('%% Expected --config or --delete option')
        return

    if args.testloop:
        while True:
            generate_config(session, args)
            time.sleep(random_number(5, 30))
            delete_all_randomized_tenants(session)
            time.sleep(random_number(5, 30))
    else:
        generate_config(session, args)
def main():
    """
    Main execution routine
    """
    description = ('Simple application that logs on to the APIC'
                   ' and displays all the tenant info of the contract_interface related to the imported contract.')
    creds = Credentials('apic', description)
    creds.add_argument("-t", "--tenant_name", help="Tenant Name of where the contract is created")
    creds.add_argument("-i", "--contract_name", help="Imported Contract Name")
    args = creds.get()

    if (args.tenant_name is not None) and (args.contract_name is None):
        args.contract_name = raw_input("Contract Name: ")

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    data = []  # (imported contract, tenant, app profile, EPG) rows for display
    tenants = Tenant.get_deep(session)
    for tenant in tenants:
        contracts_interfaces = tenant.get_children(only_class=ContractInterface)
        for contract_interface in contracts_interfaces:
            imported_contract = contract_interface.get_import_contract()
            if imported_contract is not None:
                if args.tenant_name is not None:
                    if (imported_contract.name == args.contract_name) and (imported_contract.get_parent().name == args.tenant_name):
                        apps = AppProfile.get(session, tenant)
                        for app in apps:
                            epgs = EPG.get(session, app, tenant)
                            for epg in epgs:
                                data.append((imported_contract.name, tenant.name, app.name, epg.name))
                else:
                    apps = AppProfile.get(session, tenant)
                    for app in apps:
                        epgs = EPG.get(session, app, tenant)
                        for epg in epgs:
                            data.append((imported_contract.name, tenant.name, app.name, epg.name))
    print(tabulate(data, headers=["IMPORTED_CONTRACT", "TENANT", "APP_PROFILE", "EPG"]))
Example #33
def main():
    """
    Main execution routine
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Application that logs on to the APIC and tracks'
                   ' all of the Endpoint stats in a MySQL database.')
    creds = Credentials(qualifier=('apic', 'mysql'),
                        description=description)
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Create the MySQL database
    cnx = mysql.connector.connect(user=args.mysqllogin, password=args.mysqlpassword,
                                  host=args.mysqlip)
    c = cnx.cursor()
    c.execute('CREATE DATABASE IF NOT EXISTS acitoolkit_interface_stats;')
    cnx.commit()
    c.execute('USE acitoolkit_interface_stats;')

    all_stats = InterfaceStats.get_all_ports(session, 1)
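    # Walk the 5-minute stats for every port and insert any interval not already in the database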
    for intf in all_stats:
        stats = all_stats[intf]
        for stats_family in stats:
            if '5min' in stats[stats_family]:
                for epoch in stats[stats_family]['5min']:
                    if epoch != 0:
                        ss = stats[stats_family]['5min'][epoch]
                        if stats_family not in valid_tables:
                            create_table(c, cnx, stats_family, list(ss.keys()))
                        if not interval_end_exists(c, stats_family, intf, ss['intervalEnd']):
                            insert_stats_row(c, cnx, stats_family, intf, ss)
Example #34
def main():
    """
    Main show EPGs routine
    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    # Download all of the tenants, app profiles, and EPGs
    # and store the names as tuples in a list
    tenants = Tenant.get(session)
    for tenant in tenants:
        check_longest_name(tenant.name, "Tenant")
        if args.tenant is None:
            get_epg(session, tenant)
        else:
            if tenant.name == args.tenant:
                get_epg(session, tenant)

    # Display the data downloaded
    template = '{0:' + str(longest_names["Tenant"]) + '} ' \
               '{1:' + str(longest_names["Application Profile"]) + '} ' \
               '{2:' + str(longest_names["EPG"]) + '}'
    print(template.format("Tenant", "Application Profile", "EPG"))
    print(template.format('-' * longest_names["Tenant"],
                          '-' * longest_names["Application Profile"],
                          '-' * longest_names["EPG"]))
    for rec in sorted(data):
        print(template.format(*rec))
def main():
    """
    Main common routine for show vlan ext, show vlan brief, and show vlan info
    :return: None
    """
    # Set up the command line options
    creds = Credentials(['apic', 'nosnapshotfiles'],
                        description="This application replicates the switch CLI command 'show vlan extended'")
    creds.add_argument('-s', '--switch',
                       type=str,
                       default=None,
                       help='Specify a particular switch id, e.g. "101"')
    args = creds.get()

    # Login to APIC
    apic = Session(args.url, args.login, args.password)
    if not apic.login().ok:
        print('%% Could not login to APIC')
        return

    node_ids = get_node_ids(apic, args)
    show_vlan_brief(apic, node_ids)
    show_vlan_info(apic, node_ids)
 def __init__(self, url, login, password):
     # Login to APIC
     self._apic = Session(url, login, password)
     self._if_brief_headers = {
         'l1PhysIf': ['Ethernet Interface', 'VLAN', 'Type', 'Mode', 'Status', 'Reason', 'Speed', 'Port Ch #'],
         'pcAggrIf': ['Port-channel Interface', 'VLAN', 'Type', 'Mode', 'Status', 'Reason', 'Speed', 'Protocol'],
         'l3LbRtdIf': ['Interface', 'Status', 'Description'],
         'tunnelIf': ['Interface', 'Status', 'IP Address', 'Encap type', 'MTU'],
         'sviIf': ['Interface', 'Secondary VLAN(Type)', 'Status', 'Reason'],
         'l3EncRtdIf': [],
         'mgmtMgmtIf': ['Port', 'VRF', 'Status', 'IP Address', 'Speed', 'MTU'],
         'l2ExtIf': [],
         'l2VfcIf': ['Interface', 'Vsan', 'Admin\nMode', 'Admin Trunk Mode', 'Status',
                     'Bind Info', 'Oper Mode', 'Oper Speed (Gbps)']
     }
     self._if_types = self._if_brief_headers.keys()
     if not self._apic.login().ok:
         self._logged_in = False
         print('%% Could not login to APIC')
     else:
         self._logged_in = True
     self._interfaces = []
Example #37
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        THROTTLE_SIZE = 500000 / 8
        # Set the tenant name correctly
        if self._tenant_name == '' and self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)
        elif self._tenant_name == '':
            self.set_tenant_name('acitoolkit')

        # Find all the unique contract providers
        logging.debug('Finding the unique contract providers')
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1
        logging.debug('Found %s unique contract providers', len(unique_providers))

        # Find any duplicate contracts that this provider is providing (remove)
        logging.debug('Finding any duplicate contracts')
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)
                            logging.debug('duplicate_policies now has %s entries', len(duplicate_policies))

        logging.debug('Removing duplicate contracts')
        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        logging.debug('Generating JSON....')
        # Push all of the Contracts
        logging.debug('Pushing contracts. # of Contract policies: %s', len(self.cdb.get_contract_policies()))
        tenant = Tenant(self._tenant_name)
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = contract_policy.descr[0:127 - (contract_policy.descr.count('"') + contract_policy.descr.count("'") + contract_policy.descr.count('/'))]
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='unspecified',
                                        sToPort='unspecified',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
            if not self.displayonly:
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    logging.debug('Throttling contracts. Pushing config...')
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    tenant = Tenant(self._tenant_name)

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            logging.debug('Pushing remaining contracts')
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # Push all of the EPGs
        logging.debug('Pushing EPGs')
        if not self.displayonly:
            tenant = Tenant(self._tenant_name)
        app = AppProfile(self._app_name, tenant)

        if self._use_ip_epgs:
            # Create a Base EPG
            base_epg = EPG('base', app)
            if self.cdb.has_context_config():
                context_name = self.cdb.get_context_config().name
            else:
                context_name = 'vrf1'
            context = Context(context_name, tenant)
            bd = BridgeDomain('bd', tenant)
            bd.add_context(context)
            base_epg.add_bd(bd)
            if self.displayonly:
                # If display only, just deploy the EPG to leaf 101
                base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
            else:
                # Deploy the EPG to all of the leaf switches
                nodes = Node.get(apic)
                for node in nodes:
                    if node.role == 'leaf':
                        base_epg.add_static_leaf_binding(node.node, 'vlan', '1', encap_mode='untagged')

            # Create the Attribute based EPGs
            logging.debug('Creating Attribute Based EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                if not self.displayonly:
                    # Check if we need to throttle very large configs
                    if len(str(tenant.get_json())) > THROTTLE_SIZE:
                        resp = tenant.push_to_apic(apic)
                        if not resp.ok:
                            return resp
                        tenant = Tenant(self._tenant_name)
                        app = AppProfile(self._app_name, tenant)
                        context = Context(context_name, tenant)
                        bd = BridgeDomain('bd', tenant)
                        bd.add_context(context)
                        if self._use_ip_epgs:
                            base_epg = EPG('base', app)
                            base_epg.add_bd(bd)
                epg = EPG(epg_policy.name, app)

                # Check if the policy has the default 0.0.0.0 IP address
                no_default_endpoint = True
                for node_policy in epg_policy.get_node_policies():
                    if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                        no_default_endpoint = False
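                        # A 0.0.0.0/0 entry matches everything, so keep this as a regular EPG attached to the BD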
                        epg.add_bd(bd)

                # Add all of the IP addresses
                if no_default_endpoint:
                    epg.is_attributed_based = True
                    epg.set_base_epg(base_epg)
                    criterion = AttributeCriterion('criterion', epg)
                    ipaddrs = []
                    for node_policy in epg_policy.get_node_policies():
                        ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                        if not ipaddr.is_multicast: # Skip multicast addresses. They cannot be IP based EPGs
                            ipaddrs.append(ipaddr)
                    nets = ipaddress.collapse_addresses(ipaddrs)
                    for net in nets:
                        criterion.add_ip_address(str(net))
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)
        else:
            logging.debug('Creating EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                epg = EPG(epg_policy.name, app)
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            return resp
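
# A minimal, self-contained sketch of the 'collapse_addresses' step used in the
# attribute-based EPG creation above (assumes the Python 3 'ipaddress' module;
# the toolkit code wraps the value in unicode() only for Python 2 compatibility):
import ipaddress

addrs = [ipaddress.ip_address(u'10.0.0.%d' % i) for i in range(4)]
for net in ipaddress.collapse_addresses(addrs):
    # Four consecutive host addresses collapse into a single 10.0.0.0/30 prefix,
    # which is what gets added to the AttributeCriterion as an IP match.
    print(net)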
def main():
    """
    Main delete tenant routine
    :return: None
    """
    # Get all the arguments
    description = 'Simple application that logs in to the APIC and deletes tenants whose names match the specified string.'
    creds = Credentials(['apic', 'nosnapshotfiles'], description)
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--startswith', default=None,
                       help='String to match that starts the tenant name')
    group.add_argument('--endswith', default=None,
                       help='String to match that ends the tenant name')
    group.add_argument('--exactmatch', default=None,
                       help='String that exactly matches the tenant name')
    group.add_argument('--contains', default=None,
                       help='String that is contained in the tenant name')
    creds.add_argument('--force', action='store_true',
                       help='Attempt to remove the tenants without prompting for confirmation')
    args = creds.get()

    # Login to the APIC
    apic = Session(args.url, args.login, args.password)
    resp = apic.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Get all of the Tenants
    tenants = Tenant.get(apic)

    # Find the list of Tenants to delete according to command line options
    tenants_to_delete = []
    for tenant in tenants:
        if args.startswith is not None:
            if tenant.name.startswith(args.startswith):
                tenants_to_delete.append(tenant)
        elif args.endswith is not None:
            if tenant.name.endswith(args.endswith):
                tenants_to_delete.append(tenant)
        elif args.exactmatch is not None:
            if args.exactmatch == tenant.name:
                tenants_to_delete.append(tenant)
        elif args.contains is not None:
            if args.contains in tenant.name:
                tenants_to_delete.append(tenant)

    # Query the user to be sure of deletion
    if not args.force:
        # Iterate over a copy so that removing a tenant does not skip the next one
        for tenant in list(tenants_to_delete):
            prompt = 'Delete tenant %s ? [y/N]' % tenant.name
            try:
                resp = raw_input(prompt)
            except NameError:
                resp = input(prompt)
            if not resp.lower().startswith('y'):
                tenants_to_delete.remove(tenant)
                print 'Skipping tenant', tenant.name

    # Delete the tenants
    for tenant in tenants_to_delete:
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(apic)
        if resp.ok:
            print 'Deleted tenant', tenant.name
        else:
            print 'Could not delete tenant', tenant.name
            print resp.text
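
# A minimal sketch of the confirmation loop above (hypothetical helper name;
# assumes Python 3, where raw_input() no longer exists). Iterating over a copy
# keeps the removal from skipping the tenant that follows a removed one.
def confirm_deletions(tenants_to_delete):
    for tenant in list(tenants_to_delete):
        prompt = 'Delete tenant %s ? [y/N]' % tenant.name
        if not input(prompt).lower().startswith('y'):
            tenants_to_delete.remove(tenant)
            print('Skipping tenant', tenant.name)
    return tenants_to_delete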
Example #39
0
class FexCollector(object):
    def __init__(self, url, login, password):
        # Login to APIC
        self._apic = Session(url, login, password)
        if not self._apic.login().ok:
            self._logged_in = False
            print '%% Could not login to APIC'
        else:
            self._logged_in = True

    def _get_query(self, query_url, error_msg):
        resp = self._apic.get(query_url)
        if not resp.ok:
            print error_msg
            print resp.text
            return []
        return resp.json()['imdata']

    def get_fex_attributes(self, node_id, fex_id=None):
        if fex_id is None:
            query_url = (
                '/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
                '&target-subtree-class=satmDExtCh' % node_id)
        else:
            query_url = (
                '/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
                '&target-subtree-class=satmDExtCh&query-target-filter=eq(satmDExtCh.id, "%s")'
                % (node_id, fex_id))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_fabric_port_attributes(self, node_id, fex_id):
        query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
                     '&target-subtree-class=satmFabP&query-target-filter='
                     'eq(satmFabP.extChId,"%s")' % (node_id, fex_id))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_transceiver_attributes(self, node_id, fab_port_id):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys/satm/fabp-[%s].json?'
                     'query-target=subtree&target-subtree-class=satmRemoteFcot'
                     ',satmRemoteFcotX2' % (node_id, fab_port_id))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_chassis_attributes(self, node_id, fex_id):
        query_url = '/api/mo/topology/pod-1/node-%s/sys/extch-%s.json' % (
            node_id, fex_id)
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_chassis_card_attributes(self, node_id, fex_id):
        query_url = (
            '/api/mo/topology/pod-1/node-%s/sys/extch-%s.json?'
            'query-target=subtree&target-subtree-class=eqptExtChCard' %
            (node_id, fex_id))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_chassis_running_attributes(self, node_id, fex_id):
        query_url = '/api/mo/topology/pod-1/node-%s/sys/extch-%s/running.json' % (
            node_id, fex_id)
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_chassis_cpu_attributes(self, node_id, fex_id):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys/extch-%s.json?'
                     'query-target=subtree&target-subtree-class=eqptExtChCPU' %
                     (node_id, fex_id))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        return self._get_query(query_url, error_message)

    def get_fex_ids(self, node_id):
        fex_attrs = self.get_fex_attributes(node_id)
        fex_ids = []
        for fex_attr in fex_attrs:
            fex_ids.append(str(fex_attr['satmDExtCh']['attributes']['id']))
        return fex_ids

    def get_node_ids(self, node_id):
        """
        Get the list of node ids from the command line arguments.
        If none, get all of the node ids
        :param args: Command line arguments
        :return: List of strings containing node ids
        """
        if node_id is not None:
            names = [node_id]
        else:
            names = []
            query_url = ('/api/node/class/fabricNode.json?'
                         'query-target-filter=eq(fabricNode.role,"leaf")')
            error_message = 'Could not get switch list from APIC.'
            nodes = self._get_query(query_url, error_message)
            for node in nodes:
                names.append(str(node['fabricNode']['attributes']['id']))
        return names

    @staticmethod
    def print_fex(fex_attr, chassis_attr, detail=False):
        print 'FEX:%s  Description: FEX0%s  state: %s' % (
            fex_attr['id'], fex_attr['id'], fex_attr['operSt'])
        print '  FEX version: %s [Switch version: %s]' % (fex_attr['ver'],
                                                          fex_attr['swVer'])

        if detail:
            print '  FEX Interim version:', fex_attr['intVer']
            print '  Switch Interim version:', fex_attr['swIntVer']
        print '  Extender Model: %s, Extender Serial: %s' % (fex_attr['model'],
                                                             fex_attr['ser'])
        print '  Part No:', chassis_attr['partNum']
        if detail:
            print '  Card Id: %s,' % fex_attr['swCId']
            print 'Mac Addr: %s,' % fex_attr['macAddr']
            print 'Num Macs:', fex_attr['numMacs']
            print '  Module Sw Gen:', fex_attr['swGen']
            print ' [Switch Sw Gen: %s]' % fex_attr['swSwGen']
        print ' pinning-mode: static    Max-links: 1'
        print '  Fabric port for control traffic:', fex_attr['controlFPort']

    @staticmethod
    def convert_to_ascii(data):
        data = str(data).split(',')
        resp = ''
        for letter in data:
            resp += str(unichr(int(letter)))
        return resp
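
    # Usage sketch (assuming the APIC encodes these transceiver strings as
    # comma-separated decimal byte values, as print_fex_transceiver implies):
    #   FexCollector.convert_to_ascii('67,105,115,99,111')  ->  'Cisco'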

    def print_fex_transceiver(self, node_id, fex_id):
        if fex_id is None:
            fex_ids = self.get_fex_ids(node_id)
        else:
            fex_ids = [fex_id]
        for fex_id in fex_ids:
            fab_port_num = 1
            fab_ports = self.get_fabric_port_attributes(node_id, fex_id)
            for fab_port in fab_ports:
                fab_port_attr = fab_port['satmFabP']['attributes']
                if fab_port_attr['id'].startswith('po'):
                    continue
                print 'Fex Uplink:', fab_port_num
                print '    Fabric Port :', fab_port_attr['id']
                if 'fcot-present' in fab_port_attr['flags']:
                    transceiver_attr = self.get_transceiver_attributes(
                        node_id, str(fab_port_attr['id']))
                    try:
                        transceiver_attr = transceiver_attr[0][
                            'satmRemoteFcot']['attributes']
                    except KeyError:
                        raise NotImplementedError  # probably satmRemoteFcotV2
                    print '    sfp is present'
                    print '    name is', self.convert_to_ascii(
                        transceiver_attr['vendorName'])
                    print '    type is', transceiver_attr['typeName']
                    print '    part number is', self.convert_to_ascii(
                        transceiver_attr['vendorPn'])
                    print '    revision is', self.convert_to_ascii(
                        transceiver_attr['vendorRev'])
                    print '    serial number is', self.convert_to_ascii(
                        transceiver_attr['vendorSn'])
                    print '    nominal bitrate is %s MBits/sec' % str(
                        int(transceiver_attr['brIn100MHz']) * 100)
                    print '    Link length supported for 50/125mm fiber is 0 m(s)'
                    print '    Link length supported for 62.5/125mm fiber is 0 m(s)'
                    print '    Link length supported for copper is %s m' % transceiver_attr[
                        'distIn1mForCu']
                    print '    cisco id is', transceiver_attr['xcvrId']
                    print '    cisco extended id number is', transceiver_attr[
                        'xcvrExtId']
                fab_port_num += 1

    def print_fex_version(self, node_id, fex_id):
        if fex_id is None:
            fex_ids = self.get_fex_ids(node_id)
        else:
            fex_ids = [fex_id]
        for fex_id in fex_ids:
            chassis_attr = self.get_chassis_attributes(node_id, fex_id)
            chassis_attr = chassis_attr[0]['eqptExtCh']['attributes']
            chassis_running_attr = self.get_chassis_running_attributes(
                node_id, fex_id)
            chassis_running_attr = chassis_running_attr[0][
                'firmwareExtChRunning']['attributes']
            card_attr = self.get_chassis_card_attributes(node_id, fex_id)
            card_attr = card_attr[0]['eqptExtChCard']['attributes']
            fex_attr = self.get_fex_attributes(node_id, fex_id)
            fex_attr = fex_attr[0]['satmDExtCh']['attributes']
            cpu_attr = self.get_chassis_cpu_attributes(node_id, fex_id)
            cpu_attr = cpu_attr[0]['eqptExtChCPU']['attributes']

            print 'Software'
            print '  Bootloader version:           %s' % chassis_running_attr[
                'loaderVer']
            print '  System boot mode:             primary'
            print '  System image version:         %s [build %s]' % (
                fex_attr['ver'], fex_attr['intVer'])

            print '\nHardware'
            print '  Module:                       %s' % card_attr['descr']
            print '  CPU:                          %s' % cpu_attr['model']
            print '  Serial number:                %s' % card_attr['modSerial']
            print '  Bootflash:                    locked'

            # TODO: Finish - need to add timestamping

    def show_fex(self,
                 node=None,
                 fex_id=None,
                 detail=False,
                 transceiver=False,
                 version=False):
        """
        Show fex

        :param fex_id: String containing the specific FEX id. If none, all FEXs are used
        :param detail: Boolean indicating whether a detailed report should be given.
        :param transceiver: Boolean indicating whether a transceiver report should be given.
        :param version: Boolean indicating whether a version report should be given.
        :return: None
        """
        for node_id in self.get_node_ids(node):
            if fex_id is None:
                if not (detail or transceiver or version):
                    # Show fex
                    data = []
                    for fex in self.get_fex_attributes(node_id):
                        fex_attr = fex['satmDExtCh']['attributes']
                        data.append(
                            (int(fex_attr['id']), 'FEX0' + str(fex_attr['id']),
                             fex_attr['operSt'], fex_attr['model'],
                             fex_attr['ser']))
                    data.sort(key=lambda tup: tup[0])
                    if len(data):
                        print 'Switch:', node_id
                        print tabulate(data,
                                       headers=[
                                           'Number', 'Description', 'State',
                                           'Model', 'Serial'
                                       ])
                        print '\n'
                elif detail:
                    # Show fex detail
                    fex_ids = self.get_fex_ids(node_id)
                    for fex_id in fex_ids:
                        self.print_show_fex(node_id, fex_id, detailed=True)
                elif transceiver:
                    self.print_fex_transceiver(node_id, None)
            elif detail:
                # Show fex <fex_id> detail
                self.print_show_fex(node_id, fex_id, detailed=True)
            elif transceiver:
                # Show fex <fex_id> transceiver
                self.print_fex_transceiver(node_id, fex_id)
            elif version:
                # Show fex <fex_id> version
                self.print_fex_version(node_id, fex_id)
            else:
                # Show fex <fex_id>
                self.print_show_fex(node_id, fex_id)

    def print_show_fex(self, node_id, fex_id, detailed=False):
        for fex in self.get_fex_attributes(node_id, fex_id):
            fex_attr = fex['satmDExtCh']['attributes']
            for chassis in self.get_chassis_attributes(node_id,
                                                       fex_attr['id']):
                chassis_attr = chassis['eqptExtCh']['attributes']
                self.print_fex(fex_attr, chassis_attr)
                query_url = (
                    '/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
                    '&target-subtree-class=satmFabP&query-target-filter=eq(satmFabP.extChId,"%s")'
                    % (node_id, fex_attr['id']))
                resp = self._apic.get(query_url)
                if not resp.ok:
                    print 'Could not collect APIC data for switch %s.' % node_id
                    print resp.text
                    return
                if int(resp.json()['totalCount']) > 0:
                    print '  Fabric interface state:'
                    for interface in resp.json()['imdata']:
                        intf_attr = interface['satmFabP']['attributes']
                        print '    %15s - Interface %4s. State: %s' % (
                            intf_attr['id'], intf_attr['operSt'],
                            intf_attr['fsmSt'])
                        if detailed:
                            query_url = (
                                '/api/mo/topology/pod-1/node-%s/sys/satm/fabp-[%s].json?query-target=subtree'
                                '&target-subtree-class=satmHostP' %
                                (node_id, intf_attr['id']))
                            resp = self._apic.get(query_url)
                            if not resp.ok:
                                print 'Could not collect APIC data for switch %s.' % node_id
                                print resp.text
                                return
                            if int(resp.json()['totalCount']) > 0:
                                data = []
                                for port in resp.json()['imdata']:
                                    port_attr = port['satmHostP']['attributes']
                                    data.append(
                                        (port_attr['id'], port_attr['operSt'],
                                         port_attr['fabricPort']))
                                data.sort(key=lambda tup: tup[0])
                                print tabulate(data,
                                               headers=[
                                                   'Fex Port', 'State',
                                                   'Fabric Port'
                                               ])
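
# A minimal driver sketch for the collector above (assumes the acitoolkit
# Credentials/Session flow used in the other examples here; the --node and
# --fex argument names are illustrative, not from the original script):
if __name__ == '__main__':
    from acitoolkit import Credentials
    creds = Credentials('apic', 'Replicates the NX-OS "show fex" command')
    creds.add_argument('--node', default=None, help='Switch node id, e.g. 101')
    creds.add_argument('--fex', default=None, help='Specific FEX id')
    args = creds.get()
    collector = FexCollector(args.url, args.login, args.password)
    collector.show_fex(node=args.node, fex_id=args.fex)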
Example #40
0
import sys
import re
import json
import csv
from acitoolkit import Credentials, Session

# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = 'Application that logs on to the APIC and extracts the EPG Static Paths for the user-provided Node ID'
creds = Credentials('apic', description)
creds.add_argument('--tenant', help='The name of Tenant')
args = creds.get()

# Login to APIC
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
    print('%% Could not login to APIC')
    sys.exit(0)

while True:
    try:
        inputnode = input("Enter the Node ID: ")
        if (len(inputnode) == 4):
            nodeidint = int(inputnode)
            break
        else:
            print("Node ID is INVALID ! try again")
            continue
    except ValueError:
Example #41
0
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        # Set the tenant name correctly
        if self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)

        # Find all the unique contract providers
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1

        # Find any duplicate contracts that this provider is providing (remove)
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)

        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        # Push all of the Contracts
        tenant = Tenant(self._tenant_name)
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_id + '::' + contract_policy.dst_id
            descr = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = descr
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='1',
                                        sToPort='65535',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # Push all of the EPGs
        if not self.displayonly:
            tenant = Tenant(self._tenant_name)
        app = AppProfile(self._app_name, tenant)
        # Create a Base EPG
        base_epg = EPG('base', app)
        if self.cdb.has_context_config():
            context_name = self.cdb.get_context_config().name
        else:
            context_name = 'vrf1'
        context = Context(context_name, tenant)
        bd = BridgeDomain('bd', tenant)
        bd.add_context(context)
        base_epg.add_bd(bd)
        if self.displayonly:
            # If display only, just deploy the EPG to leaf 101
            base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
        else:
            # Deploy the EPG to all of the leaf switches
            nodes = Node.get(apic)
            for node in nodes:
                if node.role == 'leaf':
                    base_epg.add_static_leaf_binding(node.node, 'vlan', '1', encap_mode='untagged')

        # Create the Attribute based EPGs
        for epg_policy in self.cdb.get_epg_policies():
            epg = EPG(epg_policy.id, app)

            # Add all of the IP addresses
            epg.is_attributed_based = True
            epg.set_base_epg(base_epg)
            criterion = AttributeCriterion('criterion', epg)
            for node_policy in epg_policy.get_node_policies():
                ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                if ipaddr.is_multicast:
                    # Skip multicast addresses. They cannot be IP based EPGs
                    continue
                criterion.add_ip_address(node_policy.ip)

            epg.descr = epg_policy.name
            # Consume and provide all of the necessary contracts
            for contract_policy in self.cdb.get_contract_policies():
                contract = None
                if epg_policy.id in contract_policy.src_ids:
                    name = contract_policy.src_id + '::' + contract_policy.dst_id
                    contract = Contract(name, tenant)
                    epg.consume(contract)
                if epg_policy.id in contract_policy.dst_ids:
                    name = contract_policy.src_id + '::' + contract_policy.dst_id
                    if contract is None:
                        contract = Contract(name, tenant)
                    epg.provide(contract)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            return resp
def main():
    """
    Main show EPGs routine
    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    args = creds.get()
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Download all of the tenants, app profiles, and EPGs
    # and store the names as tuples in a list
    tenants = Tenant.get_deep(session)
    tenants_list = []
    for tenant in tenants:
        tenants_dict = {}
        tenants_dict['name'] = tenant.name

        if tenant.descr:
            tenants_dict['description'] = tenant.descr

        tenants_dict['app-profiles'] = []
        for app in tenant.get_children(AppProfile):
            app_profiles = {'name': app.name}
            if app.descr:
                app_profiles['description'] = app.descr
            app_profiles['epgs'] = []

            for epg in app.get_children(EPG):
                epgs_info = {'name': epg.name}
                if epg.descr:
                    epgs_info['description'] = epg.descr
                epgs_info['endpoints'] = []

                for endpoint in epg.get_children(Endpoint):
                    endpoint_info = {'name': endpoint.name}
                    if endpoint.ip != '0.0.0.0':
                        endpoint_info['ip'] = endpoint.ip
                        try:
                            hostname = socket.gethostbyaddr(endpoint.ip)[0]
                        except socket.error:
                            hostname = None
                        if hostname:
                            endpoint_info['hostname'] = hostname
                    if endpoint.descr:
                        endpoint_info['description'] = endpoint.descr

                    epgs_info['endpoints'].append(endpoint_info)
                app_profiles['epgs'].append(epgs_info)
            tenants_dict['app-profiles'].append(app_profiles)
        tenants_list.append(tenants_dict)

    tenants_info = {'tenants': tenants_list}
    print(yaml.safe_dump(tenants_info, sys.stdout,
                         indent=4, default_flow_style=False))
Example #43
0
                        tabs[k]['class'],
                        tabs[k]['properties'],
                        headers=tabs.get(k).get('headers'))

    tenants = class_query(session, 'fvTenant')
    for t in tenants:
        createTenantSheet(session, workbook, t)
    workbook.close()


if __name__ == "__main__":

    description = 'aci-doc'

    # Gather credentials for ACI
    creds = Credentials('apic', description)
    args = creds.get()

    # Establish an API session to the APIC
    apic = Session(args.url, args.login, args.password)

    if apic.login().ok:
        print("Connected to ACI")

    print("depending on your configuration, this could take a little while...")

    with open('config.yaml', 'r') as config:
        config = yaml.safe_load(config)

    CreateWorkBook(apic, config['filename'], config['tabs'])
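
# The config.yaml consumed above is not shown in this fragment. Based on the keys
# the code reads (config['filename'], config['tabs'], and per-tab 'class',
# 'properties', 'headers'), an equivalent structure would look roughly like this
# (the class and property names are illustrative only):
example_config = {
    'filename': 'aci-doc.xlsx',
    'tabs': {
        'Tenants': {
            'class': 'fvTenant',
            'properties': ['name', 'descr'],
            'headers': ['Name', 'Description'],
        },
    },
}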
Example #44
0
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        THROTTLE_SIZE = 500000 / 8
        # Set the tenant name correctly
        if self._tenant_name == '' and self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)
        elif self._tenant_name == '':
            self.set_tenant_name('acitoolkit')

        # Find all the unique contract providers
        logging.debug('Finding the unique contract providers')
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1
        logging.debug('Found %s unique contract providers',
                      len(unique_providers))

        # Find any duplicate contracts that this provider is providing (remove)
        logging.debug('Finding any duplicate contracts')
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(
                                provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)
                            logging.debug(
                                'duplicate_policies now has %s entries',
                                len(duplicate_policies))

        logging.debug('Removing duplicate contracts')
        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        logging.debug('Generating JSON....')
        # Push all of the Contracts
        logging.debug('Pushing contracts. # of Contract policies: %s',
                      len(self.cdb.get_contract_policies()))
        tenant = Tenant(self._tenant_name)
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = contract_policy.descr[0:127 - (
                contract_policy.descr.count('"') +
                contract_policy.descr.count("'") +
                contract_policy.descr.count('/'))]
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='1',
                                        sToPort='65535',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
            if not self.displayonly:
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    logging.debug('Throttling contracts. Pushing config...')
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    tenant = Tenant(self._tenant_name)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            logging.debug('Pushing remaining contracts')
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # Push all of the EPGs
        logging.debug('Pushing EPGs')
        if not self.displayonly:
            tenant = Tenant(self._tenant_name)
        app = AppProfile(self._app_name, tenant)

        if self._use_ip_epgs:
            # Create a Base EPG
            base_epg = EPG('base', app)
            if self.cdb.has_context_config():
                context_name = self.cdb.get_context_config().name
            else:
                context_name = 'vrf1'
            context = Context(context_name, tenant)
            bd = BridgeDomain('bd', tenant)
            bd.add_context(context)
            base_epg.add_bd(bd)
            if self.displayonly:
                # If display only, just deploy the EPG to leaf 101
                base_epg.add_static_leaf_binding('101',
                                                 'vlan',
                                                 '1',
                                                 encap_mode='untagged')
            else:
                # Deploy the EPG to all of the leaf switches
                nodes = Node.get(apic)
                for node in nodes:
                    if node.role == 'leaf':
                        base_epg.add_static_leaf_binding(node.node,
                                                         'vlan',
                                                         '1',
                                                         encap_mode='untagged')

            # Create the Attribute based EPGs
            logging.debug('Creating Attribute Based EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                if not self.displayonly:
                    # Check if we need to throttle very large configs
                    if len(str(tenant.get_json())) > THROTTLE_SIZE:
                        resp = tenant.push_to_apic(apic)
                        if not resp.ok:
                            return resp
                        tenant = Tenant(self._tenant_name)
                        app = AppProfile(self._app_name, tenant)
                        context = Context(context_name, tenant)
                        bd = BridgeDomain('bd', tenant)
                        bd.add_context(context)
                        if self._use_ip_epgs:
                            base_epg = EPG('base', app)
                            base_epg.add_bd(bd)
                epg = EPG(epg_policy.name, app)

                # Check if the policy has the default 0.0.0.0 IP address
                no_default_endpoint = True
                for node_policy in epg_policy.get_node_policies():
                    if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                        no_default_endpoint = False
                        epg.add_bd(bd)

                # Add all of the IP addresses
                if no_default_endpoint:
                    epg.is_attributed_based = True
                    epg.set_base_epg(base_epg)
                    criterion = AttributeCriterion('criterion', epg)
                    ipaddrs = []
                    for node_policy in epg_policy.get_node_policies():
                        ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                        if not ipaddr.is_multicast:  # Skip multicast addresses. They cannot be IP based EPGs
                            ipaddrs.append(ipaddr)
                    nets = ipaddress.collapse_addresses(ipaddrs)
                    for net in nets:
                        criterion.add_ip_address(str(net))
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)
        else:
            logging.debug('Creating EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                epg = EPG(epg_policy.name, app)
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            return resp
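
# A minimal sketch of the size-based throttling used above (THROTTLE_SIZE and the
# push-then-reset sequence mirror the method; the helper name is illustrative and
# 'apic' is assumed to be a logged-in acitoolkit Session):
from acitoolkit import Tenant

THROTTLE_SIZE = 500000 // 8  # push whenever the serialized tenant JSON nears ~62 KB

def push_if_too_large(apic, tenant, tenant_name):
    """Push the partially built tenant and start a fresh one if it has grown too big."""
    if len(str(tenant.get_json())) > THROTTLE_SIZE:
        resp = tenant.push_to_apic(apic)
        if not resp.ok:
            return tenant, resp
        tenant = Tenant(tenant_name)
    return tenant, None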
import sys
import re
import json
import csv
from acitoolkit import Credentials, Session

# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = 'Simple application that logs on to the APIC and extracts Node IDs, Node names and their serial numbers.'
creds = Credentials('apic', description)
creds.add_argument('--tenant', help='The name of Tenant')
args = creds.get()

# Login to APIC
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
    print('%% Could not login to APIC')
    sys.exit(0)

resp = session.get(
    '/api/class/fabricNodeIdentP.json?rsp-subtree=full&rsp-prop-include=config-only'
)
data = json.loads(resp.text)['imdata']
datacount = int(json.loads(resp.text)['totalCount'])

fname = "NodeReginfo.csv"

with open(fname, "w", newline='') as file:
    csv_file = csv.writer(file)
    csv_file.writerow(["Node ID", "Node Name", "Serial Number"])
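    # A sketch of how the data rows might be written (not in the original fragment;
    # 'nodeId', 'name', and 'serial' are standard fabricNodeIdentP attributes):
    for item in data:
        attrs = item['fabricNodeIdentP']['attributes']
        csv_file.writerow([attrs['nodeId'], attrs['name'], attrs['serial']])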
Example #46
0
                        tabs[k]['class'],
                        tabs[k]['properties'],
                        headers=tabs.get(k).get('headers'))

    tenants = class_query(session, 'fvTenant')
    for t in tenants:
        createTenantSheet(session, workbook, t)
    workbook.close()


if __name__ == "__main__":

    description = 'aci-doc'

    # Gather credentials for ACI
    creds = Credentials('apic', description)
    args = creds.get()

    # Establish an API session to the APIC
    apic = Session(args.url, args.login, args.password)

    if apic.login().ok:
        print("Connected to ACI")

    print("depending on your configuration, this could take a little while...")

    with open('config.yaml', 'r') as config:
        config = yaml.safe_load(config)

    CreateWorkBook(apic, config['filename'], config['tabs'])
Example #47
0
def main():
    '''
    Main Function
    '''
    # Setup Arguments utilizing the ACIToolkit Credentials Method
    description = ('Help to determine EP movement during Maintenance Windows')
    creds = Credentials('apic', description)
    creds.add_argument('-v',
                       '--version',
                       action='version',
                       version='%(prog)s == {}'.format(__version__))
    creds.add_argument("--debug",
                       dest="debug",
                       choices=["debug", "info", "warn", "critical"],
                       default="info",
                       help='Enable debugging output to screen')
    creds.add_argument(
        '--log',
        action='store_true',
        help=
        'Write the output to a log file: {}.log. Automatically adds timestamp to filename'
        .format(__file__.split(".py")[0]))
    creds.add_argument(
        '--list',
        action='store_true',
        help=
        'Print out the list of Tenants / App Profiles / EPGs available to work with'
    )
    creds.add_argument(
        '--filter',
        help=
        'Specify what to filter on. Eg: "tn-mipetrin" or "ap-mipetrin-AppProfile". Use --list to identify what can be used for filtering. Default = None'
    )
    creds.add_argument(
        '--pre',
        help=
        'Write the data to a file of your choosing. Specify your prefix. Format will be JSON and this extension is automatically added'
    )
    creds.add_argument(
        '--post',
        help=
        'Write the data to a file of your choosing. Specify your prefix. Format will be JSON and this extension is automatically added'
    )
    creds.add_argument(
        '--compare',
        nargs=2,
        help=
        'Compare the 2 files you specify. Be sure to pick a PRE and POST file')
    creds.add_argument(
        '--summary',
        type=int,
        help=
        'Optionally, print out detailed summary of identified Endpoints greater than x (provide totals per Tenant/App/EPG/MAC/Encap)'
    )
    args = creds.get()

    # Set up custom logger
    setup_logger(logger, args.debug, args.log)

    # If --summary is enabled, set up globals to utilize the additional calculations throughout the code
    if args.summary:
        global detailed_summary
        global detailed_summary_number
        detailed_summary = True
        detailed_summary_number = args.summary

    # Due to the creds/argparse setup above, the APIC URL, user and password are always requested,
    # even when only doing a local comparison of PRE/POST JSON files.
    # The check below ensures we only log in when NOT doing a comparison; with --compare, any
    # password (even just pressing Enter) is accepted because the APIC is never contacted.
    if not args.compare:
        # Login to APIC only if NOT doing a comparison - as already have the data we need in the local JSON files
        session = Session(args.url, args.login, args.password)
        resp = session.login()

        # Check if the login was successful
        if not resp.ok:
            logger.critical('Could not login to APIC')
            my_error = resp.json()
            logger.critical("Specific Error: {}".format(
                my_error["imdata"][0]["error"]["attributes"]["text"]))
            exit(0)

    # Start timing here; otherwise the time the user spends typing the password would be included
    start_time = time.time()
    logger.debug("Begin Execution of script")

    # Order of precedence is to execute list of tenants, pre capture, post capture, compare
    if args.list:
        print_header("Gathering available information from APIC...")
        get_raw_tenant_info(session)
    elif args.pre:
        print_header("Gathering 'PRE' Endpoints...")

        # Setup Filename for PRE file (using user input) and global pre_suffix
        my_filename_pre = args.pre + pre_suffix

        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_pre, args.filter)
        else:
            get_fvCEp(session, my_filename_pre, "None")
    elif args.post:
        print_header("Gathering 'POST' Endpoints...")

        # Setup Filename for POST file (using user input) and global post_suffix
        my_filename_post = args.post + post_suffix

        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_post, args.filter)
        else:
            get_fvCEp(session, my_filename_post, "None")
    elif args.compare:
        # Ensure *BOTH* the specified PRE and POST files exist; if not, raise an error
        # Identify each file by the suffix automatically appended during --pre / --post generation
        for file in args.compare:
            if pre_suffix in file:
                my_filename_pre = file
            elif post_suffix in file:
                my_filename_post = file
            else:
                logger.critical(
                    "The supplied file names do not contain the expected suffix. Are they the files generated by this script via the --pre / --post options?"
                )
                exit(0)

        # Check that the files do in fact exist and are readable
        if not os.path.isfile(my_filename_pre):
            logger.critical(
                "PRE capture file not found; ensure the PRE capture has been completed and is readable"
            )
            exit(0)

        # Check that the files do in fact exist and are readable
        if not os.path.isfile(my_filename_post):
            logger.critical(
                "POST capture file not found; ensure the POST capture has been completed and is readable"
            )
            exit(0)

        print_header("Analyzing 'PRE' Endpoints...")
        analyze_file(my_filename_pre, "pre")

        print_header("Analyzing 'POST' Endpoints...")
        analyze_file(my_filename_post, "post")

        print_header("Comparing 'PRE' and 'POST' Endpoints...")
        compare_eps()

        print_header("Endpoints with Movements...")
        logger.info("\n" + tabulate(ep_tracker_diff,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        print_header("Endpoints only in PRE capture")
        logger.info("\n" + tabulate(ep_only_in_pre_capture,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        print_header("Endpoints only in POST capture")
        logger.info("\n" + tabulate(ep_only_in_post_capture,
                                    headers=[
                                        "Tenant", "App Profile", "EPG", "MAC",
                                        "Stage", "Node", "Interface", "Encap"
                                    ],
                                    tablefmt="grid"))

        # Check if the --summary option is enabled
        if detailed_summary:
            print_header(
                "(Moved/PRE/POST) Category entries that have a total greater than: {}"
                .format(detailed_summary_number))

            logger.debug(ep_category_summary)
            ep_summary_data = ""  # String object to print out detailed summary that will be built using code below

            # Loop through EP Categories to then be stored in the string object "ep_summary_data"
            for category, entries in ep_category_summary.iteritems():
                ep_summary_data += "\n" + category.upper() + "\n"

                # Then loop through each item within each category to highlight the particular Tenant/App/EPG/MAC/Encap
                for item, number in entries.iteritems():
                    # Check if the current entry has a value greater than or equal to the value specified on the CLI
                    if number >= detailed_summary_number:
                        ep_summary_data += "{:6} == {}\n".format(number, item)

            # Also provide a tally of the total amount of EPs that are in BOTH / PRE / POST - as identified
            grand_total_eps = ep_summary["both"] + ep_summary[
                "pre"] + ep_summary["post"]
            ep_summary_data += "\nGRAND TOTAL\n"
            ep_summary_data += "{:6} EPs across all captures\n".format(
                grand_total_eps)
            logger.info(ep_summary_data)  # Print out the data

        print_header("Summary")
        # Structure of ep_summary: {'pre': 11, 'post': 15, 'compare_ep_move_PRE.json': 11, 'compare_ep_move_POST.json': 15}
        for key, value in sorted(ep_summary.iteritems(), reverse=True):
            # Loop through dictionary and find if they are the .JSON filenames
            if "json" in key:
                if "pre" in key:
                    # Check for _PRE
                    logger.info("PRE Filename: {}".format(key))
                    logger.info("   Endpoints read: {}".format(value))
                    logger.info("   Captured on: {}\n".format(
                        ep_analysis_time["pre"]))
                elif "post" in key:
                    # Check for _POST
                    logger.info("POST Filename: {}".format(key))
                    logger.info("   Endpoints read: {}".format(value))
                    logger.info("   Captured on: {}\n".format(
                        ep_analysis_time["post"]))
                else:
                    logger.warning(
                        "ERROR determining PRE/POST filename in ep_summary"
                    )

        # Print out analysis
        logger.info("Endpoints with movement: {}".format(ep_summary["both"]))
        logger.info("Endpoints only in PRE: {}".format(ep_summary["pre"]))
        logger.info("Endpoints only in POST: {}\n".format(ep_summary["post"]))

        if args.log:
            logger.info("Log file written: {}\n".format(logging_filename))
    else:
        logger.critical(
            "\nSomething wrong with your selections. Please try again or use the --help option\n"
        )
        creds.print_help()

    finish_time = time.time()  # Calculate finish time

    logger.info("#" * 80)
    logger.info("Started analysis @ {}".format(
        time.asctime(time.localtime(start_time))))
    logger.info("Ended analysis @ {}".format(
        time.asctime(time.localtime(finish_time))))
    logger.info("--- Total Execution Time: %s seconds ---" %
                (finish_time - start_time))
    logger.info("#" * 80)
def main():
    """
    Main execution routine

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = 'Simple application that logs on to the APIC and displays all of the Interfaces.'
    creds = Credentials('apic', description)
    creds.add_argument('--tenant', help='The name of Tenant')
    args = creds.get()

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)

    resp = session.get('/api/class/ipv4Addr.json')
    intfs = json.loads(resp.text)['imdata']

    for i in intfs:
        ip = i['ipv4Addr']['attributes']['addr']
        op = i['ipv4Addr']['attributes']['operSt']
        cfg = i['ipv4Addr']['attributes']['operStQual']
        dn = i['ipv4Addr']['attributes']['dn']
        node = dn.split('/')[2]
        intf = re.split(r'\[|\]', dn)[1]
        vrf = re.split(r'/|dom-', dn)[7]
        tn = vrf
        if vrf.find(":") != -1:
            tn = re.search("(.*):(.*)", vrf).group(1)

        check_longest_name(node, "Node")
        check_longest_name(intf, "Interface")
        check_longest_name(ip, "IP Address")
        check_longest_name(cfg, "Admin Status")
        check_longest_name(op, "Status")

        if args.tenant is None:
            if vrf not in data.keys():
                data[vrf] = []
            data[vrf].append((node, intf, ip, cfg, op))
        else:
            if tn == args.tenant:
                if vrf not in data.keys():
                    data[vrf] = []
                data[vrf].append((node, intf, ip, cfg, op))

    for k in data.keys():
        header = 'IP Interface Status for VRF "{}"'.format(k)
        print(header)
        template = '{0:' + str(longest_names["Node"]) + '} ' \
                   '{1:' + str(longest_names["Interface"]) + '} ' \
                   '{2:' + str(longest_names["IP Address"]) + '} ' \
                   '{3:' + str(longest_names["Admin Status"]) + '} ' \
                   '{4:' + str(longest_names["Status"]) + '}'
        print(template.format("Node", "Interface", "IP Address", "Admin Status", "Status"))
        print(template.format('-' * longest_names["Node"],
                              '-' * longest_names["Interface"],
                              '-' * longest_names["IP Address"],
                              '-' * longest_names["Admin Status"],
                              '-' * longest_names["Status"]))
        for rec in sorted(data[k]):
            print(template.format(*rec))
        print('')
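
# A minimal sketch of the dynamic column-width formatting used above (the widths
# here are illustrative; the script derives them with check_longest_name):
widths = {"Node": 8, "Interface": 12, "IP Address": 18, "Admin Status": 12, "Status": 8}
columns = ["Node", "Interface", "IP Address", "Admin Status", "Status"]
template = ' '.join('{%d:%d}' % (i, widths[name]) for i, name in enumerate(columns))
print(template.format(*columns))
print(template.format("node-101", "eth1/1", "10.0.0.1", "enabled", "up"))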
def main():
    """
    Main show EPGs routine
    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    args = creds.get()
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Download all of the tenants, app profiles, and EPGs
    # and store the names as tuples in a list
    tenants = Tenant.get_deep(session)
    tenants_list = []
    for tenant in tenants:
        tenants_dict = {}
        tenants_dict['name'] = tenant.name

        if tenant.descr:
            tenants_dict['description'] = tenant.descr

        tenants_dict['app-profiles'] = []
        for app in tenant.get_children(AppProfile):
            app_profiles = {'name': app.name}
            if app.descr:
                app_profiles['description'] = app.descr
            app_profiles['epgs'] = []

            for epg in app.get_children(EPG):
                epgs_info = {'name': epg.name}
                if epg.descr:
                    epgs_info['description'] = epg.descr
                epgs_info['endpoints'] = []

                for endpoint in epg.get_children(Endpoint):
                    endpoint_info = {'name': endpoint.name}
                    if endpoint.ip != '0.0.0.0':
                        endpoint_info['ip'] = endpoint.ip
                        try:
                            hostname = socket.gethostbyaddr(endpoint.ip)[0]
                        except socket.error:
                            hostname = None
                        if hostname:
                            endpoint_info['hostname'] = hostname
                    if endpoint.descr:
                        endpoint_info['description'] = endpoint.descr

                    epgs_info['endpoints'].append(endpoint_info)
                app_profiles['epgs'].append(epgs_info)
            tenants_dict['app-profiles'].append(app_profiles)
        tenants_list.append(tenants_dict)

    tenants_info = {'tenants': tenants_list}
    yaml.safe_dump(tenants_info,
                   sys.stdout,
                   indent=4,
                   default_flow_style=False)
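For reference, the structure handed to yaml.safe_dump follows a tenants -> app-profiles -> epgs -> endpoints hierarchy. A hand-written example of that shape (all names and addresses below are invented, and the same yaml/sys imports as the script above are assumed):

example = {'tenants': [
    {'name': 'Tenant1',
     'description': 'example tenant',
     'app-profiles': [
         {'name': 'App1',
          'epgs': [
              {'name': 'WebServers',
               'endpoints': [
                   {'name': '00:50:56:aa:bb:cc', 'ip': '10.1.1.10'}
               ]}
          ]}
     ]}
]}
yaml.safe_dump(example, sys.stdout, indent=4, default_flow_style=False)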
Example #50
0
class InterfaceBriefCollector(object):
    def __init__(self, url, login, password):
        # Login to APIC
        self._apic = Session(url, login, password)
        self._if_brief_headers = {
            'l1PhysIf': ['Ethernet Interface', 'VLAN', 'Type', 'Mode', 'Status', 'Reason', 'Speed', 'Port Ch #'],
            'pcAggrIf': ['Port-channel Interface', 'VLAN', 'Type', 'Mode', 'Status', 'Reason', 'Speed', 'Protocol'],
            'l3LbRtdIf': ['Interface', 'Status', 'Description'],
            'tunnelIf': ['Interface', 'Status', 'IP Address', 'Encap type', 'MTU'],
            'sviIf': ['Interface', 'Secondary VLAN(Type)', 'Status', 'Reason'],
            'l3EncRtdIf': [],
            'mgmtMgmtIf': ['Port', 'VRF', 'Status', 'IP Address', 'Speed', 'MTU'],
            'l2ExtIf': [],
            'l2VfcIf': ['Interface', 'Vsan', 'Admin\nMode', 'Admin Trunk Mode', 'Status',
                        'Bind Info', 'Oper Mode', 'Oper Speed (Gbps)']
        }
        self._if_types = self._if_brief_headers.keys()
        if not self._apic.login().ok:
            self._logged_in = False
            print('%% Could not login to APIC')
        else:
            self._logged_in = True
        self._interfaces = []

    @property
    def _all_if_types_as_string(self):
        resp = ''
        for if_type in self._if_types:
            if len(resp):
                resp += ','
            resp += if_type
        return resp

    def _get_query(self, query_url, error_msg):
        resp = self._apic.get(query_url)
        if not resp.ok:
            print error_msg
            print resp.text
            return []
        return resp.json()['imdata']

    def populate_interfaces(self, node_id, intf_id=None):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys.json?query-target=subtree'
                     '&target-subtree-class=%s&rsp-subtree=children&'
                     'rsp-subtree-class=ethpmPhysIf,l1RtMbrIfs,ethpmAggrIf' % (node_id, self._all_if_types_as_string))
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        interfaces = self._get_query(query_url, error_message)
        if intf_id is None:
            self._interfaces = interfaces
        else:
            self._interfaces = []
            for interface in interfaces:
                for if_type in interface:
                    if interface[if_type]['attributes']['id'] == intf_id:
                        self._interfaces.append(interface)

    def _has_interface_type(self, if_type, intf_id=None):
        for interface in self._interfaces:
            if if_type in interface:
                if intf_id is None or intf_id == interface[if_type]['attributes']['id']:
                    return True
        return False

    def _get_interface_type(self, if_type):
        resp = []
        for interface in self._interfaces:
            if if_type in interface:
                resp.append(interface)
        return resp

    def get_node_ids(self, node_id):
        """
        Get the list of node ids to query.
        If node_id is None, get all of the leaf node ids
        :param node_id: String containing a single node id, or None for all leaf switches
        :return: List of strings containing node ids
        """
        if node_id is not None:
            names = [node_id]
        else:
            names = []
            query_url = ('/api/node/class/fabricNode.json?'
                         'query-target-filter=eq(fabricNode.role,"leaf")')
            error_message = 'Could not get switch list from APIC.'
            nodes = self._get_query(query_url, error_message)
            for node in nodes:
                names.append(str(node['fabricNode']['attributes']['id']))
        return names

    @staticmethod
    def convert_to_ascii(data):
        data = str(data).split(',')
        resp = ''
        for letter in data:
            resp += str(unichr(int(letter)))
        return resp

    def _get_interface_type_brief_data(self, if_type, intf_id=None):
        data = []
        for interface in self._interfaces:
            if if_type in interface:
                if intf_id is not None and intf_id != interface[if_type]['attributes']['id']:
                    continue
                if_attrs = interface[if_type]['attributes']
                if if_type == 'mgmtMgmtIf':
                    data.append((if_attrs['id'], '--', if_attrs['adminSt'], '', if_attrs['speed'], if_attrs['mtu']))
                elif if_type == 'l1PhysIf':
                    port_channel = '--'
                    for child in interface[if_type]['children']:
                        if 'l1RtMbrIfs' in child:
                            port_channel = child['l1RtMbrIfs']['attributes']['tSKey']
                        else:
                            oper_attrs = child['ethpmPhysIf']['attributes']
                    data.append((if_attrs['id'], '--', 'eth', oper_attrs['operMode'], oper_attrs['operSt'],
                                 oper_attrs['operStQual'], oper_attrs['operSpeed'], port_channel))
                elif if_type == 'tunnelIf':
                    data.append((if_attrs['id'], if_attrs['operSt'], '--', if_attrs['tType'], if_attrs['cfgdMtu']))
                elif if_type == 'pcAggrIf':
                    for child in interface[if_type]['children']:
                        protocol = '--'
                        if if_attrs['pcMode'] in ['active', 'passive', 'mac-pin']:
                            protocol = 'lacp'
                        elif if_attrs['pcMode'] == 'static':
                            protocol = 'none'
                        if 'ethpmAggrIf' in child:
                            oper_attrs = child['ethpmAggrIf']['attributes']
                    data.append((if_attrs['id'], '--', 'eth', oper_attrs['operMode'], oper_attrs['operSt'],
                                 oper_attrs['operStQual'], oper_attrs['operSpeed'], protocol))
                elif if_type == 'sviIf':
                    data.append((if_attrs['id'], '--', if_attrs['operSt'], if_attrs['operStQual']))
                elif if_type == 'l3LbRtdIf':
                    if len(if_attrs['descr']):
                        description = if_attrs['descr']
                    else:
                        description = '--'
                    data.append((if_attrs['id'], if_attrs['adminSt'], description))
                elif if_type == 'l2VfcIf':
                    raise NotImplementedError
                    # TODO: finish this
        return data

    def show_brief(self, node=None, intf_id=None):
        """
        show interface brief

        :param node: String containing the specific switch id. If none, all switches are used
        :param intf_id: String containing the specific interface id. If none, all interfaces are used
        :return: None
        """
        for node_id in self.get_node_ids(node):
            self.populate_interfaces(node_id, intf_id)

            for if_type in self._if_types:
                if self._has_interface_type(if_type, intf_id):
                    data = self._get_interface_type_brief_data(if_type, intf_id)
                    data.sort(key=lambda tup: tup[0])
                    if len(data):
                        print tabulate(data, headers=self._if_brief_headers[if_type])
                        print
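A minimal usage sketch for InterfaceBriefCollector (the APIC URL and credentials below are placeholders): the session is logged in at construction time, after which show_brief can cover the whole fabric or be narrowed to one leaf and interface.

collector = InterfaceBriefCollector('https://apic.example.com', 'admin', 'password')
if collector._logged_in:
    # All leaf switches, all interfaces
    collector.show_brief()
    # A single interface on a single leaf
    collector.show_brief(node='101', intf_id='eth1/1')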
class InterfaceCollector(object):
    def __init__(self, url, login, password):
        # Login to APIC
        self._apic = Session(url, login, password)
        if not self._apic.login().ok:
            self._logged_in = False
            print '%% Could not login to APIC'
        else:
            self._logged_in = True
        self._interfaces = []
        self._port_channels = []

    def _get_query(self, query_url, error_msg):
        resp = self._apic.get(query_url)
        if not resp.ok:
            print error_msg
            print resp.text
            return []
        return resp.json()['imdata']

    def populate_port_channels(self, node_id, intf_id=None):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys.json?query-target=subtree'
                     '&target-subtree-class=pcAggrIf&rsp-subtree=children&'
                     'rsp-subtree-class=ethpmAggrIf,pcRsMbrIfs' % node_id)
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        port_channels = self._get_query(query_url, error_message)
        if intf_id is None:
            self._port_channels = port_channels
        else:
            self._port_channels = []
            for port_channel in port_channels:
                for if_type in port_channel:
                    if port_channel[if_type]['attributes']['id'] == intf_id:
                        self._port_channels.append(port_channel)

    def populate_interfaces(self, node_id):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys.json?query-target=subtree'
                     '&target-subtree-class=l1PhysIf&rsp-subtree=children&'
                     'rsp-subtree-class=pcAggrMbrIf' % node_id)
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        self._interfaces = self._get_query(query_url, error_message)

    def get_node_ids(self, node_id):
        """
        Get the list of node ids to query.
        If node_id is None, get all of the leaf node ids
        :param node_id: String containing a single node id, or None for all leaf switches
        :return: List of strings containing node ids
        """
        if node_id is not None:
            names = [node_id]
        else:
            names = []
            query_url = ('/api/node/class/fabricNode.json?'
                         'query-target-filter=eq(fabricNode.role,"leaf")')
            error_message = 'Could not get switch list from APIC.'
            nodes = self._get_query(query_url, error_message)
            for node in nodes:
                names.append(str(node['fabricNode']['attributes']['id']))
        return names

    def _get_member_extension(self, port_channel):
        resp = ''
        for child in port_channel['pcAggrIf']['children']:
            if 'pcRsMbrIfs' in child:
                for interface in self._interfaces:
                    if child['pcRsMbrIfs']['attributes']['tDn'] == interface['l1PhysIf']['attributes']['dn']:
                        oper_attr = interface['l1PhysIf']['children'][0]['pcAggrMbrIf']['attributes']
                        if oper_attr['operSt'] == 'module-removed':
                            resp = '(r)'
                        elif oper_attr['operSt'] == 'up':
                            resp = '(P)'
                        elif oper_attr['channelingSt'] == 'individual':
                            resp = "(I)"
                        elif oper_attr['channelingSt'] == 'suspended':
                            resp = "(s)"
                        elif oper_attr['channelingSt'] == 'hot-standby':
                            resp = "(H)"
                        else:
                            resp = "(D)"
                    if resp != '':
                        break
        return resp

    def show_summary(self, node=None, intf_id=None):
        """
        show port-channel summary

        :param node: String containing the specific switch id. If none, all switches are used
        :param intf_id: String containing the specific interface id. If none, all interfaces are used
        :return: None
        """
        for node_id in self.get_node_ids(node):
            self.populate_interfaces(node_id)
            self.populate_port_channels(node_id, intf_id)
            if not len(self._port_channels):
                continue
            print "Switch:", node_id
            print "Flags:  D - Down        P - Up in port-channel (members)"
            print "        I - Individual  H - Hot-standby (LACP only)"
            print "        s - Suspended   r - Module-removed"
            print "        S - Switched    R - Routed"
            print "        U - Up (port-channel)"
            print "        M - Not in use. Min-links not met"
            print "        F - Configuration failed"
            data = []
            for interface in self._port_channels:
                intf_attr = interface['pcAggrIf']['attributes']
                name = intf_attr['id']
                if intf_attr['layer'] == 'Layer2':
                    name += "(S"
                else:
                    name += "(R"

                for child in interface['pcAggrIf']['children']:
                    if 'ethpmAggrIf' in child:
                        oper_attr = child['ethpmAggrIf']['attributes']
                        if oper_attr['operSt'] == 'up':
                            name += "U)"
                        elif intf_attr['suspMinlinks'] == 'yes':
                            name += "M)"
                        else:
                            name += "D)"
                        members = oper_attr['activeMbrs']
                        while ',unspecified,' in members:
                            members = members.replace(',unspecified,', ',')
                        members = members.replace(',unspecified', '')

                members += self._get_member_extension(interface)
                protocol = 'none'
                if intf_attr['pcMode'] in ['active', 'passive', 'mac-pin']:
                    protocol = 'lacp'
                data.append((int(intf_attr['id'][2:]), name, 'eth', protocol, members))
            data.sort(key=lambda tup: tup[0])
            headers = ['Group', 'Port channel', 'Type', 'Protocol', 'Member Ports']
            print tabulate(data, headers=headers)
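As with the brief collector earlier, a short usage sketch for InterfaceCollector (placeholder URL and credentials) would be:

collector = InterfaceCollector('https://apic.example.com', 'admin', 'password')
if collector._logged_in:
    collector.show_summary()            # every leaf, every port-channel
    collector.show_summary(node='101')  # restrict the summary to one switch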
Example #52
0
def main():
    """
    Main delete tenant routine
    :return: None
    """
    # Get all the arguments
    description = ('Simple application that logs in to the APIC and deletes '
                   'tenants whose names match the specified string.')
    creds = Credentials(['apic', 'nosnapshotfiles'], description)
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--startswith',
                       default=None,
                       help='String to match that starts the tenant name')
    group.add_argument('--endswith',
                       default=None,
                       help='String to match that ends the tenant name')
    group.add_argument('--exactmatch',
                       default=None,
                       help='String that exactly matches the tenant name')
    group.add_argument('--contains',
                       default=None,
                       help='String that is contained in the tenant name')
    creds.add_argument(
        '--force',
        action='store_true',
        help='Attempt to remove the tenants without prompting for confirmation'
    )
    args = creds.get()

    # Login to the APIC
    apic = Session(args.url, args.login, args.password)
    resp = apic.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Get all of the Tenants
    tenants = Tenant.get(apic)

    # Find the list of Tenants to delete according to command line options
    tenants_to_delete = []
    for tenant in tenants:
        if args.startswith is not None:
            if tenant.name.startswith(args.startswith):
                tenants_to_delete.append(tenant)
        elif args.endswith is not None:
            if tenant.name.endswith(args.endswith):
                tenants_to_delete.append(tenant)
        elif args.exactmatch is not None:
            if args.exactmatch == tenant.name:
                tenants_to_delete.append(tenant)
        elif args.contains is not None:
            if args.contains in tenant.name:
                tenants_to_delete.append(tenant)

    # Query the user to be sure of deletion
    if not args.force:
        for tenant in list(tenants_to_delete):
            prompt = 'Delete tenant %s ? [y/N]' % tenant.name
            try:
                resp = raw_input(prompt)
            except NameError:
                resp = input(prompt)
            if not resp.lower().startswith('y'):
                tenants_to_delete.remove(tenant)
                print('Skipping tenant', tenant.name)

    # Delete the tenants
    for tenant in tenants_to_delete:
        tenant.mark_as_deleted()
        resp = tenant.push_to_apic(apic)
        if resp.ok:
            print('Deleted tenant', tenant.name)
        else:
            print('Could not delete tenant', tenant.name)
            print resp.text
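The four mutually exclusive options above amount to a single name predicate. A compact sketch of the same selection logic (the helper name and keyword arguments are introduced here only for illustration) is:

def matches(name, startswith=None, endswith=None, exactmatch=None, contains=None):
    """Return True if the tenant name satisfies whichever single option was given."""
    if startswith is not None:
        return name.startswith(startswith)
    if endswith is not None:
        return name.endswith(endswith)
    if exactmatch is not None:
        return name == exactmatch
    if contains is not None:
        return contains in name
    return False

tenants_to_delete = [t for t in tenants
                     if matches(t.name, startswith=args.startswith, endswith=args.endswith,
                                exactmatch=args.exactmatch, contains=args.contains)]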
Example #53
0
def get_data_from_apic(url, username, password):
    """
    Gets the Endpoint data from the APIC

    :param url: String containing the URL of the APIC
    :param username: String containing the username to login to the APIC
    :param password: String containing the password to login to the APIC
    :return: None
    """
    ep_db = {}

    # Login to the APIC
    print 'Logging in to APIC...'
    session = Session(url, username, password, subscription_enabled=False)
    resp = session.login()
    if not resp.ok:
        print 'Could not login to APIC'
        sys.exit(0)

    # Get the endpoint from the APIC
    print 'Getting endpoints from the APIC....'
    endpoints = Endpoint.get(session)

    # Loop through the endpoints and count them on a per EPG basis
    print 'Counting the endpoints....'
    for endpoint in endpoints:
        epg = endpoint.get_parent()
        app = epg.get_parent()
        tenant = app.get_parent()
        if tenant.name not in ep_db:
            ep_db[tenant.name] = {}
        if app.name not in ep_db[tenant.name]:
            ep_db[tenant.name][app.name] = {}
        if epg.name not in ep_db[tenant.name][app.name]:
            ep_db[tenant.name][app.name][epg.name] = 0
        ep_db[tenant.name][app.name][epg.name] += 1

    # Write the results to a JSON formatted dictionary
    print 'Translating results to JSON...'
    epgs = {'name': 'epgs', 'children': []}
    for tenant in ep_db:
        tenant_json = {'name': tenant, 'children': []}
        for app in ep_db[tenant]:
            app_json = {'name': app, 'children': []}
            for epg in ep_db[tenant][app]:
                epg_json = {'name': epg, 'size': ep_db[tenant][app][epg]}
                app_json['children'].append(epg_json)
            tenant_json['children'].append(app_json)
        epgs['children'].append(tenant_json)

    # Write the formatted JSON to a file
    print 'Writing results to a file....'
    try:
        with open('static/epgs.json', 'w') as epg_file:
            epg_file.write(json.dumps(epgs))
    except IOError:
        print '%% Unable to open configuration file', 'static/epgs.json'
        sys.exit(0)
    except ValueError:
        print '%% File could not be decoded as JSON.'
        sys.exit(0)
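The file written above is a simple tenant -> app -> EPG hierarchy with per-EPG endpoint counts. An illustrative, hand-written instance of that structure (names and counts are made up) looks like this:

epgs = {
    'name': 'epgs',
    'children': [
        {'name': 'Tenant1',
         'children': [
             {'name': 'App1',
              'children': [
                  {'name': 'WebServers', 'size': 12},  # 12 endpoints counted in this EPG
                  {'name': 'DBServers', 'size': 3}
              ]}
         ]}
    ]
}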
Example #54
0
class InterfaceDetailedCollector(object):
    def __init__(self, url, login, password):
        # Login to APIC
        self._apic = Session(url, login, password)
        if not self._apic.login().ok:
            self._logged_in = False
            print('%% Could not login to APIC')
        else:
            self._logged_in = True
        self._interfaces = []

    def _get_query(self, query_url, error_msg):
        resp = self._apic.get(query_url)
        if not resp.ok:
            print error_msg
            print resp.text
            return []
        return resp.json()['imdata']

    def _populate_beacon_states(self, data):
        for beacon_data in data:
            if 'eqptLocLed' not in beacon_data:
                continue
            dn = beacon_data['eqptLocLed']['attributes']['dn']
            oper_state = beacon_data['eqptLocLed']['attributes']['operSt']
            if 'leafport-' in dn:
                port_num = dn.partition('/leafport-')[2].partition('/')[0]
                mod_num = dn.partition('/lcslot-')[2].partition('/')[0]
                node_num = dn.partition('/node-')[2].partition('/')[0]
                beacon_interface_id = 'eth' + mod_num + '/' + port_num
                beacon_node_id = '/node-%s/' % node_num
                for interface in self._interfaces:
                    if not interface.is_ether():
                        continue
                    if interface.id == beacon_interface_id:
                        if beacon_node_id in dn:
                            interface.beacon_state = oper_state

    def populate_detailed_interfaces(self, node_id, intf_id=None):
        query_url = ('/api/mo/topology/pod-1/node-%s/sys.json?query-target=subtree'
                     '&target-subtree-class=l1PhysIf,pcAggrIf,l3LbRtdIf,tunnelIf,sviIf,l3EncRtdIf,'
                     'mgmtMgmtIf,l2ExtIf,l2VfcIf,eqptLocLed&rsp-subtree=full&'
                     'rsp-subtree-class=ethpmPhysIf,ethpmPortCap,l1RtMbrIfs,ethpmAggrIf,'
                     'rmonEtherStats,rmonIfIn,rmonIfOut,rmonIfStorm,eqptIngrTotal5min,'
                     'eqptEgrTotal5min,l1EeeP,rmonDot3Stats' % node_id)
        error_message = 'Could not collect APIC data for switch %s.' % node_id
        interfaces = self._get_query(query_url, error_message)
        self._interfaces = []
        if intf_id is None:
            for interface in interfaces:
                self._interfaces.append(Interface(interface))
        else:
            for interface in interfaces:
                for if_type in interface:
                    if if_type == 'eqptLocLed':
                        continue
                    if interface[if_type]['attributes']['id'] == intf_id:
                        self._interfaces.append(Interface(interface))
        self._populate_beacon_states(interfaces)


    def get_node_ids(self, node_id):
        """
        Get the list of node ids to query.
        If node_id is None, get all of the leaf node ids
        :param node_id: String containing a single node id, or None for all leaf switches
        :return: List of strings containing node ids
        """
        if node_id is not None:
            names = [node_id]
        else:
            names = []
            query_url = ('/api/node/class/fabricNode.json?'
                         'query-target-filter=eq(fabricNode.role,"leaf")')
            error_message = 'Could not get switch list from APIC.'
            nodes = self._get_query(query_url, error_message)
            for node in nodes:
                names.append(str(node['fabricNode']['attributes']['id']))
        return names

    def show_detailed(self, node=None, intf_id=None):
        """
        show interface

        :param node: String containing the specific switch id. If none, all switches are used
        :param intf_id: String containing the specific interface id. If none, all interfaces are used
        :return: None
        """
        for node_id in self.get_node_ids(node):
            print('Switch', node_id)
            self.populate_detailed_interfaces(node_id, intf_id)
            for interface in self._interfaces:
                if interface.if_type == 'l1PhysIf':
                    if interface.is_ether() or interface.is_pc() or interface.is_tun():
                        state = interface.oper_st
                        rsn = interface.oper_st_qual
                        if state is None:
                            state = "unknown"
                            rsn = "unknown"
                        if state == 'link-up':
                            # see ethpm_copy_eth_port_log_info()
                            # link-up state is physical up, but not operationally up
                            state = 'down'
                        if state == 'up':
                            if not interface.is_tun() and interface.switching_st == 'disabled':
                                print "%s is %s (%s)" % (interface.id, state, "out-of-service")
                            else:
                                print "%s is %s" % (interface.id, state)
                        elif interface.oper_st_qual == "err-disabled":
                            print "%s is %s (%s)" % (interface.id, state, interface.oper_err_dis_qual)
                        else:
                            print "%s is %s (%s)" % (interface.id, state, rsn)

                    print('admin state is', interface.admin_st)
                    if interface.is_member_pc():
                        print "  Belongs to %s" % interface.port_channel_id
                    if not interface.descr == '':
                        print('  Port description is', interface.descr)
                    print '  Hardware:', interface.port_cap_speed, 'Ethernet, address:', interface.address, \
                          '(bia', interface.backplane_mac, ')'
                    print('  MTU', interface.mtu, 'bytes, BW', interface.bw, 'Kbit, DLY', interface.delay, 'usec')
                    print '  reliability', '%s/255' % interface.reliability, \
                          'txload %d/255, rxload %d/255' % (interface.tx_load, interface.rx_load)
                    print('  Encapsulation ARPA, medium is broadcast')
                    if interface.layer != 'Layer2':
                        print('  Port mode is routed')
                    else:
                        print "  Port mode is %s" % interface.mode
                    if not interface.is_mgmt() and interface.oper_mode == 'ips':
                        duplex = 'auto'
                    else:
                        duplex = interface.oper_duplex
                    print "  %s-duplex, %sb/s%s" % (duplex, interface.speed, interface.fcot_str)
                    if (interface.is_ether() and not interface.is_sub()) or interface.is_mgmt():
                        if not interface.is_mgmt():
                            print('  FEC (forward-error-correction) :', interface.oper_fec_mode)
                        print "  Beacon is turned", interface.beacon_state
                        print "  Auto-Negotiation is turned", interface.auto_neg
                    if interface.is_ether() or interface.is_pc() or interface.is_mgmt():
                        print "  Input flow-control is off, output flow-control is off"
                        if interface.mdix == 'auto':
                            print "  Auto-mdix is turned on"
                        else:
                            print "  Auto-mdix is turned off"
                    elif interface.is_loop():
                        print "  Auto-mdix is turned off"
                    if interface.is_ether() and not interface.is_sub() and interface.port_cap_fcot_capable == '1':
                        if interface.port_cap_rate_mode == "1":
                            rateMode = "dedicated"
                        elif interface.port_cap_rate_mode == "2":
                            rateMode = "shared"
                        else:
                            rateMode = interface.port_cap_rate_mode
                        print "  Rate mode is %s" % rateMode

                    if interface.is_ether():
                        if interface.span_mode == "not-a-span-dest":
                            print('  Switchport monitor is off')
                        else:
                            print('  Switchport monitor is on')

                    if interface.is_ether() or interface.is_pc() or interface.is_mgmt():
                        print('  EtherType is', interface.dot1q_ethertype)

                    if interface.is_ether():
                        if interface.eee_state == "not-applicable":
                            print "  EEE (efficient-ethernet) : n/a"
                        elif interface.eee_state == "enable":
                            print "  EEE (efficient-ethernet) : Operational"
                        elif interface.eee_state == "disable":
                            print "  EEE (efficient-ethernet) : Disabled"
                        elif interface.eee_state == "disagreed":
                            print "  EEE (efficient-ethernet) : Disagreed"

                        if interface.last_link_st_chg.startswith('1970-'):
                            print "  Last link flapped never"
                        else:
                            last_flap = dateutil.parser.parse(interface.last_link_st_chg).replace(tzinfo=None)
                            seconds_since_flap = datetime.datetime.now() - last_flap
                            print "  Last link flapped", seconds_since_flap

                    if interface.is_ether() or interface.is_pc() or interface.is_svi():
                        last_clear = 'never'
                        if interface.clear_ts != 'never':
                            last_clear = dateutil.parser.parse(interface.clear_ts).replace(tzinfo=None)
                        print('  Last clearing of "show interface" counters %s' % last_clear)
                        if not interface.is_svi():
                            print('  ', interface.reset_ctr,'interface resets')
                    elif interface.is_tun():
                        pass
                    if interface.is_svi():
                        pass
                    elif interface.is_ether() or interface.is_pc():
                        print "  30 seconds input rate %d bits/sec, %d packets/sec" % \
                               (interface.input_bitrate_30sec, interface.input_packetrate_30sec)
                        print "  30 seconds output rate %d bits/sec, %d packets/sec" % \
                               (interface.output_bitrate_30sec, interface.output_packetrate_30sec)
                        print "  Load-Interval #2: 5 minute (300 seconds)"
                        print "    input rate %d bps, %d pps; output rate %d bps, %d pps" % \
                                (interface.input_bitrate_300sec, interface.input_packetrate_300sec,
                                 interface.output_bitrate_300sec, interface.output_packetrate_300sec)
                        if interface.layer == 'Layer3':
                            print "  L3 in Switched:"
                            print "    ucast: %d pkts, %d bytes - mcast: %d pkts, %d bytes" % \
                                  (0, 0, 0, 0)
                                                        # (stats.l3InSwitchedUcastPackets,
                                                        #  stats.l3InSwitchedUcastBytes,
                                                        #  stats.l3InSwitchedMcastPackets,
                                                        #  stats.l3InSwitchedMcastBytes)
                            print "  L3 out Switched:"
                            print "    ucast: %d pkts, %d bytes - mcast: %d pkts, %d bytes" % \
                                  (0, 0, 0, 0)
                                                        # (stats.l3OutSwitchedUcastPackets,
                                                        #  stats.l3OutSwitchedUcastBytes,
                                                        #  stats.l3OutSwitchedMcastPackets,
                                                        #  stats.l3OutSwitchedMcastBytes)
                    if (interface.is_ether() or interface.is_pc()) and not interface.is_sub():
                        print "  RX"
                        ucast = "%d unicast packets" % interface.rx_unicast_packets
                        mcast = "%d multicast packets" % interface.rx_multicast_packets
                        bcast = "%d broadcast packets" % interface.rx_broadcast_packets
                        print "    %s  %s  %s" % (ucast, mcast, bcast)

                        pkts = "%d input packets" % interface.rx_input_packets
                        bytes = "%d bytes" % interface.rx_input_bytes
                        print "    %s  %s" % (pkts, bytes)

                        print '   ', interface.rx_oversize_packets, 'jumbo packets ', \
                              interface.rx_storm_supression_packets, 'storm suppression bytes'

                        print '   ', interface.rx_runts, 'runts', interface.rx_oversize_packets, \
                              'giants', interface.rx_crc, 'CRC  0 no buffer'

                        print '   ', interface.rx_error_packets, 'input error', \
                              interface.rx_runts, 'short frame  0 overrun  0 underrun  0 ignored'

                        print '    0 watchdog  0 bad etype drop', interface.bad_proto_drop, \
                              'bad proto drop  0 if down drop'
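A short usage sketch for InterfaceDetailedCollector (placeholder URL and credentials), mirroring the collectors shown earlier:

collector = InterfaceDetailedCollector('https://apic.example.com', 'admin', 'password')
if collector._logged_in:
    collector.show_detailed(node='101', intf_id='eth1/5')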
Example #55
0
    def test_dual_inheritance_contract(self):
        self.delete_tenant()
        config_json = {
            "apic": {
                "user_name": APIC_USERNAME,
                "password": APIC_PASSWORD,
                "ip_address": APIC_IP,
                "use_https": False
            },
            "inheritance_policies": [
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "childepg"
                    },
                    "allowed": True,
                    "enabled": True
                },
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "parentepg1"
                    },
                    "allowed": True,
                    "enabled": False
                },
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "parentepg2"
                    },
                    "allowed": True,
                    "enabled": False
                }
            ]
        }

        args = TestArgs()
        apic = Session(APIC_URL, APIC_USERNAME, APIC_PASSWORD)
        apic.login()
        self.setup_tenant_with_2_parent_epgs(apic)
        tool = execute_tool(args, cli_mode=False)
        tool.add_config(config_json)
        time.sleep(2)

        print 'STARTING VERIFICATION...'
        # Verify that the contract is now inherited by the child EPG
        self.verify_inherited(apic)

        self.delete_tenant()
Example #56
0
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        THROTTLE_SIZE = 500000 / 8
        # Set the tenant name correctly
        if self._tenant_name == '' and self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)
        elif self._tenant_name == '':
            self.set_tenant_name('acitoolkit')

        # Find all the unique contract providers
        logging.debug('Finding the unique contract providers')
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1
        logging.debug('Found %s unique contract providers', len(unique_providers))

        # Find any duplicate contracts that this provider is providing (remove)
        logging.debug('Finding any duplicate contracts')
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(
                                provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)
                            logging.debug('duplicate_policies now has %s entries', len(duplicate_policies))

        logging.debug('Removing duplicate contracts')
        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        tenant_names = []
        tenant_names.append(self._tenant_name)

        # delete all the unwanted epgs
        tenant = Tenant(self._tenant_name)
        existing_epgs = []
        if Tenant.exists(apic, tenant):
            tenants = Tenant.get_deep(
                apic,
                names=tenant_names,
                limit_to=[
                    'fvTenant',
                    'fvAp',
                    'vzFilter',
                    'vzEntry',
                    'vzBrCP',
                    'vzSubj',
                    'vzRsSubjFiltAtt'])
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            app = appProfiles[0]
            existing_epgs = app.get_children(EPG)
        else:

            app = AppProfile(self._app_name, tenant)

        for existing_epg in existing_epgs:
            matched = False
            if existing_epg.name != "base":
                for epg_policy in self.cdb.get_epg_policies():
                    if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                        matched = True
                if not matched:
                    existing_epg.mark_as_deleted()

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            logging.debug('Pushing EPGS by deleting unwanted epgs ')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # delete all the unwanted contracts
        tenants = Tenant.get_deep(
            apic,
            names=tenant_names,
            limit_to=[
                'fvTenant',
                'fvAp',
                'vzFilter',
                'vzEntry',
                'vzBrCP',
                'vzSubj',
                'vzRsSubjFiltAtt'])
        tenant = tenants[0]
        existing_contracts = tenant.get_children(Contract)
        for existing_contract in existing_contracts:
            matched = False
            for contract_policy in self.cdb.get_contract_policies():
                if existing_contract.descr.split("::")[1] == contract_policy.descr.split("::")[1]:
                    matched = True
            if not matched:
                existing_contract.mark_as_deleted()
                exist_contract_providing_epgs = existing_contract.get_all_providing_epgs()
                for exist_contract_providing_epg in exist_contract_providing_epgs:
                    exist_contract_providing_epg.mark_as_deleted()
                exist_contract_consuming_epgs = existing_contract.get_all_consuming_epgs()
                for exist_contract_consuming_epg in exist_contract_consuming_epgs:
                    exist_contract_consuming_epg.mark_as_deleted()

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            logging.debug('Pushing contracts by deleting unwanted contracts')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        filterEntry_list = []

        logging.debug('Generating JSON....')
        # Push all of the Contracts
        logging.debug('Pushing contracts. # of Contract policies: %s', len(self.cdb.get_contract_policies()))
        tenant = Tenant(self._tenant_name)
        if Tenant.exists(apic, tenant):
            tenants = Tenant.get_deep(
                apic,
                names=tenant_names,
                limit_to=[
                    'fvTenant',
                    'vzFilter',
                    'vzEntry',
                    'vzBrCP',
                    'vzSubj',
                    'vzRsSubjFiltAtt'])
            tenant = tenants[0]
            existing_contracts = tenant.get_children(Contract)
        else:
            existing_contracts = tenant.get_children(Contract)
        # removing the unwanted contractsubject filters for each contract subject
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            for existing_contract in existing_contracts:
                if existing_contract.descr.split("::")[1] == contract_policy.descr.split("::")[1]:
                    for child_contractSubject in existing_contract.get_children(ContractSubject):
                        for child_filter in child_contractSubject.get_filters():
                            matched = False
                            for whitelist_policy in contract_policy.get_whitelist_policies():
                                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                                if child_filter.name == entry_name + '_Filter':
                                    matched = True
                                    continue
                            if not matched:
                                # TBD need to check this. this is not working
                                child_contractSubject._remove_relation(child_filter)
                                child_filter._remove_attachment(child_contractSubject)
                                logging.debug('removing filter ' + child_filter.name)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            logging.debug('Pushing contracts by deleting unwanted filters')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # if num of contract_subjects is 0 then remove it finally
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = contract_policy.descr[0:127 -
                                                   (contract_policy.descr.count('"') +
                                                    contract_policy.descr.count("'") +
                                                       contract_policy.descr.count('/'))]
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='unspecified',
                                        sToPort='unspecified',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
                filterEntry_list.append(entry_name)
            if not self.displayonly:
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    logging.debug('Throttling contracts. Pushing config...')
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    tenant = Tenant(self._tenant_name)

            if self.displayonly:
                print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
            else:
                logging.debug('Pushing remaining contracts')
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # Push all of the EPGs
        logging.debug('Pushing EPGs')
        if not self.displayonly:
            tenants = Tenant.get_deep(apic, names=tenant_names)
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            app = appProfiles[0]

        if self._use_ip_epgs:
            # Create a Base EPG
            base_epg = EPG('base', app)
            if self.cdb.has_context_config():
                context_name = self.cdb.get_context_config().name
            else:
                context_name = 'vrf1'
            context = Context(context_name, tenant)
            bd = BridgeDomain('bd', tenant)
            bd.add_context(context)
            base_epg.add_bd(bd)
            if self.displayonly:
                # If display only, just deploy the EPG to leaf 101
                base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
            else:
                # Deploy the EPG to all of the leaf switches
                nodes = Node.get(apic)
                for node in nodes:
                    if node.role == 'leaf':
                        base_epg.add_static_leaf_binding(node.node, 'vlan', '1', encap_mode='untagged')

            # Create the Attribute based EPGs
            logging.debug('Creating Attribute Based EPGs')
            existing_epgs = app.get_children(EPG)
            for epg_policy in self.cdb.get_epg_policies():
                if not self.displayonly:
                    # Check if we need to throttle very large configs
                    if len(str(tenant.get_json())) > THROTTLE_SIZE:
                        resp = tenant.push_to_apic(apic)
                        if not resp.ok:
                            return resp
                        tenant = Tenant(self._tenant_name)
                        app = AppProfile(self._app_name, tenant)
                        context = Context(context_name, tenant)
                        bd = BridgeDomain('bd', tenant)
                        bd.add_context(context)
                        if self._use_ip_epgs:
                            base_epg = EPG('base', app)
                            base_epg.add_bd(bd)

                matched = False
                for existing_epg in existing_epgs:
                    if existing_epg.name != "base":
                        if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                            matched = True
                            break

                consumed_contracts = []
                provided_contracts = []
                if matched is True:
                    consumed_contracts = existing_epg.get_all_consumed()
                    provided_contracts = existing_epg.get_all_provided()
                    epg = existing_epg
                else:
                    epg = EPG(epg_policy.name, app)

                # Check if the policy has the default 0.0.0.0 IP address
                no_default_endpoint = True
                for node_policy in epg_policy.get_node_policies():
                    if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                        no_default_endpoint = False
                        epg.add_bd(bd)

                # Add all of the IP addresses
                if no_default_endpoint:
                    epg.is_attributed_based = True
                    epg.set_base_epg(base_epg)
                    criterion = AttributeCriterion('criterion', epg)
                    ipaddrs = []
                    # check if the existing nodes are there in the present config,if not delete them
                    for node_policy in epg_policy.get_node_policies():
                        ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                        if not ipaddr.is_multicast:  # Skip multicast addresses. They cannot be IP based EPGs
                            ipaddrs.append(ipaddr)
                    nets = ipaddress.collapse_addresses(ipaddrs)
                    for net in nets:
                        criterion.add_ip_address(str(net))
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        existing = False
                        for existing_consumed_contract in consumed_contracts:
                            if name == existing_consumed_contract.name:
                                existing = True
                                contract = existing_consumed_contract
                        if not existing:
                            contract = Contract(name, tenant)
                            epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            existing = False
                            for existing_provided_contract in provided_contracts:
                                if name == existing_provided_contract.name:
                                    existing = True
                                    contract = existing_provided_contract
                            if not existing:
                                contract = Contract(name, tenant)
                        epg.provide(contract)
        else:
            logging.debug('Creating EPGs')
            tenants = Tenant.get_deep(apic, names=tenant_names)
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            if len(appProfiles) > 0:
                app = appProfiles[0]
            else:
                app = AppProfile(self._app_name, tenant)

            existing_epgs = app.get_children(EPG)

            for epg_policy in self.cdb.get_epg_policies():

                matched = False
                for existing_epg in existing_epgs:
                    if existing_epg.name != "base":
                        if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                            matched = True
                            break

                consumed_contracts = []
                provided_contracts = []
                if matched is True:
                    consumed_contracts = existing_epg.get_all_consumed()
                    provided_contracts = existing_epg.get_all_provided()
                epg = EPG(epg_policy.name, app)
                epg.descr = epg_policy.descr[0:127]

                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        existing = False
                        for existing_consumed_contract in consumed_contracts:
                            if name == existing_consumed_contract.name:
                                existing = True
                                contract = existing_consumed_contract
                        if not existing:
                            contract = Contract(name, tenant)
                            epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            existing = False
                            for existing_provided_contract in provided_contracts:
                                if name == existing_provided_contract.name:
                                    existing = True
                                    contract = existing_provided_contract
                            if not existing:
                                contract = Contract(name, tenant)
                        epg.provide(contract)

        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # remove the unwanted filters
        existing_filters = tenant.get_children(Filter)
        for existing_filter_entry in existing_filters:
            matched = False
            for filterEntry in filterEntry_list:
                if filterEntry + '_Filter' == existing_filter_entry.name:
                    matched = True
            if not matched:
                existing_filter_entry.mark_as_deleted()
        if self.displayonly:
            print json.dumps(tenant.get_json(), indent=4, sort_keys=True)
        else:
            resp = tenant.push_to_apic(apic)
            return resp
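One pattern worth calling out in the method above is the throttling check: the configuration is pushed incrementally whenever the pending tenant JSON grows past THROTTLE_SIZE, and a fresh Tenant object is started for the next batch. A stripped-down sketch of that pattern, assuming the same acitoolkit Tenant/Session objects as above (items_to_configure and add_item_to_tenant are hypothetical stand-ins for whatever is being attached to the tenant):

THROTTLE_SIZE = 500000 / 8  # bytes of JSON to accumulate before pushing

tenant = Tenant('acitoolkit')
for item in items_to_configure:          # hypothetical iterable of config items
    add_item_to_tenant(tenant, item)     # hypothetical helper that attaches objects to the tenant
    if len(str(tenant.get_json())) > THROTTLE_SIZE:
        resp = tenant.push_to_apic(apic)
        if not resp.ok:
            raise RuntimeError(resp.text)
        tenant = Tenant('acitoolkit')    # start a fresh batch
resp = tenant.push_to_apic(apic)         # push whatever is left over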
Example #57
0
    def test_dual_inheritance_contract_delete_both_relations(self):
        config_json = {
            "apic": {
                "user_name": APIC_USERNAME,
                "password": APIC_PASSWORD,
                "ip_address": APIC_IP,
                "use_https": False
            },
            "inheritance_policies": [
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "childepg"
                    },
                    "allowed": True,
                    "enabled": True
                },
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "parentepg1"
                    },
                    "allowed": True,
                    "enabled": False
                },
                {
                    "epg": {
                        "tenant": "inheritanceautomatedtest",
                        "epg_container": {
                            "name": "myl3out",
                            "container_type": "l3out"
                        },
                        "name": "parentepg2"
                    },
                    "allowed": True,
                    "enabled": False
                }
            ]
        }

        args = TestArgs()
        apic = Session(APIC_URL, APIC_USERNAME, APIC_PASSWORD)
        apic.login()
        self.setup_tenant_with_2_parent_epgs(apic)
        tool = execute_tool(args, cli_mode=False)
        tool.add_config(config_json)
        time.sleep(4)

        # Verify that the contract is now inherited by the child EPG
        self.verify_inherited(apic)

        print 'REMOVING 1 CONTRACT'

        # Remove contracts
        tenant = Tenant('inheritanceautomatedtest')
        l3out = OutsideL3('myl3out', tenant)
        contract = self.get_contract(tenant)
        parent_epg1 = OutsideEPG('parentepg1', l3out)
        parent_epg1.provide(contract)
        parent_epg1.dont_provide(contract)
        parent_epg2 = OutsideEPG('parentepg2', l3out)
        parent_epg2.provide(contract)
        parent_epg2.dont_provide(contract)
        resp = tenant.push_to_apic(apic)
        self.assertTrue(resp.ok)

        print 'STARTING VERIFICATION'

        # Verify that the contract is still inherited by the child EPG
        time.sleep(4)
        self.verify_not_inherited(apic)

        self.delete_tenant()