def main():
    """
    Main execution routine.

    Parse credentials and arguments from the command line, log in to the
    APIC, and either pull the tenant configuration into a raw JSON file or
    push a raw JSON configuration file to the APIC.

    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Application dealing with tenant configuration. '
                   'It can download a tenant configuration from the APIC '
                   'and store it as raw JSON in a file. '
                   'It can also push a tenant configuration stored as raw '
                   'JSON in a file to the APIC.')
    creds = Credentials(('apic', 'nosnapshotfiles'), description)
    creds.add_argument('--config', default=None,
                       help='Configuration file to push/pull tenant configuration')
    creds.add_argument('--tenant', default=None, help='Tenant name')
    group = creds.add_mutually_exclusive_group()
    group.add_argument('--push-to-apic', action='store_true',
                       help='Push the tenant configuration file to the APIC')
    group.add_argument('--pull-from-apic', action='store_true',
                       # BUG FIX: original adjacent literals concatenated to
                       # "...APIC andstore in..." -- a trailing space was missing.
                       help=('Pull the tenant configuration from the APIC and '
                             'store in the specified configuration file'))

    # Get the command line arguments
    args = creds.get()

    # Sanity check the command line arguments
    if args.config is None:
        print('%% No configuration file given.')
        creds.print_help()
        return
    if args.tenant is None:
        print('%% No Tenant name given.')
        creds.print_help()
        return
    if not args.push_to_apic and not args.pull_from_apic:
        print('%% No direction (push-to-apic/pull-from-apic) given.')
        creds.print_help()
        return

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Do the work
    if args.pull_from_apic:
        pull_config_from_apic(session, args.tenant, args.config)
    if args.push_to_apic:
        push_config_to_apic(session, args.tenant, args.config)
def main():
    """
    Main execution routine.

    Downloads a tenant configuration from the APIC into a raw JSON file, or
    pushes a raw JSON file back to the APIC, depending on the direction flag
    given on the command line.

    NOTE(review): this is a second, near-identical definition of main() in
    this file; at import time it shadows the earlier one, which becomes dead
    code. Consider removing one of the two copies.

    :return: None
    """
    # Credentials come from the command line if provided, otherwise from the
    # environment variables file ~/.profile
    description = (
        'Application dealing with tenant configuration. '
        'It can download a tenant configuration from the APIC and store it as raw JSON in a file. '
        'It can also push a tenant configuration stored as raw JSON in a file to the APIC.'
    )
    creds = Credentials(('apic', 'nosnapshotfiles'), description)
    creds.add_argument(
        '--config', default=None,
        help='Configuration file to push/pull tenant configuration')
    creds.add_argument('--tenant', default=None, help='Tenant name')

    # push/pull are mutually exclusive directions
    group = creds.add_mutually_exclusive_group()
    group.add_argument(
        '--push-to-apic', action='store_true',
        help='Push the tenant configuration file to the APIC')
    group.add_argument(
        '--pull-from-apic', action='store_true',
        # BUG FIX: missing space between the two literals previously
        # rendered as "...APIC andstore in..." in --help output.
        help=('Pull the tenant configuration from the APIC and '
              'store in the specified configuration file'))

    # Parse the command line arguments
    args = creds.get()

    # Sanity check: all three of config / tenant / direction are required
    if args.config is None:
        print('%% No configuration file given.')
        creds.print_help()
        return
    if args.tenant is None:
        print('%% No Tenant name given.')
        creds.print_help()
        return
    if not args.push_to_apic and not args.pull_from_apic:
        print('%% No direction (push-to-apic/pull-from-apic) given.')
        creds.print_help()
        return

    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Do the work in the requested direction
    if args.pull_from_apic:
        pull_config_from_apic(session, args.tenant, args.config)
    if args.push_to_apic:
        push_config_to_apic(session, args.tenant, args.config)
def main():
    """
    Main function.

    Help determine endpoint (EP) movement during maintenance windows:
    capture endpoints (fvCEp) from the APIC before (--pre) and after
    (--post) a change, then compare the two local JSON captures (--compare)
    to report moved, PRE-only and POST-only endpoints.

    :return: None
    """
    # Setup Arguments utilizing the ACIToolkit Credentials Method
    description = 'Help to determine EP movement during Maintenance Windows'
    creds = Credentials('apic', description)
    creds.add_argument('-v', '--version', action='version',
                       version='%(prog)s == {}'.format(__version__))
    creds.add_argument("--debug", dest="debug",
                       choices=["debug", "info", "warn", "critical"],
                       default="info",
                       help='Enable debugging output to screen')
    creds.add_argument('--log', action='store_true',
                       help=('Write the output to a log file: {}.log. '
                             'Automatically adds timestamp to filename'
                             ).format(__file__.split(".py")[0]))
    creds.add_argument('--list', action='store_true',
                       help='Print out the list of Tenants / App Profiles / '
                            'EPGs available to work with')
    creds.add_argument('--filter',
                       help='Specify what to filter on. Eg: "tn-mipetrin" or '
                            '"ap-mipetrin-AppProfile". Use --list to identify '
                            'what can be used for filtering. Default = None')
    creds.add_argument('--pre',
                       help='Write the data to a file of your choosing. '
                            'Specify your prefix. Format will be JSON and '
                            'this extension is automatically added')
    creds.add_argument('--post',
                       help='Write the data to a file of your choosing. '
                            'Specify your prefix. Format will be JSON and '
                            'this extension is automatically added')
    creds.add_argument('--compare', nargs=2,
                       help='Compare the 2 files you specify. Be sure to '
                            'pick a PRE and POST file')
    creds.add_argument('--summary', type=int,
                       help='Optionally, print out detailed summary of '
                            'identified Endpoints greater than x (provide '
                            'totals per Tenant/App/EPG/MAC/Encap)')
    args = creds.get()

    # Set up custom logger
    setup_logger(logger, args.debug, args.log)

    # If --summary enabled, set up globals to then utilize the additional
    # calculations throughout the code
    if args.summary:
        global detailed_summary
        global detailed_summary_number
        detailed_summary = True
        detailed_summary_number = args.summary

    # Due to creds/argparse above, the user must always provide APIC / user /
    # password, even for a purely local comparison of PRE/POST JSON files.
    # Only perform the actual login when NOT doing a comparison: with
    # --compare any password (even just hitting enter) is accepted, since the
    # data needed is already in the local JSON files.
    if not args.compare:
        session = Session(args.url, args.login, args.password)
        resp = session.login()
        # Check if the login was successful
        if not resp.ok:
            logger.critical('Could not login to APIC')
            my_error = resp.json()
            logger.critical("Specific Error: {}".format(
                my_error["imdata"][0]["error"]["attributes"]["text"]))
            exit(0)

    # Start the clock only now so the time the user spends typing the
    # password is not counted in the execution time.
    # BUG FIX: start_time is assigned unconditionally (outside the
    # "if not args.compare" block) because the closing report below reads it
    # on every path, including --compare.
    start_time = time.time()
    logger.debug("Begin Execution of script")

    # Order of precedence is to execute list of tenants, pre capture,
    # post capture, compare
    if args.list:
        print_header("Gathering available information from APIC...")
        get_raw_tenant_info(session)
    elif args.pre:
        print_header("Gathering 'PRE' Endpoints...")
        # Setup filename for PRE file (using user input) and global pre_suffix
        my_filename_pre = args.pre + pre_suffix
        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_pre, args.filter)
        else:
            get_fvCEp(session, my_filename_pre, "None")
    elif args.post:
        print_header("Gathering 'POST' Endpoints...")
        # Setup filename for POST file (using user input) and global post_suffix
        my_filename_post = args.post + post_suffix
        # Confirm if user has selected any --filter
        if args.filter:
            get_fvCEp(session, my_filename_post, args.filter)
        else:
            get_fvCEp(session, my_filename_post, "None")
    elif args.compare:
        # Classify the two supplied names by the suffix auto-appended during
        # the --pre and --post file generation.
        # BUG FIX: both names start as None so that two files with the same
        # suffix produce a clean error below instead of a NameError.
        my_filename_pre = None
        my_filename_post = None
        # Renamed loop variable: 'file' shadows a builtin
        for capture_file in args.compare:
            if pre_suffix in capture_file:
                my_filename_pre = capture_file
            elif post_suffix in capture_file:
                my_filename_post = capture_file
            else:
                logger.critical(
                    "Issue with file names supplied as don't contain the suffix defined. Are they the files generated by this script via the --pre / --post options?"
                )
                exit(0)

        # Check that the files do in fact exist and are readable
        if my_filename_pre is None or not os.path.isfile(my_filename_pre):
            logger.critical(
                "Need to ensure the PRE capture has been completed and readable"
            )
            exit(0)
        if my_filename_post is None or not os.path.isfile(my_filename_post):
            logger.critical(
                "Need to ensure the POST capture has been completed and readable"
            )
            exit(0)

        print_header("Analyzing 'PRE' Endpoints...")
        analyze_file(my_filename_pre, "pre")

        print_header("Analyzing 'POST' Endpoints...")
        analyze_file(my_filename_post, "post")

        print_header("Comparing 'PRE' and 'POST' Endpoints...")
        compare_eps()

        # Same column set for all three result tables
        table_headers = ["Tenant", "App Profile", "EPG", "MAC",
                         "Stage", "Node", "Interface", "Encap"]

        print_header("Endpoints with Movements...")
        logger.info("\n" + tabulate(ep_tracker_diff, headers=table_headers,
                                    tablefmt="grid"))

        print_header("Endpoints only in PRE capture")
        logger.info("\n" + tabulate(ep_only_in_pre_capture,
                                    headers=table_headers, tablefmt="grid"))

        print_header("Endpoints only in POST capture")
        logger.info("\n" + tabulate(ep_only_in_post_capture,
                                    headers=table_headers, tablefmt="grid"))

        # Check if the --summary option is enabled
        if detailed_summary:
            print_header(
                "(Moved/PRE/POST) Category entries that have a total greater than: {}"
                .format(detailed_summary_number))
            logger.debug(ep_category_summary)

            # String object to print out detailed summary, built below
            ep_summary_data = ""
            # Loop through EP categories.
            # PORTABILITY FIX: iteritems() is Python-2-only; items() behaves
            # the same for this iteration on both Python 2 and 3.
            for category, entries in ep_category_summary.items():
                ep_summary_data += "\n" + category.upper() + "\n"
                # Then loop through each item within each category to
                # highlight the particular Tenant/App/EPG/MAC/Encap
                for item, number in entries.items():
                    # Only entries meeting the CLI-specified threshold
                    if number >= detailed_summary_number:
                        ep_summary_data += "{:6} == {}\n".format(number, item)

            # Also provide a tally of the total amount of EPs that are in
            # BOTH / PRE / POST - as identified
            grand_total_eps = (ep_summary["both"] + ep_summary["pre"] +
                               ep_summary["post"])
            ep_summary_data += "\nGRAND TOTAL\n"
            ep_summary_data += "{:6} EPs across all captures\n".format(
                grand_total_eps)
            logger.info(ep_summary_data)

        # Print out the data
        print_header("Summary")
        # Structure of ep_summary{'pre': 11, 'post': 15,
        # 'compare_ep_move_PRE.json': 11, 'compare_ep_move_POST.json': 15}
        for key, value in sorted(ep_summary.items(), reverse=True):
            # Loop through dictionary and find if they are the .JSON filenames
            if "json" in key:
                if "pre" in key:  # Check for _PRE
                    logger.info("PRE Filename: {}".format(key))
                    logger.info(" Endpoints read: {}".format(value))
                    logger.info(" Captured on: {}\n".format(
                        ep_analysis_time["pre"]))
                elif "post" in key:  # Check for _POST
                    logger.info("POST Filename: {}".format(key))
                    logger.info(" Endpoints read: {}".format(value))
                    logger.info(" Captured on: {}\n".format(
                        ep_analysis_time["post"]))
                else:
                    # TYPO FIX in log text: "determiniation" -> "determination"
                    logger.warning(
                        "ERROR with determination of PRE/POST filename in ep_summary"
                    )

        # Print out analysis
        logger.info("Endpoints with movement: {}".format(ep_summary["both"]))
        logger.info("Endpoints only in PRE: {}".format(ep_summary["pre"]))
        logger.info("Endpoints only in POST: {}\n".format(ep_summary["post"]))

        if args.log:
            logger.info("Log file written: {}\n".format(logging_filename))
    else:
        logger.critical(
            "\nSomething wrong with your selections. Please try again or use the --help option\n"
        )
        creds.print_help()

    finish_time = time.time()  # Calculate finish time
    logger.info("#" * 80)
    logger.info("Started analysis @ {}".format(
        time.asctime(time.localtime(start_time))))
    logger.info("Ended analysis @ {}".format(
        time.asctime(time.localtime(finish_time))))
    logger.info("--- Total Execution Time: %s seconds ---" %
                (finish_time - start_time))
    logger.info("#" * 80)